KVM: s390: Fix user triggerable bug in dead code
arch/s390/kvm/kvm-s390.c (pandora-kernel.git)
/*
 * kvm-s390.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { NULL }
};
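
/*
 * Each entry above becomes a file in KVM's debugfs directory (typically
 * /sys/kernel/debug/kvm when debugfs is mounted at the usual place), so a
 * counter can be inspected with a plain read; a minimal sketch:
 *
 *	cat /sys/kernel/debug/kvm/exit_instruction
 */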

static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}
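
/*
 * Userspace sketch: KVM_S390_ENABLE_SIE is issued on the /dev/kvm fd
 * itself, before any VM exists. A minimal example, assuming the usual
 * open() of /dev/kvm and eliding error handling:
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	if (ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0) < 0)
 *		perror("KVM_S390_ENABLE_SIE");
 */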

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
                r = 1;
                break;
        default:
                r = 0;
        }
        return r;
}
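
/*
 * Userspace sketch: the capabilities reported above are probed with the
 * generic KVM_CHECK_EXTENSION ioctl on the /dev/kvm fd; illustrative only:
 *
 *	int has_psw = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_PSW);
 *	// 1 for the capabilities listed in the switch above, 0 otherwise
 */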

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}
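
/*
 * Userspace sketch: injecting a floating interrupt through the vm fd.
 * struct kvm_s390_interrupt and the type constants come from <linux/kvm.h>;
 * vm_fd is assumed to come from KVM_CREATE_VM:
 *
 *	struct kvm_s390_interrupt s390int = {
 *		.type = KVM_S390_INT_VIRTIO,
 *		.parm64 = 0,		// illustrative parameter
 *	};
 *	if (ioctl(vm_fd, KVM_S390_INTERRUPT, &s390int) < 0)
 *		perror("KVM_S390_INTERRUPT");
 */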

int kvm_arch_init_vm(struct kvm *kvm)
{
        int rc;
        char debug_name[16];

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        kvm->arch.gmap = gmap_alloc(current->mm);
        if (!kvm->arch.gmap)
                goto out_nogmap;

        return 0;
out_nogmap:
        debug_unregister(kvm->arch.dbf);
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
        if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                (__u64) vcpu->arch.sie_block)
                vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        smp_mb();
        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        vcpu->arch.gmap = vcpu->kvm->arch.gmap;
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_regs(&vcpu->arch.host_fpregs);
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->arch.guest_acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->arch.guest_acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial CPU reset in the POP, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        vcpu->arch.sie_block->prefix    = 0UL;
        vcpu->arch.sie_block->ihcpu     = 0xffff;
        vcpu->arch.sie_block->cputm     = 0UL;
        vcpu->arch.sie_block->ckc       = 0UL;
        vcpu->arch.sie_block->todpr     = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                    CPUSTAT_SM |
                                                    CPUSTAT_STOPPED);
        vcpu->arch.sie_block->ecb   = 6;
        vcpu->arch.sie_block->eca   = 0xC1002001U;
        vcpu->arch.sie_block->fac   = (int) (long) facilities;
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
        if (!vcpu)
                goto out;

        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
                                        get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        BUG_ON(!kvm->arch.sca);
        if (!kvm->arch.sca->cpu[id].sda)
                kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
        vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
        vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
        set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        init_waitqueue_head(&vcpu->arch.local_int.wq);
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kfree(vcpu);
out:
        return ERR_PTR(rc);
}
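
/*
 * Note on the code above: the SCA (system control area) is shared by all
 * vcpus of a VM. Each vcpu publishes the address of its SIE control block
 * in sca->cpu[id].sda, the mcn bitmask marks which entries are valid, and
 * scaoh/scaol hand the high and low halves of the SCA address to the SIE
 * block.
 */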

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}
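
/*
 * Userspace sketch: the reset above is reachable via the vcpu fd; the
 * ioctl takes no argument and vcpu_fd is assumed to come from
 * KVM_CREATE_VCPU:
 *
 *	if (ioctl(vcpu_fd, KVM_S390_INITIAL_RESET, 0) < 0)
 *		perror("KVM_S390_INITIAL_RESET");
 */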

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        restore_access_regs(vcpu->arch.guest_acrs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}
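
/*
 * Userspace sketch: setting the initial PSW of a stopped vcpu. The ioctl
 * carries a struct kvm_s390_psw from <linux/kvm.h>; the mask and address
 * values below are purely illustrative:
 *
 *	struct kvm_s390_psw psw = {
 *		.mask = 0x0000000180000000ULL,	// illustrative PSW mask
 *		.addr = 0x10000,		// illustrative entry point
 *	};
 *	if (ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw) < 0)
 *		perror("KVM_S390_SET_INITIAL_PSW");
 */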

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

static void __vcpu_run(struct kvm_vcpu *vcpu)
{
        int rc;

        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        kvm_s390_deliver_pending_interrupts(vcpu);

        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));

        vcpu->arch.sie_block->icptcode = 0;
        local_irq_disable();
        kvm_guest_enter();
        local_irq_enable();
        rc = sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
        local_irq_disable();
        kvm_guest_exit();
        local_irq_enable();

        if (rc) {
                VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        }
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);

        memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}
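
/*
 * Note on the code above: kvm_guest_enter()/kvm_guest_exit() mark the task
 * as running guest code for RCU and cputime accounting. They are called
 * with interrupts disabled so that no tick or interrupt can observe the
 * accounting state half-updated; sie64a() itself runs with interrupts
 * enabled and returns on interception or on a host fault.
 */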

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

rerun_vcpu:
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

        might_fault();

        do {
                __vcpu_run(vcpu);
                rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (rc == SIE_INTERCEPT_RERUNVCPU)
                goto rerun_vcpu;

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}
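
/*
 * Userspace sketch: the canonical loop driving the function above. run
 * points at the vcpu's kvm_run area, mmap'ed from the vcpu fd with the
 * size reported by KVM_GET_VCPU_MMAP_SIZE; exit handling is trimmed to
 * the s390 interception exit:
 *
 *	while (ioctl(vcpu_fd, KVM_RUN, 0) >= 0) {
 *		if (run->exit_reason == KVM_EXIT_S390_SIEIC) {
 *			// handle run->s390_sieic.icptcode / ipa / ipb
 *		}
 *	}
 */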

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        unsigned char archmode = 1;
        int prefix;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        /*
         * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
         * copying in vcpu load/put. Let's update our copies before we save
         * them into the save area.
         */
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->arch.guest_acrs);

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
                        vcpu->arch.guest_gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
                        &vcpu->arch.guest_acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}
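
/*
 * Userspace sketch: triggering the store above through the vcpu fd. The
 * ioctl argument is a guest address, or one of the two magic values
 * handled at the top of the function ((unsigned long) -1 is
 * KVM_S390_STORE_STATUS_NOADDR in this tree):
 *
 *	if (ioctl(vcpu_fd, KVM_S390_STORE_STATUS,
 *		  KVM_S390_STORE_STATUS_NOADDR) < 0)
 *		perror("KVM_S390_STORE_STATUS");
 */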

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vcpu(vcpu, &s390int);
                break;
        }
        case KVM_S390_STORE_STATUS:
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
        default:
                r = -EINVAL;
        }
        return r;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        /* A few sanity checks. We can have exactly one memory slot which has
           to start at guest physical zero and which has to be located at a
           1 MB (segment) boundary in userland and whose size has to be a
           multiple of 1 MB. The memory in userland may be fragmented into
           various different vmas. It is okay to mmap() and munmap() stuff
           in this slot after doing this call at any time */

        if (mem->slot)
                return -EINVAL;

        if (mem->guest_phys_addr)
                return -EINVAL;

        if (mem->userspace_addr & 0xffffful)
                return -EINVAL;

        if (mem->memory_size & 0xffffful)
                return -EINVAL;

        if (!user_alloc)
                return -EINVAL;

        return 0;
}
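
/*
 * Userspace sketch: a region that satisfies the checks above -- slot 0,
 * guest physical address 0, size a multiple of 1 MB. The backing address
 * must itself be 1 MB aligned; how the aligned anonymous mapping behind
 * "backing" is obtained is elided here:
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size = 256 << 20,		// illustrative: 256 MB
 *		.userspace_addr = (__u64) backing,	// 1 MB aligned mmap
 *	};
 *	if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem) < 0)
 *		perror("KVM_SET_USER_MEMORY_REGION");
 */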

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot old,
                                int user_alloc)
{
        int rc;

        rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
                mem->guest_phys_addr, mem->memory_size);
        if (rc)
                printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
        return;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

static int __init kvm_s390_init(void)
{
        int ret;
        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (ret)
                return ret;

        /*
         * guests can ask for up to 255+1 double words, we need a full page
         * to hold the maximum amount of facilities. On the other hand, we
         * only set facilities that are known to work in KVM.
         */
        facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!facilities) {
                kvm_exit();
                return -ENOMEM;
        }
        memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
        facilities[0] &= 0xff00fff3f47c0000ULL;
        facilities[1] &= 0x001c000000000000ULL;
        return 0;
}
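
/*
 * Note on kvm_s390_init(): the STFLE facility list copied from the lowcore
 * describes what the host machine offers; the two AND masks then hide
 * every facility bit that KVM cannot virtualize, so guests only ever see
 * facilities that are known to work under SIE.
 */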

static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) facilities);
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);