/*
 * kvm-s390.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { NULL }
};
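
/*
 * VCPU_STAT() expands to an (offset, KVM_STAT_VCPU) pair, so every entry
 * above binds a debugfs file name to a counter embedded in struct
 * kvm_vcpu. Generic KVM code exposes these counters read-only under its
 * debugfs directory, typically /sys/kernel/debug/kvm/<name> (the exact
 * path depends on where debugfs is mounted).
 */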

static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
                r = 1;
                break;
        default:
                r = 0;
        }
        return r;
}
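
/*
 * Hedged userspace sketch for the device ioctls above; "kvm_fd" is an
 * illustrative descriptor obtained by opening /dev/kvm:
 *
 *      if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_GMAP) == 1)
 *              ... host supports guest address-space mappings (gmap) ...
 */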

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}
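
/*
 * Hedged usage sketch for KVM_S390_INTERRUPT on a VM fd; "vm_fd" and
 * "cookie" are illustrative names, not defined in this file:
 *
 *      struct kvm_s390_interrupt s390int = {
 *              .type   = KVM_S390_INT_VIRTIO,
 *              .parm64 = cookie,
 *      };
 *      ioctl(vm_fd, KVM_S390_INTERRUPT, &s390int);
 */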

int kvm_arch_init_vm(struct kvm *kvm)
{
        int rc;
        char debug_name[16];

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        kvm->arch.gmap = gmap_alloc(current->mm);
        if (!kvm->arch.gmap)
                goto out_nogmap;

        return 0;
out_nogmap:
        debug_unregister(kvm->arch.dbf);
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}
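
/*
 * Note on the allocations above: the SCA (system control area) is one
 * zeroed page shared by all vcpus of this guest. kvm_arch_vcpu_create()
 * below hooks each vcpu's SIE control block into sca->cpu[id].sda and
 * marks the vcpu in the MSB-first sca->mcn bit mask, which is why the
 * bit operations on mcn use 63 - id.
 */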

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
        if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                (__u64) vcpu->arch.sie_block)
                vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        smp_mb();
        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        vcpu->arch.gmap = vcpu->kvm->arch.gmap;
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_regs(&vcpu->arch.host_fpregs);
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->arch.guest_acrs);
        gmap_enable(vcpu->arch.gmap);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        gmap_disable(vcpu->arch.gmap);
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->arch.guest_acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
}

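/*
 * Generic KVM calls kvm_arch_vcpu_load()/kvm_arch_vcpu_put() whenever a
 * vcpu is scheduled in or out and around vcpu ioctls, so the pair above
 * keeps the host's floating point and access registers intact while the
 * guest copies are live, and enables/disables the guest address-space
 * mapping (gmap) to match.
 */
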
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        vcpu->arch.sie_block->prefix    = 0UL;
        vcpu->arch.sie_block->ihcpu     = 0xffff;
        vcpu->arch.sie_block->cputm     = 0UL;
        vcpu->arch.sie_block->ckc       = 0UL;
        vcpu->arch.sie_block->todpr     = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
}
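
/*
 * The values above mirror the architected initial CPU reset described in
 * the Principles of Operation: PSW, prefix, CPU timer and clock
 * comparator are cleared, gcr[0] and gcr[14] receive the architected
 * control-register reset values (external-interruption and machine-check
 * mask bits respectively), and the FPC is reloaded with zero via lfpc.
 */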

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | CPUSTAT_SM);
        vcpu->arch.sie_block->ecb   = 6;
        vcpu->arch.sie_block->eca   = 0xC1002001U;
        vcpu->arch.sie_block->fac   = (int) (long) facilities;
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
        if (!vcpu)
                goto out;

        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
                                        get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        BUG_ON(!kvm->arch.sca);
        if (!kvm->arch.sca->cpu[id].sda)
                kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
        vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
        vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
        set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        init_waitqueue_head(&vcpu->arch.local_int.wq);
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kfree(vcpu);
out:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        restore_access_regs(vcpu->arch.guest_acrs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

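/*
 * These handlers back the generic KVM_GET_REGS/KVM_SET_REGS (and
 * _SREGS/_FPU) vcpu ioctls. A hedged userspace sketch, with "vcpu_fd"
 * as an illustrative vcpu file descriptor:
 *
 *      struct kvm_regs regs;
 *      ioctl(vcpu_fd, KVM_GET_REGS, &regs);
 *      regs.gprs[2] = 0;
 *      ioctl(vcpu_fd, KVM_SET_REGS, &regs);
 */
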
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

static void __vcpu_run(struct kvm_vcpu *vcpu)
{
        /* gprs 14 and 15 are kept in the SIE block (gg14/gg15) while in SIE */
        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        kvm_s390_deliver_pending_interrupts(vcpu);

        vcpu->arch.sie_block->icptcode = 0;
        local_irq_disable();
        kvm_guest_enter();
        local_irq_enable();
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
                VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        }
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        local_irq_disable();
        kvm_guest_exit();
        local_irq_enable();

        /* fetch gprs 14 and 15 back from the SIE block */
        memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

rerun_vcpu:
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_INTR:
        case KVM_EXIT_S390_RESET:
                break;
        default:
                BUG();
        }

        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

        might_fault();

        do {
                __vcpu_run(vcpu);
                rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (rc == SIE_INTERCEPT_RERUNVCPU)
                goto rerun_vcpu;

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed;
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}
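
/*
 * Hedged sketch of the matching userspace run loop; "vcpu_fd" and
 * "run_size" (obtained via KVM_GET_VCPU_MMAP_SIZE) are illustrative:
 *
 *      struct kvm_run *run = mmap(NULL, run_size, PROT_READ | PROT_WRITE,
 *                                 MAP_SHARED, vcpu_fd, 0);
 *      while (ioctl(vcpu_fd, KVM_RUN, 0) == 0) {
 *              if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *                      ... decode run->s390_sieic.icptcode/ipa/ipb ...
 *      }
 */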

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        unsigned char archmode = 1;
        int prefix;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                /* location 163 holds the architectural-mode id */
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
                        vcpu->arch.guest_gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
                        &vcpu->arch.guest_acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vcpu(vcpu, &s390int);
                break;
        }
        case KVM_S390_STORE_STATUS:
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
        default:
                r = -EINVAL;
        }
        return r;
}
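
/*
 * Hedged sketch of a typical reset-and-start sequence built on the vcpu
 * ioctls above; "vcpu_fd" and "start_addr" are illustrative:
 *
 *      psw_t psw = {
 *              .mask = 0x0000000180000000UL,   // z/Arch addressing bits
 *              .addr = start_addr,
 *      };
 *      ioctl(vcpu_fd, KVM_S390_INITIAL_RESET, 0);
 *      ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw);
 */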

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        /* A few sanity checks. We can have exactly one memory slot, which
           has to start at guest physical zero and which has to begin and
           end on a 1 MB segment boundary in userland (the low 20 bits are
           checked below). The memory in userland may be fragmented into
           different vmas. It is okay to mmap() and munmap() stuff in this
           slot after doing this call at any time */

        if (mem->slot)
                return -EINVAL;

        if (mem->guest_phys_addr)
                return -EINVAL;

        if (mem->userspace_addr & 0xffffful)
                return -EINVAL;

        if (mem->memory_size & 0xffffful)
                return -EINVAL;

        if (!user_alloc)
                return -EINVAL;

        return 0;
}
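
/*
 * Hedged sketch of a memory slot that satisfies the checks above;
 * "vm_fd", "backing" and "mem_size" are illustrative:
 *
 *      struct kvm_userspace_memory_region mem = {
 *              .slot            = 0,
 *              .guest_phys_addr = 0,
 *              .userspace_addr  = (unsigned long) backing, // 1 MB aligned
 *              .memory_size     = mem_size,                // 1 MB multiple
 *      };
 *      ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */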

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot old,
                                int user_alloc)
{
        int rc;

        rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
                mem->guest_phys_addr, mem->memory_size);
        if (rc)
                printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

static int __init kvm_s390_init(void)
{
        int ret;

        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (ret)
                return ret;

        /*
         * guests can ask for up to 255+1 double words, we need a full page
         * to hold the maximum amount of facilities. On the other hand, we
         * only set facilities that are known to work in KVM.
         */
        facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!facilities) {
                kvm_exit();
                return -ENOMEM;
        }
        /* take the first two facility doublewords and mask out anything
         * that is not supported under KVM */
        memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
        facilities[0] &= 0xff00fff3f47c0000ULL;
        facilities[1] &= 0x201c000000000000ULL;
        return 0;
}

static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) facilities);
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);