/*
 * kvm-s390.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

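/*
 * Export the per-vcpu event counters through debugfs. Each entry maps
 * a file name to the offset of a counter in struct kvm_vcpu.stat (via
 * VCPU_STAT above); generic KVM code creates the files, typically
 * under /sys/kernel/debug/kvm.
 */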
struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { NULL }
};

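/*
 * One zeroed page holding the facility list (STFLE bits) advertised to
 * guests; it is filled and filtered in kvm_s390_init() below.
 */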
static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
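/*
 * KVM_S390_ENABLE_SIE: s390_enable_sie() converts the calling
 * process's address space to a page table layout the SIE instruction
 * can run on, so userspace should issue this early, before setting up
 * guest memory.
 */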
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
                r = 1;
                break;
        default:
                r = 0;
        }
        return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

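/*
 * Set up the arch-specific parts of a VM: the system control area
 * (SCA) that SIE uses to track this guest's vcpus, the per-VM s390
 * debug feature log, and the floating interrupt list.
 */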
struct kvm *kvm_arch_create_vm(void)
{
        struct kvm *kvm;
        int rc;
        char debug_name[16];

        rc = s390_enable_sie();
        if (rc)
                goto out_nokvm;

        rc = -ENOMEM;
        kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
        if (!kvm)
                goto out_nokvm;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_nosca;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        return kvm;
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_nosca:
        kfree(kvm);
out_nokvm:
        return ERR_PTR(rc);
}

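/*
 * Free one vcpu; clear its SCA slot first (if the slot still points at
 * this vcpu's SIE block) so SIE can no longer reach the freed page.
 */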
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                (__u64) vcpu->arch.sie_block)
                vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        smp_mb();
        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        kvm_free_physmem(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        cleanup_srcu_struct(&kvm->srcu);
        kfree(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}

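/*
 * SIE does not save the floating point and access registers for us,
 * so load/put swap the host copies against the guest copies around
 * each period in which the vcpu is scheduled in.
 */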
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_regs(&vcpu->arch.host_fpregs);
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->arch.guest_acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
}

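/*
 * Note: the CR0/CR14 values set below (0xE0, 0xC2000000) are
 * presumably the architected reset defaults for the
 * external-interruption and machine-check subclass masks; see the
 * Principles of Operation on initial CPU reset.
 */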
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        vcpu->arch.sie_block->prefix    = 0UL;
        vcpu->arch.sie_block->ihcpu     = 0xffff;
        vcpu->arch.sie_block->cputm     = 0UL;
        vcpu->arch.sie_block->ckc       = 0UL;
        vcpu->arch.sie_block->todpr     = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
}

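/*
 * First-time setup of the SIE control block: run the guest in
 * z/Architecture mode, point it at the filtered facility list, and
 * set up the clock comparator timer and tasklet that wake the vcpu
 * from wait state.
 */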
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
        set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
        vcpu->arch.sie_block->ecb   = 2;
        vcpu->arch.sie_block->eca   = 0xC1002001U;
        vcpu->arch.sie_block->fac   = (int) (long) facilities;
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
}

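/*
 * Allocate the vcpu and its SIE control block, register the block in
 * this cpu's SCA entry, and link the vcpu's local interrupt state
 * into the VM's floating interrupt structure.
 */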
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
        int rc = -ENOMEM;

        if (!vcpu)
                goto out_nomem;

        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
                                        get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        BUG_ON(!kvm->arch.sca);
        if (!kvm->arch.sca->cpu[id].sda)
                kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
        vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
        vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        init_waitqueue_head(&vcpu->arch.local_int.wq);
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kfree(vcpu);
out_nomem:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

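/*
 * One trip through SIE: deliver pending interrupts, enter the guest
 * with sie64a() (accounted as guest time via kvm_guest_enter/exit),
 * and leave the intercept code in the SIE block for the caller. The
 * two 16-byte memcpys shadow guest r14/r15 in the SIE block (gg14)
 * across the run.
 */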
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        kvm_s390_deliver_pending_interrupts(vcpu);

        vcpu->arch.sie_block->icptcode = 0;
        local_irq_disable();
        kvm_guest_enter();
        local_irq_enable();
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
                VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        }
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        local_irq_disable();
        kvm_guest_exit();
        local_irq_enable();

        memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

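/*
 * The vcpu run loop: reload the memory slot into the SIE block when
 * requested, run __vcpu_run() until an intercept needs userspace or a
 * signal arrives, then translate the outcome into kvm_run exit data.
 */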
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

rerun_vcpu:
        if (vcpu->requests)
                if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
                        kvm_s390_vcpu_set_mem(vcpu);

        /* verify that memory has been registered */
        if (!vcpu->arch.sie_block->gmslm) {
                vcpu_put(vcpu);
                VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
                return -EINVAL;
        }

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_INTR:
        case KVM_EXIT_S390_RESET:
                break;
        default:
                BUG();
        }

        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

        might_fault();

        do {
                __vcpu_run(vcpu);
                rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (rc == SIE_INTERCEPT_RERUNVCPU)
                goto rerun_vcpu;

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed;
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}

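/*
 * Store into guest memory either through the prefixed view (logical
 * addresses, used for KVM_S390_STORE_STATUS_PREFIXED) or at the
 * absolute address.
 */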
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        const unsigned char archmode = 1;
        int prefix;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
                        vcpu->arch.guest_gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
                        &vcpu->arch.guest_acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}

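/*
 * Dispatch the vcpu-level ioctls: interrupt injection, store status,
 * initial PSW, and initial reset.
 */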
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vcpu(vcpu, &s390int);
                break;
        }
        case KVM_S390_STORE_STATUS:
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
        default:
                r = -EINVAL;
        }
        return r;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        /* A few sanity checks. We can have exactly one memory slot which has
           to start at guest virtual zero and which has to be located at a
           page boundary in userland and which has to end at a page boundary.
           The memory in userland may be fragmented into various different
           vmas. It is okay to mmap() and munmap() stuff in this slot after
           doing this call at any time. */

        if (mem->slot)
                return -EINVAL;

        if (mem->guest_phys_addr)
                return -EINVAL;

        if (mem->userspace_addr & (PAGE_SIZE - 1))
                return -EINVAL;

        if (mem->memory_size & (PAGE_SIZE - 1))
                return -EINVAL;

        if (!user_alloc)
                return -EINVAL;

        return 0;
}

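/*
 * After a slot change, force every vcpu out of SIE (via a stop request
 * with ACTION_RELOADVCPU_ON_STOP) so it picks up the new memory layout
 * through KVM_REQ_MMU_RELOAD on its next run.
 */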
void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot old,
                                int user_alloc)
{
        int i;
        struct kvm_vcpu *vcpu;

        /* request update of sie control block for all available vcpus */
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
                        continue;
                kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
        }
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
        return gfn;
}

static int __init kvm_s390_init(void)
{
        int ret;
        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (ret)
                return ret;

        /*
         * guests can ask for up to 255+1 double words, we need a full page
         * to hold the maximum amount of facilities. On the other hand, we
         * only set facilities that are known to work in KVM.
         */
        facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!facilities) {
                kvm_exit();
                return -ENOMEM;
        }
        stfle(facilities, 1);
        facilities[0] &= 0xff00fff3f0700000ULL;
        return 0;
}

static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) facilities);
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);