KVM: s390: Make psw available on all exits, not just a subset
arch/s390/kvm/kvm-s390.c (pandora-kernel.git)
/*
 * kvm-s390.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { NULL }
};

static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}
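
/*
 * Illustrative sketch (not from this file): userspace can ask for SIE
 * conversion of its address space up front through the /dev/kvm fd:
 *
 *     ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0);
 *
 * kvm_arch_create_vm() below calls s390_enable_sie() itself, so this
 * ioctl is optional in the common case.
 */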

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
                r = 1;
                break;
        default:
                r = 0;
        }
        return r;
}
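
/*
 * Illustrative sketch (not from this file): userspace probes the
 * capability above with KVM_CHECK_EXTENSION on the /dev/kvm fd:
 *
 *     if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_PSW) == 1)
 *             use_psw_from_kvm_run();        // hypothetical helper
 */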

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}
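
/*
 * Illustrative sketch (not from this file): a floating (vm-wide)
 * interrupt is injected through the vm fd; the type and parameter
 * values below are illustrative:
 *
 *     struct kvm_s390_interrupt irq = {
 *             .type   = KVM_S390_INT_VIRTIO,
 *             .parm   = 0,
 *             .parm64 = param,        // hypothetical parameter
 *     };
 *     ioctl(vm_fd, KVM_S390_INTERRUPT, &irq);
 */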

struct kvm *kvm_arch_create_vm(void)
{
        struct kvm *kvm;
        int rc;
        char debug_name[16];

        rc = s390_enable_sie();
        if (rc)
                goto out_nokvm;

        rc = -ENOMEM;
        kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
        if (!kvm)
                goto out_nokvm;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_nosca;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        return kvm;
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_nosca:
        kfree(kvm);
out_nokvm:
        return ERR_PTR(rc);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                (__u64) vcpu->arch.sie_block)
                vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        smp_mb();
        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        kvm_free_physmem(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        kfree(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_regs(&vcpu->arch.host_fpregs);
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->arch.guest_acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in POP (Principles of Operation),
         * but we don't switch to ESA mode */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        vcpu->arch.sie_block->prefix    = 0UL;
        vcpu->arch.sie_block->ihcpu     = 0xffff;
        vcpu->arch.sie_block->cputm     = 0UL;
        vcpu->arch.sie_block->ckc       = 0UL;
        vcpu->arch.sie_block->todpr     = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
        set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
        vcpu->arch.sie_block->ecb   = 2;
        vcpu->arch.sie_block->eca   = 0xC1002001U;
        vcpu->arch.sie_block->fac   = (int) (long) facilities;
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
        int rc = -ENOMEM;

        if (!vcpu)
                goto out_nomem;

        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
                                        get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        BUG_ON(!kvm->arch.sca);
        if (!kvm->arch.sca->cpu[id].sda)
                kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
        vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
        vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        init_waitqueue_head(&vcpu->arch.local_int.wq);
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_cpu;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);

        return vcpu;
out_free_cpu:
        kfree(vcpu);
out_nomem:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        vcpu_load(vcpu);
        kvm_s390_vcpu_initial_reset(vcpu);
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        vcpu_load(vcpu);
        memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        vcpu_load(vcpu);
        memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        vcpu_load(vcpu);
        memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        vcpu_load(vcpu);
        memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        vcpu_load(vcpu);
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        vcpu_load(vcpu);
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        vcpu_put(vcpu);
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        vcpu_load(vcpu);
        if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        vcpu_put(vcpu);
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

static void __vcpu_run(struct kvm_vcpu *vcpu)
{
        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        kvm_s390_deliver_pending_interrupts(vcpu);

        vcpu->arch.sie_block->icptcode = 0;
        local_irq_disable();
        kvm_guest_enter();
        local_irq_enable();
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
                VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        }
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        local_irq_disable();
        kvm_guest_exit();
        local_irq_enable();

        memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

        vcpu_load(vcpu);

rerun_vcpu:
        if (vcpu->requests)
                if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
                        kvm_s390_vcpu_set_mem(vcpu);

        /* verify that memory has been registered */
        if (!vcpu->arch.sie_block->gmslm) {
                vcpu_put(vcpu);
                VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
                return -EINVAL;
        }

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_INTR:
        case KVM_EXIT_S390_RESET:
                break;
        default:
                BUG();
        }

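        /* the psw in kvm_run is the authoritative copy on entry */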
        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

        might_fault();

        do {
                __vcpu_run(vcpu);
                rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (rc == SIE_INTERCEPT_RERUNVCPU)
                goto rerun_vcpu;

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

        if (rc == -ENOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }
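
        /* make the guest psw visible to userspace on every exit,
         * not just on KVM_EXIT_S390_SIEIC (see KVM_CAP_S390_PSW) */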
        kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu_put(vcpu);

        vcpu->stat.exit_userspace++;
        return rc;
}
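
/*
 * Illustrative sketch (not from this file): a minimal userspace run
 * loop against this ioctl. With KVM_CAP_S390_PSW, run->psw_mask and
 * run->psw_addr are current after every exit, not only after
 * KVM_EXIT_S390_SIEIC (mmap_size comes from KVM_GET_VCPU_MMAP_SIZE):
 *
 *     struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *                                MAP_SHARED, vcpu_fd, 0);
 *     while (ioctl(vcpu_fd, KVM_RUN, 0) == 0) {
 *             if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *                     handle_sieic(run);      // hypothetical handler
 *     }
 */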

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        const unsigned char archmode = 1;
        int prefix;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs),
                        vcpu->arch.guest_gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area_s390x, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs),
                        &vcpu->arch.guest_acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area_s390x, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}

static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        int rc;

        vcpu_load(vcpu);
        rc = __kvm_s390_vcpu_store_status(vcpu, addr);
        vcpu_put(vcpu);
        return rc;
}
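
/*
 * Illustrative sketch (not from this file): the target address travels
 * as the raw ioctl argument (see kvm_arch_vcpu_ioctl() below), e.g.
 *
 *     ioctl(vcpu_fd, KVM_S390_STORE_STATUS, KVM_S390_STORE_STATUS_PREFIXED);
 *
 * stores the status at the architected save area relative to the prefix.
 */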

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        return -EFAULT;
                return kvm_s390_inject_vcpu(vcpu, &s390int);
        }
        case KVM_S390_STORE_STATUS:
                return kvm_s390_vcpu_store_status(vcpu, arg);
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                if (copy_from_user(&psw, argp, sizeof(psw)))
                        return -EFAULT;
                return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
        }
        case KVM_S390_INITIAL_RESET:
                return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
        default:
                ;
        }
        return -EINVAL;
}
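
/*
 * Illustrative sketch (not from this file): setting the initial psw
 * before the first KVM_RUN; the handler above returns -EBUSY while the
 * vcpu is running, and the mask value here is illustrative:
 *
 *     struct kvm_s390_psw psw = {
 *             .mask = 0x0000000180000000UL,   // EA+BA: 64-bit addressing
 *             .addr = start_address,          // hypothetical entry point
 *     };
 *     ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw);
 */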

/* Section: memory related */
int kvm_arch_set_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot old,
                                int user_alloc)
{
        int i;
        struct kvm_vcpu *vcpu;

        /* A few sanity checks. We can have exactly one memory slot; it must
           start at guest physical address zero, and both its userland
           address and its size must be page aligned. The userland memory
           may be fragmented into several vmas, and it is fine to mmap()
           and munmap() within this slot at any time after this call. */

        if (mem->slot)
                return -EINVAL;

        if (mem->guest_phys_addr)
                return -EINVAL;

        if (mem->userspace_addr & (PAGE_SIZE - 1))
                return -EINVAL;

        if (mem->memory_size & (PAGE_SIZE - 1))
                return -EINVAL;

        if (!user_alloc)
                return -EINVAL;

        /* request update of sie control block for all available vcpus */
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
                        continue;
                kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
        }

        return 0;
}
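
/*
 * Illustrative sketch (not from this file): given the checks above,
 * userspace registers its single memory slot roughly like this
 * (backing and ram_size are placeholders, both page aligned):
 *
 *     struct kvm_userspace_memory_region mem = {
 *             .slot            = 0,
 *             .guest_phys_addr = 0,
 *             .userspace_addr  = (unsigned long) backing,
 *             .memory_size     = ram_size,
 *     };
 *     ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */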

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
        return gfn;
}

static int __init kvm_s390_init(void)
{
        int ret;
        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
        if (ret)
                return ret;

        /*
         * Guests can ask for up to 255+1 double words, so we need a full
         * page to hold the maximum number of facilities. On the other
         * hand, we only set facilities that are known to work in KVM.
         */
        facilities = (unsigned long long *) get_zeroed_page(GFP_DMA);
        if (!facilities) {
                kvm_exit();
                return -ENOMEM;
        }
        stfle(facilities, 1);
        facilities[0] &= 0xff00fff3f0700000ULL;
        return 0;
}

static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) facilities);
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);