/*
 * kvm-s390.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

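/*
 * VCPU_STAT() resolves a named counter in struct kvm_vcpu to the
 * (offset, type) pair the generic KVM statistics code expects; the
 * table below is exposed through debugfs.
 */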
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};

static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
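
/*
 * Illustrative userspace sketch (not part of this file): the extension
 * is requested with a plain ioctl on the /dev/kvm file descriptor.
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	if (ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0) < 0)
 *		perror("KVM_S390_ENABLE_SIE");
 */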

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
		r = 1;
		break;
	default:
		r = 0;
	}
	return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
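
/*
 * Illustrative userspace sketch (hypothetical parameter value): a
 * floating service-signal interrupt is injected into the VM by filling
 * struct kvm_s390_interrupt and issuing KVM_S390_INTERRUPT on the VM
 * file descriptor.
 *
 *	struct kvm_s390_interrupt irq = {
 *		.type = KVM_S390_INT_SERVICE,
 *		.parm = parm,		(hypothetical external parameter)
 *	};
 *	ioctl(vm_fd, KVM_S390_INTERRUPT, &irq);
 */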

int kvm_arch_init_vm(struct kvm *kvm)
{
	int rc;
	char debug_name[16];

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	kvm->arch.gmap = gmap_alloc(current->mm);
	if (!kvm->arch.gmap)
		goto out_nogmap;

	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

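/*
 * Tearing down a vcpu: clear its bit in the SCA cpu mask, drop the SCA
 * pointer to its SIE control block (if it still points at this vcpu)
 * and free the SIE block itself.
 */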
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		(__u64) vcpu->arch.sie_block)
		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	smp_mb();
	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
	gmap_enable(vcpu->arch.gmap);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}
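
/*
 * load/put are symmetric: host floating-point and access registers are
 * saved before the guest copies are restored, and the reverse happens
 * when the vcpu is scheduled out. The guest address space (gmap) is
 * only active between the two.
 */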

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in POP, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix    = 0UL;
	vcpu->arch.sie_block->ihcpu     = 0xffff;
	vcpu->arch.sie_block->cputm     = 0UL;
	vcpu->arch.sie_block->ckc       = 0UL;
	vcpu->arch.sie_block->todpr     = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | CPUSTAT_SM);
	vcpu->arch.sie_block->ecb   = 6;
	vcpu->arch.sie_block->eca   = 0xC1002001U;
	vcpu->arch.sie_block->fac   = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

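/*
 * Allocating a vcpu: the SIE control block gets its own zeroed page,
 * is entered into the system control area (SCA) slot for this cpu id,
 * and the local interrupt state is wired up to the VM-wide floating
 * interrupt structure before the generic vcpu init runs.
 */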
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	if (!kvm->arch.sca->cpu[id].sda)
		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
	set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->arch.guest_acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

static void __vcpu_run(struct kvm_vcpu *vcpu)
{
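	/* guest gprs 14 and 15 are mirrored in the SIE block (gg14/gg15) */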
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
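	/*
	 * kvm_guest_enter/exit are called with interrupts off so the
	 * guest-time accounting is not disturbed; note the matching
	 * pair after sie64a() below.
	 */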
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

	might_fault();

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	default:
		r = -EINVAL;
	}
	return r;
}
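
/*
 * Illustrative userspace sketch (hypothetical values): before the first
 * KVM_RUN the initial PSW is usually set on the vcpu file descriptor,
 * e.g. to the entry point of a loaded guest image.
 *
 *	struct kvm_s390_psw psw = {
 *		.mask = psw_mask,	(hypothetical PSW mask)
 *		.addr = 0x10000,	(hypothetical entry address)
 *	};
 *	ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw);
 */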

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks. We support exactly one memory slot, which
	   has to start at guest physical address zero and has to begin and
	   end on a page boundary in userland. The userland memory may be
	   fragmented into several vmas; it is okay to mmap() and munmap()
	   in this slot at any time after this call. */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc)
{
	int rc;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

static int __init kvm_s390_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x201c000000000000ULL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);