/*
 * kvm-s390.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { NULL }
};

static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
                r = 1;
                break;
        default:
                r = 0;
        }
        return r;
}
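
/*
 * Usage sketch (not part of this file's build): how userspace might probe
 * the capabilities advertised above. KVM_CHECK_EXTENSION is issued on the
 * /dev/kvm system fd; fd lifetime and error handling are elided assumptions.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
        int kvm_fd = open("/dev/kvm", O_RDWR);

        /* returns 1 for the caps handled above, 0 otherwise */
        if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_PSW) == 1)
                printf("KVM_CAP_S390_PSW supported\n");
        return 0;
}
#endif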

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}
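
/*
 * Usage sketch (not built): injecting a floating interrupt from userspace
 * through the KVM_S390_INTERRUPT vm ioctl handled above. The vm_fd comes
 * from KVM_CREATE_VM; the virtio parameter values are illustrative
 * assumptions -- their meaning is defined by the device setup.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int inject_virtio_irq(int vm_fd)
{
        struct kvm_s390_interrupt s390int = {
                .type   = KVM_S390_INT_VIRTIO,
                .parm   = 0,            /* interrupt parameter (assumed) */
                .parm64 = 0,            /* device token (assumed) */
        };

        return ioctl(vm_fd, KVM_S390_INTERRUPT, &s390int);
}
#endif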

int kvm_arch_init_vm(struct kvm *kvm)
{
        int rc;
        char debug_name[16];

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        kvm->arch.gmap = gmap_alloc(current->mm);
        if (!kvm->arch.gmap)
                goto out_nogmap;

        return 0;
out_nogmap:
        debug_unregister(kvm->arch.dbf);
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
        if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                (__u64) vcpu->arch.sie_block)
                vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        smp_mb();
        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        vcpu->arch.gmap = vcpu->kvm->arch.gmap;
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_regs(&vcpu->arch.host_fpregs);
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->arch.guest_acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->arch.guest_acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
}
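
/*
 * Note on the two functions above: registers are switched lazily. While a
 * vcpu is loaded, the guest FP and access registers live in the hardware
 * registers and the host values are parked in vcpu->arch.host_*; anything
 * that needs current guest values in between must save them explicitly
 * (see kvm_s390_vcpu_store_status below).
 */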

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        vcpu->arch.sie_block->prefix    = 0UL;
        vcpu->arch.sie_block->ihcpu     = 0xffff;
        vcpu->arch.sie_block->cputm     = 0UL;
        vcpu->arch.sie_block->ckc       = 0UL;
        vcpu->arch.sie_block->todpr     = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                    CPUSTAT_SM |
                                                    CPUSTAT_STOPPED);
        vcpu->arch.sie_block->ecb   = 6;
        vcpu->arch.sie_block->eca   = 0xC1002001U;
        vcpu->arch.sie_block->fac   = (int) (long) facilities;
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
        if (!vcpu)
                goto out;

        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
                                        get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        BUG_ON(!kvm->arch.sca);
        if (!kvm->arch.sca->cpu[id].sda)
                kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
        vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
        vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
        set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        init_waitqueue_head(&vcpu->arch.local_int.wq);
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kfree(vcpu);
out:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        restore_access_regs(vcpu->arch.guest_acrs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}
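
/*
 * Usage sketch (not built): setting the initial PSW from userspace. The
 * vcpu must still be in the stopped state or the handler above returns
 * -EBUSY. The mask and entry address values are illustrative assumptions.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_initial_psw(int vcpu_fd)
{
        struct kvm_s390_psw psw = {
                .mask = 0x0000000180000000UL,   /* EA+BA: 64-bit mode (assumed) */
                .addr = 0x10000,                /* guest entry point (assumed) */
        };

        return ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw);
}
#endif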

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

static void __vcpu_run(struct kvm_vcpu *vcpu)
{
        int rc;

        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        kvm_s390_deliver_pending_interrupts(vcpu);

        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));

        vcpu->arch.sie_block->icptcode = 0;
        local_irq_disable();
        kvm_guest_enter();
        local_irq_enable();
        rc = sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
        local_irq_disable();
        kvm_guest_exit();
        local_irq_enable();

        if (rc) {
                VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        }
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);

        memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

rerun_vcpu:
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_INTR:
        case KVM_EXIT_S390_RESET:
                break;
        default:
                BUG();
        }

        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

        might_fault();

        do {
                __vcpu_run(vcpu);
                rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (rc == SIE_INTERCEPT_RERUNVCPU)
                goto rerun_vcpu;

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /*
                 * intercept was handled, but userspace support is needed;
                 * kvm_run has been prepared by the handler
                 */
                rc = 0;
        }

        kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}
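
/*
 * Usage sketch (not built): the canonical userspace run loop that drives
 * kvm_arch_vcpu_ioctl_run() above. The shared kvm_run area is mmap'ed from
 * the vcpu fd; vm/vcpu creation and guest memory setup are elided
 * assumptions.
 */
#if 0
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

static void run_loop(int kvm_fd, int vcpu_fd)
{
        int size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
        struct kvm_run *run = mmap(NULL, size, PROT_READ | PROT_WRITE,
                                   MAP_SHARED, vcpu_fd, 0);

        for (;;) {
                if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                        break;          /* -EINTR and friends land here */
                if (run->exit_reason == KVM_EXIT_S390_SIEIC) {
                        /* icptcode/ipa/ipb were filled in by the kernel */
                        break;
                }
        }
}
#endif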

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        unsigned char archmode = 1;
        int prefix;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        /*
         * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
         * copying in vcpu load/put. Let's update our copies before we save
         * them into the save area.
         */
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->arch.guest_acrs);

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
                        vcpu->arch.guest_gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
                        &vcpu->arch.guest_acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}
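
/*
 * Usage sketch (not built): requesting a store-status from userspace. The
 * NOADDR/PREFIXED magic values live in the kernel-internal asm/kvm_host.h,
 * so the constant is restated here; treat that value as an assumption.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define S390_STORE_STATUS_NOADDR (-1UL) /* mirrors KVM_S390_STORE_STATUS_NOADDR */

static int store_status(int vcpu_fd)
{
        /* stores at absolute 0x1200 (SAVE_AREA_BASE), see comment above */
        return ioctl(vcpu_fd, KVM_S390_STORE_STATUS, S390_STORE_STATUS_NOADDR);
}
#endif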

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vcpu(vcpu, &s390int);
                break;
        }
        case KVM_S390_STORE_STATUS:
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
        default:
                r = -EINVAL;
        }
        return r;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        /* A few sanity checks. We can have exactly one memory slot, which has
           to start at guest physical address zero and has to begin and end on
           a 1 MB segment boundary in userland (the 0xfffff masks below). The
           userland memory may be fragmented into any number of vmas, and it
           is fine to mmap() and munmap() within this slot at any time after
           this call. */

        if (mem->slot)
                return -EINVAL;

        if (mem->guest_phys_addr)
                return -EINVAL;

        if (mem->userspace_addr & 0xffffful)
                return -EINVAL;

        if (mem->memory_size & 0xffffful)
                return -EINVAL;

        if (!user_alloc)
                return -EINVAL;

        return 0;
}
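
/*
 * Usage sketch (not built): a memory region that passes the checks above --
 * slot 0, guest physical address 0, userspace address and size both 1 MB
 * aligned. mmap() only guarantees page alignment, so the sketch
 * over-allocates and rounds up; trimming the excess and error handling are
 * elided assumptions.
 */
#if 0
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

static void *alloc_1mb_aligned(unsigned long size)
{
        unsigned long addr = (unsigned long) mmap(NULL, size + (1UL << 20),
                                PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        /* round up to the next 1 MB boundary */
        return (void *) ((addr + (1UL << 20) - 1) & ~((1UL << 20) - 1));
}

static int set_guest_memory(int vm_fd, unsigned long size)
{
        struct kvm_userspace_memory_region mem = {
                .slot            = 0,   /* exactly one slot */
                .guest_phys_addr = 0,   /* must be zero */
                .memory_size     = size,        /* 1 MB multiple */
                .userspace_addr  = (unsigned long) alloc_1mb_aligned(size),
        };

        return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
}
#endif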

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot old,
                                int user_alloc)
{
        int rc;

        rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
                mem->guest_phys_addr, mem->memory_size);
        if (rc)
                printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
        return;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

static int __init kvm_s390_init(void)
{
        int ret;

        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (ret)
                return ret;

        /*
         * guests can ask for up to 255+1 double words, we need a full page
         * to hold the maximum amount of facilities. On the other hand, we
         * only set facilities that are known to work in KVM.
         */
        facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!facilities) {
                kvm_exit();
                return -ENOMEM;
        }
        memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
        facilities[0] &= 0xff00fff3f47c0000ULL;
        facilities[1] &= 0x001c000000000000ULL;
        return 0;
}
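
/*
 * Sketch (not built): how the stfle facility numbering maps onto the
 * doubleword array used above -- facility N is bit (63 - N % 64) of
 * facilities[N / 64]. E.g. the 0xff00... mask keeps facilities 0-7 and
 * clears facilities 8-15. The helper name is an illustrative assumption.
 */
#if 0
static inline int kvm_facility_bit(unsigned long long *fac, int nr)
{
        return (fac[nr >> 6] >> (63 - (nr & 63))) & 1;
}
#endif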

static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) facilities);
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);