/*
 * s390host.c --  hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>

#include "kvm-s390.h"
#include "gaccess.h"

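/*
 * Exported vcpu statistics: VCPU_STAT records the offset of a counter
 * within struct kvm_vcpu together with the KVM_STAT_VCPU type.  The
 * generic KVM module walks debugfs_entries and creates one debugfs
 * file per counter under its kvm directory.
 */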
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctg", VCPU_STAT(instruction_lctg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { NULL }
};


/* Section: not file related */
void kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
}

void kvm_arch_hardware_disable(void *garbage)
{
}

void decache_vcpus_on_cpu(int cpu)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
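/*
 * The only device ioctl handled here is KVM_S390_ENABLE_SIE, which
 * lets userspace prepare the current process's address space for
 * running guests under SIE before any VM is created; s390_enable_sie()
 * is provided by the s390 mm code.
 */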
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
        return 0;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

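/*
 * KVM_S390_INTERRUPT on the VM file descriptor injects a floating
 * interrupt, i.e. one that is not bound to a specific vcpu and may be
 * delivered by any of them; per-vcpu injection goes through the vcpu
 * ioctl of the same name further down.
 */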
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -EINVAL;
        }

        return r;
}

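/*
 * VM creation: enable SIE for the current mm, allocate the kvm
 * structure and the SCA (system control area) page shared by all
 * vcpus of this VM, and register a per-VM s390dbf debug feature named
 * "kvm-<pid>" that backs the VM_EVENT/VCPU_EVENT traces below.
 */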
struct kvm *kvm_arch_create_vm(void)
{
        struct kvm *kvm;
        int rc;
        char debug_name[16];

        rc = s390_enable_sie();
        if (rc)
                goto out_nokvm;

        rc = -ENOMEM;
        kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
        if (!kvm)
                goto out_nokvm;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_nosca;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        try_module_get(THIS_MODULE);

        return kvm;
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_nosca:
        kfree(kvm);
out_nokvm:
        return ERR_PTR(rc);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        debug_unregister(kvm->arch.dbf);
        free_page((unsigned long)(kvm->arch.sca));
        kfree(kvm);
        module_put(THIS_MODULE);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but doesn't call it */
        BUG();
}

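/*
 * Lazy register switching: on vcpu_load the host's floating point and
 * access registers are saved and the guest's copies are loaded; on
 * vcpu_put the reverse happens.  The guest fpc is masked with
 * FPC_VALID_MASK so an invalid control value cannot be loaded.
 */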
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_regs(&vcpu->arch.host_fpregs);
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->arch.guest_acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
}

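/*
 * Note on the reset values below: cr0 = 0xE0 and cr14 = 0xC2000000 are
 * the architected control register contents after an initial cpu
 * reset, and loading the zeroed fpc via lfpc brings the floating
 * point control register into its reset state as well.
 */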
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals the initial cpu reset in POP (Principles of
           Operation), but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        vcpu->arch.sie_block->prefix    = 0UL;
        vcpu->arch.sie_block->ihcpu     = 0xffff;
        vcpu->arch.sie_block->cputm     = 0UL;
        vcpu->arch.sie_block->ckc       = 0UL;
        vcpu->arch.sie_block->todpr     = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
}

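/*
 * One-time setup of the SIE control block: CPUSTAT_ZARCH starts the
 * guest in z/Architecture mode, gmsor/gmslm describe the guest memory
 * origin and limit seen by SIE, and ecb/eca set execution control
 * bits.  The ckc_timer is armed with kvm_s390_idle_wakeup() so that a
 * vcpu waiting for its clock comparator can be woken up, and the
 * stored cpu id's version field is overridden with 0xfe to mark the
 * cpu as virtual.
 */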
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
        vcpu->arch.sie_block->gmslm = 0xffffffffffUL;
        vcpu->arch.sie_block->gmsor = 0x000000000000;
        vcpu->arch.sie_block->ecb   = 2;
        vcpu->arch.sie_block->eca   = 0xC1002001U;
        setup_timer(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup,
                    (unsigned long) vcpu);
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xfe;
        return 0;
}

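/*
 * Create a vcpu: the SIE control block must be a zeroed page; its
 * address is entered into the VM-wide SCA slot for this cpu id, and
 * the sie block in turn gets the SCA origin split into scaoh/scaol.
 * The vcpu's local interrupt state is linked to the VM's floating
 * interrupt structure under the float_int lock.
 */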
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
        int rc = -ENOMEM;

        if (!vcpu)
                goto out_nomem;

        vcpu->arch.sie_block = (struct sie_block *) get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        BUG_ON(!kvm->arch.sca);
        BUG_ON(kvm->arch.sca->cpu[id].sda);
        kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
        vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
        vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock_bh(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        init_waitqueue_head(&vcpu->arch.local_int.wq);
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock_bh(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);

        try_module_get(THIS_MODULE);

        return vcpu;
out_free_sie_block:
        /* don't leak the sie block page if kvm_vcpu_init() failed */
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kfree(vcpu);
out_nomem:
        return ERR_PTR(rc);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "destroy cpu");
        free_page((unsigned long)(vcpu->arch.sie_block));
        kfree(vcpu);
        module_put(THIS_MODULE);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        vcpu_load(vcpu);
        kvm_s390_vcpu_initial_reset(vcpu);
        vcpu_put(vcpu);
        return 0;
}

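/*
 * Register accessors: these copy between the kvm_regs/kvm_sregs/
 * kvm_fpu layouts used by userspace and the vcpu's shadow copies.
 * vcpu_load()/vcpu_put() bracket each access so the values come from,
 * or land in, a consistently saved register context.
 */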
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        vcpu_load(vcpu);
        memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        vcpu_load(vcpu);
        memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        vcpu_load(vcpu);
        memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        vcpu_load(vcpu);
        memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        vcpu_load(vcpu);
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        vcpu_load(vcpu);
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        vcpu_put(vcpu);
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        vcpu_load(vcpu);
        if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
                rc = -EBUSY;
        else
                vcpu->arch.sie_block->gpsw = psw;
        vcpu_put(vcpu);
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
                                    struct kvm_debug_guest *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

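/*
 * Enter SIE for one intercept cycle.  Guest registers 14 and 15 live
 * in the sie block (gg14/gg15) while SIE runs, so the 16 bytes
 * starting at guest_gprs[14] are copied in beforehand and back out
 * afterwards.  kvm_guest_enter()/kvm_guest_exit() are called with
 * interrupts off for correct cpu time accounting.
 */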
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

        if (need_resched())
                schedule();

        vcpu->arch.sie_block->icptcode = 0;
        local_irq_disable();
        kvm_guest_enter();
        local_irq_enable();
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        local_irq_disable();
        kvm_guest_exit();
        local_irq_enable();

        memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

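/*
 * The main run loop: restore any PSW state handed in by userspace,
 * then alternate between delivering pending interrupts and running
 * SIE until a signal arrives or an intercept cannot be handled in the
 * kernel.  -ENOTSUPP means kvm_run must be filled with the raw SIE
 * intercept data for userspace; -EREMOTE means the intercept handler
 * already prepared kvm_run and userspace just needs to act on it.
 */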
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

        vcpu_load(vcpu);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
                vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask;
                vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr;
                break;
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_S390_RESET:
                break;
        default:
                BUG();
        }

        might_sleep();

        do {
                kvm_s390_deliver_pending_interrupts(vcpu);
                __vcpu_run(vcpu);
                rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (signal_pending(current) && !rc)
                rc = -EINTR;

        if (rc == -ENOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.mask     = vcpu->arch.sie_block->gpsw.mask;
                kvm_run->s390_sieic.addr     = vcpu->arch.sie_block->gpsw.addr;
                kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed;
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu_put(vcpu);

        vcpu->stat.exit_userspace++;
        return rc;
}

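/*
 * Copy to guest storage either through the prefix-relocated view
 * (copy_to_guest) or to absolute guest addresses
 * (copy_to_guest_absolute), depending on the prefix argument.
 */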
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        const unsigned char archmode = 1;
        int prefix;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs),
                        vcpu->arch.guest_gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area_s390x, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs),
                        &vcpu->arch.guest_acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area_s390x, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}

static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        int rc;

        vcpu_load(vcpu);
        rc = __kvm_s390_vcpu_store_status(vcpu, addr);
        vcpu_put(vcpu);
        return rc;
}

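/*
 * vcpu ioctls: interrupt injection, store status, setting the initial
 * PSW and the initial cpu reset.  Everything else falls through to
 * -EINVAL.
 */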
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        return -EFAULT;
                return kvm_s390_inject_vcpu(vcpu, &s390int);
        }
        case KVM_S390_STORE_STATUS:
                return kvm_s390_vcpu_store_status(vcpu, arg);
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                if (copy_from_user(&psw, argp, sizeof(psw)))
                        return -EFAULT;
                return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
        }
        case KVM_S390_INITIAL_RESET:
                return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
        default:
                ;
        }
        return -EINVAL;
}

/* Section: memory related */
int kvm_arch_set_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot old,
                                int user_alloc)
{
        /* A few sanity checks. We can have exactly one memory slot,
           which has to start at guest physical address zero and be
           page-aligned in userland, and whose size must be a multiple
           of the page size. The userland memory may be fragmented
           across multiple vmas, and it is fine to mmap() and munmap()
           within this slot at any time after this call. */

        if (mem->slot)
                return -EINVAL;

        if (mem->guest_phys_addr)
                return -EINVAL;

        if (mem->userspace_addr & (PAGE_SIZE - 1))
                return -EINVAL;

        if (mem->memory_size & (PAGE_SIZE - 1))
                return -EINVAL;

        kvm->arch.guest_origin = mem->userspace_addr;
        kvm->arch.guest_memsize = mem->memory_size;

        /* FIXME: we do want to interrupt running CPUs and update their
           memory configuration now to avoid race conditions. But hey,
           changing the memory layout while virtual CPUs are running is
           usually bad programming practice. */

        return 0;
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
        return gfn;
}

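/*
 * Module glue: kvm_init() registers the /dev/kvm device with the
 * generic KVM code and tells it how large the arch vcpu structure is,
 * so that common code can allocate vcpus on our behalf.
 */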
static int __init kvm_s390_init(void)
{
        return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);