/*
 * kvm-s390.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>

#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctg", VCPU_STAT(instruction_lctg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { NULL }
};


/* Section: not file related */
void kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
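        /* KVM_S390_ENABLE_SIE prepares the calling process's address
           space for running SIE guests */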
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
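        /* no optional kvm extensions are supported */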
        return 0;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
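        /* dirty page logging is not implemented; report success without
           filling the log */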
        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -EINVAL;
        }

        return r;
}

struct kvm *kvm_arch_create_vm(void)
{
        struct kvm *kvm;
        int rc;
        char debug_name[16];

        rc = s390_enable_sie();
        if (rc)
                goto out_nokvm;

        rc = -ENOMEM;
        kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
        if (!kvm)
                goto out_nokvm;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_nosca;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        try_module_get(THIS_MODULE);

        return kvm;
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_nosca:
        kfree(kvm);
out_nokvm:
        return ERR_PTR(rc);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        debug_unregister(kvm->arch.dbf);
        kvm_free_physmem(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        kfree(kvm);
        module_put(THIS_MODULE);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but doesn't call it */
        BUG();
}

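/* on vcpu load, save the host's FP and access registers and switch to
   the guest's; kvm_arch_vcpu_put() reverses this */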
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_regs(&vcpu->arch.host_fpregs);
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->arch.guest_acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals the initial cpu reset in the POP, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        vcpu->arch.sie_block->prefix    = 0UL;
        vcpu->arch.sie_block->ihcpu     = 0xffff;
        vcpu->arch.sie_block->cputm     = 0UL;
        vcpu->arch.sie_block->ckc       = 0UL;
        vcpu->arch.sie_block->todpr     = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
}

/* The current code can have up to 256 pages for virtio */
#define VIRTIODESCSPACE (256ul * 4096ul)

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
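        /* guest memory spans [gmsor, gmslm]: from the origin up to
           origin + memsize + virtio descriptor space - 1 */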
        vcpu->arch.sie_block->gmslm = vcpu->kvm->arch.guest_memsize +
                                      vcpu->kvm->arch.guest_origin +
                                      VIRTIODESCSPACE - 1ul;
        vcpu->arch.sie_block->gmsor = vcpu->kvm->arch.guest_origin;
        vcpu->arch.sie_block->ecb   = 2;
        vcpu->arch.sie_block->eca   = 0xC1002001U;
        setup_timer(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup,
                 (unsigned long) vcpu);
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xfe;
        return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
        int rc = -ENOMEM;

        if (!vcpu)
                goto out_nomem;

        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
                                        get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        BUG_ON(!kvm->arch.sca);
        BUG_ON(kvm->arch.sca->cpu[id].sda);
        kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
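        /* the SIE block keeps the SCA origin split into high and low
           32-bit halves */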
        vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
        vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock_bh(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        init_waitqueue_head(&vcpu->arch.local_int.wq);
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock_bh(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_cpu;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);

        try_module_get(THIS_MODULE);

        return vcpu;
out_free_cpu:
        kfree(vcpu);
out_nomem:
        return ERR_PTR(rc);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "destroy cpu");
        free_page((unsigned long)(vcpu->arch.sie_block));
        kfree(vcpu);
        module_put(THIS_MODULE);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        vcpu_load(vcpu);
        kvm_s390_vcpu_initial_reset(vcpu);
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        vcpu_load(vcpu);
        memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        vcpu_load(vcpu);
        memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        vcpu_load(vcpu);
        memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        vcpu_load(vcpu);
        memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        vcpu_load(vcpu);
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        vcpu_load(vcpu);
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        vcpu_put(vcpu);
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        vcpu_load(vcpu);
        if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
                rc = -EBUSY;
        else
                vcpu->arch.sie_block->gpsw = psw;
        vcpu_put(vcpu);
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
                                    struct kvm_debug_guest *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

extern void s390_handle_mcck(void);

static void __vcpu_run(struct kvm_vcpu *vcpu)
{
        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        kvm_s390_deliver_pending_interrupts(vcpu);

        vcpu->arch.sie_block->icptcode = 0;
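        /* note the switch into guest context for cpu time accounting */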
        local_irq_disable();
        kvm_guest_enter();
        local_irq_enable();
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
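        /* sie64a() runs the guest under SIE until the next interception;
           a nonzero return means the SIE instruction itself faulted */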
        if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
                VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        }
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        local_irq_disable();
        kvm_guest_exit();
        local_irq_enable();

        memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

        vcpu_load(vcpu);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

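        /* reload the guest PSW that userspace may have changed while
           handling the previous exit */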
        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
                vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask;
                vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr;
                break;
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_S390_RESET:
                break;
        default:
                BUG();
        }

        might_sleep();

        do {
                __vcpu_run(vcpu);
                rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (signal_pending(current) && !rc)
                rc = -EINTR;

        if (rc == -ENOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.mask     = vcpu->arch.sie_block->gpsw.mask;
                kvm_run->s390_sieic.addr     = vcpu->arch.sie_block->gpsw.addr;
                kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* the intercept was handled, but userspace support is needed;
                 * kvm_run has already been prepared by the handler */
                rc = 0;
        }

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu_put(vcpu);

        vcpu->stat.exit_userspace++;
        return rc;
}

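/* copy to guest storage, via copy_to_guest() when 'prefix' is set,
   copy_to_guest_absolute() otherwise */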
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        const unsigned char archmode = 1;
        int prefix;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs),
                        vcpu->arch.guest_gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area_s390x, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs),
                        &vcpu->arch.guest_acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area_s390x, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}

static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        int rc;

        vcpu_load(vcpu);
        rc = __kvm_s390_vcpu_store_status(vcpu, addr);
        vcpu_put(vcpu);
        return rc;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        return -EFAULT;
                return kvm_s390_inject_vcpu(vcpu, &s390int);
        }
        case KVM_S390_STORE_STATUS:
                return kvm_s390_vcpu_store_status(vcpu, arg);
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                if (copy_from_user(&psw, argp, sizeof(psw)))
                        return -EFAULT;
                return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
        }
        case KVM_S390_INITIAL_RESET:
                return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
        default:
                ;
        }
        return -EINVAL;
}

/* Section: memory related */
int kvm_arch_set_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot old,
                                int user_alloc)
{
        /* A few sanity checks: we support exactly one memory slot; it must
           start at guest physical address zero, and it must begin and end
           on a page boundary in userland. The backing memory in userland
           may be fragmented across several vmas, and it is fine to mmap()
           and munmap() within this slot at any time after this call. */

        if (mem->slot)
                return -EINVAL;

        if (mem->guest_phys_addr)
                return -EINVAL;

        if (mem->userspace_addr & (PAGE_SIZE - 1))
                return -EINVAL;

        if (mem->memory_size & (PAGE_SIZE - 1))
                return -EINVAL;

        kvm->arch.guest_origin = mem->userspace_addr;
        kvm->arch.guest_memsize = mem->memory_size;

        /* FIXME: we do want to interrupt running CPUs and update their memory
           configuration now to avoid race conditions. But hey, changing the
           memory layout while virtual CPUs are running is usually bad
           programming practice. */

        return 0;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
        return gfn;
}

static int __init kvm_s390_init(void)
{
        return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);