arch/s390/kvm/kvm-s390.c [pandora-kernel.git]
1 /*
2  * hosting zSeries kernel virtual machines
3  *
4  * Copyright IBM Corp. 2008, 2009
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License (version 2 only)
8  * as published by the Free Software Foundation.
9  *
10  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11  *               Christian Borntraeger <borntraeger@de.ibm.com>
12  *               Heiko Carstens <heiko.carstens@de.ibm.com>
13  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
14  *               Jason J. Herne <jjherne@us.ibm.com>
15  */
16
17 #include <linux/compiler.h>
18 #include <linux/err.h>
19 #include <linux/fs.h>
20 #include <linux/hrtimer.h>
21 #include <linux/init.h>
22 #include <linux/kvm.h>
23 #include <linux/kvm_host.h>
24 #include <linux/module.h>
25 #include <linux/slab.h>
26 #include <linux/timer.h>
27 #include <asm/asm-offsets.h>
28 #include <asm/lowcore.h>
29 #include <asm/pgtable.h>
30 #include <asm/nmi.h>
31 #include <asm/switch_to.h>
32 #include <asm/facility.h>
33 #include <asm/sclp.h>
34 #include "kvm-s390.h"
35 #include "gaccess.h"
36
37 #define CREATE_TRACE_POINTS
38 #include "trace.h"
39 #include "trace-s390.h"
40
41 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
42
43 struct kvm_stats_debugfs_item debugfs_entries[] = {
44         { "userspace_handled", VCPU_STAT(exit_userspace) },
45         { "exit_null", VCPU_STAT(exit_null) },
46         { "exit_validity", VCPU_STAT(exit_validity) },
47         { "exit_stop_request", VCPU_STAT(exit_stop_request) },
48         { "exit_external_request", VCPU_STAT(exit_external_request) },
49         { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
50         { "exit_instruction", VCPU_STAT(exit_instruction) },
51         { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
52         { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
53         { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
54         { "instruction_lctl", VCPU_STAT(instruction_lctl) },
55         { "instruction_stctl", VCPU_STAT(instruction_stctl) },
56         { "instruction_stctg", VCPU_STAT(instruction_stctg) },
57         { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
58         { "deliver_external_call", VCPU_STAT(deliver_external_call) },
59         { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
60         { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
61         { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
62         { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
63         { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
64         { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
65         { "exit_wait_state", VCPU_STAT(exit_wait_state) },
66         { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
67         { "instruction_stidp", VCPU_STAT(instruction_stidp) },
68         { "instruction_spx", VCPU_STAT(instruction_spx) },
69         { "instruction_stpx", VCPU_STAT(instruction_stpx) },
70         { "instruction_stap", VCPU_STAT(instruction_stap) },
71         { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
72         { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
73         { "instruction_stsch", VCPU_STAT(instruction_stsch) },
74         { "instruction_chsc", VCPU_STAT(instruction_chsc) },
75         { "instruction_essa", VCPU_STAT(instruction_essa) },
76         { "instruction_stsi", VCPU_STAT(instruction_stsi) },
77         { "instruction_stfl", VCPU_STAT(instruction_stfl) },
78         { "instruction_tprot", VCPU_STAT(instruction_tprot) },
79         { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
80         { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
81         { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
82         { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
83         { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
84         { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
85         { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
86         { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
87         { "diagnose_10", VCPU_STAT(diagnose_10) },
88         { "diagnose_44", VCPU_STAT(diagnose_44) },
89         { "diagnose_9c", VCPU_STAT(diagnose_9c) },
90         { NULL }
91 };
92
93 unsigned long *vfacilities;
94 static struct gmap_notifier gmap_notifier;
95
96 /* test availability of vfacility */
97 int test_vfacility(unsigned long nr)
98 {
99         return __test_facility(nr, (void *) vfacilities);
100 }
101
102 /* Section: not file related */
103 int kvm_arch_hardware_enable(void *garbage)
104 {
105         /* every s390 is virtualization enabled ;-) */
106         return 0;
107 }
108
109 void kvm_arch_hardware_disable(void *garbage)
110 {
111 }
112
113 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
114
115 int kvm_arch_hardware_setup(void)
116 {
117         gmap_notifier.notifier_call = kvm_gmap_notifier;
118         gmap_register_ipte_notifier(&gmap_notifier);
119         return 0;
120 }
121
122 void kvm_arch_hardware_unsetup(void)
123 {
124         gmap_unregister_ipte_notifier(&gmap_notifier);
125 }
126
127 void kvm_arch_check_processor_compat(void *rtn)
128 {
129 }
130
131 int kvm_arch_init(void *opaque)
132 {
133         return 0;
134 }
135
136 void kvm_arch_exit(void)
137 {
138 }
139
140 /* Section: device related */
141 long kvm_arch_dev_ioctl(struct file *filp,
142                         unsigned int ioctl, unsigned long arg)
143 {
144         if (ioctl == KVM_S390_ENABLE_SIE)
145                 return s390_enable_sie();
146         return -EINVAL;
147 }
148
149 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
150 {
151         int r;
152
153         switch (ext) {
154         case KVM_CAP_S390_PSW:
155         case KVM_CAP_S390_GMAP:
156         case KVM_CAP_SYNC_MMU:
157 #ifdef CONFIG_KVM_S390_UCONTROL
158         case KVM_CAP_S390_UCONTROL:
159 #endif
160         case KVM_CAP_ASYNC_PF:
161         case KVM_CAP_SYNC_REGS:
162         case KVM_CAP_ONE_REG:
163         case KVM_CAP_ENABLE_CAP:
164         case KVM_CAP_S390_CSS_SUPPORT:
165         case KVM_CAP_IRQFD:
166         case KVM_CAP_IOEVENTFD:
167         case KVM_CAP_DEVICE_CTRL:
168         case KVM_CAP_ENABLE_CAP_VM:
169         case KVM_CAP_S390_IRQCHIP:
170         case KVM_CAP_VM_ATTRIBUTES:
171         case KVM_CAP_MP_STATE:
172                 r = 1;
173                 break;
174         case KVM_CAP_NR_VCPUS:
175         case KVM_CAP_MAX_VCPUS:
176                 r = KVM_MAX_VCPUS;
177                 break;
178         case KVM_CAP_NR_MEMSLOTS:
179                 r = KVM_USER_MEM_SLOTS;
180                 break;
181         case KVM_CAP_S390_COW:
182                 r = MACHINE_HAS_ESOP;
183                 break;
184         default:
185                 r = 0;
186         }
187         return r;
188 }
189
190 static void kvm_s390_sync_dirty_log(struct kvm *kvm,
191                                         struct kvm_memory_slot *memslot)
192 {
193         gfn_t cur_gfn, last_gfn;
194         unsigned long address;
195         struct gmap *gmap = kvm->arch.gmap;
196
197         down_read(&gmap->mm->mmap_sem);
198         /* Loop over all guest pages */
199         last_gfn = memslot->base_gfn + memslot->npages;
200         for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
201                 address = gfn_to_hva_memslot(memslot, cur_gfn);
202
203                 if (gmap_test_and_clear_dirty(address, gmap))
204                         mark_page_dirty(kvm, cur_gfn);
205         }
206         up_read(&gmap->mm->mmap_sem);
207 }
208
209 /* Section: vm related */
210 /*
211  * Get (and clear) the dirty memory log for a memory slot.
212  */
213 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
214                                struct kvm_dirty_log *log)
215 {
216         int r;
217         unsigned long n;
218         struct kvm_memory_slot *memslot;
219         int is_dirty = 0;
220
221         mutex_lock(&kvm->slots_lock);
222
223         r = -EINVAL;
224         if (log->slot >= KVM_USER_MEM_SLOTS)
225                 goto out;
226
227         memslot = id_to_memslot(kvm->memslots, log->slot);
228         r = -ENOENT;
229         if (!memslot->dirty_bitmap)
230                 goto out;
231
232         kvm_s390_sync_dirty_log(kvm, memslot);
233         r = kvm_get_dirty_log(kvm, log, &is_dirty);
234         if (r)
235                 goto out;
236
237         /* Clear the dirty log */
238         if (is_dirty) {
239                 n = kvm_dirty_bitmap_bytes(memslot);
240                 memset(memslot->dirty_bitmap, 0, n);
241         }
242         r = 0;
243 out:
244         mutex_unlock(&kvm->slots_lock);
245         return r;
246 }
247
248 static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
249 {
250         int r;
251
252         if (cap->flags)
253                 return -EINVAL;
254
255         switch (cap->cap) {
256         case KVM_CAP_S390_IRQCHIP:
257                 kvm->arch.use_irqchip = 1;
258                 r = 0;
259                 break;
260         default:
261                 r = -EINVAL;
262                 break;
263         }
264         return r;
265 }
266
267 static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
268 {
269         int ret;
270         unsigned int idx;
271         switch (attr->attr) {
272         case KVM_S390_VM_MEM_ENABLE_CMMA:
273                 ret = -EBUSY;
274                 mutex_lock(&kvm->lock);
275                 if (atomic_read(&kvm->online_vcpus) == 0) {
276                         kvm->arch.use_cmma = 1;
277                         ret = 0;
278                 }
279                 mutex_unlock(&kvm->lock);
280                 break;
281         case KVM_S390_VM_MEM_CLR_CMMA:
282                 mutex_lock(&kvm->lock);
283                 idx = srcu_read_lock(&kvm->srcu);
284                 page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false);
285                 srcu_read_unlock(&kvm->srcu, idx);
286                 mutex_unlock(&kvm->lock);
287                 ret = 0;
288                 break;
289         default:
290                 ret = -ENXIO;
291                 break;
292         }
293         return ret;
294 }
295
296 static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
297 {
298         int ret;
299
300         switch (attr->group) {
301         case KVM_S390_VM_MEM_CTRL:
302                 ret = kvm_s390_mem_control(kvm, attr);
303                 break;
304         default:
305                 ret = -ENXIO;
306                 break;
307         }
308
309         return ret;
310 }
311
312 static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
313 {
314         return -ENXIO;
315 }
316
317 static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
318 {
319         int ret;
320
321         switch (attr->group) {
322         case KVM_S390_VM_MEM_CTRL:
323                 switch (attr->attr) {
324                 case KVM_S390_VM_MEM_ENABLE_CMMA:
325                 case KVM_S390_VM_MEM_CLR_CMMA:
326                         ret = 0;
327                         break;
328                 default:
329                         ret = -ENXIO;
330                         break;
331                 }
332                 break;
333         default:
334                 ret = -ENXIO;
335                 break;
336         }
337
338         return ret;
339 }
340
341 long kvm_arch_vm_ioctl(struct file *filp,
342                        unsigned int ioctl, unsigned long arg)
343 {
344         struct kvm *kvm = filp->private_data;
345         void __user *argp = (void __user *)arg;
346         struct kvm_device_attr attr;
347         int r;
348
349         switch (ioctl) {
350         case KVM_S390_INTERRUPT: {
351                 struct kvm_s390_interrupt s390int;
352
353                 r = -EFAULT;
354                 if (copy_from_user(&s390int, argp, sizeof(s390int)))
355                         break;
356                 r = kvm_s390_inject_vm(kvm, &s390int);
357                 break;
358         }
359         case KVM_ENABLE_CAP: {
360                 struct kvm_enable_cap cap;
361                 r = -EFAULT;
362                 if (copy_from_user(&cap, argp, sizeof(cap)))
363                         break;
364                 r = kvm_vm_ioctl_enable_cap(kvm, &cap);
365                 break;
366         }
367         case KVM_CREATE_IRQCHIP: {
368                 struct kvm_irq_routing_entry routing;
369
370                 r = -EINVAL;
371                 if (kvm->arch.use_irqchip) {
372                         /* Set up dummy routing. */
373                         memset(&routing, 0, sizeof(routing));
374                         kvm_set_irq_routing(kvm, &routing, 0, 0);
375                         r = 0;
376                 }
377                 break;
378         }
379         case KVM_SET_DEVICE_ATTR: {
380                 r = -EFAULT;
381                 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
382                         break;
383                 r = kvm_s390_vm_set_attr(kvm, &attr);
384                 break;
385         }
386         case KVM_GET_DEVICE_ATTR: {
387                 r = -EFAULT;
388                 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
389                         break;
390                 r = kvm_s390_vm_get_attr(kvm, &attr);
391                 break;
392         }
393         case KVM_HAS_DEVICE_ATTR: {
394                 r = -EFAULT;
395                 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
396                         break;
397                 r = kvm_s390_vm_has_attr(kvm, &attr);
398                 break;
399         }
400         default:
401                 r = -ENOTTY;
402         }
403
404         return r;
405 }
406
407 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
408 {
409         int rc;
410         char debug_name[16];
411         static unsigned long sca_offset;
412
413         rc = -EINVAL;
414 #ifdef CONFIG_KVM_S390_UCONTROL
415         if (type & ~KVM_VM_S390_UCONTROL)
416                 goto out_err;
417         if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
418                 goto out_err;
419 #else
420         if (type)
421                 goto out_err;
422 #endif
423
424         rc = s390_enable_sie();
425         if (rc)
426                 goto out_err;
427
428         rc = -ENOMEM;
429
430         kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
431         if (!kvm->arch.sca)
432                 goto out_err;
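        /*
         * Stagger the SCA origin within its page in 16-byte steps,
         * presumably so that the SCA blocks of different VMs do not all
         * start on the same cache lines; the global kvm_lock serializes
         * updates of the static sca_offset.
         */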
433         spin_lock(&kvm_lock);
434         sca_offset = (sca_offset + 16) & 0x7f0;
435         kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
436         spin_unlock(&kvm_lock);
437
438         sprintf(debug_name, "kvm-%u", current->pid);
439
440         kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
441         if (!kvm->arch.dbf)
442                 goto out_nodbf;
443
444         spin_lock_init(&kvm->arch.float_int.lock);
445         INIT_LIST_HEAD(&kvm->arch.float_int.list);
446         init_waitqueue_head(&kvm->arch.ipte_wq);
447
448         debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
449         VM_EVENT(kvm, 3, "%s", "vm created");
450
451         if (type & KVM_VM_S390_UCONTROL) {
452                 kvm->arch.gmap = NULL;
453         } else {
454                 kvm->arch.gmap = gmap_alloc(current->mm);
455                 if (!kvm->arch.gmap)
456                         goto out_nogmap;
457                 kvm->arch.gmap->private = kvm;
458                 kvm->arch.gmap->pfault_enabled = 0;
459         }
460
461         kvm->arch.css_support = 0;
462         kvm->arch.use_irqchip = 0;
463
464         spin_lock_init(&kvm->arch.start_stop_lock);
465
466         return 0;
467 out_nogmap:
468         debug_unregister(kvm->arch.dbf);
469 out_nodbf:
470         free_page((unsigned long)(kvm->arch.sca));
471 out_err:
472         return rc;
473 }
474
475 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
476 {
477         VCPU_EVENT(vcpu, 3, "%s", "free cpu");
478         trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
479         kvm_s390_clear_local_irqs(vcpu);
480         kvm_clear_async_pf_completion_queue(vcpu);
481         if (!kvm_is_ucontrol(vcpu->kvm)) {
482                 clear_bit(63 - vcpu->vcpu_id,
483                           (unsigned long *) &vcpu->kvm->arch.sca->mcn);
484                 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
485                     (__u64) vcpu->arch.sie_block)
486                         vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
487         }
488         smp_mb();
489
490         if (kvm_is_ucontrol(vcpu->kvm))
491                 gmap_free(vcpu->arch.gmap);
492
493         if (kvm_s390_cmma_enabled(vcpu->kvm))
494                 kvm_s390_vcpu_unsetup_cmma(vcpu);
495         free_page((unsigned long)(vcpu->arch.sie_block));
496
497         kvm_vcpu_uninit(vcpu);
498         kmem_cache_free(kvm_vcpu_cache, vcpu);
499 }
500
501 static void kvm_free_vcpus(struct kvm *kvm)
502 {
503         unsigned int i;
504         struct kvm_vcpu *vcpu;
505
506         kvm_for_each_vcpu(i, vcpu, kvm)
507                 kvm_arch_vcpu_destroy(vcpu);
508
509         mutex_lock(&kvm->lock);
510         for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
511                 kvm->vcpus[i] = NULL;
512
513         atomic_set(&kvm->online_vcpus, 0);
514         mutex_unlock(&kvm->lock);
515 }
516
517 void kvm_arch_sync_events(struct kvm *kvm)
518 {
519 }
520
521 void kvm_arch_destroy_vm(struct kvm *kvm)
522 {
523         kvm_free_vcpus(kvm);
524         free_page((unsigned long)(kvm->arch.sca));
525         debug_unregister(kvm->arch.dbf);
526         if (!kvm_is_ucontrol(kvm))
527                 gmap_free(kvm->arch.gmap);
528         kvm_s390_destroy_adapters(kvm);
529         kvm_s390_clear_float_irqs(kvm);
530 }
531
532 /* Section: vcpu related */
533 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
534 {
535         vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
536         kvm_clear_async_pf_completion_queue(vcpu);
537         if (kvm_is_ucontrol(vcpu->kvm)) {
538                 vcpu->arch.gmap = gmap_alloc(current->mm);
539                 if (!vcpu->arch.gmap)
540                         return -ENOMEM;
541                 vcpu->arch.gmap->private = vcpu->kvm;
542                 return 0;
543         }
544
545         vcpu->arch.gmap = vcpu->kvm->arch.gmap;
546         vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
547                                     KVM_SYNC_GPRS |
548                                     KVM_SYNC_ACRS |
549                                     KVM_SYNC_CRS;
550         return 0;
551 }
552
553 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
554 {
555         /* Nothing to do */
556 }
557
558 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
559 {
560         save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
561         save_fp_regs(vcpu->arch.host_fpregs.fprs);
562         save_access_regs(vcpu->arch.host_acrs);
563         restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
564         restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
565         restore_access_regs(vcpu->run->s.regs.acrs);
566         gmap_enable(vcpu->arch.gmap);
567         atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
568 }
569
570 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
571 {
572         atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
573         gmap_disable(vcpu->arch.gmap);
574         save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
575         save_fp_regs(vcpu->arch.guest_fpregs.fprs);
576         save_access_regs(vcpu->run->s.regs.acrs);
577         restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
578         restore_fp_regs(vcpu->arch.host_fpregs.fprs);
579         restore_access_regs(vcpu->arch.host_acrs);
580 }
581
582 static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
583 {
584         /* this equals initial CPU reset in POP, but we don't switch to ESA */
585         vcpu->arch.sie_block->gpsw.mask = 0UL;
586         vcpu->arch.sie_block->gpsw.addr = 0UL;
587         kvm_s390_set_prefix(vcpu, 0);
588         vcpu->arch.sie_block->cputm     = 0UL;
589         vcpu->arch.sie_block->ckc       = 0UL;
590         vcpu->arch.sie_block->todpr     = 0;
591         memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
592         vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
593         vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
594         vcpu->arch.guest_fpregs.fpc = 0;
595         asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
596         vcpu->arch.sie_block->gbea = 1;
597         vcpu->arch.sie_block->pp = 0;
598         vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
599         kvm_clear_async_pf_completion_queue(vcpu);
600         if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
601                 kvm_s390_vcpu_stop(vcpu);
602         kvm_s390_clear_local_irqs(vcpu);
603 }
604
605 int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
606 {
607         return 0;
608 }
609
610 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
611 {
612         free_page(vcpu->arch.sie_block->cbrlo);
613         vcpu->arch.sie_block->cbrlo = 0;
614 }
615
616 int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
617 {
618         vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
619         if (!vcpu->arch.sie_block->cbrlo)
620                 return -ENOMEM;
621
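        /*
         * ECB2 bit 0x80 enables CMMA (collaborative memory management)
         * interpretation, with the page allocated above acting as the
         * CBRLO buffer.  The 0x08 bit set by default in
         * kvm_arch_vcpu_setup is cleared here, as it apparently cannot
         * be combined with CMMA.
         */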
622         vcpu->arch.sie_block->ecb2 |= 0x80;
623         vcpu->arch.sie_block->ecb2 &= ~0x08;
624         return 0;
625 }
626
627 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
628 {
629         int rc = 0;
630
631         atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
632                                                     CPUSTAT_SM |
633                                                     CPUSTAT_STOPPED |
634                                                     CPUSTAT_GED);
635         vcpu->arch.sie_block->ecb   = 6;
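        /*
         * Facility 73 is transactional execution, facility 50 its
         * constrained variant; ECB bit 0x10 lets the guest use
         * transactional execution under SIE.
         */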
636         if (test_vfacility(50) && test_vfacility(73))
637                 vcpu->arch.sie_block->ecb |= 0x10;
638
639         vcpu->arch.sie_block->ecb2  = 8;
640         vcpu->arch.sie_block->eca   = 0xD1002000U;
641         if (sclp_has_siif())
642                 vcpu->arch.sie_block->eca |= 1;
643         vcpu->arch.sie_block->fac   = (int) (long) vfacilities;
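        /*
         * Always intercept the storage-key instructions (ISKE, SSKE,
         * RRBE) and TPROT so that KVM gets control when the guest
         * issues them.
         */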
644         vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
645                                       ICTL_TPROT;
646
647         if (kvm_s390_cmma_enabled(vcpu->kvm)) {
648                 rc = kvm_s390_vcpu_setup_cmma(vcpu);
649                 if (rc)
650                         return rc;
651         }
652         hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
653         vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
654         get_cpu_id(&vcpu->arch.cpu_id);
655         vcpu->arch.cpu_id.version = 0xff;
656         return rc;
657 }
658
659 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
660                                       unsigned int id)
661 {
662         struct kvm_vcpu *vcpu;
663         struct sie_page *sie_page;
664         int rc = -EINVAL;
665
666         if (id >= KVM_MAX_VCPUS)
667                 goto out;
668
669         rc = -ENOMEM;
670
671         vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
672         if (!vcpu)
673                 goto out;
674
675         sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
676         if (!sie_page)
677                 goto out_free_cpu;
678
679         vcpu->arch.sie_block = &sie_page->sie_block;
680         vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
681
682         vcpu->arch.sie_block->icpua = id;
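        /*
         * For non-ucontrol guests, hook this vcpu into the system
         * control area: store the SIE block address in the per-cpu SCA
         * entry, let the SIE block point back at the SCA (scaoh/scaol
         * hold the upper/lower word of its address) and flag the cpu
         * in the mcn bitmap.
         */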
683         if (!kvm_is_ucontrol(kvm)) {
684                 if (!kvm->arch.sca) {
685                         WARN_ON_ONCE(1);
686                         goto out_free_cpu;
687                 }
688                 if (!kvm->arch.sca->cpu[id].sda)
689                         kvm->arch.sca->cpu[id].sda =
690                                 (__u64) vcpu->arch.sie_block;
691                 vcpu->arch.sie_block->scaoh =
692                         (__u32)(((__u64)kvm->arch.sca) >> 32);
693                 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
694                 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
695         }
696
697         spin_lock_init(&vcpu->arch.local_int.lock);
698         INIT_LIST_HEAD(&vcpu->arch.local_int.list);
699         vcpu->arch.local_int.float_int = &kvm->arch.float_int;
700         vcpu->arch.local_int.wq = &vcpu->wq;
701         vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
702
703         rc = kvm_vcpu_init(vcpu, kvm, id);
704         if (rc)
705                 goto out_free_sie_block;
706         VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
707                  vcpu->arch.sie_block);
708         trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
709
710         return vcpu;
711 out_free_sie_block:
712         free_page((unsigned long)(vcpu->arch.sie_block));
713 out_free_cpu:
714         kmem_cache_free(kvm_vcpu_cache, vcpu);
715 out:
716         return ERR_PTR(rc);
717 }
718
719 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
720 {
721         return kvm_cpu_has_interrupt(vcpu);
722 }
723
724 void s390_vcpu_block(struct kvm_vcpu *vcpu)
725 {
726         atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
727 }
728
729 void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
730 {
731         atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
732 }
733
734 /*
735  * Kick a guest cpu out of SIE and wait until SIE is not running.
736  * If the CPU is not running (e.g. waiting as idle) the function will
737  * return immediately. */
738 void exit_sie(struct kvm_vcpu *vcpu)
739 {
740         atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
741         while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
742                 cpu_relax();
743 }
744
745 /* Kick a guest cpu out of SIE and prevent SIE-reentry */
746 void exit_sie_sync(struct kvm_vcpu *vcpu)
747 {
748         s390_vcpu_block(vcpu);
749         exit_sie(vcpu);
750 }
751
752 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
753 {
754         int i;
755         struct kvm *kvm = gmap->private;
756         struct kvm_vcpu *vcpu;
757
758         kvm_for_each_vcpu(i, vcpu, kvm) {
759                 /* match against both prefix pages */
760                 if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
761                         VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
762                         kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
763                         exit_sie_sync(vcpu);
764                 }
765         }
766 }
767
768 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
769 {
770         /* kvm common code refers to this, but never calls it */
771         BUG();
772         return 0;
773 }
774
775 static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
776                                            struct kvm_one_reg *reg)
777 {
778         int r = -EINVAL;
779
780         switch (reg->id) {
781         case KVM_REG_S390_TODPR:
782                 r = put_user(vcpu->arch.sie_block->todpr,
783                              (u32 __user *)reg->addr);
784                 break;
785         case KVM_REG_S390_EPOCHDIFF:
786                 r = put_user(vcpu->arch.sie_block->epoch,
787                              (u64 __user *)reg->addr);
788                 break;
789         case KVM_REG_S390_CPU_TIMER:
790                 r = put_user(vcpu->arch.sie_block->cputm,
791                              (u64 __user *)reg->addr);
792                 break;
793         case KVM_REG_S390_CLOCK_COMP:
794                 r = put_user(vcpu->arch.sie_block->ckc,
795                              (u64 __user *)reg->addr);
796                 break;
797         case KVM_REG_S390_PFTOKEN:
798                 r = put_user(vcpu->arch.pfault_token,
799                              (u64 __user *)reg->addr);
800                 break;
801         case KVM_REG_S390_PFCOMPARE:
802                 r = put_user(vcpu->arch.pfault_compare,
803                              (u64 __user *)reg->addr);
804                 break;
805         case KVM_REG_S390_PFSELECT:
806                 r = put_user(vcpu->arch.pfault_select,
807                              (u64 __user *)reg->addr);
808                 break;
809         case KVM_REG_S390_PP:
810                 r = put_user(vcpu->arch.sie_block->pp,
811                              (u64 __user *)reg->addr);
812                 break;
813         case KVM_REG_S390_GBEA:
814                 r = put_user(vcpu->arch.sie_block->gbea,
815                              (u64 __user *)reg->addr);
816                 break;
817         default:
818                 break;
819         }
820
821         return r;
822 }
823
824 static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
825                                            struct kvm_one_reg *reg)
826 {
827         int r = -EINVAL;
828
829         switch (reg->id) {
830         case KVM_REG_S390_TODPR:
831                 r = get_user(vcpu->arch.sie_block->todpr,
832                              (u32 __user *)reg->addr);
833                 break;
834         case KVM_REG_S390_EPOCHDIFF:
835                 r = get_user(vcpu->arch.sie_block->epoch,
836                              (u64 __user *)reg->addr);
837                 break;
838         case KVM_REG_S390_CPU_TIMER:
839                 r = get_user(vcpu->arch.sie_block->cputm,
840                              (u64 __user *)reg->addr);
841                 break;
842         case KVM_REG_S390_CLOCK_COMP:
843                 r = get_user(vcpu->arch.sie_block->ckc,
844                              (u64 __user *)reg->addr);
845                 break;
846         case KVM_REG_S390_PFTOKEN:
847                 r = get_user(vcpu->arch.pfault_token,
848                              (u64 __user *)reg->addr);
849                 break;
850         case KVM_REG_S390_PFCOMPARE:
851                 r = get_user(vcpu->arch.pfault_compare,
852                              (u64 __user *)reg->addr);
853                 break;
854         case KVM_REG_S390_PFSELECT:
855                 r = get_user(vcpu->arch.pfault_select,
856                              (u64 __user *)reg->addr);
857                 break;
858         case KVM_REG_S390_PP:
859                 r = get_user(vcpu->arch.sie_block->pp,
860                              (u64 __user *)reg->addr);
861                 break;
862         case KVM_REG_S390_GBEA:
863                 r = get_user(vcpu->arch.sie_block->gbea,
864                              (u64 __user *)reg->addr);
865                 break;
866         default:
867                 break;
868         }
869
870         return r;
871 }
872
873 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
874 {
875         kvm_s390_vcpu_initial_reset(vcpu);
876         return 0;
877 }
878
879 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
880 {
881         memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
882         return 0;
883 }
884
885 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
886 {
887         memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
888         return 0;
889 }
890
891 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
892                                   struct kvm_sregs *sregs)
893 {
894         memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
895         memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
896         restore_access_regs(vcpu->run->s.regs.acrs);
897         return 0;
898 }
899
900 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
901                                   struct kvm_sregs *sregs)
902 {
903         memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
904         memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
905         return 0;
906 }
907
908 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
909 {
910         if (test_fp_ctl(fpu->fpc))
911                 return -EINVAL;
912         memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
913         vcpu->arch.guest_fpregs.fpc = fpu->fpc;
914         restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
915         restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
916         return 0;
917 }
918
919 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
920 {
921         memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
922         fpu->fpc = vcpu->arch.guest_fpregs.fpc;
923         return 0;
924 }
925
926 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
927 {
928         int rc = 0;
929
930         if (!is_vcpu_stopped(vcpu))
931                 rc = -EBUSY;
932         else {
933                 vcpu->run->psw_mask = psw.mask;
934                 vcpu->run->psw_addr = psw.addr;
935         }
936         return rc;
937 }
938
939 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
940                                   struct kvm_translation *tr)
941 {
942         return -EINVAL; /* not implemented yet */
943 }
944
945 #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
946                               KVM_GUESTDBG_USE_HW_BP | \
947                               KVM_GUESTDBG_ENABLE)
948
949 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
950                                         struct kvm_guest_debug *dbg)
951 {
952         int rc = 0;
953
954         vcpu->guest_debug = 0;
955         kvm_s390_clear_bp_data(vcpu);
956
957         if (dbg->control & ~VALID_GUESTDBG_FLAGS)
958                 return -EINVAL;
959
960         if (dbg->control & KVM_GUESTDBG_ENABLE) {
961                 vcpu->guest_debug = dbg->control;
962                 /* enforce guest PER */
963                 atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
964
965                 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
966                         rc = kvm_s390_import_bp_data(vcpu, dbg);
967         } else {
968                 atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
969                 vcpu->arch.guestdbg.last_bp = 0;
970         }
971
972         if (rc) {
973                 vcpu->guest_debug = 0;
974                 kvm_s390_clear_bp_data(vcpu);
975                 atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
976         }
977
978         return rc;
979 }
980
981 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
982                                     struct kvm_mp_state *mp_state)
983 {
984         /* CHECK_STOP and LOAD are not supported yet */
985         return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
986                                        KVM_MP_STATE_OPERATING;
987 }
988
989 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
990                                     struct kvm_mp_state *mp_state)
991 {
992         int rc = 0;
993
994         /* user space knows about this interface - let it control the state */
995         vcpu->kvm->arch.user_cpu_state_ctrl = 1;
996
997         switch (mp_state->mp_state) {
998         case KVM_MP_STATE_STOPPED:
999                 kvm_s390_vcpu_stop(vcpu);
1000                 break;
1001         case KVM_MP_STATE_OPERATING:
1002                 kvm_s390_vcpu_start(vcpu);
1003                 break;
1004         case KVM_MP_STATE_LOAD:
1005         case KVM_MP_STATE_CHECK_STOP:
1006                 /* fall through - CHECK_STOP and LOAD are not supported yet */
1007         default:
1008                 rc = -ENXIO;
1009         }
1010
1011         return rc;
1012 }
1013
1014 bool kvm_s390_cmma_enabled(struct kvm *kvm)
1015 {
1016         if (!MACHINE_IS_LPAR)
1017                 return false;
1018         /* only enable for z10 and later */
1019         if (!MACHINE_HAS_EDAT1)
1020                 return false;
1021         if (!kvm->arch.use_cmma)
1022                 return false;
1023         return true;
1024 }
1025
1026 static bool ibs_enabled(struct kvm_vcpu *vcpu)
1027 {
1028         return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
1029 }
1030
1031 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
1032 {
1033 retry:
1034         s390_vcpu_unblock(vcpu);
1035         /*
1036          * We use MMU_RELOAD just to re-arm the ipte notifier for the
1037          * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
1038          * This ensures that the ipte instruction for this request has
1039          * already finished. We might race against a second unmapper that
1040          * wants to set the blocking bit. Let's just retry the request loop.
1041          */
1042         if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
1043                 int rc;
1044                 rc = gmap_ipte_notify(vcpu->arch.gmap,
1045                                       kvm_s390_get_prefix(vcpu),
1046                                       PAGE_SIZE * 2);
1047                 if (rc)
1048                         return rc;
1049                 goto retry;
1050         }
1051
1052         if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
1053                 if (!ibs_enabled(vcpu)) {
1054                         trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
1055                         atomic_set_mask(CPUSTAT_IBS,
1056                                         &vcpu->arch.sie_block->cpuflags);
1057                 }
1058                 goto retry;
1059         }
1060
1061         if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
1062                 if (ibs_enabled(vcpu)) {
1063                         trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
1064                         atomic_clear_mask(CPUSTAT_IBS,
1065                                           &vcpu->arch.sie_block->cpuflags);
1066                 }
1067                 goto retry;
1068         }
1069
1070         /* nothing to do, just clear the request */
1071         clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
1072
1073         return 0;
1074 }
1075
1076 /**
1077  * kvm_arch_fault_in_page - fault-in guest page if necessary
1078  * @vcpu: The corresponding virtual cpu
1079  * @gpa: Guest physical address
1080  * @writable: Whether the page should be writable or not
1081  *
1082  * Make sure that a guest page has been faulted-in on the host.
1083  *
1084  * Return: Zero on success, negative error code otherwise.
1085  */
1086 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
1087 {
1088         struct mm_struct *mm = current->mm;
1089         hva_t hva;
1090         long rc;
1091
1092         hva = gmap_fault(gpa, vcpu->arch.gmap);
1093         if (IS_ERR_VALUE(hva))
1094                 return (long)hva;
1095         down_read(&mm->mmap_sem);
1096         rc = get_user_pages(current, mm, hva, 1, writable, 0, NULL, NULL);
1097         up_read(&mm->mmap_sem);
1098
1099         return rc < 0 ? rc : 0;
1100 }
1101
1102 static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
1103                                       unsigned long token)
1104 {
1105         struct kvm_s390_interrupt inti;
1106         inti.parm64 = token;
1107
1108         if (start_token) {
1109                 inti.type = KVM_S390_INT_PFAULT_INIT;
1110                 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
1111         } else {
1112                 inti.type = KVM_S390_INT_PFAULT_DONE;
1113                 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
1114         }
1115 }
1116
1117 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
1118                                      struct kvm_async_pf *work)
1119 {
1120         trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
1121         __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
1122 }
1123
1124 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
1125                                  struct kvm_async_pf *work)
1126 {
1127         trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
1128         __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
1129 }
1130
1131 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
1132                                struct kvm_async_pf *work)
1133 {
1134         /* s390 will always inject the page directly */
1135 }
1136
1137 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
1138 {
1139         /*
1140          * s390 will always inject the page directly,
1141          * but we still want check_async_completion to clean up
1142          */
1143         return true;
1144 }
1145
1146 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
1147 {
1148         hva_t hva;
1149         struct kvm_arch_async_pf arch;
1150         int rc;
1151
1152         if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1153                 return 0;
1154         if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
1155             vcpu->arch.pfault_compare)
1156                 return 0;
1157         if (psw_extint_disabled(vcpu))
1158                 return 0;
1159         if (kvm_cpu_has_interrupt(vcpu))
1160                 return 0;
1161         if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
1162                 return 0;
1163         if (!vcpu->arch.gmap->pfault_enabled)
1164                 return 0;
1165
1166         hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
1167         hva += current->thread.gmap_addr & ~PAGE_MASK;
1168         if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
1169                 return 0;
1170
1171         rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
1172         return rc;
1173 }
1174
1175 static int vcpu_pre_run(struct kvm_vcpu *vcpu)
1176 {
1177         int rc, cpuflags;
1178
1179         /*
1180          * On s390, notifications for arriving pages are delivered directly
1181          * to the guest, but the housekeeping for completed pfaults is
1182          * handled outside the worker.
1183          */
1184         kvm_check_async_pf_completion(vcpu);
1185
1186         memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
1187
1188         if (need_resched())
1189                 schedule();
1190
1191         if (test_cpu_flag(CIF_MCCK_PENDING))
1192                 s390_handle_mcck();
1193
1194         if (!kvm_is_ucontrol(vcpu->kvm))
1195                 kvm_s390_deliver_pending_interrupts(vcpu);
1196
1197         rc = kvm_s390_handle_requests(vcpu);
1198         if (rc)
1199                 return rc;
1200
1201         if (guestdbg_enabled(vcpu)) {
1202                 kvm_s390_backup_guest_per_regs(vcpu);
1203                 kvm_s390_patch_guest_per_regs(vcpu);
1204         }
1205
1206         vcpu->arch.sie_block->icptcode = 0;
1207         cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
1208         VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
1209         trace_kvm_s390_sie_enter(vcpu, cpuflags);
1210
1211         return 0;
1212 }
1213
1214 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
1215 {
1216         int rc = -1;
1217
1218         VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
1219                    vcpu->arch.sie_block->icptcode);
1220         trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
1221
1222         if (guestdbg_enabled(vcpu))
1223                 kvm_s390_restore_guest_per_regs(vcpu);
1224
1225         if (exit_reason >= 0) {
1226                 rc = 0;
1227         } else if (kvm_is_ucontrol(vcpu->kvm)) {
1228                 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
1229                 vcpu->run->s390_ucontrol.trans_exc_code =
1230                                                 current->thread.gmap_addr;
1231                 vcpu->run->s390_ucontrol.pgm_code = 0x10;
1232                 rc = -EREMOTE;
1233
1234         } else if (current->thread.gmap_pfault) {
1235                 trace_kvm_s390_major_guest_pfault(vcpu);
1236                 current->thread.gmap_pfault = 0;
1237                 if (kvm_arch_setup_async_pf(vcpu)) {
1238                         rc = 0;
1239                 } else {
1240                         gpa_t gpa = current->thread.gmap_addr;
1241                         rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
1242                 }
1243         }
1244
1245         if (rc == -1) {
1246                 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
1247                 trace_kvm_s390_sie_fault(vcpu);
1248                 rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
1249         }
1250
1251         memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
1252
1253         if (rc == 0) {
1254                 if (kvm_is_ucontrol(vcpu->kvm))
1255                         /* Don't exit for host interrupts. */
1256                         rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
1257                 else
1258                         rc = kvm_handle_sie_intercept(vcpu);
1259         }
1260
1261         return rc;
1262 }
1263
1264 static int __vcpu_run(struct kvm_vcpu *vcpu)
1265 {
1266         int rc, exit_reason;
1267
1268         /*
1269          * We try to hold kvm->srcu during most of vcpu_run (except when run-
1270          * ning the guest), so that memslots (and other stuff) are protected
1271          */
1272         vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1273
1274         do {
1275                 rc = vcpu_pre_run(vcpu);
1276                 if (rc)
1277                         break;
1278
1279                 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
1280                 /*
1281                  * As PF_VCPU will be used in the fault handler, there
1282                  * must be no uaccess between guest_enter and guest_exit.
1283                  */
1284                 preempt_disable();
1285                 kvm_guest_enter();
1286                 preempt_enable();
1287                 exit_reason = sie64a(vcpu->arch.sie_block,
1288                                      vcpu->run->s.regs.gprs);
1289                 kvm_guest_exit();
1290                 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1291
1292                 rc = vcpu_post_run(vcpu, exit_reason);
1293         } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
1294
1295         srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
1296         return rc;
1297 }
1298
1299 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1300 {
1301         int rc;
1302         sigset_t sigsaved;
1303
1304         if (guestdbg_exit_pending(vcpu)) {
1305                 kvm_s390_prepare_debug_exit(vcpu);
1306                 return 0;
1307         }
1308
1309         if (vcpu->sigset_active)
1310                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1311
1312         if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
1313                 kvm_s390_vcpu_start(vcpu);
1314         } else if (is_vcpu_stopped(vcpu)) {
1315                 pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
1316                                    vcpu->vcpu_id);
1317                 return -EINVAL;
1318         }
1319
1320         switch (kvm_run->exit_reason) {
1321         case KVM_EXIT_S390_SIEIC:
1322         case KVM_EXIT_UNKNOWN:
1323         case KVM_EXIT_INTR:
1324         case KVM_EXIT_S390_RESET:
1325         case KVM_EXIT_S390_UCONTROL:
1326         case KVM_EXIT_S390_TSCH:
1327         case KVM_EXIT_DEBUG:
1328                 break;
1329         default:
1330                 BUG();
1331         }
1332
1333         vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
1334         vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
1335         if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
1336                 kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
1337                 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
1338         }
1339         if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
1340                 kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
1341                 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
1342                 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
1343         }
1344
1345         might_fault();
1346         rc = __vcpu_run(vcpu);
1347
1348         if (signal_pending(current) && !rc) {
1349                 kvm_run->exit_reason = KVM_EXIT_INTR;
1350                 rc = -EINTR;
1351         }
1352
1353         if (guestdbg_exit_pending(vcpu) && !rc)  {
1354                 kvm_s390_prepare_debug_exit(vcpu);
1355                 rc = 0;
1356         }
1357
1358         if (rc == -EOPNOTSUPP) {
1359                 /* intercept cannot be handled in-kernel, prepare kvm-run */
1360                 kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
1361                 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
1362                 kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
1363                 kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
1364                 rc = 0;
1365         }
1366
1367         if (rc == -EREMOTE) {
1368                 /* intercept was handled, but userspace support is needed;
1369                  * kvm_run has been prepared by the handler */
1370                 rc = 0;
1371         }
1372
1373         kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
1374         kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;
1375         kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
1376         memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
1377
1378         if (vcpu->sigset_active)
1379                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1380
1381         vcpu->stat.exit_userspace++;
1382         return rc;
1383 }
1384
1385 /*
1386  * store status at address
1387  * we have two special cases:
1388  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
1389  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
1390  */
1391 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
1392 {
1393         unsigned char archmode = 1;
1394         unsigned int px;
1395         u64 clkcomp;
1396         int rc;
1397
1398         if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
1399                 if (write_guest_abs(vcpu, 163, &archmode, 1))
1400                         return -EFAULT;
1401                 gpa = SAVE_AREA_BASE;
1402         } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
1403                 if (write_guest_real(vcpu, 163, &archmode, 1))
1404                         return -EFAULT;
1405                 gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
1406         }
1407         rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
1408                              vcpu->arch.guest_fpregs.fprs, 128);
1409         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
1410                               vcpu->run->s.regs.gprs, 128);
1411         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
1412                               &vcpu->arch.sie_block->gpsw, 16);
1413         px = kvm_s390_get_prefix(vcpu);
1414         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
1415                               &px, 4);
1416         rc |= write_guest_abs(vcpu,
1417                               gpa + offsetof(struct save_area, fp_ctrl_reg),
1418                               &vcpu->arch.guest_fpregs.fpc, 4);
1419         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
1420                               &vcpu->arch.sie_block->todpr, 4);
1421         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
1422                               &vcpu->arch.sie_block->cputm, 8);
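        /*
         * Only bits 0-55 of the clock comparator are stored, shifted
         * into the low-order bytes of the field, which appears to match
         * the architected store-status format.
         */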
1423         clkcomp = vcpu->arch.sie_block->ckc >> 8;
1424         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
1425                               &clkcomp, 8);
1426         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
1427                               &vcpu->run->s.regs.acrs, 64);
1428         rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
1429                               &vcpu->arch.sie_block->gcr, 128);
1430         return rc ? -EFAULT : 0;
1431 }
1432
1433 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
1434 {
1435         /*
1436          * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
1437          * copying in vcpu load/put. Let's update our copies before we save
1438          * them into the save area.
1439          */
1440         save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
1441         save_fp_regs(vcpu->arch.guest_fpregs.fprs);
1442         save_access_regs(vcpu->run->s.regs.acrs);
1443
1444         return kvm_s390_store_status_unloaded(vcpu, addr);
1445 }
1446
1447 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
1448 {
1449         kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
1450         kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
1451         exit_sie_sync(vcpu);
1452 }
1453
1454 static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
1455 {
1456         unsigned int i;
1457         struct kvm_vcpu *vcpu;
1458
1459         kvm_for_each_vcpu(i, vcpu, kvm) {
1460                 __disable_ibs_on_vcpu(vcpu);
1461         }
1462 }
1463
1464 static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
1465 {
1466         kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
1467         kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
1468         exit_sie_sync(vcpu);
1469 }
1470
1471 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
1472 {
1473         int i, online_vcpus, started_vcpus = 0;
1474
1475         if (!is_vcpu_stopped(vcpu))
1476                 return;
1477
1478         trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
1479         /* Only one cpu at a time may enter/leave the STOPPED state. */
1480         spin_lock(&vcpu->kvm->arch.start_stop_lock);
1481         online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
1482
1483         for (i = 0; i < online_vcpus; i++) {
1484                 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
1485                         started_vcpus++;
1486         }
1487
1488         if (started_vcpus == 0) {
1489                 /* we're the only active VCPU -> speed it up */
1490                 __enable_ibs_on_vcpu(vcpu);
1491         } else if (started_vcpus == 1) {
1492                 /*
1493                  * As we are starting a second VCPU, we have to disable
1494                  * the IBS facility on all VCPUs to remove potentially
1495                  * outstanding ENABLE requests.
1496                  */
1497                 __disable_ibs_on_all_vcpus(vcpu->kvm);
1498         }
1499
1500         atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
1501         /*
1502          * Another VCPU might have used IBS while we were offline.
1503          * Let's play safe and flush the VCPU at startup.
1504          */
1505         vcpu->arch.sie_block->ihcpu  = 0xffff;
1506         spin_unlock(&vcpu->kvm->arch.start_stop_lock);
1507         return;
1508 }
1509
1510 void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
1511 {
1512         int i, online_vcpus, started_vcpus = 0;
1513         struct kvm_vcpu *started_vcpu = NULL;
1514
1515         if (is_vcpu_stopped(vcpu))
1516                 return;
1517
1518         trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
1519         /* Only one cpu at a time may enter/leave the STOPPED state. */
1520         spin_lock(&vcpu->kvm->arch.start_stop_lock);
1521         online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
1522
1523         /* Need to lock access to action_bits to avoid a SIGP race condition */
1524         spin_lock(&vcpu->arch.local_int.lock);
1525         atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
1526
1527         /* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
1528         vcpu->arch.local_int.action_bits &=
1529                                  ~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
1530         spin_unlock(&vcpu->arch.local_int.lock);
1531
1532         __disable_ibs_on_vcpu(vcpu);
1533
1534         for (i = 0; i < online_vcpus; i++) {
1535                 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
1536                         started_vcpus++;
1537                         started_vcpu = vcpu->kvm->vcpus[i];
1538                 }
1539         }
1540
1541         if (started_vcpus == 1) {
1542                 /*
1543                  * As we only have one VCPU left, we want to enable the
1544                  * IBS facility for that VCPU to speed it up.
1545                  */
1546                 __enable_ibs_on_vcpu(started_vcpu);
1547         }
1548
1549         spin_unlock(&vcpu->kvm->arch.start_stop_lock);
1550         return;
1551 }
1552
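/*
 * Per-VCPU KVM_ENABLE_CAP handler: only KVM_CAP_S390_CSS_SUPPORT is
 * accepted here and cap->flags must be zero. A minimal (hypothetical)
 * userspace invocation would look like:
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_CSS_SUPPORT };
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */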
1553 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1554                                      struct kvm_enable_cap *cap)
1555 {
1556         int r;
1557
1558         if (cap->flags)
1559                 return -EINVAL;
1560
1561         switch (cap->cap) {
1562         case KVM_CAP_S390_CSS_SUPPORT:
1563                 if (!vcpu->kvm->arch.css_support) {
1564                         vcpu->kvm->arch.css_support = 1;
1565                         trace_kvm_s390_enable_css(vcpu->kvm);
1566                 }
1567                 r = 0;
1568                 break;
1569         default:
1570                 r = -EINVAL;
1571                 break;
1572         }
1573         return r;
1574 }
1575
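/*
 * Dispatcher for the arch specific vcpu ioctls. KVM_S390_STORE_STATUS
 * takes the kvm->srcu read lock around the guest memory access; the
 * ucontrol address space mappings are only available when the kernel is
 * built with CONFIG_KVM_S390_UCONTROL.
 */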
1576 long kvm_arch_vcpu_ioctl(struct file *filp,
1577                          unsigned int ioctl, unsigned long arg)
1578 {
1579         struct kvm_vcpu *vcpu = filp->private_data;
1580         void __user *argp = (void __user *)arg;
1581         int idx;
1582         long r;
1583
1584         switch (ioctl) {
1585         case KVM_S390_INTERRUPT: {
1586                 struct kvm_s390_interrupt s390int;
1587
1588                 r = -EFAULT;
1589                 if (copy_from_user(&s390int, argp, sizeof(s390int)))
1590                         break;
1591                 r = kvm_s390_inject_vcpu(vcpu, &s390int);
1592                 break;
1593         }
1594         case KVM_S390_STORE_STATUS:
1595                 idx = srcu_read_lock(&vcpu->kvm->srcu);
1596                 r = kvm_s390_vcpu_store_status(vcpu, arg);
1597                 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1598                 break;
1599         case KVM_S390_SET_INITIAL_PSW: {
1600                 psw_t psw;
1601
1602                 r = -EFAULT;
1603                 if (copy_from_user(&psw, argp, sizeof(psw)))
1604                         break;
1605                 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
1606                 break;
1607         }
1608         case KVM_S390_INITIAL_RESET:
1609                 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
1610                 break;
1611         case KVM_SET_ONE_REG:
1612         case KVM_GET_ONE_REG: {
1613                 struct kvm_one_reg reg;
1614                 r = -EFAULT;
1615                 if (copy_from_user(&reg, argp, sizeof(reg)))
1616                         break;
1617                 if (ioctl == KVM_SET_ONE_REG)
1618                         r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
1619                 else
1620                         r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
1621                 break;
1622         }
1623 #ifdef CONFIG_KVM_S390_UCONTROL
1624         case KVM_S390_UCAS_MAP: {
1625                 struct kvm_s390_ucas_mapping ucasmap;
1626
1627                 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
1628                         r = -EFAULT;
1629                         break;
1630                 }
1631
1632                 if (!kvm_is_ucontrol(vcpu->kvm)) {
1633                         r = -EINVAL;
1634                         break;
1635                 }
1636
1637                 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
1638                                      ucasmap.vcpu_addr, ucasmap.length);
1639                 break;
1640         }
1641         case KVM_S390_UCAS_UNMAP: {
1642                 struct kvm_s390_ucas_mapping ucasmap;
1643
1644                 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
1645                         r = -EFAULT;
1646                         break;
1647                 }
1648
1649                 if (!kvm_is_ucontrol(vcpu->kvm)) {
1650                         r = -EINVAL;
1651                         break;
1652                 }
1653
1654                 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
1655                         ucasmap.length);
1656                 break;
1657         }
1658 #endif
1659         case KVM_S390_VCPU_FAULT: {
1660                 r = gmap_fault(arg, vcpu->arch.gmap);
1661                 if (!IS_ERR_VALUE(r))
1662                         r = 0;
1663                 break;
1664         }
1665         case KVM_ENABLE_CAP:
1666         {
1667                 struct kvm_enable_cap cap;
1668                 r = -EFAULT;
1669                 if (copy_from_user(&cap, argp, sizeof(cap)))
1670                         break;
1671                 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1672                 break;
1673         }
1674         default:
1675                 r = -ENOTTY;
1676         }
1677         return r;
1678 }
1679
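/*
 * Fault handler for mmap() on the vcpu fd. For user controlled VMs the
 * page at KVM_S390_SIE_PAGE_OFFSET maps the SIE control block into
 * userspace; every other access is answered with SIGBUS.
 */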
1680 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
1681 {
1682 #ifdef CONFIG_KVM_S390_UCONTROL
1683         if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
1684                  && (kvm_is_ucontrol(vcpu->kvm))) {
1685                 vmf->page = virt_to_page(vcpu->arch.sie_block);
1686                 get_page(vmf->page);
1687                 return 0;
1688         }
1689 #endif
1690         return VM_FAULT_SIGBUS;
1691 }
1692
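/*
 * s390 keeps no architecture specific per-memslot state, so the memslot
 * alloc/free/update callbacks below are empty stubs.
 */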
1693 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
1694                            struct kvm_memory_slot *dont)
1695 {
1696 }
1697
1698 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
1699                             unsigned long npages)
1700 {
1701         return 0;
1702 }
1703
1704 void kvm_arch_memslots_updated(struct kvm *kvm)
1705 {
1706 }
1707
1708 /* Section: memory related */
1709 int kvm_arch_prepare_memory_region(struct kvm *kvm,
1710                                    struct kvm_memory_slot *memslot,
1711                                    struct kvm_userspace_memory_region *mem,
1712                                    enum kvm_mr_change change)
1713 {
1714         /* A few sanity checks. Memory slots have to start and end on a
1715            segment boundary (1MB). The memory in userland may be fragmented
1716            into various different vmas. It is okay to mmap() and munmap()
1717            memory in this slot at any time after this call. */
1718
1719         if (mem->userspace_addr & 0xffffful)
1720                 return -EINVAL;
1721
1722         if (mem->memory_size & 0xffffful)
1723                 return -EINVAL;
1724
1725         return 0;
1726 }
1727
1728 void kvm_arch_commit_memory_region(struct kvm *kvm,
1729                                 struct kvm_userspace_memory_region *mem,
1730                                 const struct kvm_memory_slot *old,
1731                                 enum kvm_mr_change change)
1732 {
1733         int rc;
1734
1735         /* If the basics of the memslot do not change, we do not want
1736          * to update the gmap. Every update causes several unnecessary
1737          * segment translation exceptions. This is usually handled just
1738          * fine by the normal fault handler + gmap, but it will also
1739          * cause faults on the prefix page of running guest CPUs.
1740          */
1741         if (old->userspace_addr == mem->userspace_addr &&
1742             old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
1743             old->npages * PAGE_SIZE == mem->memory_size)
1744                 return;
1745
1746         rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
1747                 mem->guest_phys_addr, mem->memory_size);
1748         if (rc)
1749                 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
1750         return;
1751 }
1752
1753 void kvm_arch_flush_shadow_all(struct kvm *kvm)
1754 {
1755 }
1756
1757 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
1758                                    struct kvm_memory_slot *slot)
1759 {
1760 }
1761
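/*
 * Module init: register with the common KVM code and allocate one
 * DMA-capable page for the facility list, masked down to the facility
 * bits that are known to work under KVM.
 */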
1762 static int __init kvm_s390_init(void)
1763 {
1764         int ret;
1765         ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1766         if (ret)
1767                 return ret;
1768
1769         /*
1770          * guests can ask for up to 255+1 double words, so we need a full page
1771          * to hold the maximum amount of facilities. On the other hand, we
1772          * only set facilities that are known to work in KVM.
1773          */
1774         vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
1775         if (!vfacilities) {
1776                 kvm_exit();
1777                 return -ENOMEM;
1778         }
1779         memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
1780         vfacilities[0] &= 0xff82fff3f4fc2000UL;
1781         vfacilities[1] &= 0x005c000000000000UL;
1782         return 0;
1783 }
1784
1785 static void __exit kvm_s390_exit(void)
1786 {
1787         free_page((unsigned long) vfacilities);
1788         kvm_exit();
1789 }
1790
1791 module_init(kvm_s390_init);
1792 module_exit(kvm_s390_exit);
1793
1794 /*
1795  * Enable autoloading of the kvm module.
1796  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
1797  * since x86 takes a different approach.
1798  */
1799 #include <linux/miscdevice.h>
1800 MODULE_ALIAS_MISCDEV(KVM_MINOR);
1801 MODULE_ALIAS("devname:kvm");