/*
 * s390host.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { NULL }
};
static unsigned long long *facilities;
/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}
/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}
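/*
 * Illustrative sketch, not part of the original file: userspace reaches the
 * handler above by issuing the ioctl on a /dev/kvm file descriptor. The
 * variable names are assumptions.
 *
 *      int kvm_fd = open("/dev/kvm", O_RDWR);
 *      if (kvm_fd < 0 || ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0) < 0)
 *              perror("KVM_S390_ENABLE_SIE");
 */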
int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
                r = 1;
                break;
        default:
                r = 0;
        }
        return r;
}
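/*
 * Descriptive note (added): these capabilities advertise that the guest PSW
 * is exposed through struct kvm_run and that guest memory is backed by the
 * gmap interface. Userspace probes them with the generic KVM_CHECK_EXTENSION
 * ioctl, e.g. (kvm_fd is an assumed /dev/kvm descriptor):
 *
 *      int has_gmap = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_GMAP);
 */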
/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}
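/*
 * Illustrative sketch, not part of the original file: how userspace would
 * inject a floating interrupt through this ioctl. vm_fd (obtained from
 * KVM_CREATE_VM) and the parameter value are assumptions.
 *
 *      struct kvm_s390_interrupt irq = {
 *              .type = KVM_S390_INT_VIRTIO,
 *              .parm64 = virtio_param,
 *      };
 *      ioctl(vm_fd, KVM_S390_INTERRUPT, &irq);
 */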
int kvm_arch_init_vm(struct kvm *kvm)
{
        int rc;
        char debug_name[16];

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        kvm->arch.gmap = gmap_alloc(current->mm);
        if (!kvm->arch.gmap)
                goto out_nogmap;

        return 0;
out_nogmap:
        debug_unregister(kvm->arch.dbf);
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
        if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
            (__u64) vcpu->arch.sie_block)
                vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kfree(vcpu);
}
static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}
void kvm_arch_sync_events(struct kvm *kvm)
{
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        gmap_free(kvm->arch.gmap);
}
/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        vcpu->arch.gmap = vcpu->kvm->arch.gmap;
        return 0;
}
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_regs(&vcpu->arch.host_fpregs);
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->arch.guest_acrs);
        gmap_enable(vcpu->arch.gmap);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        gmap_disable(vcpu->arch.gmap);
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->arch.guest_acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
}
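/*
 * Descriptive note (added): vcpu_load and vcpu_put mirror each other. On
 * load the host floating point and access registers are saved and the guest
 * copies restored; on put the guest state is written back and the host
 * registers reinstated. The guest address space (gmap) is only enabled while
 * the vcpu is loaded.
 */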
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        vcpu->arch.sie_block->prefix = 0UL;
        vcpu->arch.sie_block->ihcpu = 0xffff;
        vcpu->arch.sie_block->cputm = 0UL;
        vcpu->arch.sie_block->ckc = 0UL;
        vcpu->arch.sie_block->todpr = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0] = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
}
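/*
 * Descriptive note (added): the assignments above reproduce the architected
 * initial CPU reset state referenced in the comment (cleared PSW, prefix,
 * timers and FPC, and the documented reset values for control registers 0
 * and 14), except that the guest is left in z/Architecture mode instead of
 * being switched back to ESA/390.
 */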
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | CPUSTAT_SM);
        vcpu->arch.sie_block->ecb = 6;
        vcpu->arch.sie_block->eca = 0xC1002001U;
        vcpu->arch.sie_block->fac = (int) (long) facilities;
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
}
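/*
 * Descriptive note (added): ecb and eca hold SIE execution-control bits and
 * fac points at the masked facility list prepared in kvm_s390_init() below,
 * i.e. the set of facilities the guest is allowed to see. Overwriting
 * cpu_id.version with 0xff marks the CPU as a virtual one towards the guest.
 */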
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
        if (!vcpu)
                goto out;

        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
                                        get_zeroed_page(GFP_KERNEL);
        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        BUG_ON(!kvm->arch.sca);
        if (!kvm->arch.sca->cpu[id].sda)
                kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
        vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
        vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
        set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        init_waitqueue_head(&vcpu->arch.local_int.wq);
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kfree(vcpu);
out:
        return ERR_PTR(rc);
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
        return 0;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        restore_access_regs(vcpu->arch.guest_acrs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        kvm_s390_deliver_pending_interrupts(vcpu);

        vcpu->arch.sie_block->icptcode = 0;
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
                VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        }
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);

        memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

rerun_vcpu:
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_S390_RESET:
                break;
        default:
                BUG();
        }

        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

        do {
                __vcpu_run(vcpu);
                rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (rc == SIE_INTERCEPT_RERUNVCPU)
                goto rerun_vcpu;

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}
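/*
 * Descriptive note (added): when the prefix flag is set the destination is
 * treated as a guest logical address and low-core prefixing is applied by
 * copy_to_guest(); otherwise the address is used as a guest absolute address.
 */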
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        unsigned char archmode = 1;
        int prefix;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
                        vcpu->arch.guest_gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
                        &vcpu->arch.guest_acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}
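/*
 * Descriptive note (added): the sequence above fills the architected
 * store-status save area field by field (floating point and general
 * registers, PSW, prefix, FP control, TOD programmable register, CPU timer,
 * clock comparator, access and control registers), going through
 * __guestcopy() so that prefixing is honoured in the
 * KVM_S390_STORE_STATUS_PREFIXED case.
 */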
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vcpu(vcpu, &s390int);
                break;
        }
        case KVM_S390_STORE_STATUS:
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
        default:
                r = -EINVAL;
        }
        return r;
}
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        /* A few sanity checks. We can have exactly one memory slot which has
           to start at guest physical zero and which has to begin and end on
           a 1 MB segment boundary in userland (this is what the masks below
           enforce). The memory in userland may be fragmented into several
           different vmas. It is okay to mmap() and munmap() parts of this
           slot at any time after this call. See the illustrative sketch
           after this function for a slot layout that satisfies the checks. */

        if (mem->guest_phys_addr)
                return -EINVAL;

        if (mem->userspace_addr & 0xffffful)
                return -EINVAL;

        if (mem->memory_size & 0xffffful)
                return -EINVAL;

        return 0;
}
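/*
 * Illustrative sketch, not part of the original file: a userspace slot that
 * satisfies the checks above, registered through the VM file descriptor.
 * vm_fd, backing and ram_size are assumptions; backing must be 1 MB aligned
 * and ram_size a multiple of 1 MB.
 *
 *      struct kvm_userspace_memory_region mem = {
 *              .slot = 0,
 *              .guest_phys_addr = 0,
 *              .userspace_addr = (unsigned long) backing,
 *              .memory_size = ram_size,
 *      };
 *      ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */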
void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   struct kvm_memory_slot old,
                                   int user_alloc)
{
        int rc;

        rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
                              mem->guest_phys_addr, mem->memory_size);
        if (rc)
                printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
        return;
}
void kvm_arch_flush_shadow(struct kvm *kvm)
{
}
static int __init kvm_s390_init(void)
{
        int ret;

        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (ret)
                return ret;

        /*
         * guests can ask for up to 255+1 double words, we need a full page
         * to hold the maximum amount of facilities. On the other hand, we
         * only set facilities that are known to work in KVM.
         */
        facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!facilities) {
                kvm_exit();
                return -ENOMEM;
        }
        memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
        facilities[0] &= 0xff00fff3f47c0000ULL;
        facilities[1] &= 0x201c000000000000ULL;
        return 0;
}
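/*
 * Descriptive note (added): the two mask constants act as a whitelist. Only
 * facility bits known to work under KVM survive the AND, so everything else
 * is hidden from guests even if the host reports it via STFLE.
 */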
static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) facilities);
        kvm_exit();
}
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);