/*
 * s390host.c --  hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

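/*
 * Each entry below exposes one per-vcpu counter from vcpu->stat as a file
 * under the kvm debugfs directory (typically /sys/kernel/debug/kvm/);
 * VCPU_STAT() records the field's offset inside struct kvm_vcpu so the
 * generic KVM code can sum the counter over all vcpus of a VM.
 */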
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};

static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
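/*
 * The only device ioctl s390 supports is KVM_S390_ENABLE_SIE, which lets
 * s390_enable_sie() rearrange the calling process's address space so that
 * it can host SIE guests; every other request is rejected.
 */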
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
		r = 1;
		break;
	default:
		r = 0;
	}
	return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

int kvm_arch_init_vm(struct kvm *kvm)
{
	int rc;
	char debug_name[16];

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	kvm->arch.gmap = gmap_alloc(current->mm);
	if (!kvm->arch.gmap)
		goto out_nogmap;

	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

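/*
 * Tearing down a vcpu undoes what kvm_arch_vcpu_create() set up: the vcpu's
 * bit in the SCA's "mcn" mask is cleared (s390 numbers bits MSB-first, hence
 * 63 - id with the generic bitops) and its SCA entry is detached before the
 * SIE block page is freed.
 */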
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
	    (__u64) vcpu->arch.sie_block)
		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	smp_mb();
	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

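/*
 * vcpu_load/vcpu_put swap the host's and the guest's floating point and
 * access registers and enable/disable the guest address space (gmap) that
 * SIE translates guest memory accesses through.
 */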
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
	gmap_enable(vcpu->arch.gmap);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

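/*
 * The values written below (PSW of 0, prefix 0, CR0 = 0xE0, CR14 =
 * 0xC2000000, ...) mirror the state the Principles of Operation defines
 * for an initial CPU reset; only the switch back to ESA mode is skipped,
 * as the comment in the function notes.
 */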
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix = 0UL;
	vcpu->arch.sie_block->ihcpu = 0xffff;
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}

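/*
 * kvm_arch_vcpu_setup() wires up the per-vcpu SIE control block: CPUSTAT_ZARCH
 * starts the guest in z/Architecture mode, "fac" points the hardware at the
 * masked facility list page allocated in kvm_s390_init(), and the hrtimer /
 * tasklet pair implements clock-comparator wakeups for a waiting vcpu.
 */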
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | CPUSTAT_SM);
	vcpu->arch.sie_block->ecb = 6;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

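/*
 * Each VM has one system control area (SCA); a vcpu registers its SIE block
 * there by storing the block's address in its "sda" slot and setting its bit
 * in "mcn", and the SIE block in turn carries the SCA origin split into high
 * (scaoh) and low (scaol) halves.
 */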
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	if (!kvm->arch.sca->cpu[id].sda)
		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
	set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->arch.guest_acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

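/*
 * Guest registers 14 and 15 live in the SIE block (gg14/gg15) while the
 * guest runs, so they are copied in before sie64a() and copied back out
 * afterwards; a fault on the SIE instruction itself is reflected to the
 * guest as an addressing exception.
 */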
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;

	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

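/*
 * __guestcopy() stores into guest memory either through the prefix-aware
 * path (copy_to_guest) or via guest absolute addressing
 * (copy_to_guest_absolute), depending on the caller's "prefix" flag.
 */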
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	default:
		r = -EINVAL;
	}
	return r;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot which has
	   to start at guest virtual zero and which has to be located at a
	   page boundary in userland and which has to end at a page boundary.
	   The memory in userland is ok to be fragmented into various different
	   vmas. It is okay to mmap() and munmap() stuff in this slot after
	   doing this call at any time */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}

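/*
 * Committing the (single) memory slot maps the userspace address range to
 * the guest real address range via the gmap, the address space that SIE
 * uses when the guest accesses its memory.
 */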
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc)
{
	int rc;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

static int __init kvm_s390_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
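	/*
	 * Sizing note: 255+1 double words is 256 * 8 = 2048 bytes, so one
	 * 4 KiB page is always sufficient. GFP_DMA keeps the page in low
	 * memory (below 2 GB), matching the 32-bit "fac" field written in
	 * kvm_arch_vcpu_setup().
	 */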
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x201c000000000000ULL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);