/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

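/* PPC has no guest-physical aliasing; the gfn passes through unchanged. */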
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
        return gfn;
}

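/*
 * A vcpu is runnable unless the guest has set MSR[WE] (wait state
 * enable) with no exceptions pending to wake it back up.
 */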
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
        return !(v->arch.msr & MSR_WE) || !!(v->arch.pending_exceptions);
}

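/*
 * Try to emulate the faulting instruction in the kernel. On
 * EMULATE_DO_MMIO the access must be completed by userspace, so the
 * run structure is set up for a KVM_EXIT_MMIO exit.
 */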
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        int r;

        er = kvmppc_emulate_instruction(run, vcpu);
        switch (er) {
        case EMULATE_DONE:
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_GUEST_NV;
                break;
        case EMULATE_DO_MMIO:
                run->exit_reason = KVM_EXIT_MMIO;
                /* We must reload nonvolatiles because "update" load/store
                 * instructions modify register state. */
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_HOST_NV;
                break;
        case EMULATE_FAIL:
                /* XXX Deliver Program interrupt to guest. */
                printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
                       vcpu->arch.last_inst);
                r = RESUME_HOST;
                break;
        default:
                BUG();
        }

        return r;
}

int kvm_arch_hardware_enable(void *garbage)
{
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
        *(int *)rtn = kvmppc_core_check_processor_compat();
}

struct kvm *kvm_arch_create_vm(void)
{
        struct kvm *kvm;

        kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
        if (!kvm)
                return ERR_PTR(-ENOMEM);

        return kvm;
}

static void kvmppc_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_free(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvmppc_free_vcpus(kvm);
        kvm_free_physmem(kvm);
        cleanup_srcu_struct(&kvm->srcu);
        kfree(kvm);
}

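/*
 * Report which optional KVM capabilities this architecture supports.
 * For KVM_CAP_COALESCED_MMIO the return value doubles as the page
 * offset of the coalesced-MMIO ring within the vcpu mmap area.
 */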
int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_PPC_SEGSTATE:
        case KVM_CAP_PPC_PAIRED_SINGLES:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
        default:
                r = 0;
                break;
        }

        return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        return -EINVAL;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   struct kvm_memory_slot old,
                                   int user_alloc)
{
        return;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
        struct kvm_vcpu *vcpu;
        vcpu = kvmppc_core_vcpu_create(kvm, id);
        if (!IS_ERR(vcpu))
                kvmppc_create_vcpu_debugfs(vcpu, id);
        return vcpu;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        /* Make sure we're not using the vcpu anymore */
        hrtimer_cancel(&vcpu->arch.dec_timer);
        tasklet_kill(&vcpu->arch.tasklet);

        kvmppc_remove_vcpu_debugfs(vcpu);
        kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return kvmppc_core_pending_dec(vcpu);
}

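/*
 * Tasklet body: runs in softirq context, so it may queue the
 * decrementer exception and wake a sleeping vcpu directly.
 */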
static void kvmppc_decrementer_func(unsigned long data)
{
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

        kvmppc_core_queue_dec(vcpu);

        if (waitqueue_active(&vcpu->wq)) {
                wake_up_interruptible(&vcpu->wq);
                vcpu->stat.halt_wakeup++;
        }
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
        tasklet_schedule(&vcpu->arch.tasklet);

        return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
        vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;

        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvmppc_mmu_destroy(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        kvmppc_core_vcpu_put(vcpu);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL;
}

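/* Deliver the result of a guest DCR read completed by userspace. */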
static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

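/*
 * Deliver the result of a guest MMIO load once userspace has filled
 * in run->mmio.data. The bytes there are in the byte order of the
 * emulated bus, so they may need swapping and/or sign extension
 * before landing in the destination register.
 */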
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
        u64 uninitialized_var(gpr);

        if (run->mmio.len > sizeof(gpr)) {
                printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
                return;
        }

        if (vcpu->arch.mmio_is_bigendian) {
                switch (run->mmio.len) {
                case 8: gpr = *(u64 *)run->mmio.data; break;
                case 4: gpr = *(u32 *)run->mmio.data; break;
                case 2: gpr = *(u16 *)run->mmio.data; break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        } else {
                /* Convert BE data from userland back to LE. */
                switch (run->mmio.len) {
                case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
                case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        }

        if (vcpu->arch.mmio_sign_extend) {
                switch (run->mmio.len) {
#ifdef CONFIG_PPC64
                case 4: gpr = (s64)(s32)gpr; break;
#endif
                case 2: gpr = (s64)(s16)gpr; break;
                case 1: gpr = (s64)(s8)gpr; break;
                }
        }

        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

        switch (vcpu->arch.io_gpr & KVM_REG_EXT_MASK) {
        case KVM_REG_GPR:
                kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
                break;
        case KVM_REG_FPR:
                vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
                break;
        case KVM_REG_QPR:
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
                break;
        case KVM_REG_FQPR:
                vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
                break;
        default:
                BUG();
        }
}

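/*
 * Set up a KVM_EXIT_MMIO exit for a guest load: record where the
 * result should go, then hand the physical address and length to
 * userspace. The destination register is written when the vcpu
 * re-enters and kvmppc_complete_mmio_load() runs.
 */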
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes, int is_bigendian)
{
        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       run->mmio.len);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 0;

        vcpu->arch.io_gpr = rt;
        vcpu->arch.mmio_is_bigendian = is_bigendian;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 0;
        vcpu->arch.mmio_sign_extend = 0;

        return EMULATE_DO_MMIO;
}

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes, int is_bigendian)
{
        int r;

        r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
        vcpu->arch.mmio_sign_extend = 1;

        return r;
}

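/*
 * Set up a KVM_EXIT_MMIO exit for a guest store. Unlike loads, the
 * data is known now, so it is byte-swapped as needed and copied into
 * run->mmio.data before exiting to userspace.
 */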
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u64 val, unsigned int bytes, int is_bigendian)
{
        void *data = run->mmio.data;

        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       run->mmio.len);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 1;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 1;

        /* Store the value at the lowest bytes in 'data'. */
        if (is_bigendian) {
                switch (bytes) {
                case 8: *(u64 *)data = val; break;
                case 4: *(u32 *)data = val; break;
                case 2: *(u16 *)data = val; break;
                case 1: *(u8 *)data = val; break;
                }
        } else {
                /* Store LE value into 'data'. */
                switch (bytes) {
                case 4: st_le32(data, val); break;
                case 2: st_le16(data, val); break;
                case 1: *(u8 *)data = val; break;
                }
        }

        return EMULATE_DO_MMIO;
}

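/*
 * Main dispatch for KVM_RUN: first finish any MMIO or DCR access that
 * userspace just completed, deliver pending interrupts, then drop back
 * into guest context until the next exit.
 */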
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int r;
        sigset_t sigsaved;

        vcpu_load(vcpu);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        if (vcpu->mmio_needed) {
                if (!vcpu->mmio_is_write)
                        kvmppc_complete_mmio_load(vcpu, run);
                vcpu->mmio_needed = 0;
        } else if (vcpu->arch.dcr_needed) {
                if (!vcpu->arch.dcr_is_write)
                        kvmppc_complete_dcr_load(vcpu, run);
                vcpu->arch.dcr_needed = 0;
        }

        kvmppc_core_deliver_interrupts(vcpu);

        local_irq_disable();
        kvm_guest_enter();
        r = __kvmppc_vcpu_run(run, vcpu);
        kvm_guest_exit();
        local_irq_enable();

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu_put(vcpu);

        return r;
}

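/* Queue an external interrupt for the guest and wake a sleeping vcpu. */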
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
        kvmppc_core_queue_external(vcpu, irq);

        if (waitqueue_active(&vcpu->wq)) {
                wake_up_interruptible(&vcpu->wq);
                vcpu->stat.halt_wakeup++;
        }

        return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_INTERRUPT: {
                struct kvm_interrupt irq;
                r = -EFAULT;
                if (copy_from_user(&irq, argp, sizeof(irq)))
                        goto out;
                r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
                break;
        }
        default:
                r = -EINVAL;
        }

out:
        return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        long r;

        switch (ioctl) {
        default:
                r = -ENOTTY;
        }

        return r;
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}