KVM: PPC: Make wakeups work again for Book3S HV guests
arch/powerpc/kvm/powerpc.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

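/*
 * A vcpu is runnable unless the guest has set MSR_WE (wait state enable)
 * and it has neither pending exceptions nor outstanding requests.
 */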
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
        return !(v->arch.shared->msr & MSR_WE) ||
               !!(v->arch.pending_exceptions) ||
               v->requests;
}

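/*
 * Handle a KVM paravirtual hypercall from the guest: the hypercall number
 * arrives in r11 and up to four arguments in r3-r6; a second return value
 * is passed back to the guest in r4.
 */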
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
        int nr = kvmppc_get_gpr(vcpu, 11);
        int r;
        unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
        unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
        unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
        unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
        unsigned long r2 = 0;

        if (!(vcpu->arch.shared->msr & MSR_SF)) {
                /* 32 bit mode */
                param1 &= 0xffffffff;
                param2 &= 0xffffffff;
                param3 &= 0xffffffff;
                param4 &= 0xffffffff;
        }

        switch (nr) {
        case HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE:
        {
                vcpu->arch.magic_page_pa = param1;
                vcpu->arch.magic_page_ea = param2;

                r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

                r = HC_EV_SUCCESS;
                break;
        }
        case HC_VENDOR_KVM | KVM_HC_FEATURES:
                r = HC_EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500)
                /* XXX Missing magic page on 44x */
                r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

                /* Second return value is in r4 */
                break;
        default:
                r = HC_EV_UNIMPLEMENTED;
                break;
        }

        kvmppc_set_gpr(vcpu, 4, r2);

        return r;
}

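/*
 * Check that the vcpu has been configured consistently before it is run:
 * a PVR must be set, PAPR mode requires a Book3S 64 CPU, and HV KVM
 * currently supports PAPR guests only.
 */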
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
        int r = false;

        /* We have to know what CPU to virtualize */
        if (!vcpu->arch.pvr)
                goto out;

        /* PAPR only works with book3s_64 */
        if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
                goto out;

#ifdef CONFIG_KVM_BOOK3S_64_HV
        /* HV KVM can only do PAPR mode for now */
        if (!vcpu->arch.papr_enabled)
                goto out;
#endif

        r = true;

out:
        vcpu->arch.sane = r;
        return r ? 0 : -EINVAL;
}

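/*
 * Emulate the instruction that caused an MMIO exit. Returns a RESUME_*
 * code telling the caller whether to re-enter the guest or go back to
 * the host (e.g. to let userspace complete the MMIO access).
 */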
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        int r;

        er = kvmppc_emulate_instruction(run, vcpu);
        switch (er) {
        case EMULATE_DONE:
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_GUEST_NV;
                break;
        case EMULATE_DO_MMIO:
                run->exit_reason = KVM_EXIT_MMIO;
                /* We must reload nonvolatiles because "update" load/store
                 * instructions modify register state. */
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_HOST_NV;
                break;
        case EMULATE_FAIL:
                /* XXX Deliver Program interrupt to guest. */
                printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
                       kvmppc_get_last_inst(vcpu));
                r = RESUME_HOST;
                break;
        default:
                BUG();
        }

        return r;
}

int kvm_arch_hardware_enable(void *garbage)
{
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
        *(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        if (type)
                return -EINVAL;

        return kvmppc_core_init_vm(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_free(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);

        kvmppc_core_destroy_vm(kvm);

        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

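/*
 * Report which KVM capabilities this build supports; the set depends on
 * the target subarchitecture (Book E vs. Book3S) and on whether HV
 * support is compiled in.
 */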
int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
#ifdef CONFIG_BOOKE
        case KVM_CAP_PPC_BOOKE_SREGS:
#else
        case KVM_CAP_PPC_SEGSTATE:
        case KVM_CAP_PPC_PAPR:
#endif
        case KVM_CAP_PPC_UNSET_IRQ:
        case KVM_CAP_PPC_IRQ_LEVEL:
        case KVM_CAP_ENABLE_CAP:
                r = 1;
                break;
#ifndef CONFIG_KVM_BOOK3S_64_HV
        case KVM_CAP_PPC_PAIRED_SINGLES:
        case KVM_CAP_PPC_OSI:
        case KVM_CAP_PPC_GET_PVINFO:
#ifdef CONFIG_KVM_E500
        case KVM_CAP_SW_TLB:
#endif
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HV
        case KVM_CAP_SPAPR_TCE:
                r = 1;
                break;
        case KVM_CAP_PPC_SMT:
                r = threads_per_core;
                break;
        case KVM_CAP_PPC_RMA:
                r = 1;
                /* PPC970 requires an RMA */
                if (cpu_has_feature(CPU_FTR_ARCH_201))
                        r = 2;
                break;
#endif
        default:
                r = 0;
                break;
        }
        return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        return -EINVAL;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        return kvmppc_core_prepare_memory_region(kvm, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
               struct kvm_userspace_memory_region *mem,
               struct kvm_memory_slot old,
               int user_alloc)
{
        kvmppc_core_commit_memory_region(kvm, mem);
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

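/*
 * Create a vcpu through the core (subarch) code; once creation is known
 * to have succeeded, point its wait queue pointer at the default
 * per-vcpu wait queue and register it in debugfs.
 */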
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
        struct kvm_vcpu *vcpu;
        vcpu = kvmppc_core_vcpu_create(kvm, id);
        if (!IS_ERR(vcpu)) {
                vcpu->arch.wqp = &vcpu->wq;
                kvmppc_create_vcpu_debugfs(vcpu, id);
        }
        return vcpu;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        /* Make sure we're not using the vcpu anymore */
        hrtimer_cancel(&vcpu->arch.dec_timer);
        tasklet_kill(&vcpu->arch.tasklet);

        kvmppc_remove_vcpu_debugfs(vcpu);
        kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return kvmppc_core_pending_dec(vcpu);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
        tasklet_schedule(&vcpu->arch.tasklet);

        return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
        vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
        vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
        mutex_init(&vcpu->arch.exit_timing_lock);
#endif

        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvmppc_mmu_destroy(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
        /*
         * vrsave (formerly usprg0) isn't used by Linux, but may
         * be used by the guest.
         *
         * On non-booke this is associated with Altivec and
         * is handled by code in book3s.c.
         */
        mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
        kvmppc_core_vcpu_load(vcpu, cpu);
        vcpu->cpu = smp_processor_id();
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
        vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
        vcpu->cpu = -1;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL;
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

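/*
 * Complete an MMIO load once userspace has filled in run->mmio.data:
 * byte-swap if needed, optionally sign extend, and write the result
 * into the GPR/FPR/QPR recorded when the load was started.
 */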
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
        u64 uninitialized_var(gpr);

        if (run->mmio.len > sizeof(gpr)) {
                printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
                return;
        }

        if (vcpu->arch.mmio_is_bigendian) {
                switch (run->mmio.len) {
                case 8: gpr = *(u64 *)run->mmio.data; break;
                case 4: gpr = *(u32 *)run->mmio.data; break;
                case 2: gpr = *(u16 *)run->mmio.data; break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        } else {
                /* Convert BE data from userland back to LE. */
                switch (run->mmio.len) {
                case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
                case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        }

        if (vcpu->arch.mmio_sign_extend) {
                switch (run->mmio.len) {
#ifdef CONFIG_PPC64
                case 4:
                        gpr = (s64)(s32)gpr;
                        break;
#endif
                case 2:
                        gpr = (s64)(s16)gpr;
                        break;
                case 1:
                        gpr = (s64)(s8)gpr;
                        break;
                }
        }

        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

        switch (vcpu->arch.io_gpr & KVM_REG_EXT_MASK) {
        case KVM_REG_GPR:
                kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
                break;
        case KVM_REG_FPR:
                vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
                break;
#ifdef CONFIG_PPC_BOOK3S
        case KVM_REG_QPR:
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
                break;
        case KVM_REG_FQPR:
                vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
                break;
#endif
        default:
                BUG();
        }
}

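/*
 * Start an MMIO load: describe the access in the kvm_run structure and
 * remember which register to update when userspace completes it.
 */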
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes, int is_bigendian)
{
        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       run->mmio.len);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 0;

        vcpu->arch.io_gpr = rt;
        vcpu->arch.mmio_is_bigendian = is_bigendian;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 0;
        vcpu->arch.mmio_sign_extend = 0;

        return EMULATE_DO_MMIO;
}

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes, int is_bigendian)
{
        int r;

        r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
        vcpu->arch.mmio_sign_extend = 1;

        return r;
}

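/*
 * Start an MMIO store: copy the value into run->mmio.data in the byte
 * order of the access and hand it to userspace for completion.
 */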
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u64 val, unsigned int bytes, int is_bigendian)
{
        void *data = run->mmio.data;

        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       run->mmio.len);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 1;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 1;

        /* Store the value at the lowest bytes in 'data'. */
        if (is_bigendian) {
                switch (bytes) {
                case 8: *(u64 *)data = val; break;
                case 4: *(u32 *)data = val; break;
                case 2: *(u16 *)data = val; break;
                case 1: *(u8  *)data = val; break;
                }
        } else {
                /* Store LE value into 'data'. */
                switch (bytes) {
                case 4: st_le32(data, val); break;
                case 2: st_le16(data, val); break;
                case 1: *(u8 *)data = val; break;
                }
        }

        return EMULATE_DO_MMIO;
}

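/*
 * Main vcpu run loop entry. Before entering the guest, fold back any
 * results that userspace produced for a pending MMIO, DCR, OSI or PAPR
 * hypercall exit.
 */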
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int r;
        sigset_t sigsaved;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        if (vcpu->mmio_needed) {
                if (!vcpu->mmio_is_write)
                        kvmppc_complete_mmio_load(vcpu, run);
                vcpu->mmio_needed = 0;
        } else if (vcpu->arch.dcr_needed) {
                if (!vcpu->arch.dcr_is_write)
                        kvmppc_complete_dcr_load(vcpu, run);
                vcpu->arch.dcr_needed = 0;
        } else if (vcpu->arch.osi_needed) {
                u64 *gprs = run->osi.gprs;
                int i;

                for (i = 0; i < 32; i++)
                        kvmppc_set_gpr(vcpu, i, gprs[i]);
                vcpu->arch.osi_needed = 0;
        } else if (vcpu->arch.hcall_needed) {
                int i;

                kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
                for (i = 0; i < 9; ++i)
                        kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
                vcpu->arch.hcall_needed = 0;
        }

        r = kvmppc_vcpu_run(run, vcpu);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        return r;
}

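/*
 * Wake a sleeping vcpu, or send it a reschedule IPI if it is currently
 * running on another host CPU.
 */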
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
        int me;
        int cpu = vcpu->cpu;

        me = get_cpu();
        if (waitqueue_active(vcpu->arch.wqp)) {
                wake_up_interruptible(vcpu->arch.wqp);
                vcpu->stat.halt_wakeup++;
        } else if (cpu != me && cpu != -1) {
                smp_send_reschedule(vcpu->cpu);
        }
        put_cpu();
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
        if (irq->irq == KVM_INTERRUPT_UNSET) {
                kvmppc_core_dequeue_external(vcpu, irq);
                return 0;
        }

        kvmppc_core_queue_external(vcpu, irq);
        kvm_vcpu_kick(vcpu);

        return 0;
}

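/*
 * Enable an optional per-vcpu capability (OSI, PAPR or the software TLB
 * interface) and re-run the sanity check with the new settings.
 */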
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_PPC_OSI:
                r = 0;
                vcpu->arch.osi_enabled = true;
                break;
        case KVM_CAP_PPC_PAPR:
                r = 0;
                vcpu->arch.papr_enabled = true;
                break;
#ifdef CONFIG_KVM_E500
        case KVM_CAP_SW_TLB: {
                struct kvm_config_tlb cfg;
                void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

                r = -EFAULT;
                if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
                        break;

                r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
                break;
        }
#endif
        default:
                r = -EINVAL;
                break;
        }

        if (!r)
                r = kvmppc_sanity_check(vcpu);

        return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_INTERRUPT: {
                struct kvm_interrupt irq;
                r = -EFAULT;
                if (copy_from_user(&irq, argp, sizeof(irq)))
                        goto out;
                r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
                goto out;
        }

        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        goto out;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }

#ifdef CONFIG_KVM_E500
        case KVM_DIRTY_TLB: {
                struct kvm_dirty_tlb dirty;
                r = -EFAULT;
                if (copy_from_user(&dirty, argp, sizeof(dirty)))
                        goto out;
                r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
                break;
        }
#endif

        default:
                r = -EINVAL;
        }

out:
        return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
        u32 inst_lis = 0x3c000000;
        u32 inst_ori = 0x60000000;
        u32 inst_nop = 0x60000000;
        u32 inst_sc = 0x44000002;
        u32 inst_imm_mask = 0xffff;

        /*
         * The hypercall to get into KVM from within guest context is as
         * follows:
         *
         *    lis r0, KVM_SC_MAGIC_R0@h
         *    ori r0, r0, KVM_SC_MAGIC_R0@l
         *    sc
         *    nop
         */
        pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
        pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
        pvinfo->hcall[2] = inst_sc;
        pvinfo->hcall[3] = inst_nop;

        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_PPC_GET_PVINFO: {
                struct kvm_ppc_pvinfo pvinfo;
                memset(&pvinfo, 0, sizeof(pvinfo));
                r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
                if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
                        r = -EFAULT;
                        goto out;
                }

                break;
        }
#ifdef CONFIG_KVM_BOOK3S_64_HV
        case KVM_CREATE_SPAPR_TCE: {
                struct kvm_create_spapr_tce create_tce;
                struct kvm *kvm = filp->private_data;

                r = -EFAULT;
                if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
                        goto out;
                r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
                goto out;
        }

        case KVM_ALLOCATE_RMA: {
                struct kvm *kvm = filp->private_data;
                struct kvm_allocate_rma rma;

                r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
                if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
                        r = -EFAULT;
                break;
        }
#endif /* CONFIG_KVM_BOOK3S_64_HV */

        default:
                r = -ENOTTY;
        }

out:
        return r;
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}