arch/powerpc/kvm/powerpc.c [pandora-kernel.git]
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
        return !(v->arch.shared->msr & MSR_WE) ||
               !!(v->arch.pending_exceptions);
}

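/*
 * Handle a KVM paravirtual hypercall from the guest: the hypercall
 * number arrives in r11, arguments in r3-r6, and a second return
 * value is passed back to the guest in r4.
 */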
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
        int nr = kvmppc_get_gpr(vcpu, 11);
        int r;
        unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
        unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
        unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
        unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
        unsigned long r2 = 0;

        if (!(vcpu->arch.shared->msr & MSR_SF)) {
                /* 32 bit mode */
                param1 &= 0xffffffff;
                param2 &= 0xffffffff;
                param3 &= 0xffffffff;
                param4 &= 0xffffffff;
        }

        switch (nr) {
        case HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE:
        {
                vcpu->arch.magic_page_pa = param1;
                vcpu->arch.magic_page_ea = param2;

                r2 = KVM_MAGIC_FEAT_SR;

                r = HC_EV_SUCCESS;
                break;
        }
        case HC_VENDOR_KVM | KVM_HC_FEATURES:
                r = HC_EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500)
                /* XXX Missing magic page on 44x */
                r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

                /* Second return value is in r4 */
                break;
        default:
                r = HC_EV_UNIMPLEMENTED;
                break;
        }

        kvmppc_set_gpr(vcpu, 4, r2);

        return r;
}

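/*
 * Check that the vcpu has been configured in a way this host can
 * actually run, and record the result in vcpu->arch.sane.
 */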
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
        int r = false;

        /* We have to know what CPU to virtualize */
        if (!vcpu->arch.pvr)
                goto out;

        /* PAPR only works with book3s_64 */
        if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
                goto out;

#ifdef CONFIG_KVM_BOOK3S_64_HV
        /* HV KVM can only do PAPR mode for now */
        if (!vcpu->arch.papr_enabled)
                goto out;
#endif

        r = true;

out:
        vcpu->arch.sane = r;
        return r ? 0 : -EINVAL;
}

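/*
 * Emulate the instruction that caused an MMIO exit and decide whether
 * to resume the guest directly or bounce the access out to userspace.
 */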
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        int r;

        er = kvmppc_emulate_instruction(run, vcpu);
        switch (er) {
        case EMULATE_DONE:
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_GUEST_NV;
                break;
        case EMULATE_DO_MMIO:
                run->exit_reason = KVM_EXIT_MMIO;
                /* We must reload nonvolatiles because "update" load/store
                 * instructions modify register state. */
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_HOST_NV;
                break;
        case EMULATE_FAIL:
                /* XXX Deliver Program interrupt to guest. */
                printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
                       kvmppc_get_last_inst(vcpu));
                r = RESUME_HOST;
                break;
        default:
                BUG();
        }

        return r;
}

int kvm_arch_hardware_enable(void *garbage)
{
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
        *(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm)
{
        return kvmppc_core_init_vm(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_free(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);

        kvmppc_core_destroy_vm(kvm);

        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

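/*
 * Report which optional KVM capabilities this build supports; a
 * non-zero return means the extension is available.
 */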
int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
#ifdef CONFIG_BOOKE
        case KVM_CAP_PPC_BOOKE_SREGS:
#else
        case KVM_CAP_PPC_SEGSTATE:
        case KVM_CAP_PPC_HIOR:
        case KVM_CAP_PPC_PAPR:
#endif
        case KVM_CAP_PPC_UNSET_IRQ:
        case KVM_CAP_PPC_IRQ_LEVEL:
        case KVM_CAP_ENABLE_CAP:
                r = 1;
                break;
#ifndef CONFIG_KVM_BOOK3S_64_HV
        case KVM_CAP_PPC_PAIRED_SINGLES:
        case KVM_CAP_PPC_OSI:
        case KVM_CAP_PPC_GET_PVINFO:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HV
        case KVM_CAP_SPAPR_TCE:
                r = 1;
                break;
        case KVM_CAP_PPC_SMT:
                r = threads_per_core;
                break;
        case KVM_CAP_PPC_RMA:
                r = 1;
                /* PPC970 requires an RMA */
                if (cpu_has_feature(CPU_FTR_ARCH_201))
                        r = 2;
                break;
#endif
        default:
                r = 0;
                break;
        }
        return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        return -EINVAL;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        return kvmppc_core_prepare_memory_region(kvm, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
               struct kvm_userspace_memory_region *mem,
               struct kvm_memory_slot old,
               int user_alloc)
{
        kvmppc_core_commit_memory_region(kvm, mem);
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

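/*
 * Create and initialise a vcpu via the core (subarch) code, and hook
 * up its debugfs entries on success.
 */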
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
        struct kvm_vcpu *vcpu;
        vcpu = kvmppc_core_vcpu_create(kvm, id);
        if (!IS_ERR(vcpu)) {
                /* Only touch the vcpu once we know it isn't an ERR_PTR. */
                vcpu->arch.wqp = &vcpu->wq;
                kvmppc_create_vcpu_debugfs(vcpu, id);
        }
        return vcpu;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        /* Make sure we're not using the vcpu anymore */
        hrtimer_cancel(&vcpu->arch.dec_timer);
        tasklet_kill(&vcpu->arch.tasklet);

        kvmppc_remove_vcpu_debugfs(vcpu);
        kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return kvmppc_core_pending_dec(vcpu);
}

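/*
 * Tasklet body for the decrementer: queue the decrementer interrupt
 * and wake the vcpu if it is sleeping on its wait queue.
 */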
static void kvmppc_decrementer_func(unsigned long data)
{
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

        kvmppc_core_queue_dec(vcpu);

        if (waitqueue_active(vcpu->arch.wqp)) {
                wake_up_interruptible(vcpu->arch.wqp);
                vcpu->stat.halt_wakeup++;
        }
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
        tasklet_schedule(&vcpu->arch.tasklet);

        return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
        vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
        vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
        mutex_init(&vcpu->arch.exit_timing_lock);
#endif

        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvmppc_mmu_destroy(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
        /*
         * vrsave (formerly usprg0) isn't used by Linux, but may
         * be used by the guest.
         *
         * On non-booke this is associated with Altivec and
         * is handled by code in book3s.c.
         */
        mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
        kvmppc_core_vcpu_load(vcpu, cpu);
        vcpu->cpu = smp_processor_id();
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
        vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
        vcpu->cpu = -1;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL;
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

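/*
 * Finish an MMIO load after userspace has filled in run->mmio.data:
 * byte-swap and sign-extend the value as required, then write it to
 * the register the faulting instruction targeted.
 */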
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
        u64 uninitialized_var(gpr);

        if (run->mmio.len > sizeof(gpr)) {
                printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
                return;
        }

        if (vcpu->arch.mmio_is_bigendian) {
                switch (run->mmio.len) {
                case 8: gpr = *(u64 *)run->mmio.data; break;
                case 4: gpr = *(u32 *)run->mmio.data; break;
                case 2: gpr = *(u16 *)run->mmio.data; break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        } else {
                /* Convert BE data from userland back to LE. */
                switch (run->mmio.len) {
                case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
                case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        }

        if (vcpu->arch.mmio_sign_extend) {
                switch (run->mmio.len) {
#ifdef CONFIG_PPC64
                case 4:
                        gpr = (s64)(s32)gpr;
                        break;
#endif
                case 2:
                        gpr = (s64)(s16)gpr;
                        break;
                case 1:
                        gpr = (s64)(s8)gpr;
                        break;
                }
        }

        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

        switch (vcpu->arch.io_gpr & KVM_REG_EXT_MASK) {
        case KVM_REG_GPR:
                kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
                break;
        case KVM_REG_FPR:
                vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
                break;
#ifdef CONFIG_PPC_BOOK3S
        case KVM_REG_QPR:
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
                break;
        case KVM_REG_FQPR:
                vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
                break;
#endif
        default:
                BUG();
        }
}

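/*
 * Set up an MMIO load to be completed by userspace: describe the access
 * in the run struct, remember the destination register, and return
 * EMULATE_DO_MMIO so the caller exits to userspace.
 */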
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes, int is_bigendian)
{
        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       bytes);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 0;

        vcpu->arch.io_gpr = rt;
        vcpu->arch.mmio_is_bigendian = is_bigendian;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 0;
        vcpu->arch.mmio_sign_extend = 0;

        return EMULATE_DO_MMIO;
}

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes, int is_bigendian)
{
        int r;

        r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
        vcpu->arch.mmio_sign_extend = 1;

        return r;
}

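/*
 * Set up an MMIO store to be completed by userspace: copy the value
 * into run->mmio.data with the required endianness and return
 * EMULATE_DO_MMIO so the caller exits to userspace.
 */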
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u64 val, unsigned int bytes, int is_bigendian)
{
        void *data = run->mmio.data;

        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       bytes);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 1;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 1;

        /* Store the value at the lowest bytes in 'data'. */
        if (is_bigendian) {
                switch (bytes) {
                case 8: *(u64 *)data = val; break;
                case 4: *(u32 *)data = val; break;
                case 2: *(u16 *)data = val; break;
                case 1: *(u8  *)data = val; break;
                }
        } else {
                /* Store LE value into 'data'. */
                switch (bytes) {
                case 4: st_le32(data, val); break;
                case 2: st_le16(data, val); break;
                case 1: *(u8 *)data = val; break;
                }
        }

        return EMULATE_DO_MMIO;
}

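/*
 * Main vcpu entry point for the KVM_RUN ioctl: complete any MMIO, DCR,
 * OSI or hypercall that userspace just finished handling, deliver
 * pending interrupts, then enter the guest.
 */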
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int r;
        sigset_t sigsaved;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        if (vcpu->mmio_needed) {
                if (!vcpu->mmio_is_write)
                        kvmppc_complete_mmio_load(vcpu, run);
                vcpu->mmio_needed = 0;
        } else if (vcpu->arch.dcr_needed) {
                if (!vcpu->arch.dcr_is_write)
                        kvmppc_complete_dcr_load(vcpu, run);
                vcpu->arch.dcr_needed = 0;
        } else if (vcpu->arch.osi_needed) {
                u64 *gprs = run->osi.gprs;
                int i;

                for (i = 0; i < 32; i++)
                        kvmppc_set_gpr(vcpu, i, gprs[i]);
                vcpu->arch.osi_needed = 0;
        } else if (vcpu->arch.hcall_needed) {
                int i;

                kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
                for (i = 0; i < 9; ++i)
                        kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
                vcpu->arch.hcall_needed = 0;
        }

        kvmppc_core_deliver_interrupts(vcpu);

        r = kvmppc_vcpu_run(run, vcpu);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        return r;
}

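/*
 * Inject an external interrupt into the vcpu (or clear it when
 * irq->irq is KVM_INTERRUPT_UNSET), waking or kicking the vcpu so it
 * notices the new state.
 */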
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
        if (irq->irq == KVM_INTERRUPT_UNSET) {
                kvmppc_core_dequeue_external(vcpu, irq);
                return 0;
        }

        kvmppc_core_queue_external(vcpu, irq);

        if (waitqueue_active(vcpu->arch.wqp)) {
                wake_up_interruptible(vcpu->arch.wqp);
                vcpu->stat.halt_wakeup++;
        } else if (vcpu->cpu != -1) {
                smp_send_reschedule(vcpu->cpu);
        }

        return 0;
}

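/*
 * Enable an optional per-vcpu capability (KVM_ENABLE_CAP), then re-run
 * the sanity check since the new setting may change what is valid.
 */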
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_PPC_OSI:
                r = 0;
                vcpu->arch.osi_enabled = true;
                break;
        case KVM_CAP_PPC_PAPR:
                r = 0;
                vcpu->arch.papr_enabled = true;
                break;
        default:
                r = -EINVAL;
                break;
        }

        if (!r)
                r = kvmppc_sanity_check(vcpu);

        return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_INTERRUPT: {
                struct kvm_interrupt irq;
                r = -EFAULT;
                if (copy_from_user(&irq, argp, sizeof(irq)))
                        goto out;
                r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
                goto out;
        }

        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        goto out;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }
        default:
                r = -EINVAL;
        }

out:
        return r;
}

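/*
 * Fill in the hypercall instruction sequence a guest should use to
 * enter KVM, as reported to userspace by the KVM_PPC_GET_PVINFO ioctl.
 */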
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
        u32 inst_lis = 0x3c000000;
        u32 inst_ori = 0x60000000;
        u32 inst_nop = 0x60000000;
        u32 inst_sc = 0x44000002;
        u32 inst_imm_mask = 0xffff;

        /*
         * The hypercall to get into KVM from within guest context is as
         * follows:
         *
         *    lis r0, KVM_SC_MAGIC_R0@h
         *    ori r0, r0, KVM_SC_MAGIC_R0@l
         *    sc
         *    nop
         */
        pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
        pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
        pvinfo->hcall[2] = inst_sc;
        pvinfo->hcall[3] = inst_nop;

        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_PPC_GET_PVINFO: {
                struct kvm_ppc_pvinfo pvinfo;
                memset(&pvinfo, 0, sizeof(pvinfo));
                r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
                if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
                        r = -EFAULT;
                        goto out;
                }

                break;
        }
#ifdef CONFIG_KVM_BOOK3S_64_HV
        case KVM_CREATE_SPAPR_TCE: {
                struct kvm_create_spapr_tce create_tce;
                struct kvm *kvm = filp->private_data;

                r = -EFAULT;
                if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
                        goto out;
                r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
                goto out;
        }

        case KVM_ALLOCATE_RMA: {
                struct kvm *kvm = filp->private_data;
                struct kvm_allocate_rma rma;

                r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
                if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
                        r = -EFAULT;
                break;
        }
#endif /* CONFIG_KVM_BOOK3S_64_HV */

        default:
                r = -ENOTTY;
        }

out:
        return r;
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}