arch/powerpc/kvm/booke.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include "timing.h"
#include <asm/cacheflush.h>

#include "booke.h"

unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
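
/*
 * For reference: an entry such as { "mmio", VCPU_STAT(mmio_exits) } in the
 * table below expands to
 * { "mmio", offsetof(struct kvm_vcpu, stat.mmio_exits), KVM_STAT_VCPU },
 * so the generic KVM debugfs code reads each counter straight out of the
 * vcpu (or kvm) structure at the recorded offset.
 */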

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "mmio",       VCPU_STAT(mmio_exits) },
        { "dcr",        VCPU_STAT(dcr_exits) },
        { "sig",        VCPU_STAT(signal_exits) },
        { "itlb_r",     VCPU_STAT(itlb_real_miss_exits) },
        { "itlb_v",     VCPU_STAT(itlb_virt_miss_exits) },
        { "dtlb_r",     VCPU_STAT(dtlb_real_miss_exits) },
        { "dtlb_v",     VCPU_STAT(dtlb_virt_miss_exits) },
        { "sysc",       VCPU_STAT(syscall_exits) },
        { "isi",        VCPU_STAT(isi_exits) },
        { "dsi",        VCPU_STAT(dsi_exits) },
        { "inst_emu",   VCPU_STAT(emulated_inst_exits) },
        { "dec",        VCPU_STAT(dec_exits) },
        { "ext_intr",   VCPU_STAT(ext_intr_exits) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
        int i;

        printk("pc:   %08lx msr:  %08lx\n", vcpu->arch.pc, vcpu->arch.msr);
        printk("lr:   %08lx ctr:  %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
        printk("srr0: %08lx srr1: %08lx\n", vcpu->arch.srr0, vcpu->arch.srr1);

        printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

        for (i = 0; i < 32; i += 4) {
                printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
                       kvmppc_get_gpr(vcpu, i),
                       kvmppc_get_gpr(vcpu, i+1),
                       kvmppc_get_gpr(vcpu, i+2),
                       kvmppc_get_gpr(vcpu, i+3));
        }
}

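/*
 * Mark an interrupt as pending. pending_exceptions is a bitmap indexed by
 * BOOKE_IRQPRIO_* value, so queueing and dequeueing are atomic bit ops.
 */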
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                       unsigned int priority)
{
        set_bit(priority, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
                                        ulong dear_flags, ulong esr_flags)
{
        vcpu->arch.queued_dear = dear_flags;
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
                                           ulong dear_flags, ulong esr_flags)
{
        vcpu->arch.queued_dear = dear_flags;
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
                                           ulong esr_flags)
{
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
        return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
        clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

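/*
 * Note: the irq argument is currently unused; all external interrupts
 * share the single BOOKE_IRQPRIO_EXTERNAL bit.
 */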
void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_EXTERNAL);
}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
{
        int allowed = 0;
        ulong msr_mask;
        bool update_esr = false, update_dear = false;

        switch (priority) {
        case BOOKE_IRQPRIO_DTLB_MISS:
        case BOOKE_IRQPRIO_DATA_STORAGE:
                update_dear = true;
                /* fall through */
        case BOOKE_IRQPRIO_INST_STORAGE:
        case BOOKE_IRQPRIO_PROGRAM:
                update_esr = true;
                /* fall through */
        case BOOKE_IRQPRIO_ITLB_MISS:
        case BOOKE_IRQPRIO_SYSCALL:
        case BOOKE_IRQPRIO_FP_UNAVAIL:
        case BOOKE_IRQPRIO_SPE_UNAVAIL:
        case BOOKE_IRQPRIO_SPE_FP_DATA:
        case BOOKE_IRQPRIO_SPE_FP_ROUND:
        case BOOKE_IRQPRIO_AP_UNAVAIL:
        case BOOKE_IRQPRIO_ALIGNMENT:
                allowed = 1;
                msr_mask = MSR_CE|MSR_ME|MSR_DE;
                break;
        case BOOKE_IRQPRIO_CRITICAL:
        case BOOKE_IRQPRIO_WATCHDOG:
                allowed = vcpu->arch.msr & MSR_CE;
                msr_mask = MSR_ME;
                break;
        case BOOKE_IRQPRIO_MACHINE_CHECK:
                allowed = vcpu->arch.msr & MSR_ME;
                msr_mask = 0;
                break;
        case BOOKE_IRQPRIO_EXTERNAL:
        case BOOKE_IRQPRIO_DECREMENTER:
        case BOOKE_IRQPRIO_FIT:
                allowed = vcpu->arch.msr & MSR_EE;
                msr_mask = MSR_CE|MSR_ME|MSR_DE;
                break;
        case BOOKE_IRQPRIO_DEBUG:
                allowed = vcpu->arch.msr & MSR_DE;
                msr_mask = MSR_ME;
                break;
        }

        if (allowed) {
                vcpu->arch.srr0 = vcpu->arch.pc;
                vcpu->arch.srr1 = vcpu->arch.msr;
                vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
                if (update_esr)
                        vcpu->arch.esr = vcpu->arch.queued_esr;
                if (update_dear)
                        vcpu->arch.dear = vcpu->arch.queued_dear;
                kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask);

                clear_bit(priority, &vcpu->arch.pending_exceptions);
        }

        return allowed;
}
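
/*
 * Note the masking above: every MSR bit *not* in msr_mask is cleared on
 * delivery. A DTLB miss, for example, uses msr_mask = MSR_CE|MSR_ME|MSR_DE,
 * so MSR[EE] is cleared and further external interrupts stay masked until
 * the guest handler returns.
 */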

/* Check pending exceptions and deliver one, if possible. */
void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
{
        unsigned long *pending = &vcpu->arch.pending_exceptions;
        unsigned int priority;

        priority = __ffs(*pending);
        while (priority <= BOOKE_IRQPRIO_MAX) {
                if (kvmppc_booke_irqprio_deliver(vcpu, priority))
                        break;

                priority = find_next_bit(pending,
                                         BITS_PER_BYTE * sizeof(*pending),
                                         priority + 1);
        }
}
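
/*
 * Because __ffs() returns the lowest set bit, a smaller BOOKE_IRQPRIO_*
 * value means a higher delivery priority; the scan then walks upward with
 * find_next_bit() until one interrupt is actually deliverable.
 */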

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
        enum emulation_result er;
        int r = RESUME_HOST;

        /* update before a new last_exit_type is rewritten */
        kvmppc_update_timing_stats(vcpu);

        local_irq_enable();

        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;

        switch (exit_nr) {
        case BOOKE_INTERRUPT_MACHINE_CHECK:
                printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
                kvmppc_dump_vcpu(vcpu);
                r = RESUME_HOST;
                break;

        case BOOKE_INTERRUPT_EXTERNAL:
                kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
                if (need_resched())
                        cond_resched();
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_DECREMENTER:
                /* Since we switched IVPR back to the host's value, the host
                 * handled this interrupt the moment we enabled interrupts.
                 * Now we just offer it a chance to reschedule the guest. */
                kvmppc_account_exit(vcpu, DEC_EXITS);
                if (need_resched())
                        cond_resched();
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_PROGRAM:
                if (vcpu->arch.msr & MSR_PR) {
                        /* Program traps generated by user-level software must
                         * be handled by the guest kernel. */
                        kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
                        r = RESUME_GUEST;
                        kvmppc_account_exit(vcpu, USR_PR_INST);
                        break;
                }

                er = kvmppc_emulate_instruction(run, vcpu);
                switch (er) {
                case EMULATE_DONE:
                        /* don't overwrite subtypes, just account kvm_stats */
                        kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
                        /* Future optimization: only reload non-volatiles if
                         * they were actually modified by emulation. */
                        r = RESUME_GUEST_NV;
                        break;
                case EMULATE_DO_DCR:
                        run->exit_reason = KVM_EXIT_DCR;
                        r = RESUME_HOST;
                        break;
                case EMULATE_FAIL:
                        /* XXX Deliver Program interrupt to guest. */
                        printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
                               __func__, vcpu->arch.pc, vcpu->arch.last_inst);
                        /* For debugging, encode the failing instruction and
                         * report it to userspace. */
                        run->hw.hardware_exit_reason = ~0ULL << 32;
                        run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
                        r = RESUME_HOST;
                        break;
                default:
                        BUG();
                }
                break;

        case BOOKE_INTERRUPT_FP_UNAVAIL:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
                kvmppc_account_exit(vcpu, FP_UNAVAIL);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_SPE_UNAVAIL:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_SPE_FP_DATA:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_SPE_FP_ROUND:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_DATA_STORAGE:
                kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
                                               vcpu->arch.fault_esr);
                kvmppc_account_exit(vcpu, DSI_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_INST_STORAGE:
                kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
                kvmppc_account_exit(vcpu, ISI_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_SYSCALL:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
                kvmppc_account_exit(vcpu, SYSCALL_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_DTLB_MISS: {
                unsigned long eaddr = vcpu->arch.fault_dear;
                int gtlb_index;
                gpa_t gpaddr;
                gfn_t gfn;

                /* Check the guest TLB. */
                gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
                if (gtlb_index < 0) {
                        /* The guest didn't have a mapping for it. */
                        kvmppc_core_queue_dtlb_miss(vcpu,
                                                    vcpu->arch.fault_dear,
                                                    vcpu->arch.fault_esr);
                        kvmppc_mmu_dtlb_miss(vcpu);
                        kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
                        r = RESUME_GUEST;
                        break;
                }

                gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
                gfn = gpaddr >> PAGE_SHIFT;

                if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                        /* The guest TLB had a mapping, but the shadow TLB
                         * didn't, and it is RAM. This could be because:
                         * a) the entry is mapping the host kernel, or
                         * b) the guest used a large mapping which we're faking
                         * Either way, we need to satisfy the fault without
                         * invoking the guest. */
                        kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
                        kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
                        r = RESUME_GUEST;
                } else {
                        /* Guest has mapped and accessed a page which is not
                         * actually RAM. */
                        vcpu->arch.paddr_accessed = gpaddr;
                        r = kvmppc_emulate_mmio(run, vcpu);
                        kvmppc_account_exit(vcpu, MMIO_EXITS);
                }

                break;
        }

        case BOOKE_INTERRUPT_ITLB_MISS: {
                unsigned long eaddr = vcpu->arch.pc;
                gpa_t gpaddr;
                gfn_t gfn;
                int gtlb_index;

                r = RESUME_GUEST;

                /* Check the guest TLB. */
                gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
                if (gtlb_index < 0) {
                        /* The guest didn't have a mapping for it. */
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
                        kvmppc_mmu_itlb_miss(vcpu);
                        kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
                        break;
                }

                kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

                gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
                gfn = gpaddr >> PAGE_SHIFT;

                if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                        /* The guest TLB had a mapping, but the shadow TLB
                         * didn't. This could be because:
                         * a) the entry is mapping the host kernel, or
                         * b) the guest used a large mapping which we're faking
                         * Either way, we need to satisfy the fault without
                         * invoking the guest. */
                        kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
                } else {
                        /* Guest mapped and leaped at non-RAM! */
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
                }

                break;
        }

        case BOOKE_INTERRUPT_DEBUG: {
                u32 dbsr;

                vcpu->arch.pc = mfspr(SPRN_CSRR0);

                /* clear IAC events in DBSR register */
                dbsr = mfspr(SPRN_DBSR);
                dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
                mtspr(SPRN_DBSR, dbsr);

                run->exit_reason = KVM_EXIT_DEBUG;
                kvmppc_account_exit(vcpu, DEBUG_EXITS);
                r = RESUME_HOST;
                break;
        }

        default:
                printk(KERN_EMERG "exit_nr %u\n", exit_nr);
                BUG();
        }

        local_irq_disable();

        kvmppc_core_deliver_interrupts(vcpu);

        if (!(r & RESUME_HOST)) {
                /* To avoid clobbering exit_reason, only check for signals if
                 * we aren't already exiting to userspace for some other
                 * reason. */
                if (signal_pending(current)) {
                        run->exit_reason = KVM_EXIT_INTR;
                        r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
                        kvmppc_account_exit(vcpu, SIGNAL_EXITS);
                }
        }

        return r;
}
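
/*
 * Example of the encoding described in the comment above kvmppc_handle_exit:
 * the signal-pending path returns
 * (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV), packing the errno next
 * to the resume flags, while plain guest re-entry is just RESUME_GUEST.
 */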

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        vcpu->arch.pc = 0;
        vcpu->arch.msr = 0;
        kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */

        vcpu->arch.shadow_pid = 1;

        /* Eye-catching number so we know if the guest takes an interrupt
         * before it's programmed its own IVPR. */
        vcpu->arch.ivpr = 0x55550000;

        kvmppc_init_timing_stats(vcpu);

        return kvmppc_core_vcpu_setup(vcpu);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        regs->pc = vcpu->arch.pc;
        regs->cr = kvmppc_get_cr(vcpu);
        regs->ctr = vcpu->arch.ctr;
        regs->lr = vcpu->arch.lr;
        regs->xer = kvmppc_get_xer(vcpu);
        regs->msr = vcpu->arch.msr;
        regs->srr0 = vcpu->arch.srr0;
        regs->srr1 = vcpu->arch.srr1;
        regs->pid = vcpu->arch.pid;
        regs->sprg0 = vcpu->arch.sprg0;
        regs->sprg1 = vcpu->arch.sprg1;
        regs->sprg2 = vcpu->arch.sprg2;
        regs->sprg3 = vcpu->arch.sprg3;
        regs->sprg4 = vcpu->arch.sprg4;
        regs->sprg5 = vcpu->arch.sprg5;
        regs->sprg6 = vcpu->arch.sprg6;
        regs->sprg7 = vcpu->arch.sprg7;

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        vcpu->arch.pc = regs->pc;
        kvmppc_set_cr(vcpu, regs->cr);
        vcpu->arch.ctr = regs->ctr;
        vcpu->arch.lr = regs->lr;
        kvmppc_set_xer(vcpu, regs->xer);
        kvmppc_set_msr(vcpu, regs->msr);
        vcpu->arch.srr0 = regs->srr0;
        vcpu->arch.srr1 = regs->srr1;
        vcpu->arch.sprg0 = regs->sprg0;
        vcpu->arch.sprg1 = regs->sprg1;
        vcpu->arch.sprg2 = regs->sprg2;
        vcpu->arch.sprg3 = regs->sprg3;
        vcpu->arch.sprg4 = regs->sprg4;
        vcpu->arch.sprg5 = regs->sprg5;
        vcpu->arch.sprg6 = regs->sprg6;
        vcpu->arch.sprg7 = regs->sprg7;

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return kvmppc_core_vcpu_translate(vcpu, tr);
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
        return -ENOTSUPP;
}

int __init kvmppc_booke_init(void)
{
        unsigned long ivor[16];
        unsigned long max_ivor = 0;
        int i;

        /* We install our own exception handlers by hijacking IVPR. IVPR only
         * supplies the upper 16 bits of each handler address, so the handlers
         * must share one 64KB-aligned region; hence the 64KB allocation. */
        kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                 VCPU_SIZE_ORDER);
        if (!kvmppc_booke_handlers)
                return -ENOMEM;

        /* XXX make sure our handlers are smaller than Linux's */

        /* Copy our interrupt handlers to match host IVORs. That way we don't
         * have to swap the IVORs on every guest/host transition. */
        ivor[0] = mfspr(SPRN_IVOR0);
        ivor[1] = mfspr(SPRN_IVOR1);
        ivor[2] = mfspr(SPRN_IVOR2);
        ivor[3] = mfspr(SPRN_IVOR3);
        ivor[4] = mfspr(SPRN_IVOR4);
        ivor[5] = mfspr(SPRN_IVOR5);
        ivor[6] = mfspr(SPRN_IVOR6);
        ivor[7] = mfspr(SPRN_IVOR7);
        ivor[8] = mfspr(SPRN_IVOR8);
        ivor[9] = mfspr(SPRN_IVOR9);
        ivor[10] = mfspr(SPRN_IVOR10);
        ivor[11] = mfspr(SPRN_IVOR11);
        ivor[12] = mfspr(SPRN_IVOR12);
        ivor[13] = mfspr(SPRN_IVOR13);
        ivor[14] = mfspr(SPRN_IVOR14);
        ivor[15] = mfspr(SPRN_IVOR15);

        for (i = 0; i < 16; i++) {
                if (ivor[i] > max_ivor)
                        max_ivor = ivor[i];

                memcpy((void *)kvmppc_booke_handlers + ivor[i],
                       kvmppc_handlers_start + i * kvmppc_handler_len,
                       kvmppc_handler_len);
        }
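        /*
         * With the copies in place, only IVPR has to change on guest/host
         * transitions; handler i sits at kvmppc_booke_handlers + ivor[i],
         * so the IVOR offsets are identical on both sides.
         */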
        flush_icache_range(kvmppc_booke_handlers,
                           kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);

        return 0;
}

void __exit kvmppc_booke_exit(void)
{
        free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
        kvm_exit();
}