arch/powerpc/kernel/kvm.c
/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <asm/reg.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/disassemble.h>

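/*
 * The host maps a shared "magic page" (struct kvm_vcpu_arch_shared) at
 * the very top of the guest's effective address space, so every field
 * is reachable with a negative 16-bit displacement off register 0.
 * magic_var() yields the guest-visible address of one of those fields.
 */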
#define KVM_MAGIC_PAGE          (-4096L)
#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)

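/*
 * Instruction templates: the opcode with the register (and, for the
 * loads/stores, displacement) fields left zero.  The patchers below OR
 * the actual RT/RB and offset values into those fields.
 */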
#define KVM_INST_LWZ            0x80000000
#define KVM_INST_STW            0x90000000
#define KVM_INST_LD             0xe8000000
#define KVM_INST_STD            0xf8000000
#define KVM_INST_NOP            0x60000000
#define KVM_INST_B              0x48000000
#define KVM_INST_B_MASK         0x03ffffff
#define KVM_INST_B_MAX          0x01ffffff

#define KVM_MASK_RT             0x03e00000
#define KVM_RT_30               0x03c00000
#define KVM_MASK_RB             0x0000f800
#define KVM_INST_MFMSR          0x7c0000a6
#define KVM_INST_MFSPR_SPRG0    0x7c1042a6
#define KVM_INST_MFSPR_SPRG1    0x7c1142a6
#define KVM_INST_MFSPR_SPRG2    0x7c1242a6
#define KVM_INST_MFSPR_SPRG3    0x7c1342a6
#define KVM_INST_MFSPR_SRR0     0x7c1a02a6
#define KVM_INST_MFSPR_SRR1     0x7c1b02a6
#define KVM_INST_MFSPR_DAR      0x7c1302a6
#define KVM_INST_MFSPR_DSISR    0x7c1202a6

#define KVM_INST_MTSPR_SPRG0    0x7c1043a6
#define KVM_INST_MTSPR_SPRG1    0x7c1143a6
#define KVM_INST_MTSPR_SPRG2    0x7c1243a6
#define KVM_INST_MTSPR_SPRG3    0x7c1343a6
#define KVM_INST_MTSPR_SRR0     0x7c1a03a6
#define KVM_INST_MTSPR_SRR1     0x7c1b03a6
#define KVM_INST_MTSPR_DAR      0x7c1303a6
#define KVM_INST_MTSPR_DSISR    0x7c1203a6

#define KVM_INST_TLBSYNC        0x7c00046c
#define KVM_INST_MTMSRD_L0      0x7c000164
#define KVM_INST_MTMSRD_L1      0x7c010164
#define KVM_INST_MTMSR          0x7c000124

#define KVM_INST_WRTEEI_0       0x7c000146
#define KVM_INST_WRTEEI_1       0x7c008146

#define KVM_INST_MTSRIN         0x7c0001e4

static bool kvm_patching_worked = true;
static char kvm_tmp[1024 * 1024];
static int kvm_tmp_index;

static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
{
        *inst = new_inst;
        flush_icache_range((ulong)inst, (ulong)inst + 4);
}

static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
        kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
        kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
#endif
}

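/*
 * Patch in a load of a 64-bit magic page field.  32-bit kernels have no
 * ld, so they load the least significant word instead; the shared page
 * is big-endian here, hence the +4.
 */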
static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
        kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
        kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
{
        kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
}

static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
        kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
#else
        kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
{
        kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
}

static void kvm_patch_ins_nop(u32 *inst)
{
        kvm_patch_ins(inst, KVM_INST_NOP);
}

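/*
 * Patch in a relative branch.  b encodes a signed 26-bit byte offset
 * (the low two bits are zero, since instructions are word aligned);
 * KVM_INST_B_MASK covers that field and KVM_INST_B_MAX is the largest
 * forward distance we can reach.
 */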
static void kvm_patch_ins_b(u32 *inst, int addr)
{
#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
        /*
         * On relocatable kernels the interrupt handlers and our code
         * can be in different regions, so we don't patch them.
         */
        extern u32 __end_interrupts;
        if ((ulong)inst < (ulong)&__end_interrupts)
                return;
#endif

        kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
}

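/*
 * kvm_tmp is a scratch arena carved out of BSS for the patched-in
 * trampolines; kvm_alloc() hands out chunks and kvm_free_tmp() later
 * returns whatever was left unused.
 */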
static u32 *kvm_alloc(int len)
{
        u32 *p;

        if ((kvm_tmp_index + len) > ARRAY_SIZE(kvm_tmp)) {
                printk(KERN_ERR "KVM: No more space (%d + %d)\n",
                                kvm_tmp_index, len);
                kvm_patching_worked = false;
                return NULL;
        }

        p = (void*)&kvm_tmp[kvm_tmp_index];
        kvm_tmp_index += len;

        return p;
}

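/*
 * Each rewritten instruction branches into a small trampoline copied
 * into kvm_tmp.  The templates and their kvm_emulate_*_len markers are
 * assembly blobs (in kvm_emulate.S); the kvm_emulate_*_offs values are
 * word offsets at which the branch back, the register access and the
 * original instruction get patched into the copy.
 */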
extern u32 kvm_emulate_mtmsrd_branch_offs;
extern u32 kvm_emulate_mtmsrd_reg_offs;
extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
extern u32 kvm_emulate_mtmsrd_len;
extern u32 kvm_emulate_mtmsrd[];

static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
{
        u32 *p;
        int distance_start;
        int distance_end;
        ulong next_inst;

        p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
        if (!p)
                return;

        /* Find out where we are and put everything there */
        distance_start = (ulong)p - (ulong)inst;
        next_inst = ((ulong)inst + 4);
        distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];

        /* Make sure we only write valid b instructions */
        if (distance_start > KVM_INST_B_MAX) {
                kvm_patching_worked = false;
                return;
        }

        /* Modify the chunk to fit the invocation */
        memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
        p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;

        /* Make clobbered registers work too */
        switch (get_rt(rt)) {
        case 30:
                kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
                                 magic_var(scratch2), KVM_RT_30);
                break;
        case 31:
                kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
                                 magic_var(scratch1), KVM_RT_30);
                break;
        default:
                p[kvm_emulate_mtmsrd_reg_offs] |= rt;
                break;
        }

        p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
        flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);

        /* Patch the invocation */
        kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_mtmsr_branch_offs;
extern u32 kvm_emulate_mtmsr_reg1_offs;
extern u32 kvm_emulate_mtmsr_reg2_offs;
extern u32 kvm_emulate_mtmsr_orig_ins_offs;
extern u32 kvm_emulate_mtmsr_len;
extern u32 kvm_emulate_mtmsr[];

static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
{
        u32 *p;
        int distance_start;
        int distance_end;
        ulong next_inst;

        p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
        if (!p)
                return;

        /* Find out where we are and put everything there */
        distance_start = (ulong)p - (ulong)inst;
        next_inst = ((ulong)inst + 4);
        distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];

        /* Make sure we only write valid b instructions */
        if (distance_start > KVM_INST_B_MAX) {
                kvm_patching_worked = false;
                return;
        }

        /* Modify the chunk to fit the invocation */
        memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
        p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;

        /* Make clobbered registers work too */
        switch (get_rt(rt)) {
        case 30:
                kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
                                 magic_var(scratch2), KVM_RT_30);
                kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
                                 magic_var(scratch2), KVM_RT_30);
                break;
        case 31:
                kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
                                 magic_var(scratch1), KVM_RT_30);
                kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
                                 magic_var(scratch1), KVM_RT_30);
                break;
        default:
                p[kvm_emulate_mtmsr_reg1_offs] |= rt;
                p[kvm_emulate_mtmsr_reg2_offs] |= rt;
                break;
        }

        p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
        flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);

        /* Patch the invocation */
        kvm_patch_ins_b(inst, distance_start);
}

#ifdef CONFIG_BOOKE
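/*
 * On BookE the guest toggles external interrupts with wrteei.  Its only
 * interesting payload is the EE bit, so the trampoline just needs the
 * original instruction's MSR_EE state OR'd in.
 */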
extern u32 kvm_emulate_wrteei_branch_offs;
extern u32 kvm_emulate_wrteei_ee_offs;
extern u32 kvm_emulate_wrteei_len;
extern u32 kvm_emulate_wrteei[];

static void kvm_patch_ins_wrteei(u32 *inst)
{
        u32 *p;
        int distance_start;
        int distance_end;
        ulong next_inst;

        p = kvm_alloc(kvm_emulate_wrteei_len * 4);
        if (!p)
                return;

        /* Find out where we are and put everything there */
        distance_start = (ulong)p - (ulong)inst;
        next_inst = ((ulong)inst + 4);
        distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_branch_offs];

        /* Make sure we only write valid b instructions */
        if (distance_start > KVM_INST_B_MAX) {
                kvm_patching_worked = false;
                return;
        }

        /* Modify the chunk to fit the invocation */
        memcpy(p, kvm_emulate_wrteei, kvm_emulate_wrteei_len * 4);
        p[kvm_emulate_wrteei_branch_offs] |= distance_end & KVM_INST_B_MASK;
        p[kvm_emulate_wrteei_ee_offs] |= (*inst & MSR_EE);
        flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_len * 4);

        /* Patch the invocation */
        kvm_patch_ins_b(inst, distance_start);
}

#endif

#ifdef CONFIG_PPC_BOOK3S_32
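/*
 * Book3S-32 guests load segment registers with mtsrin.  When the host
 * advertises KVM_MAGIC_FEAT_SR, those writes can be handled through a
 * trampoline instead of trapping on every mtsrin.
 */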
extern u32 kvm_emulate_mtsrin_branch_offs;
extern u32 kvm_emulate_mtsrin_reg1_offs;
extern u32 kvm_emulate_mtsrin_reg2_offs;
extern u32 kvm_emulate_mtsrin_orig_ins_offs;
extern u32 kvm_emulate_mtsrin_len;
extern u32 kvm_emulate_mtsrin[];

static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
{
        u32 *p;
        int distance_start;
        int distance_end;
        ulong next_inst;

        p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
        if (!p)
                return;

        /* Find out where we are and put everything there */
        distance_start = (ulong)p - (ulong)inst;
        next_inst = ((ulong)inst + 4);
        distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];

        /* Make sure we only write valid b instructions */
        if (distance_start > KVM_INST_B_MAX) {
                kvm_patching_worked = false;
                return;
        }

        /* Modify the chunk to fit the invocation */
        memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
        p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
        p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
        p[kvm_emulate_mtsrin_reg2_offs] |= rt;
        p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
        flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);

        /* Patch the invocation */
        kvm_patch_ins_b(inst, distance_start);
}

#endif

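/*
 * Ask the host via hypercall to map the magic page; its address is
 * passed twice, as both the effective and the real address.  The host
 * returns a feature bitmap describing what the magic page supports.
 */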
static void kvm_map_magic_page(void *data)
{
        u32 *features = data;

        ulong in[8];
        ulong out[8];

        in[0] = KVM_MAGIC_PAGE;
        in[1] = KVM_MAGIC_PAGE;

        kvm_hypercall(in, out, HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE);

        *features = out[0];
}

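/*
 * Inspect one word of kernel text and, if it is one of the privileged
 * instructions we know, patch it.  Simple SPR accesses become plain
 * loads/stores on the magic page, e.g. mfmsr r5 (0x7ca000a6) turns into
 * a load of magic_var(msr) into r5: RA=0 plus the negative displacement
 * reaches the page at the top of the address space.  The remaining
 * cases are redirected into trampolines in kvm_tmp.
 */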
static void kvm_check_ins(u32 *inst, u32 features)
{
        u32 _inst = *inst;
        u32 inst_no_rt = _inst & ~KVM_MASK_RT;
        u32 inst_rt = _inst & KVM_MASK_RT;

        switch (inst_no_rt) {
        /* Loads */
        case KVM_INST_MFMSR:
                kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
                break;
        case KVM_INST_MFSPR_SPRG0:
                kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
                break;
        case KVM_INST_MFSPR_SPRG1:
                kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
                break;
        case KVM_INST_MFSPR_SPRG2:
                kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
                break;
        case KVM_INST_MFSPR_SPRG3:
                kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
                break;
        case KVM_INST_MFSPR_SRR0:
                kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
                break;
        case KVM_INST_MFSPR_SRR1:
                kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
                break;
        case KVM_INST_MFSPR_DAR:
                kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
                break;
        case KVM_INST_MFSPR_DSISR:
                kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
                break;

        /* Stores */
        case KVM_INST_MTSPR_SPRG0:
                kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
                break;
        case KVM_INST_MTSPR_SPRG1:
                kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
                break;
        case KVM_INST_MTSPR_SPRG2:
                kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
                break;
        case KVM_INST_MTSPR_SPRG3:
                kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
                break;
        case KVM_INST_MTSPR_SRR0:
                kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
                break;
        case KVM_INST_MTSPR_SRR1:
                kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
                break;
        case KVM_INST_MTSPR_DAR:
                kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
                break;
        case KVM_INST_MTSPR_DSISR:
                kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
                break;

        /* Nops */
        case KVM_INST_TLBSYNC:
                kvm_patch_ins_nop(inst);
                break;

        /* Rewrites */
        case KVM_INST_MTMSRD_L1:
                kvm_patch_ins_mtmsrd(inst, inst_rt);
                break;
        case KVM_INST_MTMSR:
        case KVM_INST_MTMSRD_L0:
                kvm_patch_ins_mtmsr(inst, inst_rt);
                break;
        }

        switch (inst_no_rt & ~KVM_MASK_RB) {
#ifdef CONFIG_PPC_BOOK3S_32
        case KVM_INST_MTSRIN:
                if (features & KVM_MAGIC_FEAT_SR) {
                        u32 inst_rb = _inst & KVM_MASK_RB;
                        kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
                }
                break;
#endif
        }

        switch (_inst) {
#ifdef CONFIG_BOOKE
        case KVM_INST_WRTEEI_0:
        case KVM_INST_WRTEEI_1:
                kvm_patch_ins_wrteei(inst);
                break;
#endif
        }
}

static void kvm_use_magic_page(void)
{
        u32 *p;
        u32 *start, *end;
        u32 tmp;
        u32 features;

        /* Tell the host to map the magic page to -4096 on all CPUs */
        on_each_cpu(kvm_map_magic_page, &features, 1);

        /* Quick self-test to see if the mapping works */
        if (__get_user(tmp, (u32*)KVM_MAGIC_PAGE)) {
                kvm_patching_worked = false;
                return;
        }

        /* Now loop through all code and find instructions */
        start = (void*)_stext;
        end = (void*)_etext;

        for (p = start; p < end; p++)
                kvm_check_ins(p, features);

        printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
                         kvm_patching_worked ? "worked" : "failed");
}

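/*
 * Hypercall calling convention: the vendor/nr token goes in r11 and the
 * arguments in[0..7] in r3-r10; the return code comes back in r3 and
 * out[0..7] in r4-r11.  The actual hypercall instructions live at
 * kvm_hypercall_start and are filled in by kvm_para_setup() from the
 * device tree.
 */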
unsigned long kvm_hypercall(unsigned long *in,
                            unsigned long *out,
                            unsigned long nr)
{
        unsigned long register r0 asm("r0");
        unsigned long register r3 asm("r3") = in[0];
        unsigned long register r4 asm("r4") = in[1];
        unsigned long register r5 asm("r5") = in[2];
        unsigned long register r6 asm("r6") = in[3];
        unsigned long register r7 asm("r7") = in[4];
        unsigned long register r8 asm("r8") = in[5];
        unsigned long register r9 asm("r9") = in[6];
        unsigned long register r10 asm("r10") = in[7];
        unsigned long register r11 asm("r11") = nr;
        unsigned long register r12 asm("r12");

        asm volatile("bl        kvm_hypercall_start"
                     : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
                       "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
                       "=r"(r12)
                     : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
                       "r"(r9), "r"(r10), "r"(r11)
                     : "memory", "cc", "xer", "ctr", "lr");

        out[0] = r4;
        out[1] = r5;
        out[2] = r6;
        out[3] = r7;
        out[4] = r8;
        out[5] = r9;
        out[6] = r10;
        out[7] = r11;

        return r3;
}
EXPORT_SYMBOL_GPL(kvm_hypercall);

static int kvm_para_setup(void)
{
        extern u32 kvm_hypercall_start;
        struct device_node *hyper_node;
        u32 *insts;
        int len, i;

        hyper_node = of_find_node_by_path("/hypervisor");
        if (!hyper_node)
                return -1;

        insts = (u32*)of_get_property(hyper_node, "hcall-instructions", &len);
        if (!insts)
                return -1;
        if (len % 4)
                return -1;
        if (len > (4 * 4))
                return -1;

        for (i = 0; i < (len / 4); i++)
                kvm_patch_ins(&(&kvm_hypercall_start)[i], insts[i]);

        return 0;
}

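/*
 * Hand the unused, page-aligned tail of kvm_tmp back to the page
 * allocator; its pages were reserved as part of the kernel image.
 */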
static __init void kvm_free_tmp(void)
{
        unsigned long start, end;

        start = (ulong)&kvm_tmp[kvm_tmp_index + (PAGE_SIZE - 1)] & PAGE_MASK;
        end = (ulong)&kvm_tmp[ARRAY_SIZE(kvm_tmp)] & PAGE_MASK;

        /* Free the tmp space we don't need */
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                init_page_count(virt_to_page(start));
                free_page(start);
                totalram_pages++;
        }
}

static int __init kvm_guest_init(void)
{
        if (!kvm_para_available())
                goto free_tmp;

        if (kvm_para_setup())
                goto free_tmp;

        if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
                kvm_use_magic_page();

#ifdef CONFIG_PPC_BOOK3S_64
        /* Enable napping */
        powersave_nap = 1;
#endif

free_tmp:
        kvm_free_tmp();

        return 0;
}

postcore_initcall(kvm_guest_init);