KVM: PPC: Improve indirect svcpu accessors
[pandora-kernel.git] / arch / powerpc / kvm / emulate.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include "timing.h"
#include "trace.h"

#define OP_TRAP 3
#define OP_TRAP_64 2

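/* Extended opcodes of the X-form instructions under primary opcode 31
 * that we may have to emulate. */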
#define OP_31_XOP_LWZX      23
#define OP_31_XOP_LBZX      87
#define OP_31_XOP_STWX      151
#define OP_31_XOP_STBX      215
#define OP_31_XOP_LBZUX     119
#define OP_31_XOP_STBUX     247
#define OP_31_XOP_LHZX      279
#define OP_31_XOP_LHZUX     311
#define OP_31_XOP_MFSPR     339
#define OP_31_XOP_LHAX      343
#define OP_31_XOP_STHX      407
#define OP_31_XOP_STHUX     439
#define OP_31_XOP_MTSPR     467
#define OP_31_XOP_DCBI      470
#define OP_31_XOP_LWBRX     534
#define OP_31_XOP_TLBSYNC   566
#define OP_31_XOP_STWBRX    662
#define OP_31_XOP_LHBRX     790
#define OP_31_XOP_STHBRX    918

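/* Primary opcodes of the D-form loads and stores (register + 16-bit
 * displacement addressing). */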
#define OP_LWZ  32
#define OP_LWZU 33
#define OP_LBZ  34
#define OP_LBZU 35
#define OP_STW  36
#define OP_STWU 37
#define OP_STB  38
#define OP_STBU 39
#define OP_LHZ  40
#define OP_LHZU 41
#define OP_LHA  42
#define OP_LHAU 43
#define OP_STH  44
#define OP_STHU 45

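/* With CONFIG_PPC64 (Book3S hosts) the decrementer always runs; on the
 * BookE side it only raises interrupts while TCR[DIE] is set, so only
 * then is a host timer worth arming. */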
#ifdef CONFIG_PPC64
static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
{
        return 1;
}
#else
static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.tcr & TCR_DIE;
}
#endif

void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
        unsigned long dec_nsec;

        pr_debug("mtDEC: %x\n", vcpu->arch.dec);
#ifdef CONFIG_PPC64
        /* mtdec lowers the interrupt line when positive. */
        kvmppc_core_dequeue_dec(vcpu);

        /* POWER4+ triggers a dec interrupt if the value is < 0 */
        if (vcpu->arch.dec & 0x80000000) {
                hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
                kvmppc_core_queue_dec(vcpu);
                return;
        }
#endif
        if (kvmppc_dec_enabled(vcpu)) {
                /* The decrementer ticks at the same rate as the timebase, so
                 * that's how we convert the guest DEC value to the number of
                 * host ticks. */

                hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
                dec_nsec = vcpu->arch.dec;
                dec_nsec *= 1000;
                dec_nsec /= tb_ticks_per_usec;
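                /* Example: with tb_ticks_per_usec == 512, a guest DEC of
                 * 65536 timebase ticks becomes 65536 * 1000 / 512 = 128000 ns,
                 * i.e. a 128 us host hrtimer. */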
                hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
                              HRTIMER_MODE_REL);
                vcpu->arch.dec_jiffies = get_tb();
        } else {
                hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
        }
}

/* XXX to do:
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lmw
 * stmw
 *
 * XXX is_bigendian should depend on MMU mapping or MSR[LE]
 */
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
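/* Decode and emulate a single guest instruction that trapped into the host.
 * Returns an emulation_result code; unless a handler asks us not to, the
 * guest PC is advanced past the emulated instruction before returning. */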
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        u32 inst = kvmppc_get_last_inst(vcpu);
        ulong ea;
        int ra;
        int rb;
        int rs;
        int rt;
        int sprn;
        enum emulation_result emulated = EMULATE_DONE;
        int advance = 1;

        /* this default type might be overwritten by subcategories */
        kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

        pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

        /* Instruction fetch from the guest failed; try again next time. */
        if (inst == KVM_INST_FETCH_FAILED)
                return EMULATE_DONE;

        switch (get_op(inst)) {
        case OP_TRAP:
#ifdef CONFIG_PPC64
        case OP_TRAP_64:
                kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
                kvmppc_core_queue_program(vcpu, vcpu->arch.esr | ESR_PTR);
#endif
                advance = 0;
                break;

        case 31:
                switch (get_xop(inst)) {

                case OP_31_XOP_LWZX:
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                        break;

                case OP_31_XOP_LBZX:
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                        break;

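                /* Update-form indexed accesses: EA = (rA|0) + rB, and rA is
                 * written back with the EA after the access completes. */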
                case OP_31_XOP_LBZUX:
                        rt = get_rt(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        ea = kvmppc_get_gpr(vcpu, rb);
                        if (ra)
                                ea += kvmppc_get_gpr(vcpu, ra);

                        emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                        kvmppc_set_gpr(vcpu, ra, ea);
                        break;

                case OP_31_XOP_STWX:
                        rs = get_rs(inst);
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       4, 1);
                        break;

                case OP_31_XOP_STBX:
                        rs = get_rs(inst);
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       1, 1);
                        break;

                case OP_31_XOP_STBUX:
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        ea = kvmppc_get_gpr(vcpu, rb);
                        if (ra)
                                ea += kvmppc_get_gpr(vcpu, ra);

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       1, 1);
                        /* stbux updates rA (not rS) with the EA. */
                        kvmppc_set_gpr(vcpu, ra, ea);
                        break;

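                /* kvmppc_handle_loads() (note the trailing 's') is the
                 * sign-extending variant, as lha/lhax/lhau require. */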
                case OP_31_XOP_LHAX:
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
                        break;

                case OP_31_XOP_LHZX:
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                        break;

                case OP_31_XOP_LHZUX:
                        rt = get_rt(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        ea = kvmppc_get_gpr(vcpu, rb);
                        if (ra)
                                ea += kvmppc_get_gpr(vcpu, ra);

                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                        kvmppc_set_gpr(vcpu, ra, ea);
                        break;

                case OP_31_XOP_MFSPR:
                        sprn = get_sprn(inst);
                        rt = get_rt(inst);

                        switch (sprn) {
                        case SPRN_SRR0:
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr0); break;
                        case SPRN_SRR1:
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr1); break;
                        case SPRN_PVR:
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break;
                        case SPRN_PIR:
                                kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break;
                        case SPRN_MSSSR0:
                                kvmppc_set_gpr(vcpu, rt, 0); break;

                        /* Note: mftb and TBRL/TBWL are user-accessible, so
                         * the guest can always access the real TB anyways.
                         * In fact, we probably will never see these traps. */
                        case SPRN_TBWL:
                                kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break;
                        case SPRN_TBWU:
                                kvmppc_set_gpr(vcpu, rt, get_tb()); break;

                        case SPRN_SPRG0:
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg0); break;
                        case SPRN_SPRG1:
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg1); break;
                        case SPRN_SPRG2:
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg2); break;
                        case SPRN_SPRG3:
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg3); break;
                        /* Note: SPRG4-7 are user-readable, so we don't get
                         * a trap. */

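                        /* Reconstruct the current DEC value: the guest loaded
                         * vcpu->arch.dec when the timebase read dec_jiffies,
                         * and DEC has counted down by the ticks elapsed since. */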
                        case SPRN_DEC:
                        {
                                u64 jd = get_tb() - vcpu->arch.dec_jiffies;
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.dec - jd);
                                pr_debug("mfDEC: %x - %llx = %lx\n",
                                         vcpu->arch.dec, jd,
                                         kvmppc_get_gpr(vcpu, rt));
                                break;
                        }
                        default:
                                emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
                                if (emulated == EMULATE_FAIL) {
                                        printk(KERN_INFO "mfspr: unknown spr %x\n", sprn);
                                        kvmppc_set_gpr(vcpu, rt, 0);
                                }
                                break;
                        }
                        break;

                case OP_31_XOP_STHX:
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       2, 1);
                        break;

                case OP_31_XOP_STHUX:
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        ea = kvmppc_get_gpr(vcpu, rb);
                        if (ra)
                                ea += kvmppc_get_gpr(vcpu, ra);

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       2, 1);
                        kvmppc_set_gpr(vcpu, ra, ea);
                        break;

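                /* mtspr: privileged SPR writes trap here. Most targets are
                 * simply shadowed in the vcpu; writing DEC additionally
                 * re-arms the host hrtimer via kvmppc_emulate_dec(). */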
                case OP_31_XOP_MTSPR:
                        sprn = get_sprn(inst);
                        rs = get_rs(inst);
                        switch (sprn) {
                        case SPRN_SRR0:
                                vcpu->arch.srr0 = kvmppc_get_gpr(vcpu, rs); break;
                        case SPRN_SRR1:
                                vcpu->arch.srr1 = kvmppc_get_gpr(vcpu, rs); break;

                        /* XXX We need to context-switch the timebase for
                         * watchdog and FIT. */
                        case SPRN_TBWL: break;
                        case SPRN_TBWU: break;

                        case SPRN_MSSSR0: break;

                        case SPRN_DEC:
                                vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs);
                                kvmppc_emulate_dec(vcpu);
                                break;

                        case SPRN_SPRG0:
                                vcpu->arch.sprg0 = kvmppc_get_gpr(vcpu, rs); break;
                        case SPRN_SPRG1:
                                vcpu->arch.sprg1 = kvmppc_get_gpr(vcpu, rs); break;
                        case SPRN_SPRG2:
                                vcpu->arch.sprg2 = kvmppc_get_gpr(vcpu, rs); break;
                        case SPRN_SPRG3:
                                vcpu->arch.sprg3 = kvmppc_get_gpr(vcpu, rs); break;

                        default:
                                emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
                                if (emulated == EMULATE_FAIL)
                                        printk(KERN_INFO "mtspr: unknown spr %x\n", sprn);
                                break;
                        }
                        break;

                case OP_31_XOP_DCBI:
                        /* Do nothing. The guest is performing dcbi because
                         * hardware DMA is not snooped by the dcache, but
                         * emulated DMA either goes through the dcache as
                         * normal writes, or the host kernel has handled dcache
                         * coherence. */
                        break;

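                /* Byte-reversed forms: identical to the loads/stores above,
                 * but with the big-endian flag cleared so the MMIO path
                 * byte-swaps the data for the guest. */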
                case OP_31_XOP_LWBRX:
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
                        break;

                case OP_31_XOP_TLBSYNC:
                        break;

                case OP_31_XOP_STWBRX:
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       4, 0);
                        break;

                case OP_31_XOP_LHBRX:
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
                        break;

                case OP_31_XOP_STHBRX:
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       2, 0);
                        break;

                default:
                        /* Attempt core-specific emulation below. */
                        emulated = EMULATE_FAIL;
                }
                break;

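        /* D-form loads and stores. For the update forms (lwzu, stbu, ...),
         * rA is written with the access address recorded by the fault path
         * (vcpu->arch.paddr_accessed). */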
        case OP_LWZ:
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                break;

        case OP_LWZU:
                ra = get_ra(inst);
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                break;

        case OP_LBZ:
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                break;

        case OP_LBZU:
                ra = get_ra(inst);
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                break;

        case OP_STW:
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               4, 1);
                break;

        case OP_STWU:
                ra = get_ra(inst);
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               4, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                break;

        case OP_STB:
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               1, 1);
                break;

        case OP_STBU:
                ra = get_ra(inst);
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               1, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                break;

        case OP_LHZ:
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                break;

        case OP_LHZU:
                ra = get_ra(inst);
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                break;

        case OP_LHA:
                rt = get_rt(inst);
                emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
                break;

        case OP_LHAU:
                ra = get_ra(inst);
                rt = get_rt(inst);
                emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                break;

        case OP_STH:
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               2, 1);
                break;

        case OP_STHU:
                ra = get_ra(inst);
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               2, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                break;

        default:
                emulated = EMULATE_FAIL;
        }

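        /* Anything not handled generically gets one more chance in the
         * core-specific (Book3S/BookE) emulation before we give up and
         * send the guest a program interrupt. */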
        if (emulated == EMULATE_FAIL) {
                emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
                if (emulated == EMULATE_AGAIN) {
                        advance = 0;
                } else if (emulated == EMULATE_FAIL) {
                        advance = 0;
                        printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
                               "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
                        kvmppc_core_queue_program(vcpu, 0);
                }
        }

        trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

        /* Advance past emulated instruction. */
        if (advance)
                kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

        return emulated;
}