pandora-kernel.git: arch/powerpc/kvm/emulate.c @ 2a73d821e1fc99dde6525c94b303172ac88910ba
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include "timing.h"
#include "trace.h"

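/*
 * Primary opcodes (bits 0-5 of the instruction) and, for primary opcode 31,
 * the X-form extended opcodes (bits 21-30) as defined by the PowerPC ISA.
 */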
#define OP_TRAP 3
#define OP_TRAP_64 2

#define OP_31_XOP_LWZX      23
#define OP_31_XOP_DCBF      86
#define OP_31_XOP_LBZX      87
#define OP_31_XOP_STWX      151
#define OP_31_XOP_STBX      215
#define OP_31_XOP_LBZUX     119
#define OP_31_XOP_STBUX     247
#define OP_31_XOP_LHZX      279
#define OP_31_XOP_LHZUX     311
#define OP_31_XOP_MFSPR     339
#define OP_31_XOP_LHAX      343
#define OP_31_XOP_STHX      407
#define OP_31_XOP_STHUX     439
#define OP_31_XOP_MTSPR     467
#define OP_31_XOP_DCBI      470
#define OP_31_XOP_LWBRX     534
#define OP_31_XOP_TLBSYNC   566
#define OP_31_XOP_STWBRX    662
#define OP_31_XOP_LHBRX     790
#define OP_31_XOP_STHBRX    918

#define OP_LWZ  32
#define OP_LWZU 33
#define OP_LBZ  34
#define OP_LBZU 35
#define OP_STW  36
#define OP_STWU 37
#define OP_STB  38
#define OP_STBU 39
#define OP_LHZ  40
#define OP_LHZU 41
#define OP_LHA  42
#define OP_LHAU 43
#define OP_STH  44
#define OP_STHU 45

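/*
 * On Book3S there is no separate decrementer enable bit, so the DEC timer is
 * treated as always armed; on BookE the decrementer interrupt is gated by
 * TCR[DIE].
 */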
#ifdef CONFIG_PPC_BOOK3S
static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
{
        return 1;
}
#else
static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.tcr & TCR_DIE;
}
#endif

void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
        unsigned long dec_nsec;

        pr_debug("mtDEC: %x\n", vcpu->arch.dec);
#ifdef CONFIG_PPC_BOOK3S
        /* mtdec lowers the interrupt line when positive. */
        kvmppc_core_dequeue_dec(vcpu);

        /* POWER4+ triggers a dec interrupt if the value is < 0 */
        if (vcpu->arch.dec & 0x80000000) {
                hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
                kvmppc_core_queue_dec(vcpu);
                return;
        }
#endif
        if (kvmppc_dec_enabled(vcpu)) {
                /* The decrementer ticks at the same rate as the timebase, so
                 * we use tb_ticks_per_usec to convert the guest DEC value to
                 * nanoseconds for the host hrtimer. */
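                /* Worked example (tb_ticks_per_usec = 512, i.e. an assumed
                 * 512 MHz timebase, is only illustrative): a guest DEC of
                 * 0x00100000 (1048576 ticks) becomes
                 * 1048576 * 1000 / 512 = 2048000 ns, so the hrtimer fires
                 * roughly 2 ms from now. */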

                hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
                dec_nsec = vcpu->arch.dec;
                dec_nsec *= 1000;
                dec_nsec /= tb_ticks_per_usec;
                hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
                              HRTIMER_MODE_REL);
                vcpu->arch.dec_jiffies = get_tb();
        } else {
                hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
        }
}

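/*
 * Return the guest's current DEC value: the value the guest last loaded,
 * minus the timebase ticks that have elapsed since then (dec_jiffies holds
 * the timebase snapshot taken when DEC was last written).
 */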
u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
{
        u64 jd = tb - vcpu->arch.dec_jiffies;
        return vcpu->arch.dec - jd;
}

/* XXX to do:
 * lhax
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lha
 * lhau
 * lmw
 * stmw
 *
 * XXX is_bigendian should depend on MMU mapping or MSR[LE]
 */
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
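/*
 * Emulate one guest instruction: decode the primary opcode with get_op() and,
 * for primary opcode 31, the extended opcode with get_xop().  Loads and stores
 * are funnelled through kvmppc_handle_load()/kvmppc_handle_store(); anything
 * not handled here falls back to kvmppc_core_emulate_op() for core-specific
 * emulation.  On success the guest PC is advanced past the emulated
 * instruction.
 */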
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        u32 inst = kvmppc_get_last_inst(vcpu);
        u32 ea;
        int ra;
        int rb;
        int rs;
        int rt;
        int sprn;
        enum emulation_result emulated = EMULATE_DONE;
        int advance = 1;

        /* this default type might be overwritten by subcategories */
        kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

        pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

        switch (get_op(inst)) {
        case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
        case OP_TRAP_64:
                kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
                kvmppc_core_queue_program(vcpu, vcpu->arch.esr | ESR_PTR);
#endif
                advance = 0;
                break;

        case 31:
                switch (get_xop(inst)) {

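                /* For kvmppc_handle_load()/kvmppc_handle_store() the last two
                 * arguments are the access size in bytes and an is_bigendian
                 * flag; the byte-reversed forms (lwbrx etc.) pass 0 for the
                 * latter. */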
                case OP_31_XOP_LWZX:
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                        break;

                case OP_31_XOP_LBZX:
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                        break;

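                /* The update forms (...UX) compute EA = (ra ? GPR[ra] : 0) +
                 * GPR[rb] and write the EA back into ra after the access. */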
                case OP_31_XOP_LBZUX:
                        rt = get_rt(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        ea = kvmppc_get_gpr(vcpu, rb);
                        if (ra)
                                ea += kvmppc_get_gpr(vcpu, ra);

                        emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                        kvmppc_set_gpr(vcpu, ra, ea);
                        break;

                case OP_31_XOP_STWX:
                        rs = get_rs(inst);
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       4, 1);
                        break;

                case OP_31_XOP_STBX:
                        rs = get_rs(inst);
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       1, 1);
                        break;

                case OP_31_XOP_STBUX:
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        ea = kvmppc_get_gpr(vcpu, rb);
                        if (ra)
                                ea += kvmppc_get_gpr(vcpu, ra);

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       1, 1);
                        /* stbux is an update form: the EA goes back to ra,
                         * not rs. */
                        kvmppc_set_gpr(vcpu, ra, ea);
                        break;

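                /* kvmppc_handle_loads() (note the trailing 's') is the
                 * sign-extending variant, used below for the algebraic
                 * halfword loads lhax/lha/lhau. */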
                case OP_31_XOP_LHAX:
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
                        break;

                case OP_31_XOP_LHZX:
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                        break;

                case OP_31_XOP_LHZUX:
                        rt = get_rt(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        ea = kvmppc_get_gpr(vcpu, rb);
                        if (ra)
                                ea += kvmppc_get_gpr(vcpu, ra);

                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                        kvmppc_set_gpr(vcpu, ra, ea);
                        break;

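                /* mfspr: SPRs shared with the guest (SRR0/1, SPRG0-3) are
                 * read from the shared register area (vcpu->arch.shared);
                 * anything not handled here is passed to the core-specific
                 * kvmppc_core_emulate_mfspr(). */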
                case OP_31_XOP_MFSPR:
                        sprn = get_sprn(inst);
                        rt = get_rt(inst);

                        switch (sprn) {
                        case SPRN_SRR0:
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr0);
                                break;
                        case SPRN_SRR1:
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr1);
                                break;
                        case SPRN_PVR:
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break;
                        case SPRN_PIR:
                                kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break;
                        case SPRN_MSSSR0:
                                kvmppc_set_gpr(vcpu, rt, 0); break;

                        /* Note: mftb and TBRL/TBWL are user-accessible, so
                         * the guest can always access the real TB anyways.
                         * In fact, we probably will never see these traps. */
                        case SPRN_TBWL:
                                kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break;
                        case SPRN_TBWU:
                                kvmppc_set_gpr(vcpu, rt, get_tb()); break;

                        case SPRN_SPRG0:
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg0);
                                break;
                        case SPRN_SPRG1:
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg1);
                                break;
                        case SPRN_SPRG2:
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg2);
                                break;
                        case SPRN_SPRG3:
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg3);
                                break;
                        /* Note: SPRG4-7 are user-readable, so we don't get
                         * a trap. */

                        case SPRN_DEC:
                        {
                                kvmppc_set_gpr(vcpu, rt,
                                               kvmppc_get_dec(vcpu, get_tb()));
                                break;
                        }
                        default:
                                emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
                                if (emulated == EMULATE_FAIL) {
                                        printk("mfspr: unknown spr %x\n", sprn);
                                        kvmppc_set_gpr(vcpu, rt, 0);
                                }
                                break;
                        }
                        kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
                        break;

                case OP_31_XOP_STHX:
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       2, 1);
                        break;

                case OP_31_XOP_STHUX:
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        ea = kvmppc_get_gpr(vcpu, rb);
                        if (ra)
                                ea += kvmppc_get_gpr(vcpu, ra);

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       2, 1);
                        kvmppc_set_gpr(vcpu, ra, ea);
                        break;

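                /* mtspr: writes to the shared SPRs update the shared register
                 * area; a DEC write re-arms the decrementer timer via
                 * kvmppc_emulate_dec(); unknown SPRs fall back to
                 * kvmppc_core_emulate_mtspr(). */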
                case OP_31_XOP_MTSPR:
                        sprn = get_sprn(inst);
                        rs = get_rs(inst);
                        switch (sprn) {
                        case SPRN_SRR0:
                                vcpu->arch.shared->srr0 = kvmppc_get_gpr(vcpu, rs);
                                break;
                        case SPRN_SRR1:
                                vcpu->arch.shared->srr1 = kvmppc_get_gpr(vcpu, rs);
                                break;

                        /* XXX We need to context-switch the timebase for
                         * watchdog and FIT. */
                        case SPRN_TBWL: break;
                        case SPRN_TBWU: break;

                        case SPRN_MSSSR0: break;

                        case SPRN_DEC:
                                vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs);
                                kvmppc_emulate_dec(vcpu);
                                break;

                        case SPRN_SPRG0:
                                vcpu->arch.shared->sprg0 = kvmppc_get_gpr(vcpu, rs);
                                break;
                        case SPRN_SPRG1:
                                vcpu->arch.shared->sprg1 = kvmppc_get_gpr(vcpu, rs);
                                break;
                        case SPRN_SPRG2:
                                vcpu->arch.shared->sprg2 = kvmppc_get_gpr(vcpu, rs);
                                break;
                        case SPRN_SPRG3:
                                vcpu->arch.shared->sprg3 = kvmppc_get_gpr(vcpu, rs);
                                break;

                        default:
                                emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
                                if (emulated == EMULATE_FAIL)
                                        printk("mtspr: unknown spr %x\n", sprn);
                                break;
                        }
                        kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
                        break;

                case OP_31_XOP_DCBF:
                case OP_31_XOP_DCBI:
                        /* Do nothing. The guest is performing dcbi because
                         * hardware DMA is not snooped by the dcache, but
                         * emulated DMA either goes through the dcache as
                         * normal writes, or the host kernel has handled dcache
                         * coherence. */
                        break;

                case OP_31_XOP_LWBRX:
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
                        break;

                case OP_31_XOP_TLBSYNC:
                        break;

                case OP_31_XOP_STWBRX:
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       4, 0);
                        break;

                case OP_31_XOP_LHBRX:
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
                        break;

                case OP_31_XOP_STHBRX:
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       2, 0);
                        break;

                default:
                        /* Attempt core-specific emulation below. */
                        emulated = EMULATE_FAIL;
                }
                break;

        case OP_LWZ:
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                break;

        case OP_LWZU:
                ra = get_ra(inst);
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                break;

        case OP_LBZ:
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                break;

        case OP_LBZU:
                ra = get_ra(inst);
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                break;

        case OP_STW:
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               4, 1);
                break;

        case OP_STWU:
                ra = get_ra(inst);
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               4, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                break;

        case OP_STB:
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               1, 1);
                break;

        case OP_STBU:
                ra = get_ra(inst);
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               1, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                break;

        case OP_LHZ:
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                break;

        case OP_LHZU:
                ra = get_ra(inst);
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                break;

        case OP_LHA:
                rt = get_rt(inst);
                emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
                break;

        case OP_LHAU:
                ra = get_ra(inst);
                rt = get_rt(inst);
                emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                break;

        case OP_STH:
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               2, 1);
                break;

        case OP_STHU:
                ra = get_ra(inst);
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               2, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                break;

        default:
                emulated = EMULATE_FAIL;
        }

        if (emulated == EMULATE_FAIL) {
                emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
                if (emulated == EMULATE_AGAIN) {
                        advance = 0;
                } else if (emulated == EMULATE_FAIL) {
                        advance = 0;
                        printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
                               "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
                        kvmppc_core_queue_program(vcpu, 0);
                }
        }

        trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

        /* Advance past emulated instruction. */
        if (advance)
                kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

        return emulated;
}