/* arch/powerpc/kvm/book3s_paired_singles.c */

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright Novell Inc 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

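/*
 * Emulation of floating-point and paired-single instructions for Book3S
 * guests.  PS0 of each paired-single register lives in vcpu->arch.fpr[],
 * PS1 in vcpu->arch.qpr[].  Loads and stores go through kvmppc_ld and
 * kvmppc_st and fall back to MMIO emulation when needed; arithmetic is
 * carried out on the host FPU via the fps_ and fpd_ helpers from
 * asm/kvm_fpu.h.
 */
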
#include <asm/kvm.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_fpu.h>
#include <asm/reg.h>
#include <asm/cacheflush.h>
#include <linux/vmalloc.h>

/* #define DEBUG */

#ifdef DEBUG
#define dprintk printk
#else
#define dprintk(...) do { } while(0)
#endif

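/* Primary opcodes of the FP and paired-single load/store instructions. */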
#define OP_LFS                  48
#define OP_LFSU                 49
#define OP_LFD                  50
#define OP_LFDU                 51
#define OP_STFS                 52
#define OP_STFSU                53
#define OP_STFD                 54
#define OP_STFDU                55
#define OP_PSQ_L                56
#define OP_PSQ_LU               57
#define OP_PSQ_ST               60
#define OP_PSQ_STU              61

#define OP_31_LFSX              535
#define OP_31_LFSUX             567
#define OP_31_LFDX              599
#define OP_31_LFDUX             631
#define OP_31_STFSX             663
#define OP_31_STFSUX            695
#define OP_31_STFX              727
#define OP_31_STFUX             759
#define OP_31_LWIZX             887
#define OP_31_STFIWX            983

#define OP_59_FADDS             21
#define OP_59_FSUBS             20
#define OP_59_FSQRTS            22
#define OP_59_FDIVS             18
#define OP_59_FRES              24
#define OP_59_FMULS             25
#define OP_59_FRSQRTES          26
#define OP_59_FMSUBS            28
#define OP_59_FMADDS            29
#define OP_59_FNMSUBS           30
#define OP_59_FNMADDS           31

#define OP_63_FCMPU             0
#define OP_63_FCPSGN            8
#define OP_63_FRSP              12
#define OP_63_FCTIW             14
#define OP_63_FCTIWZ            15
#define OP_63_FDIV              18
#define OP_63_FADD              21
#define OP_63_FSQRT             22
#define OP_63_FSEL              23
#define OP_63_FRE               24
#define OP_63_FMUL              25
#define OP_63_FRSQRTE           26
#define OP_63_FMSUB             28
#define OP_63_FMADD             29
#define OP_63_FNMSUB            30
#define OP_63_FNMADD            31
#define OP_63_FCMPO             32
#define OP_63_MTFSB1            38 // XXX
#define OP_63_FSUB              20
#define OP_63_FNEG              40
#define OP_63_MCRFS             64
#define OP_63_MTFSB0            70
#define OP_63_FMR               72
#define OP_63_MTFSFI            134
#define OP_63_FABS              264
#define OP_63_MFFS              583
#define OP_63_MTFSF             711

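/*
 * Extended opcodes of primary opcode 4 (paired single).  _4X_ entries are
 * decoded from bits 21-30, _4XW_ entries from bits 25-30 and _4A_ entries
 * from bits 26-30, matching kvmppc_inst_is_paired_single() below.
 */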
#define OP_4X_PS_CMPU0          0
#define OP_4X_PSQ_LX            6
#define OP_4XW_PSQ_STX          7
#define OP_4A_PS_SUM0           10
#define OP_4A_PS_SUM1           11
#define OP_4A_PS_MULS0          12
#define OP_4A_PS_MULS1          13
#define OP_4A_PS_MADDS0         14
#define OP_4A_PS_MADDS1         15
#define OP_4A_PS_DIV            18
#define OP_4A_PS_SUB            20
#define OP_4A_PS_ADD            21
#define OP_4A_PS_SEL            23
#define OP_4A_PS_RES            24
#define OP_4A_PS_MUL            25
#define OP_4A_PS_RSQRTE         26
#define OP_4A_PS_MSUB           28
#define OP_4A_PS_MADD           29
#define OP_4A_PS_NMSUB          30
#define OP_4A_PS_NMADD          31
#define OP_4X_PS_CMPO0          32
#define OP_4X_PSQ_LUX           38
#define OP_4XW_PSQ_STUX         39
#define OP_4X_PS_NEG            40
#define OP_4X_PS_CMPU1          64
#define OP_4X_PS_MR             72
#define OP_4X_PS_CMPO1          96
#define OP_4X_PS_NABS           136
#define OP_4X_PS_ABS            264
#define OP_4X_PS_MERGE00        528
#define OP_4X_PS_MERGE01        560
#define OP_4X_PS_MERGE10        592
#define OP_4X_PS_MERGE11        624

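/*
 * Flags for the kvmppc_ps_{one,two,three}_in helpers: SCALAR_HIGH and
 * SCALAR_LOW pick PS0 or PS1 of the second operand for both halves of the
 * operation, SCALAR_NO_PS0 and SCALAR_NO_PS1 suppress the update of the
 * corresponding half of the result.
 */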
#define SCALAR_NONE             0
#define SCALAR_HIGH             (1 << 0)
#define SCALAR_LOW              (1 << 1)
#define SCALAR_NO_PS0           (1 << 2)
#define SCALAR_NO_PS1           (1 << 3)

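/*
 * Layout of a Graphics Quantization Register: store type and scale in the
 * low half, load type and scale in the high half.  The GQR_QUANTIZE_
 * values are the possible type encodings.
 */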
#define GQR_ST_TYPE_MASK        0x00000007
#define GQR_ST_TYPE_SHIFT       0
#define GQR_ST_SCALE_MASK       0x00003f00
#define GQR_ST_SCALE_SHIFT      8
#define GQR_LD_TYPE_MASK        0x00070000
#define GQR_LD_TYPE_SHIFT       16
#define GQR_LD_SCALE_MASK       0x3f000000
#define GQR_LD_SCALE_SHIFT      24

#define GQR_QUANTIZE_FLOAT      0
#define GQR_QUANTIZE_U8         4
#define GQR_QUANTIZE_U16        5
#define GQR_QUANTIZE_S8         6
#define GQR_QUANTIZE_S16        7

#define FPU_LS_SINGLE           0
#define FPU_LS_DOUBLE           1
#define FPU_LS_SINGLE_LOW       2

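/* Keep QPR rt (PS1) in sync by storing the single-precision image of FPR rt. */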
static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
{
        kvm_cvt_df(&vcpu->arch.fpr[rt], &vcpu->arch.qpr[rt]);
}

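/*
 * Raise a data storage interrupt for a faulting guest access: clear the
 * relevant MSR bits, set DAR to the faulting address and build a DSISR
 * with the page-fault bit (plus the store bit for stores).
 */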
static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
{
        u64 dsisr;
        struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;

        shared->msr = kvmppc_set_field(shared->msr, 33, 36, 0);
        shared->msr = kvmppc_set_field(shared->msr, 42, 47, 0);
        shared->dar = eaddr;
        /* Page Fault */
        dsisr = kvmppc_set_field(0, 33, 33, 1);
        if (is_store)
                dsisr = kvmppc_set_field(dsisr, 38, 38, 1);
        shared->dsisr = dsisr;
        kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
}

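/*
 * Emulate a floating-point load (lfs/lfd and their indexed/update forms).
 * Reads 4 or 8 bytes from guest memory, falling back to MMIO emulation,
 * and stores the result into FPR rs (and QPR rs for single loads).
 */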
static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                   int rs, ulong addr, int ls_type)
{
        int emulated = EMULATE_FAIL;
        int r;
        char tmp[8];
        int len = sizeof(u32);

        if (ls_type == FPU_LS_DOUBLE)
                len = sizeof(u64);

        /* read from memory */
        r = kvmppc_ld(vcpu, &addr, len, tmp, true);
        vcpu->arch.paddr_accessed = addr;

        if (r < 0) {
                kvmppc_inject_pf(vcpu, addr, false);
                goto done_load;
        } else if (r == EMULATE_DO_MMIO) {
                emulated = kvmppc_handle_load(run, vcpu, KVM_REG_FPR | rs, len, 1);
                goto done_load;
        }

        emulated = EMULATE_DONE;

        /* put in registers */
        switch (ls_type) {
        case FPU_LS_SINGLE:
                kvm_cvt_fd((u32*)tmp, &vcpu->arch.fpr[rs]);
                vcpu->arch.qpr[rs] = *((u32*)tmp);
                break;
        case FPU_LS_DOUBLE:
                vcpu->arch.fpr[rs] = *((u64*)tmp);
                break;
        }

        dprintk(KERN_INFO "KVM: FPR_LD [0x%llx] at 0x%lx (%d)\n", *(u64*)tmp,
                          addr, len);

done_load:
        return emulated;
}

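/*
 * Emulate a floating-point store (stfs/stfd/stfiwx and friends).  Converts
 * FPR rs to the requested memory format and writes it out, falling back to
 * MMIO emulation when necessary.
 */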
static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                    int rs, ulong addr, int ls_type)
{
        int emulated = EMULATE_FAIL;
        int r;
        char tmp[8];
        u64 val;
        int len;

        switch (ls_type) {
        case FPU_LS_SINGLE:
                kvm_cvt_df(&vcpu->arch.fpr[rs], (u32*)tmp);
                val = *((u32*)tmp);
                len = sizeof(u32);
                break;
        case FPU_LS_SINGLE_LOW:
                *((u32*)tmp) = vcpu->arch.fpr[rs];
                val = vcpu->arch.fpr[rs] & 0xffffffff;
                len = sizeof(u32);
                break;
        case FPU_LS_DOUBLE:
                *((u64*)tmp) = vcpu->arch.fpr[rs];
                val = vcpu->arch.fpr[rs];
                len = sizeof(u64);
                break;
        default:
                val = 0;
                len = 0;
        }

        r = kvmppc_st(vcpu, &addr, len, tmp, true);
        vcpu->arch.paddr_accessed = addr;
        if (r < 0) {
                kvmppc_inject_pf(vcpu, addr, true);
        } else if (r == EMULATE_DO_MMIO) {
                emulated = kvmppc_handle_store(run, vcpu, val, len, 1);
        } else {
                emulated = EMULATE_DONE;
        }

        dprintk(KERN_INFO "KVM: FPR_ST [0x%llx] at 0x%lx (%d)\n",
                          val, addr, len);

        return emulated;
}

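/*
 * Emulate a paired-single quantized load (psq_l and friends).  With w set
 * only PS0 is loaded and PS1 is forced to 1.0; otherwise two consecutive
 * 32-bit values are read into PS0 and PS1.  The quantization index i is
 * currently unused (only the float format is handled).
 */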
static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                   int rs, ulong addr, bool w, int i)
{
        int emulated = EMULATE_FAIL;
        int r;
        float one = 1.0;
        u32 tmp[2];

        /* read from memory */
        if (w) {
                r = kvmppc_ld(vcpu, &addr, sizeof(u32), tmp, true);
                memcpy(&tmp[1], &one, sizeof(u32));
        } else {
                r = kvmppc_ld(vcpu, &addr, sizeof(u32) * 2, tmp, true);
        }
        vcpu->arch.paddr_accessed = addr;
        if (r < 0) {
                kvmppc_inject_pf(vcpu, addr, false);
                goto done_load;
        } else if ((r == EMULATE_DO_MMIO) && w) {
                emulated = kvmppc_handle_load(run, vcpu, KVM_REG_FPR | rs, 4, 1);
                vcpu->arch.qpr[rs] = tmp[1];
                goto done_load;
        } else if (r == EMULATE_DO_MMIO) {
                emulated = kvmppc_handle_load(run, vcpu, KVM_REG_FQPR | rs, 8, 1);
                goto done_load;
        }

        emulated = EMULATE_DONE;

        /* put in registers */
        kvm_cvt_fd(&tmp[0], &vcpu->arch.fpr[rs]);
        vcpu->arch.qpr[rs] = tmp[1];

        dprintk(KERN_INFO "KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0],
                          tmp[1], addr, w ? 4 : 8);

done_load:
        return emulated;
}

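/*
 * Emulate a paired-single quantized store (psq_st and friends).  PS0 is
 * converted back to single precision and written together with PS1; with
 * w set only PS0 (4 bytes) is stored.
 */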
static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                    int rs, ulong addr, bool w, int i)
{
        int emulated = EMULATE_FAIL;
        int r;
        u32 tmp[2];
        int len = w ? sizeof(u32) : sizeof(u64);

        kvm_cvt_df(&vcpu->arch.fpr[rs], &tmp[0]);
        tmp[1] = vcpu->arch.qpr[rs];

        r = kvmppc_st(vcpu, &addr, len, tmp, true);
        vcpu->arch.paddr_accessed = addr;
        if (r < 0) {
                kvmppc_inject_pf(vcpu, addr, true);
        } else if ((r == EMULATE_DO_MMIO) && w) {
                emulated = kvmppc_handle_store(run, vcpu, tmp[0], 4, 1);
        } else if (r == EMULATE_DO_MMIO) {
                u64 val = ((u64)tmp[0] << 32) | tmp[1];
                emulated = kvmppc_handle_store(run, vcpu, val, 8, 1);
        } else {
                emulated = EMULATE_DONE;
        }

        dprintk(KERN_INFO "KVM: PSQ_ST [0x%x, 0x%x] at 0x%lx (%d)\n",
                          tmp[0], tmp[1], addr, len);

        return emulated;
}

/*
 * Extracts instruction bits in the ordering used by the spec: bit 0 is the
 * most significant bit of the 32-bit instruction word, and both msb and
 * lsb are included in the result.
 */
static inline u32 inst_get_field(u32 inst, int msb, int lsb)
{
        return kvmppc_get_field(inst, msb + 32, lsb + 32);
}

/*
 * Replaces inst bits with ordering according to spec.
 */
static inline u32 inst_set_field(u32 inst, int msb, int lsb, int value)
{
        return kvmppc_set_field(inst, msb + 32, lsb + 32, value);
}

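/*
 * Returns true if the instruction is one the paired-single emulation below
 * knows how to handle and the vcpu actually has paired singles enabled.
 */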
bool kvmppc_inst_is_paired_single(struct kvm_vcpu *vcpu, u32 inst)
{
        if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
                return false;

        switch (get_op(inst)) {
        case OP_PSQ_L:
        case OP_PSQ_LU:
        case OP_PSQ_ST:
        case OP_PSQ_STU:
        case OP_LFS:
        case OP_LFSU:
        case OP_LFD:
        case OP_LFDU:
        case OP_STFS:
        case OP_STFSU:
        case OP_STFD:
        case OP_STFDU:
                return true;
        case 4:
                /* X form */
                switch (inst_get_field(inst, 21, 30)) {
                case OP_4X_PS_CMPU0:
                case OP_4X_PSQ_LX:
                case OP_4X_PS_CMPO0:
                case OP_4X_PSQ_LUX:
                case OP_4X_PS_NEG:
                case OP_4X_PS_CMPU1:
                case OP_4X_PS_MR:
                case OP_4X_PS_CMPO1:
                case OP_4X_PS_NABS:
                case OP_4X_PS_ABS:
                case OP_4X_PS_MERGE00:
                case OP_4X_PS_MERGE01:
                case OP_4X_PS_MERGE10:
                case OP_4X_PS_MERGE11:
                        return true;
                }
                /* XW form */
                switch (inst_get_field(inst, 25, 30)) {
                case OP_4XW_PSQ_STX:
                case OP_4XW_PSQ_STUX:
                        return true;
                }
                /* A form */
                switch (inst_get_field(inst, 26, 30)) {
                case OP_4A_PS_SUM1:
                case OP_4A_PS_SUM0:
                case OP_4A_PS_MULS0:
                case OP_4A_PS_MULS1:
                case OP_4A_PS_MADDS0:
                case OP_4A_PS_MADDS1:
                case OP_4A_PS_DIV:
                case OP_4A_PS_SUB:
                case OP_4A_PS_ADD:
                case OP_4A_PS_SEL:
                case OP_4A_PS_RES:
                case OP_4A_PS_MUL:
                case OP_4A_PS_RSQRTE:
                case OP_4A_PS_MSUB:
                case OP_4A_PS_MADD:
                case OP_4A_PS_NMSUB:
                case OP_4A_PS_NMADD:
                        return true;
                }
                break;
        case 59:
                switch (inst_get_field(inst, 21, 30)) {
                case OP_59_FADDS:
                case OP_59_FSUBS:
                case OP_59_FDIVS:
                case OP_59_FRES:
                case OP_59_FRSQRTES:
                        return true;
                }
                switch (inst_get_field(inst, 26, 30)) {
                case OP_59_FMULS:
                case OP_59_FMSUBS:
                case OP_59_FMADDS:
                case OP_59_FNMSUBS:
                case OP_59_FNMADDS:
                        return true;
                }
                break;
        case 63:
                switch (inst_get_field(inst, 21, 30)) {
                case OP_63_MTFSB0:
                case OP_63_MTFSB1:
                case OP_63_MTFSF:
                case OP_63_MTFSFI:
                case OP_63_MCRFS:
                case OP_63_MFFS:
                case OP_63_FCMPU:
                case OP_63_FCMPO:
                case OP_63_FNEG:
                case OP_63_FMR:
                case OP_63_FABS:
                case OP_63_FRSP:
                case OP_63_FDIV:
                case OP_63_FADD:
                case OP_63_FSUB:
                case OP_63_FCTIW:
                case OP_63_FCTIWZ:
                case OP_63_FRSQRTE:
                case OP_63_FCPSGN:
                        return true;
                }
                switch (inst_get_field(inst, 26, 30)) {
                case OP_63_FMUL:
                case OP_63_FSEL:
                case OP_63_FMSUB:
                case OP_63_FMADD:
                case OP_63_FNMSUB:
                case OP_63_FNMADD:
                        return true;
                }
                break;
        case 31:
                switch (inst_get_field(inst, 21, 30)) {
                case OP_31_LFSX:
                case OP_31_LFSUX:
                case OP_31_LFDX:
                case OP_31_LFDUX:
                case OP_31_STFSX:
                case OP_31_STFSUX:
                case OP_31_STFX:
                case OP_31_STFUX:
                case OP_31_STFIWX:
                        return true;
                }
                break;
        }

        return false;
}

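/* Sign-extend the 12-bit displacement field of psq_l/psq_lu/psq_st/psq_stu. */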
static int get_d_signext(u32 inst)
{
        int d = inst & 0xfff;

        /* the displacement is a signed 12-bit field */
        if (d & 0x800)
                return d - 0x1000;

        return d;
}

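/*
 * Common helper for the three-operand paired-single arithmetic ops
 * (ps_madd and friends): converts PS0 of the inputs to single precision,
 * applies func to both halves and writes the results back to FPR/QPR of
 * reg_out, subject to the SCALAR_ flags.
 */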
static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
                                      int reg_out, int reg_in1, int reg_in2,
                                      int reg_in3, int scalar,
                                      void (*func)(u64 *fpscr,
                                                 u32 *dst, u32 *src1,
                                                 u32 *src2, u32 *src3))
{
        u32 *qpr = vcpu->arch.qpr;
        u64 *fpr = vcpu->arch.fpr;
        u32 ps0_out;
        u32 ps0_in1, ps0_in2, ps0_in3;
        u32 ps1_in1, ps1_in2, ps1_in3;

        /* RC */
        WARN_ON(rc);

        /* PS0 */
        kvm_cvt_df(&fpr[reg_in1], &ps0_in1);
        kvm_cvt_df(&fpr[reg_in2], &ps0_in2);
        kvm_cvt_df(&fpr[reg_in3], &ps0_in3);

        if (scalar & SCALAR_LOW)
                ps0_in2 = qpr[reg_in2];

        func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3);

        dprintk(KERN_INFO "PS3 ps0 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
                          ps0_in1, ps0_in2, ps0_in3, ps0_out);

        if (!(scalar & SCALAR_NO_PS0))
                kvm_cvt_fd(&ps0_out, &fpr[reg_out]);

        /* PS1 */
        ps1_in1 = qpr[reg_in1];
        ps1_in2 = qpr[reg_in2];
        ps1_in3 = qpr[reg_in3];

        if (scalar & SCALAR_HIGH)
                ps1_in2 = ps0_in2;

        if (!(scalar & SCALAR_NO_PS1))
                func(&vcpu->arch.fpscr, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3);

        dprintk(KERN_INFO "PS3 ps1 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
                          ps1_in1, ps1_in2, ps1_in3, qpr[reg_out]);

        return EMULATE_DONE;
}

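/*
 * Same as kvmppc_ps_three_in() for the two-operand ops (ps_add, ps_mul,
 * ps_sum0/1 etc.).
 */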
static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
                                    int reg_out, int reg_in1, int reg_in2,
                                    int scalar,
                                    void (*func)(u64 *fpscr,
                                                 u32 *dst, u32 *src1,
                                                 u32 *src2))
{
        u32 *qpr = vcpu->arch.qpr;
        u64 *fpr = vcpu->arch.fpr;
        u32 ps0_out;
        u32 ps0_in1, ps0_in2;
        u32 ps1_out;
        u32 ps1_in1, ps1_in2;

        /* RC */
        WARN_ON(rc);

        /* PS0 */
        kvm_cvt_df(&fpr[reg_in1], &ps0_in1);

        if (scalar & SCALAR_LOW)
                ps0_in2 = qpr[reg_in2];
        else
                kvm_cvt_df(&fpr[reg_in2], &ps0_in2);

        func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2);

        if (!(scalar & SCALAR_NO_PS0)) {
                dprintk(KERN_INFO "PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n",
                                  ps0_in1, ps0_in2, ps0_out);

                kvm_cvt_fd(&ps0_out, &fpr[reg_out]);
        }

        /* PS1 */
        ps1_in1 = qpr[reg_in1];
        ps1_in2 = qpr[reg_in2];

        if (scalar & SCALAR_HIGH)
                ps1_in2 = ps0_in2;

        func(&vcpu->arch.fpscr, &ps1_out, &ps1_in1, &ps1_in2);

        if (!(scalar & SCALAR_NO_PS1)) {
                qpr[reg_out] = ps1_out;

                dprintk(KERN_INFO "PS2 ps1 -> f(0x%x, 0x%x) = 0x%x\n",
                                  ps1_in1, ps1_in2, qpr[reg_out]);
        }

        return EMULATE_DONE;
}

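/* Single-operand variant (ps_res, ps_rsqrte). */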
static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
                                    int reg_out, int reg_in,
                                    void (*func)(u64 *t,
                                                 u32 *dst, u32 *src1))
{
        u32 *qpr = vcpu->arch.qpr;
        u64 *fpr = vcpu->arch.fpr;
        u32 ps0_out, ps0_in;
        u32 ps1_in;

        /* RC */
        WARN_ON(rc);

        /* PS0 */
        kvm_cvt_df(&fpr[reg_in], &ps0_in);
        func(&vcpu->arch.fpscr, &ps0_out, &ps0_in);

        dprintk(KERN_INFO "PS1 ps0 -> f(0x%x) = 0x%x\n",
                          ps0_in, ps0_out);

        kvm_cvt_fd(&ps0_out, &fpr[reg_out]);

        /* PS1 */
        ps1_in = qpr[reg_in];
        func(&vcpu->arch.fpscr, &qpr[reg_out], &ps1_in);

        dprintk(KERN_INFO "PS1 ps1 -> f(0x%x) = 0x%x\n",
                          ps1_in, qpr[reg_out]);

        return EMULATE_DONE;
}

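/*
 * Main entry point: decode and emulate one FP or paired-single instruction.
 * Returns EMULATE_DONE on success, EMULATE_AGAIN if an FP unavailable
 * interrupt was queued instead, or EMULATE_FAIL for instructions this code
 * does not handle.
 */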
int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        u32 inst = kvmppc_get_last_inst(vcpu);
        enum emulation_result emulated = EMULATE_DONE;

        int ax_rd = inst_get_field(inst, 6, 10);
        int ax_ra = inst_get_field(inst, 11, 15);
        int ax_rb = inst_get_field(inst, 16, 20);
        int ax_rc = inst_get_field(inst, 21, 25);
        short full_d = inst_get_field(inst, 16, 31);

        u64 *fpr_d = &vcpu->arch.fpr[ax_rd];
        u64 *fpr_a = &vcpu->arch.fpr[ax_ra];
        u64 *fpr_b = &vcpu->arch.fpr[ax_rb];
        u64 *fpr_c = &vcpu->arch.fpr[ax_rc];

        bool rcomp = (inst & 1) ? true : false;
        u32 cr = kvmppc_get_cr(vcpu);
#ifdef DEBUG
        int i;
#endif

        if (!kvmppc_inst_is_paired_single(vcpu, inst))
                return EMULATE_FAIL;

        if (!(vcpu->arch.shared->msr & MSR_FP)) {
                kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL);
                return EMULATE_AGAIN;
        }

        kvmppc_giveup_ext(vcpu, MSR_FP);
        preempt_disable();
        enable_kernel_fp();
        /* Do we need to clear FE0 / FE1 here? Don't think so. */

#ifdef DEBUG
        for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
                u32 f;
                kvm_cvt_df(&vcpu->arch.fpr[i], &f);
                dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx    QPR[%d] = 0x%x\n",
                        i, f, vcpu->arch.fpr[i], i, vcpu->arch.qpr[i]);
        }
#endif

        switch (get_op(inst)) {
        case OP_PSQ_L:
        {
                ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
                bool w = inst_get_field(inst, 16, 16) ? true : false;
                int i = inst_get_field(inst, 17, 19);

                addr += get_d_signext(inst);
                emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
                break;
        }
        case OP_PSQ_LU:
        {
                ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
                bool w = inst_get_field(inst, 16, 16) ? true : false;
                int i = inst_get_field(inst, 17, 19);

                addr += get_d_signext(inst);
                emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);

                if (emulated == EMULATE_DONE)
                        kvmppc_set_gpr(vcpu, ax_ra, addr);
                break;
        }
        case OP_PSQ_ST:
        {
                ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
                bool w = inst_get_field(inst, 16, 16) ? true : false;
                int i = inst_get_field(inst, 17, 19);

                addr += get_d_signext(inst);
                emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
                break;
        }
        case OP_PSQ_STU:
        {
                ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
                bool w = inst_get_field(inst, 16, 16) ? true : false;
                int i = inst_get_field(inst, 17, 19);

                addr += get_d_signext(inst);
                emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);

                if (emulated == EMULATE_DONE)
                        kvmppc_set_gpr(vcpu, ax_ra, addr);
                break;
        }
        case 4:
                /* X form */
                switch (inst_get_field(inst, 21, 30)) {
                case OP_4X_PS_CMPU0:
                        /* XXX */
                        emulated = EMULATE_FAIL;
                        break;
                case OP_4X_PSQ_LX:
                {
                        ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
                        bool w = inst_get_field(inst, 21, 21) ? true : false;
                        int i = inst_get_field(inst, 22, 24);

                        addr += kvmppc_get_gpr(vcpu, ax_rb);
                        emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
                        break;
                }
                case OP_4X_PS_CMPO0:
                        /* XXX */
                        emulated = EMULATE_FAIL;
                        break;
                case OP_4X_PSQ_LUX:
                {
                        ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
                        bool w = inst_get_field(inst, 21, 21) ? true : false;
                        int i = inst_get_field(inst, 22, 24);

                        addr += kvmppc_get_gpr(vcpu, ax_rb);
                        emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);

                        if (emulated == EMULATE_DONE)
                                kvmppc_set_gpr(vcpu, ax_ra, addr);
                        break;
                }
                case OP_4X_PS_NEG:
                        vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
                        vcpu->arch.fpr[ax_rd] ^= 0x8000000000000000ULL;
                        vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
                        vcpu->arch.qpr[ax_rd] ^= 0x80000000;
                        break;
                case OP_4X_PS_CMPU1:
                        /* XXX */
                        emulated = EMULATE_FAIL;
                        break;
                case OP_4X_PS_MR:
                        WARN_ON(rcomp);
                        vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
                        vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
                        break;
                case OP_4X_PS_CMPO1:
                        /* XXX */
                        emulated = EMULATE_FAIL;
                        break;
                case OP_4X_PS_NABS:
                        WARN_ON(rcomp);
                        vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
                        vcpu->arch.fpr[ax_rd] |= 0x8000000000000000ULL;
                        vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
                        vcpu->arch.qpr[ax_rd] |= 0x80000000;
                        break;
                case OP_4X_PS_ABS:
                        WARN_ON(rcomp);
                        vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
                        vcpu->arch.fpr[ax_rd] &= ~0x8000000000000000ULL;
                        vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
                        vcpu->arch.qpr[ax_rd] &= ~0x80000000;
                        break;
                case OP_4X_PS_MERGE00:
                        WARN_ON(rcomp);
                        vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra];
                        /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
                        kvm_cvt_df(&vcpu->arch.fpr[ax_rb],
                                   &vcpu->arch.qpr[ax_rd]);
                        break;
                case OP_4X_PS_MERGE01:
                        WARN_ON(rcomp);
                        vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra];
                        vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
                        break;
                case OP_4X_PS_MERGE10:
                        WARN_ON(rcomp);
                        /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
                        kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
                                   &vcpu->arch.fpr[ax_rd]);
                        /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
                        kvm_cvt_df(&vcpu->arch.fpr[ax_rb],
                                   &vcpu->arch.qpr[ax_rd]);
                        break;
                case OP_4X_PS_MERGE11:
                        WARN_ON(rcomp);
                        /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
                        kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
                                   &vcpu->arch.fpr[ax_rd]);
                        vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
                        break;
                }
                /* XW form */
                switch (inst_get_field(inst, 25, 30)) {
                case OP_4XW_PSQ_STX:
                {
                        ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
                        bool w = inst_get_field(inst, 21, 21) ? true : false;
                        int i = inst_get_field(inst, 22, 24);

                        addr += kvmppc_get_gpr(vcpu, ax_rb);
                        emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
                        break;
                }
                case OP_4XW_PSQ_STUX:
                {
                        ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
                        bool w = inst_get_field(inst, 21, 21) ? true : false;
                        int i = inst_get_field(inst, 22, 24);

                        addr += kvmppc_get_gpr(vcpu, ax_rb);
                        emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);

                        if (emulated == EMULATE_DONE)
                                kvmppc_set_gpr(vcpu, ax_ra, addr);
                        break;
                }
                }
                /* A form */
                switch (inst_get_field(inst, 26, 30)) {
                case OP_4A_PS_SUM1:
                        emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
                                        ax_rb, ax_ra, SCALAR_NO_PS0 | SCALAR_HIGH, fps_fadds);
                        vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rc];
                        break;
                case OP_4A_PS_SUM0:
                        emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
                                        ax_ra, ax_rb, SCALAR_NO_PS1 | SCALAR_LOW, fps_fadds);
                        vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rc];
                        break;
                case OP_4A_PS_MULS0:
                        emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
                                        ax_ra, ax_rc, SCALAR_HIGH, fps_fmuls);
                        break;
                case OP_4A_PS_MULS1:
                        emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
                                        ax_ra, ax_rc, SCALAR_LOW, fps_fmuls);
                        break;
                case OP_4A_PS_MADDS0:
                        emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
                                        ax_ra, ax_rc, ax_rb, SCALAR_HIGH, fps_fmadds);
                        break;
                case OP_4A_PS_MADDS1:
                        emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
                                        ax_ra, ax_rc, ax_rb, SCALAR_LOW, fps_fmadds);
                        break;
                case OP_4A_PS_DIV:
                        emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
                                        ax_ra, ax_rb, SCALAR_NONE, fps_fdivs);
                        break;
                case OP_4A_PS_SUB:
                        emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
                                        ax_ra, ax_rb, SCALAR_NONE, fps_fsubs);
                        break;
                case OP_4A_PS_ADD:
                        emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
                                        ax_ra, ax_rb, SCALAR_NONE, fps_fadds);
                        break;
                case OP_4A_PS_SEL:
                        emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
                                        ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fsel);
                        break;
                case OP_4A_PS_RES:
                        emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd,
                                        ax_rb, fps_fres);
                        break;
                case OP_4A_PS_MUL:
                        emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
                                        ax_ra, ax_rc, SCALAR_NONE, fps_fmuls);
                        break;
                case OP_4A_PS_RSQRTE:
                        emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd,
                                        ax_rb, fps_frsqrte);
                        break;
                case OP_4A_PS_MSUB:
                        emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
                                        ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fmsubs);
                        break;
                case OP_4A_PS_MADD:
                        emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
                                        ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fmadds);
                        break;
                case OP_4A_PS_NMSUB:
                        emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
                                        ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fnmsubs);
                        break;
                case OP_4A_PS_NMADD:
                        emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
                                        ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fnmadds);
                        break;
                }
                break;

        /* Real FPU operations */

        case OP_LFS:
        {
                ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;

                emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
                                                   FPU_LS_SINGLE);
                break;
        }
        case OP_LFSU:
        {
                ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;

                emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
                                                   FPU_LS_SINGLE);

                if (emulated == EMULATE_DONE)
                        kvmppc_set_gpr(vcpu, ax_ra, addr);
                break;
        }
        case OP_LFD:
        {
                ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;

                emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
                                                   FPU_LS_DOUBLE);
                break;
        }
        case OP_LFDU:
        {
                ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;

                emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
                                                   FPU_LS_DOUBLE);

                if (emulated == EMULATE_DONE)
                        kvmppc_set_gpr(vcpu, ax_ra, addr);
                break;
        }
        case OP_STFS:
        {
                ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;

                emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
                                                    FPU_LS_SINGLE);
                break;
        }
        case OP_STFSU:
        {
                ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;

                emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
                                                    FPU_LS_SINGLE);

                if (emulated == EMULATE_DONE)
                        kvmppc_set_gpr(vcpu, ax_ra, addr);
                break;
        }
        case OP_STFD:
        {
                ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;

                emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
                                                    FPU_LS_DOUBLE);
                break;
        }
        case OP_STFDU:
        {
                ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;

                emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
                                                    FPU_LS_DOUBLE);

                if (emulated == EMULATE_DONE)
                        kvmppc_set_gpr(vcpu, ax_ra, addr);
                break;
        }
        case 31:
                switch (inst_get_field(inst, 21, 30)) {
                case OP_31_LFSX:
                {
                        ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;

                        addr += kvmppc_get_gpr(vcpu, ax_rb);
                        emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
                                                           addr, FPU_LS_SINGLE);
                        break;
                }
                case OP_31_LFSUX:
                {
                        ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
                                     kvmppc_get_gpr(vcpu, ax_rb);

                        emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
                                                           addr, FPU_LS_SINGLE);

                        if (emulated == EMULATE_DONE)
                                kvmppc_set_gpr(vcpu, ax_ra, addr);
                        break;
                }
                case OP_31_LFDX:
                {
                        ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
                                     kvmppc_get_gpr(vcpu, ax_rb);

                        emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
                                                           addr, FPU_LS_DOUBLE);
                        break;
                }
                case OP_31_LFDUX:
                {
                        ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
                                     kvmppc_get_gpr(vcpu, ax_rb);

                        emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
                                                           addr, FPU_LS_DOUBLE);

                        if (emulated == EMULATE_DONE)
                                kvmppc_set_gpr(vcpu, ax_ra, addr);
                        break;
                }
                case OP_31_STFSX:
                {
                        ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
                                     kvmppc_get_gpr(vcpu, ax_rb);

                        emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
                                                            addr, FPU_LS_SINGLE);
                        break;
                }
                case OP_31_STFSUX:
                {
                        ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
                                     kvmppc_get_gpr(vcpu, ax_rb);

                        emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
                                                            addr, FPU_LS_SINGLE);

                        if (emulated == EMULATE_DONE)
                                kvmppc_set_gpr(vcpu, ax_ra, addr);
                        break;
                }
                case OP_31_STFX:
                {
                        ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
                                     kvmppc_get_gpr(vcpu, ax_rb);

                        emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
                                                            addr, FPU_LS_DOUBLE);
                        break;
                }
                case OP_31_STFUX:
                {
                        ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
                                     kvmppc_get_gpr(vcpu, ax_rb);

                        emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
                                                            addr, FPU_LS_DOUBLE);

                        if (emulated == EMULATE_DONE)
                                kvmppc_set_gpr(vcpu, ax_ra, addr);
                        break;
                }
                case OP_31_STFIWX:
                {
                        ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
                                     kvmppc_get_gpr(vcpu, ax_rb);

                        emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
                                                            addr,
                                                            FPU_LS_SINGLE_LOW);
                        break;
                }
                        break;
                }
                break;
        case 59:
                switch (inst_get_field(inst, 21, 30)) {
                case OP_59_FADDS:
                        fpd_fadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
                        kvmppc_sync_qpr(vcpu, ax_rd);
                        break;
                case OP_59_FSUBS:
                        fpd_fsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
                        kvmppc_sync_qpr(vcpu, ax_rd);
                        break;
                case OP_59_FDIVS:
                        fpd_fdivs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
                        kvmppc_sync_qpr(vcpu, ax_rd);
                        break;
                case OP_59_FRES:
                        fpd_fres(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
                        kvmppc_sync_qpr(vcpu, ax_rd);
                        break;
                case OP_59_FRSQRTES:
                        fpd_frsqrtes(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
                        kvmppc_sync_qpr(vcpu, ax_rd);
                        break;
                }
                switch (inst_get_field(inst, 26, 30)) {
                case OP_59_FMULS:
                        fpd_fmuls(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c);
                        kvmppc_sync_qpr(vcpu, ax_rd);
                        break;
                case OP_59_FMSUBS:
                        fpd_fmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
                        kvmppc_sync_qpr(vcpu, ax_rd);
                        break;
                case OP_59_FMADDS:
                        fpd_fmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
                        kvmppc_sync_qpr(vcpu, ax_rd);
                        break;
                case OP_59_FNMSUBS:
                        fpd_fnmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
                        kvmppc_sync_qpr(vcpu, ax_rd);
                        break;
                case OP_59_FNMADDS:
                        fpd_fnmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
                        kvmppc_sync_qpr(vcpu, ax_rd);
                        break;
                }
                break;
        case 63:
                switch (inst_get_field(inst, 21, 30)) {
                case OP_63_MTFSB0:
                case OP_63_MTFSB1:
                case OP_63_MCRFS:
                case OP_63_MTFSFI:
                        /* XXX need to implement */
                        break;
                case OP_63_MFFS:
                        /* XXX missing CR */
                        *fpr_d = vcpu->arch.fpscr;
                        break;
                case OP_63_MTFSF:
                        /* XXX missing fm bits */
                        /* XXX missing CR */
                        vcpu->arch.fpscr = *fpr_b;
                        break;
                case OP_63_FCMPU:
                {
                        u32 tmp_cr;
                        u32 cr0_mask = 0xf0000000;
                        u32 cr_shift = inst_get_field(inst, 6, 8) * 4;

                        fpd_fcmpu(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b);
                        /* insert the compare result into CR field crfD */
                        cr &= ~(cr0_mask >> cr_shift);
                        cr |= (tmp_cr & cr0_mask) >> cr_shift;
                        break;
                }
                case OP_63_FCMPO:
                {
                        u32 tmp_cr;
                        u32 cr0_mask = 0xf0000000;
                        u32 cr_shift = inst_get_field(inst, 6, 8) * 4;

                        fpd_fcmpo(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b);
                        /* insert the compare result into CR field crfD */
                        cr &= ~(cr0_mask >> cr_shift);
                        cr |= (tmp_cr & cr0_mask) >> cr_shift;
                        break;
                }
                case OP_63_FNEG:
                        fpd_fneg(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
                        break;
                case OP_63_FMR:
                        *fpr_d = *fpr_b;
                        break;
                case OP_63_FABS:
                        fpd_fabs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
                        break;
                case OP_63_FCPSGN:
                        fpd_fcpsgn(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
                        break;
                case OP_63_FDIV:
                        fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
                        break;
                case OP_63_FADD:
                        fpd_fadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
                        break;
                case OP_63_FSUB:
                        fpd_fsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
                        break;
                case OP_63_FCTIW:
                        fpd_fctiw(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
                        break;
                case OP_63_FCTIWZ:
                        fpd_fctiwz(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
                        break;
                case OP_63_FRSP:
                        fpd_frsp(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
                        kvmppc_sync_qpr(vcpu, ax_rd);
                        break;
                case OP_63_FRSQRTE:
                {
                        double one = 1.0f;

                        /* fD = sqrt(fB) */
                        fpd_fsqrt(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
                        /* fD = 1.0f / fD */
                        fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, (u64*)&one, fpr_d);
                        break;
                }
                }
                switch (inst_get_field(inst, 26, 30)) {
                case OP_63_FMUL:
                        fpd_fmul(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c);
                        break;
                case OP_63_FSEL:
                        fpd_fsel(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
                        break;
                case OP_63_FMSUB:
                        fpd_fmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
                        break;
                case OP_63_FMADD:
                        fpd_fmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
                        break;
                case OP_63_FNMSUB:
                        fpd_fnmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
                        break;
                case OP_63_FNMADD:
                        fpd_fnmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
                        break;
                }
                break;
        }

#ifdef DEBUG
        for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
                u32 f;
                kvm_cvt_df(&vcpu->arch.fpr[i], &f);
                dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f);
        }
#endif

        if (rcomp)
                kvmppc_set_cr(vcpu, cr);

        preempt_enable();

        return emulated;
}