arch/powerpc/kernel/kvm_emul.S
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

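/*
 * Guest-side templates for paravirtualized instruction emulation.
 * The patching code in arch/powerpc/kernel/kvm.c copies these chunks
 * and branches to them in place of privileged instructions, so the
 * guest can handle most MSR and SR accesses through the magic page
 * without trapping into the hypervisor.
 */
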
/* Hypercall entry point. Will be patched with device tree instructions. */

.global kvm_hypercall_start
kvm_hypercall_start:
        li      r3, -1
        nop
        nop
        nop
        blr

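/*
 * Until patching runs, the stub above returns -1 in r3 to signal that
 * hypercalls are unavailable; the three nops leave room for the
 * hypercall sequence the hypervisor advertises in the device tree.
 */
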
#define KVM_MAGIC_PAGE          (-4096)

#ifdef CONFIG_64BIT
#define LL64(reg, offs, reg2)   ld      reg, (offs)(reg2)
#define STL64(reg, offs, reg2)  std     reg, (offs)(reg2)
#else
#define LL64(reg, offs, reg2)   lwz     reg, (offs + 4)(reg2)
#define STL64(reg, offs, reg2)  stw     reg, (offs + 4)(reg2)
#endif
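
/*
 * The magic page shared with the hypervisor is mapped at the top of
 * the address space, so its fields sit at small negative offsets.
 * Base register 0 encodes the literal value zero, which lets a plain
 * load or store reach them.  On 32-bit, LL64/STL64 touch only the low
 * word of the 64-bit big-endian fields, hence the "+ 4" offset.
 */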

#define SCRATCH_SAVE                                                    \
        /* Enable critical section. We are critical if                 \
           shared->critical == r1 */                                   \
        STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);             \
                                                                        \
        /* Save state */                                                \
        PPC_STL r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);         \
        PPC_STL r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);         \
        mfcr    r31;                                                    \
        stw     r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);

#define SCRATCH_RESTORE                                                 \
        /* Restore state */                                             \
        PPC_LL  r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);         \
        lwz     r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);         \
        mtcr    r30;                                                    \
        PPC_LL  r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);         \
                                                                        \
        /* Disable critical section. We are critical if                \
           shared->critical == r1 and r2 is always != r1 */            \
        STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);

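/*
 * While shared->critical equals the current r1 the hypervisor defers
 * interrupt injection, so a template can use the scratch slots without
 * being re-entered by an in-guest interrupt handler.
 */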
.global kvm_emulate_mtmsrd
kvm_emulate_mtmsrd:

        SCRATCH_SAVE

        /* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
        LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
        lis     r30, (~(MSR_EE | MSR_RI))@h
        ori     r30, r30, (~(MSR_EE | MSR_RI))@l
        and     r31, r31, r30

        /* OR the EE and RI bits of the source register into the MSR */
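        /* r0 below is a placeholder; the patcher rewrites the register
           field to the source register of the replaced mtmsrd. */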
kvm_emulate_mtmsrd_reg:
        ori     r30, r0, 0
        andi.   r30, r30, (MSR_EE|MSR_RI)
        or      r31, r31, r30

        /* Put MSR back into magic page */
        STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        /* Check if we have to fetch an interrupt */
        lwz     r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
        cmpwi   r31, 0
        beq+    no_check

        /* Check if we may trigger an interrupt */
        andi.   r30, r30, MSR_EE
        beq     no_check

        SCRATCH_RESTORE

        /* Nag hypervisor */
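        /* The tlbsync is a placeholder; the patcher drops the original,
           trapping instruction in here so the hypervisor sees the MSR
           write and can deliver the pending interrupt. */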
kvm_emulate_mtmsrd_orig_ins:
        tlbsync

        b       kvm_emulate_mtmsrd_branch

no_check:

        SCRATCH_RESTORE

        /* Go back to caller */
kvm_emulate_mtmsrd_branch:
        b       .
kvm_emulate_mtmsrd_end:

.global kvm_emulate_mtmsrd_branch_offs
kvm_emulate_mtmsrd_branch_offs:
        .long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_reg_offs
kvm_emulate_mtmsrd_reg_offs:
        .long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_orig_ins_offs
kvm_emulate_mtmsrd_orig_ins_offs:
        .long (kvm_emulate_mtmsrd_orig_ins - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_len
kvm_emulate_mtmsrd_len:
        .long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4
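
/*
 * Each *_offs symbol exports the word (instruction) offset of a patch
 * slot within its template.  The patcher copies *_len words, ORs the
 * real register number into the *_reg slot, stores the replaced
 * instruction at *_orig_ins, and rewrites the "b ." at *_branch into
 * a branch back past the patched site.
 */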


#define MSR_SAFE_BITS (MSR_EE | MSR_CE | MSR_ME | MSR_RI)
#define MSR_CRITICAL_BITS ~MSR_SAFE_BITS
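
/*
 * "Safe" bits may change without the hypervisor having to know right
 * away; a write that flips any other (critical) MSR bit is executed
 * for real so the host notices it.
 */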

.global kvm_emulate_mtmsr
kvm_emulate_mtmsr:

        SCRATCH_SAVE

        /* Fetch old MSR in r31 */
        LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        /* Find the changed bits between old and new MSR */
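        /* As above, r0 in the reg1/reg2 slots is a placeholder for the
           patched-in source register. */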
kvm_emulate_mtmsr_reg1:
        ori     r30, r0, 0
        xor     r31, r30, r31

        /* Check if we really need to do mtmsr */
        LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS)
        and.    r31, r31, r30

        /* No critical bits changed? Maybe we can stay in the guest. */
        beq     maybe_stay_in_guest

do_mtmsr:

        SCRATCH_RESTORE

        /* Just fire off the mtmsr if it's critical */
kvm_emulate_mtmsr_orig_ins:
        mtmsr   r0

        b       kvm_emulate_mtmsr_branch

maybe_stay_in_guest:

        /* Get the target register in r30 */
kvm_emulate_mtmsr_reg2:
        ori     r30, r0, 0

        /* Check if we have to fetch an interrupt */
        lwz     r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
        cmpwi   r31, 0
        beq+    no_mtmsr

        /* Check if we may trigger an interrupt */
        andi.   r31, r30, MSR_EE
        beq     no_mtmsr

        b       do_mtmsr

no_mtmsr:

        /* Put MSR into magic page because we don't call mtmsr */
        STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        SCRATCH_RESTORE

        /* Go back to caller */
kvm_emulate_mtmsr_branch:
        b       .
kvm_emulate_mtmsr_end:

.global kvm_emulate_mtmsr_branch_offs
kvm_emulate_mtmsr_branch_offs:
        .long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg1_offs
kvm_emulate_mtmsr_reg1_offs:
        .long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg2_offs
kvm_emulate_mtmsr_reg2_offs:
        .long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_orig_ins_offs
kvm_emulate_mtmsr_orig_ins_offs:
        .long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_len
kvm_emulate_mtmsr_len:
        .long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4


.global kvm_emulate_wrteei
kvm_emulate_wrteei:

        SCRATCH_SAVE

        /* Fetch old MSR in r31 */
        LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        /* Remove MSR_EE from old MSR */
        li      r30, 0
        ori     r30, r30, MSR_EE
        andc    r31, r31, r30

        /* OR new MSR_EE onto the old MSR */
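        /* The zero immediate below is a placeholder; the patcher copies
           the E bit of the original wrteei into it, so it becomes
           MSR_EE when interrupts are being enabled. */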
kvm_emulate_wrteei_ee:
        ori     r31, r31, 0

        /* Write new MSR value back */
        STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        SCRATCH_RESTORE

        /* Go back to caller */
kvm_emulate_wrteei_branch:
        b       .
kvm_emulate_wrteei_end:

.global kvm_emulate_wrteei_branch_offs
kvm_emulate_wrteei_branch_offs:
        .long (kvm_emulate_wrteei_branch - kvm_emulate_wrteei) / 4

.global kvm_emulate_wrteei_ee_offs
kvm_emulate_wrteei_ee_offs:
        .long (kvm_emulate_wrteei_ee - kvm_emulate_wrteei) / 4

.global kvm_emulate_wrteei_len
kvm_emulate_wrteei_len:
        .long (kvm_emulate_wrteei_end - kvm_emulate_wrteei) / 4


.global kvm_emulate_mtsrin
kvm_emulate_mtsrin:

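        /* mtsrin rS, rB: with address translation enabled the write
           must reach the hypervisor immediately, so we run the
           original, trapping instruction (patched in below); with
           translation off we only update the SR shadow in the magic
           page.  Both r0 operands are placeholders for the patched-in
           rB and rS. */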
        SCRATCH_SAVE

        LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
        andi.   r31, r31, MSR_DR | MSR_IR
        beq     kvm_emulate_mtsrin_reg1

        SCRATCH_RESTORE

kvm_emulate_mtsrin_orig_ins:
        nop
        b       kvm_emulate_mtsrin_branch

kvm_emulate_mtsrin_reg1:
        /* (rB >> 28) << 2: SR number scaled to a word offset into shared->sr[] */
        rlwinm  r30,r0,6,26,29


kvm_emulate_mtsrin_reg2:
        stw     r0, (KVM_MAGIC_PAGE + KVM_MAGIC_SR)(r30)

        SCRATCH_RESTORE

        /* Go back to caller */
kvm_emulate_mtsrin_branch:
        b       .
kvm_emulate_mtsrin_end:

.global kvm_emulate_mtsrin_branch_offs
kvm_emulate_mtsrin_branch_offs:
        .long (kvm_emulate_mtsrin_branch - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg1_offs
kvm_emulate_mtsrin_reg1_offs:
        .long (kvm_emulate_mtsrin_reg1 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg2_offs
kvm_emulate_mtsrin_reg2_offs:
        .long (kvm_emulate_mtsrin_reg2 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_orig_ins_offs
kvm_emulate_mtsrin_orig_ins_offs:
        .long (kvm_emulate_mtsrin_orig_ins - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_len
kvm_emulate_mtsrin_len:
        .long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4