/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#define SHADOW_SLB_ESID(num)    (SLBSHADOW_SAVEAREA + (num * 0x10))
#define SHADOW_SLB_VSID(num)    (SLBSHADOW_SAVEAREA + (num * 0x10) + 0x8)
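
/*
 * Each save area slot in the SLB shadow buffer is 16 bytes: the ESID at
 * offset 0x0 and the VSID at offset 0x8.  UNBOLT_SLB_ENTRY() clears the
 * valid bit of a slot's ESID before we enter the guest, and
 * REBOLT_SLB_ENTRY() sets it again (and reinstalls the entry via slbmte)
 * on the way back out.
 */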
#define UNBOLT_SLB_ENTRY(num) \
        ld      r9, SHADOW_SLB_ESID(num)(r12); \
        /* Invalid? Skip. */; \
        rldicl. r0, r9, 37, 63; \
        beq     slb_entry_skip_ ## num; \
        xoris   r9, r9, SLB_ESID_V@h; \
        std     r9, SHADOW_SLB_ESID(num)(r12); \
slb_entry_skip_ ## num:

#define REBOLT_SLB_ENTRY(num) \
        ld      r10, SHADOW_SLB_ESID(num)(r11); \
        cmpdi   r10, 0; \
        beq     slb_exit_skip_ ## num; \
        oris    r10, r10, SLB_ESID_V@h; \
        ld      r9, SHADOW_SLB_VSID(num)(r11); \
        slbmte  r9, r10; \
        std     r10, SHADOW_SLB_ESID(num)(r11); \
slb_exit_skip_ ## num:

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_handler_trampoline_enter
kvmppc_handler_trampoline_enter:

        /* Required state:
         *
         * MSR = ~(IR|DR)  (instruction and data relocation off)
         * R13 = PACA
         * R1 = host R1
         * R2 = host R2
         * R9 = guest IP
         * R10 = guest MSR
         * all other GPRs = free
         * PACA[KVM_CR] = guest CR
         * PACA[KVM_XER] = guest XER
         */

        mtsrr0  r9
        mtsrr1  r10
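        /*
         * SRR0/SRR1 now hold the guest PC and MSR; the RFI at the end of
         * this trampoline drops straight into the guest with them.
         */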

        /* Activate guest mode, so faults get handled by KVM */
        li      r11, KVM_GUEST_MODE_GUEST
        stb     r11, PACA_KVM_IN_GUEST(r13)

        /* Remove LPAR shadow entries */

#if SLB_NUM_BOLTED == 3

        ld      r12, PACA_SLBSHADOWPTR(r13)

        /* Save off the first entry so we can slbie it later */
        ld      r10, SHADOW_SLB_ESID(0)(r12)
        ld      r11, SHADOW_SLB_VSID(0)(r12)

        /* Remove bolted entries */
        UNBOLT_SLB_ENTRY(0)
        UNBOLT_SLB_ENTRY(1)
        UNBOLT_SLB_ENTRY(2)

#else
#error unknown number of bolted entries
#endif

        /* Flush SLB */

        slbia

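        /*
         * slbia leaves SLB entry 0 alone, so the first bolted entry saved
         * above still has to be invalidated by hand.  Build the RB operand
         * for slbie: the ESID with its low-order bits cleared, plus the
         * class bit taken from the VSID (slbie requires the class to match
         * the entry being invalidated).
         */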
        /* r10 = esid & ESID_MASK */
        rldicr  r10, r10, 0, 35
        /* r10 |= CLASS_BIT(VSID) */
        rldic   r12, r11, 56 - 36, 36
        or      r10, r10, r12
        slbie   r10

        isync

        /* Fill SLB with our shadow */

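        /* r12 = end of the guest SLB copy in the PACA (PACA_KVM_SLB + PACA_KVM_SLB_MAX * 16) */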
        lbz     r12, PACA_KVM_SLB_MAX(r13)
        mulli   r12, r12, 16
        addi    r12, r12, PACA_KVM_SLB
        add     r12, r12, r13

        /* for (r11 = kvm_slb; r11 < kvm_slb + kvm_slb_max * 16; r11 += 16) */
        li      r11, PACA_KVM_SLB
        add     r11, r11, r13

slb_loop_enter:

        ld      r10, 0(r11)

        rldicl. r0, r10, 37, 63
        beq     slb_loop_enter_skip

        ld      r9, 8(r11)
        slbmte  r9, r10

slb_loop_enter_skip:
        addi    r11, r11, 16
        cmpd    cr0, r11, r12
        blt     slb_loop_enter

slb_do_enter:

        /* Enter guest */

        ld      r0, (PACA_KVM_R0)(r13)
        ld      r1, (PACA_KVM_R1)(r13)
        ld      r2, (PACA_KVM_R2)(r13)
        ld      r3, (PACA_KVM_R3)(r13)
        ld      r4, (PACA_KVM_R4)(r13)
        ld      r5, (PACA_KVM_R5)(r13)
        ld      r6, (PACA_KVM_R6)(r13)
        ld      r7, (PACA_KVM_R7)(r13)
        ld      r8, (PACA_KVM_R8)(r13)
        ld      r9, (PACA_KVM_R9)(r13)
        ld      r10, (PACA_KVM_R10)(r13)
        ld      r12, (PACA_KVM_R12)(r13)

        lwz     r11, (PACA_KVM_CR)(r13)
        mtcr    r11

        ld      r11, (PACA_KVM_XER)(r13)
        mtxer   r11

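        /*
         * r11 and r13 have to be restored last: r11 was used as scratch
         * for CR/XER above, and r13 must keep pointing at the PACA until
         * the guest value overwrites it.
         */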
        ld      r11, (PACA_KVM_R11)(r13)
        ld      r13, (PACA_KVM_R13)(r13)

        RFI
kvmppc_handler_trampoline_enter_end:



/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_handler_trampoline_exit
kvmppc_handler_trampoline_exit:

        /* Register usage at this point:
         *
         * SPRG_SCRATCH0     = guest R13
         * R12               = exit handler id
         * R13               = PACA
         * PACA.KVM.SCRATCH0 = guest R12
         * PACA.KVM.SCRATCH1 = guest CR
         *
         */

        /* Save registers */

        std     r0, PACA_KVM_R0(r13)
        std     r1, PACA_KVM_R1(r13)
        std     r2, PACA_KVM_R2(r13)
        std     r3, PACA_KVM_R3(r13)
        std     r4, PACA_KVM_R4(r13)
        std     r5, PACA_KVM_R5(r13)
        std     r6, PACA_KVM_R6(r13)
        std     r7, PACA_KVM_R7(r13)
        std     r8, PACA_KVM_R8(r13)
        std     r9, PACA_KVM_R9(r13)
        std     r10, PACA_KVM_R10(r13)
        std     r11, PACA_KVM_R11(r13)

        /* Restore R1/R2 so we can handle faults */
        ld      r1, PACA_KVM_HOST_R1(r13)
        ld      r2, PACA_KVM_HOST_R2(r13)

        /* Save guest PC and MSR in GPRs */
        mfsrr0  r3
        mfsrr1  r4

        /* Get scratch'ed off registers */
        mfspr   r9, SPRN_SPRG_SCRATCH0
        std     r9, PACA_KVM_R13(r13)

        ld      r8, PACA_KVM_SCRATCH0(r13)
        std     r8, PACA_KVM_R12(r13)

        lwz     r7, PACA_KVM_SCRATCH1(r13)
        stw     r7, PACA_KVM_CR(r13)

        /* Save more register state */

        mfxer   r6
        stw     r6, PACA_KVM_XER(r13)

        mfdar   r5
        mfdsisr r6

        /*
         * In order to easily fetch the instruction that caused the #vmexit,
         * we exploit the fact that the virtual memory layout is still the
         * same here, so we can just load it from the guest's PC address.
         */

        /* We only load the last instruction when it's safe */
        cmpwi   r12, BOOK3S_INTERRUPT_DATA_STORAGE
        beq     ld_last_inst
        cmpwi   r12, BOOK3S_INTERRUPT_PROGRAM
        beq     ld_last_inst

        b       no_ld_last_inst

ld_last_inst:
        /* Save off the guest instruction we're at */

        /* Set guest mode to 'jump over instruction' so if lwz faults
         * we'll just continue at the next IP. */
        li      r9, KVM_GUEST_MODE_SKIP
        stb     r9, PACA_KVM_IN_GUEST(r13)

        /*    1) enable paging for data */
        mfmsr   r9
        ori     r11, r9, MSR_DR                 /* Enable paging for data */
        mtmsr   r11
        /*    2) fetch the instruction */
        li      r0, KVM_INST_FETCH_FAILED       /* In case lwz faults */
        lwz     r0, 0(r3)
        /*    3) disable paging again */
        mtmsr   r9
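        /*
         * If the lwz above did fault, the interrupt handler noticed
         * KVM_GUEST_MODE_SKIP, resumed at the next instruction, and r0
         * still holds KVM_INST_FETCH_FAILED.
         */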

no_ld_last_inst:

        /* Unset guest mode */
        li      r9, KVM_GUEST_MODE_NONE
        stb     r9, PACA_KVM_IN_GUEST(r13)

        /* Restore bolted entries from the shadow, fixing them up along the way */

        /* We don't store anything in entry 0, so we don't need to take care of it */
        slbia
        isync

#if SLB_NUM_BOLTED == 3

        ld      r11, PACA_SLBSHADOWPTR(r13)

        REBOLT_SLB_ENTRY(0)
        REBOLT_SLB_ENTRY(1)
        REBOLT_SLB_ENTRY(2)

#else
#error unknown number of bolted entries
#endif

slb_do_exit:

        /* Register usage at this point:
         *
         * R0         = guest last inst
         * R1         = host R1
         * R2         = host R2
         * R3         = guest PC
         * R4         = guest MSR
         * R5         = guest DAR
         * R6         = guest DSISR
         * R12        = exit handler id
         * R13        = PACA
         * PACA.KVM.* = guest *
         *
         */

        /* RFI into the highmem handler */
        mfmsr   r7
        ori     r7, r7, MSR_IR|MSR_DR|MSR_RI    /* Enable paging and mark interrupts recoverable */
        mtsrr1  r7
        ld      r8, PACA_KVM_VMHANDLER(r13)     /* Highmem handler address */
        mtsrr0  r8

        RFI
kvmppc_handler_trampoline_exit_end: