Merge branch 'virtio' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux...
[pandora-kernel.git] / arch / powerpc / kvm / book3s_rmhandlers.S
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License, version 2, as
4  * published by the Free Software Foundation.
5  *
6  * This program is distributed in the hope that it will be useful,
7  * but WITHOUT ANY WARRANTY; without even the implied warranty of
8  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
9  * GNU General Public License for more details.
10  *
11  * You should have received a copy of the GNU General Public License
12  * along with this program; if not, write to the Free Software
13  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
14  *
15  * Copyright SUSE Linux Products GmbH 2009
16  *
17  * Authors: Alexander Graf <agraf@suse.de>
18  */
19
20 #include <asm/ppc_asm.h>
21 #include <asm/kvm_asm.h>
22 #include <asm/reg.h>
23 #include <asm/page.h>
24 #include <asm/asm-offsets.h>
25
26 #ifdef CONFIG_PPC_BOOK3S_64
27 #include <asm/exception-64s.h>
28 #endif
29
30 /*****************************************************************************
31  *                                                                           *
32  *        Real Mode handlers that need to be in low physical memory          *
33  *                                                                           *
34  ****************************************************************************/
35
36 #if defined(CONFIG_PPC_BOOK3S_64)
37
/*
 * 64-bit: the shadow VCPU lives inside the PACA, which is always
 * reachable via SPRN_SPRG_PACA, so this load cannot fail.
 */
38 #define LOAD_SHADOW_VCPU(reg)                           \
39         mfspr   reg, SPRN_SPRG_PACA
40
41 #define SHADOW_VCPU_OFF         PACA_KVM_SVCPU
/*
 * Kernel MSR with instruction/data relocation (IR/DR) cleared, i.e. a
 * real-mode MSR.  Parenthesized so the expression survives expansion in
 * any surrounding context.
 */
42 #define MSR_NOIRQ               (MSR_KERNEL & ~(MSR_IR | MSR_DR))
/* 64-bit calls go through the dot-prefixed text entry point */
43 #define FUNC(name)              GLUE(.,name)
44
45 #elif defined(CONFIG_PPC_BOOK3S_32)
46
/*
 * 32-bit: the shadow VCPU hangs off the thread struct and may be NULL
 * when no KVM guest is active; in that case restore the scratch state
 * and branch straight to the Linux handler for this vector.
 *
 * NOTE: this variant references \intno, so it is only usable inside
 * the INTERRUPT_TRAMPOLINE macro below.
 */
47 #define LOAD_SHADOW_VCPU(reg)                                           \
48         mfspr   reg, SPRN_SPRG_THREAD;                                  \
49         lwz     reg, THREAD_KVM_SVCPU(reg);                             \
50         /* PPC32 can have a NULL pointer - let's check for that */      \
51         mtspr   SPRN_SPRG_SCRATCH1, r12;        /* Save r12 */          \
52         mfcr    r12;                                                    \
53         cmpwi   reg, 0;                                                 \
54         bne     1f;                                                     \
55         mfspr   reg, SPRN_SPRG_SCRATCH0;                                \
56         mtcr    r12;                                                    \
57         mfspr   r12, SPRN_SPRG_SCRATCH1;                                \
58         b       kvmppc_resume_\intno;                                   \
59 1:;                                                                     \
60         mtcr    r12;                                                    \
61         mfspr   r12, SPRN_SPRG_SCRATCH1;                                \
62         tophys(reg, reg)
63
64 #define SHADOW_VCPU_OFF         0
65 #define MSR_NOIRQ               MSR_KERNEL
66 #define FUNC(name)              name
67
68 #endif
69
/*
 * Generate the real-mode entry stub for one exception vector.
 *
 * Entered straight from the exception vector with all interrupted
 * state still live.  Only SPRG_SCRATCH0 (original r13) plus the
 * shadow-VCPU scratch fields SVCPU_SCRATCH0 (r12) and SVCPU_SCRATCH1
 * (CR) are used as temporary storage.
 *
 * If no guest was running (SVCPU_IN_GUEST == KVM_GUEST_MODE_NONE),
 * everything is restored and control goes to the original Linux
 * handler via kvmppc_resume_\intno.  Otherwise we either skip the
 * faulting instruction (KVM_GUEST_MODE_SKIP) or exit to the highmem
 * handler with r12 = interrupt number.
 */
70 .macro INTERRUPT_TRAMPOLINE intno
71
72 .global kvmppc_trampoline_\intno
73 kvmppc_trampoline_\intno:
74
75         mtspr   SPRN_SPRG_SCRATCH0, r13         /* Save r13 */
76
77         /*
78          * First thing to do is to find out if we're coming
79          * from a KVM guest or a Linux process.
80          *
81          * To distinguish, we check a magic byte in the PACA/current
82          */
83         LOAD_SHADOW_VCPU(r13)
84         PPC_STL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
85         mfcr    r12
86         stw     r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
87         lbz     r12, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
88         cmpwi   r12, KVM_GUEST_MODE_NONE
89         bne     ..kvmppc_handler_hasmagic_\intno
90         /* No KVM guest? Then jump back to the Linux handler! */
91         lwz     r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
92         mtcr    r12
93         PPC_LL  r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
94         mfspr   r13, SPRN_SPRG_SCRATCH0         /* r13 = original r13 */
95         b       kvmppc_resume_\intno            /* Get back original handler */
96
97         /* Now we know we're handling a KVM guest */
98 ..kvmppc_handler_hasmagic_\intno:
99
100         /* Should we just skip the faulting instruction? */
101         cmpwi   r12, KVM_GUEST_MODE_SKIP
102         beq     kvmppc_handler_skip_ins
103
104         /* Let's store which interrupt we're handling */
105         li      r12, \intno
106
107         /* Jump into the SLB exit code that goes to the highmem handler */
108         b       kvmppc_handler_trampoline_exit
109
110 .endm
111
/* Emit one trampoline per interrupt vector that KVM intercepts */
112 INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_SYSTEM_RESET
113 INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_MACHINE_CHECK
114 INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_DATA_STORAGE
115 INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_INST_STORAGE
116 INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_EXTERNAL
117 INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_ALIGNMENT
118 INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_PROGRAM
119 INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_FP_UNAVAIL
120 INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_DECREMENTER
121 INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_SYSCALL
122 INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_TRACE
123 INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_PERFMON
124 INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_ALTIVEC
125
126 /* Those are only available on 64 bit machines */
127
128 #ifdef CONFIG_PPC_BOOK3S_64
129 INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_DATA_SEGMENT
130 INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_INST_SEGMENT
131 INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_VSX
132 #endif
133
134 /*
135  * Bring us back to the faulting code, but skip the
136  * faulting instruction.
137  *
138  * This is a generic exit path from the interrupt
139  * trampolines above.
140  *
141  * Input Registers:
142  *
143  * R12            = free
144  * R13            = Shadow VCPU (PACA)
145  * SVCPU.SCRATCH0 = guest R12
146  * SVCPU.SCRATCH1 = guest CR
147  * SPRG_SCRATCH0  = guest R13
148  *
149  */
150 kvmppc_handler_skip_ins:
151
152         /* Patch the IP to the next instruction */
        /* PowerPC instructions are fixed 4 bytes wide, so SRR0 + 4
         * always points at the instruction after the faulting one. */
153         mfsrr0  r12
154         addi    r12, r12, 4
155         mtsrr0  r12
156
157         /* Clean up all state */
        /* Undo the trampoline's scratch saves: CR, r12, then r13 */
158         lwz     r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
159         mtcr    r12
160         PPC_LL  r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
161         mfspr   r13, SPRN_SPRG_SCRATCH0
162
163         /* And get back into the code */
        /* RFI resumes at SRR0 with the MSR taken from SRR1 */
164         RFI
165
166 /*
167  * This trampoline brings us back to a real mode handler
168  *
169  * Input Registers:
170  *
171  * R5 = SRR0
172  * R6 = SRR1
173  * LR = real-mode IP
174  *
175  */
176 .global kvmppc_handler_lowmem_trampoline
177 kvmppc_handler_lowmem_trampoline:
178
        /* Reload the SRRs the callee expects, then jump to the
         * real-mode target held in LR. */
179         mtsrr0  r5
180         mtsrr1  r6
181         blr
/* End marker — presumably used to compute the size of the region that
 * gets copied to low memory; TODO confirm against the C-side consumer. */
182 kvmppc_handler_lowmem_trampoline_end:
183
184 /*
185  * Call a function in real mode
186  *
187  * Input Registers:
188  *
189  * R3 = function
190  * R4 = MSR
191  * R5 = scratch register
192  *
193  */
194 _GLOBAL(kvmppc_rmcall)
        /* Drop to real mode with interrupts off first, so nothing can
         * clobber SRR0/SRR1 between the mtsrr and the RFI below. */
195         LOAD_REG_IMMEDIATE(r5, MSR_NOIRQ)
196         mtmsr   r5              /* Disable relocation and interrupts, so mtsrr
197                                    doesn't get interrupted */
198         sync
        /* RFI jumps to r3 (target function) with MSR = r4 */
199         mtsrr0  r3
200         mtsrr1  r4
201         RFI
202
/* Offset of the LR save slot in the stack frame (ABI-dependent) */
203 #if defined(CONFIG_PPC_BOOK3S_32)
204 #define STACK_LR        INT_FRAME_SIZE+4
205 #elif defined(CONFIG_PPC_BOOK3S_64)
206 #define STACK_LR        _LINK
207 #endif
208
209 /*
210  * Activate current's external feature (FPU/Altivec/VSX)
211  */
/*
 * Emits kvmppc_load_up_<what>: a wrapper around the kernel's
 * load_up_<what> that runs it with data relocation (DR) and external
 * interrupts (EE) masked, then restores the original MSR.
 *
 * r20 holds the saved MSR across the call; its own previous value is
 * parked in the frame's _NIP slot (the slot is simply reused as
 * scratch space here, it carries no NIP semantics).
 */
212 #define define_load_up(what)                                    \
213                                                                 \
214 _GLOBAL(kvmppc_load_up_ ## what);                               \
215         PPC_STLU r1, -INT_FRAME_SIZE(r1);                       \
216         mflr    r3;                                             \
217         PPC_STL r3, STACK_LR(r1);                               \
218         PPC_STL r20, _NIP(r1);                                  \
219         mfmsr   r20;                                            \
220         LOAD_REG_IMMEDIATE(r3, MSR_DR|MSR_EE);                  \
221         andc    r3,r20,r3;              /* Disable DR,EE */     \
222         mtmsr   r3;                                             \
223         sync;                                                   \
224                                                                 \
225         bl      FUNC(load_up_ ## what);                         \
226                                                                 \
227         mtmsr   r20;                    /* Enable DR,EE */      \
228         sync;                                                   \
229         PPC_LL  r3, STACK_LR(r1);                               \
230         PPC_LL  r20, _NIP(r1);                                  \
231         mtlr    r3;                                             \
232         addi    r1, r1, INT_FRAME_SIZE;                         \
233         blr
234
235 define_load_up(fpu)
236 #ifdef CONFIG_ALTIVEC
237 define_load_up(altivec)
238 #endif
239 #ifdef CONFIG_VSX
240 define_load_up(vsx)
241 #endif
242
/*
 * Link-address-minus-CONFIG_KERNEL_START (i.e. physical/low-memory)
 * offsets of the two real-mode entry points, stored as 32-bit words.
 * Presumably read by C code to locate the handlers at run time —
 * TODO confirm against the consumer.
 */
243 .global kvmppc_trampoline_lowmem
244 kvmppc_trampoline_lowmem:
245         .long kvmppc_handler_lowmem_trampoline - CONFIG_KERNEL_START
246
247 .global kvmppc_trampoline_enter
248 kvmppc_trampoline_enter:
        /* kvmppc_handler_trampoline_enter comes from the included
         * book3s_segment.S below */
249         .long kvmppc_handler_trampoline_enter - CONFIG_KERNEL_START
250
251 #include "book3s_segment.S"