/* arch/powerpc/kvm/book3s_32_sr.S */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.macro LOAD_GUEST_SEGMENTS

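        /*
         * LOAD_GUEST_SEGMENTS switches the 32-bit segmented MMU state over
         * to the guest: the sixteen segment registers are installed from the
         * shadow vcpu and all BATs are cleared, so no host BAT translations
         * remain in effect while the guest runs.  It executes with address
         * translation (MSR[IR]/MSR[DR]) disabled, as required below.
         */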
        /* Required state:
         *
         * MSR = ~IR|DR  (instruction and data address translation disabled)
         * R1 = host R1
         * R2 = host R2
         * R3 = shadow vcpu
         * all other volatile GPRs = free except R4, R6
         * SVCPU[CR]  = guest CR
         * SVCPU[XER] = guest XER
         * SVCPU[CTR] = guest CTR
         * SVCPU[LR]  = guest LR
         */

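        /*
         * XCHG_SR(n) installs segment register n for the guest: the value is
         * loaded from the shadow vcpu's segment register array (offset
         * SVCPU_SR + n*4 from r3) and written with mtsr.  The invocations
         * below cover all sixteen 256MB segments of the 4GB effective
         * address space.
         */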
#define XCHG_SR(n)      lwz     r9, (SVCPU_SR+(n*4))(r3);  \
                        mtsr    n, r9

        XCHG_SR(0)
        XCHG_SR(1)
        XCHG_SR(2)
        XCHG_SR(3)
        XCHG_SR(4)
        XCHG_SR(5)
        XCHG_SR(6)
        XCHG_SR(7)
        XCHG_SR(8)
        XCHG_SR(9)
        XCHG_SR(10)
        XCHG_SR(11)
        XCHG_SR(12)
        XCHG_SR(13)
        XCHG_SR(14)
        XCHG_SR(15)

        /* Clear BATs. */

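        /*
         * KVM_KILL_BAT(n, reg) writes reg (zero here) to the upper and lower
         * halves of both the instruction and data BAT n.  Zeroing the upper
         * words clears the Vs/Vp valid bits, so none of the host's BAT
         * mappings can match while the guest is running.
         */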
#define KVM_KILL_BAT(n, reg)            \
        mtspr   SPRN_IBAT##n##U,reg;    \
        mtspr   SPRN_IBAT##n##L,reg;    \
        mtspr   SPRN_DBAT##n##U,reg;    \
        mtspr   SPRN_DBAT##n##L,reg;    \

        li      r9, 0
        KVM_KILL_BAT(0, r9)
        KVM_KILL_BAT(1, r9)
        KVM_KILL_BAT(2, r9)
        KVM_KILL_BAT(3, r9)

.endm

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

.macro LOAD_HOST_SEGMENTS

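        /*
         * LOAD_HOST_SEGMENTS undoes the guest setup on the way back out: the
         * host BAT values are reloaded from the kernel's save area, the
         * kernel segment registers (0xc-0xf) are rebuilt by hand, and the
         * user segment registers (0x0-0xb) are reloaded for current->mm via
         * switch_mmu_context().  Like the entry path, this runs with
         * translation disabled.
         */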
        /* Register usage at this point:
         *
         * R1         = host R1
         * R2         = host R2
         * R12        = exit handler id
         * R13        = shadow vcpu - SHADOW_VCPU_OFF
         * SVCPU.*    = guest *
         * SVCPU[CR]  = guest CR
         * SVCPU[XER] = guest XER
         * SVCPU[CTR] = guest CTR
         * SVCPU[LR]  = guest LR
         *
         */

        /* Restore BATs */

        /* Guest entry cleared both halves of every BAT, so reload both the
           upper and lower words of each IBAT/DBAT pair from the host's save
           area. */
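        /*
         * KVM_LOAD_BAT(n, reg, RA, RB) reads four words starting at
         * reg + n*16 -- IBATnU, IBATnL, DBATnU, DBATnL in that order -- and
         * writes them back to the corresponding SPRs, so each BAT pair
         * occupies 16 bytes in the save area addressed by reg.
         */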
#define KVM_LOAD_BAT(n, reg, RA, RB)    \
        lwz     RA,(n*16)+0(reg);       \
        lwz     RB,(n*16)+4(reg);       \
        mtspr   SPRN_IBAT##n##U,RA;     \
        mtspr   SPRN_IBAT##n##L,RB;     \
        lwz     RA,(n*16)+8(reg);       \
        lwz     RB,(n*16)+12(reg);      \
        mtspr   SPRN_DBAT##n##U,RA;     \
        mtspr   SPRN_DBAT##n##L,RB;     \

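        /*
         * Point r9 at the kernel's BATS save area; tophys() converts the
         * virtual address to a physical one, which is needed because
         * MSR[IR]/MSR[DR] are still clear here.
         */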
        lis     r9, BATS@ha
        addi    r9, r9, BATS@l
        tophys(r9, r9)
        KVM_LOAD_BAT(0, r9, r10, r11)
        KVM_LOAD_BAT(1, r9, r10, r11)
        KVM_LOAD_BAT(2, r9, r10, r11)
        KVM_LOAD_BAT(3, r9, r10, r11)

        /* Restore Segment Registers */

        /* 0xc - 0xf */

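        /*
         * Rebuild the four kernel segment registers (0xc0000000-0xffffffff)
         * by hand.  mtsrin writes r3 into the segment register selected by
         * the top four bits of r4.  The initial value sets the Kp protection
         * bit (0x20000000) plus the VSID for segment 0xc; each iteration
         * adds 0x111 to the VSID (the per-segment VSID stride used by the
         * 32-bit kernel) and 0x10000000 to the effective address, i.e. one
         * 256MB segment.
         */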
        li      r0, 4
        mtctr   r0
        LOAD_REG_IMMEDIATE(r3, 0x20000000 | (0x111 * 0xc))
        lis     r4, 0xc000
3:      mtsrin  r3, r4
        addi    r3, r3, 0x111     /* increment VSID */
        addis   r4, r4, 0x1000    /* address of next segment */
        bdnz    3b

        /* 0x0 - 0xb */

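        /*
         * Segment registers 0x0-0xb (the user portion of the address space)
         * are reloaded for the host's current mm by switch_mmu_context().
         * r2 holds the host's 'current' task pointer, and tophys() is needed
         * because translation is still off at this point.
         */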
        /* 'current->mm' needs to be in r4 */
        tophys(r4, r2)
        lwz     r4, MM(r4)
        tophys(r4, r4)
        /* This only clobbers r0, r3, r4 and r5 */
        bl      switch_mmu_context

.endm