/* arch/powerpc/kvm/book3s_64_slb.S */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

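/*
 * Each slot in the SLB shadow save area is 0x10 bytes wide: the ESID
 * doubleword at offset 0x0, followed by the VSID doubleword at 0x8.
 */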
#define SHADOW_SLB_ESID(num)    (SLBSHADOW_SAVEAREA + (num * 0x10))
#define SHADOW_SLB_VSID(num)    (SLBSHADOW_SAVEAREA + (num * 0x10) + 0x8)
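
/*
 * UNBOLT_SLB_ENTRY clears the valid (V) bit of a bolted entry's ESID
 * in the SLB shadow, so the entry is dropped while the guest's SLB is
 * loaded; entries whose V bit is already clear are left untouched.
 */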
#define UNBOLT_SLB_ENTRY(num) \
        ld      r9, SHADOW_SLB_ESID(num)(r12); \
        /* Invalid? Skip. */; \
        rldicl. r0, r9, 37, 63; \
        beq     slb_entry_skip_ ## num; \
        xoris   r9, r9, SLB_ESID_V@h; \
        std     r9, SHADOW_SLB_ESID(num)(r12); \
slb_entry_skip_ ## num:
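
/*
 * For illustration, UNBOLT_SLB_ENTRY(0) expands to:
 *
 *      ld      r9, SHADOW_SLB_ESID(0)(r12)
 *      rldicl. r0, r9, 37, 63          (move the V bit into the LSB)
 *      beq     slb_entry_skip_0
 *      xoris   r9, r9, SLB_ESID_V@h    (clear the V bit)
 *      std     r9, SHADOW_SLB_ESID(0)(r12)
 * slb_entry_skip_0:
 *
 * REBOLT_SLB_ENTRY below is the inverse: it skips empty shadow slots,
 * sets the V bit again, reinstalls the entry with slbmte and stores
 * the now-valid ESID back into the shadow.
 */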

#define REBOLT_SLB_ENTRY(num) \
        ld      r10, SHADOW_SLB_ESID(num)(r11); \
        cmpdi   r10, 0; \
        beq     slb_exit_skip_ ## num; \
        oris    r10, r10, SLB_ESID_V@h; \
        ld      r9, SHADOW_SLB_VSID(num)(r11); \
        slbmte  r9, r10; \
        std     r10, SHADOW_SLB_ESID(num)(r11); \
slb_exit_skip_ ## num:

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.macro LOAD_GUEST_SEGMENTS

        /* Required state:
         *
         * MSR = ~IR|DR
         * R13 = PACA
         * R1 = host R1
         * R2 = host R2
         * R3 = shadow vcpu
         * all other volatile GPRs = free
         * SVCPU[CR]  = guest CR
         * SVCPU[XER] = guest XER
         * SVCPU[CTR] = guest CTR
         * SVCPU[LR]  = guest LR
         */

        /* Remove LPAR shadow entries */

#if SLB_NUM_BOLTED == 3

        ld      r12, PACA_SLBSHADOWPTR(r13)

        /* Save off the first entry so we can slbie it later */
        ld      r10, SHADOW_SLB_ESID(0)(r12)
        ld      r11, SHADOW_SLB_VSID(0)(r12)

        /* Remove bolted entries */
        UNBOLT_SLB_ENTRY(0)
        UNBOLT_SLB_ENTRY(1)
        UNBOLT_SLB_ENTRY(2)

#else
#error unknown number of bolted entries
#endif

        /* Flush SLB */

        slbia

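        /*
         * slbia does not invalidate SLB entry 0, so the bolted entry we
         * saved in r10/r11 above still needs an explicit slbie. The slbie
         * operand wants the ESID combined with the class bit taken from
         * the VSID.
         */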
        /* r10 = esid & ESID_MASK */
        rldicr  r10, r10, 0, 35
        /* r10 |= CLASS_BIT(VSID) */
        rldic   r12, r11, 56 - 36, 36
        or      r10, r10, r12
        slbie   r10

        isync

        /* Fill SLB with our shadow */

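        /* r12 = end of used guest SLB entries: svcpu + SVCPU_SLB + slb_max * 16 */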
        lbz     r12, SVCPU_SLB_MAX(r3)
        mulli   r12, r12, 16
        addi    r12, r12, SVCPU_SLB
        add     r12, r12, r3

        /* for (r11 = kvm_slb; r11 < kvm_slb + kvm_slb_size; r11+=slb_entry) */
        li      r11, SVCPU_SLB
        add     r11, r11, r3

slb_loop_enter:

        ld      r10, 0(r11)

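        /* Test the entry's V (valid) bit; skip slots that are not valid */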
        rldicl. r0, r10, 37, 63
        beq     slb_loop_enter_skip

        ld      r9, 8(r11)
        slbmte  r9, r10

slb_loop_enter_skip:
        addi    r11, r11, 16
        cmpd    cr0, r11, r12
        blt     slb_loop_enter

slb_do_enter:

.endm

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

.macro LOAD_HOST_SEGMENTS

        /* Register usage at this point:
         *
         * R1         = host R1
         * R2         = host R2
         * R12        = exit handler id
         * R13        = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
         * SVCPU.*    = guest *
         * SVCPU[CR]  = guest CR
         * SVCPU[XER] = guest XER
         * SVCPU[CTR] = guest CTR
         * SVCPU[LR]  = guest LR
         *
         */

        /* Restore the bolted entries from the shadow, setting their V bits again along the way */

        /* We don't store anything in entry 0, so we don't need to take care of it */
        slbia
        isync

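        /*
         * The guest's SLB entries are gone after the slbia above; the
         * REBOLT_SLB_ENTRY invocations below put the host's bolted
         * entries back from the shadow.
         */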
#if SLB_NUM_BOLTED == 3

        ld      r11, PACA_SLBSHADOWPTR(r13)

        REBOLT_SLB_ENTRY(0)
        REBOLT_SLB_ENTRY(1)
        REBOLT_SLB_ENTRY(2)

#else
#error unknown number of bolted entries
#endif

slb_do_exit:

.endm