/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

/* Hypercall entry point. Will be patched with device tree instructions. */

.global kvm_hypercall_start
kvm_hypercall_start:
	li	r3, -1
	nop
	nop
	nop
	blr
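
/*
 * Unpatched, the stub above simply returns -1 ("not implemented").
 * At boot the guest setup code copies the hypercall sequence the host
 * advertises via the device tree over this stub.
 */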

#define KVM_MAGIC_PAGE		(-4096)
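
/*
 * The magic page sits at the top of the guest's effective address
 * space (-4096). The accesses below use r0 as the base register, which
 * reads as a literal 0 in D-form addressing, so each offset is an
 * absolute address inside that page.
 */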

#ifdef CONFIG_64BIT
#define LL64(reg, offs, reg2)	ld	reg, (offs)(reg2)
#define STL64(reg, offs, reg2)	std	reg, (offs)(reg2)
#else
#define LL64(reg, offs, reg2)	lwz	reg, (offs + 4)(reg2)
#define STL64(reg, offs, reg2)	stw	reg, (offs + 4)(reg2)
#endif
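
/*
 * The shared-page fields are 64 bits wide even on 32-bit kernels;
 * offs + 4 picks the low word of the big-endian value, which is all a
 * 32-bit MSR needs.
 */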

#define SCRATCH_SAVE						\
	/* Enable critical section. We are critical if		\
	   shared->critical == r1 */				\
	STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);	\
								\
	/* Save state */					\
	PPC_STL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);	\
	PPC_STL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);	\
	mfcr	r31;						\
	stw	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);

#define SCRATCH_RESTORE						\
	/* Restore state */					\
	PPC_LL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);	\
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);	\
	mtcr	r30;						\
	PPC_LL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);	\
								\
	/* Disable critical section. We are critical if		\
	   shared->critical == r1 and r2 is always != r1 */	\
	STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);
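
/*
 * While shared->critical == r1 the host defers interrupt injection, so
 * the scratch fields above cannot be clobbered underneath us. Storing
 * r2 (never equal to r1, the stack pointer) ends the critical section.
 */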

.global kvm_emulate_mtmsrd
kvm_emulate_mtmsrd:

	SCRATCH_SAVE

	/* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	lis	r30, (~(MSR_EE | MSR_RI))@h
	ori	r30, r30, (~(MSR_EE | MSR_RI))@l
	and	r31, r31, r30

	/* OR the register's (MSR_EE|MSR_RI) on MSR */
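	/*
	 * r0 below is only a placeholder: the patching code uses
	 * kvm_emulate_mtmsrd_reg_offs to substitute the source register
	 * of the original mtmsrd instruction.
	 */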
kvm_emulate_mtmsrd_reg:
	andi.	r30, r0, (MSR_EE|MSR_RI)
	or	r31, r31, r30

	/* Put MSR back into magic page */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_check

	/* Check if we may trigger an interrupt */
	andi.	r30, r30, MSR_EE
	beq	no_check

	SCRATCH_RESTORE

	/* Nag hypervisor */
	tlbsync

	b	kvm_emulate_mtmsrd_branch

no_check:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsrd_branch:
	b	.
kvm_emulate_mtmsrd_end:
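
/*
 * Word offsets (instruction indices) into the code above. The patching
 * code uses them to rewrite the placeholder register and branch
 * instructions and to know how many instructions to copy.
 */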

.global kvm_emulate_mtmsrd_branch_offs
kvm_emulate_mtmsrd_branch_offs:
	.long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_reg_offs
kvm_emulate_mtmsrd_reg_offs:
	.long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_len
kvm_emulate_mtmsrd_len:
	.long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4

#define MSR_SAFE_BITS (MSR_EE | MSR_CE | MSR_ME | MSR_RI)
#define MSR_CRITICAL_BITS ~MSR_SAFE_BITS
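
/*
 * EE, CE, ME and RI may be flipped lazily in the magic-page MSR copy;
 * a write that changes any other bit must go through a real mtmsr so
 * the hypervisor observes it.
 */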

.global kvm_emulate_mtmsr
kvm_emulate_mtmsr:

	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Find the changed bits between old and new MSR */
kvm_emulate_mtmsr_reg1:
	xor	r31, r0, r31

	/* Check if we need to really do mtmsr */
	LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS)
	and.	r31, r31, r30

	/* No critical bits changed? Maybe we can stay in the guest. */
	beq	maybe_stay_in_guest

do_mtmsr:

	SCRATCH_RESTORE

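	/*
	 * The original mtmsr instruction is patched in over the
	 * placeholder below (via kvm_emulate_mtmsr_orig_ins_offs), so
	 * this path performs the real, trapping MSR write.
	 */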
	/* Just fire off the mtmsr if it's critical */
kvm_emulate_mtmsr_orig_ins:
	mtmsr	r0

	b	kvm_emulate_mtmsr_branch

maybe_stay_in_guest:

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_mtmsr

	/* Check if we may trigger an interrupt */
kvm_emulate_mtmsr_reg2:
	andi.	r31, r0, MSR_EE
	beq	no_mtmsr

	b	do_mtmsr

no_mtmsr:

	/* Put MSR into magic page because we don't call mtmsr */
kvm_emulate_mtmsr_reg3:
	STL64(r0, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsr_branch:
	b	.
kvm_emulate_mtmsr_end:

.global kvm_emulate_mtmsr_branch_offs
kvm_emulate_mtmsr_branch_offs:
	.long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg1_offs
kvm_emulate_mtmsr_reg1_offs:
	.long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg2_offs
kvm_emulate_mtmsr_reg2_offs:
	.long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg3_offs
kvm_emulate_mtmsr_reg3_offs:
	.long (kvm_emulate_mtmsr_reg3 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_orig_ins_offs
kvm_emulate_mtmsr_orig_ins_offs:
	.long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_len
kvm_emulate_mtmsr_len:
	.long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4