2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Authors: Rusty Russell <rusty@rustcorp.com.au>
4 * Christoffer Dall <c.dall@virtualopensystems.com>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
20 #include <linux/kvm_host.h>
21 #include <asm/kvm_arm.h>
22 #include <asm/kvm_host.h>
23 #include <asm/kvm_emulate.h>
24 #include <asm/kvm_coproc.h>
25 #include <asm/cacheflush.h>
26 #include <asm/cputype.h>
27 #include <trace/events/kvm.h>
33 /******************************************************************************
34 * Co-processor emulation
35 *****************************************************************************/
/*
 * Trap handler for guest accesses to the CP10 ID registers: injects an
 * undefined-instruction exception into the guest.
 * NOTE(review): braces/return are elided in this chunk view; presumably
 * the handler just injects #UND and returns 1 to resume the guest.
 */
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
	kvm_inject_undefined(vcpu);
/*
 * Trap handler for guest accesses to coprocessors 0-13: the access is
 * refused by injecting an undefined-instruction exception into the guest.
 */
int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
	/*
	 * We can get here, if the host has been built without VFPv3 support,
	 * but the guest attempted a floating point operation.
	 */
	kvm_inject_undefined(vcpu);
/*
 * Trap handler for CP14 (debug) load/store accesses (LDC/STC): not
 * emulated, so the guest receives an undefined-instruction exception.
 */
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
	kvm_inject_undefined(vcpu);
/*
 * Trap handler for CP14 (debug) MCR/MRC accesses: not emulated, so the
 * guest receives an undefined-instruction exception.
 */
int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
	kvm_inject_undefined(vcpu);
/* See note at ARM ARM B1.14.4 */
/*
 * Emulate the guest's data cache set/way maintenance operations
 * (DC{C,I,CI}SW).  These are write-only operations; a read is bounced
 * back via read_from_write_only().  All other physical CPUs are marked
 * as needing a dcache flush before they next run this VM.
 * NOTE(review): several lines (locals such as 'cpu'/'val', the switch
 * header and the function braces) are elided in this chunk view.
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			const struct coproc_params *p,
			const struct coproc_reg *r)
		return read_from_write_only(vcpu, p);
	/* Flag every pcpu as dirty, then clear the one we are running on. */
	cpumask_setall(&vcpu->arch.require_dcache_flush);
	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
	/* If we were already preempted, take the long way around */
	if (cpu != vcpu->arch.last_pcpu) {
	/* Value the guest wrote: the set/way argument for the cache op. */
	val = *vcpu_reg(vcpu, p->Rt1);
	case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
		asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
		asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
/*
 * We could trap ID_DFR0 and tell the guest we don't support performance
 * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
 * NAKed, so it will read the PMCR anyway.
 *
 * Therefore we tell the guest we have 0 counters. Unfortunately, we
 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 * all PM registers, which doesn't crash the guest kernel at least.
 */
static bool pm_fake(struct kvm_vcpu *vcpu,
		    const struct coproc_params *p,
		    const struct coproc_reg *r)
		return ignore_write(vcpu, p);	/* WI: writes are discarded */
		return read_zero(vcpu, p);	/* RAZ: reads return zero */
/* Every PMU register uses the single RAZ/WI implementation above. */
#define access_pmcr pm_fake
#define access_pmcntenset pm_fake
#define access_pmcntenclr pm_fake
#define access_pmovsr pm_fake
#define access_pmselr pm_fake
#define access_pmceid0 pm_fake
#define access_pmceid1 pm_fake
#define access_pmccntr pm_fake
#define access_pmxevtyper pm_fake
#define access_pmxevcntr pm_fake
#define access_pmuserenr pm_fake
#define access_pmintenset pm_fake
#define access_pmintenclr pm_fake
/* Architected CP15 registers.
 * Important: Must be sorted ascending by CRn, CRM, Op1, Op2
 * (kvm_coproc_table_init() BUG()s on any ordering violation).
 */
static const struct coproc_reg cp15_regs[] = {
	/* CSSELR: swapped by interrupt.S. */
	{ CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32,
			NULL, reset_unknown, c0_CSSELR },
	/* TTBR0/TTBR1: swapped by interrupt.S. */
	{ CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
	{ CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
	/* TTBCR: swapped by interrupt.S. */
	{ CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_val, c2_TTBCR, 0x00000000 },
	/* DACR: swapped by interrupt.S. */
	{ CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32,
			NULL, reset_unknown, c3_DACR },
	/* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */
	{ CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32,
			NULL, reset_unknown, c5_DFSR },
	{ CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32,
			NULL, reset_unknown, c5_IFSR },
	{ CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32,
			NULL, reset_unknown, c5_ADFSR },
	{ CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32,
			NULL, reset_unknown, c5_AIFSR },
	/* DFAR/IFAR: swapped by interrupt.S. */
	{ CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32,
			NULL, reset_unknown, c6_DFAR },
	{ CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_unknown, c6_IFAR },
	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw},
	{ CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw},
	{ CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw},
	/*
	 * Dummy performance monitor implementation.
	 */
	{ CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr},
	/* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */
	{ CRn(10), CRm( 2), Op1( 0), Op2( 0), is32,
			NULL, reset_unknown, c10_PRRR},
	{ CRn(10), CRm( 2), Op1( 0), Op2( 1), is32,
			NULL, reset_unknown, c10_NMRR},
	/* VBAR: swapped by interrupt.S. */
	{ CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
			NULL, reset_val, c12_VBAR, 0x00000000 },
	/* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
	{ CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
			NULL, reset_val, c13_CID, 0x00000000 },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_unknown, c13_TID_URW },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 3), is32,
			NULL, reset_unknown, c13_TID_URO },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 4), is32,
			NULL, reset_unknown, c13_TID_PRIV },
/* Target specific emulation tables */
static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];

/* Register a per-CPU-target coproc table; indexed by table->target. */
void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
	target_tables[table->target] = table;
/* Get specific register table for this target. */
/*
 * NOTE(review): the lines storing the entry count through *num and
 * returning table->table are elided in this chunk view.
 */
static const struct coproc_reg *get_target_table(unsigned target, size_t *num)
	struct kvm_coproc_target_table *table;

	/* Table previously registered via kvm_register_target_coproc_table() */
	table = target_tables[target];
/*
 * Linear scan of @table for an entry matching the decoded access in
 * @params: access size (32/64-bit), CRn, CRm, Op1 and Op2 must all agree.
 * NOTE(review): the 'continue' bodies, the matching-entry return and the
 * not-found return are elided in this chunk view.
 */
static const struct coproc_reg *find_reg(const struct coproc_params *params,
					 const struct coproc_reg table[],
	for (i = 0; i < num; i++) {
		const struct coproc_reg *r = &table[i];

		/* Every field must match, otherwise keep scanning. */
		if (params->is_64bit != r->is_64)
		if (params->CRn != r->CRn)
		if (params->CRm != r->CRm)
		if (params->Op1 != r->Op1)
		if (params->Op2 != r->Op2)
/*
 * Dispatch a decoded CP15 access: look the register up in the
 * target-specific table first, then in the generic cp15_regs table, and
 * invoke its accessor.  On success the trapped instruction is skipped;
 * on failure (or no accessor) the guest gets #UND and an error is logged.
 */
static int emulate_cp15(struct kvm_vcpu *vcpu,
			const struct coproc_params *params)
	const struct coproc_reg *table, *r;

	trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn,
				   params->CRm, params->Op2, params->is_write);

	table = get_target_table(vcpu->arch.target, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));

	/* If we don't have an accessor, we should never get here! */
	if (likely(r->access(vcpu, params, r))) {
		/* Skip instruction, since it was emulated */
		kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
	/* If access function fails, it should complain. */
	kvm_err("Unsupported guest CP15 access at: %08x\n",
	print_cp_instr(params);
	kvm_inject_undefined(vcpu);
297 * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
298 * @vcpu: The VCPU pointer
299 * @run: The kvm_run struct
301 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
303 struct coproc_params params;
305 params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
306 params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
307 params.is_write = ((vcpu->arch.hsr & 1) == 0);
308 params.is_64bit = true;
310 params.Op1 = (vcpu->arch.hsr >> 16) & 0xf;
312 params.Rt2 = (vcpu->arch.hsr >> 10) & 0xf;
315 return emulate_cp15(vcpu, ¶ms);
/*
 * Walk @table and invoke each entry's reset handler to put the vcpu's
 * shadow cp15 register to its architecturally defined reset value.
 * NOTE(review): the loop-variable declaration and the NULL-reset guard
 * appear to be elided in this chunk view.
 */
static void reset_coproc_regs(struct kvm_vcpu *vcpu,
			      const struct coproc_reg *table, size_t num)
	for (i = 0; i < num; i++)
		table[i].reset(vcpu, &table[i]);
329 * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
330 * @vcpu: The VCPU pointer
331 * @run: The kvm_run struct
333 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
335 struct coproc_params params;
337 params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
338 params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
339 params.is_write = ((vcpu->arch.hsr & 1) == 0);
340 params.is_64bit = false;
342 params.CRn = (vcpu->arch.hsr >> 10) & 0xf;
343 params.Op1 = (vcpu->arch.hsr >> 14) & 0x7;
344 params.Op2 = (vcpu->arch.hsr >> 17) & 0x7;
347 return emulate_cp15(vcpu, ¶ms);
/*
 * One-time init sanity check: the generic cp15_regs table must be
 * strictly sorted (no duplicates), since lookup relies on that ordering.
 */
void kvm_coproc_table_init(void)
	/* Make sure tables are unique and in order. */
	for (i = 1; i < ARRAY_SIZE(cp15_regs); i++)
		BUG_ON(cmp_reg(&cp15_regs[i-1], &cp15_regs[i]) >= 0);
/**
 * kvm_reset_coprocs - sets cp15 registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
	const struct coproc_reg *table;

	/* Catch someone adding a register without putting in reset entry. */
	memset(vcpu->arch.cp15, 0x42, sizeof(vcpu->arch.cp15));

	/* Generic chip reset first (so target could override). */
	reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));

	table = get_target_table(vcpu->arch.target, &num);
	reset_coproc_regs(vcpu, table, num);

	/* Any slot still holding the 0x42 poison was never reset: bug. */
	for (num = 1; num < NR_CP15_REGS; num++)
		if (vcpu->arch.cp15[num] == 0x42424242)
			panic("Didn't reset vcpu->arch.cp15[%zi]", num);