/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>

#include "trace.h"

extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];

static pgd_t *boot_hyp_pgd;
static pgd_t *hyp_pgd;
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

static void *init_bounce_page;
static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;

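/*
 * Invalidate the stage-2 TLB entries covering the given IPA for this
 * VM's VMID by trapping into HYP mode.
 */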
static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}

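/*
 * Pre-fill a cache of page-table pages so that allocations made later,
 * while holding the mmu_lock, never need to sleep.
 */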
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  int min, int max)
{
	void *page;

	BUG_ON(max > KVM_NR_MEM_OBJS);
	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < max) {
		page = (void *)__get_free_page(PGALLOC_GFP);
		if (!page)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = page;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	BUG_ON(!mc || !mc->nobjs);
	p = mc->objects[--mc->nobjs];
	return p;
}

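/*
 * Page-table teardown helpers. Each table page's refcount tracks the
 * number of entries it contains; a count of 1 (the initial reference)
 * means the table is empty and can be freed.
 */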
static void clear_pud_entry(pud_t *pud)
{
	pmd_t *pmd_table = pmd_offset(pud, 0);
	pud_clear(pud);
	pmd_free(NULL, pmd_table);
	put_page(virt_to_page(pud));
}

static void clear_pmd_entry(pmd_t *pmd)
{
	pte_t *pte_table = pte_offset_kernel(pmd, 0);
	pmd_clear(pmd);
	pte_free_kernel(NULL, pte_table);
	put_page(virt_to_page(pmd));
}

static bool pmd_empty(pmd_t *pmd)
{
	struct page *pmd_page = virt_to_page(pmd);
	return page_count(pmd_page) == 1;
}

static void clear_pte_entry(pte_t *pte)
{
	if (pte_present(*pte)) {
		kvm_set_pte(pte, __pte(0));
		put_page(virt_to_page(pte));
	}
}

static bool pte_empty(pte_t *pte)
{
	struct page *pte_page = virt_to_page(pte);
	return page_count(pte_page) == 1;
}

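/*
 * Walk the page tables rooted at pgdp and remove every mapping in the
 * range [start, start + size), freeing any table that becomes empty
 * along the way. Used for both HYP and stage-2 page tables.
 */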
static void unmap_range(pgd_t *pgdp, unsigned long long start, u64 size)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long long addr = start, end = start + size;
	u64 range;

	while (addr < end) {
		pgd = pgdp + pgd_index(addr);
		pud = pud_offset(pgd, addr);
		if (pud_none(*pud)) {
			addr += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			addr += PMD_SIZE;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		clear_pte_entry(pte);
		range = PAGE_SIZE;

		/* If we emptied the pte, walk back up the ladder */
		if (pte_empty(pte)) {
			clear_pmd_entry(pmd);
			range = PMD_SIZE;
			if (pmd_empty(pmd)) {
				clear_pud_entry(pud);
				range = PUD_SIZE;
			}
		}

		addr += range;
	}
}

/**
 * free_boot_hyp_pgd - free HYP boot page tables
 *
 * Free the HYP boot page tables. The bounce page is also freed.
 */
void free_boot_hyp_pgd(void)
{
	mutex_lock(&kvm_hyp_pgd_mutex);

	if (boot_hyp_pgd) {
		unmap_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
		unmap_range(boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
		kfree(boot_hyp_pgd);
		boot_hyp_pgd = NULL;
	}

	if (hyp_pgd)
		unmap_range(hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);

	kfree(init_bounce_page);
	init_bounce_page = NULL;

	mutex_unlock(&kvm_hyp_pgd_mutex);
}

/**
 * free_hyp_pgds - free Hyp-mode page tables
 *
 * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
 * therefore contains either mappings in the kernel memory area (above
 * PAGE_OFFSET), or device mappings in the vmalloc range (from
 * VMALLOC_START to VMALLOC_END).
 *
 * boot_hyp_pgd should only map two pages for the init code.
 */
void free_hyp_pgds(void)
{
	unsigned long addr;

	free_boot_hyp_pgd();

	mutex_lock(&kvm_hyp_pgd_mutex);

	if (hyp_pgd) {
		for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
			unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
		for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
			unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);

		kfree(hyp_pgd);
		hyp_pgd = NULL;
	}

	mutex_unlock(&kvm_hyp_pgd_mutex);
}

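/*
 * Populate last-level HYP page-table entries for [start, end) with a
 * contiguous run of pages starting at pfn, taking a reference on the
 * pte table page for every entry written.
 */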
static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
				    unsigned long end, unsigned long pfn,
				    pgprot_t prot)
{
	pte_t *pte;
	unsigned long addr;

	addr = start;
	do {
		pte = pte_offset_kernel(pmd, addr);
		kvm_set_pte(pte, pfn_pte(pfn, prot));
		get_page(virt_to_page(pte));
		kvm_flush_dcache_to_poc(pte, sizeof(*pte));
		pfn++;
	} while (addr += PAGE_SIZE, addr != end);
}

static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
				   unsigned long end, unsigned long pfn,
				   pgprot_t prot)
{
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, next;

	addr = start;
	do {
		pmd = pmd_offset(pud, addr);

		BUG_ON(pmd_sect(*pmd));

		if (pmd_none(*pmd)) {
			pte = pte_alloc_one_kernel(NULL, addr);
			if (!pte) {
				kvm_err("Cannot allocate Hyp pte\n");
				return -ENOMEM;
			}
			pmd_populate_kernel(NULL, pmd, pte);
			get_page(virt_to_page(pmd));
			kvm_flush_dcache_to_poc(pmd, sizeof(*pmd));
		}

		next = pmd_addr_end(addr, end);

		create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);

	return 0;
}

static int __create_hyp_mappings(pgd_t *pgdp,
				 unsigned long start, unsigned long end,
				 unsigned long pfn, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr, next;
	int err = 0;

	mutex_lock(&kvm_hyp_pgd_mutex);
	addr = start & PAGE_MASK;
	end = PAGE_ALIGN(end);
	do {
		pgd = pgdp + pgd_index(addr);
		pud = pud_offset(pgd, addr);

		if (pud_none_or_clear_bad(pud)) {
			pmd = pmd_alloc_one(NULL, addr);
			if (!pmd) {
				kvm_err("Cannot allocate Hyp pmd\n");
				err = -ENOMEM;
				goto out;
			}
			pud_populate(NULL, pud, pmd);
			get_page(virt_to_page(pud));
			kvm_flush_dcache_to_poc(pud, sizeof(*pud));
		}

		next = pgd_addr_end(addr, end);
		err = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
		if (err)
			goto out;
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);
out:
	mutex_unlock(&kvm_hyp_pgd_mutex);
	return err;
}

/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from:	The virtual kernel start address of the range
 * @to:		The virtual kernel end address of the range (exclusive)
 *
 * The same virtual address as the kernel virtual address is also used
 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
 * physical pages.
 */
int create_hyp_mappings(void *from, void *to)
{
	unsigned long phys_addr = virt_to_phys(from);
	unsigned long start = KERN_TO_HYP((unsigned long)from);
	unsigned long end = KERN_TO_HYP((unsigned long)to);

	/* Check for a valid kernel memory mapping */
	if (!virt_addr_valid(from) || !virt_addr_valid(to - 1))
		return -EINVAL;

	return __create_hyp_mappings(hyp_pgd, start, end,
				     __phys_to_pfn(phys_addr), PAGE_HYP);
}

/**
 * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
 * @from:	The kernel start VA of the range
 * @to:		The kernel end VA of the range (exclusive)
 * @phys_addr:	The physical start address which gets mapped
 *
 * The resulting HYP VA is the same as the kernel VA, modulo
 * HYP_PAGE_OFFSET.
 */
int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
{
	unsigned long start = KERN_TO_HYP((unsigned long)from);
	unsigned long end = KERN_TO_HYP((unsigned long)to);

	/* Check for a valid kernel IO mapping */
	if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
		return -EINVAL;

	return __create_hyp_mappings(hyp_pgd, start, end,
				     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
}

/**
 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Allocates the 1st level table only of size defined by S2_PGD_ORDER (can
 * support either full 40-bit input addresses or limited to 32-bit input
 * addresses). Clears the allocated pages.
 *
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_alloc_stage2_pgd(struct kvm *kvm)
{
	pgd_t *pgd;

	if (kvm->arch.pgd != NULL) {
		kvm_err("kvm_arch already initialized?\n");
		return -EINVAL;
	}

	pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, S2_PGD_ORDER);
	if (!pgd)
		return -ENOMEM;

	memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
	kvm_clean_pgd(pgd);
	kvm->arch.pgd = pgd;
	return 0;
}

/**
 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
 * @kvm:   The VM pointer
 * @start: The intermediate physical base address of the range to unmap
 * @size:  The size of the area to unmap
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
	unmap_range(kvm->arch.pgd, start, size);
}

/**
 * kvm_free_stage2_pgd - free all stage-2 tables
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
 * underlying level-2 and level-3 tables before freeing the actual level-1 table
 * and setting the struct pointer to NULL.
 *
 * Note we don't need locking here as this is only called when the VM is
 * destroyed, which can only be done once.
 */
void kvm_free_stage2_pgd(struct kvm *kvm)
{
	if (kvm->arch.pgd == NULL)
		return;

	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
	free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
	kvm->arch.pgd = NULL;
}

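/*
 * Install a single stage-2 pte at the given IPA, allocating intermediate
 * tables from the memory cache as needed. With a NULL cache (the MMU
 * notifier path) missing tables are not allocated and the update is
 * silently dropped.
 */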
static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			  phys_addr_t addr, const pte_t *new_pte, bool iomap)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, old_pte;

	/* Create 2nd stage page table mapping - Level 1 */
	pgd = kvm->arch.pgd + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pmd = mmu_memory_cache_alloc(cache);
		pud_populate(NULL, pud, pmd);
		get_page(virt_to_page(pud));
	}

	pmd = pmd_offset(pud, addr);

	/* Create 2nd stage page table mapping - Level 2 */
	if (pmd_none(*pmd)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pte = mmu_memory_cache_alloc(cache);
		kvm_clean_pte(pte);
		pmd_populate_kernel(NULL, pmd, pte);
		get_page(virt_to_page(pmd));
	}

	pte = pte_offset_kernel(pmd, addr);

	if (iomap && pte_present(*pte))
		return -EFAULT;

	/* Create 2nd stage page table mapping - Level 3 */
	old_pte = *pte;
	kvm_set_pte(pte, *new_pte);
	if (pte_present(old_pte))
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	else
		get_page(virt_to_page(pte));

	return 0;
}

/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm:	The KVM pointer
 * @guest_ipa:	The IPA at which to insert the mapping
 * @pa:		The physical address of the device
 * @size:	The size of the mapping
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size)
{
	phys_addr_t addr, end;
	int ret = 0;
	unsigned long pfn;
	struct kvm_mmu_memory_cache cache = { 0, };

	end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
	pfn = __phys_to_pfn(pa);

	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
		kvm_set_s2pte_writable(&pte);

		ret = mmu_topup_memory_cache(&cache, 2, 2);
		if (ret)
			goto out;
		spin_lock(&kvm->mmu_lock);
		ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
		spin_unlock(&kvm->mmu_lock);
		if (ret)
			goto out;

		pfn++;
	}

out:
	mmu_free_memory_cache(&cache);
	return ret;
}

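/*
 * Handle a stage-2 fault on normal memory: pin the backing page with
 * gfn_to_pfn_prot(), then install the new stage-2 pte under mmu_lock,
 * unless an MMU notifier invalidation raced with us.
 */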
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
			  gfn_t gfn, struct kvm_memory_slot *memslot,
			  unsigned long fault_status)
{
	pte_t new_pte;
	pfn_t pfn;
	int ret;
	bool write_fault, writable;
	unsigned long mmu_seq;
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;

	write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
	if (fault_status == FSC_PERM && !write_fault) {
		kvm_err("Unexpected L2 read permission error\n");
		return -EFAULT;
	}

	/* We need minimum second+third level pages */
	ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS);
	if (ret)
		return ret;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	/*
	 * Ensure the read of mmu_notifier_seq happens before we call
	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
	 * the page we just got a reference to gets unmapped before we have a
	 * chance to grab the mmu_lock, which ensure that if the page gets
	 * unmapped afterwards, the call to kvm_unmap_hva will take it away
	 * from us again properly. This smp_rmb() interacts with the smp_wmb()
	 * in kvm_mmu_notifier_invalidate_<page|range_end>.
	 */
	smp_rmb();

	pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write_fault, &writable);
	if (is_error_pfn(pfn))
		return -EFAULT;

	new_pte = pfn_pte(pfn, PAGE_S2);
	coherent_icache_guest_page(vcpu->kvm, gfn);

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
		goto out_unlock;
	if (writable) {
		kvm_set_s2pte_writable(&new_pte);
		kvm_set_pfn_dirty(pfn);
	}
	stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}

/**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu:	the VCPU pointer
 * @run:	the kvm_run structure
 *
 * Any abort that gets to the host is almost guaranteed to be caused by a
 * missing second stage translation table entry, which can mean that either the
 * guest simply needs more memory and we must allocate an appropriate page or it
 * can mean that the guest tried to access I/O memory, which is emulated by user
 * space. The distinction is based on the IPA causing the fault and whether this
 * memory region has been registered as standard RAM by user space.
 */
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long fault_status;
	phys_addr_t fault_ipa;
	struct kvm_memory_slot *memslot;
	bool is_iabt;
	gfn_t gfn;
	int ret, idx;

	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);

	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
			      kvm_vcpu_get_hfar(vcpu), fault_ipa);

	/* Check the stage-2 fault is trans. fault or write fault */
	fault_status = kvm_vcpu_trap_get_fault(vcpu);
	if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
		kvm_err("Unsupported fault status: EC=%#x DFCS=%#lx\n",
			kvm_vcpu_trap_get_class(vcpu), fault_status);
		return -EFAULT;
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	gfn = fault_ipa >> PAGE_SHIFT;
	if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {
		if (is_iabt) {
			/* Prefetch Abort on I/O address */
			kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
			ret = 1;
			goto out_unlock;
		}

		if (fault_status != FSC_FAULT) {
			kvm_err("Unsupported fault status on io memory: %#lx\n",
				fault_status);
			ret = -EFAULT;
			goto out_unlock;
		}

		/*
		 * The IPA is reported as [MAX:12], so we need to
		 * complement it with the bottom 12 bits from the
		 * faulting VA. This is always 12 bits, irrespective
		 * of the page size.
		 */
		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
		ret = io_mem_abort(vcpu, run, fault_ipa);
		goto out_unlock;
	}

	memslot = gfn_to_memslot(vcpu->kvm, gfn);

	ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status);
	if (ret == 0)
		ret = 1;
out_unlock:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	return ret;
}

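/*
 * Apply a handler to every guest physical page backed by host VAs in
 * [start, end), iterating over all memslots that intersect the range.
 */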
static void handle_hva_to_gpa(struct kvm *kvm,
			      unsigned long start,
			      unsigned long end,
			      void (*handler)(struct kvm *kvm,
					      gpa_t gpa, void *data),
			      void *data)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);

	/* we only care about the pages that the guest sees */
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;

		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		for (; gfn < gfn_end; ++gfn) {
			gpa_t gpa = gfn << PAGE_SHIFT;
			handler(kvm, gpa, data);
		}
	}
}

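/* Tear down a single stage-2 page mapping and invalidate its TLB entry. */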
static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
	unmap_stage2_range(kvm, gpa, PAGE_SIZE);
	kvm_tlb_flush_vmid_ipa(kvm, gpa);
}

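/*
 * MMU notifier callbacks: the core mm invokes these when host mappings
 * change, so the corresponding stage-2 entries must be dropped or
 * updated before the host pages go away.
 */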
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	unsigned long end = hva + PAGE_SIZE;

	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_unmap_hva(hva);
	handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end)
{
	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_unmap_hva_range(start, end);
	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
	return 0;
}

static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
	pte_t *pte = (pte_t *)data;

	stage2_set_pte(kvm, NULL, gpa, pte, false);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	unsigned long end = hva + PAGE_SIZE;
	pte_t stage2_pte;

	if (!kvm->arch.pgd)
		return;

	trace_kvm_set_spte_hva(hva);
	stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
}

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

phys_addr_t kvm_mmu_get_httbr(void)
{
	return virt_to_phys(hyp_pgd);
}

phys_addr_t kvm_mmu_get_boot_httbr(void)
{
	return virt_to_phys(boot_hyp_pgd);
}

phys_addr_t kvm_get_idmap_vector(void)
{
	return hyp_idmap_vector;
}

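/*
 * One-time MMU setup: locate the HYP init code (using a bounce page if
 * it straddles a page boundary), allocate the boot and runtime HYP
 * page tables, and map the init code and trampoline page into them.
 */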
int kvm_mmu_init(void)
{
	int err;

	hyp_idmap_start = virt_to_phys(__hyp_idmap_text_start);
	hyp_idmap_end = virt_to_phys(__hyp_idmap_text_end);
	hyp_idmap_vector = virt_to_phys(__kvm_hyp_init);

	if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) {
		/*
		 * Our init code is crossing a page boundary. Allocate
		 * a bounce page, copy the code over and use that.
		 */
		size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
		phys_addr_t phys_base;

		init_bounce_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!init_bounce_page) {
			kvm_err("Couldn't allocate HYP init bounce page\n");
			err = -ENOMEM;
			goto out;
		}

		memcpy(init_bounce_page, __hyp_idmap_text_start, len);
		/*
		 * Warning: the code we just copied to the bounce page
		 * must be flushed to the point of coherency.
		 * Otherwise, the data may be sitting in L2, and HYP
		 * mode won't be able to observe it as it runs with
		 * caches off at that point.
		 */
		kvm_flush_dcache_to_poc(init_bounce_page, len);

		phys_base = virt_to_phys(init_bounce_page);
		hyp_idmap_vector += phys_base - hyp_idmap_start;
		hyp_idmap_start = phys_base;
		hyp_idmap_end = phys_base + len;

		kvm_info("Using HYP init bounce page @%lx\n",
			 (unsigned long)phys_base);
	}

	hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
	boot_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
	if (!hyp_pgd || !boot_hyp_pgd) {
		kvm_err("Hyp mode PGD not allocated\n");
		err = -ENOMEM;
		goto out;
	}

	/* Create the idmap in the boot page tables */
	err = __create_hyp_mappings(boot_hyp_pgd,
				    hyp_idmap_start, hyp_idmap_end,
				    __phys_to_pfn(hyp_idmap_start),
				    PAGE_HYP);
	if (err) {
		kvm_err("Failed to idmap %lx-%lx\n",
			hyp_idmap_start, hyp_idmap_end);
		goto out;
	}

	/* Map the very same page at the trampoline VA */
	err = __create_hyp_mappings(boot_hyp_pgd,
				    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
				    __phys_to_pfn(hyp_idmap_start),
				    PAGE_HYP);
	if (err) {
		kvm_err("Failed to map trampoline @%lx into boot HYP pgd\n",
			TRAMPOLINE_VA);
		goto out;
	}

	/* Map the same page again into the runtime page tables */
	err = __create_hyp_mappings(hyp_pgd,
				    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
				    __phys_to_pfn(hyp_idmap_start),
				    PAGE_HYP);
	if (err) {
		kvm_err("Failed to map trampoline @%lx into runtime HYP pgd\n",
			TRAMPOLINE_VA);
		goto out;
	}

	return 0;
out:
	free_hyp_pgds();
	return err;
}