/*
 * Copyright (C) 2001 Todd Inglett, IBM Corporation
 *
 * pSeries LPAR support.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* Enables debugging of low-level hash table routines - careful! */
#undef DEBUG
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/static_key.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/prom.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/trace.h>
#include <asm/firmware.h>
#include <asm/plpar_wrappers.h>

#include "pseries.h"
/* Flag bits for H_BULK_REMOVE */
#define HBR_REQUEST	0x4000000000000000UL
#define HBR_RESPONSE	0x8000000000000000UL
#define HBR_END		0xc000000000000000UL
#define HBR_AVPN	0x0200000000000000UL
#define HBR_ANDCOND	0x0100000000000000UL
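
/*
 * As used below, each H_BULK_REMOVE translation specifier is a
 * (control, AVPN) pair of doublewords, so the eight hcall arguments
 * carry at most four removals per call; HBR_END terminates a
 * partially filled list.
 */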

EXPORT_SYMBOL(plpar_hcall);
EXPORT_SYMBOL(plpar_hcall9);
EXPORT_SYMBOL(plpar_hcall_norets);

extern void pSeries_find_serial_port(void);

void vpa_init(int cpu)
{
	int hwcpu = get_hard_smp_processor_id(cpu);
	unsigned long addr;
	long ret;
	struct paca_struct *pp;
	struct dtl_entry *dtl;

	/*
	 * The spec says it "may be problematic" if CPU x registers the VPA of
	 * CPU y. We should never do that, but wail if we ever do.
	 */
	WARN_ON(cpu != smp_processor_id());

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		lppaca_of(cpu).vmxregs_in_use = 1;

	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		lppaca_of(cpu).ebb_regs_in_use = 1;

	addr = __pa(&lppaca_of(cpu));
	ret = register_vpa(hwcpu, addr);

	if (ret) {
		pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
		       "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
		return;
	}
	/*
	 * PAPR says this feature is SLB-Buffer but firmware never
	 * reports that. All SPLPAR systems support the SLB shadow buffer.
	 */
	addr = __pa(paca[cpu].slb_shadow_ptr);
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		ret = register_slb_shadow(hwcpu, addr);
		if (ret)
			pr_err("WARNING: SLB shadow buffer registration for "
			       "cpu %d (hw %d) of area %lx failed with %ld\n",
			       cpu, hwcpu, addr, ret);
	}

	/*
	 * Register dispatch trace log, if one has been allocated.
	 */
	pp = &paca[cpu];
	dtl = pp->dispatch_log;
	if (dtl) {
		pp->dtl_ridx = 0;
		pp->dtl_curr = dtl;
		lppaca_of(cpu).dtl_idx = 0;

		/* hypervisor reads buffer length from this field */
		dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
		ret = register_dtl(hwcpu, __pa(dtl));
		if (ret)
			pr_err("WARNING: DTL registration of cpu %d (hw %d) "
			       "failed with %ld\n", smp_processor_id(),
			       hwcpu, ret);
		lppaca_of(cpu).dtl_enable_mask = 2;
	}
}

static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
				     unsigned long vpn, unsigned long pa,
				     unsigned long rflags, unsigned long vflags,
				     int psize, int apsize, int ssize)
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long slot;
	unsigned long hpte_v, hpte_r;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
			 "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
			 hpte_group, vpn, pa, rflags, vflags, psize);

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);

	/* Now fill in the actual HPTE */
	/* Set CEC cookie to 0         */
	/* Zero page = 0               */
	/* I-cache Invalidate = 0      */
	/* I-cache synchronize = 0     */
	/* Exact = 0                   */
	flags = 0;

	/* Make pHyp happy */
	if ((rflags & _PAGE_NO_CACHE) && !(rflags & _PAGE_WRITETHRU))
		hpte_r &= ~HPTE_R_M;

	if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
		flags |= H_COALESCE_CAND;

	lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
	if (unlikely(lpar_rc == H_PTEG_FULL)) {
		if (!(vflags & HPTE_V_BOLTED))
			pr_devel(" full\n");
		return -1;
	}

	/*
	 * Since we try and ioremap PHBs we don't own, the pte insert
	 * will fail. However we must catch the failure in hash_page
	 * or we will loop forever, so return -2 in this case.
	 */
	if (unlikely(lpar_rc != H_SUCCESS)) {
		if (!(vflags & HPTE_V_BOLTED))
			pr_devel(" lpar err %ld\n", lpar_rc);
		return -2;
	}
	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" -> slot: %lu\n", slot & 7);
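
	/*
	 * The hypervisor returns the absolute index of the new HPTE in
	 * "slot"; only its position within the 8-entry PTEG (slot & 7)
	 * is reported back to the generic hash code.
	 */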
	/* Because of iSeries, we have to pass down the secondary
	 * bucket bit here as well
	 */
	return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);

static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
{
	unsigned long slot_offset;
	unsigned long lpar_rc;
	int i;
	unsigned long dummy1, dummy2;

	/* pick a random slot to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		/* don't remove a bolted entry */
		lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
					   (0x1UL << 4), &dummy1, &dummy2);
		if (lpar_rc == H_SUCCESS)
			return i;

		/*
		 * The test for adjunct partition is performed before the
		 * ANDCOND test. H_RESOURCE may be returned, so we need to
		 * check for that as well.
		 */
		BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);

		slot_offset++;
		slot_offset &= 0x7;
	}

	return -1;
}

static void pSeries_lpar_hptab_clear(void)
{
	unsigned long size_bytes = 1UL << ppc64_pft_size;
	unsigned long hpte_count = size_bytes >> 4;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];
	long lpar_rc;
	unsigned long i, j;

	/* Read in batches of 4; invalidate only valid entries not in
	 * the VRMA. hpte_count will be a multiple of 4.
	 */
	for (i = 0; i < hpte_count; i += 4) {
		lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
		if (lpar_rc != H_SUCCESS)
			continue;
		for (j = 0; j < 4; j++) {
			if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
				HPTE_V_VRMA_MASK)
				continue;
			if (ptes[j].pteh & HPTE_V_VALID)
				plpar_pte_remove_raw(0, i + j, 0,
					&(ptes[j].pteh), &(ptes[j].ptel));
		}
	}

#ifdef __LITTLE_ENDIAN__
	/* Reset exceptions to big endian */
	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		long rc;

		rc = pseries_big_endian_exceptions();
		/*
		 * At this point it is unlikely panic() will get anything
		 * out to the user, but at least this will stop us from
		 * continuing on further and creating an even more
		 * difficult to debug situation.
		 */
		if (rc)
			panic("Could not enable big endian exceptions");
	}
#endif
}

/*
 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
 * the low 3 bits of flags happen to line up. So no transform is needed.
 * We can probably optimize here and assume the high bits of newpp are
 * already zero. For now I am paranoid.
 */
static long pSeries_lpar_hpte_updatepp(unsigned long slot,
				       unsigned long newpp,
				       unsigned long vpn,
				       int psize, int apsize,
				       int ssize, int local)
{
	unsigned long lpar_rc;
	unsigned long flags = (newpp & 7) | H_AVPN;
	unsigned long want_v;

	want_v = hpte_encode_avpn(vpn, psize, ssize);

	pr_devel("    update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
		 want_v, slot, flags, psize);

	lpar_rc = plpar_pte_protect(flags, slot, want_v);

	if (lpar_rc == H_NOT_FOUND) {
		pr_devel("not found !\n");
		return -1;
	}

	pr_devel("ok\n");
	BUG_ON(lpar_rc != H_SUCCESS);

	return 0;
}

static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot)
{
	unsigned long dword0;
	unsigned long lpar_rc;
	unsigned long dummy_word1;
	unsigned long flags;

	/* Read 1 pte at a time                        */
	/* Do not need RPN to logical page translation */
	/* No cross CEC PFT access                     */
	flags = 0;

	lpar_rc = plpar_pte_read(flags, slot, &dword0, &dummy_word1);
	BUG_ON(lpar_rc != H_SUCCESS);

	return dword0;
}

static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
{
	unsigned long hash, i;
	unsigned long want_v, hpte_v;
	long slot;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted entries are always in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hpte_v = pSeries_lpar_hpte_getword0(slot);
		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			return slot;	/* HPTE matches */
		++slot;
	}

	return -1;
}

static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
					     unsigned long ea,
					     int psize, int ssize)
{
	unsigned long vpn;
	unsigned long lpar_rc, slot, vsid, flags;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	BUG_ON(slot == -1);

	flags = newpp & 7;
	lpar_rc = plpar_pte_protect(flags, slot, 0);
	BUG_ON(lpar_rc != H_SUCCESS);
}

static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
					 int psize, int apsize,
					 int ssize, int local)
{
	unsigned long want_v;
	unsigned long lpar_rc;
	unsigned long dummy1, dummy2;

	pr_devel("    inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
		 slot, vpn, psize, local);

	want_v = hpte_encode_avpn(vpn, psize, ssize);
	lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
	if (lpar_rc == H_NOT_FOUND)
		return;

	BUG_ON(lpar_rc != H_SUCCESS);
}

/*
 * Limit iterations holding pSeries_lpar_tlbie_lock to 3. We also need
 * to make sure that we avoid bouncing the hypervisor tlbie lock.
 */
#define PPC64_HUGE_HPTE_BATCH 12
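
/*
 * With two doublewords per removal and eight hcall arguments, a batch
 * of 12 works out to three H_BULK_REMOVE calls per acquisition of the
 * tlbie lock, matching the limit described above.
 */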

static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
					       unsigned long *vpn, int count,
					       int psize, int ssize)
{
	unsigned long param[8];
	int i = 0, pix = 0, rc;
	unsigned long flags = 0;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	for (i = 0; i < count; i++) {

		if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
			pSeries_lpar_hpte_invalidate(slot[i], vpn[i], psize, 0,
						     ssize, 0);
		} else {
			param[pix] = HBR_REQUEST | HBR_AVPN | slot[i];
			param[pix+1] = hpte_encode_avpn(vpn[i], psize, ssize);
			pix += 2;
			if (pix == 8) {
				rc = plpar_hcall9(H_BULK_REMOVE, param,
					param[0], param[1], param[2],
					param[3], param[4], param[5],
					param[6], param[7]);
				BUG_ON(rc != H_SUCCESS);
				pix = 0;
			}
		}
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize)
{
	int i, index = 0;
	unsigned long s_addr = addr;
	unsigned int max_hpte_count, valid;
	unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long shift, hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);
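
	/*
	 * hpte_slot_array carries one entry per "psize" sub-page of the
	 * PMD-mapped region, hence 2^(PMD_SHIFT - shift) candidate HPTEs.
	 */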
	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		slot_array[index] = slot;
		vpn_array[index] = vpn;
		if (index == PPC64_HUGE_HPTE_BATCH - 1) {
			/* Now do a bulk invalidate */
			__pSeries_lpar_hugepage_invalidate(slot_array,
							   vpn_array,
							   PPC64_HUGE_HPTE_BATCH,
							   psize, ssize);
			index = 0;
		} else
			index++;
	}
	if (index)
		__pSeries_lpar_hugepage_invalidate(slot_array, vpn_array,
						   index, psize, ssize);
}

static void pSeries_lpar_hpte_removebolted(unsigned long ea,
					   int psize, int ssize)
{
	unsigned long vpn;
	unsigned long slot, vsid;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	BUG_ON(slot == -1);
	/* lpar doesn't use the passed actual page size */
	pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0);
}

/*
 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
 * lock.
 */
static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long i, pix, rc;
	unsigned long flags = 0;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	unsigned long param[9];
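	/* param[] holds up to four (control, AVPN) pairs plus an HBR_END terminator */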
	unsigned long hash, index, shift, hidx, slot;
	real_pte_t pte;
	int psize, ssize;

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	psize = batch->psize;
	ssize = batch->ssize;
	pix = 0;
	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];
		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
				/*
				 * lpar doesn't use the passed actual page size
				 */
				pSeries_lpar_hpte_invalidate(slot, vpn, psize,
							     0, ssize, local);
			} else {
				param[pix] = HBR_REQUEST | HBR_AVPN | slot;
				param[pix+1] = hpte_encode_avpn(vpn, psize,
								ssize);
				pix += 2;
				if (pix == 8) {
					rc = plpar_hcall9(H_BULK_REMOVE, param,
						param[0], param[1], param[2],
						param[3], param[4], param[5],
						param[6], param[7]);
					BUG_ON(rc != H_SUCCESS);
					pix = 0;
				}
			}
		} pte_iterate_hashed_end();
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

static int __init disable_bulk_remove(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
		printk(KERN_INFO "Disabling BULK_REMOVE firmware feature\n");
		powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
	}
	return 1;
}
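
/*
 * Booting with "bulk_remove=off" falls back to removing HPTEs one at a
 * time via H_REMOVE instead of batching them through H_BULK_REMOVE.
 */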
577 __setup("bulk_remove=", disable_bulk_remove);

void __init hpte_init_lpar(void)
{
	ppc_md.hpte_invalidate	= pSeries_lpar_hpte_invalidate;
	ppc_md.hpte_updatepp	= pSeries_lpar_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
	ppc_md.hpte_insert	= pSeries_lpar_hpte_insert;
	ppc_md.hpte_remove	= pSeries_lpar_hpte_remove;
	ppc_md.hpte_removebolted = pSeries_lpar_hpte_removebolted;
	ppc_md.flush_hash_range	= pSeries_lpar_flush_hash_range;
	ppc_md.hpte_clear_all	= pSeries_lpar_hptab_clear;
	ppc_md.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
}

#ifdef CONFIG_PPC_SMLPAR
#define CMO_FREE_HINT_DEFAULT 1
static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT;

static int __init cmo_free_hint(char *str)
{
	char *parm = strstrip(str);

	if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) {
		printk(KERN_INFO "cmo_free_hint: CMO free page hinting is not active.\n");
		cmo_free_hint_flag = 0;
		return 1;
	}
	cmo_free_hint_flag = 1;
	printk(KERN_INFO "cmo_free_hint: CMO free page hinting is active.\n");
	if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0)
		return 1;
	return 0;
}
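
/*
 * "cmo_free_hint=no" (or "off") stops arch_free_page() below from
 * hinting freed pages to the hypervisor via H_PAGE_INIT.
 */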
616 __setup("cmo_free_hint=", cmo_free_hint);

static void pSeries_set_page_state(struct page *page, int order,
				   unsigned long state)
{
	int i, j;
	unsigned long cmo_page_sz, addr;

	cmo_page_sz = cmo_get_page_size();
	addr = __pa((unsigned long)page_address(page));
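
	/*
	 * The hypervisor's CMO page size may be smaller than PAGE_SIZE,
	 * so hint each Linux page in cmo_page_sz-sized pieces.
	 */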
	for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
		for (j = 0; j < PAGE_SIZE; j += cmo_page_sz)
			plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0);
	}
}

void arch_free_page(struct page *page, int order)
{
	if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO))
		return;
	pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
}
EXPORT_SYMBOL(arch_free_page);

#endif

#ifdef CONFIG_TRACEPOINTS
#ifdef CONFIG_JUMP_LABEL
struct static_key hcall_tracepoint_key = STATIC_KEY_INIT;

void hcall_tracepoint_regfunc(void)
{
	static_key_slow_inc(&hcall_tracepoint_key);
}

void hcall_tracepoint_unregfunc(void)
{
	static_key_slow_dec(&hcall_tracepoint_key);
}
#else
/*
 * We optimise our hcall path by placing hcall_tracepoint_refcount
 * directly in the TOC so we can check if the hcall tracepoints are
 * enabled via a single load.
 */

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long hcall_tracepoint_refcount;

void hcall_tracepoint_regfunc(void)
{
	hcall_tracepoint_refcount++;
}

void hcall_tracepoint_unregfunc(void)
{
	hcall_tracepoint_refcount--;
}
#endif

/*
 * Since the tracing code might execute hcalls we need to guard against
 * recursion. One example of this is spinlocks calling H_YIELD on
 * shared processor partitions.
 */
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);

void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	/*
	 * We cannot call tracepoints inside RCU idle regions which
	 * means we must not trace H_CEDE.
	 */
	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);
	depth = this_cpu_ptr(&hcall_trace_depth);
	if (*depth)
		goto out;

	(*depth)++;
	preempt_disable();
	trace_hcall_entry(opcode, args);
	(*depth)--;
out:
	local_irq_restore(flags);
}

void __trace_hcall_exit(long opcode, unsigned long retval,
			unsigned long *retbuf)
{
	unsigned long flags;
	unsigned int *depth;

	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);
	depth = this_cpu_ptr(&hcall_trace_depth);
	if (*depth)
		goto out;

	(*depth)++;
	trace_hcall_exit(opcode, retval, retbuf);
	preempt_enable();
	(*depth)--;
out:
	local_irq_restore(flags);
}
#endif

/*
 * H_GET_MPP hcall returns info in 7 parms
 */
int h_get_mpp(struct hvcall_mpp_data *mpp_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9(H_GET_MPP, retbuf);

	mpp_data->entitled_mem = retbuf[0];
	mpp_data->mapped_mem = retbuf[1];
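
	/*
	 * The remaining return registers pack several fields per
	 * doubleword; the shifts below unpack them: retbuf[2] carries the
	 * group and pool numbers, retbuf[3] the memory weights plus a
	 * 48-bit unallocated entitlement.
	 */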
	mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
	mpp_data->pool_num = retbuf[2] & 0xffff;

	mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
	mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
	mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL;

	mpp_data->pool_size = retbuf[4];
	mpp_data->loan_request = retbuf[5];
	mpp_data->backing_mem = retbuf[6];

	return rc;
}
EXPORT_SYMBOL(h_get_mpp);

int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };

	rc = plpar_hcall9(H_GET_MPP_X, retbuf);

	mpp_x_data->coalesced_bytes = retbuf[0];
	mpp_x_data->pool_coalesced_bytes = retbuf[1];
	mpp_x_data->pool_purr_cycles = retbuf[2];
	mpp_x_data->pool_spurr_cycles = retbuf[3];