/*
 * PowerNV OPAL high level interfaces
 *
 * Copyright 2011 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kobject.h>
#include <linux/delay.h>
#include <linux/memblock.h>

#include <asm/machdep.h>
#include <asm/opal.h>
#include <asm/firmware.h>
#include <asm/mce.h>

#include "powernv.h"
/* /sys/firmware/opal */
struct kobject *opal_kobj;

struct opal {
	u64 base;
	u64 entry;
	u64 size;
} opal;

struct mcheck_recoverable_range {
	u64 start_addr;
	u64 end_addr;
	u64 recover_addr;
};

static struct mcheck_recoverable_range *mc_recoverable_range;
static int mc_recoverable_range_len;

struct device_node *opal_node;
static DEFINE_SPINLOCK(opal_write_lock);
extern u64 opal_mc_secondary_handler[];
static unsigned int *opal_irqs;
static unsigned int opal_irq_count;
static ATOMIC_NOTIFIER_HEAD(opal_notifier_head);
static struct atomic_notifier_head opal_msg_notifier_head[OPAL_MSG_TYPE_MAX];
static DEFINE_SPINLOCK(opal_notifier_lock);
static uint64_t last_notified_mask = 0x0ul;
static atomic_t opal_notifier_hold = ATOMIC_INIT(0);
static void opal_reinit_cores(void)
{
	/* Do the actual re-init. This will clobber all FPRs, VRs, etc...
	 *
	 * It will preserve non-volatile GPRs and HSPRG0/1. It will
	 * also restore HIDs and other SPRs to their original value
	 * but it might clobber a bunch.
	 */
#ifdef __BIG_ENDIAN__
	opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_BE);
#else
	opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_LE);
#endif
}
int __init early_init_dt_scan_opal(unsigned long node,
				   const char *uname, int depth, void *data)
{
	const void *basep, *entryp, *sizep;
	int basesz, entrysz, runtimesz;

	if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
		return 0;

	basep  = of_get_flat_dt_prop(node, "opal-base-address", &basesz);
	entryp = of_get_flat_dt_prop(node, "opal-entry-address", &entrysz);
	sizep  = of_get_flat_dt_prop(node, "opal-runtime-size", &runtimesz);

	if (!basep || !entryp || !sizep)
		return 1;

	opal.base = of_read_number(basep, basesz/4);
	opal.entry = of_read_number(entryp, entrysz/4);
	opal.size = of_read_number(sizep, runtimesz/4);

	pr_debug("OPAL Base  = 0x%llx (basep=%p basesz=%d)\n",
		 opal.base, basep, basesz);
	pr_debug("OPAL Entry = 0x%llx (entryp=%p entrysz=%d)\n",
		 opal.entry, entryp, entrysz);
	pr_debug("OPAL Size  = 0x%llx (sizep=%p runtimesz=%d)\n",
		 opal.size, sizep, runtimesz);

	powerpc_firmware_features |= FW_FEATURE_OPAL;
	if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
		powerpc_firmware_features |= FW_FEATURE_OPALv2;
		powerpc_firmware_features |= FW_FEATURE_OPALv3;
		pr_info("OPAL V3 detected!\n");
	} else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
		powerpc_firmware_features |= FW_FEATURE_OPALv2;
		pr_info("OPAL V2 detected!\n");
	} else {
		pr_info("OPAL V1 detected!\n");
	}

	/* Reinit all cores with the right endianness */
	opal_reinit_cores();

	/* Restore some bits */
	if (cur_cpu_spec->cpu_restore)
		cur_cpu_spec->cpu_restore();

	return 1;
}
int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
				   const char *uname, int depth, void *data)
{
	int i, psize, size;
	const __be32 *prop;

	if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "mcheck-recoverable-ranges", &psize);
	if (!prop)
		return 1;

	pr_debug("Found machine check recoverable ranges.\n");

	/*
	 * Calculate the number of available entries.
	 *
	 * Each recoverable address range entry is (start address, len,
	 * recovery address), 2 cells each for start and recovery address,
	 * 1 cell for len, totalling 5 cells per entry.
	 */
	mc_recoverable_range_len = psize / (sizeof(*prop) * 5);

	/* Sanity check */
	if (!mc_recoverable_range_len)
		return 1;

	/* Size required to hold all the entries. */
	size = mc_recoverable_range_len *
			sizeof(struct mcheck_recoverable_range);

	/*
	 * Allocate a buffer to hold the MC recoverable ranges. We would be
	 * accessing them in real mode, hence it needs to be within the
	 * RMO region.
	 */
	mc_recoverable_range = __va(memblock_alloc_base(size,
					__alignof__(u64), ppc64_rma_size));
	memset(mc_recoverable_range, 0, size);

	for (i = 0; i < mc_recoverable_range_len; i++) {
		mc_recoverable_range[i].start_addr =
					of_read_number(prop + (i * 5) + 0, 2);
		mc_recoverable_range[i].end_addr =
					mc_recoverable_range[i].start_addr +
					of_read_number(prop + (i * 5) + 2, 1);
		mc_recoverable_range[i].recover_addr =
					of_read_number(prop + (i * 5) + 3, 2);

		pr_debug("Machine check recoverable range: %llx..%llx: %llx\n",
				mc_recoverable_range[i].start_addr,
				mc_recoverable_range[i].end_addr,
				mc_recoverable_range[i].recover_addr);
	}
	return 1;
}
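/*
 * For illustration, a hypothetical two-entry property (all values made up)
 * would be laid out as 5 big-endian cells per entry:
 *
 *	mcheck-recoverable-ranges = <0x0 0x30002000  0x100  0x0 0x30003000
 *				     0x0 0x30008000  0x200  0x0 0x30009000>;
 *
 * Entry 0 then decodes to start_addr = 0x30002000, end_addr = start + 0x100
 * = 0x30002100 and recover_addr = 0x30003000: a machine check whose NIP
 * falls in [0x30002000, 0x30002100) is resumed at 0x30003000.
 */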
static int __init opal_register_exception_handlers(void)
{
#ifdef __BIG_ENDIAN__
	u64 glue;

	if (!(powerpc_firmware_features & FW_FEATURE_OPAL))
		return -ENODEV;

	/* Hookup some exception handlers except machine check. We use the
	 * fwnmi area at 0x7000 to provide the glue space to OPAL
	 */
	glue = 0x7000;
	opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER, 0, glue);
#endif

	return 0;
}
machine_early_initcall(powernv, opal_register_exception_handlers);
int opal_notifier_register(struct notifier_block *nb)
{
	if (!nb) {
		pr_warning("%s: Invalid argument (%p)\n",
			   __func__, nb);
		return -EINVAL;
	}

	atomic_notifier_chain_register(&opal_notifier_head, nb);
	return 0;
}
EXPORT_SYMBOL_GPL(opal_notifier_register);

int opal_notifier_unregister(struct notifier_block *nb)
{
	if (!nb) {
		pr_warning("%s: Invalid argument (%p)\n",
			   __func__, nb);
		return -EINVAL;
	}

	atomic_notifier_chain_unregister(&opal_notifier_head, nb);
	return 0;
}
EXPORT_SYMBOL_GPL(opal_notifier_unregister);
static void opal_do_notifier(uint64_t events)
{
	unsigned long flags;
	uint64_t changed_mask;

	if (atomic_read(&opal_notifier_hold))
		return;

	spin_lock_irqsave(&opal_notifier_lock, flags);
	changed_mask = last_notified_mask ^ events;
	last_notified_mask = events;
	spin_unlock_irqrestore(&opal_notifier_lock, flags);

	/*
	 * We feed both the event bits and the changed bits to the
	 * callback, so it has enough information to act on.
	 */
	atomic_notifier_call_chain(&opal_notifier_head,
				   events, (void *)changed_mask);
}
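/*
 * Worked example (made-up values): if the last notified mask was 0b0101
 * and the new event word is 0b0110, then changed_mask = 0b0101 ^ 0b0110
 * = 0b0011, i.e. bit 0 was cleared and bit 1 was raised. A callback can
 * test "events & changed_mask" to pick out newly raised events.
 */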
void opal_notifier_update_evt(uint64_t evt_mask,
			      uint64_t evt_val)
{
	unsigned long flags;

	spin_lock_irqsave(&opal_notifier_lock, flags);
	last_notified_mask &= ~evt_mask;
	last_notified_mask |= evt_val;
	spin_unlock_irqrestore(&opal_notifier_lock, flags);
}
void opal_notifier_enable(void)
{
	int64_t rc;
	__be64 evt = 0;

	atomic_set(&opal_notifier_hold, 0);

	/* Process pending events */
	rc = opal_poll_events(&evt);
	if (rc == OPAL_SUCCESS && evt)
		opal_do_notifier(be64_to_cpu(evt));
}

void opal_notifier_disable(void)
{
	atomic_set(&opal_notifier_hold, 1);
}
/*
 * Opal message notifier based on message type. Allows subscribers to get
 * notified for a specific message type.
 */
int opal_message_notifier_register(enum OpalMessageType msg_type,
					struct notifier_block *nb)
{
	if (!nb) {
		pr_warning("%s: Invalid argument (%p)\n",
			   __func__, nb);
		return -EINVAL;
	}
	/* opal_msg_notifier_head[] has OPAL_MSG_TYPE_MAX entries */
	if (msg_type >= OPAL_MSG_TYPE_MAX) {
		pr_warning("%s: Invalid message type argument (%d)\n",
			   __func__, msg_type);
		return -EINVAL;
	}
	return atomic_notifier_chain_register(
				&opal_msg_notifier_head[msg_type], nb);
}
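/*
 * A minimal usage sketch (hypothetical subscriber; assumes OPAL_MSG_EPOW is
 * one of the enum OpalMessageType values on this kernel):
 *
 *	static int epow_notify(struct notifier_block *nb,
 *			       unsigned long msg_type, void *msg)
 *	{
 *		struct opal_msg *m = msg;
 *
 *		pr_info("EPOW param0=%llx\n", be64_to_cpu(m->params[0]));
 *		return 0;
 *	}
 *	static struct notifier_block epow_nb = { .notifier_call = epow_notify };
 *
 *	opal_message_notifier_register(OPAL_MSG_EPOW, &epow_nb);
 */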
static void opal_message_do_notify(uint32_t msg_type, void *msg)
{
	/* notify subscribers */
	atomic_notifier_call_chain(&opal_msg_notifier_head[msg_type],
					msg_type, msg);
}
static void opal_handle_message(void)
{
	s64 ret;
	/*
	 * TODO: pre-allocate a message buffer depending on opal-msg-size
	 * value in /proc/device-tree.
	 */
	static struct opal_msg msg;
	u32 type;

	ret = opal_get_msg(__pa(&msg), sizeof(msg));
	/* No opal message pending. */
	if (ret == OPAL_RESOURCE)
		return;

	/* check for errors. */
	if (ret) {
		pr_warning("%s: Failed to retrieve opal message, err=%lld\n",
				__func__, ret);
		return;
	}

	type = be32_to_cpu(msg.msg_type);

	/* Sanity check */
	if (type >= OPAL_MSG_TYPE_MAX) {
		pr_warning("%s: Unknown message type: %u\n", __func__, type);
		return;
	}
	opal_message_do_notify(type, (void *)&msg);
}
static int opal_message_notify(struct notifier_block *nb,
			  unsigned long events, void *change)
{
	if (events & OPAL_EVENT_MSG_PENDING)
		opal_handle_message();
	return 0;
}

static struct notifier_block opal_message_nb = {
	.notifier_call	= opal_message_notify,
	.next		= NULL,
	.priority	= 0,
};
static int __init opal_message_init(void)
{
	int ret, i;

	for (i = 0; i < OPAL_MSG_TYPE_MAX; i++)
		ATOMIC_INIT_NOTIFIER_HEAD(&opal_msg_notifier_head[i]);

	ret = opal_notifier_register(&opal_message_nb);
	if (ret) {
		pr_err("%s: Can't register OPAL event notifier (%d)\n",
		       __func__, ret);
		return ret;
	}
	return 0;
}
machine_early_initcall(powernv, opal_message_init);
int opal_get_chars(uint32_t vtermno, char *buf, int count)
{
	s64 rc;
	__be64 evt, len;

	if (!opal.entry)
		return -ENODEV;
	opal_poll_events(&evt);
	if ((be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_INPUT) == 0)
		return 0;
	len = cpu_to_be64(count);
	rc = opal_console_read(vtermno, &len, buf);
	if (rc == OPAL_SUCCESS)
		return be64_to_cpu(len);
	return 0;
}
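/*
 * Illustrative caller (a hypothetical polling loop, not a real driver):
 *
 *	char buf[16];
 *	int n = opal_get_chars(0, buf, sizeof(buf));
 *	if (n > 0)
 *		;	// n bytes of console input are now in buf
 *
 * A return of 0 simply means no input was pending on that vterm.
 */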
int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
{
	int written = 0;
	__be64 olen;
	s64 len, rc;
	unsigned long flags;
	__be64 evt;

	if (!opal.entry)
		return -ENODEV;

	/* We want put_chars to be atomic to avoid mangling of hvsi
	 * packets. To do that, we first test for room and return
	 * -EAGAIN if there isn't enough.
	 *
	 * Unfortunately, opal_console_write_buffer_space() doesn't
	 * appear to work on opal v1, so we just assume there is
	 * enough room and be done with it
	 */
	spin_lock_irqsave(&opal_write_lock, flags);
	if (firmware_has_feature(FW_FEATURE_OPALv2)) {
		rc = opal_console_write_buffer_space(vtermno, &olen);
		len = be64_to_cpu(olen);
		if (rc || len < total_len) {
			spin_unlock_irqrestore(&opal_write_lock, flags);
			/* Closed -> drop characters */
			if (rc)
				return total_len;
			opal_poll_events(NULL);
			return -EAGAIN;
		}
	}

	/* We still try to handle partial completions, though they
	 * should no longer happen.
	 */
	rc = OPAL_BUSY;
	while (total_len > 0 && (rc == OPAL_BUSY ||
				 rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) {
		olen = cpu_to_be64(total_len);
		rc = opal_console_write(vtermno, &olen, data);
		len = be64_to_cpu(olen);

		/* Closed or other error drop */
		if (rc != OPAL_SUCCESS && rc != OPAL_BUSY &&
		    rc != OPAL_BUSY_EVENT) {
			written = total_len;
			break;
		}
		if (rc == OPAL_SUCCESS) {
			total_len -= len;
			data += len;
			written += len;
		}
		/* This is a bit nasty but we need that for the console to
		 * flush when there aren't any interrupts. We will clean
		 * things up a bit later to limit that to synchronous paths
		 * such as the kernel console and xmon/udbg
		 */
		do
			opal_poll_events(&evt);
		while (rc == OPAL_SUCCESS &&
			(be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT));
	}
	spin_unlock_irqrestore(&opal_write_lock, flags);
	return written;
}
static int opal_recover_mce(struct pt_regs *regs,
					struct machine_check_event *evt)
{
	int recovered = 0;
	uint64_t ea = get_mce_fault_addr(evt);

	if (!(regs->msr & MSR_RI)) {
		/* If MSR_RI isn't set, we cannot recover */
		recovered = 0;
	} else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
		/* Platform corrected itself */
		recovered = 1;
	} else if (ea && !is_kernel_addr(ea)) {
		/*
		 * Faulting address is not in kernel text. We should be fine.
		 * We need to find which process uses this address.
		 * For now, kill the task if we have received the exception
		 * in userspace.
		 *
		 * TODO: Queue up this address for hwpoisoning later.
		 */
		if (user_mode(regs) && !is_global_init(current)) {
			_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
			recovered = 1;
		} else
			recovered = 0;
	} else if (user_mode(regs) && !is_global_init(current) &&
		evt->severity == MCE_SEV_ERROR_SYNC) {
		/*
		 * If we have received a synchronous error when in userspace,
		 * kill the task.
		 */
		_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
		recovered = 1;
	}
	return recovered;
}
int opal_machine_check(struct pt_regs *regs)
{
	struct machine_check_event evt;

	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
		return 0;

	/* Print things out */
	if (evt.version != MCE_V1) {
		pr_err("Machine Check Exception, Unknown event version %d!\n",
		       evt.version);
		return 0;
	}
	machine_check_print_event_info(&evt);

	if (opal_recover_mce(regs, &evt))
		return 1;
	return 0;
}
/* Early HMI handler called in real mode. */
int opal_hmi_exception_early(struct pt_regs *regs)
{
	s64 rc;

	/*
	 * Call the OPAL HMI handler. A return value of OPAL_SUCCESS
	 * indicates that an HMI event was generated and is waiting to
	 * be pulled by Linux.
	 */
	rc = opal_handle_hmi();
	if (rc == OPAL_SUCCESS) {
		local_paca->hmi_event_available = 1;
		return 1;
	}
	return 0;
}
/* HMI exception handler called in virtual mode during check_irq_replay. */
int opal_handle_hmi_exception(struct pt_regs *regs)
{
	s64 rc;
	__be64 evt = 0;

	/*
	 * Check if an HMI event is available.
	 * If yes, call opal_poll_events() to pull OPAL messages and
	 * process them.
	 */
	if (!local_paca->hmi_event_available)
		return 0;

	local_paca->hmi_event_available = 0;
	rc = opal_poll_events(&evt);
	if (rc == OPAL_SUCCESS && evt)
		opal_do_notifier(be64_to_cpu(evt));

	return 1;
}
static uint64_t find_recovery_address(uint64_t nip)
{
	int i;

	for (i = 0; i < mc_recoverable_range_len; i++)
		if ((nip >= mc_recoverable_range[i].start_addr) &&
		    (nip < mc_recoverable_range[i].end_addr))
			return mc_recoverable_range[i].recover_addr;
	return 0;
}
bool opal_mce_check_early_recovery(struct pt_regs *regs)
{
	uint64_t recover_addr = 0;

	if (!opal.base || !opal.size)
		goto out;

	if ((regs->nip >= opal.base) &&
			(regs->nip <= (opal.base + opal.size)))
		recover_addr = find_recovery_address(regs->nip);

	/*
	 * Setup regs->nip to rfi into the fixup address.
	 */
	if (recover_addr)
		regs->nip = recover_addr;

out:
	return !!recover_addr;
}
static irqreturn_t opal_interrupt(int irq, void *data)
{
	__be64 events;

	opal_handle_interrupt(virq_to_hw(irq), &events);

	opal_do_notifier(be64_to_cpu(events));

	return IRQ_HANDLED;
}
static int opal_sysfs_init(void)
{
	opal_kobj = kobject_create_and_add("opal", firmware_kobj);
	if (!opal_kobj) {
		pr_warn("kobject_create_and_add opal failed\n");
		return -ENOMEM;
	}

	return 0;
}
static void __init opal_dump_region_init(void)
{
	void *addr;
	uint64_t size;
	int rc;

	/* Register kernel log buffer */
	addr = log_buf_addr_get();
	size = log_buf_len_get();
	rc = opal_register_dump_region(OPAL_DUMP_REGION_LOG_BUF,
				       __pa(addr), size);
	/* Don't warn if this is just an older OPAL that doesn't
	 * know about that call
	 */
	if (rc && rc != OPAL_UNSUPPORTED)
		pr_warn("DUMP: Failed to register kernel log buffer. "
			"rc = %d\n", rc);
}
static int __init opal_init(void)
{
	struct device_node *np, *consoles;
	const __be32 *irqs;
	int rc, i, irqlen;

	opal_node = of_find_node_by_path("/ibm,opal");
	if (!opal_node) {
		pr_warn("opal: Node not found\n");
		return -ENODEV;
	}

	/* Register OPAL consoles if any ports */
	if (firmware_has_feature(FW_FEATURE_OPALv2))
		consoles = of_find_node_by_path("/ibm,opal/consoles");
	else
		consoles = of_node_get(opal_node);
	if (consoles) {
		for_each_child_of_node(consoles, np) {
			if (strcmp(np->name, "serial"))
				continue;
			of_platform_device_create(np, NULL, NULL);
		}
		of_node_put(consoles);
	}

	/* Find all OPAL interrupts and request them */
	irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
	pr_debug("opal: Found %d interrupts reserved for OPAL\n",
		 irqs ? (irqlen / 4) : 0);
	/* irqlen is only valid when the property was found */
	opal_irq_count = irqs ? (irqlen / 4) : 0;
	opal_irqs = kzalloc(opal_irq_count * sizeof(unsigned int), GFP_KERNEL);
	for (i = 0; irqs && i < (irqlen / 4); i++, irqs++) {
		unsigned int hwirq = be32_to_cpup(irqs);
		unsigned int irq = irq_create_mapping(NULL, hwirq);
		if (irq == NO_IRQ) {
			pr_warning("opal: Failed to map irq 0x%x\n", hwirq);
			continue;
		}
		rc = request_irq(irq, opal_interrupt, 0, "opal", NULL);
		if (rc)
			pr_warning("opal: Error %d requesting irq %d"
				   " (0x%x)\n", rc, irq, hwirq);
		opal_irqs[i] = irq;
	}

	/* Create "opal" kobject under /sys/firmware */
	rc = opal_sysfs_init();
	if (rc == 0) {
		/* Setup dump region interface */
		opal_dump_region_init();
		/* Setup error log interface */
		rc = opal_elog_init();
		/* Setup code update interface */
		opal_flash_init();
		/* Setup platform dump extract interface */
		opal_platform_dump_init();
		/* Setup system parameters interface */
		opal_sys_param_init();
		/* Setup message log interface. */
		opal_msglog_init();
	}

	return 0;
}
machine_subsys_initcall(powernv, opal_init);
void opal_shutdown(void)
{
	unsigned int i;
	long rc = OPAL_BUSY;

	/* First free interrupts, which will also mask them */
	for (i = 0; i < opal_irq_count; i++) {
		if (opal_irqs[i])
			free_irq(opal_irqs[i], NULL);
		opal_irqs[i] = 0;
	}

	/*
	 * Then sync with OPAL which ensures anything that can
	 * potentially write to our memory has completed, such
	 * as an ongoing dump retrieval.
	 */
	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
		rc = opal_sync_host_reboot();
		if (rc == OPAL_BUSY_EVENT)
			opal_poll_events(NULL);
		else
			mdelay(10);
	}

	/* Unregister memory dump region */
	opal_unregister_dump_region(OPAL_DUMP_REGION_LOG_BUF);
}
/* Export this so that test modules can use it */
EXPORT_SYMBOL_GPL(opal_invalid_call);
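/*
 * Layout notes (a sketch of the node format, assuming the usual 4K
 * PAGE_SIZE): each opal_sg_list node occupies one page, starting with a
 * 16-byte header (8-byte length + 8-byte physical "next" pointer) followed
 * by 16-byte (data, length) entries; hence the "+ 16" when computing
 * sg->length below, and SG_ENTRIES_PER_NODE = (4096 - 16) / 16 = 255
 * entries per node.
 */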
/* Convert a region of vmalloc memory to an opal sg list */
struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
					     unsigned long vmalloc_size)
{
	struct opal_sg_list *sg, *first = NULL;
	unsigned long i = 0;

	sg = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!sg)
		goto nomem;

	first = sg;

	while (vmalloc_size > 0) {
		uint64_t data = vmalloc_to_pfn(vmalloc_addr) << PAGE_SHIFT;
		uint64_t length = min(vmalloc_size, PAGE_SIZE);

		sg->entry[i].data = cpu_to_be64(data);
		sg->entry[i].length = cpu_to_be64(length);
		i++;

		/* Node full: chain a new one and close this node's length */
		if (i >= SG_ENTRIES_PER_NODE) {
			struct opal_sg_list *next;

			next = kzalloc(PAGE_SIZE, GFP_KERNEL);
			if (!next)
				goto nomem;

			sg->length = cpu_to_be64(
					i * sizeof(struct opal_sg_entry) + 16);
			i = 0;
			sg->next = cpu_to_be64(__pa(next));
			sg = next;
		}

		vmalloc_addr += length;
		vmalloc_size -= length;
	}

	sg->length = cpu_to_be64(i * sizeof(struct opal_sg_entry) + 16);

	return first;

nomem:
	pr_err("%s : Failed to allocate memory\n", __func__);
	opal_free_sg_list(first);
	return NULL;
}
void opal_free_sg_list(struct opal_sg_list *sg)
{
	/* Walk the chain via the physical "next" pointers, freeing each node */
	while (sg) {
		uint64_t next = be64_to_cpu(sg->next);

		kfree(sg);

		if (next)
			sg = __va(next);
		else
			sg = NULL;
	}
}