Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux...
authorLinus Torvalds <torvalds@linux-foundation.org>
Fri, 14 Jan 2011 04:15:35 +0000 (20:15 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Fri, 14 Jan 2011 04:15:35 +0000 (20:15 -0800)
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6: (59 commits)
  ACPI / PM: Fix build problems for !CONFIG_ACPI related to NVS rework
  ACPI: fix resource check message
  ACPI / Battery: Update information on info notification and resume
  ACPI: Drop device flag wake_capable
  ACPI: Always check if _PRW is present before trying to evaluate it
  ACPI / PM: Check status of power resources under mutexes
  ACPI / PM: Rename acpi_power_off_device()
  ACPI / PM: Drop acpi_power_nocheck
  ACPI / PM: Drop acpi_bus_get_power()
  Platform / x86: Make fujitsu_laptop use acpi_bus_update_power()
  ACPI / Fan: Rework the handling of power resources
  ACPI / PM: Register power resource devices as soon as they are needed
  ACPI / PM: Register acpi_power_driver early
  ACPI / PM: Add function for updating device power state consistently
  ACPI / PM: Add function for device power state initialization
  ACPI / PM: Introduce __acpi_bus_get_power()
  ACPI / PM: Introduce function for refcounting device power resources
  ACPI / PM: Add functions for manipulating lists of power resources
  ACPI / PM: Prevent acpi_power_get_inferred_state() from making changes
  ACPICA: Update version to 20101209
  ...

84 files changed:
Documentation/ABI/stable/thermal-notification [new file with mode: 0644]
Documentation/IPMI.txt
Documentation/acpi/apei/output_format.txt [new file with mode: 0644]
Documentation/feature-removal-schedule.txt
Documentation/kernel-parameters.txt
Documentation/thermal/sysfs-api.txt
arch/ia64/include/asm/io.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/dumpstack.c
arch/x86/kernel/e820.c
drivers/acpi/Kconfig
drivers/acpi/Makefile
drivers/acpi/ac.c
drivers/acpi/acpi_ipmi.c [new file with mode: 0644]
drivers/acpi/acpica/Makefile
drivers/acpi/acpica/acevents.h
drivers/acpi/acpica/acglobal.h
drivers/acpi/acpica/achware.h
drivers/acpi/acpica/aclocal.h
drivers/acpi/acpica/evevent.c
drivers/acpi/acpica/evgpe.c
drivers/acpi/acpica/evgpeblk.c
drivers/acpi/acpica/evgpeinit.c
drivers/acpi/acpica/evgpeutil.c
drivers/acpi/acpica/evmisc.c
drivers/acpi/acpica/evxface.c
drivers/acpi/acpica/evxfevnt.c
drivers/acpi/acpica/evxfgpe.c [new file with mode: 0644]
drivers/acpi/acpica/hwgpe.c
drivers/acpi/acpica/utglobal.c
drivers/acpi/apei/apei-internal.h
drivers/acpi/apei/cper.c
drivers/acpi/apei/ghes.c
drivers/acpi/battery.c
drivers/acpi/bus.c
drivers/acpi/button.c
drivers/acpi/dock.c
drivers/acpi/ec.c
drivers/acpi/fan.c
drivers/acpi/glue.c
drivers/acpi/internal.h
drivers/acpi/nvs.c [moved from kernel/power/nvs.c with 86% similarity]
drivers/acpi/osl.c
drivers/acpi/power.c
drivers/acpi/proc.c
drivers/acpi/processor_driver.c
drivers/acpi/processor_throttling.c
drivers/acpi/sbs.c
drivers/acpi/scan.c
drivers/acpi/sleep.c
drivers/acpi/sysfs.c
drivers/acpi/thermal.c
drivers/acpi/video.c
drivers/acpi/video_detect.c
drivers/acpi/wakeup.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/gpu/drm/Kconfig
drivers/gpu/stub/Kconfig
drivers/platform/x86/fujitsu-laptop.c
drivers/pnp/Makefile
drivers/pnp/core.c
drivers/pnp/driver.c
drivers/pnp/isapnp/Makefile
drivers/pnp/pnpacpi/Makefile
drivers/pnp/pnpacpi/core.c
drivers/pnp/pnpbios/Makefile
drivers/thermal/Kconfig
drivers/thermal/thermal_sys.c
include/acpi/acpi_bus.h
include/acpi/acpixf.h
include/acpi/actypes.h
include/acpi/processor.h
include/linux/acpi.h
include/linux/cper.h
include/linux/ipmi.h
include/linux/ipmi_smi.h
include/linux/suspend.h
include/linux/thermal.h
kernel/panic.c
kernel/power/Kconfig
kernel/power/Makefile
lib/ioremap.c
mm/vmalloc.c

diff --git a/Documentation/ABI/stable/thermal-notification b/Documentation/ABI/stable/thermal-notification
new file mode 100644 (file)
index 0000000..9723e8b
--- /dev/null
@@ -0,0 +1,4 @@
+What:          A notification mechanism for thermal related events
+Description:
+       This interface enables notification for thermal related events.
+       The notification is in the form of a netlink event.
index 69dd29e..b2bea15 100644 (file)
@@ -533,6 +533,33 @@ completion during sending a panic event.
 Other Pieces
 ------------
 
+Get the detailed info related to the IPMI device
+--------------------------------------------------
+
+Some users need more detailed information about a device, like where
+the address came from or the raw base device for the IPMI interface.
+You can use the IPMI smi_watcher to catch IPMI interfaces as they
+come and go, and to get the information you can use the function
+ipmi_get_smi_info(), which fills in the following structure:
+
+struct ipmi_smi_info {
+       enum ipmi_addr_src addr_src;
+       struct device *dev;
+       union {
+               struct {
+                       void *acpi_handle;
+               } acpi_info;
+       } addr_info;
+};
+
+Currently, special info is returned only for SI_ACPI address sources.
+Others may be added as necessary.
+
+Note that the dev pointer is included in the above structure, and
+assuming ipmi_get_smi_info() returns success, you must call put_device()
+on the dev pointer.
+
+
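A minimal usage sketch of the smi_watcher plus ipmi_get_smi_info()
interface described above (not part of this patch; the example_* names
are illustrative only):

#include <linux/module.h>
#include <linux/device.h>
#include <linux/ipmi.h>

/* Called whenever a new IPMI interface appears. */
static void example_new_smi(int iface, struct device *dev)
{
	struct ipmi_smi_info info;

	if (ipmi_get_smi_info(iface, &info))
		return;

	if (info.addr_src == SI_ACPI)
		pr_info("IPMI interface %d: ACPI handle %p\n",
			iface, info.addr_info.acpi_info.acpi_handle);

	/* On success the structure holds a device reference; drop it. */
	put_device(info.dev);
}

/* Called when an IPMI interface goes away. */
static void example_smi_gone(int iface)
{
	pr_info("IPMI interface %d removed\n", iface);
}

static struct ipmi_smi_watcher example_watcher = {
	.owner    = THIS_MODULE,
	.new_smi  = example_new_smi,
	.smi_gone = example_smi_gone,
};

/* Register at module init: ipmi_smi_watcher_register(&example_watcher); */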
 Watchdog
 --------
 
diff --git a/Documentation/acpi/apei/output_format.txt b/Documentation/acpi/apei/output_format.txt
new file mode 100644 (file)
index 0000000..9146952
--- /dev/null
@@ -0,0 +1,122 @@
+                     APEI output format
+                     ~~~~~~~~~~~~~~~~~~
+
+APEI uses printk as the hardware error reporting interface; the output
+format is as follows.
+
+<error record> :=
+APEI generic hardware error status
+severity: <integer>, <severity string>
+section: <integer>, severity: <integer>, <severity string>
+flags: <integer>
+<section flags strings>
+fru_id: <uuid string>
+fru_text: <string>
+section_type: <section type string>
+<section data>
+
+<severity string>* := recoverable | fatal | corrected | info
+
+<section flags strings># :=
+[primary][, containment warning][, reset][, threshold exceeded]\
+[, resource not accessible][, latent error]
+
+<section type string> := generic processor error | memory error | \
+PCIe error | unknown, <uuid string>
+
+<section data> :=
+<generic processor section data> | <memory section data> | \
+<pcie section data> | <null>
+
+<generic processor section data> :=
+[processor_type: <integer>, <proc type string>]
+[processor_isa: <integer>, <proc isa string>]
+[error_type: <integer>
+<proc error type strings>]
+[operation: <integer>, <proc operation string>]
+[flags: <integer>
+<proc flags strings>]
+[level: <integer>]
+[version_info: <integer>]
+[processor_id: <integer>]
+[target_address: <integer>]
+[requestor_id: <integer>]
+[responder_id: <integer>]
+[IP: <integer>]
+
+<proc type string>* := IA32/X64 | IA64
+
+<proc isa string>* := IA32 | IA64 | X64
+
+<proc error type strings># :=
+[cache error][, TLB error][, bus error][, micro-architectural error]
+
+<proc operation string>* := unknown or generic | data read | data write | \
+instruction execution
+
+<proc flags strings># :=
+[restartable][, precise IP][, overflow][, corrected]
+
+<memory section data> :=
+[error_status: <integer>]
+[physical_address: <integer>]
+[physical_address_mask: <integer>]
+[node: <integer>]
+[card: <integer>]
+[module: <integer>]
+[bank: <integer>]
+[device: <integer>]
+[row: <integer>]
+[column: <integer>]
+[bit_position: <integer>]
+[requestor_id: <integer>]
+[responder_id: <integer>]
+[target_id: <integer>]
+[error_type: <integer>, <mem error type string>]
+
+<mem error type string>* :=
+unknown | no error | single-bit ECC | multi-bit ECC | \
+single-symbol chipkill ECC | multi-symbol chipkill ECC | master abort | \
+target abort | parity error | watchdog timeout | invalid address | \
+mirror Broken | memory sparing | scrub corrected error | \
+scrub uncorrected error
+
+<pcie section data> :=
+[port_type: <integer>, <pcie port type string>]
+[version: <integer>.<integer>]
+[command: <integer>, status: <integer>]
+[device_id: <integer>:<integer>:<integer>.<integer>
+slot: <integer>
+secondary_bus: <integer>
+vendor_id: <integer>, device_id: <integer>
+class_code: <integer>]
+[serial number: <integer>, <integer>]
+[bridge: secondary_status: <integer>, control: <integer>]
+
+<pcie port type string>* := PCIe end point | legacy PCI end point | \
+unknown | unknown | root port | upstream switch port | \
+downstream switch port | PCIe to PCI/PCI-X bridge | \
+PCI/PCI-X to PCIe bridge | root complex integrated endpoint device | \
+root complex event collector
+
+Where [] designates that the corresponding content is optional.
+
+All <field string> descriptions marked with * have the following format:
+
+field: <integer>, <field string>
+
+Where the value of <integer> is the position of the string in the <field
+string> description. Otherwise, <field string> will be "unknown".
+
+All <field strings> descriptions marked with # have the following format:
+
+field: <integer>
+<field strings>
+
+Where each string in <field strings> corresponds to one set bit of
+<integer>. The bit position is the position of the string in the <field
+strings> description.
+
+For a more detailed explanation of every field, please refer to the UEFI
+specification, version 2.3 or later, Appendix N: Common
+Platform Error Record.
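
For illustration only (the field values below are invented, not captured
from a real kernel log), a corrected single-bit ECC memory error could be
reported roughly as:

APEI generic hardware error status
severity: 2, corrected
section: 0, severity: 2, corrected
flags: 1
primary
fru_id: 00000000-0000-0000-0000-000000000000
fru_text: DIMM 0
section_type: memory error
physical_address: 0x00000000a0000000
node: 0
module: 1
error_type: 2, single-bit ECC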
index 6cbbd20..8c594c4 100644 (file)
@@ -248,6 +248,17 @@ Who:       Zhang Rui <rui.zhang@intel.com>
 
 ---------------------------
 
+What:  CONFIG_ACPI_PROCFS_POWER
+When:  2.6.39
+Why:   sysfs I/F for ACPI power devices, including AC and Battery,
+        has been working in the upstream kernel since 2.6.24, Sep 2007.
+       In 2.6.37, we made the sysfs I/F always built in and this option
+       disabled by default.
+       Remove this option and the ACPI power procfs interface in 2.6.39.
+Who:   Zhang Rui <rui.zhang@intel.com>
+
+---------------------------
+
 What:  /proc/acpi/button
 When:  August 2007
 Why:   /proc/acpi/button has been replaced by events to the input layer
index 55fe759..b72e071 100644 (file)
@@ -199,11 +199,6 @@ and is between 256 and 4096 characters. It is defined in the file
                        unusable.  The "log_buf_len" parameter may be useful
                        if you need to capture more output.
 
-       acpi_display_output=    [HW,ACPI]
-                       acpi_display_output=vendor
-                       acpi_display_output=video
-                       See above.
-
        acpi_irq_balance [HW,ACPI]
                        ACPI will balance active IRQs
                        default in APIC mode
index cb3d15b..b61e46f 100644 (file)
@@ -278,3 +278,15 @@ method, the sys I/F structure will be built like this:
     |---name:                  acpitz
     |---temp1_input:           37000
     |---temp1_crit:            100000
+
+4. Event Notification
+
+The framework includes a simple notification mechanism, in the form of a
+netlink event. Netlink socket initialization is done during the _init_
+of the framework. Drivers which intend to use the notification mechanism
+just need to call generate_netlink_event() with two arguments, namely
+(originator, event). Typically the originator will be an integer assigned
+to a thermal_zone_device when it registers itself with the framework. The
+event will be one of: {THERMAL_AUX0, THERMAL_AUX1, THERMAL_CRITICAL,
+THERMAL_DEV_FAULT}. Notification can be sent when the current temperature
+crosses any of the configured thresholds.
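
A minimal sketch of a driver-side call (not from this patch; it assumes
generate_netlink_event() takes the integer originator id and an event
enum as described above, and the MY_* names are illustrative):

#include <linux/thermal.h>

#define MY_TZ_ID	1		/* illustrative originator id */
#define MY_CRIT_TEMP	100000		/* illustrative threshold, millidegrees C */

static void my_report_temp(long temp)
{
	/* Notify user space once the critical threshold is crossed. */
	if (temp >= MY_CRIT_TEMP)
		generate_netlink_event(MY_TZ_ID, THERMAL_CRITICAL);
}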
index cc8335e..e5a6c35 100644 (file)
@@ -426,6 +426,11 @@ extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size)
 extern void iounmap (volatile void __iomem *addr);
 extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size);
 extern void early_iounmap (volatile void __iomem *addr, unsigned long size);
+static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned long size)
+{
+       return ioremap(phys_addr, size);
+}
+
 
 /*
  * String version of IO memory access ops:
index ec881c6..b3a7113 100644 (file)
@@ -509,6 +509,7 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
 
 int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
 {
index d6fb146..df20723 100644 (file)
@@ -234,6 +234,7 @@ unsigned __kprobes long oops_begin(void)
        bust_spinlocks(1);
        return flags;
 }
+EXPORT_SYMBOL_GPL(oops_begin);
 
 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
 {
index 0c2b7ef..294f26d 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/bootmem.h>
 #include <linux/pfn.h>
 #include <linux/suspend.h>
+#include <linux/acpi.h>
 #include <linux/firmware-map.h>
 #include <linux/memblock.h>
 
index 3f3489c..10c7ad5 100644 (file)
@@ -51,12 +51,7 @@ config ACPI_PROCFS
          For backwards compatibility, this option allows
          deprecated /proc/acpi/ files to exist, even when
          they have been replaced by functions in /sys.
-         The deprecated files (and their replacements) include:
 
-         /proc/acpi/processor/*/throttling (/sys/class/thermal/
-               cooling_device*/*)
-         /proc/acpi/video/*/brightness (/sys/class/backlight/)
-         /proc/acpi/thermal_zone/*/* (/sys/class/thermal/)
          This option has no effect on /proc/acpi/ files
          and functions which do not yet exist in /sys.
 
@@ -74,6 +69,8 @@ config ACPI_PROCFS_POWER
          /proc/acpi/ac_adapter/* (sys/class/power_supply/*)
          This option has no effect on /proc/acpi/ directories
          and functions, which do not yet exist in /sys
+         This option, together with the proc directories, will be
+         deleted in 2.6.39.
 
          Say N to delete power /proc/acpi/ directories that have moved to /sys/
 
@@ -209,6 +206,17 @@ config ACPI_PROCESSOR
 
          To compile this driver as a module, choose M here:
          the module will be called processor.
+config ACPI_IPMI
+       tristate "IPMI"
+       depends on EXPERIMENTAL && IPMI_SI && IPMI_HANDLER
+       default n
+       help
+         This driver enables ACPI to access the BMC controller. It uses
+         IPMI request/response messages to communicate with the BMC
+         controller, which can be found on the server.
+
+         To compile this driver as a module, choose M here:
+         the module will be called acpi_ipmi.
 
 config ACPI_HOTPLUG_CPU
        bool
index 3d031d0..d113fa5 100644 (file)
@@ -24,7 +24,7 @@ acpi-y                                += atomicio.o
 # sleep related files
 acpi-y                         += wakeup.o
 acpi-y                         += sleep.o
-acpi-$(CONFIG_ACPI_SLEEP)      += proc.o
+acpi-$(CONFIG_ACPI_SLEEP)      += proc.o nvs.o
 
 
 #
@@ -69,5 +69,6 @@ processor-y                   += processor_idle.o processor_thermal.o
 processor-$(CONFIG_CPU_FREQ)   += processor_perflib.o
 
 obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o
+obj-$(CONFIG_ACPI_IPMI)                += acpi_ipmi.o
 
 obj-$(CONFIG_ACPI_APEI)                += apei/
index 25d3aae..58c3f74 100644 (file)
@@ -197,7 +197,8 @@ static int acpi_ac_add_fs(struct acpi_device *device)
 {
        struct proc_dir_entry *entry = NULL;
 
-
+       printk(KERN_WARNING PREFIX "Deprecated procfs I/F for AC is loaded,"
+                       " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
        if (!acpi_device_dir(device)) {
                acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
                                                     acpi_ac_dir);
diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c
new file mode 100644 (file)
index 0000000..f40acef
--- /dev/null
@@ -0,0 +1,525 @@
+/*
+ *  acpi_ipmi.c - ACPI IPMI opregion
+ *
+ *  Copyright (C) 2010 Intel Corporation
+ *  Copyright (C) 2010 Zhao Yakui <yakui.zhao@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or (at
+ *  your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+#include <linux/ipmi.h>
+#include <linux/device.h>
+#include <linux/pnp.h>
+
+MODULE_AUTHOR("Zhao Yakui");
+MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
+MODULE_LICENSE("GPL");
+
+#define IPMI_FLAGS_HANDLER_INSTALL     0
+
+#define ACPI_IPMI_OK                   0
+#define ACPI_IPMI_TIMEOUT              0x10
+#define ACPI_IPMI_UNKNOWN              0x07
+/* the IPMI timeout is 5s */
+#define IPMI_TIMEOUT                   (5 * HZ)
+
+struct acpi_ipmi_device {
+       /* the device list attached to driver_data.ipmi_devices */
+       struct list_head head;
+       /* the IPMI request message list */
+       struct list_head tx_msg_list;
+       struct mutex    tx_msg_lock;
+       acpi_handle handle;
+       struct pnp_dev *pnp_dev;
+       ipmi_user_t     user_interface;
+       int ipmi_ifnum; /* IPMI interface number */
+       long curr_msgid;
+       unsigned long flags;
+       struct ipmi_smi_info smi_data;
+};
+
+struct ipmi_driver_data {
+       struct list_head        ipmi_devices;
+       struct ipmi_smi_watcher bmc_events;
+       struct ipmi_user_hndl   ipmi_hndlrs;
+       struct mutex            ipmi_lock;
+};
+
+struct acpi_ipmi_msg {
+       struct list_head head;
+       /*
+        * Generally speaking, the addr type should be SI_ADDR_TYPE and
+        * the addr channel should be BMC.
+        * In fact it can also be IPMB type, but then we would have to
+        * parse it from the Netfn command buffer; that is complex enough
+        * that it is skipped here.
+        */
+       struct ipmi_addr addr;
+       long tx_msgid;
+       /* it is used to track whether the IPMI message is finished */
+       struct completion tx_complete;
+       struct kernel_ipmi_msg tx_message;
+       int     msg_done;
+       /* tx data, copied from the ACPI object buffer */
+       u8      tx_data[64];
+       int     tx_len;
+       u8      rx_data[64];
+       int     rx_len;
+       struct acpi_ipmi_device *device;
+};
+
+/* IPMI request/response buffer per ACPI 4.0, sec 5.5.2.4.3.2 */
+struct acpi_ipmi_buffer {
+       u8 status;
+       u8 length;
+       u8 data[64];
+};
+
+static void ipmi_register_bmc(int iface, struct device *dev);
+static void ipmi_bmc_gone(int iface);
+static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data);
+static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device);
+static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device);
+
+static struct ipmi_driver_data driver_data = {
+       .ipmi_devices = LIST_HEAD_INIT(driver_data.ipmi_devices),
+       .bmc_events = {
+               .owner = THIS_MODULE,
+               .new_smi = ipmi_register_bmc,
+               .smi_gone = ipmi_bmc_gone,
+       },
+       .ipmi_hndlrs = {
+               .ipmi_recv_hndl = ipmi_msg_handler,
+       },
+};
+
+static struct acpi_ipmi_msg *acpi_alloc_ipmi_msg(struct acpi_ipmi_device *ipmi)
+{
+       struct acpi_ipmi_msg *ipmi_msg;
+       struct pnp_dev *pnp_dev = ipmi->pnp_dev;
+
+       ipmi_msg = kzalloc(sizeof(struct acpi_ipmi_msg), GFP_KERNEL);
+       if (!ipmi_msg)  {
+               dev_warn(&pnp_dev->dev, "Can't allocate memory for ipmi_msg\n");
+               return NULL;
+       }
+       init_completion(&ipmi_msg->tx_complete);
+       INIT_LIST_HEAD(&ipmi_msg->head);
+       ipmi_msg->device = ipmi;
+       return ipmi_msg;
+}
+
+#define                IPMI_OP_RGN_NETFN(offset)       ((offset >> 8) & 0xff)
+#define                IPMI_OP_RGN_CMD(offset)         (offset & 0xff)
+static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
+                               acpi_physical_address address,
+                               acpi_integer *value)
+{
+       struct kernel_ipmi_msg *msg;
+       struct acpi_ipmi_buffer *buffer;
+       struct acpi_ipmi_device *device;
+
+       msg = &tx_msg->tx_message;
+       /*
+        * IPMI network function and command are encoded in the address
+        * within the IPMI OpRegion; see ACPI 4.0, sec 5.5.2.4.3.
+        */
+       msg->netfn = IPMI_OP_RGN_NETFN(address);
+       msg->cmd = IPMI_OP_RGN_CMD(address);
+       msg->data = tx_msg->tx_data;
+       /*
+        * value is the parameter passed by the IPMI opregion space handler.
+        * It points to the IPMI request message buffer
+        */
+       buffer = (struct acpi_ipmi_buffer *)value;
+       /* copy the tx message data */
+       msg->data_len = buffer->length;
+       memcpy(tx_msg->tx_data, buffer->data, msg->data_len);
+       /*
+        * For now the default type is SYSTEM_INTERFACE and the channel type is
+        * BMC. If the netfn is APP_REQUEST and the cmd is SEND_MESSAGE,
+        * the addr type should be changed to IPMB, and we would then have to
+        * parse the IPMI request message buffer to get the IPMB address.
+        * If so, please fix me.
+        */
+       tx_msg->addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+       tx_msg->addr.channel = IPMI_BMC_CHANNEL;
+       tx_msg->addr.data[0] = 0;
+
+       /* Get the msgid */
+       device = tx_msg->device;
+       mutex_lock(&device->tx_msg_lock);
+       device->curr_msgid++;
+       tx_msg->tx_msgid = device->curr_msgid;
+       mutex_unlock(&device->tx_msg_lock);
+}
+
+static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
+               acpi_integer *value, int rem_time)
+{
+       struct acpi_ipmi_buffer *buffer;
+
+       /*
+        * value is also used as output parameter. It represents the response
+        * IPMI message returned by IPMI command.
+        */
+       buffer = (struct acpi_ipmi_buffer *)value;
+       if (!rem_time && !msg->msg_done) {
+               buffer->status = ACPI_IPMI_TIMEOUT;
+               return;
+       }
+       /*
+        * If the msg_done flag is not set or the receive length is zero,
+        * the IPMI command was not executed correctly.
+        * The status code will be ACPI_IPMI_UNKNOWN.
+        */
+       if (!msg->msg_done || !msg->rx_len) {
+               buffer->status = ACPI_IPMI_UNKNOWN;
+               return;
+       }
+       /*
+        * If the IPMI response message is obtained correctly, the status code
+        * will be ACPI_IPMI_OK
+        */
+       buffer->status = ACPI_IPMI_OK;
+       buffer->length = msg->rx_len;
+       memcpy(buffer->data, msg->rx_data, msg->rx_len);
+}
+
+static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi)
+{
+       struct acpi_ipmi_msg *tx_msg, *temp;
+       int count = HZ / 10;
+       struct pnp_dev *pnp_dev = ipmi->pnp_dev;
+
+       list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) {
+               /* wake up the sleep thread on the Tx msg */
+               complete(&tx_msg->tx_complete);
+       }
+
+       /* wait for about 100ms to flush the tx message list */
+       while (count--) {
+               if (list_empty(&ipmi->tx_msg_list))
+                       break;
+               schedule_timeout(1);
+       }
+       if (!list_empty(&ipmi->tx_msg_list))
+               dev_warn(&pnp_dev->dev, "tx msg list is not NULL\n");
+}
+
+static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
+{
+       struct acpi_ipmi_device *ipmi_device = user_msg_data;
+       int msg_found = 0;
+       struct acpi_ipmi_msg *tx_msg;
+       struct pnp_dev *pnp_dev = ipmi_device->pnp_dev;
+
+       if (msg->user != ipmi_device->user_interface) {
+               dev_warn(&pnp_dev->dev, "Unexpected response is returned. "
+                       "returned user %p, expected user %p\n",
+                       msg->user, ipmi_device->user_interface);
+               ipmi_free_recv_msg(msg);
+               return;
+       }
+       mutex_lock(&ipmi_device->tx_msg_lock);
+       list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) {
+               if (msg->msgid == tx_msg->tx_msgid) {
+                       msg_found = 1;
+                       break;
+               }
+       }
+
+       mutex_unlock(&ipmi_device->tx_msg_lock);
+       if (!msg_found) {
+               dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is "
+                       "returned.\n", msg->msgid);
+               ipmi_free_recv_msg(msg);
+               return;
+       }
+
+       if (msg->msg.data_len) {
+               /* copy the response data to Rx_data buffer */
+               memcpy(tx_msg->rx_data, msg->msg_data, msg->msg.data_len);
+               tx_msg->rx_len = msg->msg.data_len;
+               tx_msg->msg_done = 1;
+       }
+       complete(&tx_msg->tx_complete);
+       ipmi_free_recv_msg(msg);
+};
+
+static void ipmi_register_bmc(int iface, struct device *dev)
+{
+       struct acpi_ipmi_device *ipmi_device, *temp;
+       struct pnp_dev *pnp_dev;
+       ipmi_user_t             user;
+       int err;
+       struct ipmi_smi_info smi_data;
+       acpi_handle handle;
+
+       err = ipmi_get_smi_info(iface, &smi_data);
+
+       if (err)
+               return;
+
+       if (smi_data.addr_src != SI_ACPI) {
+               put_device(smi_data.dev);
+               return;
+       }
+
+       handle = smi_data.addr_info.acpi_info.acpi_handle;
+
+       mutex_lock(&driver_data.ipmi_lock);
+       list_for_each_entry(temp, &driver_data.ipmi_devices, head) {
+               /*
+                * if the corresponding ACPI handle is already added
+                * to the device list, don't add it again.
+                */
+               if (temp->handle == handle)
+                       goto out;
+       }
+
+       ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL);
+
+       if (!ipmi_device)
+               goto out;
+
+       pnp_dev = to_pnp_dev(smi_data.dev);
+       ipmi_device->handle = handle;
+       ipmi_device->pnp_dev = pnp_dev;
+
+       err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs,
+                                       ipmi_device, &user);
+       if (err) {
+               dev_warn(&pnp_dev->dev, "Can't create IPMI user interface\n");
+               kfree(ipmi_device);
+               goto out;
+       }
+       acpi_add_ipmi_device(ipmi_device);
+       ipmi_device->user_interface = user;
+       ipmi_device->ipmi_ifnum = iface;
+       mutex_unlock(&driver_data.ipmi_lock);
+       memcpy(&ipmi_device->smi_data, &smi_data, sizeof(struct ipmi_smi_info));
+       return;
+
+out:
+       mutex_unlock(&driver_data.ipmi_lock);
+       put_device(smi_data.dev);
+       return;
+}
+
+static void ipmi_bmc_gone(int iface)
+{
+       struct acpi_ipmi_device *ipmi_device, *temp;
+
+       mutex_lock(&driver_data.ipmi_lock);
+       list_for_each_entry_safe(ipmi_device, temp,
+                               &driver_data.ipmi_devices, head) {
+               if (ipmi_device->ipmi_ifnum != iface)
+                       continue;
+
+               acpi_remove_ipmi_device(ipmi_device);
+               put_device(ipmi_device->smi_data.dev);
+               kfree(ipmi_device);
+               break;
+       }
+       mutex_unlock(&driver_data.ipmi_lock);
+}
+/* --------------------------------------------------------------------------
+ *                     Address Space Management
+ * -------------------------------------------------------------------------- */
+/*
+ * This is the IPMI opregion space handler.
+ * @function: indicates read or write. In fact, since the IPMI message
+ * exchange is command driven, only write is meaningful.
+ * @address: This contains the netfn/command of IPMI request message.
+ * @bits   : not used.
+ * @value  : it is an in/out parameter. It points to the IPMI message buffer.
+ *          Before the IPMI message is sent, it represents the actual request
+ *          IPMI message. After the IPMI message is finished, it represents
+ *          the response IPMI message returned by IPMI command.
+ * @handler_context: IPMI device context.
+ */
+
+static acpi_status
+acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
+                     u32 bits, acpi_integer *value,
+                     void *handler_context, void *region_context)
+{
+       struct acpi_ipmi_msg *tx_msg;
+       struct acpi_ipmi_device *ipmi_device = handler_context;
+       int err, rem_time;
+       acpi_status status;
+       /*
+        * IPMI opregion message.
+        * The IPMI message is first written to the BMC and system software
+        * then gets the response, so a read access to the IPMI opregion
+        * is meaningless.
+        */
+       if ((function & ACPI_IO_MASK) == ACPI_READ)
+               return AE_TYPE;
+
+       if (!ipmi_device->user_interface)
+               return AE_NOT_EXIST;
+
+       tx_msg = acpi_alloc_ipmi_msg(ipmi_device);
+       if (!tx_msg)
+               return AE_NO_MEMORY;
+
+       acpi_format_ipmi_msg(tx_msg, address, value);
+       mutex_lock(&ipmi_device->tx_msg_lock);
+       list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
+       mutex_unlock(&ipmi_device->tx_msg_lock);
+       err = ipmi_request_settime(ipmi_device->user_interface,
+                                       &tx_msg->addr,
+                                       tx_msg->tx_msgid,
+                                       &tx_msg->tx_message,
+                                       NULL, 0, 0, 0);
+       if (err) {
+               status = AE_ERROR;
+               goto end_label;
+       }
+       rem_time = wait_for_completion_timeout(&tx_msg->tx_complete,
+                                       IPMI_TIMEOUT);
+       acpi_format_ipmi_response(tx_msg, value, rem_time);
+       status = AE_OK;
+
+end_label:
+       mutex_lock(&ipmi_device->tx_msg_lock);
+       list_del(&tx_msg->head);
+       mutex_unlock(&ipmi_device->tx_msg_lock);
+       kfree(tx_msg);
+       return status;
+}
+
+static void ipmi_remove_space_handler(struct acpi_ipmi_device *ipmi)
+{
+       if (!test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags))
+               return;
+
+       acpi_remove_address_space_handler(ipmi->handle,
+                               ACPI_ADR_SPACE_IPMI, &acpi_ipmi_space_handler);
+
+       clear_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags);
+}
+
+static int ipmi_install_space_handler(struct acpi_ipmi_device *ipmi)
+{
+       acpi_status status;
+
+       if (test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags))
+               return 0;
+
+       status = acpi_install_address_space_handler(ipmi->handle,
+                                                   ACPI_ADR_SPACE_IPMI,
+                                                   &acpi_ipmi_space_handler,
+                                                   NULL, ipmi);
+       if (ACPI_FAILURE(status)) {
+               struct pnp_dev *pnp_dev = ipmi->pnp_dev;
+               dev_warn(&pnp_dev->dev, "Can't register IPMI opregion space "
+                       "handle\n");
+               return -EINVAL;
+       }
+       set_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags);
+       return 0;
+}
+
+static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device)
+{
+
+       INIT_LIST_HEAD(&ipmi_device->head);
+
+       mutex_init(&ipmi_device->tx_msg_lock);
+       INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
+       ipmi_install_space_handler(ipmi_device);
+
+       list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices);
+}
+
+static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device)
+{
+       /*
+        * If the IPMI user interface is created, it should be
+        * destroyed.
+        */
+       if (ipmi_device->user_interface) {
+               ipmi_destroy_user(ipmi_device->user_interface);
+               ipmi_device->user_interface = NULL;
+       }
+       /* flush the Tx_msg list */
+       if (!list_empty(&ipmi_device->tx_msg_list))
+               ipmi_flush_tx_msg(ipmi_device);
+
+       list_del(&ipmi_device->head);
+       ipmi_remove_space_handler(ipmi_device);
+}
+
+static int __init acpi_ipmi_init(void)
+{
+       int result = 0;
+
+       if (acpi_disabled)
+               return result;
+
+       mutex_init(&driver_data.ipmi_lock);
+
+       result = ipmi_smi_watcher_register(&driver_data.bmc_events);
+
+       return result;
+}
+
+static void __exit acpi_ipmi_exit(void)
+{
+       struct acpi_ipmi_device *ipmi_device, *temp;
+
+       if (acpi_disabled)
+               return;
+
+       ipmi_smi_watcher_unregister(&driver_data.bmc_events);
+
+       /*
+        * When an smi_watcher is unregistered, it is only deleted from
+        * the smi_watcher list; the smi_gone callback function is not
+        * called. So explicitly uninstall the ACPI IPMI opregion handler
+        * and free the device here.
+        */
+       mutex_lock(&driver_data.ipmi_lock);
+       list_for_each_entry_safe(ipmi_device, temp,
+                               &driver_data.ipmi_devices, head) {
+               acpi_remove_ipmi_device(ipmi_device);
+               put_device(ipmi_device->smi_data.dev);
+               kfree(ipmi_device);
+       }
+       mutex_unlock(&driver_data.ipmi_lock);
+}
+
+module_init(acpi_ipmi_init);
+module_exit(acpi_ipmi_exit);
index a7e1d1a..eec2ead 100644 (file)
@@ -14,7 +14,7 @@ acpi-y := dsfield.o   dsmthdat.o  dsopcode.o  dswexec.o  dswscope.o \
 
 acpi-y += evevent.o  evregion.o  evsci.o    evxfevnt.o \
         evmisc.o   evrgnini.o  evxface.o  evxfregn.o \
-        evgpe.o    evgpeblk.o evgpeinit.o  evgpeutil.o
+        evgpe.o    evgpeblk.o evgpeinit.o  evgpeutil.o evxfgpe.o
 
 acpi-y += exconfig.o  exfield.o  exnames.o   exoparg6.o  exresolv.o  exstorob.o\
         exconvrt.o  exfldio.o  exoparg1.o  exprep.o    exresop.o   exsystem.o\
index a6f99cc..70e0b28 100644 (file)
@@ -51,8 +51,6 @@ acpi_status acpi_ev_initialize_events(void);
 
 acpi_status acpi_ev_install_xrupt_handlers(void);
 
-acpi_status acpi_ev_install_fadt_gpes(void);
-
 u32 acpi_ev_fixed_event_detect(void);
 
 /*
@@ -82,9 +80,9 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info);
 
 acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info);
 
-acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info);
+acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info);
 
-acpi_status acpi_raw_disable_gpe(struct acpi_gpe_event_info *gpe_event_info);
+acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info);
 
 struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
                                                       u32 gpe_number);
@@ -93,6 +91,8 @@ struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
                                                     struct acpi_gpe_block_info
                                                     *gpe_block);
 
+acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info);
+
 /*
  * evgpeblk - Upper-level GPE block support
  */
@@ -107,12 +107,13 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
 acpi_status
 acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
                             struct acpi_gpe_block_info *gpe_block,
-                            void *ignored);
+                            void *context);
 
 acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block);
 
 u32
-acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info,
+acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
+                    struct acpi_gpe_event_info *gpe_event_info,
                     u32 gpe_number);
 
 /*
@@ -126,10 +127,6 @@ acpi_status
 acpi_ev_match_gpe_method(acpi_handle obj_handle,
                         u32 level, void *context, void **return_value);
 
-acpi_status
-acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
-                         u32 level, void *context, void **return_value);
-
 /*
  * evgpeutil - GPE utilities
  */
@@ -138,6 +135,10 @@ acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context);
 
 u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info);
 
+acpi_status
+acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+                      struct acpi_gpe_block_info *gpe_block, void *context);
+
 struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number);
 
 acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);
index ad88fca..9bb69c5 100644 (file)
@@ -146,6 +146,9 @@ u8 acpi_gbl_system_awake_and_running;
 
 extern u32 acpi_gbl_nesting_level;
 
+ACPI_EXTERN u32 acpi_gpe_count;
+ACPI_EXTERN u32 acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS];
+
 /* Support for dynamic control method tracing mechanism */
 
 ACPI_EXTERN u32 acpi_gbl_original_dbg_level;
@@ -370,7 +373,9 @@ ACPI_EXTERN struct acpi_fixed_event_handler
 ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
 ACPI_EXTERN struct acpi_gpe_block_info
 *acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
-ACPI_EXTERN u8 acpi_all_gpes_initialized;
+ACPI_EXTERN u8 acpi_gbl_all_gpes_initialized;
+ACPI_EXTERN ACPI_GBL_EVENT_HANDLER acpi_gbl_global_event_handler;
+ACPI_EXTERN void *acpi_gbl_global_event_handler_context;
 
 /*****************************************************************************
  *
index 167470a..258d628 100644 (file)
@@ -94,7 +94,7 @@ u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info,
                             struct acpi_gpe_register_info *gpe_register_info);
 
 acpi_status
-acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action);
+acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action);
 
 acpi_status
 acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
index 2ceb0c0..74000f5 100644 (file)
@@ -408,17 +408,18 @@ struct acpi_predefined_data {
 
 /* Dispatch info for each GPE -- either a method or handler, cannot be both */
 
-struct acpi_handler_info {
-       acpi_event_handler address;     /* Address of handler, if any */
+struct acpi_gpe_handler_info {
+       acpi_gpe_handler address;       /* Address of handler, if any */
        void *context;          /* Context to be passed to handler */
        struct acpi_namespace_node *method_node;        /* Method node for this GPE level (saved) */
-       u8 orig_flags;          /* Original misc info about this GPE */
-       u8 orig_enabled;        /* Set if the GPE was originally enabled */
+       u8 original_flags;      /* Original (pre-handler) GPE info */
+       u8 originally_enabled;  /* True if GPE was originally enabled */
 };
 
 union acpi_gpe_dispatch_info {
        struct acpi_namespace_node *method_node;        /* Method node for this GPE level */
-       struct acpi_handler_info *handler;
+       struct acpi_gpe_handler_info *handler;  /* Installed GPE handler */
+       struct acpi_namespace_node *device_node;        /* Parent _PRW device for implicit notify */
 };
 
 /*
@@ -458,7 +459,7 @@ struct acpi_gpe_block_info {
        u32 register_count;     /* Number of register pairs in block */
        u16 gpe_count;          /* Number of individual GPEs in block */
        u8 block_base_number;   /* Base GPE number for this block */
-       u8 initialized;         /* If set, the GPE block has been initialized */
+       u8 initialized;         /* TRUE if this block is initialized */
 };
 
 /* Information about GPE interrupt handlers, one per each interrupt level used for GPEs */
index c61c303..e5e313c 100644 (file)
@@ -217,9 +217,17 @@ u32 acpi_ev_fixed_event_detect(void)
                     status_bit_mask)
                    && (fixed_enable & acpi_gbl_fixed_event_info[i].
                        enable_bit_mask)) {
+                       /*
+                        * Found an active (signalled) event. Invoke global event
+                        * handler if present.
+                        */
+                       acpi_fixed_event_count[i]++;
+                       if (acpi_gbl_global_event_handler) {
+                               acpi_gbl_global_event_handler
+                                   (ACPI_EVENT_TYPE_FIXED, NULL, i,
+                                    acpi_gbl_global_event_handler_context);
+                       }
 
-                       /* Found an active (signalled) event */
-                       acpi_os_fixed_event_count(i);
                        int_status |= acpi_ev_fixed_event_dispatch(i);
                }
        }
index f226eac..7c339d3 100644 (file)
@@ -52,6 +52,8 @@ ACPI_MODULE_NAME("evgpe")
 /* Local prototypes */
 static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
 
+static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context);
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ev_update_gpe_enable_mask
@@ -102,7 +104,7 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
  *
  * RETURN:      Status
  *
- * DESCRIPTION: Clear the given GPE from stale events and enable it.
+ * DESCRIPTION: Clear a GPE of stale events and enable it.
  *
  ******************************************************************************/
 acpi_status
@@ -113,12 +115,13 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
        ACPI_FUNCTION_TRACE(ev_enable_gpe);
 
        /*
-        * We will only allow a GPE to be enabled if it has either an
-        * associated method (_Lxx/_Exx) or a handler. Otherwise, the
-        * GPE will be immediately disabled by acpi_ev_gpe_dispatch the
-        * first time it fires.
+        * We will only allow a GPE to be enabled if it has either an associated
+        * method (_Lxx/_Exx) or a handler, or is using the implicit notify
+        * feature. Otherwise, the GPE will be immediately disabled by
+        * acpi_ev_gpe_dispatch the first time it fires.
         */
-       if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) {
+       if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+           ACPI_GPE_DISPATCH_NONE) {
                return_ACPI_STATUS(AE_NO_HANDLER);
        }
 
@@ -137,9 +140,9 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_raw_enable_gpe
+ * FUNCTION:    acpi_ev_add_gpe_reference
  *
- * PARAMETERS:  gpe_event_info  - GPE to enable
+ * PARAMETERS:  gpe_event_info          - Add a reference to this GPE
  *
  * RETURN:      Status
  *
@@ -148,16 +151,21 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
  *
  ******************************************************************************/
 
-acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
+acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
 {
        acpi_status status = AE_OK;
 
+       ACPI_FUNCTION_TRACE(ev_add_gpe_reference);
+
        if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) {
                return_ACPI_STATUS(AE_LIMIT);
        }
 
        gpe_event_info->runtime_count++;
        if (gpe_event_info->runtime_count == 1) {
+
+               /* Enable on first reference */
+
                status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
                if (ACPI_SUCCESS(status)) {
                        status = acpi_ev_enable_gpe(gpe_event_info);
@@ -173,9 +181,9 @@ acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_raw_disable_gpe
+ * FUNCTION:    acpi_ev_remove_gpe_reference
  *
- * PARAMETERS:  gpe_event_info  - GPE to disable
+ * PARAMETERS:  gpe_event_info          - Remove a reference to this GPE
  *
  * RETURN:      Status
  *
@@ -184,16 +192,21 @@ acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
  *
  ******************************************************************************/
 
-acpi_status acpi_raw_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
+acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
 {
        acpi_status status = AE_OK;
 
+       ACPI_FUNCTION_TRACE(ev_remove_gpe_reference);
+
        if (!gpe_event_info->runtime_count) {
                return_ACPI_STATUS(AE_LIMIT);
        }
 
        gpe_event_info->runtime_count--;
        if (!gpe_event_info->runtime_count) {
+
+               /* Disable on last reference */
+
                status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
                if (ACPI_SUCCESS(status)) {
                        status = acpi_hw_low_set_gpe(gpe_event_info,
@@ -379,7 +392,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
                        }
 
                        ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
-                                         "Read GPE Register at GPE%X: Status=%02X, Enable=%02X\n",
+                                         "Read GPE Register at GPE%02X: Status=%02X, Enable=%02X\n",
                                          gpe_register_info->base_gpe_number,
                                          status_reg, enable_reg));
 
@@ -405,7 +418,9 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
                                         * or method.
                                         */
                                        int_status |=
-                                           acpi_ev_gpe_dispatch(&gpe_block->
+                                           acpi_ev_gpe_dispatch(gpe_block->
+                                                                node,
+                                                                &gpe_block->
                                                event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number);
                                }
                        }
@@ -435,17 +450,25 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
  *              an interrupt handler.
  *
  ******************************************************************************/
-static void acpi_ev_asynch_enable_gpe(void *context);
 
 static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
 {
-       struct acpi_gpe_event_info *gpe_event_info = (void *)context;
+       struct acpi_gpe_event_info *gpe_event_info = context;
        acpi_status status;
-       struct acpi_gpe_event_info local_gpe_event_info;
+       struct acpi_gpe_event_info *local_gpe_event_info;
        struct acpi_evaluate_info *info;
 
        ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
 
+       /* Allocate a local GPE block */
+
+       local_gpe_event_info =
+           ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_event_info));
+       if (!local_gpe_event_info) {
+               ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY, "while handling a GPE"));
+               return_VOID;
+       }
+
        status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
        if (ACPI_FAILURE(status)) {
                return_VOID;
@@ -462,7 +485,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
         * Take a snapshot of the GPE info for this level - we copy the info to
         * prevent a race condition with remove_handler/remove_block.
         */
-       ACPI_MEMCPY(&local_gpe_event_info, gpe_event_info,
+       ACPI_MEMCPY(local_gpe_event_info, gpe_event_info,
                    sizeof(struct acpi_gpe_event_info));
 
        status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
@@ -470,12 +493,26 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
                return_VOID;
        }
 
-       /*
-        * Must check for control method type dispatch one more time to avoid a
-        * race with ev_gpe_install_handler
-        */
-       if ((local_gpe_event_info.flags & ACPI_GPE_DISPATCH_MASK) ==
-           ACPI_GPE_DISPATCH_METHOD) {
+       /* Do the correct dispatch - normal method or implicit notify */
+
+       switch (local_gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
+       case ACPI_GPE_DISPATCH_NOTIFY:
+
+               /*
+                * Implicit notify.
+                * Dispatch a DEVICE_WAKE notify to the appropriate handler.
+                * NOTE: the request is queued for execution after this method
+                * completes. The notify handlers are NOT invoked synchronously
+                * from this thread -- because handlers may in turn run other
+                * control methods.
+                */
+               status =
+                   acpi_ev_queue_notify_request(local_gpe_event_info->dispatch.
+                                                device_node,
+                                                ACPI_NOTIFY_DEVICE_WAKE);
+               break;
+
+       case ACPI_GPE_DISPATCH_METHOD:
 
                /* Allocate the evaluation information block */
 
@@ -488,7 +525,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
                         * control method that corresponds to this GPE
                         */
                        info->prefix_node =
-                           local_gpe_event_info.dispatch.method_node;
+                           local_gpe_event_info->dispatch.method_node;
                        info->flags = ACPI_IGNORE_RETURN_VALUE;
 
                        status = acpi_ns_evaluate(info);
@@ -499,46 +536,98 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
                        ACPI_EXCEPTION((AE_INFO, status,
                                        "while evaluating GPE method [%4.4s]",
                                        acpi_ut_get_node_name
-                                       (local_gpe_event_info.dispatch.
+                                       (local_gpe_event_info->dispatch.
                                         method_node)));
                }
+
+               break;
+
+       default:
+               return_VOID;    /* Should never happen */
        }
+
        /* Defer enabling of GPE until all notify handlers are done */
-       acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_asynch_enable_gpe,
-                               gpe_event_info);
+
+       status = acpi_os_execute(OSL_NOTIFY_HANDLER,
+                                acpi_ev_asynch_enable_gpe,
+                                local_gpe_event_info);
+       if (ACPI_FAILURE(status)) {
+               ACPI_FREE(local_gpe_event_info);
+       }
        return_VOID;
 }
 
-static void acpi_ev_asynch_enable_gpe(void *context)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_asynch_enable_gpe
+ *
+ * PARAMETERS:  Context (gpe_event_info) - Info for this GPE
+ *              Callback from acpi_os_execute
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Asynchronous clear/enable for GPE. This allows the GPE to
+ *              complete (i.e., finish execution of Notify)
+ *
+ ******************************************************************************/
+
+static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context)
 {
        struct acpi_gpe_event_info *gpe_event_info = context;
+
+       (void)acpi_ev_finish_gpe(gpe_event_info);
+
+       ACPI_FREE(gpe_event_info);
+       return;
+}
+
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_finish_gpe
+ *
+ * PARAMETERS:  gpe_event_info      - Info for this GPE
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Clear/Enable a GPE. Common code that is used after execution
+ *              of a GPE method or a synchronous or asynchronous GPE handler.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info)
+{
        acpi_status status;
+
        if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
            ACPI_GPE_LEVEL_TRIGGERED) {
                /*
-                * GPE is level-triggered, we clear the GPE status bit after handling
-                * the event.
+                * GPE is level-triggered, we clear the GPE status bit after
+                * handling the event.
                 */
                status = acpi_hw_clear_gpe(gpe_event_info);
                if (ACPI_FAILURE(status)) {
-                       return_VOID;
+                       return (status);
                }
        }
 
        /*
-        * Enable this GPE, conditionally. This means that the GPE will only be
-        * physically enabled if the enable_for_run bit is set in the event_info
+        * Enable this GPE, conditionally. This means that the GPE will
+        * only be physically enabled if the enable_for_run bit is set
+        * in the event_info.
         */
-       (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_COND_ENABLE);
-
-       return_VOID;
+       (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE);
+       return (AE_OK);
 }
 
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ev_gpe_dispatch
  *
- * PARAMETERS:  gpe_event_info  - Info for this GPE
+ * PARAMETERS:  gpe_device      - Device node. NULL for GPE0/GPE1
+ *              gpe_event_info  - Info for this GPE
  *              gpe_number      - Number relative to the parent GPE block
  *
  * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
@@ -551,13 +640,22 @@ static void acpi_ev_asynch_enable_gpe(void *context)
  ******************************************************************************/
 
 u32
-acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
+acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
+                   struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
 {
        acpi_status status;
+       u32 return_value;
 
        ACPI_FUNCTION_TRACE(ev_gpe_dispatch);
 
-       acpi_os_gpe_count(gpe_number);
+       /* Invoke global event handler if present */
+
+       acpi_gpe_count++;
+       if (acpi_gbl_global_event_handler) {
+               acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE, gpe_device,
+                                             gpe_number,
+                                             acpi_gbl_global_event_handler_context);
+       }
 
        /*
         * If edge-triggered, clear the GPE status bit now. Note that
@@ -568,59 +666,55 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
                status = acpi_hw_clear_gpe(gpe_event_info);
                if (ACPI_FAILURE(status)) {
                        ACPI_EXCEPTION((AE_INFO, status,
-                                       "Unable to clear GPE[0x%2X]",
-                                       gpe_number));
+                                       "Unable to clear GPE%02X", gpe_number));
                        return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
                }
        }
 
        /*
-        * Dispatch the GPE to either an installed handler, or the control method
-        * associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke
-        * it and do not attempt to run the method. If there is neither a handler
-        * nor a method, we disable this GPE to prevent further such pointless
-        * events from firing.
+        * Always disable the GPE so that it does not keep firing before
+        * any asynchronous activity completes (either from the execution
+        * of a GPE method or an asynchronous GPE handler.)
+        *
+        * If there is no handler or method to run, just disable the
+        * GPE and leave it disabled permanently to prevent further such
+        * pointless events from firing.
+        */
+       status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
+       if (ACPI_FAILURE(status)) {
+               ACPI_EXCEPTION((AE_INFO, status,
+                               "Unable to disable GPE%02X", gpe_number));
+               return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
+       }
+
+       /*
+        * Dispatch the GPE to either an installed handler or the control
+        * method associated with this GPE (_Lxx or _Exx). If a handler
+        * exists, we invoke it and do not attempt to run the method.
+        * If there is neither a handler nor a method, leave the GPE
+        * disabled.
         */
        switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
        case ACPI_GPE_DISPATCH_HANDLER:
 
-               /*
-                * Invoke the installed handler (at interrupt level)
-                * Ignore return status for now.
-                * TBD: leave GPE disabled on error?
-                */
-               (void)gpe_event_info->dispatch.handler->address(gpe_event_info->
-                                                               dispatch.
-                                                               handler->
-                                                               context);
+               /* Invoke the installed handler (at interrupt level) */
 
-               /* It is now safe to clear level-triggered events. */
+               return_value =
+                   gpe_event_info->dispatch.handler->address(gpe_device,
+                                                             gpe_number,
+                                                             gpe_event_info->
+                                                             dispatch.handler->
+                                                             context);
 
-               if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
-                   ACPI_GPE_LEVEL_TRIGGERED) {
-                       status = acpi_hw_clear_gpe(gpe_event_info);
-                       if (ACPI_FAILURE(status)) {
-                               ACPI_EXCEPTION((AE_INFO, status,
-                                       "Unable to clear GPE[0x%2X]",
-                                               gpe_number));
-                               return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
-                       }
+               /* If requested, clear (if level-triggered) and reenable the GPE */
+
+               if (return_value & ACPI_REENABLE_GPE) {
+                       (void)acpi_ev_finish_gpe(gpe_event_info);
                }
                break;
 
        case ACPI_GPE_DISPATCH_METHOD:
-
-               /*
-                * Disable the GPE, so it doesn't keep firing before the method has a
-                * chance to run (it runs asynchronously with interrupts enabled).
-                */
-               status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
-               if (ACPI_FAILURE(status)) {
-                       ACPI_EXCEPTION((AE_INFO, status,
-                                       "Unable to disable GPE[0x%2X]",
-                                       gpe_number));
-                       return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
-               }
+       case ACPI_GPE_DISPATCH_NOTIFY:
 
                /*
                 * Execute the method associated with the GPE
@@ -631,7 +725,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
                                         gpe_event_info);
                if (ACPI_FAILURE(status)) {
                        ACPI_EXCEPTION((AE_INFO, status,
-                                       "Unable to queue handler for GPE[0x%2X] - event disabled",
+                                       "Unable to queue handler for GPE%2X - event disabled",
                                        gpe_number));
                }
                break;
@@ -644,20 +738,9 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
                 * a GPE to be enabled if it has no handler or method.
                 */
                ACPI_ERROR((AE_INFO,
-                           "No handler or method for GPE[0x%2X], disabling event",
+                           "No handler or method for GPE%02X, disabling event",
                            gpe_number));
 
-               /*
-                * Disable the GPE. The GPE will remain disabled until a handler
-                * is installed or ACPICA is restarted.
-                */
-               status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
-               if (ACPI_FAILURE(status)) {
-                       ACPI_EXCEPTION((AE_INFO, status,
-                                       "Unable to disable GPE[0x%2X]",
-                                       gpe_number));
-                       return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
-               }
                break;
        }
 
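As a usage illustration (not part of this patch), a minimal sketch of a driver-side handler written against the new three-argument GPE handler prototype visible in the dispatch code above. struct example_dev, example_ack_hw() and the GPE number 0x16 are hypothetical; ACPI_REENABLE_GPE is the return flag tested by acpi_ev_gpe_dispatch(), and registration uses the reworked acpi_install_gpe_handler() shown further down.

#include <acpi/acpi.h>

struct example_dev;					/* hypothetical driver state */
static void example_ack_hw(struct example_dev *ed);	/* hypothetical helper */

static u32 example_gpe_handler(acpi_handle gpe_device, u32 gpe_number,
			       void *context)
{
	struct example_dev *ed = context;

	example_ack_hw(ed);	/* quiesce the event source at the device */

	/* Ask ACPICA to clear (if level-triggered) and re-enable the GPE */
	return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
}

static acpi_status example_install(struct example_dev *ed)
{
	/* NULL gpe_device selects the FADT GPE blocks; 0x16 is arbitrary */
	return acpi_install_gpe_handler(NULL, 0x16, ACPI_GPE_LEVEL_TRIGGERED,
					example_gpe_handler, ed);
}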
index 020add3..9acb869 100644 (file)
@@ -361,9 +361,9 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
 
        gpe_block->node = gpe_device;
        gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH);
+       gpe_block->initialized = FALSE;
        gpe_block->register_count = register_count;
        gpe_block->block_base_number = gpe_block_base_number;
-       gpe_block->initialized = FALSE;
 
        ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address,
                    sizeof(struct acpi_generic_address));
@@ -386,7 +386,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
                return_ACPI_STATUS(status);
        }
 
-       acpi_all_gpes_initialized = FALSE;
+       acpi_gbl_all_gpes_initialized = FALSE;
 
        /* Find all GPE methods (_Lxx or_Exx) for this block */
 
@@ -423,14 +423,12 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
  *
  * FUNCTION:    acpi_ev_initialize_gpe_block
  *
- * PARAMETERS:  gpe_device          - Handle to the parent GPE block
- *              gpe_block           - Gpe Block info
+ * PARAMETERS:  acpi_gpe_callback
  *
  * RETURN:      Status
  *
- * DESCRIPTION: Initialize and enable a GPE block. First find and run any
- *              _PRT methods associated with the block, then enable the
- *              appropriate GPEs.
+ * DESCRIPTION: Initialize and enable a GPE block. Enable GPEs that have
+ *              associated methods.
  *              Note: Assumes namespace is locked.
  *
  ******************************************************************************/
@@ -450,8 +448,8 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
        ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);
 
        /*
-        * Ignore a null GPE block (e.g., if no GPE block 1 exists) and
-        * GPE blocks that have been initialized already.
+        * Ignore a null GPE block (e.g., if no GPE block 1 exists), and
+        * any GPE blocks that have been initialized already.
         */
        if (!gpe_block || gpe_block->initialized) {
                return_ACPI_STATUS(AE_OK);
@@ -459,8 +457,8 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
 
        /*
         * Enable all GPEs that have a corresponding method and have the
-        * ACPI_GPE_CAN_WAKE flag unset.  Any other GPEs within this block must
-        * be enabled via the acpi_enable_gpe() interface.
+        * ACPI_GPE_CAN_WAKE flag unset. Any other GPEs within this block
+        * must be enabled via the acpi_enable_gpe() interface.
         */
        gpe_enabled_count = 0;
 
@@ -472,14 +470,19 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
                        gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
                        gpe_event_info = &gpe_block->event_info[gpe_index];
 
-                       /* Ignore GPEs that have no corresponding _Lxx/_Exx method */
-
-                       if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD)
+                       /*
+                        * Ignore GPEs that have no corresponding _Lxx/_Exx method
+                        * and GPEs that are used to wake the system
+                        */
+                       if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+                            ACPI_GPE_DISPATCH_NONE)
+                           || ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)
+                               == ACPI_GPE_DISPATCH_HANDLER)
                            || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
                                continue;
                        }
 
-                       status = acpi_raw_enable_gpe(gpe_event_info);
+                       status = acpi_ev_add_gpe_reference(gpe_event_info);
                        if (ACPI_FAILURE(status)) {
                                ACPI_EXCEPTION((AE_INFO, status,
                                        "Could not enable GPE 0x%02X",
index 4c8dea5..c59dc23 100644 (file)
 #include "accommon.h"
 #include "acevents.h"
 #include "acnamesp.h"
-#include "acinterp.h"
 
 #define _COMPONENT          ACPI_EVENTS
 ACPI_MODULE_NAME("evgpeinit")
 
+/*
+ * Note: History of _PRW support in ACPICA
+ *
+ * Originally (2000 - 2010), the GPE initialization code performed a walk of
+ * the entire namespace to execute the _PRW methods and detect all GPEs
+ * capable of waking the system.
+ *
+ * As of 10/2010, the _PRW method execution has been removed since it is
+ * actually unnecessary. The host OS must in fact execute all _PRW methods
+ * in order to identify the device/power-resource dependencies. We now put
+ * the onus on the host OS to identify the wake GPEs as part of this process
+ * and to inform ACPICA of these GPEs via the acpi_setup_gpe_for_wake interface. This
+ * not only reduces the complexity of the ACPICA initialization code, but in
+ * some cases (on systems with very large namespaces) it should reduce the
+ * kernel boot time as well.
+ */
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ev_gpe_initialize
@@ -222,7 +238,7 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
        acpi_status status = AE_OK;
 
        /*
-        * 2) Find any _Lxx/_Exx GPE methods that have just been loaded.
+        * Find any _Lxx/_Exx GPE methods that have just been loaded.
         *
         * Any GPEs that correspond to new _Lxx/_Exx methods are immediately
         * enabled.
@@ -235,9 +251,9 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
                return;
        }
 
+       walk_info.count = 0;
        walk_info.owner_id = table_owner_id;
        walk_info.execute_by_owner_id = TRUE;
-       walk_info.count = 0;
 
        /* Walk the interrupt level descriptor list */
 
@@ -298,7 +314,7 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
  *                  xx     - is the GPE number [in HEX]
  *
 * If walk_info->execute_by_owner_id is TRUE, we only examine GPE methods
- *    with that owner.
+ * with that owner.
  *
  ******************************************************************************/
 
@@ -415,6 +431,7 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
         * Add the GPE information from above to the gpe_event_info block for
         * use during dispatch of this GPE.
         */
+       gpe_event_info->flags &= ~(ACPI_GPE_DISPATCH_MASK);
        gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD);
        gpe_event_info->dispatch.method_node = method_node;
 
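To make the host-side flow from the _PRW note above concrete, a hedged sketch of the call an OS might issue for each wake GPE it finds while evaluating a device's _PRW; the function name and its callers are hypothetical, and gpe_dev is NULL when the GPE lives in one of the FADT blocks.

static acpi_status example_register_wake_gpe(acpi_handle wake_dev,
					     acpi_handle gpe_dev, u32 gpe_num)
{
	/* wake_dev is the device whose _PRW named this GPE */
	return acpi_setup_gpe_for_wake(wake_dev, gpe_dev, gpe_num);
}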
index 19a0e51..10e4774 100644 (file)
@@ -152,6 +152,45 @@ u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
        return (FALSE);
 }
 
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_get_gpe_device
+ *
+ * PARAMETERS:  GPE_WALK_CALLBACK
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE
+ *              block device. NULL if the GPE is one of the FADT-defined GPEs.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+                      struct acpi_gpe_block_info *gpe_block, void *context)
+{
+       struct acpi_gpe_device_info *info = context;
+
+       /* Increment Index by the number of GPEs in this block */
+
+       info->next_block_base_index += gpe_block->gpe_count;
+
+       if (info->index < info->next_block_base_index) {
+               /*
+                * The GPE index is within this block, get the node. Leave the node
+                * NULL for the FADT-defined GPEs
+                */
+               if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) {
+                       info->gpe_device = gpe_block->node;
+               }
+
+               info->status = AE_OK;
+               return (AE_CTRL_END);
+       }
+
+       return (AE_OK);
+}
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ev_get_gpe_xrupt_block
index fcaed9f..8e31bb5 100644 (file)
@@ -284,41 +284,41 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
  * RETURN:      ACPI_INTERRUPT_HANDLED
  *
  * DESCRIPTION: Invoked directly from the SCI handler when a global lock
- *              release interrupt occurs. Attempt to acquire the global lock,
- *              if successful, signal the thread waiting for the lock.
+ *              release interrupt occurs.  If there's a thread waiting for
+ *              the global lock, signal it.
  *
  * NOTE: Assumes that the semaphore can be signaled from interrupt level. If
  * this is not possible for some reason, a separate thread will have to be
  * scheduled to do this.
  *
  ******************************************************************************/
+static u8 acpi_ev_global_lock_pending;
+static spinlock_t _acpi_ev_global_lock_pending_lock;
+#define acpi_ev_global_lock_pending_lock &_acpi_ev_global_lock_pending_lock
 
 static u32 acpi_ev_global_lock_handler(void *context)
 {
-       u8 acquired = FALSE;
+       acpi_status status;
+       acpi_cpu_flags flags;
 
-       /*
-        * Attempt to get the lock.
-        *
-        * If we don't get it now, it will be marked pending and we will
-        * take another interrupt when it becomes free.
-        */
-       ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
-       if (acquired) {
+       flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
 
-               /* Got the lock, now wake all threads waiting for it */
+       if (!acpi_ev_global_lock_pending) {
+               goto out;
+       }
 
-               acpi_gbl_global_lock_acquired = TRUE;
-               /* Send a unit to the semaphore */
+       /* Send a unit to the semaphore */
 
-               if (ACPI_FAILURE
-                   (acpi_os_signal_semaphore
-                    (acpi_gbl_global_lock_semaphore, 1))) {
-                       ACPI_ERROR((AE_INFO,
-                                   "Could not signal Global Lock semaphore"));
-               }
+       status = acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore, 1);
+       if (ACPI_FAILURE(status)) {
+               ACPI_ERROR((AE_INFO, "Could not signal Global Lock semaphore"));
        }
 
+       acpi_ev_global_lock_pending = FALSE;
+
+ out:
+       acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
+
        return (ACPI_INTERRUPT_HANDLED);
 }
 
@@ -415,6 +415,7 @@ static int acpi_ev_global_lock_acquired;
 
 acpi_status acpi_ev_acquire_global_lock(u16 timeout)
 {
+       acpi_cpu_flags flags;
        acpi_status status = AE_OK;
        u8 acquired = FALSE;
 
@@ -467,32 +468,47 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout)
                return_ACPI_STATUS(AE_OK);
        }
 
-       /* Attempt to acquire the actual hardware lock */
+       flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
+
+       do {
+
+               /* Attempt to acquire the actual hardware lock */
+
+               ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
+               if (acquired) {
+                       acpi_gbl_global_lock_acquired = TRUE;
+
+                       ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+                                         "Acquired hardware Global Lock\n"));
+                       break;
+               }
 
-       ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
-       if (acquired) {
+               acpi_ev_global_lock_pending = TRUE;
 
-               /* We got the lock */
+               acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
 
+               /*
+                * Did not get the lock. The pending bit was set above, and we
+                * must wait until we get the global lock released interrupt.
+                */
                ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-                                 "Acquired hardware Global Lock\n"));
+                                 "Waiting for hardware Global Lock\n"));
 
-               acpi_gbl_global_lock_acquired = TRUE;
-               return_ACPI_STATUS(AE_OK);
-       }
+               /*
+                * Wait for handshake with the global lock interrupt handler.
+                * This interface releases the interpreter if we must wait.
+                */
+               status = acpi_ex_system_wait_semaphore(
+                                               acpi_gbl_global_lock_semaphore,
+                                               ACPI_WAIT_FOREVER);
 
-       /*
-        * Did not get the lock. The pending bit was set above, and we must now
-        * wait until we get the global lock released interrupt.
-        */
-       ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Waiting for hardware Global Lock\n"));
+               flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
 
-       /*
-        * Wait for handshake with the global lock interrupt handler.
-        * This interface releases the interpreter if we must wait.
-        */
-       status = acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore,
-                                              ACPI_WAIT_FOREVER);
+       } while (ACPI_SUCCESS(status));
+
+       acpi_ev_global_lock_pending = FALSE;
+
+       acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
 
        return_ACPI_STATUS(status);
 }
index 36af222..1226689 100644 (file)
@@ -92,6 +92,57 @@ acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
 
 ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
 #endif                         /*  ACPI_FUTURE_USAGE  */
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_install_global_event_handler
+ *
+ * PARAMETERS:  Handler         - Pointer to the global event handler function
+ *              Context         - Value passed to the handler on each event
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Saves the pointer to the handler function. The global handler
+ *              is invoked upon each incoming GPE and Fixed Event. It is
+ *              invoked at interrupt level at the time of the event dispatch.
+ *              Can be used to update event counters, etc.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler, void *context)
+{
+       acpi_status status;
+
+       ACPI_FUNCTION_TRACE(acpi_install_global_event_handler);
+
+       /* Parameter validation */
+
+       if (!handler) {
+               return_ACPI_STATUS(AE_BAD_PARAMETER);
+       }
+
+       status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+       if (ACPI_FAILURE(status)) {
+               return_ACPI_STATUS(status);
+       }
+
+       /* Don't allow two handlers. */
+
+       if (acpi_gbl_global_event_handler) {
+               status = AE_ALREADY_EXISTS;
+               goto cleanup;
+       }
+
+       acpi_gbl_global_event_handler = handler;
+       acpi_gbl_global_event_handler_context = context;
+
+      cleanup:
+       (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+       return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_global_event_handler)
+
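A hedged sketch of how a host might use this interface, assuming the handler prototype matches the dispatch call in acpi_ev_gpe_dispatch() above (event type, device, event number, context; no return value); the counter and function names are hypothetical.

static u32 example_gpe_events;

static void example_global_event_handler(u32 event_type, acpi_handle device,
					 u32 event_number, void *context)
{
	/* Runs at interrupt level for every GPE and Fixed Event; keep it short */
	if (event_type == ACPI_EVENT_TYPE_GPE)
		example_gpe_events++;
}

static acpi_status example_install_counter(void)
{
	return acpi_install_global_event_handler(example_global_event_handler,
						 NULL);
}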
 /*******************************************************************************
  *
  * FUNCTION:    acpi_install_fixed_event_handler
@@ -671,10 +722,10 @@ ACPI_EXPORT_SYMBOL(acpi_remove_notify_handler)
 acpi_status
 acpi_install_gpe_handler(acpi_handle gpe_device,
                         u32 gpe_number,
-                        u32 type, acpi_event_handler address, void *context)
+                        u32 type, acpi_gpe_handler address, void *context)
 {
        struct acpi_gpe_event_info *gpe_event_info;
-       struct acpi_handler_info *handler;
+       struct acpi_gpe_handler_info *handler;
        acpi_status status;
        acpi_cpu_flags flags;
 
@@ -693,7 +744,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
 
        /* Allocate memory for the handler object */
 
-       handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_handler_info));
+       handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_handler_info));
        if (!handler) {
                status = AE_NO_MEMORY;
                goto unlock_and_exit;
@@ -722,7 +773,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
        handler->address = address;
        handler->context = context;
        handler->method_node = gpe_event_info->dispatch.method_node;
-       handler->orig_flags = gpe_event_info->flags &
+       handler->original_flags = gpe_event_info->flags &
                        (ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
 
        /*
@@ -731,10 +782,10 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
         * disabled now to avoid spurious execution of the handler.
         */
 
-       if ((handler->orig_flags & ACPI_GPE_DISPATCH_METHOD)
+       if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD)
            && gpe_event_info->runtime_count) {
-               handler->orig_enabled = 1;
-               (void)acpi_raw_disable_gpe(gpe_event_info);
+               handler->originally_enabled = 1;
+               (void)acpi_ev_remove_gpe_reference(gpe_event_info);
        }
 
        /* Install the handler */
@@ -777,10 +828,10 @@ ACPI_EXPORT_SYMBOL(acpi_install_gpe_handler)
  ******************************************************************************/
 acpi_status
 acpi_remove_gpe_handler(acpi_handle gpe_device,
-                       u32 gpe_number, acpi_event_handler address)
+                       u32 gpe_number, acpi_gpe_handler address)
 {
        struct acpi_gpe_event_info *gpe_event_info;
-       struct acpi_handler_info *handler;
+       struct acpi_gpe_handler_info *handler;
        acpi_status status;
        acpi_cpu_flags flags;
 
@@ -835,7 +886,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
        gpe_event_info->dispatch.method_node = handler->method_node;
        gpe_event_info->flags &=
                ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
-       gpe_event_info->flags |= handler->orig_flags;
+       gpe_event_info->flags |= handler->original_flags;
 
        /*
         * If the GPE was previously associated with a method and it was
@@ -843,9 +894,9 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
         * post-initialization configuration.
         */
 
-       if ((handler->orig_flags & ACPI_GPE_DISPATCH_METHOD)
-           && handler->orig_enabled)
-               (void)acpi_raw_enable_gpe(gpe_event_info);
+       if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD)
+           && handler->originally_enabled)
+               (void)acpi_ev_add_gpe_reference(gpe_event_info);
 
        /* Now we can free the handler object */
 
index a1dabe3..90488c1 100644 (file)
 
 #include <acpi/acpi.h>
 #include "accommon.h"
-#include "acevents.h"
-#include "acnamesp.h"
 #include "actables.h"
 
 #define _COMPONENT          ACPI_EVENTS
 ACPI_MODULE_NAME("evxfevnt")
 
-/* Local prototypes */
-static acpi_status
-acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
-                      struct acpi_gpe_block_info *gpe_block, void *context);
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_enable
@@ -211,185 +204,6 @@ acpi_status acpi_enable_event(u32 event, u32 flags)
 
 ACPI_EXPORT_SYMBOL(acpi_enable_event)
 
-/*******************************************************************************
- *
- * FUNCTION:    acpi_gpe_wakeup
- *
- * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
- *              gpe_number      - GPE level within the GPE block
- *              Action          - Enable or Disable
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Set or clear the GPE's wakeup enable mask bit.
- *
- ******************************************************************************/
-acpi_status acpi_gpe_wakeup(acpi_handle gpe_device, u32 gpe_number, u8 action)
-{
-       acpi_status status = AE_OK;
-       struct acpi_gpe_event_info *gpe_event_info;
-       struct acpi_gpe_register_info *gpe_register_info;
-       acpi_cpu_flags flags;
-       u32 register_bit;
-
-       ACPI_FUNCTION_TRACE(acpi_gpe_wakeup);
-
-       flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
-       /* Ensure that we have a valid GPE number */
-
-       gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
-       if (!gpe_event_info || !(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
-               status = AE_BAD_PARAMETER;
-               goto unlock_and_exit;
-       }
-
-       gpe_register_info = gpe_event_info->register_info;
-       if (!gpe_register_info) {
-               status = AE_NOT_EXIST;
-               goto unlock_and_exit;
-       }
-
-       register_bit =
-           acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info);
-
-       /* Perform the action */
-
-       switch (action) {
-       case ACPI_GPE_ENABLE:
-               ACPI_SET_BIT(gpe_register_info->enable_for_wake,
-                            (u8)register_bit);
-               break;
-
-       case ACPI_GPE_DISABLE:
-               ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
-                              (u8)register_bit);
-               break;
-
-       default:
-               ACPI_ERROR((AE_INFO, "%u, Invalid action", action));
-               status = AE_BAD_PARAMETER;
-               break;
-       }
-
-unlock_and_exit:
-       acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-       return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_gpe_wakeup)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_enable_gpe
- *
- * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
- *              gpe_number      - GPE level within the GPE block
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is
- *              hardware-enabled.
- *
- ******************************************************************************/
-acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
-{
-       acpi_status status = AE_BAD_PARAMETER;
-       struct acpi_gpe_event_info *gpe_event_info;
-       acpi_cpu_flags flags;
-
-       ACPI_FUNCTION_TRACE(acpi_enable_gpe);
-
-       flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
-       /* Ensure that we have a valid GPE number */
-
-       gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
-       if (gpe_event_info) {
-               status = acpi_raw_enable_gpe(gpe_event_info);
-       }
-
-       acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-       return_ACPI_STATUS(status);
-}
-ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_disable_gpe
- *
- * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
- *              gpe_number      - GPE level within the GPE block
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Remove a reference to a GPE. When the last reference is
- *              removed, only then is the GPE disabled (for runtime GPEs), or
- *              the GPE mask bit disabled (for wake GPEs)
- *
- ******************************************************************************/
-acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
-{
-       acpi_status status = AE_BAD_PARAMETER;
-       struct acpi_gpe_event_info *gpe_event_info;
-       acpi_cpu_flags flags;
-
-       ACPI_FUNCTION_TRACE(acpi_disable_gpe);
-
-       flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
-       /* Ensure that we have a valid GPE number */
-
-       gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
-       if (gpe_event_info) {
-               status = acpi_raw_disable_gpe(gpe_event_info) ;
-       }
-
-       acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-       return_ACPI_STATUS(status);
-}
-ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_gpe_can_wake
- *
- * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
- *              gpe_number      - GPE level within the GPE block
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Set the ACPI_GPE_CAN_WAKE flag for the given GPE.  If the GPE
- *              has a corresponding method and is currently enabled, disable it
- *              (GPEs with corresponding methods are enabled unconditionally
- *              during initialization, but GPEs that can wake up are expected
- *              to be initially disabled).
- *
- ******************************************************************************/
-acpi_status acpi_gpe_can_wake(acpi_handle gpe_device, u32 gpe_number)
-{
-       acpi_status status = AE_OK;
-       struct acpi_gpe_event_info *gpe_event_info;
-       acpi_cpu_flags flags;
-
-       ACPI_FUNCTION_TRACE(acpi_gpe_can_wake);
-
-       flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
-       /* Ensure that we have a valid GPE number */
-
-       gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
-       if (gpe_event_info) {
-               gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
-       } else {
-               status = AE_BAD_PARAMETER;
-       }
-
-       acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-       return_ACPI_STATUS(status);
-}
-ACPI_EXPORT_SYMBOL(acpi_gpe_can_wake)
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_disable_event
@@ -481,44 +295,6 @@ acpi_status acpi_clear_event(u32 event)
 
 ACPI_EXPORT_SYMBOL(acpi_clear_event)
 
-/*******************************************************************************
- *
- * FUNCTION:    acpi_clear_gpe
- *
- * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
- *              gpe_number      - GPE level within the GPE block
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Clear an ACPI event (general purpose)
- *
- ******************************************************************************/
-acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number)
-{
-       acpi_status status = AE_OK;
-       struct acpi_gpe_event_info *gpe_event_info;
-       acpi_cpu_flags flags;
-
-       ACPI_FUNCTION_TRACE(acpi_clear_gpe);
-
-       flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
-       /* Ensure that we have a valid GPE number */
-
-       gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
-       if (!gpe_event_info) {
-               status = AE_BAD_PARAMETER;
-               goto unlock_and_exit;
-       }
-
-       status = acpi_hw_clear_gpe(gpe_event_info);
-
-      unlock_and_exit:
-       acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-       return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_clear_gpe)
 /*******************************************************************************
  *
  * FUNCTION:    acpi_get_event_status
@@ -575,379 +351,3 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
 }
 
 ACPI_EXPORT_SYMBOL(acpi_get_event_status)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_get_gpe_status
- *
- * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
- *              gpe_number      - GPE level within the GPE block
- *              event_status    - Where the current status of the event will
- *                                be returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Get status of an event (general purpose)
- *
- ******************************************************************************/
-acpi_status
-acpi_get_gpe_status(acpi_handle gpe_device,
-                   u32 gpe_number, acpi_event_status *event_status)
-{
-       acpi_status status = AE_OK;
-       struct acpi_gpe_event_info *gpe_event_info;
-       acpi_cpu_flags flags;
-
-       ACPI_FUNCTION_TRACE(acpi_get_gpe_status);
-
-       flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
-       /* Ensure that we have a valid GPE number */
-
-       gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
-       if (!gpe_event_info) {
-               status = AE_BAD_PARAMETER;
-               goto unlock_and_exit;
-       }
-
-       /* Obtain status on the requested GPE number */
-
-       status = acpi_hw_get_gpe_status(gpe_event_info, event_status);
-
-       if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)
-               *event_status |= ACPI_EVENT_FLAG_HANDLE;
-
-      unlock_and_exit:
-       acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-       return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_gpe_status)
-/*******************************************************************************
- *
- * FUNCTION:    acpi_install_gpe_block
- *
- * PARAMETERS:  gpe_device          - Handle to the parent GPE Block Device
- *              gpe_block_address   - Address and space_iD
- *              register_count      - Number of GPE register pairs in the block
- *              interrupt_number    - H/W interrupt for the block
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Create and Install a block of GPE registers
- *
- ******************************************************************************/
-acpi_status
-acpi_install_gpe_block(acpi_handle gpe_device,
-                      struct acpi_generic_address *gpe_block_address,
-                      u32 register_count, u32 interrupt_number)
-{
-       acpi_status status = AE_OK;
-       union acpi_operand_object *obj_desc;
-       struct acpi_namespace_node *node;
-       struct acpi_gpe_block_info *gpe_block;
-
-       ACPI_FUNCTION_TRACE(acpi_install_gpe_block);
-
-       if ((!gpe_device) || (!gpe_block_address) || (!register_count)) {
-               return_ACPI_STATUS(AE_BAD_PARAMETER);
-       }
-
-       status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-       if (ACPI_FAILURE(status)) {
-               return (status);
-       }
-
-       node = acpi_ns_validate_handle(gpe_device);
-       if (!node) {
-               status = AE_BAD_PARAMETER;
-               goto unlock_and_exit;
-       }
-
-       /*
-        * For user-installed GPE Block Devices, the gpe_block_base_number
-        * is always zero
-        */
-       status =
-           acpi_ev_create_gpe_block(node, gpe_block_address, register_count, 0,
-                                    interrupt_number, &gpe_block);
-       if (ACPI_FAILURE(status)) {
-               goto unlock_and_exit;
-       }
-
-       /* Install block in the device_object attached to the node */
-
-       obj_desc = acpi_ns_get_attached_object(node);
-       if (!obj_desc) {
-
-               /*
-                * No object, create a new one (Device nodes do not always have
-                * an attached object)
-                */
-               obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE);
-               if (!obj_desc) {
-                       status = AE_NO_MEMORY;
-                       goto unlock_and_exit;
-               }
-
-               status =
-                   acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_DEVICE);
-
-               /* Remove local reference to the object */
-
-               acpi_ut_remove_reference(obj_desc);
-
-               if (ACPI_FAILURE(status)) {
-                       goto unlock_and_exit;
-               }
-       }
-
-       /* Now install the GPE block in the device_object */
-
-       obj_desc->device.gpe_block = gpe_block;
-
-      unlock_and_exit:
-       (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-       return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_install_gpe_block)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_remove_gpe_block
- *
- * PARAMETERS:  gpe_device          - Handle to the parent GPE Block Device
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Remove a previously installed block of GPE registers
- *
- ******************************************************************************/
-acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
-{
-       union acpi_operand_object *obj_desc;
-       acpi_status status;
-       struct acpi_namespace_node *node;
-
-       ACPI_FUNCTION_TRACE(acpi_remove_gpe_block);
-
-       if (!gpe_device) {
-               return_ACPI_STATUS(AE_BAD_PARAMETER);
-       }
-
-       status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-       if (ACPI_FAILURE(status)) {
-               return (status);
-       }
-
-       node = acpi_ns_validate_handle(gpe_device);
-       if (!node) {
-               status = AE_BAD_PARAMETER;
-               goto unlock_and_exit;
-       }
-
-       /* Get the device_object attached to the node */
-
-       obj_desc = acpi_ns_get_attached_object(node);
-       if (!obj_desc || !obj_desc->device.gpe_block) {
-               return_ACPI_STATUS(AE_NULL_OBJECT);
-       }
-
-       /* Delete the GPE block (but not the device_object) */
-
-       status = acpi_ev_delete_gpe_block(obj_desc->device.gpe_block);
-       if (ACPI_SUCCESS(status)) {
-               obj_desc->device.gpe_block = NULL;
-       }
-
-      unlock_and_exit:
-       (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-       return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_get_gpe_device
- *
- * PARAMETERS:  Index               - System GPE index (0-current_gpe_count)
- *              gpe_device          - Where the parent GPE Device is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Obtain the GPE device associated with the input index. A NULL
- *              gpe device indicates that the gpe number is contained in one of
- *              the FADT-defined gpe blocks. Otherwise, the GPE block device.
- *
- ******************************************************************************/
-acpi_status
-acpi_get_gpe_device(u32 index, acpi_handle *gpe_device)
-{
-       struct acpi_gpe_device_info info;
-       acpi_status status;
-
-       ACPI_FUNCTION_TRACE(acpi_get_gpe_device);
-
-       if (!gpe_device) {
-               return_ACPI_STATUS(AE_BAD_PARAMETER);
-       }
-
-       if (index >= acpi_current_gpe_count) {
-               return_ACPI_STATUS(AE_NOT_EXIST);
-       }
-
-       /* Setup and walk the GPE list */
-
-       info.index = index;
-       info.status = AE_NOT_EXIST;
-       info.gpe_device = NULL;
-       info.next_block_base_index = 0;
-
-       status = acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info);
-       if (ACPI_FAILURE(status)) {
-               return_ACPI_STATUS(status);
-       }
-
-       *gpe_device = info.gpe_device;
-       return_ACPI_STATUS(info.status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_gpe_device)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_get_gpe_device
- *
- * PARAMETERS:  GPE_WALK_CALLBACK
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE
- *              block device. NULL if the GPE is one of the FADT-defined GPEs.
- *
- ******************************************************************************/
-static acpi_status
-acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
-                      struct acpi_gpe_block_info *gpe_block, void *context)
-{
-       struct acpi_gpe_device_info *info = context;
-
-       /* Increment Index by the number of GPEs in this block */
-
-       info->next_block_base_index += gpe_block->gpe_count;
-
-       if (info->index < info->next_block_base_index) {
-               /*
-                * The GPE index is within this block, get the node. Leave the node
-                * NULL for the FADT-defined GPEs
-                */
-               if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) {
-                       info->gpe_device = gpe_block->node;
-               }
-
-               info->status = AE_OK;
-               return (AE_CTRL_END);
-       }
-
-       return (AE_OK);
-}
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_disable_all_gpes
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Disable and clear all GPEs in all GPE blocks
- *
- ******************************************************************************/
-
-acpi_status acpi_disable_all_gpes(void)
-{
-       acpi_status status;
-
-       ACPI_FUNCTION_TRACE(acpi_disable_all_gpes);
-
-       status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
-       if (ACPI_FAILURE(status)) {
-               return_ACPI_STATUS(status);
-       }
-
-       status = acpi_hw_disable_all_gpes();
-       (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-
-       return_ACPI_STATUS(status);
-}
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_enable_all_runtime_gpes
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks
- *
- ******************************************************************************/
-
-acpi_status acpi_enable_all_runtime_gpes(void)
-{
-       acpi_status status;
-
-       ACPI_FUNCTION_TRACE(acpi_enable_all_runtime_gpes);
-
-       status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
-       if (ACPI_FAILURE(status)) {
-               return_ACPI_STATUS(status);
-       }
-
-       status = acpi_hw_enable_all_runtime_gpes();
-       (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-
-       return_ACPI_STATUS(status);
-}
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_update_gpes
- *
- * PARAMETERS:  None
- *
- * RETURN:      None
- *
- * DESCRIPTION: Enable all GPEs that have associated _Lxx or _Exx methods and
- *              are not pointed to by any device _PRW methods indicating that
- *              these GPEs are generally intended for system or device wakeup
- *              (such GPEs have to be enabled directly when the devices whose
- *              _PRW methods point to them are set up for wakeup signaling).
- *
- ******************************************************************************/
-
-acpi_status acpi_update_gpes(void)
-{
-       acpi_status status;
-
-       ACPI_FUNCTION_TRACE(acpi_update_gpes);
-
-       status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
-       if (ACPI_FAILURE(status)) {
-               return_ACPI_STATUS(status);
-       } else if (acpi_all_gpes_initialized) {
-               goto unlock;
-       }
-
-       status = acpi_ev_walk_gpe_list(acpi_ev_initialize_gpe_block, NULL);
-       if (ACPI_SUCCESS(status)) {
-               acpi_all_gpes_initialized = TRUE;
-       }
-
-unlock:
-       (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-
-       return_ACPI_STATUS(status);
-}
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
new file mode 100644 (file)
index 0000000..416845b
--- /dev/null
@@ -0,0 +1,669 @@
+/******************************************************************************
+ *
+ * Module Name: evxfgpe - External Interfaces for General Purpose Events (GPEs)
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2010, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acevents.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_EVENTS
+ACPI_MODULE_NAME("evxfgpe")
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_update_all_gpes
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Complete GPE initialization and enable all GPEs that have
+ *              associated _Lxx or _Exx methods and are not pointed to by any
+ *              device _PRW methods (this indicates that these GPEs are
+ *              generally intended for system or device wakeup. Such GPEs
+ *              have to be enabled directly when the devices whose _PRW
+ *              methods point to them are set up for wakeup signaling.)
+ *
+ * NOTE: Should be called after any GPEs are added to the system. Primarily,
+ * after the system _PRW methods have been run, but also after a GPE Block
+ * Device has been added or if any new GPE methods have been added via a
+ * dynamic table load.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_update_all_gpes(void)
+{
+       acpi_status status;
+
+       ACPI_FUNCTION_TRACE(acpi_update_all_gpes);
+
+       status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+       if (ACPI_FAILURE(status)) {
+               return_ACPI_STATUS(status);
+       }
+
+       if (acpi_gbl_all_gpes_initialized) {
+               goto unlock_and_exit;
+       }
+
+       status = acpi_ev_walk_gpe_list(acpi_ev_initialize_gpe_block, NULL);
+       if (ACPI_SUCCESS(status)) {
+               acpi_gbl_all_gpes_initialized = TRUE;
+       }
+
+unlock_and_exit:
+       (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+
+       return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_update_all_gpes)
+
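A small, hedged sketch of the call sites suggested by the NOTE above; the wrapper name is hypothetical.

static void example_finish_gpe_init(void)
{
	/*
	 * Called once device enumeration and _PRW processing are complete,
	 * and again whenever new GPE methods may have appeared (a GPE Block
	 * Device was added, or a table was loaded dynamically). Once all
	 * GPEs are initialized the call returns quickly.
	 */
	acpi_update_all_gpes();
}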
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_enable_gpe
+ *
+ * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
+ *              gpe_number      - GPE level within the GPE block
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is
+ *              hardware-enabled.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
+{
+       acpi_status status = AE_BAD_PARAMETER;
+       struct acpi_gpe_event_info *gpe_event_info;
+       acpi_cpu_flags flags;
+
+       ACPI_FUNCTION_TRACE(acpi_enable_gpe);
+
+       flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+       /* Ensure that we have a valid GPE number */
+
+       gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+       if (gpe_event_info) {
+               status = acpi_ev_add_gpe_reference(gpe_event_info);
+       }
+
+       acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+       return_ACPI_STATUS(status);
+}
+ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_disable_gpe
+ *
+ * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
+ *              gpe_number      - GPE level within the GPE block
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Remove a reference to a GPE. When the last reference is
+ *              removed, only then is the GPE disabled (for runtime GPEs), or
+ *              the GPE mask bit disabled (for wake GPEs)
+ *
+ ******************************************************************************/
+
+acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
+{
+       acpi_status status = AE_BAD_PARAMETER;
+       struct acpi_gpe_event_info *gpe_event_info;
+       acpi_cpu_flags flags;
+
+       ACPI_FUNCTION_TRACE(acpi_disable_gpe);
+
+       flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+       /* Ensure that we have a valid GPE number */
+
+       gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+       if (gpe_event_info) {
+               status = acpi_ev_remove_gpe_reference(gpe_event_info) ;
+       }
+
+       acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+       return_ACPI_STATUS(status);
+}
+ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
+
+
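Because enable/disable are now reference counted, callers must stay balanced; a hypothetical sketch (the GPE number 0x10 is arbitrary, NULL selects the FADT blocks):

static void example_balanced_gpe_usage(void)
{
	acpi_enable_gpe(NULL, 0x10);	/* reference 0 -> 1: hardware-enabled  */
	acpi_enable_gpe(NULL, 0x10);	/* reference 1 -> 2: no hardware change */

	acpi_disable_gpe(NULL, 0x10);	/* reference 2 -> 1: still enabled      */
	acpi_disable_gpe(NULL, 0x10);	/* reference 1 -> 0: hardware-disabled  */
}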
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_setup_gpe_for_wake
+ *
+ * PARAMETERS:  wake_device         - Device associated with the GPE (via _PRW)
+ *              gpe_device          - Parent GPE Device. NULL for GPE0/GPE1
+ *              gpe_number          - GPE level within the GPE block
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Mark a GPE as having the ability to wake the system. This
+ *              interface is intended to be used as the host executes the
+ *              _PRW methods (Power Resources for Wake) in the system tables.
+ *              Each _PRW appears under a Device Object (The wake_device), and
+ *              contains the info for the wake GPE associated with the
+ *              wake_device.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_setup_gpe_for_wake(acpi_handle wake_device,
+                       acpi_handle gpe_device, u32 gpe_number)
+{
+       acpi_status status = AE_BAD_PARAMETER;
+       struct acpi_gpe_event_info *gpe_event_info;
+       struct acpi_namespace_node *device_node;
+       acpi_cpu_flags flags;
+
+       ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake);
+
+       /* Parameter Validation */
+
+       if (!wake_device) {
+               /*
+                * By forcing wake_device to be valid, we automatically enable the
+                * implicit notify feature on all hosts.
+                */
+               return_ACPI_STATUS(AE_BAD_PARAMETER);
+       }
+
+       /* Validate wake_device is of type Device */
+
+       device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
+       if (device_node->type != ACPI_TYPE_DEVICE) {
+               return_ACPI_STATUS(AE_BAD_PARAMETER);
+       }
+
+       flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+       /* Ensure that we have a valid GPE number */
+
+       gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+       if (gpe_event_info) {
+               /*
+                * If there is no method or handler for this GPE, then the
+                * wake_device will be notified whenever this GPE fires (aka
+                * "implicit notify"). Note: The GPE is assumed to be
+                * level-triggered (for Windows compatibility).
+                */
+               if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+                   ACPI_GPE_DISPATCH_NONE) {
+                       gpe_event_info->flags =
+                           (ACPI_GPE_DISPATCH_NOTIFY |
+                            ACPI_GPE_LEVEL_TRIGGERED);
+                       gpe_event_info->dispatch.device_node = device_node;
+               }
+
+               gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
+               status = AE_OK;
+       }
+
+       acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+       return_ACPI_STATUS(status);
+}
+ACPI_EXPORT_SYMBOL(acpi_setup_gpe_for_wake)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_set_gpe_wake_mask
+ *
+ * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
+ *              gpe_number      - GPE level within the GPE block
+ *              Action          - Enable or Disable
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Set or clear the GPE's wakeup enable mask bit. The GPE must
+ *              already be marked as a WAKE GPE.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 action)
+{
+       acpi_status status = AE_OK;
+       struct acpi_gpe_event_info *gpe_event_info;
+       struct acpi_gpe_register_info *gpe_register_info;
+       acpi_cpu_flags flags;
+       u32 register_bit;
+
+       ACPI_FUNCTION_TRACE(acpi_set_gpe_wake_mask);
+
+       flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+       /*
+        * Ensure that we have a valid GPE number and that this GPE is in
+        * fact a wake GPE
+        */
+       gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+       if (!gpe_event_info) {
+               status = AE_BAD_PARAMETER;
+               goto unlock_and_exit;
+       }
+
+       if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
+               status = AE_TYPE;
+               goto unlock_and_exit;
+       }
+
+       gpe_register_info = gpe_event_info->register_info;
+       if (!gpe_register_info) {
+               status = AE_NOT_EXIST;
+               goto unlock_and_exit;
+       }
+
+       register_bit =
+           acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info);
+
+       /* Perform the action */
+
+       switch (action) {
+       case ACPI_GPE_ENABLE:
+               ACPI_SET_BIT(gpe_register_info->enable_for_wake,
+                            (u8)register_bit);
+               break;
+
+       case ACPI_GPE_DISABLE:
+               ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
+                              (u8)register_bit);
+               break;
+
+       default:
+               ACPI_ERROR((AE_INFO, "%u, Invalid action", action));
+               status = AE_BAD_PARAMETER;
+               break;
+       }
+
+unlock_and_exit:
+       acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+       return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_set_gpe_wake_mask)
+
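A hedged suspend-path sketch; the wrapper and its arguments are hypothetical, and the GPE must already have been marked with acpi_setup_gpe_for_wake() or AE_TYPE is returned.

static acpi_status example_arm_wake_gpe(acpi_handle gpe_dev, u32 gpe_num,
					u8 may_wake)
{
	return acpi_set_gpe_wake_mask(gpe_dev, gpe_num,
				      may_wake ? ACPI_GPE_ENABLE
					       : ACPI_GPE_DISABLE);
}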
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_clear_gpe
+ *
+ * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
+ *              gpe_number      - GPE level within the GPE block
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Clear an ACPI event (general purpose)
+ *
+ ******************************************************************************/
+acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number)
+{
+       acpi_status status = AE_OK;
+       struct acpi_gpe_event_info *gpe_event_info;
+       acpi_cpu_flags flags;
+
+       ACPI_FUNCTION_TRACE(acpi_clear_gpe);
+
+       flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+       /* Ensure that we have a valid GPE number */
+
+       gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+       if (!gpe_event_info) {
+               status = AE_BAD_PARAMETER;
+               goto unlock_and_exit;
+       }
+
+       status = acpi_hw_clear_gpe(gpe_event_info);
+
+      unlock_and_exit:
+       acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+       return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_clear_gpe)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_get_gpe_status
+ *
+ * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
+ *              gpe_number      - GPE level within the GPE block
+ *              event_status    - Where the current status of the event will
+ *                                be returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Get the current status of a GPE (signalled/not_signalled)
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_gpe_status(acpi_handle gpe_device,
+                   u32 gpe_number, acpi_event_status *event_status)
+{
+       acpi_status status = AE_OK;
+       struct acpi_gpe_event_info *gpe_event_info;
+       acpi_cpu_flags flags;
+
+       ACPI_FUNCTION_TRACE(acpi_get_gpe_status);
+
+       flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+       /* Ensure that we have a valid GPE number */
+
+       gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+       if (!gpe_event_info) {
+               status = AE_BAD_PARAMETER;
+               goto unlock_and_exit;
+       }
+
+       /* Obtain status on the requested GPE number */
+
+       status = acpi_hw_get_gpe_status(gpe_event_info, event_status);
+
+       if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)
+               *event_status |= ACPI_EVENT_FLAG_HANDLE;
+
+      unlock_and_exit:
+       acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+       return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_gpe_status)
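acpi_get_gpe_status() reports the raw enabled/set bits for the GPE and, as added above, ORs in ACPI_EVENT_FLAG_HANDLE when a handler or GPE method is attached. A hedged sketch of a caller polling whether a GPE is currently asserted; ACPI_EVENT_FLAG_SET is assumed to be the status-bit flag from the public ACPI event headers:

/* Illustrative sketch only. */
static u8 example_gpe_is_asserted(acpi_handle gpe_dev, u32 gpe_num)
{
	acpi_event_status event_status = 0;

	if (ACPI_FAILURE(acpi_get_gpe_status(gpe_dev, gpe_num,
					     &event_status)))
		return 0;

	/* Non-zero when the GPE status bit is set in hardware */
	return (event_status & ACPI_EVENT_FLAG_SET) ? 1 : 0;
}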
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_disable_all_gpes
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Disable and clear all GPEs in all GPE blocks
+ *
+ ******************************************************************************/
+
+acpi_status acpi_disable_all_gpes(void)
+{
+       acpi_status status;
+
+       ACPI_FUNCTION_TRACE(acpi_disable_all_gpes);
+
+       status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+       if (ACPI_FAILURE(status)) {
+               return_ACPI_STATUS(status);
+       }
+
+       status = acpi_hw_disable_all_gpes();
+       (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+
+       return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_disable_all_gpes)
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_enable_all_runtime_gpes
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks
+ *
+ ******************************************************************************/
+
+acpi_status acpi_enable_all_runtime_gpes(void)
+{
+       acpi_status status;
+
+       ACPI_FUNCTION_TRACE(acpi_enable_all_runtime_gpes);
+
+       status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+       if (ACPI_FAILURE(status)) {
+               return_ACPI_STATUS(status);
+       }
+
+       status = acpi_hw_enable_all_runtime_gpes();
+       (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+
+       return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_enable_all_runtime_gpes)
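Taken together, acpi_disable_all_gpes() and acpi_enable_all_runtime_gpes() form the block-level switch used around a sleep transition: all GPEs are disabled before the platform is put to sleep (the wake-enabled GPEs are then programmed separately from the enable_for_wake masks), and the runtime set is restored on resume. A simplified, illustrative ordering; the real suspend path has more steps:

/* Sketch of the GPE switch-over around a sleep transition. */
static void example_gpe_sleep_switchover(void)
{
	/* Quiesce every GPE before entering the sleep state */
	acpi_disable_all_gpes();

	/* ... program wake GPEs, enter the sleep state, wake up ... */

	/* Restore the runtime GPEs once the system is back up */
	acpi_enable_all_runtime_gpes();
}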
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_install_gpe_block
+ *
+ * PARAMETERS:  gpe_device          - Handle to the parent GPE Block Device
+ *              gpe_block_address   - Address and space_ID
+ *              register_count      - Number of GPE register pairs in the block
+ *              interrupt_number    - H/W interrupt for the block
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Create and Install a block of GPE registers. The GPEs are not
+ *              enabled here.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_gpe_block(acpi_handle gpe_device,
+                      struct acpi_generic_address *gpe_block_address,
+                      u32 register_count, u32 interrupt_number)
+{
+       acpi_status status;
+       union acpi_operand_object *obj_desc;
+       struct acpi_namespace_node *node;
+       struct acpi_gpe_block_info *gpe_block;
+
+       ACPI_FUNCTION_TRACE(acpi_install_gpe_block);
+
+       if ((!gpe_device) || (!gpe_block_address) || (!register_count)) {
+               return_ACPI_STATUS(AE_BAD_PARAMETER);
+       }
+
+       status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+       if (ACPI_FAILURE(status)) {
+               return (status);
+       }
+
+       node = acpi_ns_validate_handle(gpe_device);
+       if (!node) {
+               status = AE_BAD_PARAMETER;
+               goto unlock_and_exit;
+       }
+
+       /*
+        * For user-installed GPE Block Devices, the gpe_block_base_number
+        * is always zero
+        */
+       status =
+           acpi_ev_create_gpe_block(node, gpe_block_address, register_count, 0,
+                                    interrupt_number, &gpe_block);
+       if (ACPI_FAILURE(status)) {
+               goto unlock_and_exit;
+       }
+
+       /* Install block in the device_object attached to the node */
+
+       obj_desc = acpi_ns_get_attached_object(node);
+       if (!obj_desc) {
+
+               /*
+                * No object, create a new one (Device nodes do not always have
+                * an attached object)
+                */
+               obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE);
+               if (!obj_desc) {
+                       status = AE_NO_MEMORY;
+                       goto unlock_and_exit;
+               }
+
+               status =
+                   acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_DEVICE);
+
+               /* Remove local reference to the object */
+
+               acpi_ut_remove_reference(obj_desc);
+
+               if (ACPI_FAILURE(status)) {
+                       goto unlock_and_exit;
+               }
+       }
+
+       /* Now install the GPE block in the device_object */
+
+       obj_desc->device.gpe_block = gpe_block;
+
+      unlock_and_exit:
+       (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+       return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_gpe_block)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_remove_gpe_block
+ *
+ * PARAMETERS:  gpe_device          - Handle to the parent GPE Block Device
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Remove a previously installed block of GPE registers
+ *
+ ******************************************************************************/
+acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
+{
+       union acpi_operand_object *obj_desc;
+       acpi_status status;
+       struct acpi_namespace_node *node;
+
+       ACPI_FUNCTION_TRACE(acpi_remove_gpe_block);
+
+       if (!gpe_device) {
+               return_ACPI_STATUS(AE_BAD_PARAMETER);
+       }
+
+       status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+       if (ACPI_FAILURE(status)) {
+               return (status);
+       }
+
+       node = acpi_ns_validate_handle(gpe_device);
+       if (!node) {
+               status = AE_BAD_PARAMETER;
+               goto unlock_and_exit;
+       }
+
+       /* Get the device_object attached to the node */
+
+       obj_desc = acpi_ns_get_attached_object(node);
+       if (!obj_desc || !obj_desc->device.gpe_block) {
+               return_ACPI_STATUS(AE_NULL_OBJECT);
+       }
+
+       /* Delete the GPE block (but not the device_object) */
+
+       status = acpi_ev_delete_gpe_block(obj_desc->device.gpe_block);
+       if (ACPI_SUCCESS(status)) {
+               obj_desc->device.gpe_block = NULL;
+       }
+
+      unlock_and_exit:
+       (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+       return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block)
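acpi_install_gpe_block() and acpi_remove_gpe_block() pair up for GPE Block Devices: the caller supplies the register block address, the number of register pairs and the interrupt, and removes the block again before the device goes away. A hedged sketch, assuming the generic address and register count were already parsed from the block device's _CRS:

/* Illustrative sketch only; gba and register_count come from _CRS. */
static acpi_status example_attach_gpe_block(acpi_handle block_dev,
					    struct acpi_generic_address *gba,
					    u32 register_count, u32 gsi)
{
	acpi_status status;

	status = acpi_install_gpe_block(block_dev, gba, register_count, gsi);
	if (ACPI_FAILURE(status))
		return status;

	/* ... use the block; later, at device teardown ... */
	return acpi_remove_gpe_block(block_dev);
}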
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_get_gpe_device
+ *
+ * PARAMETERS:  Index               - System GPE index (0-current_gpe_count)
+ *              gpe_device          - Where the parent GPE Device is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Obtain the GPE device associated with the input index. A NULL
+ *              gpe device indicates that the gpe number is contained in one of
+ *              the FADT-defined gpe blocks. Otherwise, the parent GPE
+ *              block device is returned.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_gpe_device(u32 index, acpi_handle *gpe_device)
+{
+       struct acpi_gpe_device_info info;
+       acpi_status status;
+
+       ACPI_FUNCTION_TRACE(acpi_get_gpe_device);
+
+       if (!gpe_device) {
+               return_ACPI_STATUS(AE_BAD_PARAMETER);
+       }
+
+       if (index >= acpi_current_gpe_count) {
+               return_ACPI_STATUS(AE_NOT_EXIST);
+       }
+
+       /* Setup and walk the GPE list */
+
+       info.index = index;
+       info.status = AE_NOT_EXIST;
+       info.gpe_device = NULL;
+       info.next_block_base_index = 0;
+
+       status = acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info);
+       if (ACPI_FAILURE(status)) {
+               return_ACPI_STATUS(status);
+       }
+
+       *gpe_device = ACPI_CAST_PTR(acpi_handle, info.gpe_device);
+       return_ACPI_STATUS(info.status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_gpe_device)
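acpi_get_gpe_device() maps a flat system-wide GPE index back to its parent block device, with NULL meaning the GPE lives in one of the two FADT blocks. A hedged sketch of visiting every GPE known to ACPICA via that index, using the exported acpi_current_gpe_count checked above:

/* Illustrative sketch: visit every GPE known to ACPICA by system index. */
static void example_walk_all_gpes(void)
{
	acpi_handle gpe_dev;
	u32 index;

	for (index = 0; index < acpi_current_gpe_count; index++) {
		if (ACPI_FAILURE(acpi_get_gpe_device(index, &gpe_dev)))
			continue;

		/* gpe_dev == NULL: the GPE is in a FADT-defined block */
	}
}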
index 14750db..85c3cbd 100644 (file)
@@ -62,10 +62,10 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
  * PARAMETERS: gpe_event_info      - Info block for the GPE
  *             gpe_register_info   - Info block for the GPE register
  *
- * RETURN:     Status
+ * RETURN:     Register mask with a one in the GPE bit position
  *
- * DESCRIPTION:        Compute GPE enable mask with one bit corresponding to the given
- *             GPE set.
+ * DESCRIPTION: Compute the register mask for this GPE. One bit is set in the
+ *              correct position for the input GPE.
  *
  ******************************************************************************/
 
@@ -85,12 +85,12 @@ u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info,
  *
  * RETURN:     Status
  *
- * DESCRIPTION: Enable or disable a single GPE in its enable register.
+ * DESCRIPTION: Enable or disable a single GPE in the parent enable register.
  *
  ******************************************************************************/
 
 acpi_status
-acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action)
+acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
 {
        struct acpi_gpe_register_info *gpe_register_info;
        acpi_status status;
@@ -113,14 +113,20 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action)
                return (status);
        }
 
-       /* Set ot clear just the bit that corresponds to this GPE */
+       /* Set or clear just the bit that corresponds to this GPE */
 
        register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info,
                                                gpe_register_info);
        switch (action) {
-       case ACPI_GPE_COND_ENABLE:
-               if (!(register_bit & gpe_register_info->enable_for_run))
+       case ACPI_GPE_CONDITIONAL_ENABLE:
+
+               /* Only enable if the enable_for_run bit is set */
+
+               if (!(register_bit & gpe_register_info->enable_for_run)) {
                        return (AE_BAD_PARAMETER);
+               }
+
+               /*lint -fallthrough */
 
        case ACPI_GPE_ENABLE:
                ACPI_SET_BIT(enable_mask, register_bit);
@@ -131,7 +137,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action)
                break;
 
        default:
-               ACPI_ERROR((AE_INFO, "Invalid action\n"));
+               ACPI_ERROR((AE_INFO, "Invalid GPE Action, %u\n", action));
                return (AE_BAD_PARAMETER);
        }
 
@@ -168,13 +174,13 @@ acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info)
                return (AE_NOT_EXIST);
        }
 
-       register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info,
-                                               gpe_register_info);
-
        /*
         * Write a one to the appropriate bit in the status register to
         * clear this GPE.
         */
+       register_bit =
+           acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info);
+
        status = acpi_hw_write(register_bit,
                               &gpe_register_info->status_address);
 
@@ -201,8 +207,8 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
        u32 in_byte;
        u32 register_bit;
        struct acpi_gpe_register_info *gpe_register_info;
-       acpi_status status;
        acpi_event_status local_event_status = 0;
+       acpi_status status;
 
        ACPI_FUNCTION_ENTRY();
 
index e87bc67..508537f 100644 (file)
@@ -768,7 +768,7 @@ acpi_status acpi_ut_init_globals(void)
        acpi_gbl_gpe_fadt_blocks[0] = NULL;
        acpi_gbl_gpe_fadt_blocks[1] = NULL;
        acpi_current_gpe_count = 0;
-       acpi_all_gpes_initialized = FALSE;
+       acpi_gbl_all_gpes_initialized = FALSE;
 
        /* Global handlers */
 
@@ -778,6 +778,7 @@ acpi_status acpi_ut_init_globals(void)
        acpi_gbl_init_handler = NULL;
        acpi_gbl_table_handler = NULL;
        acpi_gbl_interface_handler = NULL;
+       acpi_gbl_global_event_handler = NULL;
 
        /* Global Lock support */
 
index 18df1e9..ef0581f 100644 (file)
@@ -109,6 +109,8 @@ static inline u32 apei_estatus_len(struct acpi_hest_generic_status *estatus)
                return sizeof(*estatus) + estatus->data_length;
 }
 
+void apei_estatus_print(const char *pfx,
+                       const struct acpi_hest_generic_status *estatus);
 int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus);
 int apei_estatus_check(const struct acpi_hest_generic_status *estatus);
 #endif
index f4cf2fc..31464a0 100644 (file)
@@ -46,6 +46,317 @@ u64 cper_next_record_id(void)
 }
 EXPORT_SYMBOL_GPL(cper_next_record_id);
 
+static const char *cper_severity_strs[] = {
+       "recoverable",
+       "fatal",
+       "corrected",
+       "info",
+};
+
+static const char *cper_severity_str(unsigned int severity)
+{
+       return severity < ARRAY_SIZE(cper_severity_strs) ?
+               cper_severity_strs[severity] : "unknown";
+}
+
+/*
+ * cper_print_bits - print strings for set bits
+ * @pfx: prefix for each line, including log level and prefix string
+ * @bits: bit mask
+ * @strs: string array, indexed by bit position
+ * @strs_size: size of the string array @strs
+ *
+ * For each set bit in @bits, print the corresponding string in @strs.
+ * If the output length exceeds 80 characters, multiple lines will be
+ * printed, with @pfx printed at the beginning of each line.
+ */
+static void cper_print_bits(const char *pfx, unsigned int bits,
+                           const char *strs[], unsigned int strs_size)
+{
+       int i, len = 0;
+       const char *str;
+       char buf[84];
+
+       for (i = 0; i < strs_size; i++) {
+               if (!(bits & (1U << i)))
+                       continue;
+               str = strs[i];
+               if (len && len + strlen(str) + 2 > 80) {
+                       printk("%s\n", buf);
+                       len = 0;
+               }
+               if (!len)
+                       len = snprintf(buf, sizeof(buf), "%s%s", pfx, str);
+               else
+                       len += snprintf(buf+len, sizeof(buf)-len, ", %s", str);
+       }
+       if (len)
+               printk("%s\n", buf);
+}
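As a worked example, a call with @bits = 0x05 and the cper_proc_flag_strs table defined further down selects bits 0 and 2, so a single line "<pfx>restartable, overflow" is printed; only when appending the next string would push the line past 80 characters is a new line started, again prefixed with @pfx. A hypothetical caller would look like:

	cper_print_bits(pfx, 0x05, cper_proc_flag_strs,
			ARRAY_SIZE(cper_proc_flag_strs));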
+
+static const char *cper_proc_type_strs[] = {
+       "IA32/X64",
+       "IA64",
+};
+
+static const char *cper_proc_isa_strs[] = {
+       "IA32",
+       "IA64",
+       "X64",
+};
+
+static const char *cper_proc_error_type_strs[] = {
+       "cache error",
+       "TLB error",
+       "bus error",
+       "micro-architectural error",
+};
+
+static const char *cper_proc_op_strs[] = {
+       "unknown or generic",
+       "data read",
+       "data write",
+       "instruction execution",
+};
+
+static const char *cper_proc_flag_strs[] = {
+       "restartable",
+       "precise IP",
+       "overflow",
+       "corrected",
+};
+
+static void cper_print_proc_generic(const char *pfx,
+                                   const struct cper_sec_proc_generic *proc)
+{
+       if (proc->validation_bits & CPER_PROC_VALID_TYPE)
+               printk("%s""processor_type: %d, %s\n", pfx, proc->proc_type,
+                      proc->proc_type < ARRAY_SIZE(cper_proc_type_strs) ?
+                      cper_proc_type_strs[proc->proc_type] : "unknown");
+       if (proc->validation_bits & CPER_PROC_VALID_ISA)
+               printk("%s""processor_isa: %d, %s\n", pfx, proc->proc_isa,
+                      proc->proc_isa < ARRAY_SIZE(cper_proc_isa_strs) ?
+                      cper_proc_isa_strs[proc->proc_isa] : "unknown");
+       if (proc->validation_bits & CPER_PROC_VALID_ERROR_TYPE) {
+               printk("%s""error_type: 0x%02x\n", pfx, proc->proc_error_type);
+               cper_print_bits(pfx, proc->proc_error_type,
+                               cper_proc_error_type_strs,
+                               ARRAY_SIZE(cper_proc_error_type_strs));
+       }
+       if (proc->validation_bits & CPER_PROC_VALID_OPERATION)
+               printk("%s""operation: %d, %s\n", pfx, proc->operation,
+                      proc->operation < ARRAY_SIZE(cper_proc_op_strs) ?
+                      cper_proc_op_strs[proc->operation] : "unknown");
+       if (proc->validation_bits & CPER_PROC_VALID_FLAGS) {
+               printk("%s""flags: 0x%02x\n", pfx, proc->flags);
+               cper_print_bits(pfx, proc->flags, cper_proc_flag_strs,
+                               ARRAY_SIZE(cper_proc_flag_strs));
+       }
+       if (proc->validation_bits & CPER_PROC_VALID_LEVEL)
+               printk("%s""level: %d\n", pfx, proc->level);
+       if (proc->validation_bits & CPER_PROC_VALID_VERSION)
+               printk("%s""version_info: 0x%016llx\n", pfx, proc->cpu_version);
+       if (proc->validation_bits & CPER_PROC_VALID_ID)
+               printk("%s""processor_id: 0x%016llx\n", pfx, proc->proc_id);
+       if (proc->validation_bits & CPER_PROC_VALID_TARGET_ADDRESS)
+               printk("%s""target_address: 0x%016llx\n",
+                      pfx, proc->target_addr);
+       if (proc->validation_bits & CPER_PROC_VALID_REQUESTOR_ID)
+               printk("%s""requestor_id: 0x%016llx\n",
+                      pfx, proc->requestor_id);
+       if (proc->validation_bits & CPER_PROC_VALID_RESPONDER_ID)
+               printk("%s""responder_id: 0x%016llx\n",
+                      pfx, proc->responder_id);
+       if (proc->validation_bits & CPER_PROC_VALID_IP)
+               printk("%s""IP: 0x%016llx\n", pfx, proc->ip);
+}
+
+static const char *cper_mem_err_type_strs[] = {
+       "unknown",
+       "no error",
+       "single-bit ECC",
+       "multi-bit ECC",
+       "single-symbol chipkill ECC",
+       "multi-symbol chipkill ECC",
+       "master abort",
+       "target abort",
+       "parity error",
+       "watchdog timeout",
+       "invalid address",
+       "mirror Broken",
+       "memory sparing",
+       "scrub corrected error",
+       "scrub uncorrected error",
+};
+
+static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem)
+{
+       if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS)
+               printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status);
+       if (mem->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS)
+               printk("%s""physical_address: 0x%016llx\n",
+                      pfx, mem->physical_addr);
+       if (mem->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS_MASK)
+               printk("%s""physical_address_mask: 0x%016llx\n",
+                      pfx, mem->physical_addr_mask);
+       if (mem->validation_bits & CPER_MEM_VALID_NODE)
+               printk("%s""node: %d\n", pfx, mem->node);
+       if (mem->validation_bits & CPER_MEM_VALID_CARD)
+               printk("%s""card: %d\n", pfx, mem->card);
+       if (mem->validation_bits & CPER_MEM_VALID_MODULE)
+               printk("%s""module: %d\n", pfx, mem->module);
+       if (mem->validation_bits & CPER_MEM_VALID_BANK)
+               printk("%s""bank: %d\n", pfx, mem->bank);
+       if (mem->validation_bits & CPER_MEM_VALID_DEVICE)
+               printk("%s""device: %d\n", pfx, mem->device);
+       if (mem->validation_bits & CPER_MEM_VALID_ROW)
+               printk("%s""row: %d\n", pfx, mem->row);
+       if (mem->validation_bits & CPER_MEM_VALID_COLUMN)
+               printk("%s""column: %d\n", pfx, mem->column);
+       if (mem->validation_bits & CPER_MEM_VALID_BIT_POSITION)
+               printk("%s""bit_position: %d\n", pfx, mem->bit_pos);
+       if (mem->validation_bits & CPER_MEM_VALID_REQUESTOR_ID)
+               printk("%s""requestor_id: 0x%016llx\n", pfx, mem->requestor_id);
+       if (mem->validation_bits & CPER_MEM_VALID_RESPONDER_ID)
+               printk("%s""responder_id: 0x%016llx\n", pfx, mem->responder_id);
+       if (mem->validation_bits & CPER_MEM_VALID_TARGET_ID)
+               printk("%s""target_id: 0x%016llx\n", pfx, mem->target_id);
+       if (mem->validation_bits & CPER_MEM_VALID_ERROR_TYPE) {
+               u8 etype = mem->error_type;
+               printk("%s""error_type: %d, %s\n", pfx, etype,
+                      etype < ARRAY_SIZE(cper_mem_err_type_strs) ?
+                      cper_mem_err_type_strs[etype] : "unknown");
+       }
+}
+
+static const char *cper_pcie_port_type_strs[] = {
+       "PCIe end point",
+       "legacy PCI end point",
+       "unknown",
+       "unknown",
+       "root port",
+       "upstream switch port",
+       "downstream switch port",
+       "PCIe to PCI/PCI-X bridge",
+       "PCI/PCI-X to PCIe bridge",
+       "root complex integrated endpoint device",
+       "root complex event collector",
+};
+
+static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie)
+{
+       if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE)
+               printk("%s""port_type: %d, %s\n", pfx, pcie->port_type,
+                      pcie->port_type < ARRAY_SIZE(cper_pcie_port_type_strs) ?
+                      cper_pcie_port_type_strs[pcie->port_type] : "unknown");
+       if (pcie->validation_bits & CPER_PCIE_VALID_VERSION)
+               printk("%s""version: %d.%d\n", pfx,
+                      pcie->version.major, pcie->version.minor);
+       if (pcie->validation_bits & CPER_PCIE_VALID_COMMAND_STATUS)
+               printk("%s""command: 0x%04x, status: 0x%04x\n", pfx,
+                      pcie->command, pcie->status);
+       if (pcie->validation_bits & CPER_PCIE_VALID_DEVICE_ID) {
+               const __u8 *p;
+               printk("%s""device_id: %04x:%02x:%02x.%x\n", pfx,
+                      pcie->device_id.segment, pcie->device_id.bus,
+                      pcie->device_id.device, pcie->device_id.function);
+               printk("%s""slot: %d\n", pfx,
+                      pcie->device_id.slot >> CPER_PCIE_SLOT_SHIFT);
+               printk("%s""secondary_bus: 0x%02x\n", pfx,
+                      pcie->device_id.secondary_bus);
+               printk("%s""vendor_id: 0x%04x, device_id: 0x%04x\n", pfx,
+                      pcie->device_id.vendor_id, pcie->device_id.device_id);
+               p = pcie->device_id.class_code;
+               printk("%s""class_code: %02x%02x%02x\n", pfx, p[0], p[1], p[2]);
+       }
+       if (pcie->validation_bits & CPER_PCIE_VALID_SERIAL_NUMBER)
+               printk("%s""serial number: 0x%04x, 0x%04x\n", pfx,
+                      pcie->serial_number.lower, pcie->serial_number.upper);
+       if (pcie->validation_bits & CPER_PCIE_VALID_BRIDGE_CONTROL_STATUS)
+               printk(
+       "%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
+       pfx, pcie->bridge.secondary_status, pcie->bridge.control);
+}
+
+static const char *apei_estatus_section_flag_strs[] = {
+       "primary",
+       "containment warning",
+       "reset",
+       "threshold exceeded",
+       "resource not accessible",
+       "latent error",
+};
+
+static void apei_estatus_print_section(
+       const char *pfx, const struct acpi_hest_generic_data *gdata, int sec_no)
+{
+       uuid_le *sec_type = (uuid_le *)gdata->section_type;
+       __u16 severity;
+
+       severity = gdata->error_severity;
+       printk("%s""section: %d, severity: %d, %s\n", pfx, sec_no, severity,
+              cper_severity_str(severity));
+       printk("%s""flags: 0x%02x\n", pfx, gdata->flags);
+       cper_print_bits(pfx, gdata->flags, apei_estatus_section_flag_strs,
+                       ARRAY_SIZE(apei_estatus_section_flag_strs));
+       if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
+               printk("%s""fru_id: %pUl\n", pfx, (uuid_le *)gdata->fru_id);
+       if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
+               printk("%s""fru_text: %.20s\n", pfx, gdata->fru_text);
+
+       if (!uuid_le_cmp(*sec_type, CPER_SEC_PROC_GENERIC)) {
+               struct cper_sec_proc_generic *proc_err = (void *)(gdata + 1);
+               printk("%s""section_type: general processor error\n", pfx);
+               if (gdata->error_data_length >= sizeof(*proc_err))
+                       cper_print_proc_generic(pfx, proc_err);
+               else
+                       goto err_section_too_small;
+       } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) {
+               struct cper_sec_mem_err *mem_err = (void *)(gdata + 1);
+               printk("%s""section_type: memory error\n", pfx);
+               if (gdata->error_data_length >= sizeof(*mem_err))
+                       cper_print_mem(pfx, mem_err);
+               else
+                       goto err_section_too_small;
+       } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) {
+               struct cper_sec_pcie *pcie = (void *)(gdata + 1);
+               printk("%s""section_type: PCIe error\n", pfx);
+               if (gdata->error_data_length >= sizeof(*pcie))
+                       cper_print_pcie(pfx, pcie);
+               else
+                       goto err_section_too_small;
+       } else
+               printk("%s""section type: unknown, %pUl\n", pfx, sec_type);
+
+       return;
+
+err_section_too_small:
+       pr_err(FW_WARN "error section length is too small\n");
+}
+
+void apei_estatus_print(const char *pfx,
+                       const struct acpi_hest_generic_status *estatus)
+{
+       struct acpi_hest_generic_data *gdata;
+       unsigned int data_len, gedata_len;
+       int sec_no = 0;
+       __u16 severity;
+
+       printk("%s""APEI generic hardware error status\n", pfx);
+       severity = estatus->error_severity;
+       printk("%s""severity: %d, %s\n", pfx, severity,
+              cper_severity_str(severity));
+       data_len = estatus->data_length;
+       gdata = (struct acpi_hest_generic_data *)(estatus + 1);
+       while (data_len > sizeof(*gdata)) {
+               gedata_len = gdata->error_data_length;
+               apei_estatus_print_section(pfx, gdata, sec_no);
+               data_len -= gedata_len + sizeof(*gdata);
+               sec_no++;
+       }
+}
+EXPORT_SYMBOL_GPL(apei_estatus_print);
+
 int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus)
 {
        if (estatus->data_length &&
index 0d505e5..d1d484d 100644 (file)
  * For more information about Generic Hardware Error Source, please
  * refer to ACPI Specification version 4.0, section 17.3.2.6
  *
- * Now, only SCI notification type and memory errors are
- * supported. More notification type and hardware error type will be
- * added later.
- *
  * Copyright 2010 Intel Corp.
  *   Author: Huang Ying <ying.huang@intel.com>
  *
 #include <linux/acpi.h>
 #include <linux/io.h>
 #include <linux/interrupt.h>
+#include <linux/timer.h>
 #include <linux/cper.h>
 #include <linux/kdebug.h>
 #include <linux/platform_device.h>
 #include <linux/mutex.h>
+#include <linux/ratelimit.h>
+#include <linux/vmalloc.h>
 #include <acpi/apei.h>
 #include <acpi/atomicio.h>
 #include <acpi/hed.h>
 #include <asm/mce.h>
+#include <asm/tlbflush.h>
 
 #include "apei-internal.h"
 
 #define GHES_ESTATUS_MAX_SIZE          65536
 
 /*
- * One struct ghes is created for each generic hardware error
- * source.
- *
+ * One struct ghes is created for each generic hardware error source.
  * It provides the context for APEI hardware error timer/IRQ/SCI/NMI
- * handler. Handler for one generic hardware error source is only
- * triggered after the previous one is done. So handler can uses
- * struct ghes without locking.
+ * handler.
  *
  * estatus: memory buffer for error status block, allocated during
  * HEST parsing.
  */
 #define GHES_TO_CLEAR          0x0001
+#define GHES_EXITING           0x0002
 
 struct ghes {
        struct acpi_hest_generic *generic;
        struct acpi_hest_generic_status *estatus;
-       struct list_head list;
        u64 buffer_paddr;
        unsigned long flags;
+       union {
+               struct list_head list;
+               struct timer_list timer;
+               unsigned int irq;
+       };
 };
 
+static int ghes_panic_timeout  __read_mostly = 30;
+
 /*
- * Error source lists, one list for each notification method. The
- * members in lists are struct ghes.
+ * All error sources notified via SCI share one notifier function,
+ * so they need to be linked and checked one by one.  The same
+ * applies to NMI.
  *
- * The list members are only added in HEST parsing and deleted during
- * module_exit, that is, single-threaded. So no lock is needed for
- * that.
- *
- * But the mutual exclusion is needed between members adding/deleting
- * and timer/IRQ/SCI/NMI handler, which may traverse the list. RCU is
- * used for that.
+ * RCU is used for these lists, so ghes_list_mutex is only used for
+ * list changing, not for traversing.
  */
 static LIST_HEAD(ghes_sci);
+static LIST_HEAD(ghes_nmi);
 static DEFINE_MUTEX(ghes_list_mutex);
 
+/*
+ * NMI may be triggered on any CPU, so ghes_nmi_lock is used for
+ * mutual exclusion.
+ */
+static DEFINE_RAW_SPINLOCK(ghes_nmi_lock);
+
+/*
+ * The memory area used to transfer hardware error information from
+ * BIOS to Linux can be determined only in the NMI, IRQ or timer
+ * handler, but the generic ioremap cannot be used in atomic context,
+ * so a special version of atomic ioremap is implemented for that.
+ */
+
+/*
+ * Two virtual pages are used, one for NMI context, the other for
+ * IRQ/PROCESS context
+ */
+#define GHES_IOREMAP_PAGES             2
+#define GHES_IOREMAP_NMI_PAGE(base)    (base)
+#define GHES_IOREMAP_IRQ_PAGE(base)    ((base) + PAGE_SIZE)
+
+/* virtual memory area for atomic ioremap */
+static struct vm_struct *ghes_ioremap_area;
+/*
+ * These two spinlocks are used to prevent the atomic ioremap virtual
+ * memory pages from being mapped simultaneously.
+ */
+static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
+static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);
+
+static int ghes_ioremap_init(void)
+{
+       ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES,
+               VM_IOREMAP, VMALLOC_START, VMALLOC_END);
+       if (!ghes_ioremap_area) {
+               pr_err(GHES_PFX "Failed to allocate virtual memory area for atomic ioremap.\n");
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static void ghes_ioremap_exit(void)
+{
+       free_vm_area(ghes_ioremap_area);
+}
+
+static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn)
+{
+       unsigned long vaddr;
+
+       vaddr = (unsigned long)GHES_IOREMAP_NMI_PAGE(ghes_ioremap_area->addr);
+       ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
+                          pfn << PAGE_SHIFT, PAGE_KERNEL);
+
+       return (void __iomem *)vaddr;
+}
+
+static void __iomem *ghes_ioremap_pfn_irq(u64 pfn)
+{
+       unsigned long vaddr;
+
+       vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr);
+       ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
+                          pfn << PAGE_SHIFT, PAGE_KERNEL);
+
+       return (void __iomem *)vaddr;
+}
+
+static void ghes_iounmap_nmi(void __iomem *vaddr_ptr)
+{
+       unsigned long vaddr = (unsigned long __force)vaddr_ptr;
+       void *base = ghes_ioremap_area->addr;
+
+       BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base));
+       unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
+       __flush_tlb_one(vaddr);
+}
+
+static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
+{
+       unsigned long vaddr = (unsigned long __force)vaddr_ptr;
+       void *base = ghes_ioremap_area->addr;
+
+       BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base));
+       unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
+       __flush_tlb_one(vaddr);
+}
+
 static struct ghes *ghes_new(struct acpi_hest_generic *generic)
 {
        struct ghes *ghes;
@@ -101,7 +190,6 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
        if (!ghes)
                return ERR_PTR(-ENOMEM);
        ghes->generic = generic;
-       INIT_LIST_HEAD(&ghes->list);
        rc = acpi_pre_map_gar(&generic->error_status_address);
        if (rc)
                goto err_free;
@@ -158,22 +246,41 @@ static inline int ghes_severity(int severity)
        }
 }
 
-/* SCI handler run in work queue, so ioremap can be used here */
-static int ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
-                                int from_phys)
+static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
+                                 int from_phys)
 {
-       void *vaddr;
-
-       vaddr = ioremap_cache(paddr, len);
-       if (!vaddr)
-               return -ENOMEM;
-       if (from_phys)
-               memcpy(buffer, vaddr, len);
-       else
-               memcpy(vaddr, buffer, len);
-       iounmap(vaddr);
-
-       return 0;
+       void __iomem *vaddr;
+       unsigned long flags = 0;
+       int in_nmi = in_nmi();
+       u64 offset;
+       u32 trunk;
+
+       while (len > 0) {
+               offset = paddr - (paddr & PAGE_MASK);
+               if (in_nmi) {
+                       raw_spin_lock(&ghes_ioremap_lock_nmi);
+                       vaddr = ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT);
+               } else {
+                       spin_lock_irqsave(&ghes_ioremap_lock_irq, flags);
+                       vaddr = ghes_ioremap_pfn_irq(paddr >> PAGE_SHIFT);
+               }
+               trunk = PAGE_SIZE - offset;
+               trunk = min(trunk, len);
+               if (from_phys)
+                       memcpy_fromio(buffer, vaddr + offset, trunk);
+               else
+                       memcpy_toio(vaddr + offset, buffer, trunk);
+               len -= trunk;
+               paddr += trunk;
+               buffer += trunk;
+               if (in_nmi) {
+                       ghes_iounmap_nmi(vaddr);
+                       raw_spin_unlock(&ghes_ioremap_lock_nmi);
+               } else {
+                       ghes_iounmap_irq(vaddr);
+                       spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags);
+               }
+       }
 }
 
 static int ghes_read_estatus(struct ghes *ghes, int silent)
@@ -194,10 +301,8 @@ static int ghes_read_estatus(struct ghes *ghes, int silent)
        if (!buf_paddr)
                return -ENOENT;
 
-       rc = ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
-                                  sizeof(*ghes->estatus), 1);
-       if (rc)
-               return rc;
+       ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
+                             sizeof(*ghes->estatus), 1);
        if (!ghes->estatus->block_status)
                return -ENOENT;
 
@@ -212,17 +317,15 @@ static int ghes_read_estatus(struct ghes *ghes, int silent)
                goto err_read_block;
        if (apei_estatus_check_header(ghes->estatus))
                goto err_read_block;
-       rc = ghes_copy_tofrom_phys(ghes->estatus + 1,
-                                  buf_paddr + sizeof(*ghes->estatus),
-                                  len - sizeof(*ghes->estatus), 1);
-       if (rc)
-               return rc;
+       ghes_copy_tofrom_phys(ghes->estatus + 1,
+                             buf_paddr + sizeof(*ghes->estatus),
+                             len - sizeof(*ghes->estatus), 1);
        if (apei_estatus_check(ghes->estatus))
                goto err_read_block;
        rc = 0;
 
 err_read_block:
-       if (rc && !silent)
+       if (rc && !silent && printk_ratelimit())
                pr_warning(FW_WARN GHES_PFX
                           "Failed to read error status block!\n");
        return rc;
@@ -255,11 +358,26 @@ static void ghes_do_proc(struct ghes *ghes)
                }
 #endif
        }
+}
 
-       if (!processed && printk_ratelimit())
-               pr_warning(GHES_PFX
-               "Unknown error record from generic hardware error source: %d\n",
-                          ghes->generic->header.source_id);
+static void ghes_print_estatus(const char *pfx, struct ghes *ghes)
+{
+       /* Not more than 2 messages every 5 seconds */
+       static DEFINE_RATELIMIT_STATE(ratelimit, 5*HZ, 2);
+
+       if (pfx == NULL) {
+               if (ghes_severity(ghes->estatus->error_severity) <=
+                   GHES_SEV_CORRECTED)
+                       pfx = KERN_WARNING HW_ERR;
+               else
+                       pfx = KERN_ERR HW_ERR;
+       }
+       if (__ratelimit(&ratelimit)) {
+               printk(
+       "%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
+       pfx, ghes->generic->header.source_id);
+               apei_estatus_print(pfx, ghes->estatus);
+       }
 }
 
 static int ghes_proc(struct ghes *ghes)
@@ -269,6 +387,7 @@ static int ghes_proc(struct ghes *ghes)
        rc = ghes_read_estatus(ghes, 0);
        if (rc)
                goto out;
+       ghes_print_estatus(NULL, ghes);
        ghes_do_proc(ghes);
 
 out:
@@ -276,6 +395,42 @@ out:
        return 0;
 }
 
+static void ghes_add_timer(struct ghes *ghes)
+{
+       struct acpi_hest_generic *g = ghes->generic;
+       unsigned long expire;
+
+       if (!g->notify.poll_interval) {
+               pr_warning(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n",
+                          g->header.source_id);
+               return;
+       }
+       expire = jiffies + msecs_to_jiffies(g->notify.poll_interval);
+       ghes->timer.expires = round_jiffies_relative(expire);
+       add_timer(&ghes->timer);
+}
+
+static void ghes_poll_func(unsigned long data)
+{
+       struct ghes *ghes = (void *)data;
+
+       ghes_proc(ghes);
+       if (!(ghes->flags & GHES_EXITING))
+               ghes_add_timer(ghes);
+}
+
+static irqreturn_t ghes_irq_func(int irq, void *data)
+{
+       struct ghes *ghes = data;
+       int rc;
+
+       rc = ghes_proc(ghes);
+       if (rc)
+               return IRQ_NONE;
+
+       return IRQ_HANDLED;
+}
+
 static int ghes_notify_sci(struct notifier_block *this,
                                  unsigned long event, void *data)
 {
@@ -292,10 +447,63 @@ static int ghes_notify_sci(struct notifier_block *this,
        return ret;
 }
 
+static int ghes_notify_nmi(struct notifier_block *this,
+                                 unsigned long cmd, void *data)
+{
+       struct ghes *ghes, *ghes_global = NULL;
+       int sev, sev_global = -1;
+       int ret = NOTIFY_DONE;
+
+       if (cmd != DIE_NMI)
+               return ret;
+
+       raw_spin_lock(&ghes_nmi_lock);
+       list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
+               if (ghes_read_estatus(ghes, 1)) {
+                       ghes_clear_estatus(ghes);
+                       continue;
+               }
+               sev = ghes_severity(ghes->estatus->error_severity);
+               if (sev > sev_global) {
+                       sev_global = sev;
+                       ghes_global = ghes;
+               }
+               ret = NOTIFY_STOP;
+       }
+
+       if (ret == NOTIFY_DONE)
+               goto out;
+
+       if (sev_global >= GHES_SEV_PANIC) {
+               oops_begin();
+               ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global);
+               /* reboot to log the error! */
+               if (panic_timeout == 0)
+                       panic_timeout = ghes_panic_timeout;
+               panic("Fatal hardware error!");
+       }
+
+       list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
+               if (!(ghes->flags & GHES_TO_CLEAR))
+                       continue;
+               /* Do not print estatus because printk is not NMI safe */
+               ghes_do_proc(ghes);
+               ghes_clear_estatus(ghes);
+       }
+
+out:
+       raw_spin_unlock(&ghes_nmi_lock);
+       return ret;
+}
+
 static struct notifier_block ghes_notifier_sci = {
        .notifier_call = ghes_notify_sci,
 };
 
+static struct notifier_block ghes_notifier_nmi = {
+       .notifier_call = ghes_notify_nmi,
+};
+
 static int __devinit ghes_probe(struct platform_device *ghes_dev)
 {
        struct acpi_hest_generic *generic;
@@ -306,18 +514,27 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev)
        if (!generic->enabled)
                return -ENODEV;
 
-       if (generic->error_block_length <
-           sizeof(struct acpi_hest_generic_status)) {
-               pr_warning(FW_BUG GHES_PFX
-"Invalid error block length: %u for generic hardware error source: %d\n",
-                          generic->error_block_length,
+       switch (generic->notify.type) {
+       case ACPI_HEST_NOTIFY_POLLED:
+       case ACPI_HEST_NOTIFY_EXTERNAL:
+       case ACPI_HEST_NOTIFY_SCI:
+       case ACPI_HEST_NOTIFY_NMI:
+               break;
+       case ACPI_HEST_NOTIFY_LOCAL:
+               pr_warning(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
                           generic->header.source_id);
                goto err;
+       default:
+               pr_warning(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n",
+                          generic->notify.type, generic->header.source_id);
+               goto err;
        }
-       if (generic->records_to_preallocate == 0) {
-               pr_warning(FW_BUG GHES_PFX
-"Invalid records to preallocate: %u for generic hardware error source: %d\n",
-                          generic->records_to_preallocate,
+
+       rc = -EIO;
+       if (generic->error_block_length <
+           sizeof(struct acpi_hest_generic_status)) {
+               pr_warning(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
+                          generic->error_block_length,
                           generic->header.source_id);
                goto err;
        }
@@ -327,38 +544,43 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev)
                ghes = NULL;
                goto err;
        }
-       if (generic->notify.type == ACPI_HEST_NOTIFY_SCI) {
+       switch (generic->notify.type) {
+       case ACPI_HEST_NOTIFY_POLLED:
+               ghes->timer.function = ghes_poll_func;
+               ghes->timer.data = (unsigned long)ghes;
+               init_timer_deferrable(&ghes->timer);
+               ghes_add_timer(ghes);
+               break;
+       case ACPI_HEST_NOTIFY_EXTERNAL:
+               /* External interrupt vector is GSI */
+               if (acpi_gsi_to_irq(generic->notify.vector, &ghes->irq)) {
+                       pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
+                              generic->header.source_id);
+                       goto err;
+               }
+               if (request_irq(ghes->irq, ghes_irq_func,
+                               0, "GHES IRQ", ghes)) {
+                       pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
+                              generic->header.source_id);
+                       goto err;
+               }
+               break;
+       case ACPI_HEST_NOTIFY_SCI:
                mutex_lock(&ghes_list_mutex);
                if (list_empty(&ghes_sci))
                        register_acpi_hed_notifier(&ghes_notifier_sci);
                list_add_rcu(&ghes->list, &ghes_sci);
                mutex_unlock(&ghes_list_mutex);
-       } else {
-               unsigned char *notify = NULL;
-
-               switch (generic->notify.type) {
-               case ACPI_HEST_NOTIFY_POLLED:
-                       notify = "POLL";
-                       break;
-               case ACPI_HEST_NOTIFY_EXTERNAL:
-               case ACPI_HEST_NOTIFY_LOCAL:
-                       notify = "IRQ";
-                       break;
-               case ACPI_HEST_NOTIFY_NMI:
-                       notify = "NMI";
-                       break;
-               }
-               if (notify) {
-                       pr_warning(GHES_PFX
-"Generic hardware error source: %d notified via %s is not supported!\n",
-                                  generic->header.source_id, notify);
-               } else {
-                       pr_warning(FW_WARN GHES_PFX
-"Unknown notification type: %u for generic hardware error source: %d\n",
-                       generic->notify.type, generic->header.source_id);
-               }
-               rc = -ENODEV;
-               goto err;
+               break;
+       case ACPI_HEST_NOTIFY_NMI:
+               mutex_lock(&ghes_list_mutex);
+               if (list_empty(&ghes_nmi))
+                       register_die_notifier(&ghes_notifier_nmi);
+               list_add_rcu(&ghes->list, &ghes_nmi);
+               mutex_unlock(&ghes_list_mutex);
+               break;
+       default:
+               BUG();
        }
        platform_set_drvdata(ghes_dev, ghes);
 
@@ -379,7 +601,14 @@ static int __devexit ghes_remove(struct platform_device *ghes_dev)
        ghes = platform_get_drvdata(ghes_dev);
        generic = ghes->generic;
 
+       ghes->flags |= GHES_EXITING;
        switch (generic->notify.type) {
+       case ACPI_HEST_NOTIFY_POLLED:
+               del_timer_sync(&ghes->timer);
+               break;
+       case ACPI_HEST_NOTIFY_EXTERNAL:
+               free_irq(ghes->irq, ghes);
+               break;
        case ACPI_HEST_NOTIFY_SCI:
                mutex_lock(&ghes_list_mutex);
                list_del_rcu(&ghes->list);
@@ -387,12 +616,23 @@ static int __devexit ghes_remove(struct platform_device *ghes_dev)
                        unregister_acpi_hed_notifier(&ghes_notifier_sci);
                mutex_unlock(&ghes_list_mutex);
                break;
+       case ACPI_HEST_NOTIFY_NMI:
+               mutex_lock(&ghes_list_mutex);
+               list_del_rcu(&ghes->list);
+               if (list_empty(&ghes_nmi))
+                       unregister_die_notifier(&ghes_notifier_nmi);
+               mutex_unlock(&ghes_list_mutex);
+               /*
+                * To synchronize with NMI handler, ghes can only be
+                * freed after NMI handler finishes.
+                */
+               synchronize_rcu();
+               break;
        default:
                BUG();
                break;
        }
 
-       synchronize_rcu();
        ghes_fini(ghes);
        kfree(ghes);
 
@@ -412,6 +652,8 @@ static struct platform_driver ghes_platform_driver = {
 
 static int __init ghes_init(void)
 {
+       int rc;
+
        if (acpi_disabled)
                return -ENODEV;
 
@@ -420,12 +662,25 @@ static int __init ghes_init(void)
                return -EINVAL;
        }
 
-       return platform_driver_register(&ghes_platform_driver);
+       rc = ghes_ioremap_init();
+       if (rc)
+               goto err;
+
+       rc = platform_driver_register(&ghes_platform_driver);
+       if (rc)
+               goto err_ioremap_exit;
+
+       return 0;
+err_ioremap_exit:
+       ghes_ioremap_exit();
+err:
+       return rc;
 }
 
 static void __exit ghes_exit(void)
 {
        platform_driver_unregister(&ghes_platform_driver);
+       ghes_ioremap_exit();
 }
 
 module_init(ghes_init);
index 95649d3..68bc227 100644 (file)
@@ -631,6 +631,17 @@ static int acpi_battery_update(struct acpi_battery *battery)
        return result;
 }
 
+static void acpi_battery_refresh(struct acpi_battery *battery)
+{
+       if (!battery->bat.dev)
+               return;
+
+       acpi_battery_get_info(battery);
+       /* The battery may have changed its reporting units. */
+       sysfs_remove_battery(battery);
+       sysfs_add_battery(battery);
+}
+
 /* --------------------------------------------------------------------------
                               FS Interface (/proc)
    -------------------------------------------------------------------------- */
@@ -868,6 +879,8 @@ static int acpi_battery_add_fs(struct acpi_device *device)
        struct proc_dir_entry *entry = NULL;
        int i;
 
+       printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded,"
+                       " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
        if (!acpi_device_dir(device)) {
                acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
                                                     acpi_battery_dir);
@@ -914,6 +927,8 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event)
        if (!battery)
                return;
        old = battery->bat.dev;
+       if (event == ACPI_BATTERY_NOTIFY_INFO)
+               acpi_battery_refresh(battery);
        acpi_battery_update(battery);
        acpi_bus_generate_proc_event(device, event,
                                     acpi_battery_present(battery));
@@ -983,6 +998,7 @@ static int acpi_battery_resume(struct acpi_device *device)
        if (!device)
                return -EINVAL;
        battery = acpi_driver_data(device);
+       acpi_battery_refresh(battery);
        battery->update_time = 0;
        acpi_battery_update(battery);
        return 0;
index d68bd61..7ced61f 100644 (file)
@@ -52,22 +52,6 @@ EXPORT_SYMBOL(acpi_root_dir);
 
 #define STRUCT_TO_INT(s)       (*((int*)&s))
 
-static int set_power_nocheck(const struct dmi_system_id *id)
-{
-       printk(KERN_NOTICE PREFIX "%s detected - "
-               "disable power check in power transition\n", id->ident);
-       acpi_power_nocheck = 1;
-       return 0;
-}
-static struct dmi_system_id __cpuinitdata power_nocheck_dmi_table[] = {
-       {
-       set_power_nocheck, "HP Pavilion 05", {
-       DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
-       DMI_MATCH(DMI_SYS_VENDOR, "HP Pavilion 05"),
-       DMI_MATCH(DMI_PRODUCT_VERSION, "2001211RE101GLEND") }, NULL},
-       {},
-};
-
 
 #ifdef CONFIG_X86
 static int set_copy_dsdt(const struct dmi_system_id *id)
@@ -196,33 +180,24 @@ EXPORT_SYMBOL(acpi_bus_get_private_data);
                                  Power Management
    -------------------------------------------------------------------------- */
 
-int acpi_bus_get_power(acpi_handle handle, int *state)
+static int __acpi_bus_get_power(struct acpi_device *device, int *state)
 {
        int result = 0;
        acpi_status status = 0;
-       struct acpi_device *device = NULL;
        unsigned long long psc = 0;
 
-
-       result = acpi_bus_get_device(handle, &device);
-       if (result)
-               return result;
+       if (!device || !state)
+               return -EINVAL;
 
        *state = ACPI_STATE_UNKNOWN;
 
-       if (!device->flags.power_manageable) {
-               /* TBD: Non-recursive algorithm for walking up hierarchy */
-               if (device->parent)
-                       *state = device->parent->power.state;
-               else
-                       *state = ACPI_STATE_D0;
-       } else {
+       if (device->flags.power_manageable) {
                /*
                 * Get the device's power state either directly (via _PSC) or
                 * indirectly (via power resources).
                 */
                if (device->power.flags.power_resources) {
-                       result = acpi_power_get_inferred_state(device);
+                       result = acpi_power_get_inferred_state(device, state);
                        if (result)
                                return result;
                } else if (device->power.flags.explicit_get) {
@@ -230,59 +205,33 @@ int acpi_bus_get_power(acpi_handle handle, int *state)
                                                       NULL, &psc);
                        if (ACPI_FAILURE(status))
                                return -ENODEV;
-                       device->power.state = (int)psc;
+                       *state = (int)psc;
                }
-
-               *state = device->power.state;
+       } else {
+               /* TBD: Non-recursive algorithm for walking up hierarchy. */
+               *state = device->parent ?
+                       device->parent->power.state : ACPI_STATE_D0;
        }
 
        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is D%d\n",
-                         device->pnp.bus_id, device->power.state));
+                         device->pnp.bus_id, *state));
 
        return 0;
 }
 
-EXPORT_SYMBOL(acpi_bus_get_power);
 
-int acpi_bus_set_power(acpi_handle handle, int state)
+static int __acpi_bus_set_power(struct acpi_device *device, int state)
 {
        int result = 0;
        acpi_status status = AE_OK;
-       struct acpi_device *device = NULL;
        char object_name[5] = { '_', 'P', 'S', '0' + state, '\0' };
 
-
-       result = acpi_bus_get_device(handle, &device);
-       if (result)
-               return result;
-
-       if ((state < ACPI_STATE_D0) || (state > ACPI_STATE_D3))
+       if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3))
                return -EINVAL;
 
        /* Make sure this is a valid target state */
 
-       if (!device->flags.power_manageable) {
-               ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable\n",
-                               kobject_name(&device->dev.kobj)));
-               return -ENODEV;
-       }
-       /*
-        * Get device's current power state
-        */
-       if (!acpi_power_nocheck) {
-               /*
-                * Maybe the incorrect power state is returned on the bogus
-                * bios, which is different with the real power state.
-                * For example: the bios returns D0 state and the real power
-                * state is D3. OS expects to set the device to D0 state. In
-                * such case if OS uses the power state returned by the BIOS,
-                * the device can't be transisted to the correct power state.
-                * So if the acpi_power_nocheck is set, it is unnecessary to
-                * get the power state by calling acpi_bus_get_power.
-                */
-               acpi_bus_get_power(device->handle, &device->power.state);
-       }
-       if ((state == device->power.state) && !device->flags.force_power_state) {
+       if (state == device->power.state) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n",
                                  state));
                return 0;
@@ -351,8 +300,75 @@ int acpi_bus_set_power(acpi_handle handle, int state)
        return result;
 }
 
+
+int acpi_bus_set_power(acpi_handle handle, int state)
+{
+       struct acpi_device *device;
+       int result;
+
+       result = acpi_bus_get_device(handle, &device);
+       if (result)
+               return result;
+
+       if (!device->flags.power_manageable) {
+               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+                               "Device [%s] is not power manageable\n",
+                               dev_name(&device->dev)));
+               return -ENODEV;
+       }
+
+       return __acpi_bus_set_power(device, state);
+}
 EXPORT_SYMBOL(acpi_bus_set_power);
 
+
+int acpi_bus_init_power(struct acpi_device *device)
+{
+       int state;
+       int result;
+
+       if (!device)
+               return -EINVAL;
+
+       device->power.state = ACPI_STATE_UNKNOWN;
+
+       result = __acpi_bus_get_power(device, &state);
+       if (result)
+               return result;
+
+       if (device->power.flags.power_resources)
+               result = acpi_power_on_resources(device, state);
+
+       if (!result)
+               device->power.state = state;
+
+       return result;
+}
+
+
+int acpi_bus_update_power(acpi_handle handle, int *state_p)
+{
+       struct acpi_device *device;
+       int state;
+       int result;
+
+       result = acpi_bus_get_device(handle, &device);
+       if (result)
+               return result;
+
+       result = __acpi_bus_get_power(device, &state);
+       if (result)
+               return result;
+
+       result = __acpi_bus_set_power(device, state);
+       if (!result && state_p)
+               *state_p = state;
+
+       return result;
+}
+EXPORT_SYMBOL_GPL(acpi_bus_update_power);
+
+
 bool acpi_bus_power_manageable(acpi_handle handle)
 {
        struct acpi_device *device;
@@ -1023,15 +1039,8 @@ static int __init acpi_init(void)
        if (acpi_disabled)
                return result;
 
-       /*
-        * If the laptop falls into the DMI check table, the power state check
-        * will be disabled in the course of device power transition.
-        */
-       dmi_check_system(power_nocheck_dmi_table);
-
        acpi_scan_init();
        acpi_ec_init();
-       acpi_power_init();
        acpi_debugfs_init();
        acpi_sleep_proc_init();
        acpi_wakeup_device_init();
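The hunks above replace the exported acpi_bus_get_power() with acpi_bus_update_power(), which re-reads a device's ACPI power state and then programs the device to match it in a single call. A minimal caller sketch follows, assuming a hypothetical driver resume path (illustrative only, not part of this commit):

	/* Hypothetical resume callback using the new helper. */
	static int example_acpi_resume(struct acpi_device *device)
	{
		int state;
		int error;

		/* Re-evaluate _PSC/power resources and transition the device. */
		error = acpi_bus_update_power(device->handle, &state);
		if (error)
			return error;

		dev_dbg(&device->dev, "resumed in D%d\n", state);
		return 0;
	}

Passing NULL as the second argument, as the fan driver does below, is sufficient when the caller does not need the resulting state.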
index 71ef9cd..76bbb78 100644
@@ -279,6 +279,9 @@ static int acpi_lid_send_state(struct acpi_device *device)
        input_report_switch(button->input, SW_LID, !state);
        input_sync(button->input);
 
+       if (state)
+               pm_wakeup_event(&device->dev, 0);
+
        ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
        if (ret == NOTIFY_DONE)
                ret = blocking_notifier_call_chain(&acpi_lid_notifier, state,
@@ -314,6 +317,8 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
                        input_sync(input);
                        input_report_key(input, keycode, 0);
                        input_sync(input);
+
+                       pm_wakeup_event(&device->dev, 0);
                }
 
                acpi_bus_generate_proc_event(device, event, ++button->pushed);
@@ -426,7 +431,7 @@ static int acpi_button_add(struct acpi_device *device)
                acpi_enable_gpe(device->wakeup.gpe_device,
                                device->wakeup.gpe_number);
                device->wakeup.run_wake_count++;
-               device->wakeup.state.enabled = 1;
+               device_set_wakeup_enable(&device->dev, true);
        }
 
        printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device));
@@ -449,7 +454,7 @@ static int acpi_button_remove(struct acpi_device *device, int type)
                acpi_disable_gpe(device->wakeup.gpe_device,
                                device->wakeup.gpe_number);
                device->wakeup.run_wake_count--;
-               device->wakeup.state.enabled = 0;
+               device_set_wakeup_enable(&device->dev, false);
        }
 
        acpi_button_remove_fs(device);
index 81514a4..1864ad3 100644
@@ -725,7 +725,7 @@ static void dock_notify(acpi_handle handle, u32 event, void *data)
                        complete_dock(ds);
                        dock_event(ds, event, DOCK_EVENT);
                        dock_lock(ds, 1);
-                       acpi_update_gpes();
+                       acpi_update_all_gpes();
                        break;
                }
                if (dock_present(ds) || dock_in_progress(ds))
index 302b31e..fa848c4 100644
@@ -606,7 +606,8 @@ static int ec_check_sci(struct acpi_ec *ec, u8 state)
        return 0;
 }
 
-static u32 acpi_ec_gpe_handler(void *data)
+static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
+       u32 gpe_number, void *data)
 {
        struct acpi_ec *ec = data;
 
@@ -618,7 +619,7 @@ static u32 acpi_ec_gpe_handler(void *data)
                wake_up(&ec->wait);
                ec_check_sci(ec, acpi_ec_read_status(ec));
        }
-       return ACPI_INTERRUPT_HANDLED;
+       return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
 }
 
 /* --------------------------------------------------------------------------
index 6004908..467479f 100644
@@ -86,7 +86,7 @@ static int fan_get_cur_state(struct thermal_cooling_device *cdev, unsigned long
        if (!device)
                return -EINVAL;
 
-       result = acpi_bus_get_power(device->handle, &acpi_state);
+       result = acpi_bus_update_power(device->handle, &acpi_state);
        if (result)
                return result;
 
@@ -123,7 +123,6 @@ static struct thermal_cooling_device_ops fan_cooling_ops = {
 static int acpi_fan_add(struct acpi_device *device)
 {
        int result = 0;
-       int state = 0;
        struct thermal_cooling_device *cdev;
 
        if (!device)
@@ -132,16 +131,12 @@ static int acpi_fan_add(struct acpi_device *device)
        strcpy(acpi_device_name(device), "Fan");
        strcpy(acpi_device_class(device), ACPI_FAN_CLASS);
 
-       result = acpi_bus_get_power(device->handle, &state);
+       result = acpi_bus_update_power(device->handle, NULL);
        if (result) {
-               printk(KERN_ERR PREFIX "Reading power state\n");
+               printk(KERN_ERR PREFIX "Setting initial power state\n");
                goto end;
        }
 
-       device->flags.force_power_state = 1;
-       acpi_bus_set_power(device->handle, state);
-       device->flags.force_power_state = 0;
-
        cdev = thermal_cooling_device_register("Fan", device,
                                                &fan_cooling_ops);
        if (IS_ERR(cdev)) {
@@ -200,22 +195,14 @@ static int acpi_fan_suspend(struct acpi_device *device, pm_message_t state)
 
 static int acpi_fan_resume(struct acpi_device *device)
 {
-       int result = 0;
-       int power_state = 0;
+       int result;
 
        if (!device)
                return -EINVAL;
 
-       result = acpi_bus_get_power(device->handle, &power_state);
-       if (result) {
-               printk(KERN_ERR PREFIX
-                                 "Error reading fan power state\n");
-               return result;
-       }
-
-       device->flags.force_power_state = 1;
-       acpi_bus_set_power(device->handle, power_state);
-       device->flags.force_power_state = 0;
+       result = acpi_bus_update_power(device->handle, NULL);
+       if (result)
+               printk(KERN_ERR PREFIX "Error updating fan power state\n");
 
        return result;
 }
index 78b0164..7c47ed5 100644
@@ -167,11 +167,8 @@ static int acpi_bind_one(struct device *dev, acpi_handle handle)
                                "firmware_node");
                ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
                                "physical_node");
-               if (acpi_dev->wakeup.flags.valid) {
+               if (acpi_dev->wakeup.flags.valid)
                        device_set_wakeup_capable(dev, true);
-                       device_set_wakeup_enable(dev,
-                                               acpi_dev->wakeup.state.enabled);
-               }
        }
 
        return 0;
index a212bfe..b1cc81a 100644
@@ -41,9 +41,10 @@ static inline int acpi_debugfs_init(void) { return 0; }
 int acpi_power_init(void);
 int acpi_device_sleep_wake(struct acpi_device *dev,
                            int enable, int sleep_state, int dev_state);
-int acpi_power_get_inferred_state(struct acpi_device *device);
+int acpi_power_get_inferred_state(struct acpi_device *device, int *state);
+int acpi_power_on_resources(struct acpi_device *device, int state);
 int acpi_power_transition(struct acpi_device *device, int state);
-extern int acpi_power_nocheck;
+int acpi_bus_init_power(struct acpi_device *device);
 
 int acpi_wakeup_device_init(void);
 void acpi_early_processor_set_pdc(void);
@@ -82,8 +83,16 @@ extern int acpi_sleep_init(void);
 
 #ifdef CONFIG_ACPI_SLEEP
 int acpi_sleep_proc_init(void);
+int suspend_nvs_alloc(void);
+void suspend_nvs_free(void);
+int suspend_nvs_save(void);
+void suspend_nvs_restore(void);
 #else
 static inline int acpi_sleep_proc_init(void) { return 0; }
+static inline int suspend_nvs_alloc(void) { return 0; }
+static inline void suspend_nvs_free(void) {}
+static inline int suspend_nvs_save(void) { return 0; }
+static inline void suspend_nvs_restore(void) {}
 #endif
 
 #endif /* _ACPI_INTERNAL_H_ */
similarity index 86%
rename from kernel/power/nvs.c
rename to drivers/acpi/nvs.c
index 1836db6..54b6ab8 100644
@@ -1,7 +1,7 @@
 /*
- * linux/kernel/power/hibernate_nvs.c - Routines for handling NVS memory
+ * nvs.c - Routines for saving and restoring ACPI NVS memory region
  *
- * Copyright (C) 2008,2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ * Copyright (C) 2008-2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
  *
  * This file is released under the GPLv2.
  */
@@ -11,7 +11,8 @@
 #include <linux/list.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
-#include <linux/suspend.h>
+#include <linux/acpi.h>
+#include <acpi/acpiosxf.h>
 
 /*
  * Platforms, like ACPI, may want us to save some memory used by them during
@@ -79,7 +80,7 @@ void suspend_nvs_free(void)
                        free_page((unsigned long)entry->data);
                        entry->data = NULL;
                        if (entry->kaddr) {
-                               iounmap(entry->kaddr);
+                               acpi_os_unmap_memory(entry->kaddr, entry->size);
                                entry->kaddr = NULL;
                        }
                }
@@ -105,7 +106,7 @@ int suspend_nvs_alloc(void)
 /**
  *     suspend_nvs_save - save NVS memory regions
  */
-void suspend_nvs_save(void)
+int suspend_nvs_save(void)
 {
        struct nvs_page *entry;
 
@@ -113,9 +114,16 @@ void suspend_nvs_save(void)
 
        list_for_each_entry(entry, &nvs_list, node)
                if (entry->data) {
-                       entry->kaddr = ioremap(entry->phys_start, entry->size);
+                       entry->kaddr = acpi_os_map_memory(entry->phys_start,
+                                                         entry->size);
+                       if (!entry->kaddr) {
+                               suspend_nvs_free();
+                               return -ENOMEM;
+                       }
                        memcpy(entry->data, entry->kaddr, entry->size);
                }
+
+       return 0;
 }
 
 /**
index 055d7b7..e2dd6de 100644
@@ -320,7 +320,7 @@ acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
 
        pg_off = round_down(phys, PAGE_SIZE);
        pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
-       virt = ioremap(pg_off, pg_sz);
+       virt = ioremap_cache(pg_off, pg_sz);
        if (!virt) {
                kfree(map);
                return NULL;
@@ -642,7 +642,7 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
        virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
        rcu_read_unlock();
        if (!virt_addr) {
-               virt_addr = ioremap(phys_addr, size);
+               virt_addr = ioremap_cache(phys_addr, size);
                unmap = 1;
        }
        if (!value)
@@ -678,7 +678,7 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
        virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
        rcu_read_unlock();
        if (!virt_addr) {
-               virt_addr = ioremap(phys_addr, size);
+               virt_addr = ioremap_cache(phys_addr, size);
                unmap = 1;
        }
 
@@ -1233,8 +1233,7 @@ __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
 int acpi_check_resource_conflict(const struct resource *res)
 {
        struct acpi_res_list *res_list_elem;
-       int ioport;
-       int clash = 0;
+       int ioport = 0, clash = 0;
 
        if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
                return 0;
@@ -1264,9 +1263,13 @@ int acpi_check_resource_conflict(const struct resource *res)
        if (clash) {
                if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
                        printk(KERN_WARNING "ACPI: resource %s %pR"
-                              " conflicts with ACPI region %s %pR\n",
+                              " conflicts with ACPI region %s "
+                              "[%s 0x%zx-0x%zx]\n",
                               res->name, res, res_list_elem->name,
-                              res_list_elem);
+                              (res_list_elem->resource_type ==
+                               ACPI_ADR_SPACE_SYSTEM_IO) ? "io" : "mem",
+                              (size_t) res_list_elem->start,
+                              (size_t) res_list_elem->end);
                        if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
                                printk(KERN_NOTICE "ACPI: This conflict may"
                                       " cause random problems and system"
index 4c9c2fb..9ac2a9f 100644
@@ -56,9 +56,6 @@ ACPI_MODULE_NAME("power");
 #define ACPI_POWER_RESOURCE_STATE_ON   0x01
 #define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF
 
-int acpi_power_nocheck;
-module_param_named(power_nocheck, acpi_power_nocheck, bool, 000);
-
 static int acpi_power_add(struct acpi_device *device);
 static int acpi_power_remove(struct acpi_device *device, int type);
 static int acpi_power_resume(struct acpi_device *device);
@@ -148,9 +145,8 @@ static int acpi_power_get_state(acpi_handle handle, int *state)
 
 static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state)
 {
-       int result = 0, state1;
-       u32 i = 0;
-
+       int cur_state;
+       int i = 0;
 
        if (!list || !state)
                return -EINVAL;
@@ -158,25 +154,33 @@ static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state)
        /* The state of the list is 'on' IFF all resources are 'on'. */
 
        for (i = 0; i < list->count; i++) {
-               /*
-                * The state of the power resource can be obtained by
-                * using the ACPI handle. In such case it is unnecessary to
-                * get the Power resource first and then get its state again.
-                */
-               result = acpi_power_get_state(list->handles[i], &state1);
+               struct acpi_power_resource *resource;
+               acpi_handle handle = list->handles[i];
+               int result;
+
+               result = acpi_power_get_context(handle, &resource);
                if (result)
                        return result;
 
-               *state = state1;
+               mutex_lock(&resource->resource_lock);
 
-               if (*state != ACPI_POWER_RESOURCE_STATE_ON)
+               result = acpi_power_get_state(handle, &cur_state);
+
+               mutex_unlock(&resource->resource_lock);
+
+               if (result)
+                       return result;
+
+               if (cur_state != ACPI_POWER_RESOURCE_STATE_ON)
                        break;
        }
 
        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource list is %s\n",
-                         *state ? "on" : "off"));
+                         cur_state ? "on" : "off"));
 
-       return result;
+       *state = cur_state;
+
+       return 0;
 }
 
 static int __acpi_power_on(struct acpi_power_resource *resource)
@@ -222,7 +226,7 @@ static int acpi_power_on(acpi_handle handle)
        return result;
 }
 
-static int acpi_power_off_device(acpi_handle handle)
+static int acpi_power_off(acpi_handle handle)
 {
        int result = 0;
        acpi_status status = AE_OK;
@@ -266,6 +270,35 @@ static int acpi_power_off_device(acpi_handle handle)
        return result;
 }
 
+static void __acpi_power_off_list(struct acpi_handle_list *list, int num_res)
+{
+       int i;
+
+       for (i = num_res - 1; i >= 0 ; i--)
+               acpi_power_off(list->handles[i]);
+}
+
+static void acpi_power_off_list(struct acpi_handle_list *list)
+{
+       __acpi_power_off_list(list, list->count);
+}
+
+static int acpi_power_on_list(struct acpi_handle_list *list)
+{
+       int result = 0;
+       int i;
+
+       for (i = 0; i < list->count; i++) {
+               result = acpi_power_on(list->handles[i]);
+               if (result) {
+                       __acpi_power_off_list(list, i);
+                       break;
+               }
+       }
+
+       return result;
+}
+
 /**
  * acpi_device_sleep_wake - execute _DSW (Device Sleep Wake) or (deprecated in
  *                          ACPI 3.0) _PSW (Power State Wake)
@@ -404,8 +437,7 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev)
 
        /* Close power resource */
        for (i = 0; i < dev->wakeup.resources.count; i++) {
-               int ret = acpi_power_off_device(
-                               dev->wakeup.resources.handles[i]);
+               int ret = acpi_power_off(dev->wakeup.resources.handles[i]);
                if (ret) {
                        printk(KERN_ERR PREFIX "Transition power state\n");
                        dev->wakeup.flags.valid = 0;
@@ -423,19 +455,16 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev)
                              Device Power Management
    -------------------------------------------------------------------------- */
 
-int acpi_power_get_inferred_state(struct acpi_device *device)
+int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
 {
        int result = 0;
        struct acpi_handle_list *list = NULL;
        int list_state = 0;
        int i = 0;
 
-
-       if (!device)
+       if (!device || !state)
                return -EINVAL;
 
-       device->power.state = ACPI_STATE_UNKNOWN;
-
        /*
         * We know a device's inferred power state when all the resources
         * required for a given D-state are 'on'.
@@ -450,22 +479,26 @@ int acpi_power_get_inferred_state(struct acpi_device *device)
                        return result;
 
                if (list_state == ACPI_POWER_RESOURCE_STATE_ON) {
-                       device->power.state = i;
+                       *state = i;
                        return 0;
                }
        }
 
-       device->power.state = ACPI_STATE_D3;
-
+       *state = ACPI_STATE_D3;
        return 0;
 }
 
+int acpi_power_on_resources(struct acpi_device *device, int state)
+{
+       if (!device || state < ACPI_STATE_D0 || state > ACPI_STATE_D3)
+               return -EINVAL;
+
+       return acpi_power_on_list(&device->power.states[state].resources);
+}
+
 int acpi_power_transition(struct acpi_device *device, int state)
 {
-       int result = 0;
-       struct acpi_handle_list *cl = NULL;     /* Current Resources */
-       struct acpi_handle_list *tl = NULL;     /* Target Resources */
-       int i = 0;
+       int result;
 
        if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3))
                return -EINVAL;
@@ -477,37 +510,20 @@ int acpi_power_transition(struct acpi_device *device, int state)
            || (device->power.state > ACPI_STATE_D3))
                return -ENODEV;
 
-       cl = &device->power.states[device->power.state].resources;
-       tl = &device->power.states[state].resources;
-
        /* TBD: Resources must be ordered. */
 
        /*
         * First we reference all power resources required in the target list
-        * (e.g. so the device doesn't lose power while transitioning).
+        * (e.g. so the device doesn't lose power while transitioning).  Then,
+        * we dereference all power resources used in the current list.
         */
-       for (i = 0; i < tl->count; i++) {
-               result = acpi_power_on(tl->handles[i]);
-               if (result)
-                       goto end;
-       }
+       result = acpi_power_on_list(&device->power.states[state].resources);
+       if (!result)
+               acpi_power_off_list(
+                       &device->power.states[device->power.state].resources);
 
-       /*
-        * Then we dereference all power resources used in the current list.
-        */
-       for (i = 0; i < cl->count; i++) {
-               result = acpi_power_off_device(cl->handles[i]);
-               if (result)
-                       goto end;
-       }
-
-     end:
-       if (result)
-               device->power.state = ACPI_STATE_UNKNOWN;
-       else {
-       /* We shouldn't change the state till all above operations succeed */
-               device->power.state = state;
-       }
+       /* We shouldn't change the state unless the above operations succeed. */
+       device->power.state = result ? ACPI_STATE_UNKNOWN : state;
 
        return result;
 }
index afad677..f5f9869 100644
@@ -311,7 +311,9 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
                           dev->pnp.bus_id,
                           (u32) dev->wakeup.sleep_state,
                           dev->wakeup.flags.run_wake ? '*' : ' ',
-                          dev->wakeup.state.enabled ? "enabled" : "disabled");
+                          (device_may_wakeup(&dev->dev)
+                            || (ldev && device_may_wakeup(ldev))) ?
+                              "enabled" : "disabled");
                if (ldev)
                        seq_printf(seq, "%s:%s",
                                   ldev->bus ? ldev->bus->name : "no-bus",
@@ -328,8 +330,10 @@ static void physical_device_enable_wakeup(struct acpi_device *adev)
 {
        struct device *dev = acpi_get_physical_device(adev->handle);
 
-       if (dev && device_can_wakeup(dev))
-               device_set_wakeup_enable(dev, adev->wakeup.state.enabled);
+       if (dev && device_can_wakeup(dev)) {
+               bool enable = !device_may_wakeup(dev);
+               device_set_wakeup_enable(dev, enable);
+       }
 }
 
 static ssize_t
@@ -341,7 +345,6 @@ acpi_system_write_wakeup_device(struct file *file,
        char strbuf[5];
        char str[5] = "";
        unsigned int len = count;
-       struct acpi_device *found_dev = NULL;
 
        if (len > 4)
                len = 4;
@@ -361,33 +364,13 @@ acpi_system_write_wakeup_device(struct file *file,
                        continue;
 
                if (!strncmp(dev->pnp.bus_id, str, 4)) {
-                       dev->wakeup.state.enabled =
-                           dev->wakeup.state.enabled ? 0 : 1;
-                       found_dev = dev;
-                       break;
-               }
-       }
-       if (found_dev) {
-               physical_device_enable_wakeup(found_dev);
-               list_for_each_safe(node, next, &acpi_wakeup_device_list) {
-                       struct acpi_device *dev = container_of(node,
-                                                              struct
-                                                              acpi_device,
-                                                              wakeup_list);
-
-                       if ((dev != found_dev) &&
-                           (dev->wakeup.gpe_number ==
-                            found_dev->wakeup.gpe_number)
-                           && (dev->wakeup.gpe_device ==
-                               found_dev->wakeup.gpe_device)) {
-                               printk(KERN_WARNING
-                                      "ACPI: '%s' and '%s' have the same GPE, "
-                                      "can't disable/enable one separately\n",
-                                      dev->pnp.bus_id, found_dev->pnp.bus_id);
-                               dev->wakeup.state.enabled =
-                                   found_dev->wakeup.state.enabled;
+                       if (device_can_wakeup(&dev->dev)) {
+                               bool enable = !device_may_wakeup(&dev->dev);
+                               device_set_wakeup_enable(&dev->dev, enable);
+                       } else {
                                physical_device_enable_wakeup(dev);
                        }
+                       break;
                }
        }
        mutex_unlock(&acpi_device_lock);
index 85e4804..360a74e 100644
 #include <linux/pm.h>
 #include <linux/cpufreq.h>
 #include <linux/cpu.h>
-#ifdef CONFIG_ACPI_PROCFS
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#endif
 #include <linux/dmi.h>
 #include <linux/moduleparam.h>
 #include <linux/cpuidle.h>
@@ -246,53 +242,6 @@ static int acpi_processor_errata(struct acpi_processor *pr)
        return result;
 }
 
-#ifdef CONFIG_ACPI_PROCFS
-static struct proc_dir_entry *acpi_processor_dir = NULL;
-
-static int __cpuinit acpi_processor_add_fs(struct acpi_device *device)
-{
-       struct proc_dir_entry *entry = NULL;
-
-
-       if (!acpi_device_dir(device)) {
-               acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
-                                                    acpi_processor_dir);
-               if (!acpi_device_dir(device))
-                       return -ENODEV;
-       }
-
-       /* 'throttling' [R/W] */
-       entry = proc_create_data(ACPI_PROCESSOR_FILE_THROTTLING,
-                                S_IFREG | S_IRUGO | S_IWUSR,
-                                acpi_device_dir(device),
-                                &acpi_processor_throttling_fops,
-                                acpi_driver_data(device));
-       if (!entry)
-               return -EIO;
-       return 0;
-}
-static int acpi_processor_remove_fs(struct acpi_device *device)
-{
-
-       if (acpi_device_dir(device)) {
-               remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
-                                 acpi_device_dir(device));
-               remove_proc_entry(acpi_device_bid(device), acpi_processor_dir);
-               acpi_device_dir(device) = NULL;
-       }
-
-       return 0;
-}
-#else
-static inline int acpi_processor_add_fs(struct acpi_device *device)
-{
-       return 0;
-}
-static inline int acpi_processor_remove_fs(struct acpi_device *device)
-{
-       return 0;
-}
-#endif
 /* --------------------------------------------------------------------------
                                  Driver Interface
    -------------------------------------------------------------------------- */
@@ -478,8 +427,13 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
        if (action == CPU_ONLINE && pr) {
                acpi_processor_ppc_has_changed(pr, 0);
                acpi_processor_cst_has_changed(pr);
+               acpi_processor_reevaluate_tstate(pr, action);
                acpi_processor_tstate_has_changed(pr);
        }
+       if (action == CPU_DEAD && pr) {
+               /* Invalidate flags.throttling after a CPU goes offline */
+               acpi_processor_reevaluate_tstate(pr, action);
+       }
        return NOTIFY_OK;
 }
 
@@ -537,14 +491,10 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
 
        per_cpu(processors, pr->id) = pr;
 
-       result = acpi_processor_add_fs(device);
-       if (result)
-               goto err_free_cpumask;
-
        sysdev = get_cpu_sysdev(pr->id);
        if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) {
                result = -EFAULT;
-               goto err_remove_fs;
+               goto err_free_cpumask;
        }
 
 #ifdef CONFIG_CPU_FREQ
@@ -590,8 +540,6 @@ err_thermal_unregister:
        thermal_cooling_device_unregister(pr->cdev);
 err_power_exit:
        acpi_processor_power_exit(pr, device);
-err_remove_fs:
-       acpi_processor_remove_fs(device);
 err_free_cpumask:
        free_cpumask_var(pr->throttling.shared_cpu_map);
 
@@ -620,8 +568,6 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
 
        sysfs_remove_link(&device->dev.kobj, "sysdev");
 
-       acpi_processor_remove_fs(device);
-
        if (pr->cdev) {
                sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
                sysfs_remove_link(&pr->cdev->device.kobj, "device");
@@ -854,12 +800,6 @@ static int __init acpi_processor_init(void)
 
        memset(&errata, 0, sizeof(errata));
 
-#ifdef CONFIG_ACPI_PROCFS
-       acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
-       if (!acpi_processor_dir)
-               return -ENOMEM;
-#endif
-
        if (!cpuidle_register_driver(&acpi_idle_driver)) {
                printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
                        acpi_idle_driver.name);
@@ -885,10 +825,6 @@ static int __init acpi_processor_init(void)
 out_cpuidle:
        cpuidle_unregister_driver(&acpi_idle_driver);
 
-#ifdef CONFIG_ACPI_PROCFS
-       remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
-#endif
-
        return result;
 }
 
@@ -907,10 +843,6 @@ static void __exit acpi_processor_exit(void)
 
        cpuidle_unregister_driver(&acpi_idle_driver);
 
-#ifdef CONFIG_ACPI_PROCFS
-       remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
-#endif
-
        return;
 }
 
index ff36327..fa84e97 100644
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/cpufreq.h>
-#ifdef CONFIG_ACPI_PROCFS
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#endif
 
 #include <asm/io.h>
 #include <asm/uaccess.h>
@@ -369,6 +365,58 @@ int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
        return acpi_processor_set_throttling(pr, target_state, false);
 }
 
+/*
+ * This function reevaluates whether the T-state is valid
+ * after one CPU is onlined/offlined.
+ * Note that it does not reevaluate the following properties of
+ * the T-state:
+ *     1. Control method.
+ *     2. The number of supported T-states.
+ *     3. _TSD domain.
+ */
+void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
+                                       unsigned long action)
+{
+       int result = 0;
+
+       if (action == CPU_DEAD) {
+               /* When a CPU goes offline, the T-state throttling
+                * is invalidated.
+                */
+               pr->flags.throttling = 0;
+               return;
+       }
+       /* The following rechecks whether the T-state is valid for
+        * the newly onlined CPU.
+        */
+       if (!pr->throttling.state_count) {
+               /* If the number of T-state is invalid, it is
+               /* If no T-states are reported, throttling is
+                */
+               pr->flags.throttling = 0;
+               return;
+       }
+       pr->flags.throttling = 1;
+
+       /* Disable throttling (if enabled).  We'll let subsequent
+        * policy (e.g. thermal) decide to lower performance if it
+        * so chooses, but for now we'll crank up the speed.
+        */
+
+       result = acpi_processor_get_throttling(pr);
+       if (result)
+               goto end;
+
+       if (pr->throttling.state) {
+               result = acpi_processor_set_throttling(pr, 0, false);
+               if (result)
+                       goto end;
+       }
+
+end:
+       if (result)
+               pr->flags.throttling = 0;
+}
 /*
  * _PTC - Processor Throttling Control (and status) register location
  */
@@ -876,7 +924,11 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
         */
        cpumask_copy(saved_mask, &current->cpus_allowed);
        /* FIXME: use work_on_cpu() */
-       set_cpus_allowed_ptr(current, cpumask_of(pr->id));
+       if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
+               /* Can't migrate to the target pr->id CPU. Exit */
+               free_cpumask_var(saved_mask);
+               return -ENODEV;
+       }
        ret = pr->throttling.acpi_processor_get_throttling(pr);
        /* restore the previous state */
        set_cpus_allowed_ptr(current, saved_mask);
@@ -1051,6 +1103,14 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
                return -ENOMEM;
        }
 
+       if (cpu_is_offline(pr->id)) {
+               /*
+                * The CPU pointed to by pr->id is offline, so there is no
+                * need to change the throttling state any more.
+                */
+               return -ENODEV;
+       }
+
        cpumask_copy(saved_mask, &current->cpus_allowed);
        t_state.target_state = state;
        p_throttling = &(pr->throttling);
@@ -1074,7 +1134,11 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
         */
        if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
                /* FIXME: use work_on_cpu() */
-               set_cpus_allowed_ptr(current, cpumask_of(pr->id));
+               if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
+                       /* Can't migrate to the pr->id CPU. Exit */
+                       ret = -ENODEV;
+                       goto exit;
+               }
                ret = p_throttling->acpi_processor_set_throttling(pr,
                                                t_state.target_state, force);
        } else {
@@ -1106,7 +1170,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
                        }
                        t_state.cpu = i;
                        /* FIXME: use work_on_cpu() */
-                       set_cpus_allowed_ptr(current, cpumask_of(i));
+                       if (set_cpus_allowed_ptr(current, cpumask_of(i)))
+                               continue;
                        ret = match_pr->throttling.
                                acpi_processor_set_throttling(
                                match_pr, t_state.target_state, force);
@@ -1126,6 +1191,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
        /* restore the previous state */
        /* FIXME: use work_on_cpu() */
        set_cpus_allowed_ptr(current, saved_mask);
+exit:
        free_cpumask_var(online_throttling_cpus);
        free_cpumask_var(saved_mask);
        return ret;
@@ -1216,113 +1282,3 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
        return result;
 }
 
-#ifdef CONFIG_ACPI_PROCFS
-/* proc interface */
-static int acpi_processor_throttling_seq_show(struct seq_file *seq,
-                                             void *offset)
-{
-       struct acpi_processor *pr = seq->private;
-       int i = 0;
-       int result = 0;
-
-       if (!pr)
-               goto end;
-
-       if (!(pr->throttling.state_count > 0)) {
-               seq_puts(seq, "<not supported>\n");
-               goto end;
-       }
-
-       result = acpi_processor_get_throttling(pr);
-
-       if (result) {
-               seq_puts(seq,
-                        "Could not determine current throttling state.\n");
-               goto end;
-       }
-
-       seq_printf(seq, "state count:             %d\n"
-                  "active state:            T%d\n"
-                  "state available: T%d to T%d\n",
-                  pr->throttling.state_count, pr->throttling.state,
-                  pr->throttling_platform_limit,
-                  pr->throttling.state_count - 1);
-
-       seq_puts(seq, "states:\n");
-       if (pr->throttling.acpi_processor_get_throttling ==
-                       acpi_processor_get_throttling_fadt) {
-               for (i = 0; i < pr->throttling.state_count; i++)
-                       seq_printf(seq, "   %cT%d:                  %02d%%\n",
-                                  (i == pr->throttling.state ? '*' : ' '), i,
-                                  (pr->throttling.states[i].performance ? pr->
-                                   throttling.states[i].performance / 10 : 0));
-       } else {
-               for (i = 0; i < pr->throttling.state_count; i++)
-                       seq_printf(seq, "   %cT%d:                  %02d%%\n",
-                                  (i == pr->throttling.state ? '*' : ' '), i,
-                                  (int)pr->throttling.states_tss[i].
-                                  freqpercentage);
-       }
-
-      end:
-       return 0;
-}
-
-static int acpi_processor_throttling_open_fs(struct inode *inode,
-                                            struct file *file)
-{
-       return single_open(file, acpi_processor_throttling_seq_show,
-                          PDE(inode)->data);
-}
-
-static ssize_t acpi_processor_write_throttling(struct file *file,
-                                              const char __user * buffer,
-                                              size_t count, loff_t * data)
-{
-       int result = 0;
-       struct seq_file *m = file->private_data;
-       struct acpi_processor *pr = m->private;
-       char state_string[5] = "";
-       char *charp = NULL;
-       size_t state_val = 0;
-       char tmpbuf[5] = "";
-
-       if (!pr || (count > sizeof(state_string) - 1))
-               return -EINVAL;
-
-       if (copy_from_user(state_string, buffer, count))
-               return -EFAULT;
-
-       state_string[count] = '\0';
-       if ((count > 0) && (state_string[count-1] == '\n'))
-               state_string[count-1] = '\0';
-
-       charp = state_string;
-       if ((state_string[0] == 't') || (state_string[0] == 'T'))
-               charp++;
-
-       state_val = simple_strtoul(charp, NULL, 0);
-       if (state_val >= pr->throttling.state_count)
-               return -EINVAL;
-
-       snprintf(tmpbuf, 5, "%zu", state_val);
-
-       if (strcmp(tmpbuf, charp) != 0)
-               return -EINVAL;
-
-       result = acpi_processor_set_throttling(pr, state_val, false);
-       if (result)
-               return result;
-
-       return count;
-}
-
-const struct file_operations acpi_processor_throttling_fops = {
-       .owner = THIS_MODULE,
-       .open = acpi_processor_throttling_open_fs,
-       .read = seq_read,
-       .write = acpi_processor_write_throttling,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
-#endif
index e5dbedb..51ae379 100644
@@ -484,6 +484,8 @@ acpi_sbs_add_fs(struct proc_dir_entry **dir,
                const struct file_operations *state_fops,
                const struct file_operations *alarm_fops, void *data)
 {
+       printk(KERN_WARNING PREFIX "Deprecated procfs I/F for SBS is loaded,"
+                       " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
        if (!*dir) {
                *dir = proc_mkdir(dir_name, parent_dir);
                if (!*dir) {
index 29ef505..b99e624 100644
@@ -778,7 +778,7 @@ acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
                wakeup->resources.handles[i] = element->reference.handle;
        }
 
-       acpi_gpe_can_wake(wakeup->gpe_device, wakeup->gpe_number);
+       acpi_setup_gpe_for_wake(handle, wakeup->gpe_device, wakeup->gpe_number);
 
  out:
        kfree(buffer.pointer);
@@ -803,7 +803,7 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
        /* Power button, Lid switch always enable wakeup */
        if (!acpi_match_device_ids(device, button_device_ids)) {
                device->wakeup.flags.run_wake = 1;
-               device->wakeup.flags.always_enabled = 1;
+               device_set_wakeup_capable(&device->dev, true);
                return;
        }
 
@@ -815,16 +815,22 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
                                !!(event_status & ACPI_EVENT_FLAG_HANDLE);
 }
 
-static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
+static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
 {
+       acpi_handle temp;
        acpi_status status = 0;
        int psw_error;
 
+       /* Presence of _PRW indicates wake capable */
+       status = acpi_get_handle(device->handle, "_PRW", &temp);
+       if (ACPI_FAILURE(status))
+               return;
+
        status = acpi_bus_extract_wakeup_device_power_package(device->handle,
                                                              &device->wakeup);
        if (ACPI_FAILURE(status)) {
                ACPI_EXCEPTION((AE_INFO, status, "Extracting _PRW package"));
-               goto end;
+               return;
        }
 
        device->wakeup.flags.valid = 1;
@@ -840,13 +846,10 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
        if (psw_error)
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                "error in _DSW or _PSW evaluation\n"));
-
-end:
-       if (ACPI_FAILURE(status))
-               device->flags.wake_capable = 0;
-       return 0;
 }
 
+static void acpi_bus_add_power_resource(acpi_handle handle);
+
 static int acpi_bus_get_power_flags(struct acpi_device *device)
 {
        acpi_status status = 0;
@@ -875,8 +878,12 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
                acpi_evaluate_reference(device->handle, object_name, NULL,
                                        &ps->resources);
                if (ps->resources.count) {
+                       int j;
+
                        device->power.flags.power_resources = 1;
                        ps->flags.valid = 1;
+                       for (j = 0; j < ps->resources.count; j++)
+                               acpi_bus_add_power_resource(ps->resources.handles[j]);
                }
 
                /* Evaluate "_PSx" to see if we can do explicit sets */
@@ -901,10 +908,7 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
        device->power.states[ACPI_STATE_D3].flags.valid = 1;
        device->power.states[ACPI_STATE_D3].power = 0;
 
-       /* TBD: System wake support and resource requirements. */
-
-       device->power.state = ACPI_STATE_UNKNOWN;
-       acpi_bus_get_power(device->handle, &(device->power.state));
+       acpi_bus_init_power(device);
 
        return 0;
 }
@@ -947,11 +951,6 @@ static int acpi_bus_get_flags(struct acpi_device *device)
        if (ACPI_SUCCESS(status))
                device->flags.power_manageable = 1;
 
-       /* Presence of _PRW indicates wake capable */
-       status = acpi_get_handle(device->handle, "_PRW", &temp);
-       if (ACPI_SUCCESS(status))
-               device->flags.wake_capable = 1;
-
        /* TBD: Performance management */
 
        return 0;
@@ -1278,11 +1277,7 @@ static int acpi_add_single_object(struct acpi_device **child,
         * Wakeup device management
         *-----------------------
         */
-       if (device->flags.wake_capable) {
-               result = acpi_bus_get_wakeup_device_flags(device);
-               if (result)
-                       goto end;
-       }
+       acpi_bus_get_wakeup_device_flags(device);
 
        /*
         * Performance Management
@@ -1326,6 +1321,20 @@ end:
 #define ACPI_STA_DEFAULT (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED | \
                          ACPI_STA_DEVICE_UI      | ACPI_STA_DEVICE_FUNCTIONING)
 
+static void acpi_bus_add_power_resource(acpi_handle handle)
+{
+       struct acpi_bus_ops ops = {
+               .acpi_op_add = 1,
+               .acpi_op_start = 1,
+       };
+       struct acpi_device *device = NULL;
+
+       acpi_bus_get_device(handle, &device);
+       if (!device)
+               acpi_add_single_object(&device, handle, ACPI_BUS_TYPE_POWER,
+                                       ACPI_STA_DEFAULT, &ops);
+}
+
 static int acpi_bus_type_and_status(acpi_handle handle, int *type,
                                    unsigned long long *sta)
 {
@@ -1371,7 +1380,6 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl,
        struct acpi_bus_ops *ops = context;
        int type;
        unsigned long long sta;
-       struct acpi_device_wakeup wakeup;
        struct acpi_device *device;
        acpi_status status;
        int result;
@@ -1382,7 +1390,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl,
 
        if (!(sta & ACPI_STA_DEVICE_PRESENT) &&
            !(sta & ACPI_STA_DEVICE_FUNCTIONING)) {
-               acpi_bus_extract_wakeup_device_power_package(handle, &wakeup);
+               struct acpi_device_wakeup wakeup;
+               acpi_handle temp;
+
+               status = acpi_get_handle(handle, "_PRW", &temp);
+               if (ACPI_SUCCESS(status))
+                       acpi_bus_extract_wakeup_device_power_package(handle,
+                                                                    &wakeup);
                return AE_CTRL_DEPTH;
        }
 
@@ -1467,7 +1481,7 @@ int acpi_bus_start(struct acpi_device *device)
 
        result = acpi_bus_scan(device->handle, &ops, NULL);
 
-       acpi_update_gpes();
+       acpi_update_all_gpes();
 
        return result;
 }
@@ -1573,6 +1587,8 @@ int __init acpi_scan_init(void)
                printk(KERN_ERR PREFIX "Could not register bus type\n");
        }
 
+       acpi_power_init();
+
        /*
         * Enumerate devices in the ACPI namespace.
         */
@@ -1584,7 +1600,7 @@ int __init acpi_scan_init(void)
        if (result)
                acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL);
        else
-               acpi_update_gpes();
+               acpi_update_all_gpes();
 
        return result;
 }
index c423231..fdd3aee 100644
@@ -124,8 +124,7 @@ static int acpi_pm_freeze(void)
 static int acpi_pm_pre_suspend(void)
 {
        acpi_pm_freeze();
-       suspend_nvs_save();
-       return 0;
+       return suspend_nvs_save();
 }
 
 /**
@@ -151,7 +150,7 @@ static int acpi_pm_prepare(void)
 {
        int error = __acpi_pm_prepare();
        if (!error)
-               acpi_pm_pre_suspend();
+               error = acpi_pm_pre_suspend();
 
        return error;
 }
@@ -435,6 +434,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
                DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"),
                },
        },
+       {
+       .callback = init_nvs_nosave,
+       .ident = "Averatec AV1020-ED2",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
+               },
+       },
        {},
 };
 #endif /* CONFIG_SUSPEND */
index f8588f8..61891e7 100644
@@ -438,7 +438,7 @@ static void delete_gpe_attr_array(void)
        return;
 }
 
-void acpi_os_gpe_count(u32 gpe_number)
+static void gpe_count(u32 gpe_number)
 {
        acpi_gpe_count++;
 
@@ -454,7 +454,7 @@ void acpi_os_gpe_count(u32 gpe_number)
        return;
 }
 
-void acpi_os_fixed_event_count(u32 event_number)
+static void fixed_event_count(u32 event_number)
 {
        if (!all_counters)
                return;
@@ -468,6 +468,16 @@ void acpi_os_fixed_event_count(u32 event_number)
        return;
 }
 
+static void acpi_gbl_event_handler(u32 event_type, acpi_handle device,
+       u32 event_number, void *context)
+{
+       if (event_type == ACPI_EVENT_TYPE_GPE)
+               gpe_count(event_number);
+
+       if (event_type == ACPI_EVENT_TYPE_FIXED)
+               fixed_event_count(event_number);
+}
+
 static int get_status(u32 index, acpi_event_status *status,
                      acpi_handle *handle)
 {
@@ -601,6 +611,7 @@ end:
 
 void acpi_irq_stats_init(void)
 {
+       acpi_status status;
        int i;
 
        if (all_counters)
@@ -619,6 +630,10 @@ void acpi_irq_stats_init(void)
        if (all_counters == NULL)
                goto fail;
 
+       status = acpi_install_global_event_handler(acpi_gbl_event_handler, NULL);
+       if (ACPI_FAILURE(status))
+               goto fail;
+
        counter_attrs = kzalloc(sizeof(struct kobj_attribute) * (num_counters),
                                GFP_KERNEL);
        if (counter_attrs == NULL)
index 5a27b0a..2607e17 100644
@@ -1059,8 +1059,9 @@ static int acpi_thermal_resume(struct acpi_device *device)
                        break;
                tz->trips.active[i].flags.enabled = 1;
                for (j = 0; j < tz->trips.active[i].devices.count; j++) {
-                       result = acpi_bus_get_power(tz->trips.active[i].devices.
-                           handles[j], &power_state);
+                       result = acpi_bus_update_power(
+                                       tz->trips.active[i].devices.handles[j],
+                                       &power_state);
                        if (result || (power_state != ACPI_STATE_D0)) {
                                tz->trips.active[i].flags.enabled = 0;
                                break;
index 15a0fde..90f8f76 100644
@@ -33,7 +33,6 @@
 #include <linux/input.h>
 #include <linux/backlight.h>
 #include <linux/thermal.h>
-#include <linux/video_output.h>
 #include <linux/sort.h>
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
@@ -81,6 +80,13 @@ module_param(brightness_switch_enabled, bool, 0644);
 static int allow_duplicates;
 module_param(allow_duplicates, bool, 0644);
 
+/*
+ * Some BIOSes claim they use the minimum backlight at boot,
+ * which may result in a dimmed screen after boot.
+ */
+static int use_bios_initial_backlight = 1;
+module_param(use_bios_initial_backlight, bool, 0644);
+
 static int register_count = 0;
 static int acpi_video_bus_add(struct acpi_device *device);
 static int acpi_video_bus_remove(struct acpi_device *device, int type);
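The use_bios_initial_backlight switch added above is a module parameter of video.c (built as video.ko), so, assuming the usual module_param naming, the BIOS-reported initial level can presumably be overridden from the kernel command line or at runtime, for example:

	video.use_bios_initial_backlight=0
	echo 0 > /sys/module/video/parameters/use_bios_initial_backlight

Both names are derived from module_param(use_bios_initial_backlight, bool, 0644) and are shown here as an illustration rather than as documented interfaces.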
@@ -172,9 +178,6 @@ struct acpi_video_device_cap {
        u8 _BQC:1;              /* Get current brightness level */
        u8 _BCQ:1;              /* Some buggy BIOS uses _BCQ instead of _BQC */
        u8 _DDC:1;              /*Return the EDID for this device */
-       u8 _DCS:1;              /*Return status of output device */
-       u8 _DGS:1;              /*Query graphics state */
-       u8 _DSS:1;              /*Device state set */
 };
 
 struct acpi_video_brightness_flags {
@@ -202,7 +205,6 @@ struct acpi_video_device {
        struct acpi_video_device_brightness *brightness;
        struct backlight_device *backlight;
        struct thermal_cooling_device *cooling_dev;
-       struct output_device *output_dev;
 };
 
 static const char device_decode[][30] = {
@@ -226,10 +228,6 @@ static int acpi_video_get_next_level(struct acpi_video_device *device,
                                     u32 level_current, u32 event);
 static int acpi_video_switch_brightness(struct acpi_video_device *device,
                                         int event);
-static int acpi_video_device_get_state(struct acpi_video_device *device,
-                           unsigned long long *state);
-static int acpi_video_output_get(struct output_device *od);
-static int acpi_video_device_set_state(struct acpi_video_device *device, int state);
 
 /*backlight device sysfs support*/
 static int acpi_video_get_brightness(struct backlight_device *bd)
@@ -265,30 +263,6 @@ static const struct backlight_ops acpi_backlight_ops = {
        .update_status  = acpi_video_set_brightness,
 };
 
-/*video output device sysfs support*/
-static int acpi_video_output_get(struct output_device *od)
-{
-       unsigned long long state;
-       struct acpi_video_device *vd =
-               (struct acpi_video_device *)dev_get_drvdata(&od->dev);
-       acpi_video_device_get_state(vd, &state);
-       return (int)state;
-}
-
-static int acpi_video_output_set(struct output_device *od)
-{
-       unsigned long state = od->request_state;
-       struct acpi_video_device *vd=
-               (struct acpi_video_device *)dev_get_drvdata(&od->dev);
-       return acpi_video_device_set_state(vd, state);
-}
-
-static struct output_properties acpi_output_properties = {
-       .set_state = acpi_video_output_set,
-       .get_status = acpi_video_output_get,
-};
-
-
 /* thermal cooling device callbacks */
 static int video_get_max_state(struct thermal_cooling_device *cooling_dev, unsigned
                               long *state)
@@ -344,34 +318,6 @@ static struct thermal_cooling_device_ops video_cooling_ops = {
                                Video Management
    -------------------------------------------------------------------------- */
 
-/* device */
-
-static int
-acpi_video_device_get_state(struct acpi_video_device *device,
-                           unsigned long long *state)
-{
-       int status;
-
-       status = acpi_evaluate_integer(device->dev->handle, "_DCS", NULL, state);
-
-       return status;
-}
-
-static int
-acpi_video_device_set_state(struct acpi_video_device *device, int state)
-{
-       int status;
-       union acpi_object arg0 = { ACPI_TYPE_INTEGER };
-       struct acpi_object_list args = { 1, &arg0 };
-       unsigned long long ret;
-
-
-       arg0.integer.value = state;
-       status = acpi_evaluate_integer(device->dev->handle, "_DSS", &args, &ret);
-
-       return status;
-}
-
 static int
 acpi_video_device_lcd_query_levels(struct acpi_video_device *device,
                                   union acpi_object **levels)
@@ -766,9 +712,11 @@ acpi_video_init_brightness(struct acpi_video_device *device)
                 * when invoked for the first time, i.e. level_old is invalid.
                 * set the backlight to max_level in this case
                 */
-               for (i = 2; i < br->count; i++)
-                       if (level_old == br->levels[i])
-                               level = level_old;
+               if (use_bios_initial_backlight) {
+                       for (i = 2; i < br->count; i++)
+                               if (level_old == br->levels[i])
+                                       level = level_old;
+               }
                goto set_level;
        }
 
@@ -831,15 +779,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
        if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DDC", &h_dummy1))) {
                device->cap._DDC = 1;
        }
-       if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DCS", &h_dummy1))) {
-               device->cap._DCS = 1;
-       }
-       if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DGS", &h_dummy1))) {
-               device->cap._DGS = 1;
-       }
-       if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DSS", &h_dummy1))) {
-               device->cap._DSS = 1;
-       }
 
        if (acpi_video_backlight_support()) {
                struct backlight_properties props;
@@ -904,21 +843,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
                        printk(KERN_ERR PREFIX "Create sysfs link\n");
 
        }
-
-       if (acpi_video_display_switch_support()) {
-
-               if (device->cap._DCS && device->cap._DSS) {
-                       static int count;
-                       char *name;
-                       name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
-                       if (!name)
-                               return;
-                       count++;
-                       device->output_dev = video_output_register(name,
-                                       NULL, device, &acpi_output_properties);
-                       kfree(name);
-               }
-       }
 }
 
 /*
@@ -1360,6 +1284,9 @@ int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
                if (!video_device)
                        continue;
 
+               if (!video_device->cap._DDC)
+                       continue;
+
                if (type) {
                        switch (type) {
                        case ACPI_VIDEO_DISPLAY_CRT:
@@ -1452,7 +1379,6 @@ static int acpi_video_bus_put_one_device(struct acpi_video_device *device)
                thermal_cooling_device_unregister(device->cooling_dev);
                device->cooling_dev = NULL;
        }
-       video_output_unregister(device->output_dev);
 
        return 0;
 }
index b836761..42d3d72 100644
  * capabilities the graphics cards plugged in support. The check for general
  * video capabilities will be triggered by the first caller of
  * acpi_video_get_capabilities(NULL); which will happen when the first
- * backlight (or display output) switching supporting driver calls:
+ * backlight switching supporting driver calls:
  * acpi_video_backlight_support();
  *
  * Depending on whether ACPI graphics extensions (cmp. ACPI spec Appendix B)
  * are available, video.ko should be used to handle the device.
  *
  * Otherwise vendor specific drivers like thinkpad_acpi, asus_acpi,
- * sony_acpi,... can take care about backlight brightness and display output
- * switching.
+ * sony_acpi,... can take care about backlight brightness.
  *
  * If CONFIG_ACPI_VIDEO is neither set as "compiled in" (y) nor as a module (m)
  * this file will not be compiled, acpi_video_get_capabilities() and
@@ -83,11 +82,6 @@ long acpi_is_video_device(struct acpi_device *device)
        if (!device)
                return 0;
 
-       /* Is this device able to support video switching ? */
-       if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) ||
-           ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy)))
-               video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING;
-
        /* Is this device able to retrieve a video ROM ? */
        if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy)))
                video_caps |= ACPI_VIDEO_ROM_AVAILABLE;
@@ -161,8 +155,6 @@ long acpi_video_get_capabilities(acpi_handle graphics_handle)
                 *
                 *   if (dmi_name_in_vendors("XY")) {
                 *      acpi_video_support |=
-                *              ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR;
-                *      acpi_video_support |=
                 *              ACPI_VIDEO_BACKLIGHT_DMI_VENDOR;
                 *}
                 */
@@ -212,33 +204,8 @@ int acpi_video_backlight_support(void)
 EXPORT_SYMBOL(acpi_video_backlight_support);
 
 /*
- * Returns true if video.ko can do display output switching.
- * This does not work well/at all with binary graphics drivers
- * which disable system io ranges and do it on their own.
- */
-int acpi_video_display_switch_support(void)
-{
-       if (!acpi_video_caps_checked)
-               acpi_video_get_capabilities(NULL);
-
-       if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR)
-               return 0;
-       else if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO)
-               return 1;
-
-       if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR)
-               return 0;
-       else if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO)
-               return 1;
-
-       return acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING;
-}
-EXPORT_SYMBOL(acpi_video_display_switch_support);
-
-/*
- * Use acpi_display_output=vendor/video or acpi_backlight=vendor/video
- * To force that backlight or display output switching is processed by vendor
- * specific acpi drivers or video.ko driver.
+ * Use acpi_backlight=vendor/video to force that backlight switching
+ * is processed by vendor specific acpi drivers or video.ko driver.
  */
 static int __init acpi_backlight(char *str)
 {
@@ -255,19 +222,3 @@ static int __init acpi_backlight(char *str)
        return 1;
 }
 __setup("acpi_backlight=", acpi_backlight);
-
-static int __init acpi_display_output(char *str)
-{
-       if (str == NULL || *str == '\0')
-               return 1;
-       else {
-               if (!strcmp("vendor", str))
-                       acpi_video_support |=
-                               ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR;
-               if (!strcmp("video", str))
-                       acpi_video_support |=
-                               ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO;
-       }
-       return 1;
-}
-__setup("acpi_display_output=", acpi_display_output);
index f62a50c..ed65014 100644 (file)
@@ -37,15 +37,16 @@ void acpi_enable_wakeup_devices(u8 sleep_state)
                        container_of(node, struct acpi_device, wakeup_list);
 
                if (!dev->wakeup.flags.valid
-                   || !(dev->wakeup.state.enabled || dev->wakeup.prepare_count)
-                   || sleep_state > (u32) dev->wakeup.sleep_state)
+                   || sleep_state > (u32) dev->wakeup.sleep_state
+                   || !(device_may_wakeup(&dev->dev)
+                       || dev->wakeup.prepare_count))
                        continue;
 
-               if (dev->wakeup.state.enabled)
+               if (device_may_wakeup(&dev->dev))
                        acpi_enable_wakeup_device_power(dev, sleep_state);
 
                /* The wake-up power should have been enabled already. */
-               acpi_gpe_wakeup(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
+               acpi_set_gpe_wake_mask(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
                                ACPI_GPE_ENABLE);
        }
 }
@@ -63,14 +64,15 @@ void acpi_disable_wakeup_devices(u8 sleep_state)
                        container_of(node, struct acpi_device, wakeup_list);
 
                if (!dev->wakeup.flags.valid
-                   || !(dev->wakeup.state.enabled || dev->wakeup.prepare_count)
-                   || (sleep_state > (u32) dev->wakeup.sleep_state))
+                   || sleep_state > (u32) dev->wakeup.sleep_state
+                   || !(device_may_wakeup(&dev->dev)
+                       || dev->wakeup.prepare_count))
                        continue;
 
-               acpi_gpe_wakeup(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
+               acpi_set_gpe_wake_mask(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
                                ACPI_GPE_DISABLE);
 
-               if (dev->wakeup.state.enabled)
+               if (device_may_wakeup(&dev->dev))
                        acpi_disable_wakeup_device_power(dev);
        }
 }
@@ -84,8 +86,8 @@ int __init acpi_wakeup_device_init(void)
                struct acpi_device *dev = container_of(node,
                                                       struct acpi_device,
                                                       wakeup_list);
-               if (dev->wakeup.flags.always_enabled)
-                       dev->wakeup.state.enabled = 1;
+               if (device_can_wakeup(&dev->dev))
+                       device_set_wakeup_enable(&dev->dev, true);
        }
        mutex_unlock(&acpi_device_lock);
        return 0;
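
The wakeup code now keys off the generic PM wakeup flag rather than the ACPI-private dev->wakeup.state.enabled bit. A minimal sketch of the device-core calls that now decide whether acpi_enable_wakeup_devices() arms a device's GPE (dev is a placeholder struct device pointer):

	device_set_wakeup_capable(dev, true);	/* e.g. because _PRW exists */
	device_set_wakeup_enable(dev, true);	/* same effect as: echo enabled > power/wakeup */

	/* acpi_enable_wakeup_devices() arms the GPE only when
	 * device_may_wakeup(dev) is true or a wake prepare count is held. */
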
index 2fe72f8..38223e9 100644 (file)
@@ -970,6 +970,33 @@ out_kfree:
 }
 EXPORT_SYMBOL(ipmi_create_user);
 
+int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
+{
+       int           rv = 0;
+       ipmi_smi_t    intf;
+       struct ipmi_smi_handlers *handlers;
+
+       mutex_lock(&ipmi_interfaces_mutex);
+       list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+               if (intf->intf_num == if_num)
+                       goto found;
+       }
+       /* Not found, return an error */
+       rv = -EINVAL;
+       mutex_unlock(&ipmi_interfaces_mutex);
+       return rv;
+
+found:
+       handlers = intf->handlers;
+       rv = -ENOSYS;
+       if (handlers->get_smi_info)
+               rv = handlers->get_smi_info(intf->send_info, data);
+       mutex_unlock(&ipmi_interfaces_mutex);
+
+       return rv;
+}
+EXPORT_SYMBOL(ipmi_get_smi_info);
+
 static void free_user(struct kref *ref)
 {
        ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
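
A hedged usage sketch for the new export: a consumer such as the acpi_ipmi driver added in this merge can look up the low-level details of IPMI interface 0 and must drop the device reference when done (use_acpi_handle() is a placeholder):

	struct ipmi_smi_info info;
	int err;

	err = ipmi_get_smi_info(0, &info);
	if (!err) {
		if (info.addr_src == SI_ACPI)
			use_acpi_handle(info.addr_info.acpi_info.acpi_handle);
		put_device(info.dev);	/* ipmi_get_smi_info() took a reference */
	}
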
index f27c04e..b6ae6e9 100644 (file)
@@ -57,6 +57,7 @@
 #include <asm/irq.h>
 #include <linux/interrupt.h>
 #include <linux/rcupdate.h>
+#include <linux/ipmi.h>
 #include <linux/ipmi_smi.h>
 #include <asm/io.h>
 #include "ipmi_si_sm.h"
@@ -109,10 +110,6 @@ enum si_type {
 };
 static char *si_to_str[] = { "kcs", "smic", "bt" };
 
-enum ipmi_addr_src {
-       SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS,
-       SI_PCI, SI_DEVICETREE, SI_DEFAULT
-};
 static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI",
                                        "ACPI", "SMBIOS", "PCI",
                                        "device-tree", "default" };
@@ -293,6 +290,7 @@ struct smi_info {
        struct task_struct *thread;
 
        struct list_head link;
+       union ipmi_smi_info_union addr_info;
 };
 
 #define smi_inc_stat(smi, stat) \
@@ -1188,6 +1186,18 @@ static int smi_start_processing(void       *send_info,
        return 0;
 }
 
+static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
+{
+       struct smi_info *smi = send_info;
+
+       data->addr_src = smi->addr_source;
+       data->dev = smi->dev;
+       data->addr_info = smi->addr_info;
+       get_device(smi->dev);
+
+       return 0;
+}
+
 static void set_maintenance_mode(void *send_info, int enable)
 {
        struct smi_info   *smi_info = send_info;
@@ -1199,6 +1209,7 @@ static void set_maintenance_mode(void *send_info, int enable)
 static struct ipmi_smi_handlers handlers = {
        .owner                  = THIS_MODULE,
        .start_processing       = smi_start_processing,
+       .get_smi_info           = get_smi_info,
        .sender                 = sender,
        .request_events         = request_events,
        .set_maintenance_mode   = set_maintenance_mode,
@@ -1930,7 +1941,8 @@ static void __devinit hardcode_find_bmc(void)
 static int acpi_failure;
 
 /* For GPE-type interrupts. */
-static u32 ipmi_acpi_gpe(void *context)
+static u32 ipmi_acpi_gpe(acpi_handle gpe_device,
+       u32 gpe_number, void *context)
 {
        struct smi_info *smi_info = context;
        unsigned long   flags;
@@ -2158,6 +2170,7 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
        printk(KERN_INFO PFX "probing via ACPI\n");
 
        handle = acpi_dev->handle;
+       info->addr_info.acpi_info.acpi_handle = handle;
 
        /* _IFT tells us the interface type: KCS, BT, etc */
        status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp);
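
ipmi_acpi_gpe() now matches the three-argument acpi_gpe_handler prototype introduced by this ACPICA update. A generic handler under the new signature looks roughly like this (my_dev and handle_event() are placeholders); a handler may also OR ACPI_REENABLE_GPE into its return value to have ACPICA re-enable the GPE:

	static u32 my_gpe_handler(acpi_handle gpe_device, u32 gpe_number,
				  void *context)
	{
		struct my_dev *dev = context;

		handle_event(dev);
		return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
	}

	/* registration is unchanged apart from the handler type: */
	status = acpi_install_gpe_handler(NULL, gpe_number,
					  ACPI_GPE_LEVEL_TRIGGERED,
					  my_gpe_handler, dev);
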
index 7af4436..64828a7 100644 (file)
@@ -107,7 +107,6 @@ config DRM_I915
        select FB_CFB_IMAGEBLIT
        # i915 depends on ACPI_VIDEO when ACPI is enabled
        # but for select to work, need to select ACPI_VIDEO's dependencies, ick
-       select VIDEO_OUTPUT_CONTROL if ACPI
        select BACKLIGHT_CLASS_DEVICE if ACPI
        select INPUT if ACPI
        select ACPI_VIDEO if ACPI
index 0e1edd7..09aea5f 100644 (file)
@@ -3,7 +3,6 @@ config STUB_POULSBO
        depends on PCI
        # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
        # but for select to work, need to select ACPI_VIDEO's dependencies, ick
-       select VIDEO_OUTPUT_CONTROL if ACPI
        select BACKLIGHT_CLASS_DEVICE if ACPI
        select INPUT if ACPI
        select ACPI_VIDEO if ACPI
index 19e92b2..95e3b09 100644 (file)
@@ -689,7 +689,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
        if (error)
                goto err_free_input_dev;
 
-       result = acpi_bus_get_power(fujitsu->acpi_handle, &state);
+       result = acpi_bus_update_power(fujitsu->acpi_handle, &state);
        if (result) {
                printk(KERN_ERR "Error reading power state\n");
                goto err_unregister_input_dev;
@@ -857,7 +857,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
        if (error)
                goto err_free_input_dev;
 
-       result = acpi_bus_get_power(fujitsu_hotkey->acpi_handle, &state);
+       result = acpi_bus_update_power(fujitsu_hotkey->acpi_handle, &state);
        if (result) {
                printk(KERN_ERR "Error reading power state\n");
                goto err_unregister_input_dev;
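
acpi_bus_update_power() replaces acpi_bus_get_power() here: per the changelog it updates the device power state consistently and, through the optional second argument, reports the resulting state. A minimal sketch of the pattern both hunks follow (adev is a placeholder struct acpi_device pointer):

	int state, result;

	result = acpi_bus_update_power(adev->handle, &state);
	if (result)
		dev_err(&adev->dev, "cannot update device power state\n");
	else if (state == ACPI_STATE_D3)
		dev_info(&adev->dev, "device starts powered off\n");
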
index 8de3775..bfba893 100644 (file)
@@ -2,11 +2,13 @@
 # Makefile for the Linux Plug-and-Play Support.
 #
 
-obj-y          := core.o card.o driver.o resource.o manager.o support.o interface.o quirks.o
+obj-y          := pnp.o
+
+pnp-y          := core.o card.o driver.o resource.o manager.o support.o interface.o quirks.o
 
 obj-$(CONFIG_PNPACPI)          += pnpacpi/
 obj-$(CONFIG_PNPBIOS)          += pnpbios/
 obj-$(CONFIG_ISAPNP)           += isapnp/
 
 # pnp_system_init goes after pnpacpi/pnpbios init
-obj-y                          += system.o
+pnp-y                          += system.o
index 0f34d96..cb6ce42 100644 (file)
@@ -220,10 +220,5 @@ subsys_initcall(pnp_init);
 int pnp_debug;
 
 #if defined(CONFIG_PNP_DEBUG_MESSAGES)
-static int __init pnp_debug_setup(char *__unused)
-{
-       pnp_debug = 1;
-       return 1;
-}
-__setup("pnp.debug", pnp_debug_setup);
+module_param_named(debug, pnp_debug, int, 0644);
 #endif
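
With the PnP core now linked into a single pnp.o object, the old __setup("pnp.debug", ...) hook becomes an ordinary module parameter: debugging can be enabled at boot with pnp.debug=1 or, because of the 0644 mode, toggled at run time through /sys/module/pnp/parameters/debug.
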
index d1dbb9d..00e9403 100644 (file)
@@ -189,8 +189,11 @@ static int pnp_bus_resume(struct device *dev)
        if (!pnp_drv)
                return 0;
 
-       if (pnp_dev->protocol->resume)
-               pnp_dev->protocol->resume(pnp_dev);
+       if (pnp_dev->protocol->resume) {
+               error = pnp_dev->protocol->resume(pnp_dev);
+               if (error)
+                       return error;
+       }
 
        if (pnp_can_write(pnp_dev)) {
                error = pnp_start_dev(pnp_dev);
index cac18bb..6e607aa 100644 (file)
@@ -1,7 +1,7 @@
 #
 # Makefile for the kernel ISAPNP driver.
 #
+obj-y                  += pnp.o
+pnp-y                  := core.o compat.o
 
-isapnp-proc-$(CONFIG_PROC_FS) = proc.o
-
-obj-y := core.o compat.o $(isapnp-proc-y)
+pnp-$(CONFIG_PROC_FS)  += proc.o
index 905326f..40c93da 100644 (file)
@@ -1,5 +1,6 @@
 #
 # Makefile for the kernel PNPACPI driver.
 #
+obj-y += pnp.o
 
-obj-y := core.o rsparser.o
+pnp-y := core.o rsparser.o
index 57313f4..ca84d50 100644 (file)
@@ -81,12 +81,19 @@ static int pnpacpi_get_resources(struct pnp_dev *dev)
 
 static int pnpacpi_set_resources(struct pnp_dev *dev)
 {
-       struct acpi_device *acpi_dev = dev->data;
-       acpi_handle handle = acpi_dev->handle;
+       struct acpi_device *acpi_dev;
+       acpi_handle handle;
        struct acpi_buffer buffer;
        int ret;
 
        pnp_dbg(&dev->dev, "set resources\n");
+
+       handle = DEVICE_ACPI_HANDLE(&dev->dev);
+       if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) {
+               dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
+               return -ENODEV;
+       }
+
        ret = pnpacpi_build_resource_template(dev, &buffer);
        if (ret)
                return ret;
@@ -105,12 +112,18 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
 
 static int pnpacpi_disable_resources(struct pnp_dev *dev)
 {
-       struct acpi_device *acpi_dev = dev->data;
-       acpi_handle handle = acpi_dev->handle;
+       struct acpi_device *acpi_dev;
+       acpi_handle handle;
        int ret;
 
        dev_dbg(&dev->dev, "disable resources\n");
 
+       handle = DEVICE_ACPI_HANDLE(&dev->dev);
+       if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) {
+               dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
+               return 0;
+       }
+
        /* acpi_unregister_gsi(pnp_irq(dev, 0)); */
        ret = 0;
        if (acpi_bus_power_manageable(handle))
@@ -124,46 +137,74 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
 #ifdef CONFIG_ACPI_SLEEP
 static bool pnpacpi_can_wakeup(struct pnp_dev *dev)
 {
-       struct acpi_device *acpi_dev = dev->data;
-       acpi_handle handle = acpi_dev->handle;
+       struct acpi_device *acpi_dev;
+       acpi_handle handle;
+
+       handle = DEVICE_ACPI_HANDLE(&dev->dev);
+       if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) {
+               dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
+               return false;
+       }
 
        return acpi_bus_can_wakeup(handle);
 }
 
 static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
 {
-       struct acpi_device *acpi_dev = dev->data;
-       acpi_handle handle = acpi_dev->handle;
-       int power_state;
+       struct acpi_device *acpi_dev;
+       acpi_handle handle;
+       int error = 0;
+
+       handle = DEVICE_ACPI_HANDLE(&dev->dev);
+       if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) {
+               dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
+               return 0;
+       }
 
        if (device_can_wakeup(&dev->dev)) {
-               int rc = acpi_pm_device_sleep_wake(&dev->dev,
+               error = acpi_pm_device_sleep_wake(&dev->dev,
                                device_may_wakeup(&dev->dev));
+               if (error)
+                       return error;
+       }
+
+       if (acpi_bus_power_manageable(handle)) {
+               int power_state = acpi_pm_device_sleep_state(&dev->dev, NULL);
+
+               if (power_state < 0)
+                       power_state = (state.event == PM_EVENT_ON) ?
+                                       ACPI_STATE_D0 : ACPI_STATE_D3;
 
-               if (rc)
-                       return rc;
+               /*
+                * acpi_bus_set_power() often fails (keyboard port can't be
+                * powered-down?), and in any case, our return value is ignored
+                * by pnp_bus_suspend().  Hence we don't revert the wakeup
+                * setting if the set_power fails.
+                */
+               error = acpi_bus_set_power(handle, power_state);
        }
-       power_state = acpi_pm_device_sleep_state(&dev->dev, NULL);
-       if (power_state < 0)
-               power_state = (state.event == PM_EVENT_ON) ?
-                               ACPI_STATE_D0 : ACPI_STATE_D3;
-
-       /* acpi_bus_set_power() often fails (keyboard port can't be
-        * powered-down?), and in any case, our return value is ignored
-        * by pnp_bus_suspend().  Hence we don't revert the wakeup
-        * setting if the set_power fails.
-        */
-       return acpi_bus_set_power(handle, power_state);
+
+       return error;
 }
 
 static int pnpacpi_resume(struct pnp_dev *dev)
 {
-       struct acpi_device *acpi_dev = dev->data;
-       acpi_handle handle = acpi_dev->handle;
+       struct acpi_device *acpi_dev;
+       acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
+       int error = 0;
+
+       if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) {
+               dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
+               return -ENODEV;
+       }
 
        if (device_may_wakeup(&dev->dev))
                acpi_pm_device_sleep_wake(&dev->dev, false);
-       return acpi_bus_set_power(handle, ACPI_STATE_D0);
+
+       if (acpi_bus_power_manageable(handle))
+               error = acpi_bus_set_power(handle, ACPI_STATE_D0);
+
+       return error;
 }
 #endif
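
The hunks above repeat one lookup pattern: rather than trusting dev->data, resolve the struct acpi_device through the physical device's ACPI handle each time. Factored out, the pattern is roughly (a sketch only; the patch open-codes it in each callback):

	static struct acpi_device *pnpacpi_to_acpi_dev(struct pnp_dev *dev)
	{
		struct acpi_device *acpi_dev;
		acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);

		if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev)))
			return NULL;
		return acpi_dev;
	}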
 
index 3cd3ed7..240b0ff 100644 (file)
@@ -1,7 +1,8 @@
 #
 # Makefile for the kernel PNPBIOS driver.
 #
+obj-y := pnp.o
 
-pnpbios-proc-$(CONFIG_PNPBIOS_PROC_FS) = proc.o
+pnp-y := core.o bioscalls.o rsparser.o
 
-obj-y := core.o bioscalls.o rsparser.o $(pnpbios-proc-y)
+pnp-$(CONFIG_PNPBIOS_PROC_FS) += proc.o
index bf7c687..f7a5dba 100644 (file)
@@ -4,6 +4,7 @@
 
 menuconfig THERMAL
        tristate "Generic Thermal sysfs driver"
+       depends on NET
        help
          Generic Thermal Sysfs driver offers a generic mechanism for
          thermal management. Usually it's made up of one or more thermal
index 13c72c6..7d0e63c 100644 (file)
@@ -32,6 +32,8 @@
 #include <linux/thermal.h>
 #include <linux/spinlock.h>
 #include <linux/reboot.h>
+#include <net/netlink.h>
+#include <net/genetlink.h>
 
 MODULE_AUTHOR("Zhang Rui");
 MODULE_DESCRIPTION("Generic thermal management sysfs support");
@@ -58,6 +60,22 @@ static LIST_HEAD(thermal_tz_list);
 static LIST_HEAD(thermal_cdev_list);
 static DEFINE_MUTEX(thermal_list_lock);
 
+static unsigned int thermal_event_seqnum;
+
+static struct genl_family thermal_event_genl_family = {
+       .id = GENL_ID_GENERATE,
+       .name = THERMAL_GENL_FAMILY_NAME,
+       .version = THERMAL_GENL_VERSION,
+       .maxattr = THERMAL_GENL_ATTR_MAX,
+};
+
+static struct genl_multicast_group thermal_event_mcgrp = {
+       .name = THERMAL_GENL_MCAST_GROUP_NAME,
+};
+
+static int genetlink_init(void);
+static void genetlink_exit(void);
+
 static int get_idr(struct idr *idr, struct mutex *lock, int *id)
 {
        int err;
@@ -823,11 +841,8 @@ static struct class thermal_class = {
  * @devdata:   device private data.
  * @ops:               standard thermal cooling devices callbacks.
  */
-struct thermal_cooling_device *thermal_cooling_device_register(char *type,
-                                                              void *devdata,
-                                                              struct
-                                                              thermal_cooling_device_ops
-                                                              *ops)
+struct thermal_cooling_device *thermal_cooling_device_register(
+     char *type, void *devdata, const struct thermal_cooling_device_ops *ops)
 {
        struct thermal_cooling_device *cdev;
        struct thermal_zone_device *pos;
@@ -1048,13 +1063,9 @@ EXPORT_SYMBOL(thermal_zone_device_update);
  * section 11.1.5.1 of the ACPI specification 3.0.
  */
 struct thermal_zone_device *thermal_zone_device_register(char *type,
-                                                        int trips,
-                                                        void *devdata, struct
-                                                        thermal_zone_device_ops
-                                                        *ops, int tc1, int
-                                                        tc2,
-                                                        int passive_delay,
-                                                        int polling_delay)
+       int trips, void *devdata,
+       const struct thermal_zone_device_ops *ops,
+       int tc1, int tc2, int passive_delay, int polling_delay)
 {
        struct thermal_zone_device *tz;
        struct thermal_cooling_device *pos;
@@ -1214,6 +1225,82 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
 
 EXPORT_SYMBOL(thermal_zone_device_unregister);
 
+int generate_netlink_event(u32 orig, enum events event)
+{
+       struct sk_buff *skb;
+       struct nlattr *attr;
+       struct thermal_genl_event *thermal_event;
+       void *msg_header;
+       int size;
+       int result;
+
+       /* allocate memory */
+       size = nla_total_size(sizeof(struct thermal_genl_event)) + \
+                               nla_total_size(0);
+
+       skb = genlmsg_new(size, GFP_ATOMIC);
+       if (!skb)
+               return -ENOMEM;
+
+       /* add the genetlink message header */
+       msg_header = genlmsg_put(skb, 0, thermal_event_seqnum++,
+                                &thermal_event_genl_family, 0,
+                                THERMAL_GENL_CMD_EVENT);
+       if (!msg_header) {
+               nlmsg_free(skb);
+               return -ENOMEM;
+       }
+
+       /* fill the data */
+       attr = nla_reserve(skb, THERMAL_GENL_ATTR_EVENT, \
+                       sizeof(struct thermal_genl_event));
+
+       if (!attr) {
+               nlmsg_free(skb);
+               return -EINVAL;
+       }
+
+       thermal_event = nla_data(attr);
+       if (!thermal_event) {
+               nlmsg_free(skb);
+               return -EINVAL;
+       }
+
+       memset(thermal_event, 0, sizeof(struct thermal_genl_event));
+
+       thermal_event->orig = orig;
+       thermal_event->event = event;
+
+       /* send multicast genetlink message */
+       result = genlmsg_end(skb, msg_header);
+       if (result < 0) {
+               nlmsg_free(skb);
+               return result;
+       }
+
+       result = genlmsg_multicast(skb, 0, thermal_event_mcgrp.id, GFP_ATOMIC);
+       if (result)
+               printk(KERN_INFO "failed to send netlink event:%d", result);
+
+       return result;
+}
+EXPORT_SYMBOL(generate_netlink_event);
+
+static int genetlink_init(void)
+{
+       int result;
+
+       result = genl_register_family(&thermal_event_genl_family);
+       if (result)
+               return result;
+
+       result = genl_register_mc_group(&thermal_event_genl_family,
+                                       &thermal_event_mcgrp);
+       if (result)
+               genl_unregister_family(&thermal_event_genl_family);
+       return result;
+}
+
 static int __init thermal_init(void)
 {
        int result = 0;
@@ -1225,9 +1312,15 @@ static int __init thermal_init(void)
                mutex_destroy(&thermal_idr_lock);
                mutex_destroy(&thermal_list_lock);
        }
+       result = genetlink_init();
        return result;
 }
 
+static void genetlink_exit(void)
+{
+       genl_unregister_family(&thermal_event_genl_family);
+}
+
 static void __exit thermal_exit(void)
 {
        class_unregister(&thermal_class);
@@ -1235,7 +1328,8 @@ static void __exit thermal_exit(void)
        idr_destroy(&thermal_cdev_idr);
        mutex_destroy(&thermal_idr_lock);
        mutex_destroy(&thermal_list_lock);
+       genetlink_exit();
 }
 
-subsys_initcall(thermal_init);
+fs_initcall(thermal_init);
 module_exit(thermal_exit);
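
generate_netlink_event() gives thermal drivers a simple way to push one of the new events to the "thermal_event" multicast group; a hedged caller sketch (tz_id is a made-up originator cookie chosen by the driver):

	if (temp >= crit_temp)
		generate_netlink_event(tz_id, THERMAL_CRITICAL);

The initcall level also moves from subsys_initcall to fs_initcall, presumably so that genl_register_family() runs after the generic netlink core has been initialized.
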
index 359ef11..78ca429 100644 (file)
@@ -148,9 +148,7 @@ struct acpi_device_flags {
        u32 suprise_removal_ok:1;
        u32 power_manageable:1;
        u32 performance_manageable:1;
-       u32 wake_capable:1;     /* Wakeup(_PRW) supported? */
-       u32 force_power_state:1;
-       u32 reserved:22;
+       u32 reserved:24;
 };
 
 /* File System */
@@ -242,20 +240,14 @@ struct acpi_device_perf {
 struct acpi_device_wakeup_flags {
        u8 valid:1;             /* Can successfully enable wakeup? */
        u8 run_wake:1;          /* Run-Wake GPE devices */
-       u8 always_enabled:1;    /* Run-wake devices that are always enabled */
        u8 notifier_present:1;  /* Wake-up notify handler has been installed */
 };
 
-struct acpi_device_wakeup_state {
-       u8 enabled:1;
-};
-
 struct acpi_device_wakeup {
        acpi_handle gpe_device;
        u64 gpe_number;
        u64 sleep_state;
        struct acpi_handle_list resources;
-       struct acpi_device_wakeup_state state;
        struct acpi_device_wakeup_flags flags;
        int prepare_count;
        int run_wake_count;
@@ -328,8 +320,8 @@ void acpi_bus_data_handler(acpi_handle handle, void *context);
 acpi_status acpi_bus_get_status_handle(acpi_handle handle,
                                       unsigned long long *sta);
 int acpi_bus_get_status(struct acpi_device *device);
-int acpi_bus_get_power(acpi_handle handle, int *state);
 int acpi_bus_set_power(acpi_handle handle, int state);
+int acpi_bus_update_power(acpi_handle handle, int *state_p);
 bool acpi_bus_power_manageable(acpi_handle handle);
 bool acpi_bus_can_wakeup(acpi_handle handle);
 #ifdef CONFIG_ACPI_PROC_EVENT
index 53b7cfd..241b8a0 100644 (file)
@@ -47,7 +47,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20101013
+#define ACPI_CA_VERSION                 0x20101209
 
 #include "actypes.h"
 #include "actbl.h"
@@ -228,6 +228,10 @@ acpi_status acpi_get_parent(acpi_handle object, acpi_handle * out_handle);
 acpi_status
 acpi_install_initialization_handler(acpi_init_handler handler, u32 function);
 
+acpi_status
+acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler,
+                                void *context);
+
 acpi_status
 acpi_install_fixed_event_handler(u32 acpi_event,
                                 acpi_event_handler handler, void *context);
@@ -258,11 +262,11 @@ acpi_remove_address_space_handler(acpi_handle device,
 acpi_status
 acpi_install_gpe_handler(acpi_handle gpe_device,
                         u32 gpe_number,
-                        u32 type, acpi_event_handler address, void *context);
+                        u32 type, acpi_gpe_handler address, void *context);
 
 acpi_status
 acpi_remove_gpe_handler(acpi_handle gpe_device,
-                       u32 gpe_number, acpi_event_handler address);
+                       u32 gpe_number, acpi_gpe_handler address);
 
 #ifdef ACPI_FUTURE_USAGE
 acpi_status acpi_install_exception_handler(acpi_exception_handler handler);
@@ -292,11 +296,13 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number);
 
 acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number);
 
-acpi_status acpi_gpe_can_wake(acpi_handle gpe_device, u32 gpe_number);
-
 acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number);
 
-acpi_status acpi_gpe_wakeup(acpi_handle gpe_device, u32 gpe_number, u8 action);
+acpi_status
+acpi_setup_gpe_for_wake(acpi_handle parent_device,
+                       acpi_handle gpe_device, u32 gpe_number);
+
+acpi_status acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 action);
 
 acpi_status
 acpi_get_gpe_status(acpi_handle gpe_device,
@@ -315,7 +321,7 @@ acpi_install_gpe_block(acpi_handle gpe_device,
 
 acpi_status acpi_remove_gpe_block(acpi_handle gpe_device);
 
-acpi_status acpi_update_gpes(void);
+acpi_status acpi_update_all_gpes(void);
 
 /*
  * Resource interfaces
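
The wake-GPE interface is now split in two: acpi_setup_gpe_for_wake() marks a GPE as used for wakeup on behalf of a device, and acpi_set_gpe_wake_mask() (the renamed acpi_gpe_wakeup()) masks or unmasks it around sleep transitions. A rough sketch of the expected flow, with placeholder handles and GPE number:

	/* once, when a wakeup-capable device is enumerated: */
	status = acpi_setup_gpe_for_wake(device_handle, gpe_device, gpe_number);

	/* entering a sleep state (see acpi_enable_wakeup_devices() above): */
	acpi_set_gpe_wake_mask(gpe_device, gpe_number, ACPI_GPE_ENABLE);

	/* and leaving it again: */
	acpi_set_gpe_wake_mask(gpe_device, gpe_number, ACPI_GPE_DISABLE);
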
index 2b134b6..939a431 100644 (file)
@@ -656,33 +656,34 @@ typedef u32 acpi_event_status;
 #define ACPI_GPE_MAX                    0xFF
 #define ACPI_NUM_GPE                    256
 
-/* Actions for acpi_gpe_wakeup, acpi_hw_low_set_gpe */
+/* Actions for acpi_set_gpe_wake_mask, acpi_hw_low_set_gpe */
 
 #define ACPI_GPE_ENABLE                 0
 #define ACPI_GPE_DISABLE                1
-#define ACPI_GPE_COND_ENABLE            2
+#define ACPI_GPE_CONDITIONAL_ENABLE     2
 
 /*
  * GPE info flags - Per GPE
- * +-------+---+-+-+
- * |  7:4  |3:2|1|0|
- * +-------+---+-+-+
- *     |     |  | |
- *     |     |  | +--- Interrupt type: edge or level triggered
- *     |     |  +----- GPE can wake the system
- *     |     +-------- Type of dispatch:to method, handler, or none
- *     +-------------- <Reserved>
+ * +-------+-+-+---+
+ * |  7:4  |3|2|1:0|
+ * +-------+-+-+---+
+ *     |    | |  |
+ *     |    | |  +-- Type of dispatch:to method, handler, notify, or none
+ *     |    | +----- Interrupt type: edge or level triggered
+ *     |    +------- Is a Wake GPE
+ *     +------------ <Reserved>
  */
-#define ACPI_GPE_XRUPT_TYPE_MASK        (u8) 0x01
-#define ACPI_GPE_LEVEL_TRIGGERED        (u8) 0x01
-#define ACPI_GPE_EDGE_TRIGGERED         (u8) 0x00
+#define ACPI_GPE_DISPATCH_NONE          (u8) 0x00
+#define ACPI_GPE_DISPATCH_METHOD        (u8) 0x01
+#define ACPI_GPE_DISPATCH_HANDLER       (u8) 0x02
+#define ACPI_GPE_DISPATCH_NOTIFY        (u8) 0x03
+#define ACPI_GPE_DISPATCH_MASK          (u8) 0x03
 
-#define ACPI_GPE_CAN_WAKE              (u8) 0x02
+#define ACPI_GPE_LEVEL_TRIGGERED        (u8) 0x04
+#define ACPI_GPE_EDGE_TRIGGERED         (u8) 0x00
+#define ACPI_GPE_XRUPT_TYPE_MASK        (u8) 0x04
 
-#define ACPI_GPE_DISPATCH_MASK          (u8) 0x0C
-#define ACPI_GPE_DISPATCH_HANDLER       (u8) 0x04
-#define ACPI_GPE_DISPATCH_METHOD        (u8) 0x08
-#define ACPI_GPE_DISPATCH_NOT_USED      (u8) 0x00
+#define ACPI_GPE_CAN_WAKE               (u8) 0x08
 
 /*
  * Flags for GPE and Lock interfaces
@@ -894,8 +895,19 @@ typedef void
 /*
  * Various handlers and callback procedures
  */
+typedef
+void (*ACPI_GBL_EVENT_HANDLER) (u32 event_type,
+                              acpi_handle device,
+                              u32 event_number, void *context);
+
+#define ACPI_EVENT_TYPE_GPE         0
+#define ACPI_EVENT_TYPE_FIXED       1
+
 typedef u32(*acpi_event_handler) (void *context);
 
+typedef
+u32 (*acpi_gpe_handler) (acpi_handle gpe_device, u32 gpe_number, void *context);
+
 typedef
 void (*acpi_notify_handler) (acpi_handle device, u32 value, void *context);
 
@@ -951,6 +963,10 @@ u32 (*acpi_interface_handler) (acpi_string interface_name, u32 supported);
 #define ACPI_INTERRUPT_NOT_HANDLED      0x00
 #define ACPI_INTERRUPT_HANDLED          0x01
 
+/* GPE handler return values */
+
+#define ACPI_REENABLE_GPE               0x80
+
 /* Length of 32-bit EISAID values when converted back to a string */
 
 #define ACPI_EISAID_STRING_SIZE         8      /* Includes null terminator */
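
ACPI_GBL_EVENT_HANDLER is the callback type behind the new acpi_install_global_event_handler() interface declared earlier. A minimal sketch of a handler that just counts GPEs (the counter and its wiring are placeholders):

	static u32 gpe_count;

	static void my_global_event_handler(u32 event_type, acpi_handle device,
					    u32 event_number, void *context)
	{
		if (event_type == ACPI_EVENT_TYPE_GPE)
			gpe_count++;
	}

	/* registration: */
	status = acpi_install_global_event_handler(my_global_event_handler, NULL);
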
index 1b62102..55192ac 100644 (file)
@@ -324,6 +324,12 @@ int acpi_processor_tstate_has_changed(struct acpi_processor *pr);
 int acpi_processor_get_throttling_info(struct acpi_processor *pr);
 extern int acpi_processor_set_throttling(struct acpi_processor *pr,
                                         int state, bool force);
+/*
+ * Reevaluate whether the T-state is invalid after one cpu is
+ * onlined/offlined. In such case the flags.throttling will be updated.
+ */
+extern void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
+                       unsigned long action);
 extern const struct file_operations acpi_processor_throttling_fops;
 extern void acpi_processor_throttling_init(void);
 /* in processor_idle.c */
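
Judging from the comment, the intended caller of acpi_processor_reevaluate_tstate() is the processor driver's CPU hotplug path; a hedged sketch of such a notifier (the callback name is a placeholder, the per-CPU processors pointer comes from the existing driver):

	static int my_cpu_hotplug_notify(struct notifier_block *nfb,
					 unsigned long action, void *hcpu)
	{
		unsigned int cpu = (unsigned long)hcpu;
		struct acpi_processor *pr = per_cpu(processors, cpu);

		if (pr)
			acpi_processor_reevaluate_tstate(pr, action);
		return NOTIFY_OK;
	}
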
index 67c91b4..eb176bb 100644 (file)
@@ -352,4 +352,14 @@ static inline int acpi_table_parse(char *id,
        return -1;
 }
 #endif /* !CONFIG_ACPI */
+
+#ifdef CONFIG_ACPI_SLEEP
+int suspend_nvs_register(unsigned long start, unsigned long size);
+#else
+static inline int suspend_nvs_register(unsigned long a, unsigned long b)
+{
+       return 0;
+}
+#endif
+
 #endif /*_LINUX_ACPI_H*/
index bf972f8..3104aaf 100644 (file)
  * Severity difinition for error_severity in struct cper_record_header
  * and section_severity in struct cper_section_descriptor
  */
-#define CPER_SEV_RECOVERABLE                   0x0
-#define CPER_SEV_FATAL                         0x1
-#define CPER_SEV_CORRECTED                     0x2
-#define CPER_SEV_INFORMATIONAL                 0x3
+enum {
+       CPER_SEV_RECOVERABLE,
+       CPER_SEV_FATAL,
+       CPER_SEV_CORRECTED,
+       CPER_SEV_INFORMATIONAL,
+};
 
 /*
  * Validation bits difinition for validation_bits in struct
        UUID_LE(0x036F84E1, 0x7F37, 0x428c, 0xA7, 0x9E, 0x57, 0x5F,     \
                0xDF, 0xAA, 0x84, 0xEC)
 
+#define CPER_PROC_VALID_TYPE                   0x0001
+#define CPER_PROC_VALID_ISA                    0x0002
+#define CPER_PROC_VALID_ERROR_TYPE             0x0004
+#define CPER_PROC_VALID_OPERATION              0x0008
+#define CPER_PROC_VALID_FLAGS                  0x0010
+#define CPER_PROC_VALID_LEVEL                  0x0020
+#define CPER_PROC_VALID_VERSION                        0x0040
+#define CPER_PROC_VALID_BRAND_INFO             0x0080
+#define CPER_PROC_VALID_ID                     0x0100
+#define CPER_PROC_VALID_TARGET_ADDRESS         0x0200
+#define CPER_PROC_VALID_REQUESTOR_ID           0x0400
+#define CPER_PROC_VALID_RESPONDER_ID           0x0800
+#define CPER_PROC_VALID_IP                     0x1000
+
+#define CPER_MEM_VALID_ERROR_STATUS            0x0001
+#define CPER_MEM_VALID_PHYSICAL_ADDRESS                0x0002
+#define CPER_MEM_VALID_PHYSICAL_ADDRESS_MASK   0x0004
+#define CPER_MEM_VALID_NODE                    0x0008
+#define CPER_MEM_VALID_CARD                    0x0010
+#define CPER_MEM_VALID_MODULE                  0x0020
+#define CPER_MEM_VALID_BANK                    0x0040
+#define CPER_MEM_VALID_DEVICE                  0x0080
+#define CPER_MEM_VALID_ROW                     0x0100
+#define CPER_MEM_VALID_COLUMN                  0x0200
+#define CPER_MEM_VALID_BIT_POSITION            0x0400
+#define CPER_MEM_VALID_REQUESTOR_ID            0x0800
+#define CPER_MEM_VALID_RESPONDER_ID            0x1000
+#define CPER_MEM_VALID_TARGET_ID               0x2000
+#define CPER_MEM_VALID_ERROR_TYPE              0x4000
+
+#define CPER_PCIE_VALID_PORT_TYPE              0x0001
+#define CPER_PCIE_VALID_VERSION                        0x0002
+#define CPER_PCIE_VALID_COMMAND_STATUS         0x0004
+#define CPER_PCIE_VALID_DEVICE_ID              0x0008
+#define CPER_PCIE_VALID_SERIAL_NUMBER          0x0010
+#define CPER_PCIE_VALID_BRIDGE_CONTROL_STATUS  0x0020
+#define CPER_PCIE_VALID_CAPABILITY             0x0040
+#define CPER_PCIE_VALID_AER_INFO               0x0080
+
+#define CPER_PCIE_SLOT_SHIFT                   3
+
 /*
  * All tables and structs must be byte-packed to match CPER
  * specification, since the tables are provided by the system BIOS
@@ -306,6 +349,41 @@ struct cper_sec_mem_err {
        __u8    error_type;
 };
 
+struct cper_sec_pcie {
+       __u64           validation_bits;
+       __u32           port_type;
+       struct {
+               __u8    minor;
+               __u8    major;
+               __u8    reserved[2];
+       }               version;
+       __u16           command;
+       __u16           status;
+       __u32           reserved;
+       struct {
+               __u16   vendor_id;
+               __u16   device_id;
+               __u8    class_code[3];
+               __u8    function;
+               __u8    device;
+               __u16   segment;
+               __u8    bus;
+               __u8    secondary_bus;
+               __u16   slot;
+               __u8    reserved;
+       }               device_id;
+       struct {
+               __u32   lower;
+               __u32   upper;
+       }               serial_number;
+       struct {
+               __u16   secondary_status;
+               __u16   control;
+       }               bridge;
+       __u8    capability[60];
+       __u8    aer_info[96];
+};
+
 /* Reset to default packing */
 #pragma pack()
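
Consumers of the new PCIe error section are expected to test the matching CPER_PCIE_VALID_* bits before trusting individual fields; a short hedged sketch:

	static void report_pcie_error(const struct cper_sec_pcie *pcie)
	{
		if (pcie->validation_bits & CPER_PCIE_VALID_DEVICE_ID)
			pr_err("PCIe error at %04x:%02x:%02x.%x\n",
			       pcie->device_id.segment, pcie->device_id.bus,
			       pcie->device_id.device, pcie->device_id.function);
	}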
 
index 65aae34..045f2f2 100644 (file)
@@ -454,6 +454,44 @@ unsigned int ipmi_addr_length(int addr_type);
 /* Validate that the given IPMI address is valid. */
 int ipmi_validate_addr(struct ipmi_addr *addr, int len);
 
+/*
+ * How did the IPMI driver find out about the device?
+ */
+enum ipmi_addr_src {
+       SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS,
+       SI_PCI, SI_DEVICETREE, SI_DEFAULT
+};
+
+union ipmi_smi_info_union {
+       /*
+        * the acpi_info element is defined for the SI_ACPI
+        * address type
+        */
+       struct {
+               void *acpi_handle;
+       } acpi_info;
+};
+
+struct ipmi_smi_info {
+       enum ipmi_addr_src addr_src;
+
+       /*
+        * Base device for the interface.  Don't forget to put this when
+        * you are done.
+        */
+       struct device *dev;
+
+       /*
+        * The addr_info provides more detailed info for some IPMI
+        * devices, depending on the addr_src.  Currently only SI_ACPI
+        * info is provided.
+        */
+       union ipmi_smi_info_union addr_info;
+};
+
+/* This is to get the private info of ipmi_smi_t */
+extern int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data);
+
 #endif /* __KERNEL__ */
 
 
index 4b48318..906590a 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/module.h>
 #include <linux/device.h>
 #include <linux/platform_device.h>
+#include <linux/ipmi.h>
 
 /* This files describes the interface for IPMI system management interface
    drivers to bind into the IPMI message handler. */
@@ -86,6 +87,13 @@ struct ipmi_smi_handlers {
        int (*start_processing)(void       *send_info,
                                ipmi_smi_t new_intf);
 
+       /*
+        * Get the detailed private info of the low level interface and store
+        * it into the structure of ipmi_smi_data. For example: the
+        * ACPI device handle will be returned for the pnp_acpi IPMI device.
+        */
+       int (*get_smi_info)(void *send_info, struct ipmi_smi_info *data);
+
        /* Called to enqueue an SMI message to be sent.  This
           operation is not allowed to fail.  If an error occurs, it
           should report back the error in a received message.  It may
index c1f4998..5a89e36 100644 (file)
@@ -258,23 +258,6 @@ static inline int hibernate(void) { return -ENOSYS; }
 static inline bool system_entering_hibernation(void) { return false; }
 #endif /* CONFIG_HIBERNATION */
 
-#ifdef CONFIG_SUSPEND_NVS
-extern int suspend_nvs_register(unsigned long start, unsigned long size);
-extern int suspend_nvs_alloc(void);
-extern void suspend_nvs_free(void);
-extern void suspend_nvs_save(void);
-extern void suspend_nvs_restore(void);
-#else /* CONFIG_SUSPEND_NVS */
-static inline int suspend_nvs_register(unsigned long a, unsigned long b)
-{
-       return 0;
-}
-static inline int suspend_nvs_alloc(void) { return 0; }
-static inline void suspend_nvs_free(void) {}
-static inline void suspend_nvs_save(void) {}
-static inline void suspend_nvs_restore(void) {}
-#endif /* CONFIG_SUSPEND_NVS */
-
 #ifdef CONFIG_PM_SLEEP
 void save_processor_state(void);
 void restore_processor_state(void);
index 1de8b9e..8651556 100644 (file)
@@ -77,7 +77,7 @@ struct thermal_cooling_device {
        char type[THERMAL_NAME_LENGTH];
        struct device device;
        void *devdata;
-       struct thermal_cooling_device_ops *ops;
+       const struct thermal_cooling_device_ops *ops;
        struct list_head node;
 };
 
@@ -114,7 +114,7 @@ struct thermal_zone_device {
        int last_temperature;
        bool passive;
        unsigned int forced_passive;
-       struct thermal_zone_device_ops *ops;
+       const struct thermal_zone_device_ops *ops;
        struct list_head cooling_devices;
        struct idr idr;
        struct mutex lock;      /* protect cooling devices list */
@@ -127,13 +127,41 @@ struct thermal_zone_device {
        struct thermal_hwmon_attr temp_crit;    /* hwmon sys attr */
 #endif
 };
+/* Adding event notification support elements */
+#define THERMAL_GENL_FAMILY_NAME                "thermal_event"
+#define THERMAL_GENL_VERSION                    0x01
+#define THERMAL_GENL_MCAST_GROUP_NAME           "thermal_mc_group"
+
+enum events {
+       THERMAL_AUX0,
+       THERMAL_AUX1,
+       THERMAL_CRITICAL,
+       THERMAL_DEV_FAULT,
+};
+
+struct thermal_genl_event {
+       u32 orig;
+       enum events event;
+};
+/* attributes of thermal_genl_family */
+enum {
+       THERMAL_GENL_ATTR_UNSPEC,
+       THERMAL_GENL_ATTR_EVENT,
+       __THERMAL_GENL_ATTR_MAX,
+};
+#define THERMAL_GENL_ATTR_MAX (__THERMAL_GENL_ATTR_MAX - 1)
+
+/* commands supported by the thermal_genl_family */
+enum {
+       THERMAL_GENL_CMD_UNSPEC,
+       THERMAL_GENL_CMD_EVENT,
+       __THERMAL_GENL_CMD_MAX,
+};
+#define THERMAL_GENL_CMD_MAX (__THERMAL_GENL_CMD_MAX - 1)
 
 struct thermal_zone_device *thermal_zone_device_register(char *, int, void *,
-                                                        struct
-                                                        thermal_zone_device_ops
-                                                        *, int tc1, int tc2,
-                                                        int passive_freq,
-                                                        int polling_freq);
+               const struct thermal_zone_device_ops *, int tc1, int tc2,
+               int passive_freq, int polling_freq);
 void thermal_zone_device_unregister(struct thermal_zone_device *);
 
 int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int,
@@ -142,9 +170,8 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int,
                                       struct thermal_cooling_device *);
 void thermal_zone_device_update(struct thermal_zone_device *);
 struct thermal_cooling_device *thermal_cooling_device_register(char *, void *,
-                                                              struct
-                                                              thermal_cooling_device_ops
-                                                              *);
+               const struct thermal_cooling_device_ops *);
 void thermal_cooling_device_unregister(struct thermal_cooling_device *);
+extern int generate_netlink_event(u32 orig, enum events event);
 
 #endif /* __THERMAL_H__ */
index 4c13b1a..991bb87 100644 (file)
@@ -34,6 +34,7 @@ static int pause_on_oops_flag;
 static DEFINE_SPINLOCK(pause_on_oops_lock);
 
 int panic_timeout;
+EXPORT_SYMBOL_GPL(panic_timeout);
 
 ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
 
index a5aff3e..2657299 100644 (file)
@@ -100,13 +100,9 @@ config PM_SLEEP_ADVANCED_DEBUG
        depends on PM_ADVANCED_DEBUG
        default n
 
-config SUSPEND_NVS
-       bool
-
 config SUSPEND
        bool "Suspend to RAM and standby"
        depends on PM && ARCH_SUSPEND_POSSIBLE
-       select SUSPEND_NVS if HAS_IOMEM
        default y
        ---help---
          Allow the system to enter sleep states in which main memory is
@@ -140,7 +136,6 @@ config HIBERNATION
        depends on PM && SWAP && ARCH_HIBERNATION_POSSIBLE
        select LZO_COMPRESS
        select LZO_DECOMPRESS
-       select SUSPEND_NVS if HAS_IOMEM
        ---help---
          Enable the suspend to disk (STD) functionality, which is usually
          called "hibernation" in user interfaces.  STD checkpoints the
index b755972..c350e18 100644 (file)
@@ -7,6 +7,5 @@ obj-$(CONFIG_SUSPEND)           += suspend.o
 obj-$(CONFIG_PM_TEST_SUSPEND)  += suspend_test.o
 obj-$(CONFIG_HIBERNATION)      += hibernate.o snapshot.o swap.o user.o \
                                   block_io.o
-obj-$(CONFIG_SUSPEND_NVS)      += nvs.o
 
 obj-$(CONFIG_MAGIC_SYSRQ)      += poweroff.o
index 5730ecd..da4e2ad 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/io.h>
+#include <linux/module.h>
 #include <asm/cacheflush.h>
 #include <asm/pgtable.h>
 
@@ -90,3 +91,4 @@ int ioremap_page_range(unsigned long addr,
 
        return err;
 }
+EXPORT_SYMBOL_GPL(ioremap_page_range);
index cac13b4..f9b1667 100644 (file)
@@ -1175,6 +1175,7 @@ void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
 {
        vunmap_page_range(addr, addr + size);
 }
+EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);
 
 /**
  * unmap_kernel_range - unmap kernel VM area and flush cache and TLB