Merge tag 'pm+acpi-3.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael...
author    Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 1 Apr 2014 19:48:54 +0000 (12:48 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 1 Apr 2014 19:48:54 +0000 (12:48 -0700)
Pull ACPI and power management updates from Rafael Wysocki:
 "The majority of this material spent some time in linux-next, some of
  it even several weeks.  There are a few relatively fresh commits in
  it, but they are mostly fixes and simple cleanups.

  ACPI took the lead this time, both in terms of the number of commits
  and the number of modified lines of code, cpufreq follows and there
  are a few changes in the PM core and in cpuidle too.

  A new feature that has already received some attention on LWN.net is
  the device PM QoS extension allowing latency tolerance requirements to
  be propagated from leaf devices to their ancestors with hardware
  interfaces for specifying latency tolerance.  That should help systems
  with hardware-driven power management avoid going too far with it when
  latency tolerance constraints are in place.

  There also are some significant changes in the ACPI core related to
  the way in which hotplug notifications are handled.  They affect PCI
  hotplug (ACPIPHP) and the ACPI dock station code too.  The bottom line
  is that all those notifications now go through the root notify handler
  and are propagated to the interested subsystems by means of callbacks
  instead of having to install a notify handler for each device object
  that we can potentially get hotplug notifications for.

  In addition to that ACPICA will now advertise "Windows 2013"
  compatibility for _OSI, because some systems out there don't work
  correctly if that is not done (some of them don't even boot).

  On the system suspend side of things, all of the device suspend and
  resume callbacks, except for ->prepare() and ->complete(), are now
  going to be executed asynchronously, as that turns out to speed up
  system suspend and resume quite significantly on some platforms, and
  there are a few more optimizations in that area.

  Apart from that, there are some new device IDs and fixes and cleanups
  all over.  In particular, the system suspend and resume handling by
  cpufreq should be improved and the cpuidle menu governor should be a
  bit more robust now.

  Specifics:

   - Device PM QoS support for latency tolerance constraints on systems
     with hardware interfaces allowing such constraints to be specified.
     That is necessary to prevent hardware-driven power management from
     becoming overly aggressive on some systems and to keep power
     management features that would lead to excessive latencies from
     being used in some cases.
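
     As a rough illustration of the new request type (a minimal sketch
     with a hypothetical driver context and value; the helpers and the
     DEV_PM_QOS_LATENCY_TOLERANCE type are the ones this series adds to
     <linux/pm_qos.h>), a driver could register a latency tolerance
     requirement roughly like this:

       #include <linux/device.h>
       #include <linux/pm_qos.h>

       /* Hypothetical driver data; only the QoS request matters here. */
       struct foo_drvdata {
               struct dev_pm_qos_request lat_tol_req;
       };

       static int foo_constrain_latency(struct device *dev,
                                        struct foo_drvdata *fd)
       {
               /* Tolerate at most 50 us of extra latency while active. */
               return dev_pm_qos_add_request(dev, &fd->lat_tol_req,
                                             DEV_PM_QOS_LATENCY_TOLERANCE,
                                             50);
       }

       static void foo_relax_latency(struct foo_drvdata *fd)
       {
               /* Drop the requirement; hardware may manage latency again. */
               dev_pm_qos_remove_request(&fd->lat_tol_req);
       }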

   - Consolidation of the handling of ACPI hotplug notifications for
     device objects.  This causes all device hotplug notifications to go
     through the root notify handler (which was executed for all of them
     anyway), and that handler propagates them to individual subsystems,
     if necessary, by executing callbacks provided by those subsystems
     (those callbacks are associated with struct acpi_device objects
     during device enumeration).  As a result, the code in question
     becomes both smaller and more straightforward, and none of those
     changes should be visible to users.
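
     As a heavily hedged sketch of the subsystem side (hypothetical
     names; how the callback gets attached to the struct acpi_device at
     enumeration time is subsystem specific and omitted here), such a
     callback could look like:

       #include <linux/acpi.h>

       /* Invoked by the ACPI core from its root notify handler. */
       static int foo_hotplug_notify(struct acpi_device *adev, u32 type)
       {
               switch (type) {
               case ACPI_NOTIFY_BUS_CHECK:
               case ACPI_NOTIFY_DEVICE_CHECK:
                       /* Re-enumerate devices below adev. */
                       break;
               case ACPI_NOTIFY_EJECT_REQUEST:
                       /* Offline and remove the device. */
                       break;
               default:
                       break;
               }
               return 0;
       }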

   - ACPICA update, including fixes related to the handling of _PRT in
     cases when it is broken and the addition of "Windows 2013" to the
     list of supported "features" for _OSI (which is necessary to
     support systems that work incorrectly or don't even boot without
     it).  Changes from Bob Moore and Lv Zheng.

   - Consolidation of ACPI _OST handling from Jiang Liu.

   - ACPI battery and AC fixes allowing unusual system configurations to
     be handled by that code from Alexander Mezin.

   - New device IDs for the ACPI LPSS driver from Chiau Ee Chew.

   - ACPI fan and thermal optimizations related to system suspend and
     resume from Aaron Lu.

   - Cleanups related to ACPI video from Jean Delvare.

   - Assorted ACPI fixes and cleanups from Al Stone, Hanjun Guo, Lan
     Tianyu, Paul Bolle, Tomasz Nowicki.

   - Intel RAPL (Running Average Power Limits) driver cleanups from
     Jacob Pan.

   - intel_pstate fixes and cleanups from Dirk Brandewie.

   - cpufreq fixes related to system suspend/resume handling from Viresh
     Kumar.

   - cpufreq core fixes and cleanups from Viresh Kumar, Stratos
     Karafotis, Saravana Kannan, Rashika Kheria, Joe Perches.

   - cpufreq drivers updates from Viresh Kumar, Zhuoyu Zhang, Rob
     Herring.

   - cpuidle fixes related to the menu governor from Tuukka Tikkanen.

   - cpuidle fix related to coupled CPUs handling from Paul Burton.

   - Asynchronous execution of all device suspend and resume callbacks,
     except for ->prepare and ->complete, during system suspend and
     resume from Chuansheng Liu.

   - Delayed resuming of runtime-suspended devices during system suspend
     for the PCI bus type and ACPI PM domain.

   - New set of PM helper routines to allow device runtime PM callbacks
     to be used during system suspend and resume more easily from Ulf
     Hansson.
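
     A minimal sketch of how a driver might use these helpers
     (hypothetical callback names; the helpers themselves,
     pm_runtime_force_suspend() and pm_runtime_force_resume(), are the
     ones added here):

       #include <linux/pm.h>
       #include <linux/pm_runtime.h>

       /* Hypothetical device-specific runtime PM callbacks. */
       static int foo_runtime_suspend(struct device *dev)
       {
               /* Put the device into its low-power state. */
               return 0;
       }

       static int foo_runtime_resume(struct device *dev)
       {
               /* Bring the device back to full power. */
               return 0;
       }

       /* Reuse the runtime PM callbacks for system sleep transitions. */
       static const struct dev_pm_ops foo_pm_ops = {
               SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                       pm_runtime_force_resume)
               SET_RUNTIME_PM_OPS(foo_runtime_suspend,
                                  foo_runtime_resume, NULL)
       };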

   - Assorted fixes and cleanups in the PM core from Geert Uytterhoeven,
     Prabhakar Lad, Philipp Zabel, Rashika Kheria, Sebastian Capella.

   - devfreq fix from Saravana Kannan"

* tag 'pm+acpi-3.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (162 commits)
  PM / devfreq: Rewrite devfreq_update_status() to fix multiple bugs
  PM / sleep: Correct whitespace errors in <linux/pm.h>
  intel_pstate: Set core to min P state during core offline
  cpufreq: Add stop CPU callback to cpufreq_driver interface
  cpufreq: Remove unnecessary braces
  cpufreq: Fix checkpatch errors and warnings
  cpufreq: powerpc: add cpufreq transition latency for FSL e500mc SoCs
  MAINTAINERS: Reorder maintainer addresses for PM and ACPI
  PM / Runtime: Update runtime_idle() documentation for return value meaning
  video / output: Drop display output class support
  fujitsu-laptop: Drop unneeded include
  acer-wmi: Stop selecting VIDEO_OUTPUT_CONTROL
  ACPI / gpu / drm: Stop selecting VIDEO_OUTPUT_CONTROL
  ACPI / video: fix ACPI_VIDEO dependencies
  cpufreq: remove unused notifier: CPUFREQ_{SUSPENDCHANGE|RESUMECHANGE}
  cpufreq: Do not allow ->setpolicy drivers to provide ->target
  cpufreq: arm_big_little: set 'physical_cluster' for each CPU
  cpufreq: arm_big_little: make vexpress driver depend on bL core driver
  ACPI / button: Add ACPI Button event via netlink routine
  ACPI: Remove duplicate definitions of PREFIX
  ...

331 files changed:
Documentation/ABI/testing/sysfs-devices-power
Documentation/ABI/testing/sysfs-power
Documentation/cpu-freq/core.txt
Documentation/cpu-freq/cpu-drivers.txt
Documentation/kernel-parameters.txt
Documentation/power/pm_qos_interface.txt
Documentation/trace/events-power.txt
MAINTAINERS
arch/arm/kernel/smp.c
arch/arm/kernel/smp_twd.c
arch/arm/mach-pxa/viper.c
arch/arm/mach-spear/spear1310.c
arch/arm/mach-spear/spear1340.c
arch/arm64/Kconfig
arch/ia64/configs/generic_defconfig
arch/ia64/configs/tiger_defconfig
arch/ia64/configs/zx1_defconfig
arch/ia64/kernel/acpi.c
arch/powerpc/oprofile/op_model_cell.c
arch/sparc/kernel/time_64.c
arch/x86/configs/i386_defconfig
arch/x86/configs/x86_64_defconfig
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/tsc.c
drivers/acpi/Kconfig
drivers/acpi/ac.c
drivers/acpi/acpi_cmos_rtc.c
drivers/acpi/acpi_lpss.c
drivers/acpi/acpi_pad.c
drivers/acpi/acpica/Makefile
drivers/acpi/acpica/accommon.h
drivers/acpi/acpica/acdebug.h
drivers/acpi/acpica/acdispat.h
drivers/acpi/acpica/acevents.h
drivers/acpi/acpica/acglobal.h
drivers/acpi/acpica/achware.h
drivers/acpi/acpica/acinterp.h
drivers/acpi/acpica/aclocal.h
drivers/acpi/acpica/acmacros.h
drivers/acpi/acpica/acnamesp.h
drivers/acpi/acpica/acobject.h
drivers/acpi/acpica/acopcode.h
drivers/acpi/acpica/acparser.h
drivers/acpi/acpica/acpredef.h
drivers/acpi/acpica/acresrc.h
drivers/acpi/acpica/acstruct.h
drivers/acpi/acpica/actables.h
drivers/acpi/acpica/acutils.h
drivers/acpi/acpica/amlcode.h
drivers/acpi/acpica/amlresrc.h
drivers/acpi/acpica/dsargs.c
drivers/acpi/acpica/dscontrol.c
drivers/acpi/acpica/dsfield.c
drivers/acpi/acpica/dsinit.c
drivers/acpi/acpica/dsmethod.c
drivers/acpi/acpica/dsmthdat.c
drivers/acpi/acpica/dsobject.c
drivers/acpi/acpica/dsopcode.c
drivers/acpi/acpica/dsutils.c
drivers/acpi/acpica/dswexec.c
drivers/acpi/acpica/dswload.c
drivers/acpi/acpica/dswload2.c
drivers/acpi/acpica/dswscope.c
drivers/acpi/acpica/dswstate.c
drivers/acpi/acpica/evevent.c
drivers/acpi/acpica/evglock.c
drivers/acpi/acpica/evgpe.c
drivers/acpi/acpica/evgpeblk.c
drivers/acpi/acpica/evgpeinit.c
drivers/acpi/acpica/evgpeutil.c
drivers/acpi/acpica/evhandler.c
drivers/acpi/acpica/evmisc.c
drivers/acpi/acpica/evregion.c
drivers/acpi/acpica/evrgnini.c
drivers/acpi/acpica/evsci.c
drivers/acpi/acpica/evxface.c
drivers/acpi/acpica/evxfevnt.c
drivers/acpi/acpica/evxfgpe.c
drivers/acpi/acpica/evxfregn.c
drivers/acpi/acpica/exconfig.c
drivers/acpi/acpica/exconvrt.c
drivers/acpi/acpica/excreate.c
drivers/acpi/acpica/exdebug.c
drivers/acpi/acpica/exdump.c
drivers/acpi/acpica/exfield.c
drivers/acpi/acpica/exfldio.c
drivers/acpi/acpica/exmisc.c
drivers/acpi/acpica/exmutex.c
drivers/acpi/acpica/exnames.c
drivers/acpi/acpica/exoparg1.c
drivers/acpi/acpica/exoparg2.c
drivers/acpi/acpica/exoparg3.c
drivers/acpi/acpica/exoparg6.c
drivers/acpi/acpica/exprep.c
drivers/acpi/acpica/exregion.c
drivers/acpi/acpica/exresnte.c
drivers/acpi/acpica/exresolv.c
drivers/acpi/acpica/exresop.c
drivers/acpi/acpica/exstore.c
drivers/acpi/acpica/exstoren.c
drivers/acpi/acpica/exstorob.c
drivers/acpi/acpica/exsystem.c
drivers/acpi/acpica/exutils.c
drivers/acpi/acpica/hwacpi.c
drivers/acpi/acpica/hwesleep.c
drivers/acpi/acpica/hwgpe.c
drivers/acpi/acpica/hwpci.c
drivers/acpi/acpica/hwregs.c
drivers/acpi/acpica/hwsleep.c
drivers/acpi/acpica/hwtimer.c
drivers/acpi/acpica/hwvalid.c
drivers/acpi/acpica/hwxface.c
drivers/acpi/acpica/hwxfsleep.c
drivers/acpi/acpica/nsaccess.c
drivers/acpi/acpica/nsalloc.c
drivers/acpi/acpica/nsarguments.c
drivers/acpi/acpica/nsconvert.c
drivers/acpi/acpica/nsdump.c
drivers/acpi/acpica/nsdumpdv.c
drivers/acpi/acpica/nseval.c
drivers/acpi/acpica/nsinit.c
drivers/acpi/acpica/nsload.c
drivers/acpi/acpica/nsnames.c
drivers/acpi/acpica/nsobject.c
drivers/acpi/acpica/nsparse.c
drivers/acpi/acpica/nspredef.c
drivers/acpi/acpica/nsprepkg.c
drivers/acpi/acpica/nsrepair.c
drivers/acpi/acpica/nsrepair2.c
drivers/acpi/acpica/nssearch.c
drivers/acpi/acpica/nsutils.c
drivers/acpi/acpica/nswalk.c
drivers/acpi/acpica/nsxfeval.c
drivers/acpi/acpica/nsxfname.c
drivers/acpi/acpica/nsxfobj.c
drivers/acpi/acpica/psargs.c
drivers/acpi/acpica/psloop.c
drivers/acpi/acpica/psobject.c
drivers/acpi/acpica/psopcode.c
drivers/acpi/acpica/psopinfo.c
drivers/acpi/acpica/psparse.c
drivers/acpi/acpica/psscope.c
drivers/acpi/acpica/pstree.c
drivers/acpi/acpica/psutils.c
drivers/acpi/acpica/pswalk.c
drivers/acpi/acpica/psxface.c
drivers/acpi/acpica/rsaddr.c
drivers/acpi/acpica/rscalc.c
drivers/acpi/acpica/rscreate.c
drivers/acpi/acpica/rsdump.c
drivers/acpi/acpica/rsdumpinfo.c
drivers/acpi/acpica/rsinfo.c
drivers/acpi/acpica/rsio.c
drivers/acpi/acpica/rsirq.c
drivers/acpi/acpica/rslist.c
drivers/acpi/acpica/rsmemory.c
drivers/acpi/acpica/rsmisc.c
drivers/acpi/acpica/rsserial.c
drivers/acpi/acpica/rsutils.c
drivers/acpi/acpica/rsxface.c
drivers/acpi/acpica/tbfadt.c
drivers/acpi/acpica/tbfind.c
drivers/acpi/acpica/tbinstal.c
drivers/acpi/acpica/tbprint.c
drivers/acpi/acpica/tbutils.c
drivers/acpi/acpica/tbxface.c
drivers/acpi/acpica/tbxfload.c
drivers/acpi/acpica/tbxfroot.c
drivers/acpi/acpica/utaddress.c
drivers/acpi/acpica/utalloc.c
drivers/acpi/acpica/utbuffer.c
drivers/acpi/acpica/utcache.c
drivers/acpi/acpica/utcopy.c
drivers/acpi/acpica/utdebug.c
drivers/acpi/acpica/utdecode.c
drivers/acpi/acpica/utdelete.c
drivers/acpi/acpica/uterror.c
drivers/acpi/acpica/uteval.c
drivers/acpi/acpica/utexcep.c
drivers/acpi/acpica/utglobal.c
drivers/acpi/acpica/utids.c
drivers/acpi/acpica/utinit.c
drivers/acpi/acpica/utlock.c
drivers/acpi/acpica/utmath.c
drivers/acpi/acpica/utmisc.c
drivers/acpi/acpica/utmutex.c
drivers/acpi/acpica/utobject.c
drivers/acpi/acpica/utosi.c
drivers/acpi/acpica/utownerid.c
drivers/acpi/acpica/utpredef.c
drivers/acpi/acpica/utresrc.c
drivers/acpi/acpica/utstate.c
drivers/acpi/acpica/utstring.c
drivers/acpi/acpica/uttrack.c
drivers/acpi/acpica/utxface.c
drivers/acpi/acpica/utxferror.c
drivers/acpi/acpica/utxfinit.c
drivers/acpi/acpica/utxfmutex.c
drivers/acpi/apei/Kconfig
drivers/acpi/battery.c
drivers/acpi/battery.h [new file with mode: 0644]
drivers/acpi/bus.c
drivers/acpi/button.c
drivers/acpi/container.c
drivers/acpi/device_pm.c
drivers/acpi/dock.c
drivers/acpi/fan.c
drivers/acpi/glue.c
drivers/acpi/internal.h
drivers/acpi/osl.c
drivers/acpi/pci_irq.c
drivers/acpi/pci_link.c
drivers/acpi/pci_root.c
drivers/acpi/power.c
drivers/acpi/processor_core.c
drivers/acpi/processor_driver.c
drivers/acpi/processor_perflib.c
drivers/acpi/sbs.c
drivers/acpi/scan.c
drivers/acpi/sysfs.c
drivers/acpi/tables.c
drivers/acpi/thermal.c
drivers/acpi/utils.c
drivers/acpi/video.c
drivers/acpi/video_detect.c
drivers/ata/libata-acpi.c
drivers/base/power/Makefile
drivers/base/power/domain.c
drivers/base/power/main.c
drivers/base/power/power.h
drivers/base/power/qos.c
drivers/base/power/runtime.c
drivers/base/power/sysfs.c
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/arm_big_little.c
drivers/cpufreq/blackfin-cpufreq.c
drivers/cpufreq/cpufreq-cpu0.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_stats.c
drivers/cpufreq/cris-artpec3-cpufreq.c
drivers/cpufreq/cris-etraxfs-cpufreq.c
drivers/cpufreq/davinci-cpufreq.c
drivers/cpufreq/e_powersaver.c
drivers/cpufreq/elanfreq.c
drivers/cpufreq/exynos-cpufreq.c
drivers/cpufreq/exynos5440-cpufreq.c
drivers/cpufreq/freq_table.c
drivers/cpufreq/ia64-acpi-cpufreq.c
drivers/cpufreq/imx6q-cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/kirkwood-cpufreq.c
drivers/cpufreq/longhaul.c
drivers/cpufreq/loongson2_cpufreq.c
drivers/cpufreq/omap-cpufreq.c
drivers/cpufreq/p4-clockmod.c
drivers/cpufreq/pasemi-cpufreq.c
drivers/cpufreq/powernow-k6.c
drivers/cpufreq/powernow-k7.c
drivers/cpufreq/powernow-k8.c
drivers/cpufreq/ppc-corenet-cpufreq.c
drivers/cpufreq/ppc_cbe_cpufreq.c
drivers/cpufreq/pxa2xx-cpufreq.c
drivers/cpufreq/pxa3xx-cpufreq.c
drivers/cpufreq/s5pv210-cpufreq.c
drivers/cpufreq/sc520_freq.c
drivers/cpufreq/sh-cpufreq.c
drivers/cpufreq/sparc-us2e-cpufreq.c
drivers/cpufreq/sparc-us3-cpufreq.c
drivers/cpufreq/spear-cpufreq.c
drivers/cpufreq/speedstep-centrino.c
drivers/cpufreq/speedstep-ich.c
drivers/cpufreq/speedstep-smi.c
drivers/cpufreq/tegra-cpufreq.c
drivers/cpuidle/cpuidle.c
drivers/cpuidle/driver.c
drivers/cpuidle/governors/menu.c
drivers/devfreq/devfreq.c
drivers/gpu/drm/gma500/Kconfig
drivers/gpu/drm/i915/Kconfig
drivers/gpu/drm/nouveau/Kconfig
drivers/input/touchscreen/st1232.c
drivers/mtd/nand/sh_flctl.c
drivers/pci/hotplug/acpiphp.h
drivers/pci/hotplug/acpiphp_glue.c
drivers/pci/pci-driver.c
drivers/pcmcia/sa11xx_base.c
drivers/platform/x86/Kconfig
drivers/platform/x86/fujitsu-laptop.c
drivers/powercap/intel_rapl.c
drivers/tty/serial/sh-sci.c
drivers/video/Kconfig
drivers/video/Makefile
drivers/video/output.c [deleted file]
drivers/xen/xen-acpi-cpuhotplug.c
drivers/xen/xen-acpi-memhotplug.c
drivers/xen/xen-acpi-pad.c
include/acpi/acbuffer.h
include/acpi/acconfig.h
include/acpi/acexcep.h
include/acpi/acnames.h
include/acpi/acoutput.h
include/acpi/acpi.h
include/acpi/acpi_bus.h
include/acpi/acpi_drivers.h
include/acpi/acpiosxf.h
include/acpi/acpixf.h
include/acpi/acrestyp.h
include/acpi/actbl.h
include/acpi/actbl1.h
include/acpi/actbl2.h
include/acpi/actbl3.h
include/acpi/actypes.h
include/acpi/platform/acenv.h
include/acpi/platform/acgcc.h
include/acpi/platform/aclinux.h
include/linux/acpi.h
include/linux/cpufreq.h
include/linux/pci-acpi.h
include/linux/pm.h
include/linux/pm_qos.h
include/linux/pm_runtime.h
include/linux/video_output.h [deleted file]
include/trace/events/power.h
kernel/power/hibernate.c
kernel/power/main.c
kernel/power/power.h
kernel/power/qos.c
kernel/power/snapshot.c
kernel/power/suspend.c
kernel/power/wakelock.c

index efe449b..7dbf96b 100644 (file)
@@ -187,7 +187,7 @@ Description:
                Not all drivers support this attribute.  If it isn't supported,
                attempts to read or write it will yield I/O errors.
 
-What:          /sys/devices/.../power/pm_qos_latency_us
+What:          /sys/devices/.../power/pm_qos_resume_latency_us
 Date:          March 2012
 Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
@@ -205,6 +205,31 @@ Description:
                This attribute has no effect on system-wide suspend/resume and
                hibernation.
 
+What:          /sys/devices/.../power/pm_qos_latency_tolerance_us
+Date:          January 2014
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
+Description:
+               The /sys/devices/.../power/pm_qos_latency_tolerance_us attribute
+               contains the PM QoS active state latency tolerance limit for the
+               given device in microseconds.  That is the maximum memory access
+               latency the device can suffer without any visible adverse
+               effects on user space functionality.  If that value is the
+               string "any", the latency does not matter to user space at all,
+               but hardware should not be allowed to set the latency tolerance
+               for the device automatically.
+
+               Reading "auto" from this file means that the maximum memory
+               access latency for the device may be determined automatically
+               by the hardware as needed.  Writing "auto" to it allows the
+               hardware to be switched to this mode if there are no other
+               latency tolerance requirements from the kernel side.
+
+               This attribute is only present if the feature controlled by it
+               is supported by the hardware.
+
+               This attribute has no effect on runtime suspend and resume of
+               devices and on system-wide suspend/resume and hibernation.
+
 What:          /sys/devices/.../power/pm_qos_no_power_off
 Date:          September 2012
 Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
index 205a738..64c9276 100644 (file)
@@ -12,8 +12,9 @@ Contact:      Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power/state file controls the system power state.
                Reading from this file returns what states are supported,
-               which is hard-coded to 'standby' (Power-On Suspend), 'mem'
-               (Suspend-to-RAM), and 'disk' (Suspend-to-Disk).
+               which is hard-coded to 'freeze' (Low-Power Idle), 'standby'
+               (Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk'
+               (Suspend-to-Disk).
 
                Writing to this file one of these strings causes the system to
                transition into that state. Please see the file
index ce0666e..0060d76 100644 (file)
@@ -92,7 +92,3 @@ values:
 cpu    - number of the affected CPU
 old    - old frequency
 new    - new frequency
-
-If the cpufreq core detects the frequency has changed while the system
-was suspended, these notifiers are called with CPUFREQ_RESUMECHANGE as
-second argument.
index 8b1a445..48da5fd 100644 (file)
@@ -61,7 +61,13 @@ target_index         -       See below on the differences.
 
 And optionally
 
-cpufreq_driver.exit -          A pointer to a per-CPU cleanup function.
+cpufreq_driver.exit -          A pointer to a per-CPU cleanup
+                               function called during CPU_POST_DEAD
+                               phase of cpu hotplug process.
+
+cpufreq_driver.stop_cpu -      A pointer to a per-CPU stop function
+                               called during CPU_DOWN_PREPARE phase of
+                               cpu hotplug process.
 
 cpufreq_driver.resume -                A pointer to a per-CPU resume function
                                which is called with interrupts disabled
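
For illustration, a hedged sketch of a driver filling in the callbacks documented
above (hypothetical driver names; only the ->stop_cpu hook is new in this update,
and its role is as described in the hunk):

        #include <linux/cpufreq.h>

        /* Hypothetical callbacks; bodies reduced to comments. */
        static int foo_cpufreq_exit(struct cpufreq_policy *policy)
        {
                /* CPU_POST_DEAD phase: free per-policy resources. */
                return 0;
        }

        static void foo_cpufreq_stop_cpu(struct cpufreq_policy *policy)
        {
                /*
                 * CPU_DOWN_PREPARE phase, CPU still online: e.g. move the
                 * CPU to a safe frequency before it goes away.
                 */
        }

        static struct cpufreq_driver foo_cpufreq_driver = {
                .name           = "foo-cpufreq",
                .exit           = foo_cpufreq_exit,
                .stop_cpu       = foo_cpufreq_stop_cpu,
                /* .init, .verify, .target_index etc. omitted here. */
        };
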
index 67755ea..121d5fc 100644 (file)
@@ -231,6 +231,14 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
        acpi_no_auto_ssdt       [HW,ACPI] Disable automatic loading of SSDT
 
+       acpica_no_return_repair [HW, ACPI]
+                       Disable AML predefined validation mechanism
+                       This mechanism can repair the evaluation result to make
+                       the return objects more ACPI specification compliant.
+                       This option is useful for developers to identify the
+                       root cause of an AML interpreter issue when the issue
+                       has something to do with the repair mechanism.
+
        acpi_os_name=   [HW,ACPI] Tell ACPI BIOS the name of the OS
                        Format: To spoof as Windows 98: ="Microsoft Windows"
 
index 4836320..a5da5c7 100644 (file)
@@ -88,17 +88,19 @@ node.
 
 2. PM QoS per-device latency and flags framework
 
-For each device, there are two lists of PM QoS requests. One is maintained
-along with the aggregated target of latency value and the other is for PM QoS
-flags. Values are updated in response to changes of the request list.
+For each device, there are three lists of PM QoS requests. Two of them are
+maintained along with the aggregated targets of resume latency and active
+state latency tolerance (in microseconds) and the third one is for PM QoS flags.
+Values are updated in response to changes of the request list.
 
-Target latency value is simply the minimum of the request values held in the
-parameter list elements.  The PM QoS flags aggregate value is a gather (bitwise
-OR) of all list elements' values. Two device PM QoS flags are defined currently:
-PM_QOS_FLAG_NO_POWER_OFF and PM_QOS_FLAG_REMOTE_WAKEUP.
+The target values of resume latency and active state latency tolerance are
+simply the minimum of the request values held in the parameter list elements.
+The PM QoS flags aggregate value is a gather (bitwise OR) of all list elements'
+values.  Two device PM QoS flags are defined currently: PM_QOS_FLAG_NO_POWER_OFF
+and PM_QOS_FLAG_REMOTE_WAKEUP.
 
-Note: the aggregated target value is implemented as an atomic variable so that
-reading the aggregated value does not require any locking mechanism.
+Note: The aggregated target values are implemented in such a way that reading
+the aggregated value does not require any locking mechanism.
 
 
 From kernel mode the use of this interface is the following:
@@ -132,19 +134,21 @@ The meaning of the return values is as follows:
        PM_QOS_FLAGS_UNDEFINED: The device's PM QoS structure has not been
                        initialized or the list of requests is empty.
 
-int dev_pm_qos_add_ancestor_request(dev, handle, value)
+int dev_pm_qos_add_ancestor_request(dev, handle, type, value)
 Add a PM QoS request for the first direct ancestor of the given device whose
-power.ignore_children flag is unset.
+power.ignore_children flag is unset (for DEV_PM_QOS_RESUME_LATENCY requests)
+or whose power.set_latency_tolerance callback pointer is not NULL (for
+DEV_PM_QOS_LATENCY_TOLERANCE requests).
 
 int dev_pm_qos_expose_latency_limit(device, value)
-Add a request to the device's PM QoS list of latency constraints and create
-a sysfs attribute pm_qos_resume_latency_us under the device's power directory
-allowing user space to manipulate that request.
+Add a request to the device's PM QoS list of resume latency constraints and
+create a sysfs attribute pm_qos_resume_latency_us under the device's power
+directory allowing user space to manipulate that request.
 
 void dev_pm_qos_hide_latency_limit(device)
 Drop the request added by dev_pm_qos_expose_latency_limit() from the device's
-PM QoS list of latency constraints and remove sysfs attribute pm_qos_resume_latency_us
-from the device's power directory.
+PM QoS list of resume latency constraints and remove sysfs attribute
+pm_qos_resume_latency_us from the device's power directory.
 
 int dev_pm_qos_expose_flags(device, value)
 Add a request to the device's PM QoS list of flags and create sysfs attributes
@@ -163,7 +167,7 @@ a per-device notification tree and a global notification tree.
 int dev_pm_qos_add_notifier(device, notifier):
 Adds a notification callback function for the device.
 The callback is called when the aggregated value of the device constraints list
-is changed.
+is changed (for resume latency device PM QoS only).
 
 int dev_pm_qos_remove_notifier(device, notifier):
 Removes the notification callback function for the device.
@@ -171,14 +175,48 @@ Removes the notification callback function for the device.
 int dev_pm_qos_add_global_notifier(notifier):
 Adds a notification callback function in the global notification tree of the
 framework.
-The callback is called when the aggregated value for any device is changed.
+The callback is called when the aggregated value for any device is changed
+(for resume latency device PM QoS only).
 
 int dev_pm_qos_remove_global_notifier(notifier):
 Removes the notification callback function from the global notification tree
 of the framework.
 
 
-From user mode:
-No API for user space access to the per-device latency constraints is provided
-yet - still under discussion.
-
+Active state latency tolerance
+
+This device PM QoS type is used to support systems in which hardware may switch
+to energy-saving operation modes on the fly.  In those systems, if the operation
+mode chosen by the hardware attempts to save energy in an overly aggressive way,
+it may cause excess latencies to be visible to software, causing it to miss
+certain protocol requirements or target frame or sample rates etc.
+
+If there is a latency tolerance control mechanism for a given device available
+to software, the .set_latency_tolerance callback in that device's dev_pm_info
+structure should be populated.  The routine pointed to by it is should implement
+whatever is necessary to transfer the effective requirement value to the
+hardware.
+
+Whenever the effective latency tolerance changes for the device, its
+.set_latency_tolerance() callback will be executed and the effective value will
+be passed to it.  If that value is negative, which means that the list of
+latency tolerance requirements for the device is empty, the callback is expected
+to switch the underlying hardware latency tolerance control mechanism to an
+autonomous mode if available.  If that value is PM_QOS_LATENCY_ANY, in turn, and
+the hardware supports a special "no requirement" setting, the callback is
+expected to use it.  That allows software to prevent the hardware from
+automatically updating the device's latency tolerance in response to its power
+state changes (e.g. during transitions from D3cold to D0), which generally may
+be done in the autonomous latency tolerance control mode.
+
+If .set_latency_tolerance() is present for the device, sysfs attribute
+pm_qos_latency_tolerance_us will be present in the device's power directory.
+Then, user space can use that attribute to specify its latency tolerance
+requirement for the device, if any.  Writing "any" to it means "no requirement,
+but do not let the hardware control latency tolerance" and writing "auto" to it
+allows the hardware to be switched to the autonomous mode if there are no other
+requirements from the kernel side in the device's list.
+
+Kernel code can use the functions described above along with the
+DEV_PM_QOS_LATENCY_TOLERANCE device PM QoS type to add, remove and update
+latency tolerance requirements for devices.
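
From the driver side, the contract described above amounts to something like the
following hedged sketch (hypothetical hardware accessors; the in-tree user added
by this series is acpi_lpss_set_ltr() in drivers/acpi/acpi_lpss.c, visible later
in this patch):

        #include <linux/device.h>
        #include <linux/pm_qos.h>

        /* Hypothetical hardware accessors. */
        static void foo_hw_autonomous_ltr(struct device *dev) {}
        static void foo_hw_no_ltr_updates(struct device *dev) {}
        static void foo_hw_program_ltr(struct device *dev, s32 us) {}

        static void foo_set_latency_tolerance(struct device *dev, s32 val)
        {
                if (val < 0) {
                        /* Empty request list: hardware may manage LTR itself. */
                        foo_hw_autonomous_ltr(dev);
                } else if (val == PM_QOS_LATENCY_ANY) {
                        /* No requirement, but no automatic updates either. */
                        foo_hw_no_ltr_updates(dev);
                } else {
                        /* Program the effective tolerance (microseconds). */
                        foo_hw_program_ltr(dev, val);
                }
        }

        /*
         * Installed during bind, e.g.:
         *     dev->power.set_latency_tolerance = foo_set_latency_tolerance;
         */
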
index 3bd33b8..21d514c 100644 (file)
@@ -92,5 +92,5 @@ dev_pm_qos_remove_request          "device=%s type=%s new_value=%d"
 
 The first parameter gives the device name which tries to add/update/remove
 QoS requests.
-The second parameter gives the request type (e.g. "DEV_PM_QOS_LATENCY").
+The second parameter gives the request type (e.g. "DEV_PM_QOS_RESUME_LATENCY").
 The third parameter is value to be added/updated/removed.
index d1f3cb3..eea871f 100644 (file)
@@ -242,8 +242,8 @@ S:  Maintained
 F:     drivers/platform/x86/acer-wmi.c
 
 ACPI
-M:     Len Brown <lenb@kernel.org>
 M:     Rafael J. Wysocki <rjw@rjwysocki.net>
+M:     Len Brown <lenb@kernel.org>
 L:     linux-acpi@vger.kernel.org
 W:     https://01.org/linux-acpi
 Q:     https://patchwork.kernel.org/project/linux-acpi/list/
@@ -3658,8 +3658,8 @@ S:        Maintained
 F:     fs/freevxfs/
 
 FREEZER
-M:     Pavel Machek <pavel@ucw.cz>
 M:     "Rafael J. Wysocki" <rjw@rjwysocki.net>
+M:     Pavel Machek <pavel@ucw.cz>
 L:     linux-pm@vger.kernel.org
 S:     Supported
 F:     Documentation/power/freezing-of-tasks.txt
@@ -4023,8 +4023,8 @@ S:        Maintained
 F:     drivers/video/hgafb.c
 
 HIBERNATION (aka Software Suspend, aka swsusp)
-M:     Pavel Machek <pavel@ucw.cz>
 M:     "Rafael J. Wysocki" <rjw@rjwysocki.net>
+M:     Pavel Machek <pavel@ucw.cz>
 L:     linux-pm@vger.kernel.org
 S:     Supported
 F:     arch/x86/power/
@@ -8453,9 +8453,9 @@ F:        arch/sh/
 F:     drivers/sh/
 
 SUSPEND TO RAM
+M:     "Rafael J. Wysocki" <rjw@rjwysocki.net>
 M:     Len Brown <len.brown@intel.com>
 M:     Pavel Machek <pavel@ucw.cz>
-M:     "Rafael J. Wysocki" <rjw@rjwysocki.net>
 L:     linux-pm@vger.kernel.org
 S:     Supported
 F:     Documentation/power/
index b7b4c86..7c4fada 100644 (file)
@@ -674,8 +674,7 @@ static int cpufreq_callback(struct notifier_block *nb,
        }
 
        if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
-           (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
-           (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
+           (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
                loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
                                                global_l_p_j_ref_freq,
                                                freq->new);
index 6591e26..dfc3213 100644 (file)
@@ -166,7 +166,7 @@ static int twd_cpufreq_transition(struct notifier_block *nb,
         * frequency.  The timer is local to a cpu, so cross-call to the
         * changing cpu.
         */
-       if (state == CPUFREQ_POSTCHANGE || state == CPUFREQ_RESUMECHANGE)
+       if (state == CPUFREQ_POSTCHANGE)
                smp_call_function_single(freqs->cpu, twd_update_frequency,
                        NULL, 1);
 
index 29905b1..41f27f6 100644 (file)
@@ -885,9 +885,6 @@ static int viper_cpufreq_notifier(struct notifier_block *nb,
                        viper_set_core_cpu_voltage(freq->new, 0);
                }
                break;
-       case CPUFREQ_RESUMECHANGE:
-               viper_set_core_cpu_voltage(freq->new, 0);
-               break;
        default:
                /* ignore */
                break;
index 7ad0030..824b12a 100644 (file)
@@ -28,6 +28,7 @@
 static void __init spear1310_dt_init(void)
 {
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+       platform_device_register_simple("spear-cpufreq", -1, NULL, 0);
 }
 
 static const char * const spear1310_dt_board_compat[] = {
index 3fb6834..7b6bff7 100644 (file)
@@ -143,6 +143,7 @@ static void __init spear1340_dt_init(void)
 {
        of_platform_populate(NULL, of_default_bus_match_table,
                        spear1340_auxdata_lookup, NULL);
+       platform_device_register_simple("spear-cpufreq", -1, NULL, 0);
 }
 
 static const char * const spear1340_dt_board_compat[] = {
index 516d8a7..07aa355 100644 (file)
@@ -325,6 +325,14 @@ source "drivers/cpufreq/Kconfig"
 
 endmenu
 
+menu "Power management options"
+
+source "kernel/power/Kconfig"
+
+source "drivers/cpufreq/Kconfig"
+
+endmenu
+
 source "net/Kconfig"
 
 source "drivers/Kconfig"
index 6404acb..b4efaf2 100644 (file)
@@ -25,7 +25,6 @@ CONFIG_KEXEC=y
 CONFIG_CRASH_DUMP=y
 CONFIG_EFI_VARS=y
 CONFIG_BINFMT_MISC=m
-CONFIG_ACPI_PROCFS=y
 CONFIG_ACPI_BUTTON=m
 CONFIG_ACPI_FAN=m
 CONFIG_ACPI_DOCK=y
index 0f4e9e4..0fed9ae 100644 (file)
@@ -26,7 +26,6 @@ CONFIG_IA64_PALINFO=y
 CONFIG_KEXEC=y
 CONFIG_EFI_VARS=y
 CONFIG_BINFMT_MISC=m
-CONFIG_ACPI_PROCFS=y
 CONFIG_ACPI_BUTTON=m
 CONFIG_ACPI_FAN=m
 CONFIG_ACPI_PROCESSOR=m
index fc7aba0..54bc72e 100644 (file)
@@ -16,7 +16,6 @@ CONFIG_IA64_PALINFO=y
 CONFIG_CRASH_DUMP=y
 CONFIG_EFI_VARS=y
 CONFIG_BINFMT_MISC=y
-CONFIG_ACPI_PROCFS=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_ACPI=y
 CONFIG_PACKET=y
index 07d209c..467497a 100644 (file)
 #include <asm/sal.h>
 #include <asm/cyclone.h>
 
-#define BAD_MADT_ENTRY(entry, end) (                                        \
-               (!entry) || (unsigned long)entry + sizeof(*entry) > end ||  \
-               ((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
-
 #define PREFIX                 "ACPI: "
 
 unsigned int acpi_cpei_override;
index 1f0ebde..863d893 100644 (file)
@@ -1121,8 +1121,7 @@ oprof_cpufreq_notify(struct notifier_block *nb, unsigned long val, void *data)
        int ret = 0;
        struct cpufreq_freqs *frq = data;
        if ((val == CPUFREQ_PRECHANGE && frq->old < frq->new) ||
-           (val == CPUFREQ_POSTCHANGE && frq->old > frq->new) ||
-           (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE))
+           (val == CPUFREQ_POSTCHANGE && frq->old > frq->new))
                set_spu_profiling_frequency(frq->new, spu_cycle_reset);
        return ret;
 }
index 24e8b87..3fddf64 100644 (file)
@@ -659,8 +659,7 @@ static int sparc64_cpufreq_notifier(struct notifier_block *nb, unsigned long val
                ft->clock_tick_ref = cpu_data(cpu).clock_tick;
        }
        if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
-           (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
-           (val == CPUFREQ_RESUMECHANGE)) {
+           (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
                cpu_data(cpu).clock_tick =
                        cpufreq_scale(ft->clock_tick_ref,
                                      ft->ref_freq,
index a7fef26..619e7f7 100644 (file)
@@ -60,7 +60,6 @@ CONFIG_CRASH_DUMP=y
 CONFIG_HIBERNATION=y
 CONFIG_PM_DEBUG=y
 CONFIG_PM_TRACE_RTC=y
-CONFIG_ACPI_PROCFS=y
 CONFIG_ACPI_DOCK=y
 CONFIG_CPU_FREQ=y
 # CONFIG_CPU_FREQ_STAT is not set
index c1119d4..6181c69 100644 (file)
@@ -58,7 +58,6 @@ CONFIG_CRASH_DUMP=y
 CONFIG_HIBERNATION=y
 CONFIG_PM_DEBUG=y
 CONFIG_PM_TRACE_RTC=y
-CONFIG_ACPI_PROCFS=y
 CONFIG_ACPI_DOCK=y
 CONFIG_CPU_FREQ=y
 # CONFIG_CPU_FREQ_STAT is not set
index 9f46f2b..8e61d23 100644 (file)
@@ -53,10 +53,6 @@ EXPORT_SYMBOL(acpi_disabled);
 # include <asm/proto.h>
 #endif                         /* X86 */
 
-#define BAD_MADT_ENTRY(entry, end) (                                       \
-               (!entry) || (unsigned long)entry + sizeof(*entry) > end ||  \
-               ((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
-
 #define PREFIX                 "ACPI: "
 
 int acpi_noirq;                                /* skip ACPI IRQ initialization */
index cfbe99f..7a9296a 100644 (file)
@@ -914,8 +914,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
                tsc_khz_ref = tsc_khz;
        }
        if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
-                       (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
-                       (val == CPUFREQ_RESUMECHANGE)) {
+                       (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
                *lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
 
                tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
index 4770de5..c205653 100644 (file)
@@ -43,19 +43,6 @@ config ACPI_SLEEP
        depends on SUSPEND || HIBERNATION
        default y
 
-config ACPI_PROCFS
-       bool "Deprecated /proc/acpi files"
-       depends on PROC_FS
-       help
-         For backwards compatibility, this option allows
-         deprecated /proc/acpi/ files to exist, even when
-         they have been replaced by functions in /sys.
-
-         This option has no effect on /proc/acpi/ files
-         and functions which do not yet exist in /sys.
-
-         Say N to delete /proc/acpi/ files that have moved to /sys/
-
 config ACPI_EC_DEBUGFS
        tristate "EC read/write access through /sys/kernel/debug/ec"
        default n
@@ -115,7 +102,7 @@ config ACPI_BUTTON
 
 config ACPI_VIDEO
        tristate "Video"
-       depends on X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL
+       depends on X86 && BACKLIGHT_CLASS_DEVICE
        depends on INPUT
        select THERMAL
        help
@@ -343,6 +330,19 @@ config ACPI_BGRT
          data from the firmware boot splash. It will appear under
          /sys/firmware/acpi/bgrt/ .
 
+config ACPI_REDUCED_HARDWARE_ONLY
+       bool "Hardware-reduced ACPI support only" if EXPERT
+       def_bool n
+       depends on ACPI
+       help
+       This config item changes the way the ACPI code is built.  When this
+       option is selected, the kernel will use a specialized version of
+       ACPICA that ONLY supports the ACPI "reduced hardware" mode.  The
+       resulting kernel will be smaller but it will also be restricted to
+       running in ACPI reduced hardware mode ONLY.
+
+       If you are unsure what to do, do not enable this option.
+
 source "drivers/acpi/apei/Kconfig"
 
 config ACPI_EXTLOG
index 6f190bc..2c01c1d 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/platform_device.h>
 #include <linux/power_supply.h>
 #include <linux/acpi.h>
+#include "battery.h"
 
 #define PREFIX "ACPI: "
 
@@ -57,6 +58,7 @@ struct acpi_ac {
        struct power_supply charger;
        struct platform_device *pdev;
        unsigned long long state;
+       struct notifier_block battery_nb;
 };
 
 #define to_acpi_ac(x) container_of(x, struct acpi_ac, charger)
@@ -152,6 +154,26 @@ static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data)
        return;
 }
 
+static int acpi_ac_battery_notify(struct notifier_block *nb,
+                                 unsigned long action, void *data)
+{
+       struct acpi_ac *ac = container_of(nb, struct acpi_ac, battery_nb);
+       struct acpi_bus_event *event = (struct acpi_bus_event *)data;
+
+       /*
+        * On HP Pavilion dv6-6179er AC status notifications aren't triggered
+        * when adapter is plugged/unplugged. However, battery status
+        * notifcations are triggered when battery starts charging or
+        * discharging. Re-reading AC status triggers lost AC notifications,
+        * if AC status has changed.
+        */
+       if (strcmp(event->device_class, ACPI_BATTERY_CLASS) == 0 &&
+           event->type == ACPI_BATTERY_NOTIFY_STATUS)
+               acpi_ac_get_state(ac);
+
+       return NOTIFY_OK;
+}
+
 static int thinkpad_e530_quirk(const struct dmi_system_id *d)
 {
        ac_sleep_before_get_state_ms = 1000;
@@ -215,6 +237,8 @@ static int acpi_ac_probe(struct platform_device *pdev)
               acpi_device_name(adev), acpi_device_bid(adev),
               ac->state ? "on-line" : "off-line");
 
+       ac->battery_nb.notifier_call = acpi_ac_battery_notify;
+       register_acpi_notifier(&ac->battery_nb);
 end:
        if (result)
                kfree(ac);
@@ -261,6 +285,7 @@ static int acpi_ac_remove(struct platform_device *pdev)
        ac = platform_get_drvdata(pdev);
        if (ac->charger.dev)
                power_supply_unregister(&ac->charger);
+       unregister_acpi_notifier(&ac->battery_nb);
 
        kfree(ac);
 
index 84190ed..961b45d 100644 (file)
@@ -18,8 +18,6 @@
 
 #include "internal.h"
 
-#define PREFIX "ACPI: "
-
 ACPI_MODULE_NAME("cmos rtc");
 
 static const struct acpi_device_id acpi_cmos_rtc_ids[] = {
index 6745fe1..69e29f4 100644 (file)
@@ -33,6 +33,13 @@ ACPI_MODULE_NAME("acpi_lpss");
 #define LPSS_GENERAL_UART_RTS_OVRD     BIT(3)
 #define LPSS_SW_LTR                    0x10
 #define LPSS_AUTO_LTR                  0x14
+#define LPSS_LTR_SNOOP_REQ             BIT(15)
+#define LPSS_LTR_SNOOP_MASK            0x0000FFFF
+#define LPSS_LTR_SNOOP_LAT_1US         0x800
+#define LPSS_LTR_SNOOP_LAT_32US                0xC00
+#define LPSS_LTR_SNOOP_LAT_SHIFT       5
+#define LPSS_LTR_SNOOP_LAT_CUTOFF      3000
+#define LPSS_LTR_MAX_VAL               0x3FF
 #define LPSS_TX_INT                    0x20
 #define LPSS_TX_INT_MASK               BIT(1)
 
@@ -102,6 +109,16 @@ static struct lpss_device_desc lpt_sdio_dev_desc = {
        .ltr_required = true,
 };
 
+static struct lpss_shared_clock pwm_clock = {
+       .name = "pwm_clk",
+       .rate = 25000000,
+};
+
+static struct lpss_device_desc byt_pwm_dev_desc = {
+       .clk_required = true,
+       .shared_clock = &pwm_clock,
+};
+
 static struct lpss_shared_clock uart_clock = {
        .name = "uart_clk",
        .rate = 44236800,
@@ -157,6 +174,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
        { "INT33C7", },
 
        /* BayTrail LPSS devices */
+       { "80860F09", (unsigned long)&byt_pwm_dev_desc },
        { "80860F0A", (unsigned long)&byt_uart_dev_desc },
        { "80860F0E", (unsigned long)&byt_spi_dev_desc },
        { "80860F14", (unsigned long)&byt_sdio_dev_desc },
@@ -315,6 +333,17 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
        return ret;
 }
 
+static u32 __lpss_reg_read(struct lpss_private_data *pdata, unsigned int reg)
+{
+       return readl(pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
+}
+
+static void __lpss_reg_write(u32 val, struct lpss_private_data *pdata,
+                            unsigned int reg)
+{
+       writel(val, pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
+}
+
 static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
 {
        struct acpi_device *adev;
@@ -336,7 +365,7 @@ static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
                ret = -ENODEV;
                goto out;
        }
-       *val = readl(pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
+       *val = __lpss_reg_read(pdata, reg);
 
  out:
        spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -389,6 +418,37 @@ static struct attribute_group lpss_attr_group = {
        .name = "lpss_ltr",
 };
 
+static void acpi_lpss_set_ltr(struct device *dev, s32 val)
+{
+       struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
+       u32 ltr_mode, ltr_val;
+
+       ltr_mode = __lpss_reg_read(pdata, LPSS_GENERAL);
+       if (val < 0) {
+               if (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) {
+                       ltr_mode &= ~LPSS_GENERAL_LTR_MODE_SW;
+                       __lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
+               }
+               return;
+       }
+       ltr_val = __lpss_reg_read(pdata, LPSS_SW_LTR) & ~LPSS_LTR_SNOOP_MASK;
+       if (val >= LPSS_LTR_SNOOP_LAT_CUTOFF) {
+               ltr_val |= LPSS_LTR_SNOOP_LAT_32US;
+               val = LPSS_LTR_MAX_VAL;
+       } else if (val > LPSS_LTR_MAX_VAL) {
+               ltr_val |= LPSS_LTR_SNOOP_LAT_32US | LPSS_LTR_SNOOP_REQ;
+               val >>= LPSS_LTR_SNOOP_LAT_SHIFT;
+       } else {
+               ltr_val |= LPSS_LTR_SNOOP_LAT_1US | LPSS_LTR_SNOOP_REQ;
+       }
+       ltr_val |= val;
+       __lpss_reg_write(ltr_val, pdata, LPSS_SW_LTR);
+       if (!(ltr_mode & LPSS_GENERAL_LTR_MODE_SW)) {
+               ltr_mode |= LPSS_GENERAL_LTR_MODE_SW;
+               __lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
+       }
+}
+
 static int acpi_lpss_platform_notify(struct notifier_block *nb,
                                     unsigned long action, void *data)
 {
@@ -426,9 +486,29 @@ static struct notifier_block acpi_lpss_nb = {
        .notifier_call = acpi_lpss_platform_notify,
 };
 
+static void acpi_lpss_bind(struct device *dev)
+{
+       struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
+
+       if (!pdata || !pdata->mmio_base || !pdata->dev_desc->ltr_required)
+               return;
+
+       if (pdata->mmio_size >= pdata->dev_desc->prv_offset + LPSS_LTR_SIZE)
+               dev->power.set_latency_tolerance = acpi_lpss_set_ltr;
+       else
+               dev_err(dev, "MMIO size insufficient to access LTR\n");
+}
+
+static void acpi_lpss_unbind(struct device *dev)
+{
+       dev->power.set_latency_tolerance = NULL;
+}
+
 static struct acpi_scan_handler lpss_handler = {
        .ids = acpi_lpss_device_ids,
        .attach = acpi_lpss_create_device,
+       .bind = acpi_lpss_bind,
+       .unbind = acpi_lpss_unbind,
 };
 
 void __init acpi_lpss_init(void)
index df96a0f..37d7302 100644 (file)
@@ -408,28 +408,14 @@ static int acpi_pad_pur(acpi_handle handle)
        return num;
 }
 
-/* Notify firmware how many CPUs are idle */
-static void acpi_pad_ost(acpi_handle handle, int stat,
-       uint32_t idle_cpus)
-{
-       union acpi_object params[3] = {
-               {.type = ACPI_TYPE_INTEGER,},
-               {.type = ACPI_TYPE_INTEGER,},
-               {.type = ACPI_TYPE_BUFFER,},
-       };
-       struct acpi_object_list arg_list = {3, params};
-
-       params[0].integer.value = ACPI_PROCESSOR_AGGREGATOR_NOTIFY;
-       params[1].integer.value =  stat;
-       params[2].buffer.length = 4;
-       params[2].buffer.pointer = (void *)&idle_cpus;
-       acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
-}
-
 static void acpi_pad_handle_notify(acpi_handle handle)
 {
        int num_cpus;
        uint32_t idle_cpus;
+       struct acpi_buffer param = {
+               .length = 4,
+               .pointer = (void *)&idle_cpus,
+       };
 
        mutex_lock(&isolated_cpus_lock);
        num_cpus = acpi_pad_pur(handle);
@@ -439,7 +425,7 @@ static void acpi_pad_handle_notify(acpi_handle handle)
        }
        acpi_pad_idle_cpus(num_cpus);
        idle_cpus = acpi_pad_idle_cpus_num();
-       acpi_pad_ost(handle, 0, idle_cpus);
+       acpi_evaluate_ost(handle, ACPI_PROCESSOR_AGGREGATOR_NOTIFY, 0, &param);
        mutex_unlock(&isolated_cpus_lock);
 }
 
index 4383040..b7ed86a 100644 (file)
@@ -122,6 +122,8 @@ acpi-y +=           \
        rsaddr.o        \
        rscalc.o        \
        rscreate.o      \
+       rsdump.o        \
+       rsdumpinfo.o    \
        rsinfo.o        \
        rsio.o          \
        rsirq.o         \
@@ -132,8 +134,6 @@ acpi-y +=           \
        rsutils.o       \
        rsxface.o
 
-acpi-$(ACPI_FUTURE_USAGE) += rsdump.o rsdumpinfo.o
-
 acpi-y +=              \
        tbfadt.o        \
        tbfind.o        \
index 8a6c4a0..6f1c616 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 2bf3ca2..68a91eb 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -115,6 +115,8 @@ ACPI_HW_DEPENDENT_RETURN_VOID(void
                                                   char *block_arg))
 ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_db_generate_sci(void))
 
+void acpi_db_execute_test(char *type_arg);
+
 /*
  * dbconvert - miscellaneous conversion routines
  */
index 427db72..5b472c4 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 0fb0adf..68ec61f 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 4ed1aa3..8f40bb9 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * to simplify maintenance of the code.
  */
 #ifdef DEFINE_ACPI_GLOBALS
-#define ACPI_EXTERN
-#define ACPI_INIT_GLOBAL(a,b) a=b
+#define ACPI_GLOBAL(type,name) \
+       extern type name; \
+       type name
+
+#define ACPI_INIT_GLOBAL(type,name,value) \
+       type name=value
+
 #else
-#define ACPI_EXTERN extern
-#define ACPI_INIT_GLOBAL(a,b) a
+#define ACPI_GLOBAL(type,name) \
+       extern type name
+
+#define ACPI_INIT_GLOBAL(type,name,value) \
+       extern type name
 #endif
 
 #ifdef DEFINE_ACPI_GLOBALS
@@ -82,7 +90,7 @@
  * 5) Allow unresolved references (invalid target name) in package objects
  * 6) Enable warning messages for behavior that is not ACPI spec compliant
  */
-u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_interpreter_slack, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_enable_interpreter_slack, FALSE);
 
 /*
  * Automatically serialize ALL control methods? Default is FALSE, meaning
@@ -90,25 +98,25 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_interpreter_slack, FALSE);
  * Only change this if the ASL code is poorly written and cannot handle
  * reentrancy even though methods are marked "NotSerialized".
  */
-u8 ACPI_INIT_GLOBAL(acpi_gbl_all_methods_serialized, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_all_methods_serialized, FALSE);
 
 /*
  * Create the predefined _OSI method in the namespace? Default is TRUE
  * because ACPI CA is fully compatible with other ACPI implementations.
  * Changing this will revert ACPI CA (and machine ASL) to pre-OSI behavior.
  */
-u8 ACPI_INIT_GLOBAL(acpi_gbl_create_osi_method, TRUE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_create_osi_method, TRUE);
 
 /*
  * Optionally use default values for the ACPI register widths. Set this to
  * TRUE to use the defaults, if an FADT contains incorrect widths/lengths.
  */
-u8 ACPI_INIT_GLOBAL(acpi_gbl_use_default_register_widths, TRUE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_use_default_register_widths, TRUE);
 
 /*
  * Optionally enable output from the AML Debug Object.
  */
-u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_aml_debug_object, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_enable_aml_debug_object, FALSE);
 
 /*
  * Optionally copy the entire DSDT to local memory (instead of simply
@@ -116,7 +124,7 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_aml_debug_object, FALSE);
  * DSDT, creating the need for this option. Default is FALSE, do not copy
  * the DSDT.
  */
-u8 ACPI_INIT_GLOBAL(acpi_gbl_copy_dsdt_locally, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_copy_dsdt_locally, FALSE);
 
 /*
  * Optionally ignore an XSDT if present and use the RSDT instead.
@@ -124,7 +132,7 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_copy_dsdt_locally, FALSE);
  * of the RSDT, the XSDT has been found to be corrupt or ill-formed on
  * some machines. Default behavior is to use the XSDT if present.
  */
-u8 ACPI_INIT_GLOBAL(acpi_gbl_do_not_use_xsdt, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE);
 
 /*
  * Optionally use 32-bit FADT addresses if and when there is a conflict
@@ -134,7 +142,7 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_do_not_use_xsdt, FALSE);
  * some machines have been found to have a corrupted non-zero 64-bit
  * address. Default is FALSE, do not favor the 32-bit addresses.
  */
-u8 ACPI_INIT_GLOBAL(acpi_gbl_use32_bit_fadt_addresses, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, FALSE);
 
 /*
  * Optionally truncate I/O addresses to 16 bits. Provides compatibility
@@ -142,47 +150,28 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_use32_bit_fadt_addresses, FALSE);
  * this value is set to TRUE if any Windows OSI strings have been
  * requested by the BIOS.
  */
-u8 ACPI_INIT_GLOBAL(acpi_gbl_truncate_io_addresses, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_truncate_io_addresses, FALSE);
 
 /*
  * Disable runtime checking and repair of values returned by control methods.
  * Use only if the repair is causing a problem on a particular machine.
  */
-u8 ACPI_INIT_GLOBAL(acpi_gbl_disable_auto_repair, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_disable_auto_repair, FALSE);
 
 /*
  * Optionally do not load any SSDTs from the RSDT/XSDT during initialization.
  * This can be useful for debugging ACPI problems on some machines.
  */
-u8 ACPI_INIT_GLOBAL(acpi_gbl_disable_ssdt_table_load, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_disable_ssdt_table_load, FALSE);
 
 /*
  * We keep track of the latest version of Windows that has been requested by
  * the BIOS.
  */
-u8 ACPI_INIT_GLOBAL(acpi_gbl_osi_data, 0);
-
-/* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */
-
-struct acpi_table_fadt acpi_gbl_FADT;
-u32 acpi_current_gpe_count;
-u32 acpi_gbl_trace_flags;
-acpi_name acpi_gbl_trace_method_name;
-u8 acpi_gbl_system_awake_and_running;
-
-/*
- * ACPI 5.0 introduces the concept of a "reduced hardware platform", meaning
- * that the ACPI hardware is no longer required. A flag in the FADT indicates
- * a reduced HW machine, and that flag is duplicated here for convenience.
- */
-u8 acpi_gbl_reduced_hardware;
+ACPI_INIT_GLOBAL(u8, acpi_gbl_osi_data, 0);
 
 #endif                         /* DEFINE_ACPI_GLOBALS */
 
-/* Do not disassemble buffers to resource descriptors */
-
-ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_no_resource_disassembly, FALSE);
-
 /*****************************************************************************
  *
  * ACPI Table globals
@@ -190,37 +179,36 @@ ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_no_resource_disassembly, FALSE);
  ****************************************************************************/
 
 /*
- * acpi_gbl_root_table_list is the master list of ACPI tables that were
- * found in the RSDT/XSDT.
+ * Master list of all ACPI tables that were found in the RSDT/XSDT.
  */
-ACPI_EXTERN struct acpi_table_list acpi_gbl_root_table_list;
+ACPI_GLOBAL(struct acpi_table_list, acpi_gbl_root_table_list);
+
+/* DSDT information. Used to check for DSDT corruption */
+
+ACPI_GLOBAL(struct acpi_table_header *, acpi_gbl_DSDT);
+ACPI_GLOBAL(struct acpi_table_header, acpi_gbl_original_dsdt_header);
 
 #if (!ACPI_REDUCED_HARDWARE)
-ACPI_EXTERN struct acpi_table_facs *acpi_gbl_FACS;
+ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_FACS);
 
 #endif                         /* !ACPI_REDUCED_HARDWARE */
 
 /* These addresses are calculated from the FADT Event Block addresses */
 
-ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1a_status;
-ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1a_enable;
-
-ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_status;
-ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_enable;
+ACPI_GLOBAL(struct acpi_generic_address, acpi_gbl_xpm1a_status);
+ACPI_GLOBAL(struct acpi_generic_address, acpi_gbl_xpm1a_enable);
 
-/* DSDT information. Used to check for DSDT corruption */
-
-ACPI_EXTERN struct acpi_table_header *acpi_gbl_DSDT;
-ACPI_EXTERN struct acpi_table_header acpi_gbl_original_dsdt_header;
+ACPI_GLOBAL(struct acpi_generic_address, acpi_gbl_xpm1b_status);
+ACPI_GLOBAL(struct acpi_generic_address, acpi_gbl_xpm1b_enable);
 
 /*
- * Handle both ACPI 1.0 and ACPI 2.0 Integer widths. The integer width is
+ * Handle both ACPI 1.0 and ACPI 2.0+ Integer widths. The integer width is
  * determined by the revision of the DSDT: If the DSDT revision is less than
  * 2, use only the lower 32 bits of the internal 64-bit Integer.
  */
-ACPI_EXTERN u8 acpi_gbl_integer_bit_width;
-ACPI_EXTERN u8 acpi_gbl_integer_byte_width;
-ACPI_EXTERN u8 acpi_gbl_integer_nybble_width;
+ACPI_GLOBAL(u8, acpi_gbl_integer_bit_width);
+ACPI_GLOBAL(u8, acpi_gbl_integer_byte_width);
+ACPI_GLOBAL(u8, acpi_gbl_integer_nybble_width);
 
 /*****************************************************************************
  *
@@ -233,36 +221,36 @@ ACPI_EXTERN u8 acpi_gbl_integer_nybble_width;
  * actual OS mutex handles, indexed by the local ACPI_MUTEX_HANDLEs.
  * (The table maps local handles to the real OS handles)
  */
-ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[ACPI_NUM_MUTEX];
+ACPI_GLOBAL(struct acpi_mutex_info, acpi_gbl_mutex_info[ACPI_NUM_MUTEX]);
 
 /*
  * Global lock mutex is an actual AML mutex object
  * Global lock semaphore works in conjunction with the actual global lock
  * Global lock spinlock is used for "pending" handshake
  */
-ACPI_EXTERN union acpi_operand_object *acpi_gbl_global_lock_mutex;
-ACPI_EXTERN acpi_semaphore acpi_gbl_global_lock_semaphore;
-ACPI_EXTERN acpi_spinlock acpi_gbl_global_lock_pending_lock;
-ACPI_EXTERN u16 acpi_gbl_global_lock_handle;
-ACPI_EXTERN u8 acpi_gbl_global_lock_acquired;
-ACPI_EXTERN u8 acpi_gbl_global_lock_present;
-ACPI_EXTERN u8 acpi_gbl_global_lock_pending;
+ACPI_GLOBAL(union acpi_operand_object *, acpi_gbl_global_lock_mutex);
+ACPI_GLOBAL(acpi_semaphore, acpi_gbl_global_lock_semaphore);
+ACPI_GLOBAL(acpi_spinlock, acpi_gbl_global_lock_pending_lock);
+ACPI_GLOBAL(u16, acpi_gbl_global_lock_handle);
+ACPI_GLOBAL(u8, acpi_gbl_global_lock_acquired);
+ACPI_GLOBAL(u8, acpi_gbl_global_lock_present);
+ACPI_GLOBAL(u8, acpi_gbl_global_lock_pending);
 
 /*
 * Spinlocks are used for interfaces that may be called at
 * interrupt level
  */
-ACPI_EXTERN acpi_spinlock acpi_gbl_gpe_lock;   /* For GPE data structs and registers */
-ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock;      /* For ACPI H/W except GPE registers */
-ACPI_EXTERN acpi_spinlock acpi_gbl_reference_count_lock;
+ACPI_GLOBAL(acpi_spinlock, acpi_gbl_gpe_lock); /* For GPE data structs and registers */
+ACPI_GLOBAL(acpi_spinlock, acpi_gbl_hardware_lock);    /* For ACPI H/W except GPE registers */
+ACPI_GLOBAL(acpi_spinlock, acpi_gbl_reference_count_lock);
 
 /* Mutex for _OSI support */
 
-ACPI_EXTERN acpi_mutex acpi_gbl_osi_mutex;
+ACPI_GLOBAL(acpi_mutex, acpi_gbl_osi_mutex);
 
 /* Reader/Writer lock is used for namespace walk and dynamic table unload */
 
-ACPI_EXTERN struct acpi_rw_lock acpi_gbl_namespace_rw_lock;
+ACPI_GLOBAL(struct acpi_rw_lock, acpi_gbl_namespace_rw_lock);
 
 /*****************************************************************************
  *
@@ -272,70 +260,69 @@ ACPI_EXTERN struct acpi_rw_lock acpi_gbl_namespace_rw_lock;
 
 /* Object caches */
 
-ACPI_EXTERN acpi_cache_t *acpi_gbl_namespace_cache;
-ACPI_EXTERN acpi_cache_t *acpi_gbl_state_cache;
-ACPI_EXTERN acpi_cache_t *acpi_gbl_ps_node_cache;
-ACPI_EXTERN acpi_cache_t *acpi_gbl_ps_node_ext_cache;
-ACPI_EXTERN acpi_cache_t *acpi_gbl_operand_cache;
+ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_namespace_cache);
+ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_state_cache);
+ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_ps_node_cache);
+ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_ps_node_ext_cache);
+ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_operand_cache);
+
+/* System */
+
+ACPI_INIT_GLOBAL(u32, acpi_gbl_startup_flags, 0);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_shutdown, TRUE);
 
 /* Global handlers */
 
-ACPI_EXTERN struct acpi_global_notify_handler acpi_gbl_global_notify[2];
-ACPI_EXTERN acpi_exception_handler acpi_gbl_exception_handler;
-ACPI_EXTERN acpi_init_handler acpi_gbl_init_handler;
-ACPI_EXTERN acpi_table_handler acpi_gbl_table_handler;
-ACPI_EXTERN void *acpi_gbl_table_handler_context;
-ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk;
-ACPI_EXTERN acpi_interface_handler acpi_gbl_interface_handler;
-ACPI_EXTERN struct acpi_sci_handler_info *acpi_gbl_sci_handler_list;
+ACPI_GLOBAL(struct acpi_global_notify_handler, acpi_gbl_global_notify[2]);
+ACPI_GLOBAL(acpi_exception_handler, acpi_gbl_exception_handler);
+ACPI_GLOBAL(acpi_init_handler, acpi_gbl_init_handler);
+ACPI_GLOBAL(acpi_table_handler, acpi_gbl_table_handler);
+ACPI_GLOBAL(void *, acpi_gbl_table_handler_context);
+ACPI_GLOBAL(struct acpi_walk_state *, acpi_gbl_breakpoint_walk);
+ACPI_GLOBAL(acpi_interface_handler, acpi_gbl_interface_handler);
+ACPI_GLOBAL(struct acpi_sci_handler_info *, acpi_gbl_sci_handler_list);
 
 /* Owner ID support */
 
-ACPI_EXTERN u32 acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS];
-ACPI_EXTERN u8 acpi_gbl_last_owner_id_index;
-ACPI_EXTERN u8 acpi_gbl_next_owner_id_offset;
+ACPI_GLOBAL(u32, acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS]);
+ACPI_GLOBAL(u8, acpi_gbl_last_owner_id_index);
+ACPI_GLOBAL(u8, acpi_gbl_next_owner_id_offset);
 
 /* Initialization sequencing */
 
-ACPI_EXTERN u8 acpi_gbl_reg_methods_executed;
+ACPI_GLOBAL(u8, acpi_gbl_reg_methods_executed);
 
 /* Misc */
 
-ACPI_EXTERN u32 acpi_gbl_original_mode;
-ACPI_EXTERN u32 acpi_gbl_rsdp_original_location;
-ACPI_EXTERN u32 acpi_gbl_ns_lookup_count;
-ACPI_EXTERN u32 acpi_gbl_ps_find_count;
-ACPI_EXTERN u16 acpi_gbl_pm1_enable_register_save;
-ACPI_EXTERN u8 acpi_gbl_debugger_configuration;
-ACPI_EXTERN u8 acpi_gbl_step_to_next_call;
-ACPI_EXTERN u8 acpi_gbl_acpi_hardware_present;
-ACPI_EXTERN u8 acpi_gbl_events_initialized;
-ACPI_EXTERN struct acpi_interface_info *acpi_gbl_supported_interfaces;
-ACPI_EXTERN struct acpi_address_range
-    *acpi_gbl_address_range_list[ACPI_ADDRESS_RANGE_MAX];
-
-#ifndef DEFINE_ACPI_GLOBALS
-
-/* Other miscellaneous */
-
-extern u8 acpi_gbl_shutdown;
-extern u32 acpi_gbl_startup_flags;
+ACPI_GLOBAL(u32, acpi_gbl_original_mode);
+ACPI_GLOBAL(u32, acpi_gbl_rsdp_original_location);
+ACPI_GLOBAL(u32, acpi_gbl_ns_lookup_count);
+ACPI_GLOBAL(u32, acpi_gbl_ps_find_count);
+ACPI_GLOBAL(u16, acpi_gbl_pm1_enable_register_save);
+ACPI_GLOBAL(u8, acpi_gbl_debugger_configuration);
+ACPI_GLOBAL(u8, acpi_gbl_step_to_next_call);
+ACPI_GLOBAL(u8, acpi_gbl_acpi_hardware_present);
+ACPI_GLOBAL(u8, acpi_gbl_events_initialized);
+ACPI_GLOBAL(struct acpi_interface_info *, acpi_gbl_supported_interfaces);
+ACPI_GLOBAL(struct acpi_address_range *,
+           acpi_gbl_address_range_list[ACPI_ADDRESS_RANGE_MAX]);
+
+/* Other miscellaneous globals, declared and initialized in utglobal */
+
 extern const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT];
 extern const char *acpi_gbl_lowest_dstate_names[ACPI_NUM_sx_w_METHODS];
 extern const char *acpi_gbl_highest_dstate_names[ACPI_NUM_sx_d_METHODS];
-extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES];
 extern const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS];
-
-#endif
+extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES];
 
 #ifdef ACPI_DBG_TRACK_ALLOCATIONS
 
-/* Lists for tracking memory allocations */
+/* Lists for tracking memory allocations (debug only) */
 
-ACPI_EXTERN struct acpi_memory_list *acpi_gbl_global_list;
-ACPI_EXTERN struct acpi_memory_list *acpi_gbl_ns_node_list;
-ACPI_EXTERN u8 acpi_gbl_display_final_mem_stats;
-ACPI_EXTERN u8 acpi_gbl_disable_mem_tracking;
+ACPI_GLOBAL(struct acpi_memory_list *, acpi_gbl_global_list);
+ACPI_GLOBAL(struct acpi_memory_list *, acpi_gbl_ns_node_list);
+ACPI_GLOBAL(u8, acpi_gbl_display_final_mem_stats);
+ACPI_GLOBAL(u8, acpi_gbl_disable_mem_tracking);
 #endif
 
 /*****************************************************************************
@@ -350,22 +337,23 @@ ACPI_EXTERN u8 acpi_gbl_disable_mem_tracking;
 #define NUM_PREDEFINED_NAMES            9
 #endif
 
-ACPI_EXTERN struct acpi_namespace_node acpi_gbl_root_node_struct;
-ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_root_node;
-ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_fadt_gpe_device;
-ACPI_EXTERN union acpi_operand_object *acpi_gbl_module_code_list;
+ACPI_GLOBAL(struct acpi_namespace_node, acpi_gbl_root_node_struct);
+ACPI_GLOBAL(struct acpi_namespace_node *, acpi_gbl_root_node);
+ACPI_GLOBAL(struct acpi_namespace_node *, acpi_gbl_fadt_gpe_device);
+ACPI_GLOBAL(union acpi_operand_object *, acpi_gbl_module_code_list);
 
 extern const u8 acpi_gbl_ns_properties[ACPI_NUM_NS_TYPES];
 extern const struct acpi_predefined_names
     acpi_gbl_pre_defined_names[NUM_PREDEFINED_NAMES];
 
 #ifdef ACPI_DEBUG_OUTPUT
-ACPI_EXTERN u32 acpi_gbl_current_node_count;
-ACPI_EXTERN u32 acpi_gbl_current_node_size;
-ACPI_EXTERN u32 acpi_gbl_max_concurrent_node_count;
-ACPI_EXTERN acpi_size *acpi_gbl_entry_stack_pointer;
-ACPI_EXTERN acpi_size *acpi_gbl_lowest_stack_pointer;
-ACPI_EXTERN u32 acpi_gbl_deepest_nesting;
+ACPI_GLOBAL(u32, acpi_gbl_current_node_count);
+ACPI_GLOBAL(u32, acpi_gbl_current_node_size);
+ACPI_GLOBAL(u32, acpi_gbl_max_concurrent_node_count);
+ACPI_GLOBAL(acpi_size *, acpi_gbl_entry_stack_pointer);
+ACPI_GLOBAL(acpi_size *, acpi_gbl_lowest_stack_pointer);
+ACPI_GLOBAL(u32, acpi_gbl_deepest_nesting);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_nesting_level, 0);
 #endif
 
 /*****************************************************************************
@@ -374,11 +362,11 @@ ACPI_EXTERN u32 acpi_gbl_deepest_nesting;
  *
  ****************************************************************************/
 
-ACPI_EXTERN struct acpi_thread_state *acpi_gbl_current_walk_list;
+ACPI_GLOBAL(struct acpi_thread_state *, acpi_gbl_current_walk_list);
 
 /* Control method single step flag */
 
-ACPI_EXTERN u8 acpi_gbl_cm_single_step;
+ACPI_GLOBAL(u8, acpi_gbl_cm_single_step);
 
 /*****************************************************************************
  *
@@ -388,8 +376,9 @@ ACPI_EXTERN u8 acpi_gbl_cm_single_step;
 
 extern struct acpi_bit_register_info
     acpi_gbl_bit_register_info[ACPI_NUM_BITREG];
-ACPI_EXTERN u8 acpi_gbl_sleep_type_a;
-ACPI_EXTERN u8 acpi_gbl_sleep_type_b;
+
+ACPI_GLOBAL(u8, acpi_gbl_sleep_type_a);
+ACPI_GLOBAL(u8, acpi_gbl_sleep_type_b);
 
 /*****************************************************************************
  *
@@ -399,14 +388,15 @@ ACPI_EXTERN u8 acpi_gbl_sleep_type_b;
 
 #if (!ACPI_REDUCED_HARDWARE)
 
-ACPI_EXTERN u8 acpi_gbl_all_gpes_initialized;
-ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
-ACPI_EXTERN struct acpi_gpe_block_info
-    *acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
-ACPI_EXTERN acpi_gbl_event_handler acpi_gbl_global_event_handler;
-ACPI_EXTERN void *acpi_gbl_global_event_handler_context;
-ACPI_EXTERN struct acpi_fixed_event_handler
-    acpi_gbl_fixed_event_handlers[ACPI_NUM_FIXED_EVENTS];
+ACPI_GLOBAL(u8, acpi_gbl_all_gpes_initialized);
+ACPI_GLOBAL(struct acpi_gpe_xrupt_info *, acpi_gbl_gpe_xrupt_list_head);
+ACPI_GLOBAL(struct acpi_gpe_block_info *,
+           acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS]);
+ACPI_GLOBAL(acpi_gbl_event_handler, acpi_gbl_global_event_handler);
+ACPI_GLOBAL(void *, acpi_gbl_global_event_handler_context);
+ACPI_GLOBAL(struct acpi_fixed_event_handler,
+           acpi_gbl_fixed_event_handlers[ACPI_NUM_FIXED_EVENTS]);
+
 extern struct acpi_fixed_event_info
     acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS];
 
@@ -418,23 +408,19 @@ extern struct acpi_fixed_event_info
  *
  ****************************************************************************/
 
-/* Procedure nesting level for debug output */
-
-extern u32 acpi_gbl_nesting_level;
-
 /* Event counters */
 
-ACPI_EXTERN u32 acpi_method_count;
-ACPI_EXTERN u32 acpi_gpe_count;
-ACPI_EXTERN u32 acpi_sci_count;
-ACPI_EXTERN u32 acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS];
+ACPI_GLOBAL(u32, acpi_method_count);
+ACPI_GLOBAL(u32, acpi_gpe_count);
+ACPI_GLOBAL(u32, acpi_sci_count);
+ACPI_GLOBAL(u32, acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS]);
 
 /* Support for dynamic control method tracing mechanism */
 
-ACPI_EXTERN u32 acpi_gbl_original_dbg_level;
-ACPI_EXTERN u32 acpi_gbl_original_dbg_layer;
-ACPI_EXTERN u32 acpi_gbl_trace_dbg_level;
-ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer;
+ACPI_GLOBAL(u32, acpi_gbl_original_dbg_level);
+ACPI_GLOBAL(u32, acpi_gbl_original_dbg_layer);
+ACPI_GLOBAL(u32, acpi_gbl_trace_dbg_level);
+ACPI_GLOBAL(u32, acpi_gbl_trace_dbg_layer);
 
 /*****************************************************************************
  *
@@ -442,61 +428,64 @@ ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer;
  *
  ****************************************************************************/
 
-ACPI_EXTERN u8 acpi_gbl_db_output_flags;
+ACPI_GLOBAL(u8, acpi_gbl_db_output_flags);
 
 #ifdef ACPI_DISASSEMBLER
 
-ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_ignore_noop_operator, FALSE);
+/* Do not disassemble buffers to resource descriptors */
+
+ACPI_INIT_GLOBAL(u8, acpi_gbl_no_resource_disassembly, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_ignore_noop_operator, FALSE);
 
-ACPI_EXTERN u8 acpi_gbl_db_opt_disasm;
-ACPI_EXTERN u8 acpi_gbl_db_opt_verbose;
-ACPI_EXTERN u8 acpi_gbl_num_external_methods;
-ACPI_EXTERN u32 acpi_gbl_resolved_external_methods;
-ACPI_EXTERN struct acpi_external_list *acpi_gbl_external_list;
-ACPI_EXTERN struct acpi_external_file *acpi_gbl_external_file_list;
+ACPI_GLOBAL(u8, acpi_gbl_db_opt_disasm);
+ACPI_GLOBAL(u8, acpi_gbl_db_opt_verbose);
+ACPI_GLOBAL(u8, acpi_gbl_num_external_methods);
+ACPI_GLOBAL(u32, acpi_gbl_resolved_external_methods);
+ACPI_GLOBAL(struct acpi_external_list *, acpi_gbl_external_list);
+ACPI_GLOBAL(struct acpi_external_file *, acpi_gbl_external_file_list);
 #endif
 
 #ifdef ACPI_DEBUGGER
 
-extern u8 acpi_gbl_method_executing;
-extern u8 acpi_gbl_abort_method;
-extern u8 acpi_gbl_db_terminate_threads;
+ACPI_INIT_GLOBAL(u8, acpi_gbl_db_terminate_threads, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_abort_method, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_method_executing, FALSE);
 
-ACPI_EXTERN u8 acpi_gbl_db_opt_tables;
-ACPI_EXTERN u8 acpi_gbl_db_opt_stats;
-ACPI_EXTERN u8 acpi_gbl_db_opt_ini_methods;
-ACPI_EXTERN u8 acpi_gbl_db_opt_no_region_support;
-ACPI_EXTERN u8 acpi_gbl_db_output_to_file;
-ACPI_EXTERN char *acpi_gbl_db_buffer;
-ACPI_EXTERN char *acpi_gbl_db_filename;
-ACPI_EXTERN u32 acpi_gbl_db_debug_level;
-ACPI_EXTERN u32 acpi_gbl_db_console_debug_level;
-ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_db_scope_node;
+ACPI_GLOBAL(u8, acpi_gbl_db_opt_tables);
+ACPI_GLOBAL(u8, acpi_gbl_db_opt_stats);
+ACPI_GLOBAL(u8, acpi_gbl_db_opt_ini_methods);
+ACPI_GLOBAL(u8, acpi_gbl_db_opt_no_region_support);
+ACPI_GLOBAL(u8, acpi_gbl_db_output_to_file);
+ACPI_GLOBAL(char *, acpi_gbl_db_buffer);
+ACPI_GLOBAL(char *, acpi_gbl_db_filename);
+ACPI_GLOBAL(u32, acpi_gbl_db_debug_level);
+ACPI_GLOBAL(u32, acpi_gbl_db_console_debug_level);
+ACPI_GLOBAL(struct acpi_namespace_node *, acpi_gbl_db_scope_node);
 
-ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS];
-ACPI_EXTERN acpi_object_type acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS];
+ACPI_GLOBAL(char *, acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS]);
+ACPI_GLOBAL(acpi_object_type, acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS]);
 
 /* These buffers should all be the same size */
 
-ACPI_EXTERN char acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE];
-ACPI_EXTERN char acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE];
-ACPI_EXTERN char acpi_gbl_db_scope_buf[ACPI_DB_LINE_BUFFER_SIZE];
-ACPI_EXTERN char acpi_gbl_db_debug_filename[ACPI_DB_LINE_BUFFER_SIZE];
+ACPI_GLOBAL(char, acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE]);
+ACPI_GLOBAL(char, acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE]);
+ACPI_GLOBAL(char, acpi_gbl_db_scope_buf[ACPI_DB_LINE_BUFFER_SIZE]);
+ACPI_GLOBAL(char, acpi_gbl_db_debug_filename[ACPI_DB_LINE_BUFFER_SIZE]);
 
 /*
  * Statistic globals
  */
-ACPI_EXTERN u16 acpi_gbl_obj_type_count[ACPI_TYPE_NS_NODE_MAX + 1];
-ACPI_EXTERN u16 acpi_gbl_node_type_count[ACPI_TYPE_NS_NODE_MAX + 1];
-ACPI_EXTERN u16 acpi_gbl_obj_type_count_misc;
-ACPI_EXTERN u16 acpi_gbl_node_type_count_misc;
-ACPI_EXTERN u32 acpi_gbl_num_nodes;
-ACPI_EXTERN u32 acpi_gbl_num_objects;
-
-ACPI_EXTERN u32 acpi_gbl_size_of_parse_tree;
-ACPI_EXTERN u32 acpi_gbl_size_of_method_trees;
-ACPI_EXTERN u32 acpi_gbl_size_of_node_entries;
-ACPI_EXTERN u32 acpi_gbl_size_of_acpi_objects;
+ACPI_GLOBAL(u16, acpi_gbl_obj_type_count[ACPI_TYPE_NS_NODE_MAX + 1]);
+ACPI_GLOBAL(u16, acpi_gbl_node_type_count[ACPI_TYPE_NS_NODE_MAX + 1]);
+ACPI_GLOBAL(u16, acpi_gbl_obj_type_count_misc);
+ACPI_GLOBAL(u16, acpi_gbl_node_type_count_misc);
+ACPI_GLOBAL(u32, acpi_gbl_num_nodes);
+ACPI_GLOBAL(u32, acpi_gbl_num_objects);
+
+ACPI_GLOBAL(u32, acpi_gbl_size_of_parse_tree);
+ACPI_GLOBAL(u32, acpi_gbl_size_of_method_trees);
+ACPI_GLOBAL(u32, acpi_gbl_size_of_node_entries);
+ACPI_GLOBAL(u32, acpi_gbl_size_of_acpi_objects);
 
 #endif                         /* ACPI_DEBUGGER */
 
@@ -508,7 +497,7 @@ ACPI_EXTERN u32 acpi_gbl_size_of_acpi_objects;
 
 #ifdef ACPI_APPLICATION
 
-ACPI_FILE ACPI_INIT_GLOBAL(acpi_gbl_debug_file, NULL);
+ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_debug_file, NULL);
 
 #endif                         /* ACPI_APPLICATION */
 
index 6357e93..2ad2351 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 8af8c9b..c542677 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -87,6 +87,10 @@ typedef const struct acpi_exdump_info {
 #define ACPI_EXD_PACKAGE                11
 #define ACPI_EXD_FIELD                  12
 #define ACPI_EXD_REFERENCE              13
+#define ACPI_EXD_LIST                   14     /* Operand object list */
+#define ACPI_EXD_HDLR_LIST              15     /* Address Handler list */
+#define ACPI_EXD_RGN_LIST               16     /* Region list */
+#define ACPI_EXD_NODE                   17     /* Namespace Node */
 
 /* restore default alignment */
 
index d95ca54..52a21da 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 2a86c65..4bceb11 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #define ACPI_SET64(ptr, val)            (*ACPI_CAST64 (ptr) = (u64) (val))
 
 /*
- * printf() format helpers
+ * printf() format helpers. These macros are workarounds for the difficulties
+ * with emitting 64-bit integers and 64-bit pointers with the same code
+ * for both 32-bit and 64-bit hosts.
  */
-
-/* Split 64-bit integer into two 32-bit values. Use with %8.8X%8.8X */
-
 #define ACPI_FORMAT_UINT64(i)           ACPI_HIDWORD(i), ACPI_LODWORD(i)
 
 #if ACPI_MACHINE_WIDTH == 64
 #define ACPI_FORMAT_NATIVE_UINT(i)      ACPI_FORMAT_UINT64(i)
+#define ACPI_FORMAT_TO_UINT(i)          ACPI_FORMAT_UINT64(i)
+#define ACPI_PRINTF_UINT                 "0x%8.8X%8.8X"
+
 #else
-#define ACPI_FORMAT_NATIVE_UINT(i)      0, (i)
+#define ACPI_FORMAT_NATIVE_UINT(i)      0, (u32) (i)
+#define ACPI_FORMAT_TO_UINT(i)          (u32) (i)
+#define ACPI_PRINTF_UINT                 "0x%8.8X"
 #endif
 
 /*
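
The ACPI_PRINTF_UINT and ACPI_FORMAT_TO_UINT additions above are meant to be used as a pair, so a single call site prints a native-width value the same way on 32-bit and 64-bit hosts. A hypothetical usage sketch (the message text and variable name are illustrative, not from this patch):

acpi_os_printf("Region mapped at " ACPI_PRINTF_UINT "\n",
               ACPI_FORMAT_TO_UINT(address));

On a 64-bit build this expands to the "0x%8.8X%8.8X" format with the high and low 32-bit halves as arguments; on a 32-bit build it becomes "0x%8.8X" with a single u32 cast.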
index e6138ac..ee1c040 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index cc7ab6d..1a4d618 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 3fc9ca7..dda0e6a 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index aed3193..6168b85 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index f600ade..a48d713 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -48,7 +48,7 @@
  *
  * Return Package types
  *
- * 1) PTYPE1 packages do not contain sub-packages.
+ * 1) PTYPE1 packages do not contain subpackages.
  *
 * ACPI_PTYPE1_FIXED: Fixed length, 1 or 2 object types:
  *      object type
@@ -63,8 +63,8 @@
  *      (Used for _PRW)
  *
  *
- * 2) PTYPE2 packages contain a Variable-length number of sub-packages. Each
- *    of the different types describe the contents of each of the sub-packages.
+ * 2) PTYPE2 packages contain a variable number of subpackages. Each
+ *    of the different types describes the contents of each subpackage.
  *
  * ACPI_PTYPE2: Each subpackage contains 1 or 2 object types. Zero-length
  *      parent package is allowed:
@@ -560,7 +560,7 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
 
        /*
         * For _HPX, a single package is returned, containing a variable-length number
-        * of sub-packages. Each sub-package contains a PCI record setting.
+        * of subpackages. Each subpackage contains a PCI record setting.
         * There are several different types of record settings, of different
         * lengths, but all elements of all settings are Integers.
         */
@@ -698,6 +698,12 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
          METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
        PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
 
+       {{"_PRP", METHOD_0ARGS,
+         METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each: 1 Str, 1 Int/Str/Pkg */
+       PACKAGE_INFO(ACPI_PTYPE2, ACPI_RTYPE_STRING, 1,
+                    ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING |
+                    ACPI_RTYPE_PACKAGE | ACPI_RTYPE_REFERENCE, 1, 0),
+
        {{"_PRS", METHOD_0ARGS,
          METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
 
index ff97430..4b008e8 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index fc83c0a..5d2989a 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index c54f42c..5fa4b20 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index be8180c..ceeec0b 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -49,7 +49,7 @@ extern const u8 acpi_gbl_resource_aml_serial_bus_sizes[];
 
 /* Strings used by the disassembler and debugger resource dump routines */
 
-#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
+#if defined(ACPI_DEBUG_OUTPUT) || defined (ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
 
 extern const char *acpi_gbl_bm_decode[];
 extern const char *acpi_gbl_config_decode[];
index 48a3e33..5908cce 100644 (file)
@@ -7,7 +7,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 87c2636..f3f8344 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index afdc6df..720b1cd 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index eb56b66..8daf9de 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index e7a57c5..3661c8e 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 1442420..96644d5 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 81a78ba..2c6d42c 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index c4b0b36..b67522d 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index b1746a6..a1e7e6b 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 5205edc..6c0759c 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index d7f53fb..9f74795 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 1bbb22f..f7f5107 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 2dbe109..bd7811c 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 7f569d5..2ac28d2 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index d67891d..9d6e2c1 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index ecb12e2..24f7d5e 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 83cd45f..c7bffff 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 4c67193..3393a73 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index a9cb4a1..955f83d 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index a31e549..caaed3c 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index a3e2f38..ae779c1 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 4d764e8..17e4bbf 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index e315731..78ac293 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index a568754..5d594eb 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 144cbb9..9957297 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -314,6 +314,7 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
 {
        union acpi_operand_object *handler_obj;
        union acpi_operand_object *obj_desc;
+       union acpi_operand_object *start_desc;
        union acpi_operand_object **last_obj_ptr;
        acpi_adr_space_setup region_setup;
        void **region_context;
@@ -341,6 +342,7 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
        /* Find this region in the handler's list */
 
        obj_desc = handler_obj->address_space.region_list;
+       start_desc = obj_desc;
        last_obj_ptr = &handler_obj->address_space.region_list;
 
        while (obj_desc) {
@@ -438,6 +440,15 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
 
                last_obj_ptr = &obj_desc->region.next;
                obj_desc = obj_desc->region.next;
+
+               /* Prevent infinite loop if list is corrupted */
+
+               if (obj_desc == start_desc) {
+                       ACPI_ERROR((AE_INFO,
+                                   "Circular handler list in region object %p",
+                                   region_obj));
+                       return_VOID;
+               }
        }
 
        /* If we get here, the region was not in the handler's region list */
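
The start_desc check added above turns a corrupted, circular region list into a reported error instead of an endless loop. The same guard pattern in isolation, using illustrative types rather than the ACPICA operand objects (a sketch, not code from this patch):

struct item {
        struct item *next;
};

static void walk_list(struct item *head)
{
        struct item *start = head;
        struct item *cur = head;

        while (cur) {
                /* ... examine cur here ... */

                cur = cur->next;

                /* A list that wraps back to its head is corrupted: stop */
                if (cur == start) {
                        break;
                }
        }
}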
index 8354c4f..1b148a4 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 9e9e345..4d8a709 100644 (file)
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 23a7fad..a734b27 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 39d06af..e286640 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 5713da7..20a1392 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -583,6 +583,18 @@ acpi_install_gpe_block(acpi_handle gpe_device,
                goto unlock_and_exit;
        }
 
+       /* Validate the parent device */
+
+       if (node->type != ACPI_TYPE_DEVICE) {
+               status = AE_TYPE;
+               goto unlock_and_exit;
+       }
+
+       if (node->object) {
+               status = AE_ALREADY_EXISTS;
+               goto unlock_and_exit;
+       }
+
        /*
         * For user-installed GPE Block Devices, the gpe_block_base_number
         * is always zero
@@ -666,6 +678,13 @@ acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
                goto unlock_and_exit;
        }
 
+       /* Validate the parent device */
+
+       if (node->type != ACPI_TYPE_DEVICE) {
+               status = AE_TYPE;
+               goto unlock_and_exit;
+       }
+
        /* Get the device_object attached to the node */
 
        obj_desc = acpi_ns_get_attached_object(node);
index 02ed75a..2d6f187 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 06d216c..8ba1464 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 69e4a8c..c545386 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 3c2e6dc..95d23da 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 81c72a4..4cfc3d3 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 4d046fa..973fdae 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -94,12 +94,13 @@ static struct acpi_exdump_info acpi_ex_dump_buffer[5] = {
        {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_buffer), NULL},
        {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(buffer.length), "Length"},
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer.pointer), "Pointer"},
-       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer.node), "Parent Node"},
+       {ACPI_EXD_NODE, ACPI_EXD_OFFSET(buffer.node), "Parent Node"},
        {ACPI_EXD_BUFFER, 0, NULL}
 };
 
-static struct acpi_exdump_info acpi_ex_dump_package[5] = {
+static struct acpi_exdump_info acpi_ex_dump_package[6] = {
        {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_package), NULL},
+       {ACPI_EXD_NODE, ACPI_EXD_OFFSET(package.node), "Parent Node"},
        {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(package.flags), "Flags"},
        {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(package.count), "Elements"},
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(package.elements), "Element List"},
@@ -108,11 +109,11 @@ static struct acpi_exdump_info acpi_ex_dump_package[5] = {
 
 static struct acpi_exdump_info acpi_ex_dump_device[4] = {
        {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_device), NULL},
-       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.handler), "Handler"},
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.notify_list[0]),
         "System Notify"},
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.notify_list[1]),
-        "Device Notify"}
+        "Device Notify"},
+       {ACPI_EXD_HDLR_LIST, ACPI_EXD_OFFSET(device.handler), "Handler"}
 };
 
 static struct acpi_exdump_info acpi_ex_dump_event[2] = {
@@ -142,17 +143,18 @@ static struct acpi_exdump_info acpi_ex_dump_mutex[5] = {
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.os_mutex), "OsMutex"}
 };
 
-static struct acpi_exdump_info acpi_ex_dump_region[7] = {
+static struct acpi_exdump_info acpi_ex_dump_region[8] = {
        {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_region), NULL},
        {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(region.space_id), "Space Id"},
        {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(region.flags), "Flags"},
+       {ACPI_EXD_NODE, ACPI_EXD_OFFSET(region.node), "Parent Node"},
        {ACPI_EXD_ADDRESS, ACPI_EXD_OFFSET(region.address), "Address"},
        {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(region.length), "Length"},
-       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(region.handler), "Handler"},
+       {ACPI_EXD_HDLR_LIST, ACPI_EXD_OFFSET(region.handler), "Handler"},
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(region.next), "Next"}
 };
 
-static struct acpi_exdump_info acpi_ex_dump_power[5] = {
+static struct acpi_exdump_info acpi_ex_dump_power[6] = {
        {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_power), NULL},
        {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(power_resource.system_level),
         "System Level"},
@@ -161,7 +163,8 @@ static struct acpi_exdump_info acpi_ex_dump_power[5] = {
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.notify_list[0]),
         "System Notify"},
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.notify_list[1]),
-        "Device Notify"}
+        "Device Notify"},
+       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.handler), "Handler"}
 };
 
 static struct acpi_exdump_info acpi_ex_dump_processor[7] = {
@@ -225,7 +228,7 @@ static struct acpi_exdump_info acpi_ex_dump_reference[8] = {
        {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(reference.target_type), "Target Type"},
        {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(reference.value), "Value"},
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.object), "Object Desc"},
-       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.node), "Node"},
+       {ACPI_EXD_NODE, ACPI_EXD_OFFSET(reference.node), "Node"},
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.where), "Where"},
        {ACPI_EXD_REFERENCE, 0, NULL}
 };
@@ -234,16 +237,16 @@ static struct acpi_exdump_info acpi_ex_dump_address_handler[6] = {
        {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_address_handler),
         NULL},
        {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(address_space.space_id), "Space Id"},
-       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.next), "Next"},
-       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.region_list),
+       {ACPI_EXD_HDLR_LIST, ACPI_EXD_OFFSET(address_space.next), "Next"},
+       {ACPI_EXD_RGN_LIST, ACPI_EXD_OFFSET(address_space.region_list),
         "Region List"},
-       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.node), "Node"},
+       {ACPI_EXD_NODE, ACPI_EXD_OFFSET(address_space.node), "Node"},
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.context), "Context"}
 };
 
 static struct acpi_exdump_info acpi_ex_dump_notify[7] = {
        {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_notify), NULL},
-       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.node), "Node"},
+       {ACPI_EXD_NODE, ACPI_EXD_OFFSET(notify.node), "Node"},
        {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(notify.handler_type), "Handler Type"},
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.handler), "Handler"},
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.context), "Context"},
@@ -252,14 +255,31 @@ static struct acpi_exdump_info acpi_ex_dump_notify[7] = {
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.next[1]), "Next Device Notify"}
 };
 
+static struct acpi_exdump_info acpi_ex_dump_extra[6] = {
+       {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_extra), NULL},
+       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(extra.method_REG), "_REG Method"},
+       {ACPI_EXD_NODE, ACPI_EXD_OFFSET(extra.scope_node), "Scope Node"},
+       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(extra.region_context),
+        "Region Context"},
+       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(extra.aml_start), "Aml Start"},
+       {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(extra.aml_length), "Aml Length"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_data[3] = {
+       {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_data), NULL},
+       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(data.handler), "Handler"},
+       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(data.pointer), "Raw Data"}
+};
+
 /* Miscellaneous tables */
 
-static struct acpi_exdump_info acpi_ex_dump_common[4] = {
+static struct acpi_exdump_info acpi_ex_dump_common[5] = {
        {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_common), NULL},
        {ACPI_EXD_TYPE, 0, NULL},
        {ACPI_EXD_UINT16, ACPI_EXD_OFFSET(common.reference_count),
         "Reference Count"},
-       {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(common.flags), "Flags"}
+       {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(common.flags), "Flags"},
+       {ACPI_EXD_LIST, ACPI_EXD_OFFSET(common.next_object), "Object List"}
 };
 
 static struct acpi_exdump_info acpi_ex_dump_field_common[7] = {
@@ -274,15 +294,17 @@ static struct acpi_exdump_info acpi_ex_dump_field_common[7] = {
         "Field Bit Offset"},
        {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(common_field.base_byte_offset),
         "Base Byte Offset"},
-       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(common_field.node), "Parent Node"}
+       {ACPI_EXD_NODE, ACPI_EXD_OFFSET(common_field.node), "Parent Node"}
 };
 
-static struct acpi_exdump_info acpi_ex_dump_node[5] = {
+static struct acpi_exdump_info acpi_ex_dump_node[7] = {
        {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_node), NULL},
        {ACPI_EXD_UINT8, ACPI_EXD_NSOFFSET(flags), "Flags"},
        {ACPI_EXD_UINT8, ACPI_EXD_NSOFFSET(owner_id), "Owner Id"},
-       {ACPI_EXD_POINTER, ACPI_EXD_NSOFFSET(child), "Child List"},
-       {ACPI_EXD_POINTER, ACPI_EXD_NSOFFSET(peer), "Next Peer"}
+       {ACPI_EXD_LIST, ACPI_EXD_NSOFFSET(object), "Object List"},
+       {ACPI_EXD_NODE, ACPI_EXD_NSOFFSET(parent), "Parent"},
+       {ACPI_EXD_NODE, ACPI_EXD_NSOFFSET(child), "Child"},
+       {ACPI_EXD_NODE, ACPI_EXD_NSOFFSET(peer), "Peer"}
 };
 
 /* Dispatch table, indexed by object type */
@@ -315,7 +337,9 @@ static struct acpi_exdump_info *acpi_ex_dump_info[] = {
        acpi_ex_dump_address_handler,
        NULL,
        NULL,
-       NULL
+       NULL,
+       acpi_ex_dump_extra,
+       acpi_ex_dump_data
 };
 
 /*******************************************************************************
@@ -340,6 +364,10 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
        char *name;
        const char *reference_name;
        u8 count;
+       union acpi_operand_object *start;
+       union acpi_operand_object *data = NULL;
+       union acpi_operand_object *next;
+       struct acpi_namespace_node *node;
 
        if (!info) {
                acpi_os_printf
@@ -363,9 +391,9 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
 
                case ACPI_EXD_TYPE:
 
-                       acpi_ex_out_string("Type",
-                                          acpi_ut_get_object_type_name
-                                          (obj_desc));
+                       acpi_os_printf("%20s : %2.2X [%s]\n", "Type",
+                                      obj_desc->common.type,
+                                      acpi_ut_get_object_type_name(obj_desc));
                        break;
 
                case ACPI_EXD_UINT8:
@@ -433,6 +461,121 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
                        acpi_ex_dump_reference_obj(obj_desc);
                        break;
 
+               case ACPI_EXD_LIST:
+
+                       start = *ACPI_CAST_PTR(void *, target);
+                       next = start;
+
+                       acpi_os_printf("%20s : %p", name, next);
+                       if (next) {
+                               acpi_os_printf("(%s %2.2X)",
+                                              acpi_ut_get_object_type_name
+                                              (next), next->common.type);
+
+                               while (next->common.next_object) {
+                                       if ((next->common.type ==
+                                            ACPI_TYPE_LOCAL_DATA) && !data) {
+                                               data = next;
+                                       }
+
+                                       next = next->common.next_object;
+                                       acpi_os_printf("->%p(%s %2.2X)", next,
+                                                      acpi_ut_get_object_type_name
+                                                      (next),
+                                                      next->common.type);
+
+                                       if ((next == start) || (next == data)) {
+                                               acpi_os_printf
+                                                   ("\n**** Error: Object list appears to be circularly linked");
+                                               break;
+                                       }
+                               }
+                       }
+
+                       acpi_os_printf("\n");
+                       break;
+
+               case ACPI_EXD_HDLR_LIST:
+
+                       start = *ACPI_CAST_PTR(void *, target);
+                       next = start;
+
+                       acpi_os_printf("%20s : %p", name, next);
+                       if (next) {
+                               acpi_os_printf("(%s %2.2X)",
+                                              acpi_ut_get_object_type_name
+                                              (next), next->common.type);
+
+                               while (next->address_space.next) {
+                                       if ((next->common.type ==
+                                            ACPI_TYPE_LOCAL_DATA) && !data) {
+                                               data = next;
+                                       }
+
+                                       next = next->address_space.next;
+                                       acpi_os_printf("->%p(%s %2.2X)", next,
+                                                      acpi_ut_get_object_type_name
+                                                      (next),
+                                                      next->common.type);
+
+                                       if ((next == start) || (next == data)) {
+                                               acpi_os_printf
+                                                   ("\n**** Error: Handler list appears to be circularly linked");
+                                               break;
+                                       }
+                               }
+                       }
+
+                       acpi_os_printf("\n");
+                       break;
+
+               case ACPI_EXD_RGN_LIST:
+
+                       start = *ACPI_CAST_PTR(void *, target);
+                       next = start;
+
+                       acpi_os_printf("%20s : %p", name, next);
+                       if (next) {
+                               acpi_os_printf("(%s %2.2X)",
+                                              acpi_ut_get_object_type_name
+                                              (next), next->common.type);
+
+                               while (next->region.next) {
+                                       if ((next->common.type ==
+                                            ACPI_TYPE_LOCAL_DATA) && !data) {
+                                               data = next;
+                                       }
+
+                                       next = next->region.next;
+                                       acpi_os_printf("->%p(%s %2.2X)", next,
+                                                      acpi_ut_get_object_type_name
+                                                      (next),
+                                                      next->common.type);
+
+                                       if ((next == start) || (next == data)) {
+                                               acpi_os_printf
+                                                   ("\n**** Error: Region list appears to be circularly linked");
+                                               break;
+                                       }
+                               }
+                       }
+
+                       acpi_os_printf("\n");
+                       break;
+
+               case ACPI_EXD_NODE:
+
+                       node =
+                           *ACPI_CAST_PTR(struct acpi_namespace_node *,
+                                          target);
+
+                       acpi_os_printf("%20s : %p", name, node);
+                       if (node) {
+                               acpi_os_printf(" [%4.4s]", node->name.ascii);
+                       }
+                       acpi_os_printf("\n");
+                       break;
+
                default:
 
                        acpi_os_printf("**** Invalid table opcode [%X] ****\n",
@@ -821,10 +964,8 @@ void acpi_ex_dump_namespace_node(struct acpi_namespace_node *node, u32 flags)
        }
 
        acpi_os_printf("%20s : %4.4s\n", "Name", acpi_ut_get_node_name(node));
-       acpi_ex_out_string("Type", acpi_ut_get_type_name(node->type));
-       acpi_ex_out_pointer("Attached Object",
-                           acpi_ns_get_attached_object(node));
-       acpi_ex_out_pointer("Parent", node->parent);
+       acpi_os_printf("%20s : %2.2X [%s]\n", "Type",
+                      node->type, acpi_ut_get_type_name(node->type));
 
        acpi_ex_dump_object(ACPI_CAST_PTR(union acpi_operand_object, node),
                            acpi_ex_dump_node);
@@ -1017,22 +1158,26 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
                               ((struct acpi_namespace_node *)obj_desc)->
                               object);
 
-               acpi_ex_dump_object_descriptor(((struct acpi_namespace_node *)
-                                               obj_desc)->object, flags);
-               return_VOID;
+               obj_desc = ((struct acpi_namespace_node *)obj_desc)->object;
+               goto dump_object;
        }
 
        if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != ACPI_DESC_TYPE_OPERAND) {
-               acpi_os_printf
-                   ("ExDumpObjectDescriptor: %p is not an ACPI operand object: [%s]\n",
-                    obj_desc, acpi_ut_get_descriptor_name(obj_desc));
+               acpi_os_printf("%p is not an ACPI operand object: [%s]\n",
+                              obj_desc, acpi_ut_get_descriptor_name(obj_desc));
                return_VOID;
        }
 
-       if (obj_desc->common.type > ACPI_TYPE_NS_NODE_MAX) {
+       /* Validate the object type */
+
+       if (obj_desc->common.type > ACPI_TYPE_LOCAL_MAX) {
+               acpi_os_printf("Not a known object type: %2.2X\n",
+                              obj_desc->common.type);
                return_VOID;
        }
 
+dump_object:
+
        /* Common Fields */
 
        acpi_ex_dump_object(obj_desc, acpi_ex_dump_common);
@@ -1040,6 +1185,22 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
        /* Object-specific fields */
 
        acpi_ex_dump_object(obj_desc, acpi_ex_dump_info[obj_desc->common.type]);
+
+       if (obj_desc->common.type == ACPI_TYPE_REGION) {
+               obj_desc = obj_desc->common.next_object;
+               if (obj_desc->common.type > ACPI_TYPE_LOCAL_MAX) {
+                       acpi_os_printf
+                           ("Secondary object is not a known object type: %2.2X\n",
+                            obj_desc->common.type);
+
+                       return_VOID;
+               }
+
+               acpi_os_printf("\nExtra attached Object (%p):\n", obj_desc);
+               acpi_ex_dump_object(obj_desc,
+                                   acpi_ex_dump_info[obj_desc->common.type]);
+       }
+
        return_VOID;
 }
 
index cfd8752..68d9744 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 49fb742..1d1b27a 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 65d9360..2207e62 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 7be0205..b49ea2a 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 14689de..dbb03b5 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index d74cea4..1b8e941 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index d6fa0fc..2ede656 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index bc042ad..363767c 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 4459e32..29e9e99 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 5a58861..ee3f872 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 9d28867..cd5288a 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 7ca6925..ab06026 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 1606524..3cde553 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index be3f669..3af8de3 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index f0b09bf..daf49f7 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 20d809d..04bd16c 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 26e3710..fd11018 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 6578dee..841caed 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 99dc7b2..5b16c54 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 3d36df8..1e66d96 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 4140768..858fdd6 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 9654050..2e6caab 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 0889a62..e701d8c 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 12e6cff..e0fd9b4 100644 (file)
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index e3828cc..d590693 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 3c498dc..76ab5c1 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index eab70d5..6b91912 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index b4b47db..96d007d 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 15dddc1..6921c7f 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 14f65f6..f1249e3 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index fd1ff54..607eb9e 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 74b24c8..80fcfc8 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index acd2964..b55642c 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 48b9c6f..3d88ef4 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 2837625..42d3710 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 963ceef..e634a05 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 3a0423a..5b74677 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 89ec645..7ae521c 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 90a0380..7eee0a6 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 7a736f4..fe54a8c 100644 (file)
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -222,13 +222,19 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node)
                }
        }
 
-       /* Clear the entry in all cases */
+       /* Clear the Node entry in all cases */
 
        node->object = NULL;
        if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) == ACPI_DESC_TYPE_OPERAND) {
+
+               /* Unlink object from front of possible object list */
+
                node->object = obj_desc->common.next_object;
+
+               /* Handle possible 2-descriptor object */
+
                if (node->object &&
-                   ((node->object)->common.type != ACPI_TYPE_LOCAL_DATA)) {
+                   (node->object->common.type != ACPI_TYPE_LOCAL_DATA)) {
                        node->object = node->object->common.next_object;
                }
        }
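
A minimal sketch of the unlink step shown above, using hypothetical types and a placeholder type value (not ACPICA's actual layout): when a node's object is detached, the head of the attached-object list is unlinked, and a possible second descriptor travelling with it is skipped as well unless it is an attached-data object that must remain on the node.

struct obj {
    int type;                         /* TYPE_LOCAL_DATA marks attached data */
    struct obj *next_object;
};

struct node {
    struct obj *object;               /* head of the attached-object list */
};

enum { TYPE_LOCAL_DATA = 0x15 };      /* placeholder value */

static void detach_object(struct node *node)
{
    struct obj *obj = node->object;

    if (!obj)
        return;

    /* Unlink the object from the front of the list */
    node->object = obj->next_object;

    /* Handle a possible 2-descriptor object: skip the second descriptor
       too, unless it is an attached-data object belonging to the node. */
    if (node->object && node->object->type != TYPE_LOCAL_DATA)
        node->object = node->object->next_object;
}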
index 1778573..e83cff3 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index d2855d9..392910f 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 3d5391f..68f7258 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -132,12 +132,12 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
         * Decode the type of the expected package contents
         *
         * PTYPE1 packages contain no subpackages
-        * PTYPE2 packages contain sub-packages
+        * PTYPE2 packages contain subpackages
         */
        switch (package->ret_info.type) {
        case ACPI_PTYPE1_FIXED:
                /*
-                * The package count is fixed and there are no sub-packages
+                * The package count is fixed and there are no subpackages
                 *
                 * If package is too small, exit.
                 * If package is larger than expected, issue warning but continue
@@ -169,7 +169,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
 
        case ACPI_PTYPE1_VAR:
                /*
-                * The package count is variable, there are no sub-packages, and all
+                * The package count is variable, there are no subpackages, and all
                 * elements must be of the same type
                 */
                for (i = 0; i < count; i++) {
@@ -185,7 +185,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
 
        case ACPI_PTYPE1_OPTION:
                /*
-                * The package count is variable, there are no sub-packages. There are
+                * The package count is variable, there are no subpackages. There are
                 * a fixed number of required elements, and a variable number of
                 * optional elements.
                 *
@@ -242,7 +242,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
                elements++;
                count--;
 
-               /* Examine the sub-packages */
+               /* Examine the subpackages */
 
                status =
                    acpi_ns_check_package_list(info, package, elements, count);
@@ -250,7 +250,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
 
        case ACPI_PTYPE2_PKG_COUNT:
 
-               /* First element is the (Integer) count of sub-packages to follow */
+               /* First element is the (Integer) count of subpackages to follow */
 
                status = acpi_ns_check_object_type(info, elements,
                                                   ACPI_RTYPE_INTEGER, 0);
@@ -270,7 +270,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
                count = expected_count;
                elements++;
 
-               /* Examine the sub-packages */
+               /* Examine the subpackages */
 
                status =
                    acpi_ns_check_package_list(info, package, elements, count);
@@ -283,9 +283,9 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
        case ACPI_PTYPE2_FIX_VAR:
                /*
                 * These types all return a single Package that consists of a
-                * variable number of sub-Packages.
+                * variable number of subpackages.
                 *
-                * First, ensure that the first element is a sub-Package. If not,
+                * First, ensure that the first element is a subpackage. If not,
                 * the BIOS may have incorrectly returned the object as a single
                 * package instead of a Package of Packages (a common error if
                 * there is only one entry). We may be able to repair this by
@@ -310,7 +310,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
                        count = 1;
                }
 
-               /* Examine the sub-packages */
+               /* Examine the subpackages */
 
                status =
                    acpi_ns_check_package_list(info, package, elements, count);
@@ -370,9 +370,9 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
        u32 j;
 
        /*
-        * Validate each sub-Package in the parent Package
+        * Validate each subpackage in the parent Package
         *
-        * NOTE: assumes list of sub-packages contains no NULL elements.
+        * NOTE: assumes list of subpackages contains no NULL elements.
         * Any NULL elements should have been removed by earlier call
         * to acpi_ns_remove_null_elements.
         */
@@ -389,7 +389,7 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
                        return (status);
                }
 
-               /* Examine the different types of expected sub-packages */
+               /* Examine the different types of expected subpackages */
 
                info->parent_package = sub_package;
                switch (package->ret_info.type) {
@@ -450,14 +450,14 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
 
                case ACPI_PTYPE2_FIXED:
 
-                       /* Each sub-package has a fixed length */
+                       /* Each subpackage has a fixed length */
 
                        expected_count = package->ret_info2.count;
                        if (sub_package->package.count < expected_count) {
                                goto package_too_small;
                        }
 
-                       /* Check the type of each sub-package element */
+                       /* Check the type of each subpackage element */
 
                        for (j = 0; j < expected_count; j++) {
                                status =
@@ -475,14 +475,14 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
 
                case ACPI_PTYPE2_MIN:
 
-                       /* Each sub-package has a variable but minimum length */
+                       /* Each subpackage has a variable but minimum length */
 
                        expected_count = package->ret_info.count1;
                        if (sub_package->package.count < expected_count) {
                                goto package_too_small;
                        }
 
-                       /* Check the type of each sub-package element */
+                       /* Check the type of each subpackage element */
 
                        status =
                            acpi_ns_check_package_elements(info, sub_elements,
@@ -531,7 +531,7 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
                                (*sub_elements)->integer.value = expected_count;
                        }
 
-                       /* Check the type of each sub-package element */
+                       /* Check the type of each subpackage element */
 
                        status =
                            acpi_ns_check_package_elements(info,
@@ -557,10 +557,10 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
 
 package_too_small:
 
-       /* The sub-package count was smaller than required */
+       /* The subpackage count was smaller than required */
 
        ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname, info->node_flags,
-                             "Return Sub-Package[%u] is too small - found %u elements, expected %u",
+                             "Return SubPackage[%u] is too small - found %u elements, expected %u",
                              i, sub_package->package.count, expected_count));
 
        return (AE_AML_OPERAND_VALUE);
index a05afff..7e417aa 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -207,13 +207,30 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info,
         * this predefined name. Either one return value is expected, or none,
         * for both methods and other objects.
         *
-        * Exit now if there is no return object. Warning if one was expected.
+        * Try to fix if there was no return object. Warning if failed to fix.
         */
        if (!return_object) {
                if (expected_btypes && (!(expected_btypes & ACPI_RTYPE_NONE))) {
-                       ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname,
-                                             ACPI_WARN_ALWAYS,
-                                             "Missing expected return value"));
+                       if (package_index != ACPI_NOT_PACKAGE_ELEMENT) {
+                               ACPI_WARN_PREDEFINED((AE_INFO,
+                                                     info->full_pathname,
+                                                     ACPI_WARN_ALWAYS,
+                                                     "Found unexpected NULL package element"));
+
+                               status =
+                                   acpi_ns_repair_null_element(info,
+                                                               expected_btypes,
+                                                               package_index,
+                                                               return_object_ptr);
+                               if (ACPI_SUCCESS(status)) {
+                                       return (AE_OK); /* Repair was successful */
+                               }
+                       } else {
+                               ACPI_WARN_PREDEFINED((AE_INFO,
+                                                     info->full_pathname,
+                                                     ACPI_WARN_ALWAYS,
+                                                     "Missing expected return value"));
+                       }
 
                        return (AE_AML_NO_RETURN_VALUE);
                }
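
The hunk above changes what happens when an expected return value is missing: if the miss is a NULL element inside a returned Package, a repair is attempted first, and only a missing top-level return value (or a failed repair) is reported as an error. A compact sketch of that decision, with a stub standing in for acpi_ns_repair_null_element() (which, in the ACPICA sources, tries to substitute a default object for the element); names and return conventions here are illustrative only.

#include <stdbool.h>

#define NOT_PACKAGE_ELEMENT  (-1)     /* analogue of ACPI_NOT_PACKAGE_ELEMENT */

/* Stub for illustration; the real routine attempts to create a default
   object in place of the missing element. */
static bool try_repair_null_element(int expected_types, int package_index)
{
    (void)expected_types;
    (void)package_index;
    return false;
}

/* Returns 0 if the element was repaired, -1 for "no return value" cases. */
int handle_missing_return(int expected_types, int package_index)
{
    if (package_index != NOT_PACKAGE_ELEMENT) {
        /* A NULL element inside a returned Package may be repairable */
        if (try_repair_null_element(expected_types, package_index))
            return 0;
    }

    /* Unrepairable element, or a missing top-level return value */
    return -1;
}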
@@ -448,7 +465,7 @@ acpi_ns_repair_null_element(struct acpi_evaluate_info * info,
  * RETURN:      None.
  *
  * DESCRIPTION: Remove all NULL package elements from packages that contain
- *              a variable number of sub-packages. For these types of
+ *              a variable number of subpackages. For these types of
  *              packages, NULL elements can be safely removed.
  *
  *****************************************************************************/
@@ -469,7 +486,7 @@ acpi_ns_remove_null_elements(struct acpi_evaluate_info *info,
        /*
         * We can safely remove all NULL elements from these package types:
         * PTYPE1_VAR packages contain a variable number of simple data types.
-        * PTYPE2 packages contain a variable number of sub-packages.
+        * PTYPE2 packages contain a variable number of subpackages.
         */
        switch (package_type) {
        case ACPI_PTYPE1_VAR:
index 6a25d32..b09e6be 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -432,8 +432,8 @@ acpi_ns_repair_CID(struct acpi_evaluate_info *info,
  * DESCRIPTION: Repair for the _CST object:
  *              1. Sort the list ascending by C state type
  *              2. Ensure type cannot be zero
- *              3. A sub-package count of zero means _CST is meaningless
- *              4. Count must match the number of C state sub-packages
+ *              3. A subpackage count of zero means _CST is meaningless
+ *              4. Count must match the number of C state subpackages
  *
  *****************************************************************************/
 
@@ -611,6 +611,7 @@ acpi_ns_repair_PRT(struct acpi_evaluate_info *info,
        union acpi_operand_object **top_object_list;
        union acpi_operand_object **sub_object_list;
        union acpi_operand_object *obj_desc;
+       union acpi_operand_object *sub_package;
        u32 element_count;
        u32 index;
 
@@ -619,8 +620,17 @@ acpi_ns_repair_PRT(struct acpi_evaluate_info *info,
        top_object_list = package_object->package.elements;
        element_count = package_object->package.count;
 
-       for (index = 0; index < element_count; index++) {
-               sub_object_list = (*top_object_list)->package.elements;
+       /* Examine each subpackage */
+
+       for (index = 0; index < element_count; index++, top_object_list++) {
+               sub_package = *top_object_list;
+               sub_object_list = sub_package->package.elements;
+
+               /* Check for minimum required element count */
+
+               if (sub_package->package.count < 4) {
+                       continue;
+               }
 
                /*
                 * If the BIOS has erroneously reversed the _PRT source_name (index 2)
@@ -634,15 +644,12 @@ acpi_ns_repair_PRT(struct acpi_evaluate_info *info,
                        sub_object_list[2] = obj_desc;
                        info->return_flags |= ACPI_OBJECT_REPAIRED;
 
-                       ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname,
+                       ACPI_WARN_PREDEFINED((AE_INFO,
+                                             info->full_pathname,
                                              info->node_flags,
                                              "PRT[%X]: Fixed reversed SourceName and SourceIndex",
                                              index));
                }
-
-               /* Point to the next union acpi_operand_object in the top level package */
-
-               top_object_list++;
        }
 
        return (AE_OK);
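
The rewritten loop above adds a length guard before the fixed-index accesses: a _PRT subpackage needs at least four elements (Address, Pin, Source, SourceIndex), so shorter subpackages from broken firmware are skipped rather than dereferenced. A small sketch of that guard, using made-up stand-in types:

struct pkg {
    unsigned int count;
    struct pkg **elements;            /* stand-in for operand-object pointers */
};

static void repair_prt(struct pkg *top)
{
    struct pkg **list = top->elements;
    unsigned int i;

    for (i = 0; i < top->count; i++, list++) {
        struct pkg *sub = *list;

        /* Skip malformed subpackages instead of indexing past their end */
        if (!sub || sub->count < 4)
            continue;

        /* ... checks on sub->elements[2] and sub->elements[3]
           (SourceName/SourceIndex) would follow here ... */
    }
}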
@@ -679,7 +686,7 @@ acpi_ns_repair_PSS(struct acpi_evaluate_info *info,
        u32 i;
 
        /*
-        * Entries (sub-packages) in the _PSS Package must be sorted by power
+        * Entries (subpackages) in the _PSS Package must be sorted by power
         * dissipation, in descending order. If it appears that the list is
         * incorrectly sorted, sort it. We sort by cpu_frequency, since this
         * should be proportional to the power.
@@ -767,9 +774,9 @@ acpi_ns_repair_TSS(struct acpi_evaluate_info *info,
  *
  * PARAMETERS:  info                - Method execution information block
  *              return_object       - Pointer to the top-level returned object
- *              start_index         - Index of the first sub-package
- *              expected_count      - Minimum length of each sub-package
- *              sort_index          - Sub-package entry to sort on
+ *              start_index         - Index of the first subpackage
+ *              expected_count      - Minimum length of each subpackage
+ *              sort_index          - Subpackage entry to sort on
  *              sort_direction      - Ascending or descending
  *              sort_key_name       - Name of the sort_index field
  *
@@ -805,7 +812,7 @@ acpi_ns_check_sorted_list(struct acpi_evaluate_info *info,
        }
 
        /*
-        * NOTE: assumes list of sub-packages contains no NULL elements.
+        * NOTE: assumes list of subpackages contains no NULL elements.
         * Any NULL elements should have been removed by earlier call
         * to acpi_ns_remove_null_elements.
         */
@@ -832,7 +839,7 @@ acpi_ns_check_sorted_list(struct acpi_evaluate_info *info,
                        return (AE_AML_OPERAND_TYPE);
                }
 
-               /* Each sub-package must have the minimum length */
+               /* Each subpackage must have the minimum length */
 
                if ((*outer_elements)->package.count < expected_count) {
                        return (AE_AML_PACKAGE_LIMIT);
index 47420fa..af1cc42 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 4a0665b..4a5e3f5 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index e81f15e..4758a1f 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 1f0c28b..4bd558b 100644 (file)
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -923,19 +923,22 @@ ACPI_EXPORT_SYMBOL(acpi_detach_data)
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_get_data
+ * FUNCTION:    acpi_get_data_full
  *
  * PARAMETERS:  obj_handle          - Namespace node
  *              handler             - Handler used in call to attach_data
  *              data                - Where the data is returned
+ *              callback            - function to execute before returning
  *
  * RETURN:      Status
  *
- * DESCRIPTION: Retrieve data that was previously attached to a namespace node.
+ * DESCRIPTION: Retrieve data that was previously attached to a namespace node
+ *              and execute a callback before returning.
  *
  ******************************************************************************/
 acpi_status
-acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data)
+acpi_get_data_full(acpi_handle obj_handle, acpi_object_handler handler,
+                  void **data, void (*callback)(void *))
 {
        struct acpi_namespace_node *node;
        acpi_status status;
@@ -960,10 +963,34 @@ acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data)
        }
 
        status = acpi_ns_get_attached_data(node, handler, data);
+       if (ACPI_SUCCESS(status) && callback) {
+               callback(*data);
+       }
 
 unlock_and_exit:
        (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
        return (status);
 }
 
+ACPI_EXPORT_SYMBOL(acpi_get_data_full)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_get_data
+ *
+ * PARAMETERS:  obj_handle          - Namespace node
+ *              handler             - Handler used in call to attach_data
+ *              data                - Where the data is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Retrieve data that was previously attached to a namespace node.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data)
+{
+       return acpi_get_data_full(obj_handle, handler, data, NULL);
+}
+
 ACPI_EXPORT_SYMBOL(acpi_get_data)
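
The new acpi_get_data_full() differs from acpi_get_data() only in the optional callback, which runs after the lookup succeeds but before the namespace mutex is released. A hedged usage sketch: everything except acpi_get_data_full(), acpi_object_handler, and the ACPI status helpers is made up for illustration. A caller can use the callback to take a reference on the attached data so a concurrent detach cannot free it once the lock is dropped.

#include <linux/acpi.h>
#include <linux/kref.h>

struct my_dev {                        /* hypothetical per-handle data */
    struct kref ref;
    /* ... */
};

/* Runs while the ACPICA namespace mutex is still held */
static void my_dev_get(void *data)
{
    kref_get(&((struct my_dev *)data)->ref);
}

/* 'handler' must be the acpi_object_handler previously passed to
   acpi_attach_data() for this handle. */
static struct my_dev *my_dev_from_handle(acpi_handle handle,
                                         acpi_object_handler handler)
{
    void *data = NULL;
    acpi_status status;

    status = acpi_get_data_full(handle, handler, &data, my_dev_get);
    return ACPI_SUCCESS(status) ? data : NULL;
}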
index 3a4bd3f..8c6c11c 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 0e6d79e..dae9401 100644 (file)
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 91a5a69..314d314 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 065b44a..646d1a3 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 95dc608..af1f46c 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 1b659e5..1755d2a 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index b0c9787..0d8d37f 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 79d9a28..6d27b59 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 6a4b6fb..32d250f 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 877dc0d..0b64181 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 91fa73a..3cd4880 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index abd6562..9cb07e1 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index fcb7a84..e135aca 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index f3a9276..916fd09 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index b60c9cf..6895567 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -636,7 +636,7 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
 
        for (index = 0; index < number_of_elements; index++) {
 
-               /* Dereference the sub-package */
+               /* Dereference the subpackage */
 
                package_element = *top_object_list;
 
index 3a2ace9..75d3690 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -273,7 +273,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
                 */
                user_prt->length = (sizeof(struct acpi_pci_routing_table) - 4);
 
-               /* Each sub-package must be of length 4 */
+               /* Each subpackage must be of length 4 */
 
                if ((*top_object_list)->package.count != 4) {
                        ACPI_ERROR((AE_INFO,
@@ -283,7 +283,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
                }
 
                /*
-                * Dereference the sub-package.
+                * Dereference the subpackage.
                 * The sub_object_list will now point to an array of the four IRQ
                 * elements: [Address, Pin, Source, source_index]
                 */
@@ -292,7 +292,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
                /* 1) First subobject: Dereference the PRT.Address */
 
                obj_desc = sub_object_list[0];
-               if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
+               if (!obj_desc || obj_desc->common.type != ACPI_TYPE_INTEGER) {
                        ACPI_ERROR((AE_INFO,
                                    "(PRT[%u].Address) Need Integer, found %s",
                                    index,
@@ -305,7 +305,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
                /* 2) Second subobject: Dereference the PRT.Pin */
 
                obj_desc = sub_object_list[1];
-               if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
+               if (!obj_desc || obj_desc->common.type != ACPI_TYPE_INTEGER) {
                        ACPI_ERROR((AE_INFO,
                                    "(PRT[%u].Pin) Need Integer, found %s",
                                    index,
@@ -394,7 +394,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
                /* 4) Fourth subobject: Dereference the PRT.source_index */
 
                obj_desc = sub_object_list[3];
-               if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
+               if (!obj_desc || obj_desc->common.type != ACPI_TYPE_INTEGER) {
                        ACPI_ERROR((AE_INFO,
                                    "(PRT[%u].SourceIndex) Need Integer, found %s",
                                    index,
index 8a2d498..c3c56b5 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -47,7 +47,8 @@
 
 #define _COMPONENT          ACPI_RESOURCES
 ACPI_MODULE_NAME("rsdump")
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DISASSEMBLER) || defined(ACPI_DEBUGGER)
 /* Local prototypes */
 static void acpi_rs_out_string(char *title, char *value);
 
index 46192bd..2f9332d 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -48,7 +48,7 @@
 #define _COMPONENT          ACPI_RESOURCES
 ACPI_MODULE_NAME("rsdumpinfo")
 
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DISASSEMBLER) || defined(ACPI_DEBUGGER)
 #define ACPI_RSD_OFFSET(f)          (u8) ACPI_OFFSET (union acpi_resource_data,f)
 #define ACPI_PRT_OFFSET(f)          (u8) ACPI_OFFSET (struct acpi_pci_routing_table,f)
 #define ACPI_RSD_TABLE_SIZE(name)   (sizeof(name) / sizeof (struct acpi_rsdump_info))
index 41fed78..9d3f8a9 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -132,8 +132,7 @@ struct acpi_rsconvert_info *acpi_gbl_convert_resource_serial_bus_dispatch[] = {
        acpi_rs_convert_uart_serial_bus,
 };
 
-#ifdef ACPI_FUTURE_USAGE
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DISASSEMBLER) || defined(ACPI_DEBUGGER)
 
 /* Dispatch table for resource dump functions */
 
@@ -168,7 +167,6 @@ struct acpi_rsdump_info *acpi_gbl_dump_serial_bus_dispatch[] = {
 };
 #endif
 
-#endif                         /* ACPI_FUTURE_USAGE */
 /*
  * Base sizes for external AML resource descriptors, indexed by internal type.
  * Includes size of the descriptor header (1 byte for small descriptors,
index ca18375..19d6487 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 364decc..3461f7d 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 6053aa1..7729129 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index ebc773a..eab4483 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index c99cec9..41eea4b 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index fe49fc4..9e84072 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 14a7982..897a5ce 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 01e4769..877ab92 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 8f89263..ec14588 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index e4f4f02..c120039 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 634357d..e304094 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -292,10 +292,11 @@ struct acpi_table_header *acpi_tb_table_override(struct acpi_table_header
                new_table = acpi_os_map_memory(new_address, new_table_length);
                if (!new_table) {
                        ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY,
-                                       "%4.4s %p Attempted physical table override failed",
+                                       "%4.4s " ACPI_PRINTF_UINT
+                                       " Attempted physical table override failed",
                                        table_header->signature,
-                                       ACPI_CAST_PTR(void,
-                                                     table_desc->address)));
+                                       ACPI_FORMAT_TO_UINT(table_desc->
+                                                           address)));
                        return (NULL);
                }
 
@@ -308,11 +309,11 @@ struct acpi_table_header *acpi_tb_table_override(struct acpi_table_header
 
 finish_override:
 
-       ACPI_INFO((AE_INFO,
-                  "%4.4s %p %s table override, new table: %p",
+       ACPI_INFO((AE_INFO, "%4.4s " ACPI_PRINTF_UINT
+                  " %s table override, new table: " ACPI_PRINTF_UINT,
                   table_header->signature,
-                  ACPI_CAST_PTR(void, table_desc->address),
-                  override_type, new_table));
+                  ACPI_FORMAT_TO_UINT(table_desc->address),
+                  override_type, ACPI_FORMAT_TO_UINT(new_table)));
 
        /* We can now unmap/delete the original table (if fully mapped) */
 
index 6866e76..df3bb20 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -128,15 +128,17 @@ acpi_tb_print_table_header(acpi_physical_address address,
        struct acpi_table_header local_header;
 
        /*
-        * The reason that the Address is cast to a void pointer is so that we
-        * can use %p which will work properly on both 32-bit and 64-bit hosts.
+        * The reason that we use ACPI_PRINTF_UINT and ACPI_FORMAT_TO_UINT is to
+        * support both 32-bit and 64-bit hosts/addresses in a consistent manner.
+        * The %p specifier does not emit uniform output on all hosts. On some,
+        * leading zeros are not supported.
         */
        if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_FACS)) {
 
                /* FACS only has signature and length fields */
 
-               ACPI_INFO((AE_INFO, "%4.4s %p %06X",
-                          header->signature, ACPI_CAST_PTR(void, address),
+               ACPI_INFO((AE_INFO, "%-4.4s " ACPI_PRINTF_UINT " %06X",
+                          header->signature, ACPI_FORMAT_TO_UINT(address),
                           header->length));
        } else if (ACPI_VALIDATE_RSDP_SIG(header->signature)) {
 
@@ -147,8 +149,9 @@ acpi_tb_print_table_header(acpi_physical_address address,
                                          header)->oem_id, ACPI_OEM_ID_SIZE);
                acpi_tb_fix_string(local_header.oem_id, ACPI_OEM_ID_SIZE);
 
-               ACPI_INFO((AE_INFO, "RSDP %p %06X (v%.2d %6.6s)",
-                          ACPI_CAST_PTR(void, address),
+               ACPI_INFO((AE_INFO,
+                          "RSDP " ACPI_PRINTF_UINT " %06X (v%.2d %-6.6s)",
+                          ACPI_FORMAT_TO_UINT(address),
                           (ACPI_CAST_PTR(struct acpi_table_rsdp, header)->
                            revision >
                            0) ? ACPI_CAST_PTR(struct acpi_table_rsdp,
@@ -162,8 +165,9 @@ acpi_tb_print_table_header(acpi_physical_address address,
                acpi_tb_cleanup_table_header(&local_header, header);
 
                ACPI_INFO((AE_INFO,
-                          "%4.4s %p %06X (v%.2d %6.6s %8.8s %08X %4.4s %08X)",
-                          local_header.signature, ACPI_CAST_PTR(void, address),
+                          "%-4.4s " ACPI_PRINTF_UINT
+                          " %06X (v%.2d %-6.6s %-8.8s %08X %-4.4s %08X)",
+                          local_header.signature, ACPI_FORMAT_TO_UINT(address),
                           local_header.length, local_header.revision,
                           local_header.oem_id, local_header.oem_table_id,
                           local_header.oem_revision,
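
As the comment in the hunk notes, %p output is not uniform across hosts (field width, 0x prefix, and zero padding all vary), which is why table addresses are now printed through dedicated macros. A standalone illustration of the difference, with made-up macro names that are not ACPICA's definitions:

#include <stdio.h>
#include <stdint.h>

#define ADDR_FMT        "0x%8.8X%8.8X"
#define ADDR_TO_UINT(a) (uint32_t)((uint64_t)(a) >> 32), (uint32_t)(a)

int main(void)
{
    uint64_t address = 0x00000000BFEFD000ULL;   /* example table address */

    /* Implementation-defined output; may also truncate on 32-bit hosts */
    printf("%%p form      : %p\n", (void *)(uintptr_t)address);

    /* Always 16 zero-padded hex digits, regardless of host */
    printf("portable form: " ADDR_FMT "\n", ADDR_TO_UINT(address));
    return 0;
}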
index 6412d3c..a4702ee 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index db826ea..a159315 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 60b5a87..0909420 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index e4e1468..65ab8fe 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 2c2b6ae..a1acec9 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 1851762..efac83c 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 11fde93..3c16997 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index cacd2fd..78fde0a 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index edff4e6..270c164 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -535,10 +535,10 @@ acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
 
        case ACPI_TYPE_LOCAL_REFERENCE:
 
-               /* TBD: should validate incoming handle */
+               /* An incoming reference is defined to be a namespace node */
 
-               internal_object->reference.class = ACPI_REFCLASS_NAME;
-               internal_object->reference.node =
+               internal_object->reference.class = ACPI_REFCLASS_REFOF;
+               internal_object->reference.object =
                    external_object->reference.handle;
                break;
 
index d971c86..21a20ac 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index b3f31dd..fbfa9ec 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index c07d222..a3516de 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -75,6 +75,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
        union acpi_operand_object *handler_desc;
        union acpi_operand_object *second_desc;
        union acpi_operand_object *next_desc;
+       union acpi_operand_object *start_desc;
        union acpi_operand_object **last_obj_ptr;
 
        ACPI_FUNCTION_TRACE_PTR(ut_delete_internal_obj, object);
@@ -235,10 +236,11 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
                        if (handler_desc) {
                                next_desc =
                                    handler_desc->address_space.region_list;
+                               start_desc = next_desc;
                                last_obj_ptr =
                                    &handler_desc->address_space.region_list;
 
-                               /* Remove the region object from the handler's list */
+                               /* Remove the region object from the handler list */
 
                                while (next_desc) {
                                        if (next_desc == object) {
@@ -247,10 +249,19 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
                                                break;
                                        }
 
-                                       /* Walk the linked list of handler */
+                                       /* Walk the linked list of handlers */
 
                                        last_obj_ptr = &next_desc->region.next;
                                        next_desc = next_desc->region.next;
+
+                                       /* Prevent infinite loop if list is corrupted */
+
+                                       if (next_desc == start_desc) {
+                                               ACPI_ERROR((AE_INFO,
+                                                           "Circular region list in address handler object %p",
+                                                           handler_desc));
+                                               return_VOID;
+                                       }
                                }
 
                                if (handler_desc->address_space.handler_flags &
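
The new start_desc/return_VOID logic above guards the region-list walk against a corrupted (circular) list by remembering the first node and bailing out if the walk ever comes back to it. A minimal standalone sketch of that pattern, with illustrative types rather than ACPICA's:

    struct node {
            struct node *next;
    };

    /* Return the number of nodes walked, or -1 if the list loops back on itself. */
    static int walk_list(struct node *head)
    {
            struct node *start = head;
            struct node *cur = head;
            int count = 0;

            while (cur) {
                    count++;
                    cur = cur->next;

                    /* Prevent an infinite loop if the list has been corrupted into a cycle. */
                    if (cur == start)
                            return -1;
            }
            return count;
    }

Note that this only catches cycles passing through the original head; ACPICA's version has the same property, which is sufficient here because the handler's region_list head is where the walk starts.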
index 154fdca..8e544d4 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 16fb905..8fed148 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 3cf7b59..0403dca 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 030cb0d..f3abeae 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -55,31 +55,27 @@ ACPI_MODULE_NAME("utglobal")
  * Static global variable initialization.
  *
  ******************************************************************************/
-/*
- * We want the debug switches statically initialized so they
- * are already set when the debugger is entered.
- */
-/* Debug switch - level and trace mask */
+/* Debug output control masks */
 u32 acpi_dbg_level = ACPI_DEBUG_DEFAULT;
 
-/* Debug switch - layer (component) mask */
-
 u32 acpi_dbg_layer = 0;
-u32 acpi_gbl_nesting_level = 0;
 
-/* Debugger globals */
+/* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */
 
-u8 acpi_gbl_db_terminate_threads = FALSE;
-u8 acpi_gbl_abort_method = FALSE;
-u8 acpi_gbl_method_executing = FALSE;
+struct acpi_table_fadt acpi_gbl_FADT;
+u32 acpi_gbl_trace_flags;
+acpi_name acpi_gbl_trace_method_name;
+u8 acpi_gbl_system_awake_and_running;
+u32 acpi_current_gpe_count;
 
-/* System flags */
-
-u32 acpi_gbl_startup_flags = 0;
-
-/* System starts uninitialized */
+/*
+ * ACPI 5.0 introduces the concept of a "reduced hardware platform", meaning
+ * that the ACPI hardware is no longer required. A flag in the FADT indicates
+ * a reduced HW machine, and that flag is duplicated here for convenience.
+ */
+u8 acpi_gbl_reduced_hardware;
 
-u8 acpi_gbl_shutdown = TRUE;
+/* Various state name strings */
 
 const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT] = {
        "\\_S0_",
@@ -335,7 +331,6 @@ acpi_status acpi_ut_init_globals(void)
 
        acpi_gbl_DSDT = NULL;
        acpi_gbl_cm_single_step = FALSE;
-       acpi_gbl_db_terminate_threads = FALSE;
        acpi_gbl_shutdown = FALSE;
        acpi_gbl_ns_lookup_count = 0;
        acpi_gbl_ps_find_count = 0;
@@ -382,6 +377,10 @@ acpi_status acpi_ut_init_globals(void)
        acpi_gbl_disable_mem_tracking = FALSE;
 #endif
 
+#ifdef ACPI_DEBUGGER
+       acpi_gbl_db_terminate_threads = FALSE;
+#endif
+
        return_ACPI_STATUS(AE_OK);
 }
 
index bfca7b4..4b12880 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index c5d1ac4..5f56fc4 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 5c26ad4..dc6e965 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 909fe66..d44dee6 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 02f9101..2e2bb14 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 08c3232..82717ff 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 517af70..dfa9009 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 8856bd3..685766f 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #define _COMPONENT          ACPI_UTILITIES
 ACPI_MODULE_NAME("utosi")
 
+/******************************************************************************
+ *
+ * ACPICA policy for new _OSI strings:
+ *
+ * It is the stated policy of ACPICA that new _OSI strings will be integrated
+ * into this module as soon as possible after they are defined. It is strongly
+ * recommended that all ACPICA hosts mirror this policy and integrate any
+ * changes to this module as soon as possible. There are several historical
+ * reasons behind this policy:
+ *
+ * 1) New BIOSs tend to test only the case where the host responds TRUE to
+ *    the latest version of Windows, which would respond to the latest/newest
+ *    _OSI string. Not responding TRUE to the latest version of Windows will
+ *    risk executing untested code paths throughout the DSDT and SSDTs.
+ *
+ * 2) If a new _OSI string is recognized only after a significant delay, this
+ *    has the potential to cause problems on existing working machines because
+ *    of the possibility that a new and different path through the ASL code
+ *    will be executed.
+ *
+ * 3) New _OSI strings are tending to come out about once per year. A delay
+ *    in recognizing a new string for a significant amount of time risks the
+ *    release of another string which only compounds the initial problem.
+ *
+ *****************************************************************************/
 /*
  * Strings supported by the _OSI predefined control method (which is
  * implemented internally within this module.)
@@ -74,6 +99,7 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = {
        {"Windows 2006 SP2", NULL, 0, ACPI_OSI_WIN_VISTA_SP2},  /* Windows Vista SP2 - Added 09/2010 */
        {"Windows 2009", NULL, 0, ACPI_OSI_WIN_7},      /* Windows 7 and Server 2008 R2 - Added 09/2009 */
        {"Windows 2012", NULL, 0, ACPI_OSI_WIN_8},      /* Windows 8 and Server 2012 - Added 08/2012 */
+       {"Windows 2013", NULL, 0, ACPI_OSI_WIN_8},      /* Windows 8.1 and Server 2012 R2 - Added 01/2014 */
 
        /* Feature Group Strings */
 
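
The table entry added above is what makes _OSI("Windows 2013") return TRUE. Conceptually, the predefined _OSI method just answers TRUE or FALSE for a requested interface string by consulting a table like the one above; a rough, self-contained sketch of that lookup (hypothetical names, not ACPICA's actual implementation):

    #include <string.h>

    struct osi_entry {
            const char *name;
            unsigned int osi_level;
    };

    static const struct osi_entry osi_table[] = {
            { "Windows 2012", 0 },  /* placeholder values; the real table stores ACPI_OSI_WIN_* levels */
            { "Windows 2013", 0 },
    };

    /* Return nonzero if the requested interface string is supported. */
    static int osi_supported(const char *request)
    {
            size_t i;

            for (i = 0; i < sizeof(osi_table) / sizeof(osi_table[0]); i++)
                    if (!strcmp(osi_table[i].name, request))
                            return 1;
            return 0;
    }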
index eb3aca7..36bec57 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 2b1ce4c..db30caf 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 2c2accb..14cb6c0 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -47,7 +47,8 @@
 
 #define _COMPONENT          ACPI_UTILITIES
 ACPI_MODULE_NAME("utresrc")
-#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
+
+#if defined(ACPI_DEBUG_OUTPUT) || defined (ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
 /*
  * Strings used to decode resource descriptors.
  * Used by both the disassembler and the debugger resource dump routines
index 03c4c2f..1cc97a7 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 45c0eb2..7721933 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index c002777..7d0ee96 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -276,7 +276,8 @@ acpi_ut_free_and_track(void *allocation,
        }
 
        acpi_os_free(debug_block);
-       ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "%p freed\n", allocation));
+       ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "%p freed (block %p)\n",
+                         allocation, debug_block));
        return_VOID;
 }
 
index be322c8..502a849 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index f7edb88..edd8611 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 246ef68..13380d8 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 3122997..2a0f9e0 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 3650b21..c4dac71 100644 (file)
@@ -12,7 +12,7 @@ config ACPI_APEI
 
 config ACPI_APEI_GHES
        bool "APEI Generic Hardware Error Source"
-       depends on ACPI_APEI && X86
+       depends on ACPI_APEI
        select ACPI_HED
        select IRQ_WORK
        select GENERIC_ALLOCATOR
index 797a693..9a2c63b 100644 (file)
 #include <linux/acpi.h>
 #include <linux/power_supply.h>
 
+#include "battery.h"
+
 #define PREFIX "ACPI: "
 
 #define ACPI_BATTERY_VALUE_UNKNOWN 0xFFFFFFFF
 
-#define ACPI_BATTERY_CLASS             "battery"
 #define ACPI_BATTERY_DEVICE_NAME       "Battery"
-#define ACPI_BATTERY_NOTIFY_STATUS     0x80
-#define ACPI_BATTERY_NOTIFY_INFO       0x81
-#define ACPI_BATTERY_NOTIFY_THRESHOLD   0x82
 
 /* Battery power unit: 0 means mW, 1 means mA */
 #define ACPI_BATTERY_POWER_UNIT_MA     1
@@ -736,6 +734,7 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event)
        acpi_bus_generate_netlink_event(device->pnp.device_class,
                                        dev_name(&device->dev), event,
                                        acpi_battery_present(battery));
+       acpi_notifier_call_chain(device, event, acpi_battery_present(battery));
        /* acpi_battery_update could remove power_supply object */
        if (old && battery->bat.dev)
                power_supply_changed(&battery->bat);
diff --git a/drivers/acpi/battery.h b/drivers/acpi/battery.h
new file mode 100644 (file)
index 0000000..6c08497
--- /dev/null
+++ b/drivers/acpi/battery.h
@@ -0,0 +1,10 @@
+#ifndef __ACPI_BATTERY_H
+#define __ACPI_BATTERY_H
+
+#define ACPI_BATTERY_CLASS "battery"
+
+#define ACPI_BATTERY_NOTIFY_STATUS     0x80
+#define ACPI_BATTERY_NOTIFY_INFO       0x81
+#define ACPI_BATTERY_NOTIFY_THRESHOLD   0x82
+
+#endif
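
Splitting these constants out of battery.c, together with the new acpi_notifier_call_chain() call above, lets other ACPI code subscribe to battery events and match them by class. A rough consumer sketch, assuming a hypothetical listener (shown only to illustrate the intended use of battery.h, not code from this series):

    #include <linux/acpi.h>
    #include <linux/notifier.h>
    #include <linux/string.h>
    #include "battery.h"

    static int my_battery_listener(struct notifier_block *nb,
                                   unsigned long val, void *data)
    {
            struct acpi_bus_event *event = data;

            if (!strcmp(event->device_class, ACPI_BATTERY_CLASS) &&
                event->type == ACPI_BATTERY_NOTIFY_STATUS)
                    pr_debug("battery status change, present=%u\n", event->data);

            return NOTIFY_OK;
    }

    static struct notifier_block my_battery_nb = {
            .notifier_call = my_battery_listener,
    };

    /* At init time: register_acpi_notifier(&my_battery_nb); */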
index fcb59c2..e7e5844 100644 (file)
@@ -311,9 +311,7 @@ static void acpi_bus_osc_support(void)
        capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT;
 #endif
 
-#ifdef ACPI_HOTPLUG_OST
        capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT;
-#endif
 
        if (!ghes_disable)
                capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_APEI_SUPPORT;
@@ -340,60 +338,77 @@ static void acpi_bus_osc_support(void)
  */
 static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
 {
-       struct acpi_device *device = NULL;
+       struct acpi_device *adev;
        struct acpi_driver *driver;
-
-       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Notification %#02x to handle %p\n",
-                         type, handle));
+       acpi_status status;
+       u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
 
        switch (type) {
-
        case ACPI_NOTIFY_BUS_CHECK:
-               /* TBD */
+               acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n");
                break;
 
        case ACPI_NOTIFY_DEVICE_CHECK:
-               /* TBD */
+               acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event\n");
                break;
 
        case ACPI_NOTIFY_DEVICE_WAKE:
-               /* TBD */
+               acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_WAKE event\n");
                break;
 
        case ACPI_NOTIFY_EJECT_REQUEST:
-               /* TBD */
+               acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
                break;
 
        case ACPI_NOTIFY_DEVICE_CHECK_LIGHT:
+               acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK_LIGHT event\n");
                /* TBD: Exactly what does 'light' mean? */
                break;
 
        case ACPI_NOTIFY_FREQUENCY_MISMATCH:
-               /* TBD */
+               acpi_handle_err(handle, "Device cannot be configured due "
+                               "to a frequency mismatch\n");
                break;
 
        case ACPI_NOTIFY_BUS_MODE_MISMATCH:
-               /* TBD */
+               acpi_handle_err(handle, "Device cannot be configured due "
+                               "to a bus mode mismatch\n");
                break;
 
        case ACPI_NOTIFY_POWER_FAULT:
-               /* TBD */
+               acpi_handle_err(handle, "Device has suffered a power fault\n");
                break;
 
        default:
-               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                                 "Received unknown/unsupported notification [%08x]\n",
-                                 type));
-               break;
+               acpi_handle_warn(handle, "Unsupported event type 0x%x\n", type);
+               ost_code = ACPI_OST_SC_UNRECOGNIZED_NOTIFY;
+               goto err;
        }
 
-       acpi_bus_get_device(handle, &device);
-       if (device) {
-               driver = device->driver;
-               if (driver && driver->ops.notify &&
-                   (driver->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS))
-                       driver->ops.notify(device, type);
+       adev = acpi_bus_get_acpi_device(handle);
+       if (!adev)
+               goto err;
+
+       driver = adev->driver;
+       if (driver && driver->ops.notify &&
+           (driver->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS))
+               driver->ops.notify(adev, type);
+
+       switch (type) {
+       case ACPI_NOTIFY_BUS_CHECK:
+       case ACPI_NOTIFY_DEVICE_CHECK:
+       case ACPI_NOTIFY_EJECT_REQUEST:
+               status = acpi_hotplug_schedule(adev, type);
+               if (ACPI_SUCCESS(status))
+                       return;
+       default:
+               break;
        }
+       acpi_bus_put_acpi_device(adev);
+       return;
+
+ err:
+       acpi_evaluate_ost(handle, type, ost_code, NULL);
 }
 
 /* --------------------------------------------------------------------------
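
With this rework, acpi_bus_notify() defers hotplug-class notifications to acpi_hotplug_schedule() and answers anything it does not recognize through _OST via the acpi_evaluate_ost() helper. The error path reduces to a pattern like the following sketch (hypothetical wrapper, shown only to illustrate the _OST reporting convention):

    #include <linux/acpi.h>

    /* Tell the platform, via _OST, that a notification value was not recognized. */
    static void report_unrecognized(acpi_handle handle, u32 event)
    {
            acpi_handle_warn(handle, "Unsupported event type 0x%x\n", event);
            acpi_evaluate_ost(handle, event, ACPI_OST_SC_UNRECOGNIZED_NOTIFY, NULL);
    }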
index 714e957..db35594 100644 (file)
@@ -302,6 +302,10 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
                        input_sync(input);
 
                        pm_wakeup_event(&device->dev, 0);
+                       acpi_bus_generate_netlink_event(
+                                       device->pnp.device_class,
+                                       dev_name(&device->dev),
+                                       event, ++button->pushed);
                }
                break;
        default:
index 368f9dd..63119d0 100644 (file)
@@ -31,8 +31,6 @@
 
 #include "internal.h"
 
-#define PREFIX "ACPI: "
-
 #define _COMPONENT                     ACPI_CONTAINER_COMPONENT
 ACPI_MODULE_NAME("container");
 
@@ -68,6 +66,9 @@ static int container_device_attach(struct acpi_device *adev,
        struct device *dev;
        int ret;
 
+       if (adev->flags.is_dock_station)
+               return 0;
+
        cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
        if (!cdev)
                return -ENOMEM;
index c14a00d..d047739 100644 (file)
@@ -901,14 +901,29 @@ EXPORT_SYMBOL_GPL(acpi_dev_resume_early);
 int acpi_subsys_prepare(struct device *dev)
 {
        /*
-        * Follow PCI and resume devices suspended at run time before running
-        * their system suspend callbacks.
+        * Devices having power.ignore_children set may still be necessary for
+        * suspending their children in the next phase of device suspend.
         */
-       pm_runtime_resume(dev);
+       if (dev->power.ignore_children)
+               pm_runtime_resume(dev);
+
        return pm_generic_prepare(dev);
 }
 EXPORT_SYMBOL_GPL(acpi_subsys_prepare);
 
+/**
+ * acpi_subsys_suspend - Run the device driver's suspend callback.
+ * @dev: Device to handle.
+ *
+ * Follow PCI and resume devices suspended at run time before running their
+ * system suspend callbacks.
+ */
+int acpi_subsys_suspend(struct device *dev)
+{
+       pm_runtime_resume(dev);
+       return pm_generic_suspend(dev);
+}
+
 /**
  * acpi_subsys_suspend_late - Suspend device using ACPI.
  * @dev: Device to suspend.
@@ -937,6 +952,23 @@ int acpi_subsys_resume_early(struct device *dev)
        return ret ? ret : pm_generic_resume_early(dev);
 }
 EXPORT_SYMBOL_GPL(acpi_subsys_resume_early);
+
+/**
+ * acpi_subsys_freeze - Run the device driver's freeze callback.
+ * @dev: Device to handle.
+ */
+int acpi_subsys_freeze(struct device *dev)
+{
+       /*
+        * This used to be done in acpi_subsys_prepare() for all devices and
+        * some drivers may depend on it, so do it here.  Ideally, however,
+        * runtime-suspended devices should not be touched during freeze/thaw
+        * transitions.
+        */
+       pm_runtime_resume(dev);
+       return pm_generic_freeze(dev);
+}
+
 #endif /* CONFIG_PM_SLEEP */
 
 static struct dev_pm_domain acpi_general_pm_domain = {
@@ -947,8 +979,11 @@ static struct dev_pm_domain acpi_general_pm_domain = {
 #endif
 #ifdef CONFIG_PM_SLEEP
                .prepare = acpi_subsys_prepare,
+               .suspend = acpi_subsys_suspend,
                .suspend_late = acpi_subsys_suspend_late,
                .resume_early = acpi_subsys_resume_early,
+               .freeze = acpi_subsys_freeze,
+               .poweroff = acpi_subsys_suspend,
                .poweroff_late = acpi_subsys_suspend_late,
                .restore_early = acpi_subsys_resume_early,
 #endif
index 5bfd769..f0fc626 100644 (file)
@@ -1,7 +1,9 @@
 /*
  *  dock.c - ACPI dock station driver
  *
- *  Copyright (C) 2006 Kristen Carlson Accardi <kristen.c.accardi@intel.com>
+ *  Copyright (C) 2006, 2014, Intel Corp.
+ *  Author: Kristen Carlson Accardi <kristen.c.accardi@intel.com>
+ *          Rafael J. Wysocki <rafael.j.wysocki@intel.com>
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
@@ -35,8 +37,6 @@
 
 #include "internal.h"
 
-#define PREFIX "ACPI: "
-
 #define ACPI_DOCK_DRIVER_DESCRIPTION "ACPI Dock Station Driver"
 
 ACPI_MODULE_NAME("dock");
@@ -68,15 +68,10 @@ struct dock_station {
 };
 static LIST_HEAD(dock_stations);
 static int dock_station_count;
-static DEFINE_MUTEX(hotplug_lock);
 
 struct dock_dependent_device {
        struct list_head list;
-       acpi_handle handle;
-       const struct acpi_dock_ops *hp_ops;
-       void *hp_context;
-       unsigned int hp_refcount;
-       void (*hp_release)(void *);
+       struct acpi_device *adev;
 };
 
 #define DOCK_DOCKING   0x00000001
@@ -98,13 +93,13 @@ enum dock_callback_type {
  *****************************************************************************/
 /**
  * add_dock_dependent_device - associate a device with the dock station
- * @ds: The dock station
- * @handle: handle of the dependent device
+ * @ds: Dock station.
+ * @adev: Dependent ACPI device object.
  *
  * Add the dependent device to the dock's dependent device list.
  */
-static int __init
-add_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
+static int add_dock_dependent_device(struct dock_station *ds,
+                                    struct acpi_device *adev)
 {
        struct dock_dependent_device *dd;
 
@@ -112,180 +107,120 @@ add_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
        if (!dd)
                return -ENOMEM;
 
-       dd->handle = handle;
+       dd->adev = adev;
        INIT_LIST_HEAD(&dd->list);
        list_add_tail(&dd->list, &ds->dependent_devices);
 
        return 0;
 }
 
-static void remove_dock_dependent_devices(struct dock_station *ds)
+static void dock_hotplug_event(struct dock_dependent_device *dd, u32 event,
+                              enum dock_callback_type cb_type)
 {
-       struct dock_dependent_device *dd, *aux;
+       struct acpi_device *adev = dd->adev;
 
-       list_for_each_entry_safe(dd, aux, &ds->dependent_devices, list) {
-               list_del(&dd->list);
-               kfree(dd);
-       }
-}
+       acpi_lock_hp_context();
 
-/**
- * dock_init_hotplug - Initialize a hotplug device on a docking station.
- * @dd: Dock-dependent device.
- * @ops: Dock operations to attach to the dependent device.
- * @context: Data to pass to the @ops callbacks and @release.
- * @init: Optional initialization routine to run after setting up context.
- * @release: Optional release routine to run on removal.
- */
-static int dock_init_hotplug(struct dock_dependent_device *dd,
-                            const struct acpi_dock_ops *ops, void *context,
-                            void (*init)(void *), void (*release)(void *))
-{
-       int ret = 0;
+       if (!adev->hp)
+               goto out;
 
-       mutex_lock(&hotplug_lock);
-       if (WARN_ON(dd->hp_context)) {
-               ret = -EEXIST;
-       } else {
-               dd->hp_refcount = 1;
-               dd->hp_ops = ops;
-               dd->hp_context = context;
-               dd->hp_release = release;
-               if (init)
-                       init(context);
-       }
-       mutex_unlock(&hotplug_lock);
-       return ret;
-}
+       if (cb_type == DOCK_CALL_FIXUP) {
+               void (*fixup)(struct acpi_device *);
 
-/**
- * dock_release_hotplug - Decrement hotplug reference counter of dock device.
- * @dd: Dock-dependent device.
- *
- * Decrement the reference counter of @dd and if 0, detach its hotplug
- * operations from it, reset its context pointer and run the optional release
- * routine if present.
- */
-static void dock_release_hotplug(struct dock_dependent_device *dd)
-{
-       mutex_lock(&hotplug_lock);
-       if (dd->hp_context && !--dd->hp_refcount) {
-               void (*release)(void *) = dd->hp_release;
-               void *context = dd->hp_context;
-
-               dd->hp_ops = NULL;
-               dd->hp_context = NULL;
-               dd->hp_release = NULL;
-               if (release)
-                       release(context);
-       }
-       mutex_unlock(&hotplug_lock);
-}
+               fixup = adev->hp->fixup;
+               if (fixup) {
+                       acpi_unlock_hp_context();
+                       fixup(adev);
+                       return;
+               }
+       } else if (cb_type == DOCK_CALL_UEVENT) {
+               void (*uevent)(struct acpi_device *, u32);
+
+               uevent = adev->hp->uevent;
+               if (uevent) {
+                       acpi_unlock_hp_context();
+                       uevent(adev, event);
+                       return;
+               }
+       } else {
+               int (*notify)(struct acpi_device *, u32);
 
-static void dock_hotplug_event(struct dock_dependent_device *dd, u32 event,
-                              enum dock_callback_type cb_type)
-{
-       acpi_notify_handler cb = NULL;
-       bool run = false;
-
-       mutex_lock(&hotplug_lock);
-
-       if (dd->hp_context) {
-               run = true;
-               dd->hp_refcount++;
-               if (dd->hp_ops) {
-                       switch (cb_type) {
-                       case DOCK_CALL_FIXUP:
-                               cb = dd->hp_ops->fixup;
-                               break;
-                       case DOCK_CALL_UEVENT:
-                               cb = dd->hp_ops->uevent;
-                               break;
-                       default:
-                               cb = dd->hp_ops->handler;
-                       }
+               notify = adev->hp->notify;
+               if (notify) {
+                       acpi_unlock_hp_context();
+                       notify(adev, event);
+                       return;
                }
        }
 
-       mutex_unlock(&hotplug_lock);
+ out:
+       acpi_unlock_hp_context();
+}
 
-       if (!run)
-               return;
+static struct dock_station *find_dock_station(acpi_handle handle)
+{
+       struct dock_station *ds;
 
-       if (cb)
-               cb(dd->handle, event, dd->hp_context);
+       list_for_each_entry(ds, &dock_stations, sibling)
+               if (ds->handle == handle)
+                       return ds;
 
-       dock_release_hotplug(dd);
+       return NULL;
 }
 
 /**
  * find_dock_dependent_device - get a device dependent on this dock
  * @ds: the dock station
- * @handle: the acpi_handle of the device we want
+ * @adev: ACPI device object to find.
  *
  * iterate over the dependent device list for this dock.  If the
  * dependent device matches the handle, return.
  */
 static struct dock_dependent_device *
-find_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
+find_dock_dependent_device(struct dock_station *ds, struct acpi_device *adev)
 {
        struct dock_dependent_device *dd;
 
        list_for_each_entry(dd, &ds->dependent_devices, list)
-               if (handle == dd->handle)
+               if (adev == dd->adev)
                        return dd;
 
        return NULL;
 }
 
-/*****************************************************************************
- *                         Dock functions                                    *
- *****************************************************************************/
-static int __init is_battery(acpi_handle handle)
+void register_dock_dependent_device(struct acpi_device *adev,
+                                   acpi_handle dshandle)
 {
-       struct acpi_device_info *info;
-       int ret = 1;
+       struct dock_station *ds = find_dock_station(dshandle);
 
-       if (!ACPI_SUCCESS(acpi_get_object_info(handle, &info)))
-               return 0;
-       if (!(info->valid & ACPI_VALID_HID))
-               ret = 0;
-       else
-               ret = !strcmp("PNP0C0A", info->hardware_id.string);
-
-       kfree(info);
-       return ret;
+       if (ds && !find_dock_dependent_device(ds, adev))
+               add_dock_dependent_device(ds, adev);
 }
 
-/* Check whether ACPI object is an ejectable battery or disk bay */
-static bool __init is_ejectable_bay(acpi_handle handle)
-{
-       if (acpi_has_method(handle, "_EJ0") && is_battery(handle))
-               return true;
-
-       return acpi_bay_match(handle);
-}
+/*****************************************************************************
+ *                         Dock functions                                    *
+ *****************************************************************************/
 
 /**
  * is_dock_device - see if a device is on a dock station
- * @handle: acpi handle of the device
+ * @adev: ACPI device object to check.
  *
  * If this device is either the dock station itself,
  * or is a device dependent on the dock station, then it
  * is a dock device
  */
-int is_dock_device(acpi_handle handle)
+int is_dock_device(struct acpi_device *adev)
 {
        struct dock_station *dock_station;
 
        if (!dock_station_count)
                return 0;
 
-       if (acpi_dock_match(handle))
+       if (acpi_dock_match(adev->handle))
                return 1;
 
        list_for_each_entry(dock_station, &dock_stations, sibling)
-               if (find_dock_dependent_device(dock_station, handle))
+               if (find_dock_dependent_device(dock_station, adev))
                        return 1;
 
        return 0;
@@ -312,43 +247,6 @@ static int dock_present(struct dock_station *ds)
        return 0;
 }
 
-/**
- * dock_create_acpi_device - add new devices to acpi
- * @handle - handle of the device to add
- *
- *  This function will create a new acpi_device for the given
- *  handle if one does not exist already.  This should cause
- *  acpi to scan for drivers for the given devices, and call
- *  matching driver's add routine.
- */
-static void dock_create_acpi_device(acpi_handle handle)
-{
-       struct acpi_device *device = NULL;
-       int ret;
-
-       acpi_bus_get_device(handle, &device);
-       if (!acpi_device_enumerated(device)) {
-               ret = acpi_bus_scan(handle);
-               if (ret)
-                       pr_debug("error adding bus, %x\n", -ret);
-       }
-}
-
-/**
- * dock_remove_acpi_device - remove the acpi_device struct from acpi
- * @handle - the handle of the device to remove
- *
- *  Tell acpi to remove the acpi_device.  This should cause any loaded
- *  driver to have it's remove routine called.
- */
-static void dock_remove_acpi_device(acpi_handle handle)
-{
-       struct acpi_device *device;
-
-       if (!acpi_bus_get_device(handle, &device))
-               acpi_bus_trim(device);
-}
-
 /**
  * hot_remove_dock_devices - Remove dock station devices.
  * @ds: Dock station.
@@ -366,7 +264,7 @@ static void hot_remove_dock_devices(struct dock_station *ds)
                dock_hotplug_event(dd, ACPI_NOTIFY_EJECT_REQUEST, false);
 
        list_for_each_entry_reverse(dd, &ds->dependent_devices, list)
-               dock_remove_acpi_device(dd->handle);
+               acpi_bus_trim(dd->adev);
 }
 
 /**
@@ -392,12 +290,20 @@ static void hotplug_dock_devices(struct dock_station *ds, u32 event)
                dock_hotplug_event(dd, event, DOCK_CALL_HANDLER);
 
        /*
-        * Now make sure that an acpi_device is created for each dependent
-        * device.  That will cause scan handlers to be attached to device
-        * objects or acpi_drivers to be stopped/started if they are present.
+        * Check if all devices have been enumerated already.  If not, run
+        * acpi_bus_scan() for them and that will cause scan handlers to be
+        * attached to device objects or acpi_drivers to be stopped/started if
+        * they are present.
         */
-       list_for_each_entry(dd, &ds->dependent_devices, list)
-               dock_create_acpi_device(dd->handle);
+       list_for_each_entry(dd, &ds->dependent_devices, list) {
+               struct acpi_device *adev = dd->adev;
+
+               if (!acpi_device_enumerated(adev)) {
+                       int ret = acpi_bus_scan(adev->handle);
+                       if (ret)
+                               dev_dbg(&adev->dev, "scan error %d\n", -ret);
+               }
+       }
 }
 
 static void dock_event(struct dock_station *ds, u32 event, int num)
@@ -500,71 +406,6 @@ static int dock_in_progress(struct dock_station *ds)
        return 0;
 }
 
-/**
- * register_hotplug_dock_device - register a hotplug function
- * @handle: the handle of the device
- * @ops: handlers to call after docking
- * @context: device specific data
- * @init: Optional initialization routine to run after registration
- * @release: Optional release routine to run on unregistration
- *
- * If a driver would like to perform a hotplug operation after a dock
- * event, they can register an acpi_notifiy_handler to be called by
- * the dock driver after _DCK is executed.
- */
-int register_hotplug_dock_device(acpi_handle handle,
-                                const struct acpi_dock_ops *ops, void *context,
-                                void (*init)(void *), void (*release)(void *))
-{
-       struct dock_dependent_device *dd;
-       struct dock_station *dock_station;
-       int ret = -EINVAL;
-
-       if (WARN_ON(!context))
-               return -EINVAL;
-
-       if (!dock_station_count)
-               return -ENODEV;
-
-       /*
-        * make sure this handle is for a device dependent on the dock,
-        * this would include the dock station itself
-        */
-       list_for_each_entry(dock_station, &dock_stations, sibling) {
-               /*
-                * An ATA bay can be in a dock and itself can be ejected
-                * separately, so there are two 'dock stations' which need the
-                * ops
-                */
-               dd = find_dock_dependent_device(dock_station, handle);
-               if (dd && !dock_init_hotplug(dd, ops, context, init, release))
-                       ret = 0;
-       }
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(register_hotplug_dock_device);
-
-/**
- * unregister_hotplug_dock_device - remove yourself from the hotplug list
- * @handle: the acpi handle of the device
- */
-void unregister_hotplug_dock_device(acpi_handle handle)
-{
-       struct dock_dependent_device *dd;
-       struct dock_station *dock_station;
-
-       if (!dock_station_count)
-               return;
-
-       list_for_each_entry(dock_station, &dock_stations, sibling) {
-               dd = find_dock_dependent_device(dock_station, handle);
-               if (dd)
-                       dock_release_hotplug(dd);
-       }
-}
-EXPORT_SYMBOL_GPL(unregister_hotplug_dock_device);
-
 /**
  * handle_eject_request - handle an undock request checking for error conditions
  *
@@ -598,20 +439,23 @@ static int handle_eject_request(struct dock_station *ds, u32 event)
 }
 
 /**
- * dock_notify - act upon an acpi dock notification
- * @ds: dock station
- * @event: the acpi event
+ * dock_notify - Handle ACPI dock notification.
+ * @adev: Dock station's ACPI device object.
+ * @event: Event code.
  *
  * If we are notified to dock, then check to see if the dock is
  * present and then dock.  Notify all drivers of the dock event,
  * and then hotplug and devices that may need hotplugging.
  */
-static void dock_notify(struct dock_station *ds, u32 event)
+int dock_notify(struct acpi_device *adev, u32 event)
 {
-       acpi_handle handle = ds->handle;
-       struct acpi_device *adev = NULL;
+       acpi_handle handle = adev->handle;
+       struct dock_station *ds = find_dock_station(handle);
        int surprise_removal = 0;
 
+       if (!ds)
+               return -ENODEV;
+
        /*
         * According to acpi spec 3.0a, if a DEVICE_CHECK notification
         * is sent and _DCK is present, it is assumed to mean an undock
@@ -632,7 +476,6 @@ static void dock_notify(struct dock_station *ds, u32 event)
        switch (event) {
        case ACPI_NOTIFY_BUS_CHECK:
        case ACPI_NOTIFY_DEVICE_CHECK:
-               acpi_bus_get_device(handle, &adev);
                if (!dock_in_progress(ds) && !acpi_device_enumerated(adev)) {
                        begin_dock(ds);
                        dock(ds);
@@ -662,49 +505,8 @@ static void dock_notify(struct dock_station *ds, u32 event)
                else
                        dock_event(ds, event, UNDOCK_EVENT);
                break;
-       default:
-               acpi_handle_err(handle, "Unknown dock event %d\n", event);
        }
-}
-
-static void acpi_dock_deferred_cb(void *data, u32 event)
-{
-       acpi_scan_lock_acquire();
-       dock_notify(data, event);
-       acpi_scan_lock_release();
-}
-
-static void dock_notify_handler(acpi_handle handle, u32 event, void *data)
-{
-       if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK
-          && event != ACPI_NOTIFY_EJECT_REQUEST)
-               return;
-
-       acpi_hotplug_execute(acpi_dock_deferred_cb, data, event);
-}
-
-/**
- * find_dock_devices - find devices on the dock station
- * @handle: the handle of the device we are examining
- * @lvl: unused
- * @context: the dock station private data
- * @rv: unused
- *
- * This function is called by acpi_walk_namespace.  It will
- * check to see if an object has an _EJD method.  If it does, then it
- * will see if it is dependent on the dock station.
- */
-static acpi_status __init find_dock_devices(acpi_handle handle, u32 lvl,
-                                           void *context, void **rv)
-{
-       struct dock_station *ds = context;
-       acpi_handle ejd = NULL;
-
-       acpi_bus_get_ejd(handle, &ejd);
-       if (ejd == ds->handle)
-               add_dock_dependent_device(ds, handle);
-
-       return AE_OK;
+       return 0;
 }
 
 /*
@@ -803,23 +605,28 @@ static struct attribute_group dock_attribute_group = {
 };
 
 /**
- * dock_add - add a new dock station
- * @handle: the dock station handle
+ * acpi_dock_add - Add a new dock station
+ * @adev: Dock station ACPI device object.
  *
- * allocated and initialize a new dock station device.  Find all devices
- * that are on the dock station, and register for dock event notifications.
+ * allocated and initialize a new dock station device.
  */
-static int __init dock_add(acpi_handle handle)
+void acpi_dock_add(struct acpi_device *adev)
 {
        struct dock_station *dock_station, ds = { NULL, };
+       struct platform_device_info pdevinfo;
+       acpi_handle handle = adev->handle;
        struct platform_device *dd;
-       acpi_status status;
        int ret;
 
-       dd = platform_device_register_data(NULL, "dock", dock_station_count,
-                                          &ds, sizeof(ds));
+       memset(&pdevinfo, 0, sizeof(pdevinfo));
+       pdevinfo.name = "dock";
+       pdevinfo.id = dock_station_count;
+       pdevinfo.acpi_node.companion = adev;
+       pdevinfo.data = &ds;
+       pdevinfo.size_data = sizeof(ds);
+       dd = platform_device_register_full(&pdevinfo);
        if (IS_ERR(dd))
-               return PTR_ERR(dd);
+               return;
 
        dock_station = dd->dev.platform_data;
 
@@ -837,72 +644,29 @@ static int __init dock_add(acpi_handle handle)
                dock_station->flags |= DOCK_IS_DOCK;
        if (acpi_ata_match(handle))
                dock_station->flags |= DOCK_IS_ATA;
-       if (is_battery(handle))
+       if (acpi_device_is_battery(adev))
                dock_station->flags |= DOCK_IS_BAT;
 
        ret = sysfs_create_group(&dd->dev.kobj, &dock_attribute_group);
        if (ret)
                goto err_unregister;
 
-       /* Find dependent devices */
-       acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
-                           ACPI_UINT32_MAX, find_dock_devices, NULL,
-                           dock_station, NULL);
-
        /* add the dock station as a device dependent on itself */
-       ret = add_dock_dependent_device(dock_station, handle);
+       ret = add_dock_dependent_device(dock_station, adev);
        if (ret)
                goto err_rmgroup;
 
-       status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
-                                            dock_notify_handler, dock_station);
-       if (ACPI_FAILURE(status)) {
-               ret = -ENODEV;
-               goto err_rmgroup;
-       }
-
        dock_station_count++;
        list_add(&dock_station->sibling, &dock_stations);
-       return 0;
+       adev->flags.is_dock_station = true;
+       dev_info(&adev->dev, "ACPI dock station (docks/bays count: %d)\n",
+                dock_station_count);
+       return;
 
 err_rmgroup:
-       remove_dock_dependent_devices(dock_station);
        sysfs_remove_group(&dd->dev.kobj, &dock_attribute_group);
+
 err_unregister:
        platform_device_unregister(dd);
        acpi_handle_err(handle, "%s encountered error %d\n", __func__, ret);
-       return ret;
-}
-
-/**
- * find_dock_and_bay - look for dock stations and bays
- * @handle: acpi handle of a device
- * @lvl: unused
- * @context: unused
- * @rv: unused
- *
- * This is called by acpi_walk_namespace to look for dock stations and bays.
- */
-static acpi_status __init
-find_dock_and_bay(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
-       if (acpi_dock_match(handle) || is_ejectable_bay(handle))
-               dock_add(handle);
-
-       return AE_OK;
-}
-
-void __init acpi_dock_init(void)
-{
-       /* look for dock stations and bays */
-       acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
-               ACPI_UINT32_MAX, find_dock_and_bay, NULL, NULL, NULL);
-
-       if (!dock_station_count) {
-               pr_info(PREFIX "No dock devices found.\n");
-               return;
-       }
-
-       pr_info(PREFIX "%s: %d docks/bays found\n",
-               ACPI_DOCK_DRIVER_DESCRIPTION, dock_station_count);
 }
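
With register_hotplug_dock_device() and the private hp_ops plumbing gone, a device that wants dock callbacks now exposes them through its struct acpi_device's hotplug context (adev->hp), which dock_hotplug_event() consults above. A rough sketch of how a dependent subsystem could wire that up via acpi_initialize_hp_context() (illustrative names; in this series the actual user is the ACPI-based PCI hotplug code):

    #include <linux/acpi.h>

    static int my_dock_notify(struct acpi_device *adev, u32 event)
    {
            dev_info(&adev->dev, "dock event 0x%x\n", event);
            return 0;
    }

    static void my_dock_uevent(struct acpi_device *adev, u32 event)
    {
            /* forward the event to user space, update sysfs, ... */
    }

    /* Real code would allocate one context per device. */
    static struct acpi_hotplug_context my_hp;

    static void my_attach(struct acpi_device *adev)
    {
            /* Associates the callbacks with adev; dock_hotplug_event() will find them. */
            acpi_initialize_hp_context(adev, &my_hp, my_dock_notify, my_dock_uevent);
    }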
index 09e423f..8acf53e 100644 (file)
@@ -55,11 +55,16 @@ MODULE_DEVICE_TABLE(acpi, fan_device_ids);
 #ifdef CONFIG_PM_SLEEP
 static int acpi_fan_suspend(struct device *dev);
 static int acpi_fan_resume(struct device *dev);
+static struct dev_pm_ops acpi_fan_pm = {
+       .resume = acpi_fan_resume,
+       .freeze = acpi_fan_suspend,
+       .thaw = acpi_fan_resume,
+       .restore = acpi_fan_resume,
+};
+#define FAN_PM_OPS_PTR (&acpi_fan_pm)
 #else
-#define acpi_fan_suspend NULL
-#define acpi_fan_resume NULL
+#define FAN_PM_OPS_PTR NULL
 #endif
-static SIMPLE_DEV_PM_OPS(acpi_fan_pm, acpi_fan_suspend, acpi_fan_resume);
 
 static struct acpi_driver acpi_fan_driver = {
        .name = "fan",
@@ -69,7 +74,7 @@ static struct acpi_driver acpi_fan_driver = {
                .add = acpi_fan_add,
                .remove = acpi_fan_remove,
                },
-       .drv.pm = &acpi_fan_pm,
+       .drv.pm = FAN_PM_OPS_PTR,
 };
 
 /* thermal cooling device callbacks */
index 0c78922..f774c65 100644 (file)
@@ -287,6 +287,7 @@ EXPORT_SYMBOL_GPL(acpi_unbind_one);
 static int acpi_platform_notify(struct device *dev)
 {
        struct acpi_bus_type *type = acpi_get_bus_type(dev);
+       struct acpi_device *adev;
        int ret;
 
        ret = acpi_bind_one(dev, NULL);
@@ -303,9 +304,14 @@ static int acpi_platform_notify(struct device *dev)
                if (ret)
                        goto out;
        }
+       adev = ACPI_COMPANION(dev);
+       if (!adev)
+               goto out;
 
        if (type && type->setup)
                type->setup(dev);
+       else if (adev->handler && adev->handler->bind)
+               adev->handler->bind(dev);
 
  out:
 #if ACPI_GLUE_DEBUG
@@ -324,11 +330,17 @@ static int acpi_platform_notify(struct device *dev)
 
 static int acpi_platform_notify_remove(struct device *dev)
 {
+       struct acpi_device *adev = ACPI_COMPANION(dev);
        struct acpi_bus_type *type;
 
+       if (!adev)
+               return 0;
+
        type = acpi_get_bus_type(dev);
        if (type && type->cleanup)
                type->cleanup(dev);
+       else if (adev->handler && adev->handler->unbind)
+               adev->handler->unbind(dev);
 
        acpi_unbind_one(dev);
        return 0;
index dedbb2d..9573913 100644 (file)
@@ -37,9 +37,15 @@ void acpi_container_init(void);
 static inline void acpi_container_init(void) {}
 #endif
 #ifdef CONFIG_ACPI_DOCK
-void acpi_dock_init(void);
+void register_dock_dependent_device(struct acpi_device *adev,
+                                   acpi_handle dshandle);
+int dock_notify(struct acpi_device *adev, u32 event);
+void acpi_dock_add(struct acpi_device *adev);
 #else
-static inline void acpi_dock_init(void) {}
+static inline void register_dock_dependent_device(struct acpi_device *adev,
+                                                 acpi_handle dshandle) {}
+static inline int dock_notify(struct acpi_device *adev, u32 event) { return -ENODEV; }
+static inline void acpi_dock_add(struct acpi_device *adev) {}
 #endif
 #ifdef CONFIG_ACPI_HOTPLUG_MEMORY
 void acpi_memory_hotplug_init(void);
@@ -72,7 +78,9 @@ void acpi_lpss_init(void);
 static inline void acpi_lpss_init(void) {}
 #endif
 
+acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src);
 bool acpi_queue_hotplug_work(struct work_struct *work);
+void acpi_device_hotplug(struct acpi_device *adev, u32 src);
 bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent);
 
 /* --------------------------------------------------------------------------
@@ -90,6 +98,7 @@ void acpi_free_pnp_ids(struct acpi_device_pnp *pnp);
 int acpi_bind_one(struct device *dev, struct acpi_device *adev);
 int acpi_unbind_one(struct device *dev);
 bool acpi_device_is_present(struct acpi_device *adev);
+bool acpi_device_is_battery(struct acpi_device *adev);
 
 /* --------------------------------------------------------------------------
                                   Power Resource
index fc1aa79..27f84af 100644 (file)
@@ -52,7 +52,7 @@
 
 #define _COMPONENT             ACPI_OS_SERVICES
 ACPI_MODULE_NAME("osl");
-#define PREFIX         "ACPI: "
+
 struct acpi_os_dpc {
        acpi_osd_exec_callback function;
        void *context;
@@ -1168,8 +1168,7 @@ void acpi_os_wait_events_complete(void)
 
 struct acpi_hp_work {
        struct work_struct work;
-       acpi_hp_callback func;
-       void *data;
+       struct acpi_device *adev;
        u32 src;
 };
 
@@ -1178,25 +1177,24 @@ static void acpi_hotplug_work_fn(struct work_struct *work)
        struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);
 
        acpi_os_wait_events_complete();
-       hpw->func(hpw->data, hpw->src);
+       acpi_device_hotplug(hpw->adev, hpw->src);
        kfree(hpw);
 }
 
-acpi_status acpi_hotplug_execute(acpi_hp_callback func, void *data, u32 src)
+acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
 {
        struct acpi_hp_work *hpw;
 
        ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-                 "Scheduling function [%p(%p, %u)] for deferred execution.\n",
-                 func, data, src));
+                 "Scheduling hotplug event (%p, %u) for deferred execution.\n",
+                 adev, src));
 
        hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
        if (!hpw)
                return AE_NO_MEMORY;
 
        INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
-       hpw->func = func;
-       hpw->data = data;
+       hpw->adev = adev;
        hpw->src = src;
        /*
         * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
@@ -1780,6 +1778,17 @@ static int __init acpi_no_auto_ssdt_setup(char *s)
 
 __setup("acpi_no_auto_ssdt", acpi_no_auto_ssdt_setup);
 
+static int __init acpi_disable_return_repair(char *s)
+{
+       printk(KERN_NOTICE PREFIX
+              "ACPI: Predefined validation mechanism disabled\n");
+       acpi_gbl_disable_auto_repair = TRUE;
+
+       return 1;
+}
+
+__setup("acpica_no_return_repair", acpi_disable_return_repair);
+
 acpi_status __init acpi_os_initialize(void)
 {
        acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
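
The new __setup() hook above adds a boot-time switch: passing acpica_no_return_repair on the kernel command line disables ACPICA's automatic repair of predefined method return values. For example, in a GRUB-style boot entry (the kernel image path and root device are illustrative):

    linux /boot/vmlinuz root=/dev/sda1 acpica_no_return_repair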
index 361b40c..9c62340 100644 (file)
@@ -370,6 +370,30 @@ static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin)
        return NULL;
 }
 
+#if IS_ENABLED(CONFIG_ISA) || IS_ENABLED(CONFIG_EISA)
+static int acpi_isa_register_gsi(struct pci_dev *dev)
+{
+       u32 dev_gsi;
+
+       /* Interrupt Line values above 0xF are forbidden */
+       if (dev->irq > 0 && (dev->irq <= 0xF) &&
+           (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) {
+               dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n",
+                        pin_name(dev->pin), dev->irq);
+               acpi_register_gsi(&dev->dev, dev_gsi,
+                                 ACPI_LEVEL_SENSITIVE,
+                                 ACPI_ACTIVE_LOW);
+               return 0;
+       }
+       return -EINVAL;
+}
+#else
+static inline int acpi_isa_register_gsi(struct pci_dev *dev)
+{
+       return -ENODEV;
+}
+#endif
+
 int acpi_pci_irq_enable(struct pci_dev *dev)
 {
        struct acpi_prt_entry *entry;
@@ -416,19 +440,9 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
         * driver reported one, then use it. Exit in any case.
         */
        if (gsi < 0) {
-               u32 dev_gsi;
-               /* Interrupt Line values above 0xF are forbidden */
-               if (dev->irq > 0 && (dev->irq <= 0xF) &&
-                   (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) {
-                       dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n",
-                                pin_name(pin), dev->irq);
-                       acpi_register_gsi(&dev->dev, dev_gsi,
-                                         ACPI_LEVEL_SENSITIVE,
-                                         ACPI_ACTIVE_LOW);
-               } else {
+               if (acpi_isa_register_gsi(dev))
                        dev_warn(&dev->dev, "PCI INT %c: no GSI\n",
                                 pin_name(pin));
-               }
 
                kfree(entry);
                return 0;
index 9418c7a..cfd7581 100644 (file)
@@ -43,8 +43,6 @@
 
 #include "internal.h"
 
-#define PREFIX "ACPI: "
-
 #define _COMPONENT                     ACPI_PCI_COMPONENT
 ACPI_MODULE_NAME("pci_link");
 #define ACPI_PCI_LINK_CLASS            "pci_irq_routing"
index c1c4102..d388f13 100644 (file)
@@ -39,8 +39,6 @@
 
 #include "internal.h"
 
-#define PREFIX "ACPI: "
-
 #define _COMPONENT             ACPI_PCI_COMPONENT
 ACPI_MODULE_NAME("pci_root");
 #define ACPI_PCI_ROOT_CLASS            "pci_bridge"
@@ -51,7 +49,7 @@ static void acpi_pci_root_remove(struct acpi_device *device);
 
 static int acpi_pci_root_scan_dependent(struct acpi_device *adev)
 {
-       acpiphp_check_host_bridge(adev->handle);
+       acpiphp_check_host_bridge(adev);
        return 0;
 }
 
index ad7da68..e0bcfb6 100644 (file)
@@ -46,8 +46,6 @@
 #include "sleep.h"
 #include "internal.h"
 
-#define PREFIX "ACPI: "
-
 #define _COMPONENT                     ACPI_POWER_COMPONENT
 ACPI_MODULE_NAME("power");
 #define ACPI_POWER_CLASS               "power_resource"
index a4eea9a..86d73d5 100644 (file)
 
 #include "internal.h"
 
-#define PREFIX                 "ACPI: "
 #define _COMPONENT             ACPI_PROCESSOR_COMPONENT
 ACPI_MODULE_NAME("processor_core");
 
-static int __init set_no_mwait(const struct dmi_system_id *id)
-{
-       printk(KERN_NOTICE PREFIX "%s detected - "
-               "disabling mwait for CPU C-states\n", id->ident);
-       boot_option_idle_override = IDLE_NOMWAIT;
-       return 0;
-}
-
-static struct dmi_system_id processor_idle_dmi_table[] __initdata = {
-       {
-       set_no_mwait, "Extensa 5220", {
-       DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
-       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
-       DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
-       DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
-       {},
-};
-
 static int map_lapic_id(struct acpi_subtable_header *entry,
                 u32 acpi_id, int *apic_id)
 {
@@ -89,6 +70,28 @@ static int map_lsapic_id(struct acpi_subtable_header *entry,
        return 0;
 }
 
+static int map_gic_id(struct acpi_subtable_header *entry,
+               int device_declaration, u32 acpi_id, int *apic_id)
+{
+       struct acpi_madt_generic_interrupt *gic =
+               (struct acpi_madt_generic_interrupt *)entry;
+
+       if (!(gic->flags & ACPI_MADT_ENABLED))
+               return -ENODEV;
+
+       /*
+        * In the GIC interrupt model, logical processors are
+        * required to have a Processor Device object in the DSDT,
+        * so we should check device_declaration here
+        */
+       if (device_declaration && (gic->uid == acpi_id)) {
+               *apic_id = gic->gic_id;
+               return 0;
+       }
+
+       return -EINVAL;
+}
+
 static int map_madt_entry(int type, u32 acpi_id)
 {
        unsigned long madt_end, entry;
@@ -124,6 +127,9 @@ static int map_madt_entry(int type, u32 acpi_id)
                } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
                        if (!map_lsapic_id(header, type, acpi_id, &apic_id))
                                break;
+               } else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) {
+                       if (!map_gic_id(header, type, acpi_id, &apic_id))
+                               break;
                }
                entry += header->length;
        }
@@ -154,6 +160,8 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
                map_lapic_id(header, acpi_id, &apic_id);
        } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
                map_lsapic_id(header, type, acpi_id, &apic_id);
+       } else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) {
+               map_gic_id(header, type, acpi_id, &apic_id);
        }
 
 exit:
@@ -323,7 +331,7 @@ static struct acpi_object_list *acpi_processor_alloc_pdc(void)
  * _PDC is required for a BIOS-OS handshake for most of the newer
  * ACPI processor features.
  */
-static int
+static acpi_status
 acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
 {
        acpi_status status = AE_OK;
@@ -379,16 +387,43 @@ early_init_pdc(acpi_handle handle, u32 lvl, void *context, void **rv)
        return AE_OK;
 }
 
-void __init acpi_early_processor_set_pdc(void)
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+static int __init set_no_mwait(const struct dmi_system_id *id)
+{
+       pr_notice(PREFIX "%s detected - disabling mwait for CPU C-states\n",
+                 id->ident);
+       boot_option_idle_override = IDLE_NOMWAIT;
+       return 0;
+}
+
+static struct dmi_system_id processor_idle_dmi_table[] __initdata = {
+       {
+       set_no_mwait, "Extensa 5220", {
+       DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
+       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+       DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
+       DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
+       {},
+};
+
+static void __init processor_dmi_check(void)
 {
        /*
         * Check whether the system is listed in the DMI table. If yes,
         * OSPM should not use mwait for CPU C-states.
         */
        dmi_check_system(processor_idle_dmi_table);
+}
+#else
+static inline void processor_dmi_check(void) {}
+#endif
+
+void __init acpi_early_processor_set_pdc(void)
+{
+       processor_dmi_check();
 
        acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
                            ACPI_UINT32_MAX,
                            early_init_pdc, NULL, NULL, NULL);
-       acpi_get_devices("ACPI0007", early_init_pdc, NULL, NULL);
+       acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, early_init_pdc, NULL, NULL);
 }
index c1c3562..7f70f31 100644 (file)
@@ -41,8 +41,6 @@
 
 #include "internal.h"
 
-#define PREFIX "ACPI: "
-
 #define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
 #define ACPI_PROCESSOR_NOTIFY_POWER    0x81
 #define ACPI_PROCESSOR_NOTIFY_THROTTLING       0x82
index ff90054..cfc8aba 100644 (file)
@@ -156,17 +156,9 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
  */
 static void acpi_processor_ppc_ost(acpi_handle handle, int status)
 {
-       union acpi_object params[2] = {
-               {.type = ACPI_TYPE_INTEGER,},
-               {.type = ACPI_TYPE_INTEGER,},
-       };
-       struct acpi_object_list arg_list = {2, params};
-
-       if (acpi_has_method(handle, "_OST")) {
-               params[0].integer.value = ACPI_PROCESSOR_NOTIFY_PERFORMANCE;
-               params[1].integer.value =  status;
-               acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
-       }
+       if (acpi_has_method(handle, "_OST"))
+               acpi_evaluate_ost(handle, ACPI_PROCESSOR_NOTIFY_PERFORMANCE,
+                                 status, NULL);
 }
 
 int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
index dbd4849..366ca40 100644 (file)
 #include <linux/power_supply.h>
 
 #include "sbshc.h"
+#include "battery.h"
 
 #define PREFIX "ACPI: "
 
 #define ACPI_SBS_CLASS                 "sbs"
 #define ACPI_AC_CLASS                  "ac_adapter"
-#define ACPI_BATTERY_CLASS             "battery"
 #define ACPI_SBS_DEVICE_NAME           "Smart Battery System"
 #define ACPI_SBS_FILE_INFO             "info"
 #define ACPI_SBS_FILE_STATE            "state"
index 57b053f..7efe546 100644 (file)
@@ -41,6 +41,7 @@ static DEFINE_MUTEX(acpi_scan_lock);
 static LIST_HEAD(acpi_scan_handlers_list);
 DEFINE_MUTEX(acpi_device_lock);
 LIST_HEAD(acpi_wakeup_device_list);
+static DEFINE_MUTEX(acpi_hp_context_lock);
 
 struct acpi_device_bus_id{
        char bus_id[15];
@@ -60,6 +61,27 @@ void acpi_scan_lock_release(void)
 }
 EXPORT_SYMBOL_GPL(acpi_scan_lock_release);
 
+void acpi_lock_hp_context(void)
+{
+       mutex_lock(&acpi_hp_context_lock);
+}
+
+void acpi_unlock_hp_context(void)
+{
+       mutex_unlock(&acpi_hp_context_lock);
+}
+
+void acpi_initialize_hp_context(struct acpi_device *adev,
+                               struct acpi_hotplug_context *hp,
+                               int (*notify)(struct acpi_device *, u32),
+                               void (*uevent)(struct acpi_device *, u32))
+{
+       acpi_lock_hp_context();
+       acpi_set_hp_context(adev, hp, notify, uevent, NULL);
+       acpi_unlock_hp_context();
+}
+EXPORT_SYMBOL_GPL(acpi_initialize_hp_context);
+
 int acpi_scan_add_handler(struct acpi_scan_handler *handler)
 {
        if (!handler || !handler->attach)
@@ -439,90 +461,75 @@ static int acpi_scan_bus_check(struct acpi_device *adev)
        return 0;
 }
 
-static void acpi_device_hotplug(void *data, u32 src)
+static int acpi_generic_hotplug_event(struct acpi_device *adev, u32 type)
 {
-       u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
-       struct acpi_device *adev = data;
-       int error;
-
-       lock_device_hotplug();
-       mutex_lock(&acpi_scan_lock);
-
-       /*
-        * The device object's ACPI handle cannot become invalid as long as we
-        * are holding acpi_scan_lock, but it may have become invalid before
-        * that lock was acquired.
-        */
-       if (adev->handle == INVALID_ACPI_HANDLE)
-               goto out;
-
-       switch (src) {
+       switch (type) {
        case ACPI_NOTIFY_BUS_CHECK:
-               error = acpi_scan_bus_check(adev);
-               break;
+               return acpi_scan_bus_check(adev);
        case ACPI_NOTIFY_DEVICE_CHECK:
-               error = acpi_scan_device_check(adev);
-               break;
+               return acpi_scan_device_check(adev);
        case ACPI_NOTIFY_EJECT_REQUEST:
        case ACPI_OST_EC_OSPM_EJECT:
-               error = acpi_scan_hot_remove(adev);
-               break;
-       default:
-               error = -EINVAL;
-               break;
+               if (adev->handler && !adev->handler->hotplug.enabled) {
+                       dev_info(&adev->dev, "Eject disabled\n");
+                       return -EPERM;
+               }
+               acpi_evaluate_ost(adev->handle, ACPI_NOTIFY_EJECT_REQUEST,
+                                 ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
+               return acpi_scan_hot_remove(adev);
        }
-       if (!error)
-               ost_code = ACPI_OST_SC_SUCCESS;
-
- out:
-       acpi_evaluate_hotplug_ost(adev->handle, src, ost_code, NULL);
-       put_device(&adev->dev);
-       mutex_unlock(&acpi_scan_lock);
-       unlock_device_hotplug();
+       return -EINVAL;
 }
 
-static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data)
+void acpi_device_hotplug(struct acpi_device *adev, u32 src)
 {
        u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
-       struct acpi_device *adev;
-       acpi_status status;
+       int error = -ENODEV;
 
-       if (acpi_bus_get_device(handle, &adev))
-               goto err_out;
+       lock_device_hotplug();
+       mutex_lock(&acpi_scan_lock);
 
-       switch (type) {
-       case ACPI_NOTIFY_BUS_CHECK:
-               acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n");
-               break;
-       case ACPI_NOTIFY_DEVICE_CHECK:
-               acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event\n");
-               break;
-       case ACPI_NOTIFY_EJECT_REQUEST:
-               acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
-               if (!adev->handler)
-                       goto err_out;
+       /*
+        * The device object's ACPI handle cannot become invalid as long as we
+        * are holding acpi_scan_lock, but it might have become invalid before
+        * that lock was acquired.
+        */
+       if (adev->handle == INVALID_ACPI_HANDLE)
+               goto err_out;
 
-               if (!adev->handler->hotplug.enabled) {
-                       acpi_handle_err(handle, "Eject disabled\n");
+       if (adev->flags.is_dock_station) {
+               error = dock_notify(adev, src);
+       } else if (adev->flags.hotplug_notify) {
+               error = acpi_generic_hotplug_event(adev, src);
+               if (error == -EPERM) {
                        ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
                        goto err_out;
                }
-               acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST,
-                                         ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
-               break;
-       default:
-               /* non-hotplug event; possibly handled by other handler */
-               return;
-       }
-       get_device(&adev->dev);
-       status = acpi_hotplug_execute(acpi_device_hotplug, adev, type);
-       if (ACPI_SUCCESS(status))
-               return;
+       } else {
+               int (*notify)(struct acpi_device *, u32);
 
-       put_device(&adev->dev);
+               acpi_lock_hp_context();
+               notify = adev->hp ? adev->hp->notify : NULL;
+               acpi_unlock_hp_context();
+               /*
+                * There may be additional notify handlers for device objects
+                * without the .event() callback, so ignore them here.
+                */
+               if (notify)
+                       error = notify(adev, src);
+               else
+                       goto out;
+       }
+       if (!error)
+               ost_code = ACPI_OST_SC_SUCCESS;
 
  err_out:
-       acpi_evaluate_hotplug_ost(handle, type, ost_code, NULL);
+       acpi_evaluate_ost(adev->handle, src, ost_code, NULL);
+
+ out:
+       acpi_bus_put_acpi_device(adev);
+       mutex_unlock(&acpi_scan_lock);
+       unlock_device_hotplug();
 }
 
 static ssize_t real_power_state_show(struct device *dev,
@@ -570,17 +577,14 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
        if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable)
                return -ENODEV;
 
-       acpi_evaluate_hotplug_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
-                                 ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
        get_device(&acpi_device->dev);
-       status = acpi_hotplug_execute(acpi_device_hotplug, acpi_device,
-                                     ACPI_OST_EC_OSPM_EJECT);
+       status = acpi_hotplug_schedule(acpi_device, ACPI_OST_EC_OSPM_EJECT);
        if (ACPI_SUCCESS(status))
                return count;
 
        put_device(&acpi_device->dev);
-       acpi_evaluate_hotplug_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
-                                 ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
+       acpi_evaluate_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
+                         ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
        return status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN;
 }
 
@@ -1114,14 +1118,16 @@ static void acpi_scan_drop_device(acpi_handle handle, void *context)
        mutex_unlock(&acpi_device_del_lock);
 }
 
-int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
+static int acpi_get_device_data(acpi_handle handle, struct acpi_device **device,
+                               void (*callback)(void *))
 {
        acpi_status status;
 
        if (!device)
                return -EINVAL;
 
-       status = acpi_get_data(handle, acpi_scan_drop_device, (void **)device);
+       status = acpi_get_data_full(handle, acpi_scan_drop_device,
+                                   (void **)device, callback);
        if (ACPI_FAILURE(status) || !*device) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No context for object [%p]\n",
                                  handle));
@@ -1129,8 +1135,32 @@ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
        }
        return 0;
 }
+
+int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
+{
+       return acpi_get_device_data(handle, device, NULL);
+}
 EXPORT_SYMBOL(acpi_bus_get_device);
 
+static void get_acpi_device(void *dev)
+{
+       if (dev)
+               get_device(&((struct acpi_device *)dev)->dev);
+}
+
+struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle)
+{
+       struct acpi_device *adev = NULL;
+
+       acpi_get_device_data(handle, &adev, get_acpi_device);
+       return adev;
+}
+
+void acpi_bus_put_acpi_device(struct acpi_device *adev)
+{
+       put_device(&adev->dev);
+}
+
 int acpi_device_add(struct acpi_device *device,
                    void (*release)(struct device *))
 {
@@ -1641,6 +1671,27 @@ bool acpi_bay_match(acpi_handle handle)
        return acpi_ata_match(phandle);
 }
 
+bool acpi_device_is_battery(struct acpi_device *adev)
+{
+       struct acpi_hardware_id *hwid;
+
+       list_for_each_entry(hwid, &adev->pnp.ids, list)
+               if (!strcmp("PNP0C0A", hwid->id))
+                       return true;
+
+       return false;
+}
+
+static bool is_ejectable_bay(struct acpi_device *adev)
+{
+       acpi_handle handle = adev->handle;
+
+       if (acpi_has_method(handle, "_EJ0") && acpi_device_is_battery(adev))
+               return true;
+
+       return acpi_bay_match(handle);
+}
+
 /*
  * acpi_dock_match - see if an acpi object has a _DCK method
  */
@@ -1706,6 +1757,20 @@ static bool acpi_ibm_smbus_match(acpi_handle handle)
        return false;
 }
 
+static bool acpi_object_is_system_bus(acpi_handle handle)
+{
+       acpi_handle tmp;
+
+       if (ACPI_SUCCESS(acpi_get_handle(NULL, "\\_SB", &tmp)) &&
+           tmp == handle)
+               return true;
+       if (ACPI_SUCCESS(acpi_get_handle(NULL, "\\_TZ", &tmp)) &&
+           tmp == handle)
+               return true;
+
+       return false;
+}
+
 static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
                                int device_type)
 {
@@ -1757,8 +1822,10 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
                        acpi_add_id(pnp, ACPI_DOCK_HID);
                else if (acpi_ibm_smbus_match(handle))
                        acpi_add_id(pnp, ACPI_SMBUS_IBM_HID);
-               else if (list_empty(&pnp->ids) && handle == ACPI_ROOT_OBJECT) {
-                       acpi_add_id(pnp, ACPI_BUS_HID); /* \_SB, LNXSYBUS */
+               else if (list_empty(&pnp->ids) &&
+                        acpi_object_is_system_bus(handle)) {
+                       /* \_SB, \_TZ, LNXSYBUS */
+                       acpi_add_id(pnp, ACPI_BUS_HID);
                        strcpy(pnp->device_name, ACPI_BUS_DEVICE_NAME);
                        strcpy(pnp->device_class, ACPI_BUS_CLASS);
                }
@@ -1941,33 +2008,23 @@ void acpi_scan_hotplug_enabled(struct acpi_hotplug_profile *hotplug, bool val)
        mutex_unlock(&acpi_scan_lock);
 }
 
-static void acpi_scan_init_hotplug(acpi_handle handle, int type)
+static void acpi_scan_init_hotplug(struct acpi_device *adev)
 {
-       struct acpi_device_pnp pnp = {};
        struct acpi_hardware_id *hwid;
-       struct acpi_scan_handler *handler;
 
-       INIT_LIST_HEAD(&pnp.ids);
-       acpi_set_pnp_ids(handle, &pnp, type);
-
-       if (!pnp.type.hardware_id)
-               goto out;
+       if (acpi_dock_match(adev->handle) || is_ejectable_bay(adev)) {
+               acpi_dock_add(adev);
+               return;
+       }
+       list_for_each_entry(hwid, &adev->pnp.ids, list) {
+               struct acpi_scan_handler *handler;
 
-       /*
-        * This relies on the fact that acpi_install_notify_handler() will not
-        * install the same notify handler routine twice for the same handle.
-        */
-       list_for_each_entry(hwid, &pnp.ids, list) {
                handler = acpi_scan_match_handler(hwid->id, NULL);
                if (handler) {
-                       acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
-                                       acpi_hotplug_notify_cb, handler);
+                       adev->flags.hotplug_notify = true;
                        break;
                }
        }
-
-out:
-       acpi_free_pnp_ids(&pnp);
 }
 
 static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
@@ -1991,12 +2048,12 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
                return AE_OK;
        }
 
-       acpi_scan_init_hotplug(handle, type);
-
        acpi_add_single_object(&device, handle, type, sta);
        if (!device)
                return AE_CTRL_DEPTH;
 
+       acpi_scan_init_hotplug(device);
+
  out:
        if (!*return_value)
                *return_value = device;
@@ -2015,13 +2072,14 @@ static int acpi_scan_attach_handler(struct acpi_device *device)
 
                handler = acpi_scan_match_handler(hwid->id, &devid);
                if (handler) {
+                       device->handler = handler;
                        ret = handler->attach(device, devid);
-                       if (ret > 0) {
-                               device->handler = handler;
+                       if (ret > 0)
                                break;
-                       } else if (ret < 0) {
+
+                       device->handler = NULL;
+                       if (ret < 0)
                                break;
-                       }
                }
        }
        return ret;
@@ -2030,8 +2088,12 @@ static int acpi_scan_attach_handler(struct acpi_device *device)
 static void acpi_bus_attach(struct acpi_device *device)
 {
        struct acpi_device *child;
+       acpi_handle ejd;
        int ret;
 
+       if (ACPI_SUCCESS(acpi_bus_get_ejd(device->handle, &ejd)))
+               register_dock_dependent_device(device, ejd);
+
        acpi_bus_get_status(device);
        /* Skip devices that are not present. */
        if (!acpi_device_is_present(device)) {
@@ -2184,7 +2246,6 @@ int __init acpi_scan_init(void)
        acpi_cmos_rtc_init();
        acpi_container_init();
        acpi_memory_hotplug_init();
-       acpi_dock_init();
 
        mutex_lock(&acpi_scan_lock);
        /*
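
The scan.c changes above introduce the hotplug-context API (acpi_initialize_hp_context(), acpi_bus_get_acpi_device()/acpi_bus_put_acpi_device()), which replaces per-handle notify handlers with callbacks attached to struct acpi_device. A minimal sketch of how a consumer might hook into it, mirroring the libata conversion later in this diff; my_hp_ctx, my_notify(), my_uevent() and my_bind() are illustrative names, not part of the patch:

#include <linux/acpi.h>
#include <linux/slab.h>

struct my_hp_ctx {
	struct acpi_hotplug_context hp;	/* embedded so callbacks can recover it via container_of() */
	void *drvdata;
};

static int my_notify(struct acpi_device *adev, u32 event)
{
	struct my_hp_ctx *ctx = container_of(adev->hp, struct my_hp_ctx, hp);

	/* called from acpi_device_hotplug() with acpi_scan_lock held */
	(void)ctx;
	return 0;	/* 0 makes acpi_device_hotplug() report ACPI_OST_SC_SUCCESS */
}

static void my_uevent(struct acpi_device *adev, u32 event)
{
	/* optional: forward the event to user space */
}

static void my_bind(struct device *dev, void *drvdata)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);
	struct my_hp_ctx *ctx;

	if (!adev || adev->hp)		/* already has a hotplug context */
		return;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return;

	ctx->drvdata = drvdata;
	acpi_initialize_hp_context(adev, &ctx->hp, my_notify, my_uevent);
}
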
index 91a32ce..38cb978 100644 (file)
@@ -12,8 +12,6 @@
 #define _COMPONENT             ACPI_SYSTEM_COMPONENT
 ACPI_MODULE_NAME("sysfs");
 
-#define PREFIX "ACPI: "
-
 #ifdef CONFIG_ACPI_DEBUG
 /*
  * ACPI debug sysfs I/F, including:
index 5837f85..2178229 100644 (file)
@@ -23,6 +23,8 @@
  *
  */
 
+#define pr_fmt(fmt) "ACPI: " fmt
+
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/smp.h>
@@ -33,8 +35,6 @@
 #include <linux/acpi.h>
 #include <linux/bootmem.h>
 
-#define PREFIX                 "ACPI: "
-
 #define ACPI_MAX_TABLES                128
 
 static char *mps_inti_flags_polarity[] = { "dfl", "high", "res", "low" };
@@ -55,10 +55,9 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
                {
                        struct acpi_madt_local_apic *p =
                            (struct acpi_madt_local_apic *)header;
-                       printk(KERN_INFO PREFIX
-                              "LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n",
-                              p->processor_id, p->id,
-                              (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+                       pr_info("LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n",
+                               p->processor_id, p->id,
+                               (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
                }
                break;
 
@@ -66,11 +65,9 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
                {
                        struct acpi_madt_local_x2apic *p =
                            (struct acpi_madt_local_x2apic *)header;
-                       printk(KERN_INFO PREFIX
-                              "X2APIC (apic_id[0x%02x] uid[0x%02x] %s)\n",
-                              p->local_apic_id, p->uid,
-                              (p->lapic_flags & ACPI_MADT_ENABLED) ?
-                              "enabled" : "disabled");
+                       pr_info("X2APIC (apic_id[0x%02x] uid[0x%02x] %s)\n",
+                               p->local_apic_id, p->uid,
+                               (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
                }
                break;
 
@@ -78,9 +75,8 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
                {
                        struct acpi_madt_io_apic *p =
                            (struct acpi_madt_io_apic *)header;
-                       printk(KERN_INFO PREFIX
-                              "IOAPIC (id[0x%02x] address[0x%08x] gsi_base[%d])\n",
-                              p->id, p->address, p->global_irq_base);
+                       pr_info("IOAPIC (id[0x%02x] address[0x%08x] gsi_base[%d])\n",
+                               p->id, p->address, p->global_irq_base);
                }
                break;
 
@@ -88,18 +84,15 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
                {
                        struct acpi_madt_interrupt_override *p =
                            (struct acpi_madt_interrupt_override *)header;
-                       printk(KERN_INFO PREFIX
-                              "INT_SRC_OVR (bus %d bus_irq %d global_irq %d %s %s)\n",
-                              p->bus, p->source_irq, p->global_irq,
-                              mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
-                              mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2]);
+                       pr_info("INT_SRC_OVR (bus %d bus_irq %d global_irq %d %s %s)\n",
+                               p->bus, p->source_irq, p->global_irq,
+                               mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
+                               mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2]);
                        if (p->inti_flags  &
                            ~(ACPI_MADT_POLARITY_MASK | ACPI_MADT_TRIGGER_MASK))
-                               printk(KERN_INFO PREFIX
-                                      "INT_SRC_OVR unexpected reserved flags: 0x%x\n",
-                                      p->inti_flags  &
+                               pr_info("INT_SRC_OVR unexpected reserved flags: 0x%x\n",
+                                       p->inti_flags  &
                                        ~(ACPI_MADT_POLARITY_MASK | ACPI_MADT_TRIGGER_MASK));
-
                }
                break;
 
@@ -107,11 +100,10 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
                {
                        struct acpi_madt_nmi_source *p =
                            (struct acpi_madt_nmi_source *)header;
-                       printk(KERN_INFO PREFIX
-                              "NMI_SRC (%s %s global_irq %d)\n",
-                              mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
-                              mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
-                              p->global_irq);
+                       pr_info("NMI_SRC (%s %s global_irq %d)\n",
+                               mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
+                               mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
+                               p->global_irq);
                }
                break;
 
@@ -119,12 +111,11 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
                {
                        struct acpi_madt_local_apic_nmi *p =
                            (struct acpi_madt_local_apic_nmi *)header;
-                       printk(KERN_INFO PREFIX
-                              "LAPIC_NMI (acpi_id[0x%02x] %s %s lint[0x%x])\n",
-                              p->processor_id,
-                              mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK  ],
-                              mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
-                              p->lint);
+                       pr_info("LAPIC_NMI (acpi_id[0x%02x] %s %s lint[0x%x])\n",
+                               p->processor_id,
+                               mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK ],
+                               mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
+                               p->lint);
                }
                break;
 
@@ -137,12 +128,11 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
                        polarity = p->inti_flags & ACPI_MADT_POLARITY_MASK;
                        trigger = (p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2;
 
-                       printk(KERN_INFO PREFIX
-                              "X2APIC_NMI (uid[0x%02x] %s %s lint[0x%x])\n",
-                              p->uid,
-                              mps_inti_flags_polarity[polarity],
-                              mps_inti_flags_trigger[trigger],
-                              p->lint);
+                       pr_info("X2APIC_NMI (uid[0x%02x] %s %s lint[0x%x])\n",
+                               p->uid,
+                               mps_inti_flags_polarity[polarity],
+                               mps_inti_flags_trigger[trigger],
+                               p->lint);
                }
                break;
 
@@ -150,9 +140,8 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
                {
                        struct acpi_madt_local_apic_override *p =
                            (struct acpi_madt_local_apic_override *)header;
-                       printk(KERN_INFO PREFIX
-                              "LAPIC_ADDR_OVR (address[%p])\n",
-                              (void *)(unsigned long)p->address);
+                       pr_info("LAPIC_ADDR_OVR (address[%p])\n",
+                               (void *)(unsigned long)p->address);
                }
                break;
 
@@ -160,10 +149,9 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
                {
                        struct acpi_madt_io_sapic *p =
                            (struct acpi_madt_io_sapic *)header;
-                       printk(KERN_INFO PREFIX
-                              "IOSAPIC (id[0x%x] address[%p] gsi_base[%d])\n",
-                              p->id, (void *)(unsigned long)p->address,
-                              p->global_irq_base);
+                       pr_info("IOSAPIC (id[0x%x] address[%p] gsi_base[%d])\n",
+                               p->id, (void *)(unsigned long)p->address,
+                               p->global_irq_base);
                }
                break;
 
@@ -171,10 +159,9 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
                {
                        struct acpi_madt_local_sapic *p =
                            (struct acpi_madt_local_sapic *)header;
-                       printk(KERN_INFO PREFIX
-                              "LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n",
-                              p->processor_id, p->id, p->eid,
-                              (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+                       pr_info("LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n",
+                               p->processor_id, p->id, p->eid,
+                               (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
                }
                break;
 
@@ -182,19 +169,17 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
                {
                        struct acpi_madt_interrupt_source *p =
                            (struct acpi_madt_interrupt_source *)header;
-                       printk(KERN_INFO PREFIX
-                              "PLAT_INT_SRC (%s %s type[0x%x] id[0x%04x] eid[0x%x] iosapic_vector[0x%x] global_irq[0x%x]\n",
-                              mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
-                              mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
-                              p->type, p->id, p->eid, p->io_sapic_vector,
-                              p->global_irq);
+                       pr_info("PLAT_INT_SRC (%s %s type[0x%x] id[0x%04x] eid[0x%x] iosapic_vector[0x%x] global_irq[0x%x]\n",
+                               mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
+                               mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
+                               p->type, p->id, p->eid, p->io_sapic_vector,
+                               p->global_irq);
                }
                break;
 
        default:
-               printk(KERN_WARNING PREFIX
-                      "Found unsupported MADT entry (type = 0x%x)\n",
-                      header->type);
+               pr_warn("Found unsupported MADT entry (type = 0x%x)\n",
+                       header->type);
                break;
        }
 }
@@ -225,7 +210,7 @@ acpi_table_parse_entries(char *id,
                acpi_get_table_with_size(id, 0, &table_header, &tbl_size);
 
        if (!table_header) {
-               printk(KERN_WARNING PREFIX "%4.4s not present\n", id);
+               pr_warn("%4.4s not present\n", id);
                return -ENODEV;
        }
 
@@ -248,7 +233,7 @@ acpi_table_parse_entries(char *id,
                 * infinite loop.
                 */
                if (entry->length == 0) {
-                       pr_err(PREFIX "[%4.4s:0x%02x] Invalid zero length\n", id, entry_id);
+                       pr_err("[%4.4s:0x%02x] Invalid zero length\n", id, entry_id);
                        goto err;
                }
 
@@ -256,8 +241,8 @@ acpi_table_parse_entries(char *id,
                    ((unsigned long)entry + entry->length);
        }
        if (max_entries && count > max_entries) {
-               printk(KERN_WARNING PREFIX "[%4.4s:0x%02x] ignored %i entries of "
-                      "%i found\n", id, entry_id, count - max_entries, count);
+               pr_warn("[%4.4s:0x%02x] ignored %i entries of %i found\n",
+                       id, entry_id, count - max_entries, count);
        }
 
        early_acpi_os_unmap_memory((char *)table_header, tbl_size);
@@ -322,13 +307,11 @@ static void __init check_multiple_madt(void)
 
        acpi_get_table_with_size(ACPI_SIG_MADT, 2, &table, &tbl_size);
        if (table) {
-               printk(KERN_WARNING PREFIX
-                      "BIOS bug: multiple APIC/MADT found,"
-                      " using %d\n", acpi_apic_instance);
-               printk(KERN_WARNING PREFIX
-                      "If \"acpi_apic_instance=%d\" works better, "
-                      "notify linux-acpi@vger.kernel.org\n",
-                      acpi_apic_instance ? 0 : 2);
+               pr_warn("BIOS bug: multiple APIC/MADT found, using %d\n",
+                       acpi_apic_instance);
+               pr_warn("If \"acpi_apic_instance=%d\" works better, "
+                       "notify linux-acpi@vger.kernel.org\n",
+                       acpi_apic_instance ? 0 : 2);
                early_acpi_os_unmap_memory(table, tbl_size);
 
        } else
@@ -365,8 +348,7 @@ static int __init acpi_parse_apic_instance(char *str)
 
        acpi_apic_instance = simple_strtoul(str, NULL, 0);
 
-       printk(KERN_NOTICE PREFIX "Shall use APIC/MADT table %d\n",
-              acpi_apic_instance);
+       pr_notice("Shall use APIC/MADT table %d\n", acpi_apic_instance);
 
        return 0;
 }
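
The tables.c conversion above drops the local PREFIX macro in favour of pr_fmt(), which the pr_*() helpers prepend to every message automatically. A minimal sketch of the mechanism; example() is a placeholder function, not part of the patch:

/* pr_fmt() must be defined before the printk helpers are pulled in */
#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/printk.h>

static void example(void)
{
	/* emits "ACPI: Shall use APIC/MADT table 2\n" at KERN_NOTICE */
	pr_notice("Shall use APIC/MADT table %d\n", 2);
}
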
index 08626c8..9640685 100644 (file)
@@ -43,6 +43,7 @@
 #include <linux/device.h>
 #include <linux/thermal.h>
 #include <linux/acpi.h>
+#include <linux/workqueue.h>
 #include <asm/uaccess.h>
 
 #define PREFIX "ACPI: "
@@ -90,6 +91,8 @@ static int psv;
 module_param(psv, int, 0644);
 MODULE_PARM_DESC(psv, "Disable or override all passive trip points.");
 
+static struct workqueue_struct *acpi_thermal_pm_queue;
+
 static int acpi_thermal_add(struct acpi_device *device);
 static int acpi_thermal_remove(struct acpi_device *device);
 static void acpi_thermal_notify(struct acpi_device *device, u32 event);
@@ -101,11 +104,13 @@ static const struct acpi_device_id  thermal_device_ids[] = {
 MODULE_DEVICE_TABLE(acpi, thermal_device_ids);
 
 #ifdef CONFIG_PM_SLEEP
+static int acpi_thermal_suspend(struct device *dev);
 static int acpi_thermal_resume(struct device *dev);
 #else
+#define acpi_thermal_suspend NULL
 #define acpi_thermal_resume NULL
 #endif
-static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, NULL, acpi_thermal_resume);
+static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, acpi_thermal_suspend, acpi_thermal_resume);
 
 static struct acpi_driver acpi_thermal_driver = {
        .name = "thermal",
@@ -186,6 +191,7 @@ struct acpi_thermal {
        struct thermal_zone_device *thermal_zone;
        int tz_enabled;
        int kelvin_offset;
+       struct work_struct thermal_check_work;
 };
 
 /* --------------------------------------------------------------------------
@@ -1064,6 +1070,13 @@ static void acpi_thermal_guess_offset(struct acpi_thermal *tz)
                tz->kelvin_offset = 2732;
 }
 
+static void acpi_thermal_check_fn(struct work_struct *work)
+{
+       struct acpi_thermal *tz = container_of(work, struct acpi_thermal,
+                                              thermal_check_work);
+       acpi_thermal_check(tz);
+}
+
 static int acpi_thermal_add(struct acpi_device *device)
 {
        int result = 0;
@@ -1093,6 +1106,8 @@ static int acpi_thermal_add(struct acpi_device *device)
        if (result)
                goto free_memory;
 
+       INIT_WORK(&tz->thermal_check_work, acpi_thermal_check_fn);
+
        pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device),
                acpi_device_bid(device), KELVIN_TO_CELSIUS(tz->temperature));
        goto end;
@@ -1110,6 +1125,7 @@ static int acpi_thermal_remove(struct acpi_device *device)
        if (!device || !acpi_driver_data(device))
                return -EINVAL;
 
+       flush_workqueue(acpi_thermal_pm_queue);
        tz = acpi_driver_data(device);
 
        acpi_thermal_unregister_thermal_zone(tz);
@@ -1118,6 +1134,13 @@ static int acpi_thermal_remove(struct acpi_device *device)
 }
 
 #ifdef CONFIG_PM_SLEEP
+static int acpi_thermal_suspend(struct device *dev)
+{
+       /* Make sure the previously queued thermal check work has been done */
+       flush_workqueue(acpi_thermal_pm_queue);
+       return 0;
+}
+
 static int acpi_thermal_resume(struct device *dev)
 {
        struct acpi_thermal *tz;
@@ -1148,7 +1171,7 @@ static int acpi_thermal_resume(struct device *dev)
                tz->state.active |= tz->trips.active[i].flags.enabled;
        }
 
-       acpi_thermal_check(tz);
+       queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
 
        return AE_OK;
 }
@@ -1240,16 +1263,22 @@ static int __init acpi_thermal_init(void)
                return -ENODEV;
        }
 
+       acpi_thermal_pm_queue = create_workqueue("acpi_thermal_pm");
+       if (!acpi_thermal_pm_queue)
+               return -ENODEV;
+
        result = acpi_bus_register_driver(&acpi_thermal_driver);
-       if (result < 0)
+       if (result < 0) {
+               destroy_workqueue(acpi_thermal_pm_queue);
                return -ENODEV;
+       }
 
        return 0;
 }
 
 static void __exit acpi_thermal_exit(void)
 {
-
+       destroy_workqueue(acpi_thermal_pm_queue);
        acpi_bus_unregister_driver(&acpi_thermal_driver);
 
        return;
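
The thermal.c change above defers acpi_thermal_check() to a dedicated workqueue, so the resume callback only queues work while suspend and removal flush the queue before proceeding. A condensed sketch of that pattern; my_pm_queue, my_check_work, my_resume() and my_suspend() are placeholder names:

#include <linux/device.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_pm_queue;	/* created at module init */
static struct work_struct my_check_work;	/* set up with INIT_WORK() at probe time */

static int my_resume(struct device *dev)
{
	/* run the (possibly slow) re-check outside the resume path */
	queue_work(my_pm_queue, &my_check_work);
	return 0;
}

static int my_suspend(struct device *dev)
{
	/* make sure any previously queued check has finished */
	flush_workqueue(my_pm_queue);
	return 0;
}
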
index 85e3b61..0f5f78f 100644 (file)
@@ -422,7 +422,7 @@ out:
 EXPORT_SYMBOL(acpi_get_physical_device_location);
 
 /**
- * acpi_evaluate_hotplug_ost: Evaluate _OST for hotplug operations
+ * acpi_evaluate_ost: Evaluate _OST for hotplug operations
  * @handle: ACPI device handle
  * @source_event: source event code
  * @status_code: status code
@@ -433,17 +433,15 @@ EXPORT_SYMBOL(acpi_get_physical_device_location);
  * When the platform does not support _OST, this function has no effect.
  */
 acpi_status
-acpi_evaluate_hotplug_ost(acpi_handle handle, u32 source_event,
-               u32 status_code, struct acpi_buffer *status_buf)
+acpi_evaluate_ost(acpi_handle handle, u32 source_event, u32 status_code,
+                 struct acpi_buffer *status_buf)
 {
-#ifdef ACPI_HOTPLUG_OST
        union acpi_object params[3] = {
                {.type = ACPI_TYPE_INTEGER,},
                {.type = ACPI_TYPE_INTEGER,},
                {.type = ACPI_TYPE_BUFFER,}
        };
        struct acpi_object_list arg_list = {3, params};
-       acpi_status status;
 
        params[0].integer.value = source_event;
        params[1].integer.value = status_code;
@@ -455,13 +453,9 @@ acpi_evaluate_hotplug_ost(acpi_handle handle, u32 source_event,
                params[2].buffer.length = 0;
        }
 
-       status = acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
-       return status;
-#else
-       return AE_OK;
-#endif
+       return acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
 }
-EXPORT_SYMBOL(acpi_evaluate_hotplug_ost);
+EXPORT_SYMBOL(acpi_evaluate_ost);
 
 /**
  * acpi_handle_printk: Print message with ACPI prefix and object path
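
With the rename above, acpi_evaluate_ost() is also built unconditionally (the ACPI_HOTPLUG_OST ifdef is gone), so callers can always report hotplug status back to the platform. A hedged usage sketch; report_eject_result() and its error parameter are illustrative only:

#include <linux/acpi.h>

static void report_eject_result(acpi_handle handle, int error)
{
	u32 ost_code = error ? ACPI_OST_SC_NON_SPECIFIC_FAILURE
			     : ACPI_OST_SC_SUCCESS;

	/* has no effect beyond an AE_NOT_FOUND status if the handle lacks _OST */
	acpi_evaluate_ost(handle, ACPI_NOTIFY_EJECT_REQUEST, ost_code, NULL);
}
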
index b6ba88e..48c7e8a 100644 (file)
@@ -45,8 +45,6 @@
 
 #include "internal.h"
 
-#define PREFIX "ACPI: "
-
 #define ACPI_VIDEO_BUS_NAME            "Video Bus"
 #define ACPI_VIDEO_DEVICE_NAME         "Video Device"
 #define ACPI_VIDEO_NOTIFY_SWITCH       0x80
index 19080c8..33e3db5 100644 (file)
@@ -40,8 +40,6 @@
 
 #include "internal.h"
 
-#define PREFIX "ACPI: "
-
 ACPI_MODULE_NAME("video");
 #define _COMPONENT             ACPI_VIDEO_COMPONENT
 
index b4f7cc2..97a14fe 100644 (file)
@@ -38,6 +38,16 @@ static void ata_acpi_clear_gtf(struct ata_device *dev)
        dev->gtf_cache = NULL;
 }
 
+struct ata_acpi_hotplug_context {
+       struct acpi_hotplug_context hp;
+       union {
+               struct ata_port *ap;
+               struct ata_device *dev;
+       } data;
+};
+
+#define ata_hotplug_data(context) (container_of((context), struct ata_acpi_hotplug_context, hp)->data)
+
 /**
  * ata_dev_acpi_handle - provide the acpi_handle for an ata_device
  * @dev: the acpi_handle returned will correspond to this device
@@ -121,18 +131,17 @@ static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev,
                ata_port_wait_eh(ap);
 }
 
-static void ata_acpi_dev_notify_dock(acpi_handle handle, u32 event, void *data)
+static int ata_acpi_dev_notify_dock(struct acpi_device *adev, u32 event)
 {
-       struct ata_device *dev = data;
-
+       struct ata_device *dev = ata_hotplug_data(adev->hp).dev;
        ata_acpi_handle_hotplug(dev->link->ap, dev, event);
+       return 0;
 }
 
-static void ata_acpi_ap_notify_dock(acpi_handle handle, u32 event, void *data)
+static int ata_acpi_ap_notify_dock(struct acpi_device *adev, u32 event)
 {
-       struct ata_port *ap = data;
-
-       ata_acpi_handle_hotplug(ap, NULL, event);
+       ata_acpi_handle_hotplug(ata_hotplug_data(adev->hp).ap, NULL, event);
+       return 0;
 }
 
 static void ata_acpi_uevent(struct ata_port *ap, struct ata_device *dev,
@@ -154,31 +163,23 @@ static void ata_acpi_uevent(struct ata_port *ap, struct ata_device *dev,
        }
 }
 
-static void ata_acpi_ap_uevent(acpi_handle handle, u32 event, void *data)
+static void ata_acpi_ap_uevent(struct acpi_device *adev, u32 event)
 {
-       ata_acpi_uevent(data, NULL, event);
+       ata_acpi_uevent(ata_hotplug_data(adev->hp).ap, NULL, event);
 }
 
-static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
+static void ata_acpi_dev_uevent(struct acpi_device *adev, u32 event)
 {
-       struct ata_device *dev = data;
+       struct ata_device *dev = ata_hotplug_data(adev->hp).dev;
        ata_acpi_uevent(dev->link->ap, dev, event);
 }
 
-static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
-       .handler = ata_acpi_dev_notify_dock,
-       .uevent = ata_acpi_dev_uevent,
-};
-
-static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
-       .handler = ata_acpi_ap_notify_dock,
-       .uevent = ata_acpi_ap_uevent,
-};
-
 /* bind acpi handle to pata port */
 void ata_acpi_bind_port(struct ata_port *ap)
 {
        struct acpi_device *host_companion = ACPI_COMPANION(ap->host->dev);
+       struct acpi_device *adev;
+       struct ata_acpi_hotplug_context *context;
 
        if (libata_noacpi || ap->flags & ATA_FLAG_ACPI_SATA || !host_companion)
                return;
@@ -188,9 +189,17 @@ void ata_acpi_bind_port(struct ata_port *ap)
        if (ata_acpi_gtm(ap, &ap->__acpi_init_gtm) == 0)
                ap->pflags |= ATA_PFLAG_INIT_GTM_VALID;
 
-       /* we might be on a docking station */
-       register_hotplug_dock_device(ACPI_HANDLE(&ap->tdev),
-                                    &ata_acpi_ap_dock_ops, ap, NULL, NULL);
+       adev = ACPI_COMPANION(&ap->tdev);
+       if (!adev || adev->hp)
+               return;
+
+       context = kzalloc(sizeof(*context), GFP_KERNEL);
+       if (!context)
+               return;
+
+       context->data.ap = ap;
+       acpi_initialize_hp_context(adev, &context->hp, ata_acpi_ap_notify_dock,
+                                  ata_acpi_ap_uevent);
 }
 
 void ata_acpi_bind_dev(struct ata_device *dev)
@@ -198,7 +207,8 @@ void ata_acpi_bind_dev(struct ata_device *dev)
        struct ata_port *ap = dev->link->ap;
        struct acpi_device *port_companion = ACPI_COMPANION(&ap->tdev);
        struct acpi_device *host_companion = ACPI_COMPANION(ap->host->dev);
-       struct acpi_device *parent;
+       struct acpi_device *parent, *adev;
+       struct ata_acpi_hotplug_context *context;
        u64 adr;
 
        /*
@@ -221,9 +231,17 @@ void ata_acpi_bind_dev(struct ata_device *dev)
        }
 
        acpi_preset_companion(&dev->tdev, parent, adr);
+       adev = ACPI_COMPANION(&dev->tdev);
+       if (!adev || adev->hp)
+               return;
+
+       context = kzalloc(sizeof(*context), GFP_KERNEL);
+       if (!context)
+               return;
 
-       register_hotplug_dock_device(ata_dev_acpi_handle(dev),
-                                    &ata_acpi_dev_dock_ops, dev, NULL, NULL);
+       context->data.dev = dev;
+       acpi_initialize_hp_context(adev, &context->hp, ata_acpi_dev_notify_dock,
+                                  ata_acpi_dev_uevent);
 }
 
 /**
index 2e58ebb..1cb8544 100644 (file)
@@ -1,6 +1,5 @@
-obj-$(CONFIG_PM)       += sysfs.o generic_ops.o common.o qos.o
+obj-$(CONFIG_PM)       += sysfs.o generic_ops.o common.o qos.o runtime.o
 obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
-obj-$(CONFIG_PM_RUNTIME)       += runtime.o
 obj-$(CONFIG_PM_TRACE_RTC)     += trace.o
 obj-$(CONFIG_PM_OPP)   += opp.o
 obj-$(CONFIG_PM_GENERIC_DOMAINS)       +=  domain.o domain_governor.o
index bfb8955..dc127e5 100644 (file)
@@ -42,7 +42,7 @@
        struct gpd_timing_data *__td = &dev_gpd_data(dev)->td;                  \
        if (!__retval && __elapsed > __td->field) {                             \
                __td->field = __elapsed;                                        \
-               dev_warn(dev, name " latency exceeded, new value %lld ns\n",    \
+               dev_dbg(dev, name " latency exceeded, new value %lld ns\n",     \
                        __elapsed);                                             \
                genpd->max_off_time_changed = true;                             \
                __td->constraint_changed = true;                                \
index 1b41fca..86d5e4f 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/async.h>
 #include <linux/suspend.h>
 #include <trace/events/power.h>
+#include <linux/cpufreq.h>
 #include <linux/cpuidle.h>
 #include <linux/timer.h>
 
@@ -91,6 +92,8 @@ void device_pm_sleep_init(struct device *dev)
 {
        dev->power.is_prepared = false;
        dev->power.is_suspended = false;
+       dev->power.is_noirq_suspended = false;
+       dev->power.is_late_suspended = false;
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
@@ -467,7 +470,7 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
  * The driver of @dev will not receive interrupts while this function is being
  * executed.
  */
-static int device_resume_noirq(struct device *dev, pm_message_t state)
+static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
 {
        pm_callback_t callback = NULL;
        char *info = NULL;
@@ -479,6 +482,11 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
        if (dev->power.syscore)
                goto Out;
 
+       if (!dev->power.is_noirq_suspended)
+               goto Out;
+
+       dpm_wait(dev->parent, async);
+
        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -499,12 +507,32 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
        }
 
        error = dpm_run_callback(callback, dev, state, info);
+       dev->power.is_noirq_suspended = false;
 
  Out:
+       complete_all(&dev->power.completion);
        TRACE_RESUME(error);
        return error;
 }
 
+static bool is_async(struct device *dev)
+{
+       return dev->power.async_suspend && pm_async_enabled
+               && !pm_trace_is_enabled();
+}
+
+static void async_resume_noirq(void *data, async_cookie_t cookie)
+{
+       struct device *dev = (struct device *)data;
+       int error;
+
+       error = device_resume_noirq(dev, pm_transition, true);
+       if (error)
+               pm_dev_err(dev, pm_transition, " async", error);
+
+       put_device(dev);
+}
+
 /**
  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
  * @state: PM transition of the system being carried out.
@@ -514,29 +542,48 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
  */
 static void dpm_resume_noirq(pm_message_t state)
 {
+       struct device *dev;
        ktime_t starttime = ktime_get();
 
        mutex_lock(&dpm_list_mtx);
-       while (!list_empty(&dpm_noirq_list)) {
-               struct device *dev = to_device(dpm_noirq_list.next);
-               int error;
+       pm_transition = state;
 
+       /*
+        * Advance the async threads upfront, in case their start is
+        * delayed by non-async resuming devices.
+        */
+       list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
+               reinit_completion(&dev->power.completion);
+               if (is_async(dev)) {
+                       get_device(dev);
+                       async_schedule(async_resume_noirq, dev);
+               }
+       }
+
+       while (!list_empty(&dpm_noirq_list)) {
+               dev = to_device(dpm_noirq_list.next);
                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_late_early_list);
                mutex_unlock(&dpm_list_mtx);
 
-               error = device_resume_noirq(dev, state);
-               if (error) {
-                       suspend_stats.failed_resume_noirq++;
-                       dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
-                       dpm_save_failed_dev(dev_name(dev));
-                       pm_dev_err(dev, state, " noirq", error);
+               if (!is_async(dev)) {
+                       int error;
+
+                       error = device_resume_noirq(dev, state, false);
+                       if (error) {
+                               suspend_stats.failed_resume_noirq++;
+                               dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+                               dpm_save_failed_dev(dev_name(dev));
+                               pm_dev_err(dev, state, " noirq", error);
+                       }
                }
 
                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
+       async_synchronize_full();
        dpm_show_time(starttime, state, "noirq");
        resume_device_irqs();
        cpuidle_resume();
@@ -549,7 +596,7 @@ static void dpm_resume_noirq(pm_message_t state)
  *
  * Runtime PM is disabled for @dev while this function is being executed.
  */
-static int device_resume_early(struct device *dev, pm_message_t state)
+static int device_resume_early(struct device *dev, pm_message_t state, bool async)
 {
        pm_callback_t callback = NULL;
        char *info = NULL;
@@ -561,6 +608,11 @@ static int device_resume_early(struct device *dev, pm_message_t state)
        if (dev->power.syscore)
                goto Out;
 
+       if (!dev->power.is_late_suspended)
+               goto Out;
+
+       dpm_wait(dev->parent, async);
+
        if (dev->pm_domain) {
                info = "early power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -581,43 +633,75 @@ static int device_resume_early(struct device *dev, pm_message_t state)
        }
 
        error = dpm_run_callback(callback, dev, state, info);
+       dev->power.is_late_suspended = false;
 
  Out:
        TRACE_RESUME(error);
 
        pm_runtime_enable(dev);
+       complete_all(&dev->power.completion);
        return error;
 }
 
+static void async_resume_early(void *data, async_cookie_t cookie)
+{
+       struct device *dev = (struct device *)data;
+       int error;
+
+       error = device_resume_early(dev, pm_transition, true);
+       if (error)
+               pm_dev_err(dev, pm_transition, " async", error);
+
+       put_device(dev);
+}
+
 /**
  * dpm_resume_early - Execute "early resume" callbacks for all devices.
  * @state: PM transition of the system being carried out.
  */
 static void dpm_resume_early(pm_message_t state)
 {
+       struct device *dev;
        ktime_t starttime = ktime_get();
 
        mutex_lock(&dpm_list_mtx);
-       while (!list_empty(&dpm_late_early_list)) {
-               struct device *dev = to_device(dpm_late_early_list.next);
-               int error;
+       pm_transition = state;
 
+       /*
+        * Advance the async threads upfront, in case their start is
+        * delayed by non-async resuming devices.
+        */
+       list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
+               reinit_completion(&dev->power.completion);
+               if (is_async(dev)) {
+                       get_device(dev);
+                       async_schedule(async_resume_early, dev);
+               }
+       }
+
+       while (!list_empty(&dpm_late_early_list)) {
+               dev = to_device(dpm_late_early_list.next);
                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_suspended_list);
                mutex_unlock(&dpm_list_mtx);
 
-               error = device_resume_early(dev, state);
-               if (error) {
-                       suspend_stats.failed_resume_early++;
-                       dpm_save_failed_step(SUSPEND_RESUME_EARLY);
-                       dpm_save_failed_dev(dev_name(dev));
-                       pm_dev_err(dev, state, " early", error);
-               }
+               if (!is_async(dev)) {
+                       int error;
 
+                       error = device_resume_early(dev, state, false);
+                       if (error) {
+                               suspend_stats.failed_resume_early++;
+                               dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+                               dpm_save_failed_dev(dev_name(dev));
+                               pm_dev_err(dev, state, " early", error);
+                       }
+               }
                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
+       async_synchronize_full();
        dpm_show_time(starttime, state, "early");
 }
 
@@ -732,12 +816,6 @@ static void async_resume(void *data, async_cookie_t cookie)
        put_device(dev);
 }
 
-static bool is_async(struct device *dev)
-{
-       return dev->power.async_suspend && pm_async_enabled
-               && !pm_trace_is_enabled();
-}
-
 /**
  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
  * @state: PM transition of the system being carried out.
@@ -789,6 +867,8 @@ void dpm_resume(pm_message_t state)
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, NULL);
+
+       cpufreq_resume();
 }
 
 /**
@@ -913,13 +993,24 @@ static pm_message_t resume_event(pm_message_t sleep_state)
  * The driver of @dev will not receive interrupts while this function is being
  * executed.
  */
-static int device_suspend_noirq(struct device *dev, pm_message_t state)
+static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
 {
        pm_callback_t callback = NULL;
        char *info = NULL;
+       int error = 0;
+
+       if (async_error)
+               goto Complete;
+
+       if (pm_wakeup_pending()) {
+               async_error = -EBUSY;
+               goto Complete;
+       }
 
        if (dev->power.syscore)
-               return 0;
+               goto Complete;
+
+       dpm_wait_for_children(dev, async);
 
        if (dev->pm_domain) {
                info = "noirq power domain ";
@@ -940,7 +1031,41 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
                callback = pm_noirq_op(dev->driver->pm, state);
        }
 
-       return dpm_run_callback(callback, dev, state, info);
+       error = dpm_run_callback(callback, dev, state, info);
+       if (!error)
+               dev->power.is_noirq_suspended = true;
+       else
+               async_error = error;
+
+Complete:
+       complete_all(&dev->power.completion);
+       return error;
+}
+
+static void async_suspend_noirq(void *data, async_cookie_t cookie)
+{
+       struct device *dev = (struct device *)data;
+       int error;
+
+       error = __device_suspend_noirq(dev, pm_transition, true);
+       if (error) {
+               dpm_save_failed_dev(dev_name(dev));
+               pm_dev_err(dev, pm_transition, " async", error);
+       }
+
+       put_device(dev);
+}
+
+static int device_suspend_noirq(struct device *dev)
+{
+       reinit_completion(&dev->power.completion);
+
+       if (pm_async_enabled && dev->power.async_suspend) {
+               get_device(dev);
+               async_schedule(async_suspend_noirq, dev);
+               return 0;
+       }
+       return __device_suspend_noirq(dev, pm_transition, false);
 }
 
 /**
@@ -958,19 +1083,20 @@ static int dpm_suspend_noirq(pm_message_t state)
        cpuidle_pause();
        suspend_device_irqs();
        mutex_lock(&dpm_list_mtx);
+       pm_transition = state;
+       async_error = 0;
+
        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.prev);
 
                get_device(dev);
                mutex_unlock(&dpm_list_mtx);
 
-               error = device_suspend_noirq(dev, state);
+               error = device_suspend_noirq(dev);
 
                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " noirq", error);
-                       suspend_stats.failed_suspend_noirq++;
-                       dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
@@ -979,16 +1105,21 @@ static int dpm_suspend_noirq(pm_message_t state)
                        list_move(&dev->power.entry, &dpm_noirq_list);
                put_device(dev);
 
-               if (pm_wakeup_pending()) {
-                       error = -EBUSY;
+               if (async_error)
                        break;
-               }
        }
        mutex_unlock(&dpm_list_mtx);
-       if (error)
+       async_synchronize_full();
+       if (!error)
+               error = async_error;
+
+       if (error) {
+               suspend_stats.failed_suspend_noirq++;
+               dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
                dpm_resume_noirq(resume_event(state));
-       else
+       } else {
                dpm_show_time(starttime, state, "noirq");
+       }
        return error;
 }
 
@@ -999,15 +1130,26 @@ static int dpm_suspend_noirq(pm_message_t state)
  *
  * Runtime PM is disabled for @dev while this function is being executed.
  */
-static int device_suspend_late(struct device *dev, pm_message_t state)
+static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
 {
        pm_callback_t callback = NULL;
        char *info = NULL;
+       int error = 0;
 
        __pm_runtime_disable(dev, false);
 
+       if (async_error)
+               goto Complete;
+
+       if (pm_wakeup_pending()) {
+               async_error = -EBUSY;
+               goto Complete;
+       }
+
        if (dev->power.syscore)
-               return 0;
+               goto Complete;
+
+       dpm_wait_for_children(dev, async);
 
        if (dev->pm_domain) {
                info = "late power domain ";
@@ -1028,7 +1170,41 @@ static int device_suspend_late(struct device *dev, pm_message_t state)
                callback = pm_late_early_op(dev->driver->pm, state);
        }
 
-       return dpm_run_callback(callback, dev, state, info);
+       error = dpm_run_callback(callback, dev, state, info);
+       if (!error)
+               dev->power.is_late_suspended = true;
+       else
+               async_error = error;
+
+Complete:
+       complete_all(&dev->power.completion);
+       return error;
+}
+
+static void async_suspend_late(void *data, async_cookie_t cookie)
+{
+       struct device *dev = (struct device *)data;
+       int error;
+
+       error = __device_suspend_late(dev, pm_transition, true);
+       if (error) {
+               dpm_save_failed_dev(dev_name(dev));
+               pm_dev_err(dev, pm_transition, " async", error);
+       }
+       put_device(dev);
+}
+
+static int device_suspend_late(struct device *dev)
+{
+       reinit_completion(&dev->power.completion);
+
+       if (pm_async_enabled && dev->power.async_suspend) {
+               get_device(dev);
+               async_schedule(async_suspend_late, dev);
+               return 0;
+       }
+
+       return __device_suspend_late(dev, pm_transition, false);
 }
 
 /**
@@ -1041,19 +1217,20 @@ static int dpm_suspend_late(pm_message_t state)
        int error = 0;
 
        mutex_lock(&dpm_list_mtx);
+       pm_transition = state;
+       async_error = 0;
+
        while (!list_empty(&dpm_suspended_list)) {
                struct device *dev = to_device(dpm_suspended_list.prev);
 
                get_device(dev);
                mutex_unlock(&dpm_list_mtx);
 
-               error = device_suspend_late(dev, state);
+               error = device_suspend_late(dev);
 
                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " late", error);
-                       suspend_stats.failed_suspend_late++;
-                       dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
@@ -1062,17 +1239,18 @@ static int dpm_suspend_late(pm_message_t state)
                        list_move(&dev->power.entry, &dpm_late_early_list);
                put_device(dev);
 
-               if (pm_wakeup_pending()) {
-                       error = -EBUSY;
+               if (async_error)
                        break;
-               }
        }
        mutex_unlock(&dpm_list_mtx);
-       if (error)
+       async_synchronize_full();
+       if (error) {
+               suspend_stats.failed_suspend_late++;
+               dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
                dpm_resume_early(resume_event(state));
-       else
+       } else {
                dpm_show_time(starttime, state, "late");
-
+       }
        return error;
 }
 
@@ -1259,6 +1437,8 @@ int dpm_suspend(pm_message_t state)
 
        might_sleep();
 
+       cpufreq_suspend();
+
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
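
The noirq and late suspend phases above now run a device's callbacks asynchronously once it has opted in (pm_async_enabled plus the per-device async_suspend flag), with the core waiting only where parent/child ordering requires it. A minimal sketch of how a driver might opt a device in; foo_probe() is a hypothetical platform driver probe, and device_enable_async_suspend() is the existing helper that sets power.async_suspend:

    #include <linux/device.h>
    #include <linux/platform_device.h>
    #include <linux/pm.h>

    static int foo_probe(struct platform_device *pdev)
    {
        /*
         * Opt this device into asynchronous suspend/resume.  Its late
         * and noirq suspend callbacks may then run in parallel with
         * those of unrelated devices, while the PM core still waits
         * for its children before suspending it.
         */
        device_enable_async_suspend(&pdev->dev);
        return 0;
    }
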
index cfc3226..a21223d 100644 (file)
@@ -89,8 +89,8 @@ extern void dpm_sysfs_remove(struct device *dev);
 extern void rpm_sysfs_remove(struct device *dev);
 extern int wakeup_sysfs_add(struct device *dev);
 extern void wakeup_sysfs_remove(struct device *dev);
-extern int pm_qos_sysfs_add_latency(struct device *dev);
-extern void pm_qos_sysfs_remove_latency(struct device *dev);
+extern int pm_qos_sysfs_add_resume_latency(struct device *dev);
+extern void pm_qos_sysfs_remove_resume_latency(struct device *dev);
 extern int pm_qos_sysfs_add_flags(struct device *dev);
 extern void pm_qos_sysfs_remove_flags(struct device *dev);
 
index 5c1361a..36b9eb4 100644 (file)
@@ -105,7 +105,7 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
 s32 __dev_pm_qos_read_value(struct device *dev)
 {
        return IS_ERR_OR_NULL(dev->power.qos) ?
-               0 : pm_qos_read_value(&dev->power.qos->latency);
+               0 : pm_qos_read_value(&dev->power.qos->resume_latency);
 }
 
 /**
@@ -141,16 +141,24 @@ static int apply_constraint(struct dev_pm_qos_request *req,
        int ret;
 
        switch(req->type) {
-       case DEV_PM_QOS_LATENCY:
-               ret = pm_qos_update_target(&qos->latency, &req->data.pnode,
-                                          action, value);
+       case DEV_PM_QOS_RESUME_LATENCY:
+               ret = pm_qos_update_target(&qos->resume_latency,
+                                          &req->data.pnode, action, value);
                if (ret) {
-                       value = pm_qos_read_value(&qos->latency);
+                       value = pm_qos_read_value(&qos->resume_latency);
                        blocking_notifier_call_chain(&dev_pm_notifiers,
                                                     (unsigned long)value,
                                                     req);
                }
                break;
+       case DEV_PM_QOS_LATENCY_TOLERANCE:
+               ret = pm_qos_update_target(&qos->latency_tolerance,
+                                          &req->data.pnode, action, value);
+               if (ret) {
+                       value = pm_qos_read_value(&qos->latency_tolerance);
+                       req->dev->power.set_latency_tolerance(req->dev, value);
+               }
+               break;
        case DEV_PM_QOS_FLAGS:
                ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
                                          action, value);
@@ -186,13 +194,21 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
        }
        BLOCKING_INIT_NOTIFIER_HEAD(n);
 
-       c = &qos->latency;
+       c = &qos->resume_latency;
        plist_head_init(&c->list);
-       c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
-       c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
+       c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
+       c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
+       c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
        c->type = PM_QOS_MIN;
        c->notifiers = n;
 
+       c = &qos->latency_tolerance;
+       plist_head_init(&c->list);
+       c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
+       c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
+       c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
+       c->type = PM_QOS_MIN;
+
        INIT_LIST_HEAD(&qos->flags.list);
 
        spin_lock_irq(&dev->power.lock);
@@ -224,7 +240,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
         * If the device's PM QoS resume latency limit or PM QoS flags have been
         * exposed to user space, they have to be hidden at this point.
         */
-       pm_qos_sysfs_remove_latency(dev);
+       pm_qos_sysfs_remove_resume_latency(dev);
        pm_qos_sysfs_remove_flags(dev);
 
        mutex_lock(&dev_pm_qos_mtx);
@@ -237,7 +253,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
                goto out;
 
        /* Flush the constraints lists for the device. */
-       c = &qos->latency;
+       c = &qos->resume_latency;
        plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
                /*
                 * Update constraints list and call the notification
@@ -246,6 +262,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
                apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
                memset(req, 0, sizeof(*req));
        }
+       c = &qos->latency_tolerance;
+       plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
+               apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+               memset(req, 0, sizeof(*req));
+       }
        f = &qos->flags;
        list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
                apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
@@ -265,6 +286,40 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
        mutex_unlock(&dev_pm_qos_sysfs_mtx);
 }
 
+static bool dev_pm_qos_invalid_request(struct device *dev,
+                                      struct dev_pm_qos_request *req)
+{
+       return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE
+                       && !dev->power.set_latency_tolerance);
+}
+
+static int __dev_pm_qos_add_request(struct device *dev,
+                                   struct dev_pm_qos_request *req,
+                                   enum dev_pm_qos_req_type type, s32 value)
+{
+       int ret = 0;
+
+       if (!dev || dev_pm_qos_invalid_request(dev, req))
+               return -EINVAL;
+
+       if (WARN(dev_pm_qos_request_active(req),
+                "%s() called for already added request\n", __func__))
+               return -EINVAL;
+
+       if (IS_ERR(dev->power.qos))
+               ret = -ENODEV;
+       else if (!dev->power.qos)
+               ret = dev_pm_qos_constraints_allocate(dev);
+
+       trace_dev_pm_qos_add_request(dev_name(dev), type, value);
+       if (!ret) {
+               req->dev = dev;
+               req->type = type;
+               ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
+       }
+       return ret;
+}
+
 /**
  * dev_pm_qos_add_request - inserts new qos request into the list
  * @dev: target device for the constraint
@@ -290,31 +345,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
                           enum dev_pm_qos_req_type type, s32 value)
 {
-       int ret = 0;
-
-       if (!dev || !req) /*guard against callers passing in null */
-               return -EINVAL;
-
-       if (WARN(dev_pm_qos_request_active(req),
-                "%s() called for already added request\n", __func__))
-               return -EINVAL;
+       int ret;
 
        mutex_lock(&dev_pm_qos_mtx);
-
-       if (IS_ERR(dev->power.qos))
-               ret = -ENODEV;
-       else if (!dev->power.qos)
-               ret = dev_pm_qos_constraints_allocate(dev);
-
-       trace_dev_pm_qos_add_request(dev_name(dev), type, value);
-       if (!ret) {
-               req->dev = dev;
-               req->type = type;
-               ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
-       }
-
+       ret = __dev_pm_qos_add_request(dev, req, type, value);
        mutex_unlock(&dev_pm_qos_mtx);
-
        return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
@@ -341,7 +376,8 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
                return -ENODEV;
 
        switch(req->type) {
-       case DEV_PM_QOS_LATENCY:
+       case DEV_PM_QOS_RESUME_LATENCY:
+       case DEV_PM_QOS_LATENCY_TOLERANCE:
                curr_value = req->data.pnode.prio;
                break;
        case DEV_PM_QOS_FLAGS:
@@ -460,8 +496,8 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
                ret = dev_pm_qos_constraints_allocate(dev);
 
        if (!ret)
-               ret = blocking_notifier_chain_register(
-                               dev->power.qos->latency.notifiers, notifier);
+               ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
+                                                      notifier);
 
        mutex_unlock(&dev_pm_qos_mtx);
        return ret;
@@ -487,9 +523,8 @@ int dev_pm_qos_remove_notifier(struct device *dev,
 
        /* Silently return if the constraints object is not present. */
        if (!IS_ERR_OR_NULL(dev->power.qos))
-               retval = blocking_notifier_chain_unregister(
-                               dev->power.qos->latency.notifiers,
-                               notifier);
+               retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
+                                                           notifier);
 
        mutex_unlock(&dev_pm_qos_mtx);
        return retval;
@@ -530,20 +565,32 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
  * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
  * @dev: Device whose ancestor to add the request for.
  * @req: Pointer to the preallocated handle.
+ * @type: Type of the request.
  * @value: Constraint latency value.
  */
 int dev_pm_qos_add_ancestor_request(struct device *dev,
-                                   struct dev_pm_qos_request *req, s32 value)
+                                   struct dev_pm_qos_request *req,
+                                   enum dev_pm_qos_req_type type, s32 value)
 {
        struct device *ancestor = dev->parent;
        int ret = -ENODEV;
 
-       while (ancestor && !ancestor->power.ignore_children)
-               ancestor = ancestor->parent;
+       switch (type) {
+       case DEV_PM_QOS_RESUME_LATENCY:
+               while (ancestor && !ancestor->power.ignore_children)
+                       ancestor = ancestor->parent;
 
+               break;
+       case DEV_PM_QOS_LATENCY_TOLERANCE:
+               while (ancestor && !ancestor->power.set_latency_tolerance)
+                       ancestor = ancestor->parent;
+
+               break;
+       default:
+               ancestor = NULL;
+       }
        if (ancestor)
-               ret = dev_pm_qos_add_request(ancestor, req,
-                                            DEV_PM_QOS_LATENCY, value);
+               ret = dev_pm_qos_add_request(ancestor, req, type, value);
 
        if (ret < 0)
                req->dev = NULL;
@@ -559,9 +606,13 @@ static void __dev_pm_qos_drop_user_request(struct device *dev,
        struct dev_pm_qos_request *req = NULL;
 
        switch(type) {
-       case DEV_PM_QOS_LATENCY:
-               req = dev->power.qos->latency_req;
-               dev->power.qos->latency_req = NULL;
+       case DEV_PM_QOS_RESUME_LATENCY:
+               req = dev->power.qos->resume_latency_req;
+               dev->power.qos->resume_latency_req = NULL;
+               break;
+       case DEV_PM_QOS_LATENCY_TOLERANCE:
+               req = dev->power.qos->latency_tolerance_req;
+               dev->power.qos->latency_tolerance_req = NULL;
                break;
        case DEV_PM_QOS_FLAGS:
                req = dev->power.qos->flags_req;
@@ -597,7 +648,7 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
        if (!req)
                return -ENOMEM;
 
-       ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
+       ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
        if (ret < 0) {
                kfree(req);
                return ret;
@@ -609,7 +660,7 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
 
        if (IS_ERR_OR_NULL(dev->power.qos))
                ret = -ENODEV;
-       else if (dev->power.qos->latency_req)
+       else if (dev->power.qos->resume_latency_req)
                ret = -EEXIST;
 
        if (ret < 0) {
@@ -618,13 +669,13 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
                mutex_unlock(&dev_pm_qos_mtx);
                goto out;
        }
-       dev->power.qos->latency_req = req;
+       dev->power.qos->resume_latency_req = req;
 
        mutex_unlock(&dev_pm_qos_mtx);
 
-       ret = pm_qos_sysfs_add_latency(dev);
+       ret = pm_qos_sysfs_add_resume_latency(dev);
        if (ret)
-               dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+               dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
 
  out:
        mutex_unlock(&dev_pm_qos_sysfs_mtx);
@@ -634,8 +685,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
 
 static void __dev_pm_qos_hide_latency_limit(struct device *dev)
 {
-       if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req)
-               __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+       if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
+               __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
 }
 
 /**
@@ -646,7 +697,7 @@ void dev_pm_qos_hide_latency_limit(struct device *dev)
 {
        mutex_lock(&dev_pm_qos_sysfs_mtx);
 
-       pm_qos_sysfs_remove_latency(dev);
+       pm_qos_sysfs_remove_resume_latency(dev);
 
        mutex_lock(&dev_pm_qos_mtx);
        __dev_pm_qos_hide_latency_limit(dev);
@@ -768,6 +819,67 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
        pm_runtime_put(dev);
        return ret;
 }
+
+/**
+ * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
+ * @dev: Device to obtain the user space latency tolerance for.
+ */
+s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
+{
+       s32 ret;
+
+       mutex_lock(&dev_pm_qos_mtx);
+       ret = IS_ERR_OR_NULL(dev->power.qos)
+               || !dev->power.qos->latency_tolerance_req ?
+                       PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
+                       dev->power.qos->latency_tolerance_req->data.pnode.prio;
+       mutex_unlock(&dev_pm_qos_mtx);
+       return ret;
+}
+
+/**
+ * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
+ * @dev: Device to update the user space latency tolerance for.
+ * @val: New user space latency tolerance for @dev (negative values disable).
+ */
+int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
+{
+       int ret;
+
+       mutex_lock(&dev_pm_qos_mtx);
+
+       if (IS_ERR_OR_NULL(dev->power.qos)
+           || !dev->power.qos->latency_tolerance_req) {
+               struct dev_pm_qos_request *req;
+
+               if (val < 0) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+               req = kzalloc(sizeof(*req), GFP_KERNEL);
+               if (!req) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
+               if (ret < 0) {
+                       kfree(req);
+                       goto out;
+               }
+               dev->power.qos->latency_tolerance_req = req;
+       } else {
+               if (val < 0) {
+                       __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
+                       ret = 0;
+               } else {
+                       ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
+               }
+       }
+
+ out:
+       mutex_unlock(&dev_pm_qos_mtx);
+       return ret;
+}
 #else /* !CONFIG_PM_RUNTIME */
 static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
 static void __dev_pm_qos_hide_flags(struct device *dev) {}
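
Since dev_pm_qos_add_ancestor_request() now takes the request type explicitly, existing callers have to spell out DEV_PM_QOS_RESUME_LATENCY to keep their previous behaviour, while new callers can instead target the nearest ancestor that implements set_latency_tolerance. A hedged sketch of an updated call site; foo_req, foo_dev and the 100 us value are illustrative only, not taken from this series:

    #include <linux/device.h>
    #include <linux/pm_qos.h>

    static struct dev_pm_qos_request foo_req;

    static int foo_limit_parent_resume_latency(struct device *foo_dev)
    {
        /*
         * Ask the nearest ancestor with ignore_children set to be
         * resumable within 100 microseconds; the type argument is
         * what this series adds to the function's signature.
         */
        return dev_pm_qos_add_ancestor_request(foo_dev, &foo_req,
                                               DEV_PM_QOS_RESUME_LATENCY, 100);
    }
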
index 72e00e6..67c7938 100644 (file)
 #include <trace/events/rpm.h>
 #include "power.h"
 
+#define RPM_GET_CALLBACK(dev, cb)                              \
+({                                                             \
+       int (*__rpm_cb)(struct device *__d);                    \
+                                                               \
+       if (dev->pm_domain)                                     \
+               __rpm_cb = dev->pm_domain->ops.cb;              \
+       else if (dev->type && dev->type->pm)                    \
+               __rpm_cb = dev->type->pm->cb;                   \
+       else if (dev->class && dev->class->pm)                  \
+               __rpm_cb = dev->class->pm->cb;                  \
+       else if (dev->bus && dev->bus->pm)                      \
+               __rpm_cb = dev->bus->pm->cb;                    \
+       else                                                    \
+               __rpm_cb = NULL;                                \
+                                                               \
+       if (!__rpm_cb && dev->driver && dev->driver->pm)        \
+               __rpm_cb = dev->driver->pm->cb;                 \
+                                                               \
+       __rpm_cb;                                               \
+})
+
+static int (*rpm_get_suspend_cb(struct device *dev))(struct device *)
+{
+       return RPM_GET_CALLBACK(dev, runtime_suspend);
+}
+
+static int (*rpm_get_resume_cb(struct device *dev))(struct device *)
+{
+       return RPM_GET_CALLBACK(dev, runtime_resume);
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int (*rpm_get_idle_cb(struct device *dev))(struct device *)
+{
+       return RPM_GET_CALLBACK(dev, runtime_idle);
+}
+
 static int rpm_resume(struct device *dev, int rpmflags);
 static int rpm_suspend(struct device *dev, int rpmflags);
 
@@ -310,19 +347,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
 
        dev->power.idle_notification = true;
 
-       if (dev->pm_domain)
-               callback = dev->pm_domain->ops.runtime_idle;
-       else if (dev->type && dev->type->pm)
-               callback = dev->type->pm->runtime_idle;
-       else if (dev->class && dev->class->pm)
-               callback = dev->class->pm->runtime_idle;
-       else if (dev->bus && dev->bus->pm)
-               callback = dev->bus->pm->runtime_idle;
-       else
-               callback = NULL;
-
-       if (!callback && dev->driver && dev->driver->pm)
-               callback = dev->driver->pm->runtime_idle;
+       callback = rpm_get_idle_cb(dev);
 
        if (callback)
                retval = __rpm_callback(callback, dev);
@@ -492,19 +517,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 
        __update_runtime_status(dev, RPM_SUSPENDING);
 
-       if (dev->pm_domain)
-               callback = dev->pm_domain->ops.runtime_suspend;
-       else if (dev->type && dev->type->pm)
-               callback = dev->type->pm->runtime_suspend;
-       else if (dev->class && dev->class->pm)
-               callback = dev->class->pm->runtime_suspend;
-       else if (dev->bus && dev->bus->pm)
-               callback = dev->bus->pm->runtime_suspend;
-       else
-               callback = NULL;
-
-       if (!callback && dev->driver && dev->driver->pm)
-               callback = dev->driver->pm->runtime_suspend;
+       callback = rpm_get_suspend_cb(dev);
 
        retval = rpm_callback(callback, dev);
        if (retval)
@@ -724,19 +737,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
 
        __update_runtime_status(dev, RPM_RESUMING);
 
-       if (dev->pm_domain)
-               callback = dev->pm_domain->ops.runtime_resume;
-       else if (dev->type && dev->type->pm)
-               callback = dev->type->pm->runtime_resume;
-       else if (dev->class && dev->class->pm)
-               callback = dev->class->pm->runtime_resume;
-       else if (dev->bus && dev->bus->pm)
-               callback = dev->bus->pm->runtime_resume;
-       else
-               callback = NULL;
-
-       if (!callback && dev->driver && dev->driver->pm)
-               callback = dev->driver->pm->runtime_resume;
+       callback = rpm_get_resume_cb(dev);
 
        retval = rpm_callback(callback, dev);
        if (retval) {
@@ -1130,7 +1131,7 @@ EXPORT_SYMBOL_GPL(pm_runtime_barrier);
  * @dev: Device to handle.
  * @check_resume: If set, check if there's a resume request for the device.
  *
- * Increment power.disable_depth for the device and if was zero previously,
+ * Increment power.disable_depth for the device and if it was zero previously,
  * cancel all pending runtime PM requests for the device and wait for all
  * operations in progress to complete.  The device can be either active or
  * suspended after its runtime PM has been disabled.
@@ -1401,3 +1402,86 @@ void pm_runtime_remove(struct device *dev)
        if (dev->power.irq_safe && dev->parent)
                pm_runtime_put(dev->parent);
 }
+#endif
+
+/**
+ * pm_runtime_force_suspend - Force a device into suspend state if needed.
+ * @dev: Device to suspend.
+ *
+ * Disable runtime PM so we can safely check the device's runtime PM status
+ * and, if it is active, invoke its .runtime_suspend callback to bring it into
+ * suspend state. Keep runtime PM disabled to preserve the state unless we
+ * encounter errors.
+ *
+ * Typically this function may be invoked from a system suspend callback to
+ * make sure the device is put into a low power state.
+ */
+int pm_runtime_force_suspend(struct device *dev)
+{
+       int (*callback)(struct device *);
+       int ret = 0;
+
+       pm_runtime_disable(dev);
+
+       /*
+        * Note that pm_runtime_status_suspended() returns false when
+        * CONFIG_PM_RUNTIME is unset, in which case the device will always
+        * be put into the low power state here.
+        */
+       if (pm_runtime_status_suspended(dev))
+               return 0;
+
+       callback = rpm_get_suspend_cb(dev);
+
+       if (!callback) {
+               ret = -ENOSYS;
+               goto err;
+       }
+
+       ret = callback(dev);
+       if (ret)
+               goto err;
+
+       pm_runtime_set_suspended(dev);
+       return 0;
+err:
+       pm_runtime_enable(dev);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
+
+/**
+ * pm_runtime_force_resume - Force a device into resume state.
+ * @dev: Device to resume.
+ *
+ * Prior to invoking this function we expect the user to have brought the
+ * device into a low power state by a call to pm_runtime_force_suspend(). Here
+ * we reverse those actions: we bring the device back to full power, update
+ * its runtime PM status and re-enable runtime PM.
+ *
+ * Typically this function may be invoked from a system resume callback to
+ * make sure the device is put into a full power state.
+ */
+int pm_runtime_force_resume(struct device *dev)
+{
+       int (*callback)(struct device *);
+       int ret = 0;
+
+       callback = rpm_get_resume_cb(dev);
+
+       if (!callback) {
+               ret = -ENOSYS;
+               goto out;
+       }
+
+       ret = callback(dev);
+       if (ret)
+               goto out;
+
+       pm_runtime_set_active(dev);
+       pm_runtime_mark_last_busy(dev);
+out:
+       pm_runtime_enable(dev);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
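
pm_runtime_force_suspend() and pm_runtime_force_resume() are intended to be wired straight into a driver's system sleep callbacks when the runtime PM callbacks already do all the work needed across system suspend. A minimal sketch under that assumption; the foo_* runtime callbacks are hypothetical:

    #include <linux/pm.h>
    #include <linux/pm_runtime.h>

    static int foo_runtime_suspend(struct device *dev)
    {
        /* ... put the device into its low power state ... */
        return 0;
    }

    static int foo_runtime_resume(struct device *dev)
    {
        /* ... bring the device back to full power ... */
        return 0;
    }

    static const struct dev_pm_ops foo_pm_ops = {
        /* Reuse the runtime PM path for system suspend/resume. */
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
    };
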
index 03e089a..95b181d 100644 (file)
@@ -218,15 +218,16 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
 static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
                autosuspend_delay_ms_store);
 
-static ssize_t pm_qos_latency_show(struct device *dev,
-                                  struct device_attribute *attr, char *buf)
+static ssize_t pm_qos_resume_latency_show(struct device *dev,
+                                         struct device_attribute *attr,
+                                         char *buf)
 {
-       return sprintf(buf, "%d\n", dev_pm_qos_requested_latency(dev));
+       return sprintf(buf, "%d\n", dev_pm_qos_requested_resume_latency(dev));
 }
 
-static ssize_t pm_qos_latency_store(struct device *dev,
-                                   struct device_attribute *attr,
-                                   const char *buf, size_t n)
+static ssize_t pm_qos_resume_latency_store(struct device *dev,
+                                          struct device_attribute *attr,
+                                          const char *buf, size_t n)
 {
        s32 value;
        int ret;
@@ -237,12 +238,47 @@ static ssize_t pm_qos_latency_store(struct device *dev,
        if (value < 0)
                return -EINVAL;
 
-       ret = dev_pm_qos_update_request(dev->power.qos->latency_req, value);
+       ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req,
+                                       value);
        return ret < 0 ? ret : n;
 }
 
 static DEVICE_ATTR(pm_qos_resume_latency_us, 0644,
-                  pm_qos_latency_show, pm_qos_latency_store);
+                  pm_qos_resume_latency_show, pm_qos_resume_latency_store);
+
+static ssize_t pm_qos_latency_tolerance_show(struct device *dev,
+                                            struct device_attribute *attr,
+                                            char *buf)
+{
+       s32 value = dev_pm_qos_get_user_latency_tolerance(dev);
+
+       if (value < 0)
+               return sprintf(buf, "auto\n");
+       else if (value == PM_QOS_LATENCY_ANY)
+               return sprintf(buf, "any\n");
+
+       return sprintf(buf, "%d\n", value);
+}
+
+static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
+                                             struct device_attribute *attr,
+                                             const char *buf, size_t n)
+{
+       s32 value;
+       int ret;
+
+       if (kstrtos32(buf, 0, &value)) {
+               if (!strcmp(buf, "auto") || !strcmp(buf, "auto\n"))
+                       value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
+               else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
+                       value = PM_QOS_LATENCY_ANY;
+       }
+       ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
+       return ret < 0 ? ret : n;
+}
+
+static DEVICE_ATTR(pm_qos_latency_tolerance_us, 0644,
+                  pm_qos_latency_tolerance_show, pm_qos_latency_tolerance_store);
 
 static ssize_t pm_qos_no_power_off_show(struct device *dev,
                                        struct device_attribute *attr,
@@ -618,15 +654,26 @@ static struct attribute_group pm_runtime_attr_group = {
        .attrs  = runtime_attrs,
 };
 
-static struct attribute *pm_qos_latency_attrs[] = {
+static struct attribute *pm_qos_resume_latency_attrs[] = {
 #ifdef CONFIG_PM_RUNTIME
        &dev_attr_pm_qos_resume_latency_us.attr,
 #endif /* CONFIG_PM_RUNTIME */
        NULL,
 };
-static struct attribute_group pm_qos_latency_attr_group = {
+static struct attribute_group pm_qos_resume_latency_attr_group = {
+       .name   = power_group_name,
+       .attrs  = pm_qos_resume_latency_attrs,
+};
+
+static struct attribute *pm_qos_latency_tolerance_attrs[] = {
+#ifdef CONFIG_PM_RUNTIME
+       &dev_attr_pm_qos_latency_tolerance_us.attr,
+#endif /* CONFIG_PM_RUNTIME */
+       NULL,
+};
+static struct attribute_group pm_qos_latency_tolerance_attr_group = {
        .name   = power_group_name,
-       .attrs  = pm_qos_latency_attrs,
+       .attrs  = pm_qos_latency_tolerance_attrs,
 };
 
 static struct attribute *pm_qos_flags_attrs[] = {
@@ -654,18 +701,23 @@ int dpm_sysfs_add(struct device *dev)
                if (rc)
                        goto err_out;
        }
-
        if (device_can_wakeup(dev)) {
                rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
-               if (rc) {
-                       if (pm_runtime_callbacks_present(dev))
-                               sysfs_unmerge_group(&dev->kobj,
-                                                   &pm_runtime_attr_group);
-                       goto err_out;
-               }
+               if (rc)
+                       goto err_runtime;
+       }
+       if (dev->power.set_latency_tolerance) {
+               rc = sysfs_merge_group(&dev->kobj,
+                                      &pm_qos_latency_tolerance_attr_group);
+               if (rc)
+                       goto err_wakeup;
        }
        return 0;
 
+ err_wakeup:
+       sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
+ err_runtime:
+       sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
  err_out:
        sysfs_remove_group(&dev->kobj, &pm_attr_group);
        return rc;
@@ -681,14 +733,14 @@ void wakeup_sysfs_remove(struct device *dev)
        sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
 }
 
-int pm_qos_sysfs_add_latency(struct device *dev)
+int pm_qos_sysfs_add_resume_latency(struct device *dev)
 {
-       return sysfs_merge_group(&dev->kobj, &pm_qos_latency_attr_group);
+       return sysfs_merge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
 }
 
-void pm_qos_sysfs_remove_latency(struct device *dev)
+void pm_qos_sysfs_remove_resume_latency(struct device *dev)
 {
-       sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_attr_group);
+       sysfs_unmerge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
 }
 
 int pm_qos_sysfs_add_flags(struct device *dev)
@@ -708,6 +760,7 @@ void rpm_sysfs_remove(struct device *dev)
 
 void dpm_sysfs_remove(struct device *dev)
 {
+       sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
        dev_pm_qos_constraints_destroy(dev);
        rpm_sysfs_remove(dev);
        sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
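
The new pm_qos_latency_tolerance_us attribute only shows up for devices whose power.set_latency_tolerance callback is populated before the device is registered, since dpm_sysfs_add() checks it when merging the attribute group. A hedged sketch of how a bus or platform layer might provide that hook (foo_set_latency_tolerance and the hardware programming step are placeholders); user space can then write a value in microseconds, "auto" or "any" to the file:

    #include <linux/device.h>
    #include <linux/pm_qos.h>

    static void foo_set_latency_tolerance(struct device *dev, s32 val)
    {
        /*
         * val is the effective constraint for this device:
         * PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT corresponds to the
         * "auto" setting and PM_QOS_LATENCY_ANY to "any" in sysfs.
         */
        /* ... program the platform's latency tolerance control ... */
    }

    static void foo_setup_latency_tolerance(struct device *dev)
    {
        /* Must be set before device_add() for the sysfs knob to appear. */
        dev->power.set_latency_tolerance = foo_set_latency_tolerance;
    }
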
index 3129749..9fb6270 100644 (file)
@@ -2,6 +2,7 @@
 # ARM CPU Frequency scaling drivers
 #
 
+# big LITTLE core layer and glue drivers
 config ARM_BIG_LITTLE_CPUFREQ
        tristate "Generic ARM big LITTLE CPUfreq driver"
        depends on ARM && BIG_LITTLE && ARM_CPU_TOPOLOGY && HAVE_CLK
@@ -16,6 +17,14 @@ config ARM_DT_BL_CPUFREQ
          This enables probing via DT for Generic CPUfreq driver for ARM
          big.LITTLE platform. This gets frequency tables from DT.
 
+config ARM_VEXPRESS_SPC_CPUFREQ
+        tristate "Versatile Express SPC based CPUfreq driver"
+        depends on ARM_BIG_LITTLE_CPUFREQ && ARCH_VEXPRESS_SPC
+        help
+          This adds the CPUfreq driver support for Versatile Express
+          big.LITTLE platforms using SPC for power management.
+
+
 config ARM_EXYNOS_CPUFREQ
        bool
 
@@ -241,11 +250,3 @@ config ARM_TEGRA_CPUFREQ
        default y
        help
          This adds the CPUFreq driver support for TEGRA SOCs.
-
-config ARM_VEXPRESS_SPC_CPUFREQ
-        tristate "Versatile Express SPC based CPUfreq driver"
-        select ARM_BIG_LITTLE_CPUFREQ
-        depends on ARCH_VEXPRESS_SPC
-        help
-          This add the CPUfreq driver support for Versatile Express
-         big.LITTLE platforms using SPC for power management.
index 18448a7..822ca03 100644 (file)
@@ -855,7 +855,6 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
        pr_debug("acpi_cpufreq_cpu_exit\n");
 
        if (data) {
-               cpufreq_frequency_table_put_attr(policy->cpu);
                per_cpu(acfreq_data, policy->cpu) = NULL;
                acpi_processor_unregister_performance(data->acpi_data,
                                                      policy->cpu);
index 72f87e9..bad2ed3 100644 (file)
@@ -446,9 +446,12 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
        }
 
        if (cur_cluster < MAX_CLUSTERS) {
+               int cpu;
+
                cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
 
-               per_cpu(physical_cluster, policy->cpu) = cur_cluster;
+               for_each_cpu(cpu, policy->cpus)
+                       per_cpu(physical_cluster, cpu) = cur_cluster;
        } else {
                /* Assumption: during init, we are always running on A15 */
                per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
@@ -478,7 +481,6 @@ static int bL_cpufreq_exit(struct cpufreq_policy *policy)
                return -ENODEV;
        }
 
-       cpufreq_frequency_table_put_attr(policy->cpu);
        put_cluster_clk_and_freq_table(cpu_dev);
        dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);
 
index e9e63fc..a9f8e5b 100644 (file)
@@ -195,7 +195,6 @@ static struct cpufreq_driver bfin_driver = {
        .target_index = bfin_target,
        .get = bfin_getfreq_khz,
        .init = __bfin_cpu_init,
-       .exit = cpufreq_generic_exit,
        .name = "bfin cpufreq",
        .attr = cpufreq_generic_attr,
 };
index 0c12ffc..1bf6bba 100644 (file)
@@ -109,7 +109,6 @@ static struct cpufreq_driver cpu0_cpufreq_driver = {
        .target_index = cpu0_set_target,
        .get = cpufreq_generic_get,
        .init = cpu0_cpufreq_init,
-       .exit = cpufreq_generic_exit,
        .name = "generic_cpu0",
        .attr = cpufreq_generic_attr,
 };
index 199b52b..3aa7a7a 100644 (file)
@@ -26,7 +26,7 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
-#include <linux/syscore_ops.h>
+#include <linux/suspend.h>
 #include <linux/tick.h>
 #include <trace/events/power.h>
 
@@ -42,10 +42,11 @@ static DEFINE_RWLOCK(cpufreq_driver_lock);
 DEFINE_MUTEX(cpufreq_governor_lock);
 static LIST_HEAD(cpufreq_policy_list);
 
-#ifdef CONFIG_HOTPLUG_CPU
 /* This one keeps track of the previously set governor of a removed CPU */
 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
-#endif
+
+/* Flag to suspend/resume CPUFreq governors */
+static bool cpufreq_suspended;
 
 static inline bool has_target(void)
 {
@@ -181,8 +182,8 @@ unsigned int cpufreq_generic_get(unsigned int cpu)
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
 
        if (!policy || IS_ERR(policy->clk)) {
-               pr_err("%s: No %s associated to cpu: %d\n", __func__,
-                               policy ? "clk" : "policy", cpu);
+               pr_err("%s: No %s associated to cpu: %d\n",
+                      __func__, policy ? "clk" : "policy", cpu);
                return 0;
        }
 
@@ -190,6 +191,12 @@ unsigned int cpufreq_generic_get(unsigned int cpu)
 }
 EXPORT_SYMBOL_GPL(cpufreq_generic_get);
 
+/* Only for cpufreq core internal use */
+struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
+{
+       return per_cpu(cpufreq_cpu_data, cpu);
+}
+
 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 {
        struct cpufreq_policy *policy = NULL;
@@ -254,15 +261,14 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
        if (!l_p_j_ref_freq) {
                l_p_j_ref = loops_per_jiffy;
                l_p_j_ref_freq = ci->old;
-               pr_debug("saving %lu as reference value for loops_per_jiffy; "
-                       "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
+               pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
+                        l_p_j_ref, l_p_j_ref_freq);
        }
-       if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
-           (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
+       if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
                loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
                                                                ci->new);
-               pr_debug("scaling loops_per_jiffy to %lu "
-                       "for frequency %u kHz\n", loops_per_jiffy, ci->new);
+               pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
+                        loops_per_jiffy, ci->new);
        }
 }
 #else
@@ -282,7 +288,7 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
 
        freqs->flags = cpufreq_driver->flags;
        pr_debug("notification %u of frequency transition to %u kHz\n",
-               state, freqs->new);
+                state, freqs->new);
 
        switch (state) {
 
@@ -294,9 +300,8 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
                if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                        if ((policy) && (policy->cpu == freqs->cpu) &&
                            (policy->cur) && (policy->cur != freqs->old)) {
-                               pr_debug("Warning: CPU frequency is"
-                                       " %u, cpufreq assumed %u kHz.\n",
-                                       freqs->old, policy->cur);
+                               pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
+                                        freqs->old, policy->cur);
                                freqs->old = policy->cur;
                        }
                }
@@ -307,8 +312,8 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
 
        case CPUFREQ_POSTCHANGE:
                adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
-               pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
-                       (unsigned long)freqs->cpu);
+               pr_debug("FREQ: %lu - CPU: %lu\n",
+                        (unsigned long)freqs->new, (unsigned long)freqs->cpu);
                trace_cpu_frequency(freqs->new, freqs->cpu);
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_POSTCHANGE, freqs);
@@ -352,7 +357,7 @@ EXPORT_SYMBOL_GPL(cpufreq_notify_post_transition);
 /*********************************************************************
  *                          SYSFS INTERFACE                          *
  *********************************************************************/
-ssize_t show_boost(struct kobject *kobj,
+static ssize_t show_boost(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
 {
        return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
@@ -368,13 +373,13 @@ static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
                return -EINVAL;
 
        if (cpufreq_boost_trigger_state(enable)) {
-               pr_err("%s: Cannot %s BOOST!\n", __func__,
-                      enable ? "enable" : "disable");
+               pr_err("%s: Cannot %s BOOST!\n",
+                      __func__, enable ? "enable" : "disable");
                return -EINVAL;
        }
 
-       pr_debug("%s: cpufreq BOOST %s\n", __func__,
-                enable ? "enabled" : "disabled");
+       pr_debug("%s: cpufreq BOOST %s\n",
+                __func__, enable ? "enabled" : "disabled");
 
        return count;
 }
@@ -879,18 +884,25 @@ err_out_kobj_put:
 
 static void cpufreq_init_policy(struct cpufreq_policy *policy)
 {
+       struct cpufreq_governor *gov = NULL;
        struct cpufreq_policy new_policy;
        int ret = 0;
 
        memcpy(&new_policy, policy, sizeof(*policy));
 
+       /* Update governor of new_policy to the governor used before hotplug */
+       gov = __find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
+       if (gov)
+               pr_debug("Restoring governor %s for cpu %d\n",
+                               gov->name, policy->cpu);
+       else
+               gov = CPUFREQ_DEFAULT_GOVERNOR;
+
+       new_policy.governor = gov;
+
        /* Use the default policy if its valid. */
        if (cpufreq_driver->setpolicy)
-               cpufreq_parse_governor(policy->governor->name,
-                                       &new_policy.policy, NULL);
-
-       /* assure that the starting sequence is run in cpufreq_set_policy */
-       policy->governor = NULL;
+               cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
 
        /* set default policy */
        ret = cpufreq_set_policy(policy, &new_policy);
@@ -927,8 +939,11 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
        up_write(&policy->rwsem);
 
        if (has_target()) {
-               if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
-                       (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
+               ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+               if (!ret)
+                       ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+
+               if (ret) {
                        pr_err("%s: Failed to start governor\n", __func__);
                        return ret;
                }
@@ -949,6 +964,8 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
 
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
+       policy->governor = NULL;
+
        return policy;
 }
 
@@ -1022,21 +1039,19 @@ static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
 
        up_write(&policy->rwsem);
 
-       cpufreq_frequency_table_update_policy_cpu(policy);
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                        CPUFREQ_UPDATE_POLICY_CPU, policy);
 }
 
-static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
-                            bool frozen)
+static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 {
        unsigned int j, cpu = dev->id;
        int ret = -ENOMEM;
        struct cpufreq_policy *policy;
        unsigned long flags;
+       bool recover_policy = cpufreq_suspended;
 #ifdef CONFIG_HOTPLUG_CPU
        struct cpufreq_policy *tpolicy;
-       struct cpufreq_governor *gov;
 #endif
 
        if (cpu_is_offline(cpu))
@@ -1075,9 +1090,9 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
         * Restore the saved policy when doing light-weight init and fall back
         * to the full init if that fails.
         */
-       policy = frozen ? cpufreq_policy_restore(cpu) : NULL;
+       policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
        if (!policy) {
-               frozen = false;
+               recover_policy = false;
                policy = cpufreq_policy_alloc();
                if (!policy)
                        goto nomem_out;
@@ -1089,12 +1104,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
         * the creation of a brand new one. So we need to perform this update
         * by invoking update_policy_cpu().
         */
-       if (frozen && cpu != policy->cpu)
+       if (recover_policy && cpu != policy->cpu)
                update_policy_cpu(policy, cpu);
        else
                policy->cpu = cpu;
 
-       policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
        cpumask_copy(policy->cpus, cpumask_of(cpu));
 
        init_completion(&policy->kobj_unregister);
@@ -1118,7 +1132,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
         */
        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
 
-       if (!frozen) {
+       if (!recover_policy) {
                policy->user_policy.min = policy->min;
                policy->user_policy.max = policy->max;
        }
@@ -1180,16 +1194,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                     CPUFREQ_START, policy);
 
-#ifdef CONFIG_HOTPLUG_CPU
-       gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
-       if (gov) {
-               policy->governor = gov;
-               pr_debug("Restoring governor %s for cpu %d\n",
-                      policy->governor->name, cpu);
-       }
-#endif
-
-       if (!frozen) {
+       if (!recover_policy) {
                ret = cpufreq_add_dev_interface(policy, dev);
                if (ret)
                        goto err_out_unregister;
@@ -1203,7 +1208,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 
        cpufreq_init_policy(policy);
 
-       if (!frozen) {
+       if (!recover_policy) {
                policy->user_policy.policy = policy->policy;
                policy->user_policy.governor = policy->governor;
        }
@@ -1226,7 +1231,7 @@ err_get_freq:
        if (cpufreq_driver->exit)
                cpufreq_driver->exit(policy);
 err_set_policy_cpu:
-       if (frozen) {
+       if (recover_policy) {
                /* Do not leave stale fallback data behind. */
                per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
                cpufreq_policy_put_kobj(policy);
@@ -1250,7 +1255,7 @@ nomem_out:
  */
 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 {
-       return __cpufreq_add_dev(dev, sif, false);
+       return __cpufreq_add_dev(dev, sif);
 }
 
 static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
@@ -1265,7 +1270,7 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
        sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
        ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
        if (ret) {
-               pr_err("%s: Failed to move kobj: %d", __func__, ret);
+               pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
 
                down_write(&policy->rwsem);
                cpumask_set_cpu(old_cpu, policy->cpus);
@@ -1281,8 +1286,7 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
 }
 
 static int __cpufreq_remove_dev_prepare(struct device *dev,
-                                       struct subsys_interface *sif,
-                                       bool frozen)
+                                       struct subsys_interface *sif)
 {
        unsigned int cpu = dev->id, cpus;
        int new_cpu, ret;
@@ -1296,7 +1300,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
        policy = per_cpu(cpufreq_cpu_data, cpu);
 
        /* Save the policy somewhere when doing a light-weight tear-down */
-       if (frozen)
+       if (cpufreq_suspended)
                per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
 
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -1314,11 +1318,9 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
                }
        }
 
-#ifdef CONFIG_HOTPLUG_CPU
        if (!cpufreq_driver->setpolicy)
                strncpy(per_cpu(cpufreq_cpu_governor, cpu),
                        policy->governor->name, CPUFREQ_NAME_LEN);
-#endif
 
        down_read(&policy->rwsem);
        cpus = cpumask_weight(policy->cpus);
@@ -1331,19 +1333,19 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
                if (new_cpu >= 0) {
                        update_policy_cpu(policy, new_cpu);
 
-                       if (!frozen) {
+                       if (!cpufreq_suspended)
                                pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
-                                               __func__, new_cpu, cpu);
-                       }
+                                        __func__, new_cpu, cpu);
                }
+       } else if (cpufreq_driver->stop_cpu && cpufreq_driver->setpolicy) {
+               cpufreq_driver->stop_cpu(policy);
        }
 
        return 0;
 }
 
 static int __cpufreq_remove_dev_finish(struct device *dev,
-                                      struct subsys_interface *sif,
-                                      bool frozen)
+                                      struct subsys_interface *sif)
 {
        unsigned int cpu = dev->id, cpus;
        int ret;
@@ -1373,12 +1375,12 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
                                        CPUFREQ_GOV_POLICY_EXIT);
                        if (ret) {
                                pr_err("%s: Failed to exit governor\n",
-                                               __func__);
+                                      __func__);
                                return ret;
                        }
                }
 
-               if (!frozen)
+               if (!cpufreq_suspended)
                        cpufreq_policy_put_kobj(policy);
 
                /*
@@ -1394,16 +1396,16 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
                list_del(&policy->policy_list);
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-               if (!frozen)
+               if (!cpufreq_suspended)
                        cpufreq_policy_free(policy);
-       } else {
-               if (has_target()) {
-                       if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
-                                       (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
-                               pr_err("%s: Failed to start governor\n",
-                                               __func__);
-                               return ret;
-                       }
+       } else if (has_target()) {
+               ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+               if (!ret)
+                       ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+
+               if (ret) {
+                       pr_err("%s: Failed to start governor\n", __func__);
+                       return ret;
                }
        }
 
@@ -1424,10 +1426,10 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
        if (cpu_is_offline(cpu))
                return 0;
 
-       ret = __cpufreq_remove_dev_prepare(dev, sif, false);
+       ret = __cpufreq_remove_dev_prepare(dev, sif);
 
        if (!ret)
-               ret = __cpufreq_remove_dev_finish(dev, sif, false);
+               ret = __cpufreq_remove_dev_finish(dev, sif);
 
        return ret;
 }
@@ -1458,8 +1460,8 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
        struct cpufreq_freqs freqs;
        unsigned long flags;
 
-       pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
-              "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
+       pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
+                old_freq, new_freq);
 
        freqs.old = old_freq;
        freqs.new = new_freq;
@@ -1570,83 +1572,104 @@ static struct subsys_interface cpufreq_interface = {
        .remove_dev     = cpufreq_remove_dev,
 };
 
+/*
+ * In case platform wants some specific frequency to be configured
+ * during suspend..
+ */
+int cpufreq_generic_suspend(struct cpufreq_policy *policy)
+{
+       int ret;
+
+       if (!policy->suspend_freq) {
+               pr_err("%s: suspend_freq can't be zero\n", __func__);
+               return -EINVAL;
+       }
+
+       pr_debug("%s: Setting suspend-freq: %u\n", __func__,
+                       policy->suspend_freq);
+
+       ret = __cpufreq_driver_target(policy, policy->suspend_freq,
+                       CPUFREQ_RELATION_H);
+       if (ret)
+               pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
+                               __func__, policy->suspend_freq, ret);
+
+       return ret;
+}
+EXPORT_SYMBOL(cpufreq_generic_suspend);
+
 /**
- * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
+ * cpufreq_suspend() - Suspend CPUFreq governors
  *
- * This function is only executed for the boot processor.  The other CPUs
- * have been put offline by means of CPU hotplug.
+ * Called during system wide Suspend/Hibernate cycles for suspending governors,
+ * as some platforms can't change frequency after this point in the suspend
+ * cycle: some of the devices (e.g. i2c, regulators) used for changing the
+ * frequency are suspended shortly after this point.
  */
-static int cpufreq_bp_suspend(void)
+void cpufreq_suspend(void)
 {
-       int ret = 0;
-
-       int cpu = smp_processor_id();
        struct cpufreq_policy *policy;
 
-       pr_debug("suspending cpu %u\n", cpu);
+       if (!cpufreq_driver)
+               return;
 
-       /* If there's no policy for the boot CPU, we have nothing to do. */
-       policy = cpufreq_cpu_get(cpu);
-       if (!policy)
-               return 0;
+       if (!has_target())
+               return;
 
-       if (cpufreq_driver->suspend) {
-               ret = cpufreq_driver->suspend(policy);
-               if (ret)
-                       printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
-                                       "step on CPU %u\n", policy->cpu);
+       pr_debug("%s: Suspending Governors\n", __func__);
+
+       list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
+               if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
+                       pr_err("%s: Failed to stop governor for policy: %p\n",
+                               __func__, policy);
+               else if (cpufreq_driver->suspend
+                   && cpufreq_driver->suspend(policy))
+                       pr_err("%s: Failed to suspend driver: %p\n", __func__,
+                               policy);
        }
 
-       cpufreq_cpu_put(policy);
-       return ret;
+       cpufreq_suspended = true;
 }
 
 /**
- * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
+ * cpufreq_resume() - Resume CPUFreq governors
  *
- *     1.) resume CPUfreq hardware support (cpufreq_driver->resume())
- *     2.) schedule call cpufreq_update_policy() ASAP as interrupts are
- *         restored. It will verify that the current freq is in sync with
- *         what we believe it to be. This is a bit later than when it
- *         should be, but nonethteless it's better than calling
- *         cpufreq_driver->get() here which might re-enable interrupts...
- *
- * This function is only executed for the boot CPU.  The other CPUs have not
- * been turned on yet.
+ * Called during system wide Suspend/Hibernate cycle for resuming governors that
+ * are suspended with cpufreq_suspend().
  */
-static void cpufreq_bp_resume(void)
+void cpufreq_resume(void)
 {
-       int ret = 0;
-
-       int cpu = smp_processor_id();
        struct cpufreq_policy *policy;
 
-       pr_debug("resuming cpu %u\n", cpu);
+       if (!cpufreq_driver)
+               return;
 
-       /* If there's no policy for the boot CPU, we have nothing to do. */
-       policy = cpufreq_cpu_get(cpu);
-       if (!policy)
+       if (!has_target())
                return;
 
-       if (cpufreq_driver->resume) {
-               ret = cpufreq_driver->resume(policy);
-               if (ret) {
-                       printk(KERN_ERR "cpufreq: resume failed in ->resume "
-                                       "step on CPU %u\n", policy->cpu);
-                       goto fail;
-               }
-       }
+       pr_debug("%s: Resuming Governors\n", __func__);
 
-       schedule_work(&policy->update);
+       cpufreq_suspended = false;
 
-fail:
-       cpufreq_cpu_put(policy);
-}
+       list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
+               if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
+                   || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
+                       pr_err("%s: Failed to start governor for policy: %p\n",
+                               __func__, policy);
+               else if (cpufreq_driver->resume
+                   && cpufreq_driver->resume(policy))
+                       pr_err("%s: Failed to resume driver: %p\n", __func__,
+                               policy);
 
-static struct syscore_ops cpufreq_syscore_ops = {
-       .suspend        = cpufreq_bp_suspend,
-       .resume         = cpufreq_bp_resume,
-};
+               /*
+                * Schedule cpufreq_update_policy() for the boot CPU, i.e. the
+                * last policy in the list. It will verify that the current
+                * freq is in sync with what we believe it to be.
+                */
+               if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
+                       schedule_work(&policy->update);
+       }
+}
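With governors now suspended and resumed by the core, a platform driver that has to pin the CPU to a known frequency across suspend no longer needs its own PM notifier: it sets policy->suspend_freq in ->init and points ->suspend at cpufreq_generic_suspend(), which is exactly what the exynos, s5pv210 and tegra conversions further down do. A minimal sketch of that pattern; the foo_* names and frequency values are invented for illustration and are not part of this patch:

    #include <linux/cpufreq.h>
    #include <linux/module.h>

    static struct cpufreq_frequency_table foo_freq_table[] = {
            { .frequency = 400000 },                /* kHz */
            { .frequency = 800000 },
            { .frequency = CPUFREQ_TABLE_END },
    };

    static int foo_target_index(struct cpufreq_policy *policy, unsigned int index)
    {
            /* Program clocks/regulators for foo_freq_table[index].frequency here. */
            return 0;
    }

    static int foo_cpu_init(struct cpufreq_policy *policy)
    {
            /* Frequency that cpufreq_generic_suspend() will switch to on suspend. */
            policy->suspend_freq = 800000;
            return cpufreq_generic_init(policy, foo_freq_table, 100000);
    }

    static struct cpufreq_driver foo_cpufreq_driver = {
            .verify         = cpufreq_generic_frequency_table_verify,
            .target_index   = foo_target_index,
            .init           = foo_cpu_init,
            .name           = "foo-cpufreq",
            .attr           = cpufreq_generic_attr,
    #ifdef CONFIG_PM
            /* Called from cpufreq_suspend() above, after the governor is stopped. */
            .suspend        = cpufreq_generic_suspend,
    #endif
    };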
 
 /**
  *     cpufreq_get_current_driver - return current driver's name
@@ -1762,7 +1785,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
                target_freq = policy->min;
 
        pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
-                       policy->cpu, target_freq, relation, old_target_freq);
+                policy->cpu, target_freq, relation, old_target_freq);
 
        /*
         * This might look like a redundant call as we are checking it again
@@ -1807,8 +1830,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
                        freqs.flags = 0;
 
                        pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
-                                       __func__, policy->cpu, freqs.old,
-                                       freqs.new);
+                                __func__, policy->cpu, freqs.old, freqs.new);
 
                        cpufreq_notify_transition(policy, &freqs,
                                        CPUFREQ_PRECHANGE);
@@ -1817,7 +1839,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
                retval = cpufreq_driver->target_index(policy, index);
                if (retval)
                        pr_err("%s: Failed to change cpu frequency: %d\n",
-                                       __func__, retval);
+                              __func__, retval);
 
                if (notify)
                        cpufreq_notify_post_transition(policy, &freqs, retval);
@@ -1863,17 +1885,18 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
        struct cpufreq_governor *gov = NULL;
 #endif
 
+       /* Don't start any governor operations if we are entering suspend */
+       if (cpufreq_suspended)
+               return 0;
+
        if (policy->governor->max_transition_latency &&
            policy->cpuinfo.transition_latency >
            policy->governor->max_transition_latency) {
                if (!gov)
                        return -EINVAL;
                else {
-                       printk(KERN_WARNING "%s governor failed, too long"
-                              " transition latency of HW, fallback"
-                              " to %s governor\n",
-                              policy->governor->name,
-                              gov->name);
+                       pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
+                               policy->governor->name, gov->name);
                        policy->governor = gov;
                }
        }
@@ -1883,7 +1906,7 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
                        return -EINVAL;
 
        pr_debug("__cpufreq_governor for CPU %u, event %u\n",
-                                               policy->cpu, event);
+                policy->cpu, event);
 
        mutex_lock(&cpufreq_governor_lock);
        if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
@@ -1950,9 +1973,7 @@ EXPORT_SYMBOL_GPL(cpufreq_register_governor);
 
 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
 {
-#ifdef CONFIG_HOTPLUG_CPU
        int cpu;
-#endif
 
        if (!governor)
                return;
@@ -1960,14 +1981,12 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
        if (cpufreq_disabled())
                return;
 
-#ifdef CONFIG_HOTPLUG_CPU
        for_each_present_cpu(cpu) {
                if (cpu_online(cpu))
                        continue;
                if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
                        strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
        }
-#endif
 
        mutex_lock(&cpufreq_governor_mutex);
        list_del(&governor->governor_list);
@@ -2012,22 +2031,21 @@ EXPORT_SYMBOL(cpufreq_get_policy);
 static int cpufreq_set_policy(struct cpufreq_policy *policy,
                                struct cpufreq_policy *new_policy)
 {
-       int ret = 0, failed = 1;
+       struct cpufreq_governor *old_gov;
+       int ret;
 
-       pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
-               new_policy->min, new_policy->max);
+       pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
+                new_policy->cpu, new_policy->min, new_policy->max);
 
        memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
 
-       if (new_policy->min > policy->max || new_policy->max < policy->min) {
-               ret = -EINVAL;
-               goto error_out;
-       }
+       if (new_policy->min > policy->max || new_policy->max < policy->min)
+               return -EINVAL;
 
        /* verify the cpu speed can be set within this limit */
        ret = cpufreq_driver->verify(new_policy);
        if (ret)
-               goto error_out;
+               return ret;
 
        /* adjust if necessary - all reasons */
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
@@ -2043,7 +2061,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
         */
        ret = cpufreq_driver->verify(new_policy);
        if (ret)
-               goto error_out;
+               return ret;
 
        /* notification of the new policy */
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
@@ -2053,63 +2071,53 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
        policy->max = new_policy->max;
 
        pr_debug("new min and max freqs are %u - %u kHz\n",
-                                       policy->min, policy->max);
+                policy->min, policy->max);
 
        if (cpufreq_driver->setpolicy) {
                policy->policy = new_policy->policy;
                pr_debug("setting range\n");
-               ret = cpufreq_driver->setpolicy(new_policy);
-       } else {
-               if (new_policy->governor != policy->governor) {
-                       /* save old, working values */
-                       struct cpufreq_governor *old_gov = policy->governor;
-
-                       pr_debug("governor switch\n");
-
-                       /* end old governor */
-                       if (policy->governor) {
-                               __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-                               up_write(&policy->rwsem);
-                               __cpufreq_governor(policy,
-                                               CPUFREQ_GOV_POLICY_EXIT);
-                               down_write(&policy->rwsem);
-                       }
+               return cpufreq_driver->setpolicy(new_policy);
+       }
 
-                       /* start new governor */
-                       policy->governor = new_policy->governor;
-                       if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
-                               if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
-                                       failed = 0;
-                               } else {
-                                       up_write(&policy->rwsem);
-                                       __cpufreq_governor(policy,
-                                                       CPUFREQ_GOV_POLICY_EXIT);
-                                       down_write(&policy->rwsem);
-                               }
-                       }
+       if (new_policy->governor == policy->governor)
+               goto out;
 
-                       if (failed) {
-                               /* new governor failed, so re-start old one */
-                               pr_debug("starting governor %s failed\n",
-                                                       policy->governor->name);
-                               if (old_gov) {
-                                       policy->governor = old_gov;
-                                       __cpufreq_governor(policy,
-                                                       CPUFREQ_GOV_POLICY_INIT);
-                                       __cpufreq_governor(policy,
-                                                          CPUFREQ_GOV_START);
-                               }
-                               ret = -EINVAL;
-                               goto error_out;
-                       }
-                       /* might be a policy change, too, so fall through */
-               }
-               pr_debug("governor: change or update limits\n");
-               ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+       pr_debug("governor switch\n");
+
+       /* save old, working values */
+       old_gov = policy->governor;
+       /* end old governor */
+       if (old_gov) {
+               __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+               up_write(&policy->rwsem);
+               __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+               down_write(&policy->rwsem);
        }
 
-error_out:
-       return ret;
+       /* start new governor */
+       policy->governor = new_policy->governor;
+       if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
+               if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
+                       goto out;
+
+               up_write(&policy->rwsem);
+               __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+               down_write(&policy->rwsem);
+       }
+
+       /* new governor failed, so re-start old one */
+       pr_debug("starting governor %s failed\n", policy->governor->name);
+       if (old_gov) {
+               policy->governor = old_gov;
+               __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
+               __cpufreq_governor(policy, CPUFREQ_GOV_START);
+       }
+
+       return -EINVAL;
+
+ out:
+       pr_debug("governor: change or update limits\n");
+       return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
 }
 
 /**
@@ -2145,8 +2153,13 @@ int cpufreq_update_policy(unsigned int cpu)
         */
        if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
                new_policy.cur = cpufreq_driver->get(cpu);
+               if (WARN_ON(!new_policy.cur)) {
+                       ret = -EIO;
+                       goto no_policy;
+               }
+
                if (!policy->cur) {
-                       pr_debug("Driver did not initialize current freq");
+                       pr_debug("Driver did not initialize current freq\n");
                        policy->cur = new_policy.cur;
                } else {
                        if (policy->cur != new_policy.cur && has_target())
@@ -2170,30 +2183,24 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
 {
        unsigned int cpu = (unsigned long)hcpu;
        struct device *dev;
-       bool frozen = false;
 
        dev = get_cpu_device(cpu);
        if (dev) {
-
-               if (action & CPU_TASKS_FROZEN)
-                       frozen = true;
-
                switch (action & ~CPU_TASKS_FROZEN) {
                case CPU_ONLINE:
-                       __cpufreq_add_dev(dev, NULL, frozen);
-                       cpufreq_update_policy(cpu);
+                       __cpufreq_add_dev(dev, NULL);
                        break;
 
                case CPU_DOWN_PREPARE:
-                       __cpufreq_remove_dev_prepare(dev, NULL, frozen);
+                       __cpufreq_remove_dev_prepare(dev, NULL);
                        break;
 
                case CPU_POST_DEAD:
-                       __cpufreq_remove_dev_finish(dev, NULL, frozen);
+                       __cpufreq_remove_dev_finish(dev, NULL);
                        break;
 
                case CPU_DOWN_FAILED:
-                       __cpufreq_add_dev(dev, NULL, frozen);
+                       __cpufreq_add_dev(dev, NULL);
                        break;
                }
        }
@@ -2249,8 +2256,8 @@ int cpufreq_boost_trigger_state(int state)
                cpufreq_driver->boost_enabled = !state;
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-               pr_err("%s: Cannot %s BOOST\n", __func__,
-                      state ? "enable" : "disable");
+               pr_err("%s: Cannot %s BOOST\n",
+                      __func__, state ? "enable" : "disable");
        }
 
        return ret;
@@ -2295,7 +2302,9 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 
        if (!driver_data || !driver_data->verify || !driver_data->init ||
            !(driver_data->setpolicy || driver_data->target_index ||
-                   driver_data->target))
+                   driver_data->target) ||
+            (driver_data->setpolicy && (driver_data->target_index ||
+                   driver_data->target)))
                return -EINVAL;
 
        pr_debug("trying to register driver %s\n", driver_data->name);
@@ -2322,7 +2331,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
                ret = cpufreq_sysfs_create_file(&boost.attr);
                if (ret) {
                        pr_err("%s: cannot register global BOOST sysfs file\n",
-                               __func__);
+                              __func__);
                        goto err_null_driver;
                }
        }
@@ -2345,7 +2354,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
                /* if all ->init() calls failed, unregister */
                if (ret) {
                        pr_debug("no CPU initialized for driver %s\n",
-                                                       driver_data->name);
+                                driver_data->name);
                        goto err_if_unreg;
                }
        }
@@ -2409,7 +2418,6 @@ static int __init cpufreq_core_init(void)
 
        cpufreq_global_kobject = kobject_create();
        BUG_ON(!cpufreq_global_kobject);
-       register_syscore_ops(&cpufreq_syscore_ops);
 
        return 0;
 }
index 79911a2..ecaaebf 100644 (file)
@@ -180,27 +180,25 @@ static void cpufreq_stats_free_table(unsigned int cpu)
        cpufreq_cpu_put(policy);
 }
 
-static int __cpufreq_stats_create_table(struct cpufreq_policy *policy,
-               struct cpufreq_frequency_table *table)
+static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
 {
        unsigned int i, j, count = 0, ret = 0;
        struct cpufreq_stats *stat;
-       struct cpufreq_policy *current_policy;
        unsigned int alloc_size;
        unsigned int cpu = policy->cpu;
+       struct cpufreq_frequency_table *table;
+
+       table = cpufreq_frequency_get_table(cpu);
+       if (unlikely(!table))
+               return 0;
+
        if (per_cpu(cpufreq_stats_table, cpu))
                return -EBUSY;
        stat = kzalloc(sizeof(*stat), GFP_KERNEL);
        if ((stat) == NULL)
                return -ENOMEM;
 
-       current_policy = cpufreq_cpu_get(cpu);
-       if (current_policy == NULL) {
-               ret = -EINVAL;
-               goto error_get_fail;
-       }
-
-       ret = sysfs_create_group(&current_policy->kobj, &stats_attr_group);
+       ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
        if (ret)
                goto error_out;
 
@@ -223,7 +221,7 @@ static int __cpufreq_stats_create_table(struct cpufreq_policy *policy,
        stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
        if (!stat->time_in_state) {
                ret = -ENOMEM;
-               goto error_out;
+               goto error_alloc;
        }
        stat->freq_table = (unsigned int *)(stat->time_in_state + count);
 
@@ -243,11 +241,10 @@ static int __cpufreq_stats_create_table(struct cpufreq_policy *policy,
        stat->last_time = get_jiffies_64();
        stat->last_index = freq_table_get_index(stat, policy->cur);
        spin_unlock(&cpufreq_stats_lock);
-       cpufreq_cpu_put(current_policy);
        return 0;
+error_alloc:
+       sysfs_remove_group(&policy->kobj, &stats_attr_group);
 error_out:
-       cpufreq_cpu_put(current_policy);
-error_get_fail:
        kfree(stat);
        per_cpu(cpufreq_stats_table, cpu) = NULL;
        return ret;
@@ -256,7 +253,6 @@ error_get_fail:
 static void cpufreq_stats_create_table(unsigned int cpu)
 {
        struct cpufreq_policy *policy;
-       struct cpufreq_frequency_table *table;
 
        /*
         * "likely(!policy)" because normally cpufreq_stats will be registered
@@ -266,9 +262,7 @@ static void cpufreq_stats_create_table(unsigned int cpu)
        if (likely(!policy))
                return;
 
-       table = cpufreq_frequency_get_table(policy->cpu);
-       if (likely(table))
-               __cpufreq_stats_create_table(policy, table);
+       __cpufreq_stats_create_table(policy);
 
        cpufreq_cpu_put(policy);
 }
@@ -291,20 +285,14 @@ static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
 {
        int ret = 0;
        struct cpufreq_policy *policy = data;
-       struct cpufreq_frequency_table *table;
-       unsigned int cpu = policy->cpu;
 
        if (val == CPUFREQ_UPDATE_POLICY_CPU) {
                cpufreq_stats_update_policy_cpu(policy);
                return 0;
        }
 
-       table = cpufreq_frequency_get_table(cpu);
-       if (!table)
-               return 0;
-
        if (val == CPUFREQ_CREATE_POLICY)
-               ret = __cpufreq_stats_create_table(policy, table);
+               ret = __cpufreq_stats_create_table(policy);
        else if (val == CPUFREQ_REMOVE_POLICY)
                __cpufreq_stats_free_table(policy);
 
index 8655904..d457303 100644 (file)
@@ -57,7 +57,6 @@ static struct cpufreq_driver cris_freq_driver = {
        .verify = cpufreq_generic_frequency_table_verify,
        .target_index = cris_freq_target,
        .init   = cris_freq_cpu_init,
-       .exit   = cpufreq_generic_exit,
        .name   = "cris_freq",
        .attr   = cpufreq_generic_attr,
 };
index 26d940d..13c3361 100644 (file)
@@ -57,7 +57,6 @@ static struct cpufreq_driver cris_freq_driver = {
        .verify = cpufreq_generic_frequency_table_verify,
        .target_index = cris_freq_target,
        .init = cris_freq_cpu_init,
-       .exit = cpufreq_generic_exit,
        .name = "cris_freq",
        .attr = cpufreq_generic_attr,
 };
index 2cf3384..28a16dc 100644 (file)
@@ -125,7 +125,6 @@ static struct cpufreq_driver davinci_driver = {
        .target_index   = davinci_target,
        .get            = cpufreq_generic_get,
        .init           = davinci_cpu_init,
-       .exit           = cpufreq_generic_exit,
        .name           = "davinci",
        .attr           = cpufreq_generic_attr,
 };
index 9012b8b..a0d2a42 100644 (file)
@@ -382,7 +382,6 @@ static int eps_cpu_exit(struct cpufreq_policy *policy)
        unsigned int cpu = policy->cpu;
 
        /* Bye */
-       cpufreq_frequency_table_put_attr(policy->cpu);
        kfree(eps_cpu[cpu]);
        eps_cpu[cpu] = NULL;
        return 0;
index de08acf..c987e94 100644 (file)
@@ -198,7 +198,6 @@ static struct cpufreq_driver elanfreq_driver = {
        .verify         = cpufreq_generic_frequency_table_verify,
        .target_index   = elanfreq_target,
        .init           = elanfreq_cpu_init,
-       .exit           = cpufreq_generic_exit,
        .name           = "elanfreq",
        .attr           = cpufreq_generic_attr,
 };
index fcd2914..f99cfe2 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/slab.h>
 #include <linux/regulator/consumer.h>
 #include <linux/cpufreq.h>
-#include <linux/suspend.h>
 #include <linux/platform_device.h>
 
 #include <plat/cpu.h>
 #include "exynos-cpufreq.h"
 
 static struct exynos_dvfs_info *exynos_info;
-
 static struct regulator *arm_regulator;
-
 static unsigned int locking_frequency;
-static bool frequency_locked;
-static DEFINE_MUTEX(cpufreq_lock);
 
 static int exynos_cpufreq_get_index(unsigned int freq)
 {
@@ -134,83 +129,13 @@ out:
 
 static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
 {
-       struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
-       int ret = 0;
-
-       mutex_lock(&cpufreq_lock);
-
-       if (frequency_locked)
-               goto out;
-
-       ret = exynos_cpufreq_scale(freq_table[index].frequency);
-
-out:
-       mutex_unlock(&cpufreq_lock);
-
-       return ret;
-}
-
-#ifdef CONFIG_PM
-static int exynos_cpufreq_suspend(struct cpufreq_policy *policy)
-{
-       return 0;
-}
-
-static int exynos_cpufreq_resume(struct cpufreq_policy *policy)
-{
-       return 0;
-}
-#endif
-
-/**
- * exynos_cpufreq_pm_notifier - block CPUFREQ's activities in suspend-resume
- *                     context
- * @notifier
- * @pm_event
- * @v
- *
- * While frequency_locked == true, target() ignores every frequency but
- * locking_frequency. The locking_frequency value is the initial frequency,
- * which is set by the bootloader. In order to eliminate possible
- * inconsistency in clock values, we save and restore frequencies during
- * suspend and resume and block CPUFREQ activities. Note that the standard
- * suspend/resume cannot be used as they are too deep (syscore_ops) for
- * regulator actions.
- */
-static int exynos_cpufreq_pm_notifier(struct notifier_block *notifier,
-                                      unsigned long pm_event, void *v)
-{
-       int ret;
-
-       switch (pm_event) {
-       case PM_SUSPEND_PREPARE:
-               mutex_lock(&cpufreq_lock);
-               frequency_locked = true;
-               mutex_unlock(&cpufreq_lock);
-
-               ret = exynos_cpufreq_scale(locking_frequency);
-               if (ret < 0)
-                       return NOTIFY_BAD;
-
-               break;
-
-       case PM_POST_SUSPEND:
-               mutex_lock(&cpufreq_lock);
-               frequency_locked = false;
-               mutex_unlock(&cpufreq_lock);
-               break;
-       }
-
-       return NOTIFY_OK;
+       return exynos_cpufreq_scale(exynos_info->freq_table[index].frequency);
 }
 
-static struct notifier_block exynos_cpufreq_nb = {
-       .notifier_call = exynos_cpufreq_pm_notifier,
-};
-
 static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
        policy->clk = exynos_info->cpu_clk;
+       policy->suspend_freq = locking_frequency;
        return cpufreq_generic_init(policy, exynos_info->freq_table, 100000);
 }
 
@@ -220,15 +145,13 @@ static struct cpufreq_driver exynos_driver = {
        .target_index   = exynos_target,
        .get            = cpufreq_generic_get,
        .init           = exynos_cpufreq_cpu_init,
-       .exit           = cpufreq_generic_exit,
        .name           = "exynos_cpufreq",
        .attr           = cpufreq_generic_attr,
 #ifdef CONFIG_ARM_EXYNOS_CPU_FREQ_BOOST_SW
        .boost_supported = true,
 #endif
 #ifdef CONFIG_PM
-       .suspend        = exynos_cpufreq_suspend,
-       .resume         = exynos_cpufreq_resume,
+       .suspend        = cpufreq_generic_suspend,
 #endif
 };
 
@@ -263,19 +186,13 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
                goto err_vdd_arm;
        }
 
+       /* Done here as we want to capture boot frequency */
        locking_frequency = clk_get_rate(exynos_info->cpu_clk) / 1000;
 
-       register_pm_notifier(&exynos_cpufreq_nb);
-
-       if (cpufreq_register_driver(&exynos_driver)) {
-               pr_err("%s: failed to register cpufreq driver\n", __func__);
-               goto err_cpufreq;
-       }
-
-       return 0;
-err_cpufreq:
-       unregister_pm_notifier(&exynos_cpufreq_nb);
+       if (!cpufreq_register_driver(&exynos_driver))
+               return 0;
 
+       pr_err("%s: failed to register cpufreq driver\n", __func__);
        regulator_put(arm_regulator);
 err_vdd_arm:
        kfree(exynos_info);
index 49b7560..7f776aa 100644 (file)
@@ -312,7 +312,6 @@ static struct cpufreq_driver exynos_driver = {
        .target_index   = exynos_target,
        .get            = cpufreq_generic_get,
        .init           = exynos_cpufreq_cpu_init,
-       .exit           = cpufreq_generic_exit,
        .name           = CPUFREQ_NAME,
        .attr           = cpufreq_generic_attr,
 };
index 8e54f97..65a4770 100644 (file)
@@ -91,8 +91,8 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
 EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify);
 
 /*
- * Generic routine to verify policy & frequency table, requires driver to call
- * cpufreq_frequency_table_get_attr() prior to it.
+ * Generic routine to verify policy & frequency table, requires driver to set
+ * policy->freq_table prior to it.
  */
 int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy)
 {
@@ -203,8 +203,6 @@ int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
 }
 EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_index);
 
-static DEFINE_PER_CPU(struct cpufreq_frequency_table *, cpufreq_show_table);
-
 /**
  * show_available_freqs - show available frequencies for the specified CPU
  */
@@ -212,15 +210,12 @@ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf,
                                    bool show_boost)
 {
        unsigned int i = 0;
-       unsigned int cpu = policy->cpu;
        ssize_t count = 0;
-       struct cpufreq_frequency_table *table;
+       struct cpufreq_frequency_table *table = policy->freq_table;
 
-       if (!per_cpu(cpufreq_show_table, cpu))
+       if (!table)
                return -ENODEV;
 
-       table = per_cpu(cpufreq_show_table, cpu);
-
        for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
                if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
                        continue;
@@ -283,49 +278,24 @@ struct freq_attr *cpufreq_generic_attr[] = {
 };
 EXPORT_SYMBOL_GPL(cpufreq_generic_attr);
 
-/*
- * if you use these, you must assure that the frequency table is valid
- * all the time between get_attr and put_attr!
- */
-void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
-                                     unsigned int cpu)
-{
-       pr_debug("setting show_table for cpu %u to %p\n", cpu, table);
-       per_cpu(cpufreq_show_table, cpu) = table;
-}
-EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr);
-
-void cpufreq_frequency_table_put_attr(unsigned int cpu)
-{
-       pr_debug("clearing show_table for cpu %u\n", cpu);
-       per_cpu(cpufreq_show_table, cpu) = NULL;
-}
-EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
-
 int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
                                      struct cpufreq_frequency_table *table)
 {
        int ret = cpufreq_frequency_table_cpuinfo(policy, table);
 
        if (!ret)
-               cpufreq_frequency_table_get_attr(table, policy->cpu);
+               policy->freq_table = table;
 
        return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show);
 
-void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy)
-{
-       pr_debug("Updating show_table for new_cpu %u from last_cpu %u\n",
-                       policy->cpu, policy->last_cpu);
-       per_cpu(cpufreq_show_table, policy->cpu) = per_cpu(cpufreq_show_table,
-                       policy->last_cpu);
-       per_cpu(cpufreq_show_table, policy->last_cpu) = NULL;
-}
+struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
 
 struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
 {
-       return per_cpu(cpufreq_show_table, cpu);
+       struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
+       return policy ? policy->freq_table : NULL;
 }
 EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
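With the per-CPU show_table gone, policy->freq_table is the only place the frequency table lives, and cpufreq_frequency_get_table() is just a lookup through the policy. A driver that does not go through cpufreq_generic_init() publishes its table from ->init via cpufreq_table_validate_and_show(), which, per the hunk above, runs cpufreq_frequency_table_cpuinfo() and then simply stores the table in policy->freq_table. A small illustrative ->init with a made-up baz_* table:

    static struct cpufreq_frequency_table baz_freq_table[] = {
            { .frequency = 500000 },
            { .frequency = 1000000 },
            { .frequency = CPUFREQ_TABLE_END },
    };

    static int baz_cpu_init(struct cpufreq_policy *policy)
    {
            int ret;

            /* Fills policy->cpuinfo.{min,max}_freq from the table and, on
             * success, sets policy->freq_table = baz_freq_table. */
            ret = cpufreq_table_validate_and_show(policy, baz_freq_table);
            if (ret)
                    return ret;

            policy->cpuinfo.transition_latency = 300 * 1000;    /* 300 us in ns */
            return 0;
    }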
 
index 53c6ac6..a22b5d1 100644 (file)
@@ -332,7 +332,6 @@ acpi_cpufreq_cpu_exit (
        pr_debug("acpi_cpufreq_cpu_exit\n");
 
        if (data) {
-               cpufreq_frequency_table_put_attr(policy->cpu);
                acpi_io_data[policy->cpu] = NULL;
                acpi_processor_unregister_performance(&data->acpi_data,
                                                      policy->cpu);
index ce69059..e27fca8 100644 (file)
@@ -144,7 +144,6 @@ static struct cpufreq_driver imx6q_cpufreq_driver = {
        .target_index = imx6q_set_target,
        .get = cpufreq_generic_get,
        .init = imx6q_cpufreq_init,
-       .exit = cpufreq_generic_exit,
        .name = "imx6q-cpufreq",
        .attr = cpufreq_generic_attr,
 };
index 2cd36b9..bcb9a6d 100644 (file)
@@ -99,8 +99,7 @@ struct cpudata {
        u64     prev_aperf;
        u64     prev_mperf;
        unsigned long long prev_tsc;
-       int     sample_ptr;
-       struct sample samples[SAMPLE_COUNT];
+       struct sample sample;
 };
 
 static struct cpudata **all_cpu_data;
@@ -154,7 +153,7 @@ static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
        pid->setpoint = setpoint;
        pid->deadband  = deadband;
        pid->integral  = int_tofp(integral);
-       pid->last_err  = setpoint - busy;
+       pid->last_err  = int_tofp(setpoint) - int_tofp(busy);
 }
 
 static inline void pid_p_gain_set(struct _pid *pid, int percent)
@@ -447,7 +446,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate)
        if (limits.no_turbo)
                val |= (u64)1 << 32;
 
-       wrmsrl(MSR_IA32_PERF_CTL, val);
+       wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
 }
 
 static struct cpu_defaults core_params = {
@@ -586,15 +585,14 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
        mperf = mperf >> FRAC_BITS;
        tsc = tsc >> FRAC_BITS;
 
-       cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
-       cpu->samples[cpu->sample_ptr].aperf = aperf;
-       cpu->samples[cpu->sample_ptr].mperf = mperf;
-       cpu->samples[cpu->sample_ptr].tsc = tsc;
-       cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
-       cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
-       cpu->samples[cpu->sample_ptr].tsc -= cpu->prev_tsc;
+       cpu->sample.aperf = aperf;
+       cpu->sample.mperf = mperf;
+       cpu->sample.tsc = tsc;
+       cpu->sample.aperf -= cpu->prev_aperf;
+       cpu->sample.mperf -= cpu->prev_mperf;
+       cpu->sample.tsc -= cpu->prev_tsc;
 
-       intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
+       intel_pstate_calc_busy(cpu, &cpu->sample);
 
        cpu->prev_aperf = aperf;
        cpu->prev_mperf = mperf;
@@ -614,7 +612,7 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
 {
        int32_t core_busy, max_pstate, current_pstate;
 
-       core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy;
+       core_busy = cpu->sample.core_pct_busy;
        max_pstate = int_tofp(cpu->pstate.max_pstate);
        current_pstate = int_tofp(cpu->pstate.current_pstate);
        core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
@@ -648,7 +646,7 @@ static void intel_pstate_timer_func(unsigned long __data)
 
        intel_pstate_sample(cpu);
 
-       sample = &cpu->samples[cpu->sample_ptr];
+       sample = &cpu->sample;
 
        intel_pstate_adjust_busy_pstate(cpu);
 
@@ -729,7 +727,7 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
        cpu = all_cpu_data[cpu_num];
        if (!cpu)
                return 0;
-       sample = &cpu->samples[cpu->sample_ptr];
+       sample = &cpu->sample;
        return sample->freq;
 }
 
@@ -773,14 +771,17 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
        return 0;
 }
 
-static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
+static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
 {
-       int cpu = policy->cpu;
+       int cpu_num = policy->cpu;
+       struct cpudata *cpu = all_cpu_data[cpu_num];
 
-       del_timer(&all_cpu_data[cpu]->timer);
-       kfree(all_cpu_data[cpu]);
-       all_cpu_data[cpu] = NULL;
-       return 0;
+       pr_info("intel_pstate CPU %d exiting\n", cpu_num);
+
+       del_timer(&all_cpu_data[cpu_num]->timer);
+       intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
+       kfree(all_cpu_data[cpu_num]);
+       all_cpu_data[cpu_num] = NULL;
 }
 
 static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
@@ -818,7 +819,7 @@ static struct cpufreq_driver intel_pstate_driver = {
        .setpolicy      = intel_pstate_set_policy,
        .get            = intel_pstate_get,
        .init           = intel_pstate_cpu_init,
-       .exit           = intel_pstate_cpu_exit,
+       .stop_cpu       = intel_pstate_stop_cpu,
        .name           = "intel_pstate",
 };
 
index eb7abe3..3d114bc 100644 (file)
@@ -102,7 +102,6 @@ static struct cpufreq_driver kirkwood_cpufreq_driver = {
        .verify = cpufreq_generic_frequency_table_verify,
        .target_index = kirkwood_cpufreq_target,
        .init   = kirkwood_cpufreq_cpu_init,
-       .exit   = cpufreq_generic_exit,
        .name   = "kirkwood-cpufreq",
        .attr   = cpufreq_generic_attr,
 };
index 45bafdd..7b94da3 100644 (file)
@@ -913,7 +913,6 @@ static struct cpufreq_driver longhaul_driver = {
        .target_index = longhaul_target,
        .get    = longhaul_get,
        .init   = longhaul_cpu_init,
-       .exit   = cpufreq_generic_exit,
        .name   = "longhaul",
        .attr   = cpufreq_generic_attr,
 };
index b6581ab..a3588d6 100644 (file)
@@ -104,7 +104,6 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
 static int loongson2_cpufreq_exit(struct cpufreq_policy *policy)
 {
-       cpufreq_frequency_table_put_attr(policy->cpu);
        clk_put(policy->clk);
        return 0;
 }
index 590f5b6..5f69c9a 100644 (file)
@@ -143,7 +143,6 @@ fail:
 
 static int omap_cpu_exit(struct cpufreq_policy *policy)
 {
-       cpufreq_frequency_table_put_attr(policy->cpu);
        freq_table_free();
        clk_put(policy->clk);
        return 0;
index 3d1cba9..74f593e 100644 (file)
@@ -237,7 +237,6 @@ static struct cpufreq_driver p4clockmod_driver = {
        .verify         = cpufreq_generic_frequency_table_verify,
        .target_index   = cpufreq_p4_target,
        .init           = cpufreq_p4_cpu_init,
-       .exit           = cpufreq_generic_exit,
        .get            = cpufreq_p4_get,
        .name           = "p4-clockmod",
        .attr           = cpufreq_generic_attr,
index 0426008..6a2b7d3 100644 (file)
@@ -234,7 +234,6 @@ static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy)
        if (sdcpwr_mapbase)
                iounmap(sdcpwr_mapbase);
 
-       cpufreq_frequency_table_put_attr(policy->cpu);
        return 0;
 }
 
index b9a444e..ce27e6c 100644 (file)
@@ -231,7 +231,6 @@ static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
                if (i == max_multiplier)
                        powernow_k6_target(policy, i);
        }
-       cpufreq_frequency_table_put_attr(policy->cpu);
        return 0;
 }
 
index 946708a..0e68e02 100644 (file)
@@ -664,8 +664,6 @@ static int powernow_cpu_init(struct cpufreq_policy *policy)
 
 static int powernow_cpu_exit(struct cpufreq_policy *policy)
 {
-       cpufreq_frequency_table_put_attr(policy->cpu);
-
 #ifdef CONFIG_X86_POWERNOW_K7_ACPI
        if (acpi_processor_perf) {
                acpi_processor_unregister_performance(acpi_processor_perf, 0);
index 6684e03..27eb2be 100644 (file)
@@ -1164,8 +1164,6 @@ static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
 
        powernow_k8_cpu_exit_acpi(data);
 
-       cpufreq_frequency_table_put_attr(pol->cpu);
-
        kfree(data->powernow_table);
        kfree(data);
        for_each_cpu(cpu, pol->cpus)
index 051000f..3bd9123 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/of.h>
 #include <linux/slab.h>
 #include <linux/smp.h>
+#include <sysdev/fsl_soc.h>
 
 /**
  * struct cpu_data - per CPU data struct
@@ -205,7 +206,8 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
        for_each_cpu(i, per_cpu(cpu_mask, cpu))
                per_cpu(cpu_data, i) = data;
 
-       policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+       policy->cpuinfo.transition_latency =
+                               (12 * NSEC_PER_SEC) / fsl_get_sys_freq();
        of_node_put(np);
 
        return 0;
@@ -228,7 +230,6 @@ static int __exit corenet_cpufreq_cpu_exit(struct cpufreq_policy *policy)
        struct cpu_data *data = per_cpu(cpu_data, policy->cpu);
        unsigned int cpu;
 
-       cpufreq_frequency_table_put_attr(policy->cpu);
        of_node_put(data->parent);
        kfree(data->table);
        kfree(data);
index e42ca9c..af7b1ca 100644 (file)
@@ -141,7 +141,6 @@ static struct cpufreq_driver cbe_cpufreq_driver = {
        .verify         = cpufreq_generic_frequency_table_verify,
        .target_index   = cbe_cpufreq_target,
        .init           = cbe_cpufreq_cpu_init,
-       .exit           = cpufreq_generic_exit,
        .name           = "cbe-cpufreq",
        .flags          = CPUFREQ_CONST_LOOPS,
 };
index a9195a8..e24269a 100644 (file)
@@ -427,7 +427,6 @@ static struct cpufreq_driver pxa_cpufreq_driver = {
        .verify = cpufreq_generic_frequency_table_verify,
        .target_index = pxa_set_target,
        .init   = pxa_cpufreq_init,
-       .exit   = cpufreq_generic_exit,
        .get    = pxa_cpufreq_get,
        .name   = "PXA2xx",
 };
index 3785687..a012759 100644 (file)
@@ -205,7 +205,6 @@ static struct cpufreq_driver pxa3xx_cpufreq_driver = {
        .verify         = cpufreq_generic_frequency_table_verify,
        .target_index   = pxa3xx_cpufreq_set,
        .init           = pxa3xx_cpufreq_init,
-       .exit           = cpufreq_generic_exit,
        .get            = pxa3xx_cpufreq_get,
        .name           = "pxa3xx-cpufreq",
 };
index 55a8e9f..7242153 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/cpufreq.h>
 #include <linux/reboot.h>
 #include <linux/regulator/consumer.h>
-#include <linux/suspend.h>
 
 #include <mach/map.h>
 #include <mach/regs-clock.h>
@@ -435,18 +434,6 @@ exit:
        return ret;
 }
 
-#ifdef CONFIG_PM
-static int s5pv210_cpufreq_suspend(struct cpufreq_policy *policy)
-{
-       return 0;
-}
-
-static int s5pv210_cpufreq_resume(struct cpufreq_policy *policy)
-{
-       return 0;
-}
-#endif
-
 static int check_mem_type(void __iomem *dmc_reg)
 {
        unsigned long val;
@@ -502,6 +489,7 @@ static int __init s5pv210_cpu_init(struct cpufreq_policy *policy)
        s5pv210_dram_conf[1].refresh = (__raw_readl(S5P_VA_DMC1 + 0x30) * 1000);
        s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk);
 
+       policy->suspend_freq = SLEEP_FREQ;
        return cpufreq_generic_init(policy, s5pv210_freq_table, 40000);
 
 out_dmc1:
@@ -511,32 +499,6 @@ out_dmc0:
        return ret;
 }
 
-static int s5pv210_cpufreq_notifier_event(struct notifier_block *this,
-                                         unsigned long event, void *ptr)
-{
-       int ret;
-
-       switch (event) {
-       case PM_SUSPEND_PREPARE:
-               ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0);
-               if (ret < 0)
-                       return NOTIFY_BAD;
-
-               /* Disable updation of cpu frequency */
-               no_cpufreq_access = true;
-               return NOTIFY_OK;
-       case PM_POST_RESTORE:
-       case PM_POST_SUSPEND:
-               /* Enable updation of cpu frequency */
-               no_cpufreq_access = false;
-               cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0);
-
-               return NOTIFY_OK;
-       }
-
-       return NOTIFY_DONE;
-}
-
 static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
                                                 unsigned long event, void *ptr)
 {
@@ -558,15 +520,11 @@ static struct cpufreq_driver s5pv210_driver = {
        .init           = s5pv210_cpu_init,
        .name           = "s5pv210",
 #ifdef CONFIG_PM
-       .suspend        = s5pv210_cpufreq_suspend,
-       .resume         = s5pv210_cpufreq_resume,
+       .suspend        = cpufreq_generic_suspend,
+       .resume         = cpufreq_generic_suspend, /* We need to set SLEEP FREQ again */
 #endif
 };
 
-static struct notifier_block s5pv210_cpufreq_notifier = {
-       .notifier_call = s5pv210_cpufreq_notifier_event,
-};
-
 static struct notifier_block s5pv210_cpufreq_reboot_notifier = {
        .notifier_call = s5pv210_cpufreq_reboot_notifier_event,
 };
@@ -586,7 +544,6 @@ static int __init s5pv210_cpufreq_init(void)
                return PTR_ERR(int_regulator);
        }
 
-       register_pm_notifier(&s5pv210_cpufreq_notifier);
        register_reboot_notifier(&s5pv210_cpufreq_reboot_notifier);
 
        return cpufreq_register_driver(&s5pv210_driver);
index 6adb354..69371bf 100644 (file)
@@ -93,7 +93,6 @@ static struct cpufreq_driver sc520_freq_driver = {
        .verify = cpufreq_generic_frequency_table_verify,
        .target_index = sc520_freq_target,
        .init   = sc520_freq_cpu_init,
-       .exit   = cpufreq_generic_exit,
        .name   = "sc520_freq",
        .attr   = cpufreq_generic_attr,
 };
index 387af12..696170e 100644 (file)
@@ -143,7 +143,6 @@ static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
        unsigned int cpu = policy->cpu;
        struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
 
-       cpufreq_frequency_table_put_attr(cpu);
        clk_put(cpuclk);
 
        return 0;
index 62aa23e..b73feeb 100644 (file)
@@ -301,10 +301,8 @@ static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
 
 static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
 {
-       if (cpufreq_us2e_driver) {
-               cpufreq_frequency_table_put_attr(policy->cpu);
+       if (cpufreq_us2e_driver)
                us2e_freq_target(policy, 0);
-       }
 
        return 0;
 }
index 724ffbd..9bb42ba 100644 (file)
@@ -156,10 +156,8 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
 
 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
 {
-       if (cpufreq_us3_driver) {
-               cpufreq_frequency_table_put_attr(policy->cpu);
+       if (cpufreq_us3_driver)
                us3_freq_target(policy, 0);
-       }
 
        return 0;
 }
index 5c86e3f..4cfdcff 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
+#include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 
@@ -163,11 +164,10 @@ static struct cpufreq_driver spear_cpufreq_driver = {
        .target_index   = spear_cpufreq_target,
        .get            = cpufreq_generic_get,
        .init           = spear_cpufreq_init,
-       .exit           = cpufreq_generic_exit,
        .attr           = cpufreq_generic_attr,
 };
 
-static int spear_cpufreq_driver_init(void)
+static int spear_cpufreq_probe(struct platform_device *pdev)
 {
        struct device_node *np;
        const struct property *prop;
@@ -235,7 +235,15 @@ out_put_node:
        of_node_put(np);
        return ret;
 }
-late_initcall(spear_cpufreq_driver_init);
+
+static struct platform_driver spear_cpufreq_platdrv = {
+       .driver = {
+               .name   = "spear-cpufreq",
+               .owner  = THIS_MODULE,
+       },
+       .probe          = spear_cpufreq_probe,
+};
+module_platform_driver(spear_cpufreq_platdrv);
 
 MODULE_AUTHOR("Deepak Sikri <deepak.sikri@st.com>");
 MODULE_DESCRIPTION("SPEAr CPUFreq driver");
index 4e1daca..6723f03 100644 (file)
@@ -406,8 +406,6 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
        if (!per_cpu(centrino_model, cpu))
                return -ENODEV;
 
-       cpufreq_frequency_table_put_attr(cpu);
-
        per_cpu(centrino_model, cpu) = NULL;
 
        return 0;
index 7639b2b..394ac15 100644 (file)
@@ -311,7 +311,6 @@ static struct cpufreq_driver speedstep_driver = {
        .verify = cpufreq_generic_frequency_table_verify,
        .target_index = speedstep_target,
        .init   = speedstep_cpu_init,
-       .exit   = cpufreq_generic_exit,
        .get    = speedstep_get,
        .attr   = cpufreq_generic_attr,
 };
index 998c17b..db5d274 100644 (file)
@@ -280,7 +280,6 @@ static struct cpufreq_driver speedstep_driver = {
        .verify         = cpufreq_generic_frequency_table_verify,
        .target_index   = speedstep_target,
        .init           = speedstep_cpu_init,
-       .exit           = cpufreq_generic_exit,
        .get            = speedstep_get,
        .resume         = speedstep_resume,
        .attr           = cpufreq_generic_attr,
index e652c1b..63f0059 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/err.h>
 #include <linux/clk.h>
 #include <linux/io.h>
-#include <linux/suspend.h>
 
 static struct cpufreq_frequency_table freq_table[] = {
        { .frequency = 216000 },
@@ -47,9 +46,6 @@ static struct clk *pll_x_clk;
 static struct clk *pll_p_clk;
 static struct clk *emc_clk;
 
-static DEFINE_MUTEX(tegra_cpu_lock);
-static bool is_suspended;
-
 static int tegra_cpu_clk_set_rate(unsigned long rate)
 {
        int ret;
@@ -112,42 +108,9 @@ static int tegra_update_cpu_speed(struct cpufreq_policy *policy,
 
 static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
 {
-       int ret = -EBUSY;
-
-       mutex_lock(&tegra_cpu_lock);
-
-       if (!is_suspended)
-               ret = tegra_update_cpu_speed(policy,
-                               freq_table[index].frequency);
-
-       mutex_unlock(&tegra_cpu_lock);
-       return ret;
+       return tegra_update_cpu_speed(policy, freq_table[index].frequency);
 }
 
-static int tegra_pm_notify(struct notifier_block *nb, unsigned long event,
-       void *dummy)
-{
-       mutex_lock(&tegra_cpu_lock);
-       if (event == PM_SUSPEND_PREPARE) {
-               struct cpufreq_policy *policy = cpufreq_cpu_get(0);
-               is_suspended = true;
-               pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n",
-                       freq_table[0].frequency);
-               if (clk_get_rate(cpu_clk) / 1000 != freq_table[0].frequency)
-                       tegra_update_cpu_speed(policy, freq_table[0].frequency);
-               cpufreq_cpu_put(policy);
-       } else if (event == PM_POST_SUSPEND) {
-               is_suspended = false;
-       }
-       mutex_unlock(&tegra_cpu_lock);
-
-       return NOTIFY_OK;
-}
-
-static struct notifier_block tegra_cpu_pm_notifier = {
-       .notifier_call = tegra_pm_notify,
-};
-
 static int tegra_cpu_init(struct cpufreq_policy *policy)
 {
        int ret;
@@ -166,16 +129,13 @@ static int tegra_cpu_init(struct cpufreq_policy *policy)
                return ret;
        }
 
-       if (policy->cpu == 0)
-               register_pm_notifier(&tegra_cpu_pm_notifier);
-
        policy->clk = cpu_clk;
+       policy->suspend_freq = freq_table[0].frequency;
        return 0;
 }
 
 static int tegra_cpu_exit(struct cpufreq_policy *policy)
 {
-       cpufreq_frequency_table_put_attr(policy->cpu);
        clk_disable_unprepare(cpu_clk);
        clk_disable_unprepare(emc_clk);
        return 0;
@@ -190,6 +150,9 @@ static struct cpufreq_driver tegra_cpufreq_driver = {
        .exit           = tegra_cpu_exit,
        .name           = "tegra",
        .attr           = cpufreq_generic_attr,
+#ifdef CONFIG_PM
+       .suspend        = cpufreq_generic_suspend,
+#endif
 };
 
 static int __init tegra_cpufreq_init(void)
index 09d05ab..cb20fd9 100644 (file)
@@ -85,7 +85,8 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 
        time_end = ktime_get();
 
-       local_irq_enable();
+       if (!cpuidle_state_is_coupled(dev, drv, entered_state))
+               local_irq_enable();
 
        diff = ktime_to_us(ktime_sub(time_end, time_start));
        if (diff > INT_MAX)
index 06dbe7c..136d6a2 100644 (file)
@@ -209,7 +209,7 @@ static void poll_idle_init(struct cpuidle_driver *drv)
        state->exit_latency = 0;
        state->target_residency = 0;
        state->power_usage = -1;
-       state->flags = 0;
+       state->flags = CPUIDLE_FLAG_TIME_VALID;
        state->enter = poll_idle;
        state->disabled = false;
 }
index cf7f2f0..71b5232 100644 (file)
@@ -122,9 +122,8 @@ struct menu_device {
        int             last_state_idx;
        int             needs_update;
 
-       unsigned int    expected_us;
+       unsigned int    next_timer_us;
        unsigned int    predicted_us;
-       unsigned int    exit_us;
        unsigned int    bucket;
        unsigned int    correction_factor[BUCKETS];
        unsigned int    intervals[INTERVALS];
@@ -257,7 +256,7 @@ again:
                stddev = int_sqrt(stddev);
                if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
                                                        || stddev <= 20) {
-                       if (data->expected_us > avg)
+                       if (data->next_timer_us > avg)
                                data->predicted_us = avg;
                        return;
                }
@@ -289,7 +288,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
        struct menu_device *data = &__get_cpu_var(menu_devices);
        int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
        int i;
-       int multiplier;
+       unsigned int interactivity_req;
        struct timespec t;
 
        if (data->needs_update) {
@@ -298,7 +297,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
        }
 
        data->last_state_idx = 0;
-       data->exit_us = 0;
 
        /* Special case when user has set very strict latency requirement */
        if (unlikely(latency_req == 0))
@@ -306,13 +304,11 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 
        /* determine the expected residency time, round up */
        t = ktime_to_timespec(tick_nohz_get_sleep_length());
-       data->expected_us =
+       data->next_timer_us =
                t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;
 
 
-       data->bucket = which_bucket(data->expected_us);
-
-       multiplier = performance_multiplier();
+       data->bucket = which_bucket(data->next_timer_us);
 
        /*
         * if the correction factor is 0 (eg first time init or cpu hotplug
@@ -326,17 +322,26 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
         * operands are 32 bits.
         * Make sure to round up for half microseconds.
         */
-       data->predicted_us = div_round64((uint64_t)data->expected_us *
+       data->predicted_us = div_round64((uint64_t)data->next_timer_us *
                                         data->correction_factor[data->bucket],
                                         RESOLUTION * DECAY);
 
        get_typical_interval(data);
 
+       /*
+        * Performance multiplier defines a minimum predicted idle
+        * duration / latency ratio. Adjust the latency limit if
+        * necessary.
+        */
+       interactivity_req = data->predicted_us / performance_multiplier();
+       if (latency_req > interactivity_req)
+               latency_req = interactivity_req;
+
        /*
         * We want to default to C1 (hlt), not to busy polling
         * unless the timer is happening really really soon.
         */
-       if (data->expected_us > 5 &&
+       if (data->next_timer_us > 5 &&
            !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
                dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
                data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
@@ -355,11 +360,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
                        continue;
                if (s->exit_latency > latency_req)
                        continue;
-               if (s->exit_latency * multiplier > data->predicted_us)
-                       continue;
 
                data->last_state_idx = i;
-               data->exit_us = s->exit_latency;
        }
 
        return data->last_state_idx;
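A rough worked example of the new clamp (numbers invented): with data->predicted_us = 400 and performance_multiplier() returning 8, interactivity_req = 400 / 8 = 50, so a PM QoS latency_req of 200 us is tightened to 50 us and the state loop above only picks idle states whose exit_latency is at most 50 us. The removed per-state test "s->exit_latency * multiplier > data->predicted_us" encoded the same ratio, just applied inside the loop.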
@@ -390,36 +392,47 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 {
        struct menu_device *data = &__get_cpu_var(menu_devices);
        int last_idx = data->last_state_idx;
-       unsigned int last_idle_us = cpuidle_get_last_residency(dev);
        struct cpuidle_state *target = &drv->states[last_idx];
        unsigned int measured_us;
        unsigned int new_factor;
 
        /*
-        * Ugh, this idle state doesn't support residency measurements, so we
-        * are basically lost in the dark.  As a compromise, assume we slept
-        * for the whole expected time.
+        * Try to figure out how much time passed between entry to the
+        * low-power state and the occurrence of the wakeup event.
+        *
+        * If the entered idle state didn't support residency measurements,
+        * we are basically lost in the dark about how much time passed.
+        * As a compromise, assume we slept for the whole expected time.
+        *
+        * Any measured amount of time will include the exit latency.
+        * Since we are interested in when the wakeup began, not when it
+        * was completed, we must subtract the exit latency. However, if
+        * the measured amount of time is less than the exit latency,
+        * assume the state was never reached and the exit latency is 0.
         */
-       if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
-               last_idle_us = data->expected_us;
+       if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID))) {
+               /* Use timer value as is */
+               measured_us = data->next_timer_us;
 
+       } else {
+               /* Use measured value */
+               measured_us = cpuidle_get_last_residency(dev);
 
-       measured_us = last_idle_us;
-
-       /*
-        * We correct for the exit latency; we are assuming here that the
-        * exit latency happens after the event that we're interested in.
-        */
-       if (measured_us > data->exit_us)
-               measured_us -= data->exit_us;
+               /* Deduct exit latency */
+               if (measured_us > target->exit_latency)
+                       measured_us -= target->exit_latency;
 
+               /* Make sure our coefficients do not exceed unity */
+               if (measured_us > data->next_timer_us)
+                       measured_us = data->next_timer_us;
+       }
 
        /* Update our correction ratio */
        new_factor = data->correction_factor[data->bucket];
        new_factor -= new_factor / DECAY;
 
-       if (data->expected_us > 0 && measured_us < MAX_INTERESTING)
-               new_factor += RESOLUTION * measured_us / data->expected_us;
+       if (data->next_timer_us > 0 && measured_us < MAX_INTERESTING)
+               new_factor += RESOLUTION * measured_us / data->next_timer_us;
        else
                /*
                 * we were idle so long that we count it as a perfect
@@ -439,7 +452,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
        data->correction_factor[data->bucket] = new_factor;
 
        /* update the repeating-pattern data */
-       data->intervals[data->interval_ptr++] = last_idle_us;
+       data->intervals[data->interval_ptr++] = measured_us;
        if (data->interval_ptr >= INTERVALS)
                data->interval_ptr = 0;
 }
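A worked pass through the correction-factor update above, using the governor's RESOLUTION (1024) and DECAY (8) constants, so that a factor of RESOLUTION * DECAY = 8192 means "measured time equals the timer estimate": if wakeups keep arriving after measured_us = 250 while next_timer_us = 1000, each update computes new_factor = factor - factor / 8 + 1024 * 250 / 1000, which converges to 2048. menu_select() then predicts 1000 * 2048 / 8192 = 250 us for that bucket, while the "perfect prediction" branch (sleeping through the whole interval) adds a full RESOLUTION per pass and pulls the factor back toward 8192.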
index a0b2f7e..2042ec3 100644 (file)
@@ -91,26 +91,35 @@ static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
  */
 static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
 {
-       int lev, prev_lev;
+       int lev, prev_lev, ret = 0;
        unsigned long cur_time;
 
-       lev = devfreq_get_freq_level(devfreq, freq);
-       if (lev < 0)
-               return lev;
-
        cur_time = jiffies;
-       devfreq->time_in_state[lev] +=
+
+       prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
+       if (prev_lev < 0) {
+               ret = prev_lev;
+               goto out;
+       }
+
+       devfreq->time_in_state[prev_lev] +=
                         cur_time - devfreq->last_stat_updated;
-       if (freq != devfreq->previous_freq) {
-               prev_lev = devfreq_get_freq_level(devfreq,
-                                               devfreq->previous_freq);
+
+       lev = devfreq_get_freq_level(devfreq, freq);
+       if (lev < 0) {
+               ret = lev;
+               goto out;
+       }
+
+       if (lev != prev_lev) {
                devfreq->trans_table[(prev_lev *
                                devfreq->profile->max_state) + lev]++;
                devfreq->total_trans++;
        }
-       devfreq->last_stat_updated = cur_time;
 
-       return 0;
+out:
+       devfreq->last_stat_updated = cur_time;
+       return ret;
 }
 
 /**
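
As a hedged aside, the reordered bookkeeping above can be modelled in a few lines of standalone C: elapsed time is always charged to the level of previous_freq, the transition counter only moves when the level actually changes, and the timestamp is refreshed even when a level lookup fails. The structure and field names below are illustrative stand-ins for the devfreq ones.

#include <stdio.h>

#define MAX_STATE 4

struct stats {
        unsigned long freq_table[MAX_STATE];
        unsigned long long time_in_state[MAX_STATE];
        unsigned int trans_table[MAX_STATE][MAX_STATE];
        unsigned int total_trans;
        unsigned long previous_freq;
        unsigned long long last_stat_updated;
};

static int freq_level(const struct stats *s, unsigned long freq)
{
        int lev;

        for (lev = 0; lev < MAX_STATE; lev++)
                if (s->freq_table[lev] == freq)
                        return lev;
        return -1;      /* stand-in for the -EINVAL case */
}

static int update_status(struct stats *s, unsigned long freq,
                         unsigned long long now)
{
        int lev, prev_lev, ret = 0;

        prev_lev = freq_level(s, s->previous_freq);
        if (prev_lev < 0) {
                ret = prev_lev;
                goto out;
        }
        s->time_in_state[prev_lev] += now - s->last_stat_updated;

        lev = freq_level(s, freq);
        if (lev < 0) {
                ret = lev;
                goto out;
        }
        if (lev != prev_lev) {
                s->trans_table[prev_lev][lev]++;
                s->total_trans++;
        }
out:
        s->last_stat_updated = now;     /* refreshed on every call */
        return ret;
}

int main(void)
{
        struct stats s = {
                .freq_table = { 100, 200, 400, 800 },
                .previous_freq = 200,
                .last_stat_updated = 1000,
        };

        update_status(&s, 800, 1250);   /* 250 units charged to level 1 */
        printf("time@200=%llu transitions=%u\n", s.time_in_state[1],
               s.total_trans);
        return 0;
}
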
index 508cf99..17f928e 100644 (file)
@@ -10,7 +10,6 @@ config DRM_GMA500
        # GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
        select ACPI_VIDEO if ACPI
        select BACKLIGHT_CLASS_DEVICE if ACPI
-       select VIDEO_OUTPUT_CONTROL if ACPI
        select INPUT if ACPI
        help
          Say yes for an experimental 2D KMS framebuffer driver for the
index 73ed59e..bea2d67 100644 (file)
@@ -14,7 +14,6 @@ config DRM_I915
        # but for select to work, need to select ACPI_VIDEO's dependencies, ick
        select BACKLIGHT_LCD_SUPPORT if ACPI
        select BACKLIGHT_CLASS_DEVICE if ACPI
-       select VIDEO_OUTPUT_CONTROL if ACPI
        select INPUT if ACPI
        select ACPI_VIDEO if ACPI
        select ACPI_BUTTON if ACPI
index 7cf787d..637c29a 100644 (file)
@@ -11,7 +11,7 @@ config DRM_NOUVEAU
        select FB
        select FRAMEBUFFER_CONSOLE if !EXPERT
        select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
-       select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT
+       select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && INPUT
        select X86_PLATFORM_DEVICES if ACPI && X86
        select ACPI_WMI if ACPI && X86
        select MXM_WMI if ACPI && X86
@@ -19,7 +19,6 @@ config DRM_NOUVEAU
        # Similar to i915, we need to select ACPI_VIDEO and it's dependencies
        select BACKLIGHT_LCD_SUPPORT if ACPI && X86
        select BACKLIGHT_CLASS_DEVICE if ACPI && X86
-       select VIDEO_OUTPUT_CONTROL if ACPI && X86
        select INPUT if ACPI && X86
        select THERMAL if ACPI && X86
        select ACPI_VIDEO if ACPI && X86
index 5c342b3..3c0f57e 100644 (file)
@@ -134,7 +134,8 @@ static irqreturn_t st1232_ts_irq_handler(int irq, void *dev_id)
        } else if (!ts->low_latency_req.dev) {
                /* First contact, request 100 us latency. */
                dev_pm_qos_add_ancestor_request(&ts->client->dev,
-                                               &ts->low_latency_req, 100);
+                                               &ts->low_latency_req,
+                                               DEV_PM_QOS_RESUME_LATENCY, 100);
        }
 
        /* SYN_REPORT */
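
The hunk above reflects the new device PM QoS request types: dev_pm_qos_add_ancestor_request() now takes an explicit type argument, DEV_PM_QOS_RESUME_LATENCY here. A hedged sketch of the call pattern follows; the driver structure and function names are hypothetical.

#include <linux/device.h>
#include <linux/pm_qos.h>

struct example_touch {                          /* hypothetical driver state */
        struct device *dev;
        struct dev_pm_qos_request low_latency_req;
};

static void example_first_contact(struct example_touch *ts)
{
        /* On first contact, ask the ancestor chain for a 100 us resume limit. */
        if (!ts->low_latency_req.dev)
                dev_pm_qos_add_ancestor_request(ts->dev, &ts->low_latency_req,
                                                DEV_PM_QOS_RESUME_LATENCY, 100);
}

static void example_last_release(struct example_touch *ts)
{
        if (ts->low_latency_req.dev)
                dev_pm_qos_remove_request(&ts->low_latency_req);
}
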
index d72783d..c067023 100644 (file)
@@ -897,7 +897,7 @@ static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
                if (!flctl->qos_request) {
                        ret = dev_pm_qos_add_request(&flctl->pdev->dev,
                                                        &flctl->pm_qos,
-                                                       DEV_PM_QOS_LATENCY,
+                                                       DEV_PM_QOS_RESUME_LATENCY,
                                                        100);
                        if (ret < 0)
                                dev_err(&flctl->pdev->dev,
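
The direct form changes the same way: DEV_PM_QOS_RESUME_LATENCY replaces DEV_PM_QOS_LATENCY in dev_pm_qos_add_request(). A minimal hedged sketch, with the helper names and the 100 us value chosen purely for illustration:

#include <linux/device.h>
#include <linux/pm_qos.h>

static struct dev_pm_qos_request example_req;

static int example_limit_resume_latency(struct device *dev)
{
        /* Cap acceptable resume latency at 100 us while I/O is in flight. */
        return dev_pm_qos_add_request(dev, &example_req,
                                      DEV_PM_QOS_RESUME_LATENCY, 100);
}

static void example_drop_limit(void)
{
        dev_pm_qos_remove_request(&example_req);
}
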
index b6162be..2b85924 100644 (file)
@@ -93,7 +93,6 @@ struct acpiphp_slot {
        struct list_head funcs;         /* one slot may have different
                                           objects (i.e. for each function) */
        struct slot *slot;
-       struct mutex crit_sect;
 
        u8              device;         /* pci device# */
        u32             flags;          /* see below */
@@ -117,20 +116,30 @@ struct acpiphp_func {
 };
 
 struct acpiphp_context {
-       acpi_handle handle;
+       struct acpi_hotplug_context hp;
        struct acpiphp_func func;
        struct acpiphp_bridge *bridge;
        unsigned int refcount;
 };
 
+static inline struct acpiphp_context *to_acpiphp_context(struct acpi_hotplug_context *hp)
+{
+       return container_of(hp, struct acpiphp_context, hp);
+}
+
 static inline struct acpiphp_context *func_to_context(struct acpiphp_func *func)
 {
        return container_of(func, struct acpiphp_context, func);
 }
 
+static inline struct acpi_device *func_to_acpi_device(struct acpiphp_func *func)
+{
+       return func_to_context(func)->hp.self;
+}
+
 static inline acpi_handle func_to_handle(struct acpiphp_func *func)
 {
-       return func_to_context(func)->handle;
+       return func_to_acpi_device(func)->handle;
 }
 
 /*
@@ -158,7 +167,6 @@ struct acpiphp_attention_info
 
 #define FUNC_HAS_STA           (0x00000001)
 #define FUNC_HAS_EJ0           (0x00000002)
-#define FUNC_HAS_DCK            (0x00000004)
 
 /* function prototypes */
 
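
The new helpers above work because struct acpiphp_context embeds the generic hotplug context: the pointer stored in acpi_device->hp maps back to the subsystem context via container_of(), and the owning device is recovered through hp.self. A standalone illustration of that embedding pattern (structure shapes simplified; a plain container_of stands in for the kernel macro):

#include <stddef.h>

typedef unsigned int u32;

struct acpi_device;                             /* opaque in this sketch */

struct acpi_hotplug_context {                   /* shape as in the acpi_bus.h hunk below */
        struct acpi_device *self;
        int (*notify)(struct acpi_device *, u32);
        void (*uevent)(struct acpi_device *, u32);
        void (*fixup)(struct acpi_device *);
};

struct example_context {                        /* stand-in for struct acpiphp_context */
        struct acpi_hotplug_context hp;
        unsigned int refcount;
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static struct example_context *to_example_context(struct acpi_hotplug_context *hp)
{
        return container_of(hp, struct example_context, hp);
}

static struct acpi_device *example_to_acpi_device(struct example_context *ctx)
{
        return ctx->hp.self;                    /* back-pointer set at registration */
}
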
index 7c7a388..828acf4 100644 (file)
 
 static LIST_HEAD(bridge_list);
 static DEFINE_MUTEX(bridge_mutex);
-static DEFINE_MUTEX(acpiphp_context_lock);
 
-static void handle_hotplug_event(acpi_handle handle, u32 type, void *data);
+static int acpiphp_hotplug_notify(struct acpi_device *adev, u32 type);
+static void acpiphp_post_dock_fixup(struct acpi_device *adev);
 static void acpiphp_sanitize_bus(struct pci_bus *bus);
 static void acpiphp_set_hpp_values(struct pci_bus *bus);
-static void hotplug_event(acpi_handle handle, u32 type, void *data);
+static void hotplug_event(u32 type, struct acpiphp_context *context);
 static void free_bridge(struct kref *kref);
 
-static void acpiphp_context_handler(acpi_handle handle, void *context)
-{
-       /* Intentionally empty. */
-}
-
 /**
  * acpiphp_init_context - Create hotplug context and grab a reference to it.
- * @handle: ACPI object handle to create the context for.
+ * @adev: ACPI device object to create the context for.
  *
- * Call under acpiphp_context_lock.
+ * Call under acpi_hp_context_lock.
  */
-static struct acpiphp_context *acpiphp_init_context(acpi_handle handle)
+static struct acpiphp_context *acpiphp_init_context(struct acpi_device *adev)
 {
        struct acpiphp_context *context;
-       acpi_status status;
 
        context = kzalloc(sizeof(*context), GFP_KERNEL);
        if (!context)
                return NULL;
 
-       context->handle = handle;
        context->refcount = 1;
-       status = acpi_attach_data(handle, acpiphp_context_handler, context);
-       if (ACPI_FAILURE(status)) {
-               kfree(context);
-               return NULL;
-       }
+       acpi_set_hp_context(adev, &context->hp, acpiphp_hotplug_notify, NULL,
+                           acpiphp_post_dock_fixup);
        return context;
 }
 
 /**
  * acpiphp_get_context - Get hotplug context and grab a reference to it.
- * @handle: ACPI object handle to get the context for.
+ * @adev: ACPI device object to get the context for.
  *
- * Call under acpiphp_context_lock.
+ * Call under acpi_hp_context_lock.
  */
-static struct acpiphp_context *acpiphp_get_context(acpi_handle handle)
+static struct acpiphp_context *acpiphp_get_context(struct acpi_device *adev)
 {
-       struct acpiphp_context *context = NULL;
-       acpi_status status;
-       void *data;
+       struct acpiphp_context *context;
 
-       status = acpi_get_data(handle, acpiphp_context_handler, &data);
-       if (ACPI_SUCCESS(status)) {
-               context = data;
-               context->refcount++;
-       }
+       if (!adev->hp)
+               return NULL;
+
+       context = to_acpiphp_context(adev->hp);
+       context->refcount++;
        return context;
 }
 
 /**
  * acpiphp_put_context - Drop a reference to ACPI hotplug context.
- * @handle: ACPI object handle to put the context for.
+ * @context: ACPI hotplug context to drop a reference to.
  *
  * The context object is removed if there are no more references to it.
  *
- * Call under acpiphp_context_lock.
+ * Call under acpi_hp_context_lock.
  */
 static void acpiphp_put_context(struct acpiphp_context *context)
 {
@@ -130,7 +118,7 @@ static void acpiphp_put_context(struct acpiphp_context *context)
                return;
 
        WARN_ON(context->bridge);
-       acpi_detach_data(context->handle, acpiphp_context_handler);
+       context->hp.self->hp = NULL;
        kfree(context);
 }
 
@@ -144,6 +132,27 @@ static inline void put_bridge(struct acpiphp_bridge *bridge)
        kref_put(&bridge->ref, free_bridge);
 }
 
+static struct acpiphp_context *acpiphp_grab_context(struct acpi_device *adev)
+{
+       struct acpiphp_context *context;
+
+       acpi_lock_hp_context();
+       context = acpiphp_get_context(adev);
+       if (!context || context->func.parent->is_going_away) {
+               acpi_unlock_hp_context();
+               return NULL;
+       }
+       get_bridge(context->func.parent);
+       acpiphp_put_context(context);
+       acpi_unlock_hp_context();
+       return context;
+}
+
+static void acpiphp_let_context_go(struct acpiphp_context *context)
+{
+       put_bridge(context->func.parent);
+}
+
 static void free_bridge(struct kref *kref)
 {
        struct acpiphp_context *context;
@@ -151,7 +160,7 @@ static void free_bridge(struct kref *kref)
        struct acpiphp_slot *slot, *next;
        struct acpiphp_func *func, *tmp;
 
-       mutex_lock(&acpiphp_context_lock);
+       acpi_lock_hp_context();
 
        bridge = container_of(kref, struct acpiphp_bridge, ref);
 
@@ -175,31 +184,32 @@ static void free_bridge(struct kref *kref)
        pci_dev_put(bridge->pci_dev);
        kfree(bridge);
 
-       mutex_unlock(&acpiphp_context_lock);
+       acpi_unlock_hp_context();
 }
 
-/*
- * the _DCK method can do funny things... and sometimes not
- * hah-hah funny.
+/**
+ * acpiphp_post_dock_fixup - Post-dock fixups for PCI devices.
+ * @adev: ACPI device object corresponding to a PCI device.
  *
- * TBD - figure out a way to only call fixups for
- * systems that require them.
+ * TBD - figure out a way to only call fixups for systems that require them.
  */
-static void post_dock_fixups(acpi_handle not_used, u32 event, void *data)
+static void acpiphp_post_dock_fixup(struct acpi_device *adev)
 {
-       struct acpiphp_context *context = data;
-       struct pci_bus *bus = context->func.slot->bus;
+       struct acpiphp_context *context = acpiphp_grab_context(adev);
+       struct pci_bus *bus;
        u32 buses;
 
-       if (!bus->self)
+       if (!context)
                return;
 
+       bus = context->func.slot->bus;
+       if (!bus->self)
+               goto out;
+
        /* fixup bad _DCK function that rewrites
         * secondary bridge on slot
         */
-       pci_read_config_dword(bus->self,
-                       PCI_PRIMARY_BUS,
-                       &buses);
+       pci_read_config_dword(bus->self, PCI_PRIMARY_BUS, &buses);
 
        if (((buses >> 8) & 0xff) != bus->busn_res.start) {
                buses = (buses & 0xff000000)
@@ -208,33 +218,11 @@ static void post_dock_fixups(acpi_handle not_used, u32 event, void *data)
                        | ((unsigned int)(bus->busn_res.end) << 16);
                pci_write_config_dword(bus->self, PCI_PRIMARY_BUS, buses);
        }
-}
-
-static void dock_event(acpi_handle handle, u32 type, void *data)
-{
-       struct acpiphp_context *context;
-
-       mutex_lock(&acpiphp_context_lock);
-       context = acpiphp_get_context(handle);
-       if (!context || WARN_ON(context->handle != handle)
-           || context->func.parent->is_going_away) {
-               mutex_unlock(&acpiphp_context_lock);
-               return;
-       }
-       get_bridge(context->func.parent);
-       acpiphp_put_context(context);
-       mutex_unlock(&acpiphp_context_lock);
-
-       hotplug_event(handle, type, data);
 
-       put_bridge(context->func.parent);
+ out:
+       acpiphp_let_context_go(context);
 }
 
-static const struct acpi_dock_ops acpiphp_dock_ops = {
-       .fixup = post_dock_fixups,
-       .handler = dock_event,
-};
-
 /* Check whether the PCI device is managed by native PCIe hotplug driver */
 static bool device_is_managed_by_native_pciehp(struct pci_dev *pdev)
 {
@@ -264,26 +252,19 @@ static bool device_is_managed_by_native_pciehp(struct pci_dev *pdev)
        return true;
 }
 
-static void acpiphp_dock_init(void *data)
-{
-       struct acpiphp_context *context = data;
-
-       get_bridge(context->func.parent);
-}
-
-static void acpiphp_dock_release(void *data)
-{
-       struct acpiphp_context *context = data;
-
-       put_bridge(context->func.parent);
-}
-
-/* callback routine to register each ACPI PCI slot object */
-static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
-                                void **rv)
+/**
+ * acpiphp_add_context - Add ACPIPHP context to an ACPI device object.
+ * @handle: ACPI handle of the object to add a context to.
+ * @lvl: Not used.
+ * @data: The object's parent ACPIPHP bridge.
+ * @rv: Not used.
+ */
+static acpi_status acpiphp_add_context(acpi_handle handle, u32 lvl, void *data,
+                                      void **rv)
 {
        struct acpiphp_bridge *bridge = data;
        struct acpiphp_context *context;
+       struct acpi_device *adev;
        struct acpiphp_slot *slot;
        struct acpiphp_func *newfunc;
        acpi_status status = AE_OK;
@@ -293,9 +274,6 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
        struct pci_dev *pdev = bridge->pci_dev;
        u32 val;
 
-       if (pdev && device_is_managed_by_native_pciehp(pdev))
-               return AE_OK;
-
        status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
        if (ACPI_FAILURE(status)) {
                if (status != AE_NOT_FOUND)
@@ -303,31 +281,34 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
                                "can't evaluate _ADR (%#x)\n", status);
                return AE_OK;
        }
+       if (acpi_bus_get_device(handle, &adev))
+               return AE_OK;
 
        device = (adr >> 16) & 0xffff;
        function = adr & 0xffff;
 
-       mutex_lock(&acpiphp_context_lock);
-       context = acpiphp_init_context(handle);
+       acpi_lock_hp_context();
+       context = acpiphp_init_context(adev);
        if (!context) {
-               mutex_unlock(&acpiphp_context_lock);
+               acpi_unlock_hp_context();
                acpi_handle_err(handle, "No hotplug context\n");
                return AE_NOT_EXIST;
        }
        newfunc = &context->func;
        newfunc->function = function;
        newfunc->parent = bridge;
-       mutex_unlock(&acpiphp_context_lock);
+       acpi_unlock_hp_context();
 
-       if (acpi_has_method(handle, "_EJ0"))
+       /*
+        * If this is a dock device, its _EJ0 should be executed by the dock
+        * notify handler after calling _DCK.
+        */
+       if (!is_dock_device(adev) && acpi_has_method(handle, "_EJ0"))
                newfunc->flags = FUNC_HAS_EJ0;
 
        if (acpi_has_method(handle, "_STA"))
                newfunc->flags |= FUNC_HAS_STA;
 
-       if (acpi_has_method(handle, "_DCK"))
-               newfunc->flags |= FUNC_HAS_DCK;
-
        /* search for objects that share the same slot */
        list_for_each_entry(slot, &bridge->slots, node)
                if (slot->device == device)
@@ -335,19 +316,26 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
 
        slot = kzalloc(sizeof(struct acpiphp_slot), GFP_KERNEL);
        if (!slot) {
-               status = AE_NO_MEMORY;
-               goto err;
+               acpi_lock_hp_context();
+               acpiphp_put_context(context);
+               acpi_unlock_hp_context();
+               return AE_NO_MEMORY;
        }
 
        slot->bus = bridge->pci_bus;
        slot->device = device;
        INIT_LIST_HEAD(&slot->funcs);
-       mutex_init(&slot->crit_sect);
 
        list_add_tail(&slot->node, &bridge->slots);
 
-       /* Register slots for ejectable functions only. */
-       if (acpi_pci_check_ejectable(pbus, handle)  || is_dock_device(handle)) {
+       /*
+        * Expose slots to user space for functions that have _EJ0 or _RMV or
+        * are located in dock stations.  Do not expose them for devices handled
+        * by the native PCIe hotplug (PCIeHP), because that code is supposed to

+        * expose slots to user space in those cases.
+        */
+       if ((acpi_pci_check_ejectable(pbus, handle) || is_dock_device(adev))
+           && !(pdev && device_is_managed_by_native_pciehp(pdev))) {
                unsigned long long sun;
                int retval;
 
@@ -381,44 +369,16 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
                                       &val, 60*1000))
                slot->flags |= SLOT_ENABLED;
 
-       if (is_dock_device(handle)) {
-               /* we don't want to call this device's _EJ0
-                * because we want the dock notify handler
-                * to call it after it calls _DCK
-                */
-               newfunc->flags &= ~FUNC_HAS_EJ0;
-               if (register_hotplug_dock_device(handle,
-                       &acpiphp_dock_ops, context,
-                       acpiphp_dock_init, acpiphp_dock_release))
-                       pr_debug("failed to register dock device\n");
-       }
-
-       /* install notify handler */
-       if (!(newfunc->flags & FUNC_HAS_DCK)) {
-               status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
-                                                    handle_hotplug_event,
-                                                    context);
-               if (ACPI_FAILURE(status))
-                       acpi_handle_err(handle,
-                                       "failed to install notify handler\n");
-       }
-
        return AE_OK;
-
- err:
-       mutex_lock(&acpiphp_context_lock);
-       acpiphp_put_context(context);
-       mutex_unlock(&acpiphp_context_lock);
-       return status;
 }
 
-static struct acpiphp_bridge *acpiphp_handle_to_bridge(acpi_handle handle)
+static struct acpiphp_bridge *acpiphp_dev_to_bridge(struct acpi_device *adev)
 {
        struct acpiphp_context *context;
        struct acpiphp_bridge *bridge = NULL;
 
-       mutex_lock(&acpiphp_context_lock);
-       context = acpiphp_get_context(handle);
+       acpi_lock_hp_context();
+       context = acpiphp_get_context(adev);
        if (context) {
                bridge = context->bridge;
                if (bridge)
@@ -426,7 +386,7 @@ static struct acpiphp_bridge *acpiphp_handle_to_bridge(acpi_handle handle)
 
                acpiphp_put_context(context);
        }
-       mutex_unlock(&acpiphp_context_lock);
+       acpi_unlock_hp_context();
        return bridge;
 }
 
@@ -434,22 +394,15 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
 {
        struct acpiphp_slot *slot;
        struct acpiphp_func *func;
-       acpi_status status;
 
        list_for_each_entry(slot, &bridge->slots, node) {
                list_for_each_entry(func, &slot->funcs, sibling) {
-                       acpi_handle handle = func_to_handle(func);
-
-                       if (is_dock_device(handle))
-                               unregister_hotplug_dock_device(handle);
+                       struct acpi_device *adev = func_to_acpi_device(func);
 
-                       if (!(func->flags & FUNC_HAS_DCK)) {
-                               status = acpi_remove_notify_handler(handle,
-                                                       ACPI_SYSTEM_NOTIFY,
-                                                       handle_hotplug_event);
-                               if (ACPI_FAILURE(status))
-                                       pr_err("failed to remove notify handler\n");
-                       }
+                       acpi_lock_hp_context();
+                       adev->hp->notify = NULL;
+                       adev->hp->fixup = NULL;
+                       acpi_unlock_hp_context();
                }
                slot->flags |= SLOT_IS_GOING_AWAY;
                if (slot->slot)
@@ -460,9 +413,9 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
        list_del(&bridge->list);
        mutex_unlock(&bridge_mutex);
 
-       mutex_lock(&acpiphp_context_lock);
+       acpi_lock_hp_context();
        bridge->is_going_away = true;
-       mutex_unlock(&acpiphp_context_lock);
+       acpi_unlock_hp_context();
 }
 
 /**
@@ -492,33 +445,6 @@ static unsigned char acpiphp_max_busnr(struct pci_bus *bus)
        return max;
 }
 
-/**
- * acpiphp_bus_trim - Trim device objects in an ACPI namespace subtree.
- * @handle: ACPI device object handle to start from.
- */
-static void acpiphp_bus_trim(acpi_handle handle)
-{
-       struct acpi_device *adev = NULL;
-
-       acpi_bus_get_device(handle, &adev);
-       if (adev)
-               acpi_bus_trim(adev);
-}
-
-/**
- * acpiphp_bus_add - Scan ACPI namespace subtree.
- * @handle: ACPI object handle to start the scan from.
- */
-static void acpiphp_bus_add(acpi_handle handle)
-{
-       struct acpi_device *adev = NULL;
-
-       acpi_bus_scan(handle);
-       acpi_bus_get_device(handle, &adev);
-       if (acpi_device_enumerated(adev))
-               acpi_device_set_power(adev, ACPI_STATE_D0);
-}
-
 static void acpiphp_set_acpi_region(struct acpiphp_slot *slot)
 {
        struct acpiphp_func *func;
@@ -558,9 +484,13 @@ static int acpiphp_rescan_slot(struct acpiphp_slot *slot)
 {
        struct acpiphp_func *func;
 
-       list_for_each_entry(func, &slot->funcs, sibling)
-               acpiphp_bus_add(func_to_handle(func));
+       list_for_each_entry(func, &slot->funcs, sibling) {
+               struct acpi_device *adev = func_to_acpi_device(func);
 
+               acpi_bus_scan(adev->handle);
+               if (acpi_device_enumerated(adev))
+                       acpi_device_set_power(adev, ACPI_STATE_D0);
+       }
        return pci_scan_slot(slot->bus, PCI_DEVFN(slot->device, 0));
 }
 
@@ -625,32 +555,15 @@ static void __ref enable_slot(struct acpiphp_slot *slot)
        }
 }
 
-/* return first device in slot, acquiring a reference on it */
-static struct pci_dev *dev_in_slot(struct acpiphp_slot *slot)
-{
-       struct pci_bus *bus = slot->bus;
-       struct pci_dev *dev;
-       struct pci_dev *ret = NULL;
-
-       down_read(&pci_bus_sem);
-       list_for_each_entry(dev, &bus->devices, bus_list)
-               if (PCI_SLOT(dev->devfn) == slot->device) {
-                       ret = pci_dev_get(dev);
-                       break;
-               }
-       up_read(&pci_bus_sem);
-
-       return ret;
-}
-
 /**
  * disable_slot - disable a slot
  * @slot: ACPI PHP slot
  */
 static void disable_slot(struct acpiphp_slot *slot)
 {
+       struct pci_bus *bus = slot->bus;
+       struct pci_dev *dev, *prev;
        struct acpiphp_func *func;
-       struct pci_dev *pdev;
 
        /*
         * enable_slot() enumerates all functions in this device via
@@ -658,22 +571,18 @@ static void disable_slot(struct acpiphp_slot *slot)
         * methods (_EJ0, etc.) or not.  Therefore, we remove all functions
         * here.
         */
-       while ((pdev = dev_in_slot(slot))) {
-               pci_stop_and_remove_bus_device(pdev);
-               pci_dev_put(pdev);
-       }
+       list_for_each_entry_safe_reverse(dev, prev, &bus->devices, bus_list)
+               if (PCI_SLOT(dev->devfn) == slot->device)
+                       pci_stop_and_remove_bus_device(dev);
 
        list_for_each_entry(func, &slot->funcs, sibling)
-               acpiphp_bus_trim(func_to_handle(func));
+               acpi_bus_trim(func_to_acpi_device(func));
 
        slot->flags &= (~SLOT_ENABLED);
 }
 
-static bool acpiphp_no_hotplug(acpi_handle handle)
+static bool acpiphp_no_hotplug(struct acpi_device *adev)
 {
-       struct acpi_device *adev = NULL;
-
-       acpi_bus_get_device(handle, &adev);
        return adev && adev->flags.no_hotplug;
 }
 
@@ -682,7 +591,7 @@ static bool slot_no_hotplug(struct acpiphp_slot *slot)
        struct acpiphp_func *func;
 
        list_for_each_entry(func, &slot->funcs, sibling)
-               if (acpiphp_no_hotplug(func_to_handle(func)))
+               if (acpiphp_no_hotplug(func_to_acpi_device(func)))
                        return true;
 
        return false;
@@ -747,28 +656,25 @@ static inline bool device_status_valid(unsigned int sta)
  */
 static void trim_stale_devices(struct pci_dev *dev)
 {
-       acpi_handle handle = ACPI_HANDLE(&dev->dev);
+       struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
        struct pci_bus *bus = dev->subordinate;
        bool alive = false;
 
-       if (handle) {
+       if (adev) {
                acpi_status status;
                unsigned long long sta;
 
-               status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
+               status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta);
                alive = (ACPI_SUCCESS(status) && device_status_valid(sta))
-                       || acpiphp_no_hotplug(handle);
+                       || acpiphp_no_hotplug(adev);
        }
-       if (!alive) {
-               u32 v;
+       if (!alive)
+               alive = pci_device_is_present(dev);
 
-               /* Check if the device responds. */
-               alive = pci_bus_read_dev_vendor_id(dev->bus, dev->devfn, &v, 0);
-       }
        if (!alive) {
                pci_stop_and_remove_bus_device(dev);
-               if (handle)
-                       acpiphp_bus_trim(handle);
+               if (adev)
+                       acpi_bus_trim(adev);
        } else if (bus) {
                struct pci_dev *child, *tmp;
 
@@ -800,7 +706,6 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
                struct pci_bus *bus = slot->bus;
                struct pci_dev *dev, *tmp;
 
-               mutex_lock(&slot->crit_sect);
                if (slot_no_hotplug(slot)) {
                        ; /* do nothing */
                } else if (device_status_valid(get_slot_status(slot))) {
@@ -815,7 +720,6 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
                } else {
                        disable_slot(slot);
                }
-               mutex_unlock(&slot->crit_sect);
        }
 }
 
@@ -855,11 +759,11 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus)
  * ACPI event handlers
  */
 
-void acpiphp_check_host_bridge(acpi_handle handle)
+void acpiphp_check_host_bridge(struct acpi_device *adev)
 {
        struct acpiphp_bridge *bridge;
 
-       bridge = acpiphp_handle_to_bridge(handle);
+       bridge = acpiphp_dev_to_bridge(adev);
        if (bridge) {
                pci_lock_rescan_remove();
 
@@ -872,73 +776,52 @@ void acpiphp_check_host_bridge(acpi_handle handle)
 
 static int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot);
 
-static void hotplug_event(acpi_handle handle, u32 type, void *data)
+static void hotplug_event(u32 type, struct acpiphp_context *context)
 {
-       struct acpiphp_context *context = data;
+       acpi_handle handle = context->hp.self->handle;
        struct acpiphp_func *func = &context->func;
+       struct acpiphp_slot *slot = func->slot;
        struct acpiphp_bridge *bridge;
-       char objname[64];
-       struct acpi_buffer buffer = { .length = sizeof(objname),
-                                     .pointer = objname };
 
-       mutex_lock(&acpiphp_context_lock);
+       acpi_lock_hp_context();
        bridge = context->bridge;
        if (bridge)
                get_bridge(bridge);
 
-       mutex_unlock(&acpiphp_context_lock);
+       acpi_unlock_hp_context();
 
        pci_lock_rescan_remove();
-       acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
 
        switch (type) {
        case ACPI_NOTIFY_BUS_CHECK:
                /* bus re-enumerate */
-               pr_debug("%s: Bus check notify on %s\n", __func__, objname);
-               pr_debug("%s: re-enumerating slots under %s\n",
-                        __func__, objname);
-               if (bridge) {
+               acpi_handle_debug(handle, "Bus check in %s()\n", __func__);
+               if (bridge)
                        acpiphp_check_bridge(bridge);
-               } else {
-                       struct acpiphp_slot *slot = func->slot;
-
-                       if (slot->flags & SLOT_IS_GOING_AWAY)
-                               break;
-
-                       mutex_lock(&slot->crit_sect);
+               else if (!(slot->flags & SLOT_IS_GOING_AWAY))
                        enable_slot(slot);
-                       mutex_unlock(&slot->crit_sect);
-               }
+
                break;
 
        case ACPI_NOTIFY_DEVICE_CHECK:
                /* device check */
-               pr_debug("%s: Device check notify on %s\n", __func__, objname);
+               acpi_handle_debug(handle, "Device check in %s()\n", __func__);
                if (bridge) {
                        acpiphp_check_bridge(bridge);
-               } else {
-                       struct acpiphp_slot *slot = func->slot;
-                       int ret;
-
-                       if (slot->flags & SLOT_IS_GOING_AWAY)
-                               break;
-
+               } else if (!(slot->flags & SLOT_IS_GOING_AWAY)) {
                        /*
                         * Check if anything has changed in the slot and rescan
                         * from the parent if that's the case.
                         */
-                       mutex_lock(&slot->crit_sect);
-                       ret = acpiphp_rescan_slot(slot);
-                       mutex_unlock(&slot->crit_sect);
-                       if (ret)
+                       if (acpiphp_rescan_slot(slot))
                                acpiphp_check_bridge(func->parent);
                }
                break;
 
        case ACPI_NOTIFY_EJECT_REQUEST:
                /* request device eject */
-               pr_debug("%s: Device eject notify on %s\n", __func__, objname);
-               acpiphp_disable_and_eject_slot(func->slot);
+               acpi_handle_debug(handle, "Eject request in %s()\n", __func__);
+               acpiphp_disable_and_eject_slot(slot);
                break;
        }
 
@@ -947,106 +830,41 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
                put_bridge(bridge);
 }
 
-static void hotplug_event_work(void *data, u32 type)
+static int acpiphp_hotplug_notify(struct acpi_device *adev, u32 type)
 {
-       struct acpiphp_context *context = data;
-       acpi_handle handle = context->handle;
-
-       acpi_scan_lock_acquire();
+       struct acpiphp_context *context;
 
-       hotplug_event(handle, type, context);
+       context = acpiphp_grab_context(adev);
+       if (!context)
+               return -ENODATA;
 
-       acpi_scan_lock_release();
-       acpi_evaluate_hotplug_ost(handle, type, ACPI_OST_SC_SUCCESS, NULL);
-       put_bridge(context->func.parent);
+       hotplug_event(type, context);
+       acpiphp_let_context_go(context);
+       return 0;
 }
 
 /**
- * handle_hotplug_event - handle ACPI hotplug event
- * @handle: Notify()'ed acpi_handle
- * @type: Notify code
- * @data: pointer to acpiphp_context structure
+ * acpiphp_enumerate_slots - Enumerate PCI slots for a given bus.
+ * @bus: PCI bus to enumerate the slots for.
  *
- * Handles ACPI event notification on slots.
- */
-static void handle_hotplug_event(acpi_handle handle, u32 type, void *data)
-{
-       struct acpiphp_context *context;
-       u32 ost_code = ACPI_OST_SC_SUCCESS;
-       acpi_status status;
-
-       switch (type) {
-       case ACPI_NOTIFY_BUS_CHECK:
-       case ACPI_NOTIFY_DEVICE_CHECK:
-               break;
-       case ACPI_NOTIFY_EJECT_REQUEST:
-               ost_code = ACPI_OST_SC_EJECT_IN_PROGRESS;
-               acpi_evaluate_hotplug_ost(handle, type, ost_code, NULL);
-               break;
-
-       case ACPI_NOTIFY_DEVICE_WAKE:
-               return;
-
-       case ACPI_NOTIFY_FREQUENCY_MISMATCH:
-               acpi_handle_err(handle, "Device cannot be configured due "
-                               "to a frequency mismatch\n");
-               goto out;
-
-       case ACPI_NOTIFY_BUS_MODE_MISMATCH:
-               acpi_handle_err(handle, "Device cannot be configured due "
-                               "to a bus mode mismatch\n");
-               goto out;
-
-       case ACPI_NOTIFY_POWER_FAULT:
-               acpi_handle_err(handle, "Device has suffered a power fault\n");
-               goto out;
-
-       default:
-               acpi_handle_warn(handle, "Unsupported event type 0x%x\n", type);
-               ost_code = ACPI_OST_SC_UNRECOGNIZED_NOTIFY;
-               goto out;
-       }
-
-       mutex_lock(&acpiphp_context_lock);
-       context = acpiphp_get_context(handle);
-       if (!context || WARN_ON(context->handle != handle)
-           || context->func.parent->is_going_away)
-               goto err_out;
-
-       get_bridge(context->func.parent);
-       acpiphp_put_context(context);
-       status = acpi_hotplug_execute(hotplug_event_work, context, type);
-       if (ACPI_SUCCESS(status)) {
-               mutex_unlock(&acpiphp_context_lock);
-               return;
-       }
-       put_bridge(context->func.parent);
-
- err_out:
-       mutex_unlock(&acpiphp_context_lock);
-       ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
-
- out:
-       acpi_evaluate_hotplug_ost(handle, type, ost_code, NULL);
-}
-
-/*
- * Create hotplug slots for the PCI bus.
- * It should always return 0 to avoid skipping following notifiers.
+ * A "slot" is an object associated with a PCI device number.  All functions
+ * (PCI devices) with the same bus and device number belong to the same slot.
  */
 void acpiphp_enumerate_slots(struct pci_bus *bus)
 {
        struct acpiphp_bridge *bridge;
+       struct acpi_device *adev;
        acpi_handle handle;
        acpi_status status;
 
        if (acpiphp_disabled)
                return;
 
-       handle = ACPI_HANDLE(bus->bridge);
-       if (!handle)
+       adev = ACPI_COMPANION(bus->bridge);
+       if (!adev)
                return;
 
+       handle = adev->handle;
        bridge = kzalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL);
        if (!bridge) {
                acpi_handle_err(handle, "No memory for bridge object\n");
@@ -1074,10 +892,10 @@ void acpiphp_enumerate_slots(struct pci_bus *bus)
                 * parent is going to be handled by pciehp, in which case this
                 * bridge is not interesting to us either.
                 */
-               mutex_lock(&acpiphp_context_lock);
-               context = acpiphp_get_context(handle);
+               acpi_lock_hp_context();
+               context = acpiphp_get_context(adev);
                if (!context) {
-                       mutex_unlock(&acpiphp_context_lock);
+                       acpi_unlock_hp_context();
                        put_device(&bus->dev);
                        pci_dev_put(bridge->pci_dev);
                        kfree(bridge);
@@ -1087,17 +905,17 @@ void acpiphp_enumerate_slots(struct pci_bus *bus)
                context->bridge = bridge;
                /* Get a reference to the parent bridge. */
                get_bridge(context->func.parent);
-               mutex_unlock(&acpiphp_context_lock);
+               acpi_unlock_hp_context();
        }
 
-       /* must be added to the list prior to calling register_slot */
+       /* Must be added to the list prior to calling acpiphp_add_context(). */
        mutex_lock(&bridge_mutex);
        list_add(&bridge->list, &bridge_list);
        mutex_unlock(&bridge_mutex);
 
        /* register all slot objects under this bridge */
        status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
-                                    register_slot, NULL, bridge, NULL);
+                                    acpiphp_add_context, NULL, bridge, NULL);
        if (ACPI_FAILURE(status)) {
                acpi_handle_err(handle, "failed to register slots\n");
                cleanup_bridge(bridge);
@@ -1105,7 +923,10 @@ void acpiphp_enumerate_slots(struct pci_bus *bus)
        }
 }
 
-/* Destroy hotplug slots associated with the PCI bus */
+/**
+ * acpiphp_remove_slots - Remove slot objects associated with a given bus.
+ * @bus: PCI bus to remove the slot objects for.
+ */
 void acpiphp_remove_slots(struct pci_bus *bus)
 {
        struct acpiphp_bridge *bridge;
@@ -1136,13 +957,10 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot)
        if (slot->flags & SLOT_IS_GOING_AWAY)
                return -ENODEV;
 
-       mutex_lock(&slot->crit_sect);
        /* configure all functions */
        if (!(slot->flags & SLOT_ENABLED))
                enable_slot(slot);
 
-       mutex_unlock(&slot->crit_sect);
-
        pci_unlock_rescan_remove();
        return 0;
 }
@@ -1158,8 +976,6 @@ static int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot)
        if (slot->flags & SLOT_IS_GOING_AWAY)
                return -ENODEV;
 
-       mutex_lock(&slot->crit_sect);
-
        /* unconfigure all functions */
        disable_slot(slot);
 
@@ -1173,7 +989,6 @@ static int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot)
                        break;
                }
 
-       mutex_unlock(&slot->crit_sect);
        return 0;
 }
 
@@ -1181,9 +996,15 @@ int acpiphp_disable_slot(struct acpiphp_slot *slot)
 {
        int ret;
 
+       /*
+        * Acquire acpi_scan_lock to ensure that the execution of _EJ0 in
+        * acpiphp_disable_and_eject_slot() will be synchronized properly.
+        */
+       acpi_scan_lock_acquire();
        pci_lock_rescan_remove();
        ret = acpiphp_disable_and_eject_slot(slot);
        pci_unlock_rescan_remove();
+       acpi_scan_lock_release();
        return ret;
 }
 
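
Taken together, the rewrite above shows what a subsystem now provides under the consolidated notification scheme: an embedded acpi_hotplug_context installed with acpi_set_hp_context() under acpi_lock_hp_context(), and a ->notify callback that returns 0 or an error such as -ENODATA. A hedged sketch of that registration step; every name prefixed example_ is hypothetical.

#include <linux/acpi.h>
#include <linux/slab.h>

struct example_hp_context {
        struct acpi_hotplug_context hp;
        /* subsystem-specific state would live here */
};

static int example_hotplug_notify(struct acpi_device *adev, u32 type)
{
        /* react to ACPI_NOTIFY_BUS_CHECK / DEVICE_CHECK / EJECT_REQUEST */
        return 0;
}

static int example_attach_hp_context(struct acpi_device *adev)
{
        struct example_hp_context *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

        if (!ctx)
                return -ENOMEM;

        acpi_lock_hp_context();
        acpi_set_hp_context(adev, &ctx->hp, example_hotplug_notify,
                            NULL /* uevent */, NULL /* fixup */);
        acpi_unlock_hp_context();
        return 0;
}
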
index 25f0bc6..d911e0c 100644 (file)
@@ -616,15 +616,11 @@ static int pci_pm_prepare(struct device *dev)
        int error = 0;
 
        /*
-        * PCI devices suspended at run time need to be resumed at this
-        * point, because in general it is necessary to reconfigure them for
-        * system suspend.  Namely, if the device is supposed to wake up the
-        * system from the sleep state, we may need to reconfigure it for this
-        * purpose.  In turn, if the device is not supposed to wake up the
-        * system from the sleep state, we'll have to prevent it from signaling
-        * wake-up.
+        * Devices having power.ignore_children set may still be necessary for
+        * suspending their children in the next phase of device suspend.
         */
-       pm_runtime_resume(dev);
+       if (dev->power.ignore_children)
+               pm_runtime_resume(dev);
 
        if (drv && drv->pm && drv->pm->prepare)
                error = drv->pm->prepare(dev);
@@ -654,6 +650,16 @@ static int pci_pm_suspend(struct device *dev)
                goto Fixup;
        }
 
+       /*
+        * PCI devices suspended at run time need to be resumed at this point,
+        * because in general it is necessary to reconfigure them for system
+        * suspend.  Namely, if the device is supposed to wake up the system
+        * from the sleep state, we may need to reconfigure it for this purpose.
+        * In turn, if the device is not supposed to wake up the system from the
+        * sleep state, we'll have to prevent it from signaling wake-up.
+        */
+       pm_runtime_resume(dev);
+
        pci_dev->state_saved = false;
        if (pm->suspend) {
                pci_power_t prev = pci_dev->current_state;
@@ -808,6 +814,14 @@ static int pci_pm_freeze(struct device *dev)
                return 0;
        }
 
+       /*
+        * This used to be done in pci_pm_prepare() for all devices and some
+        * drivers may depend on it, so do it here.  Ideally, runtime-suspended
+        * devices should not be touched during freeze/thaw transitions,
+        * however.
+        */
+       pm_runtime_resume(dev);
+
        pci_dev->state_saved = false;
        if (pm->freeze) {
                int error;
@@ -915,6 +929,9 @@ static int pci_pm_poweroff(struct device *dev)
                goto Fixup;
        }
 
+       /* The reason to do that is the same as in pci_pm_suspend(). */
+       pm_runtime_resume(dev);
+
        pci_dev->state_saved = false;
        if (pm->poweroff) {
                int error;
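
For drivers, the practical consequence of the hunks above is an ordering guarantee: ->prepare() may now run while the device is still runtime-suspended, and only the suspend, freeze and poweroff phases force a runtime resume before the driver callback. A hedged sketch of a dev_pm_ops written against that ordering; the callbacks are illustrative and do no real work.

#include <linux/device.h>
#include <linux/pm.h>

static int example_prepare(struct device *dev)
{
        /* The device may still be runtime-suspended here; avoid touching it. */
        return 0;
}

static int example_suspend(struct device *dev)
{
        /*
         * By this point the PCI bus type has called pm_runtime_resume(), so
         * the device is fully powered and can be reconfigured for wakeup.
         */
        return 0;
}

static const struct dev_pm_ops example_pm_ops = {
        .prepare = example_prepare,
        SET_SYSTEM_SLEEP_PM_OPS(example_suspend, NULL)
};
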
index 6eecd7c..54d3089 100644 (file)
@@ -125,9 +125,6 @@ sa1100_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
                if (freqs->new < freqs->old)
                        sa1100_pcmcia_set_mecr(skt, freqs->new);
                break;
-       case CPUFREQ_RESUMECHANGE:
-               sa1100_pcmcia_set_mecr(skt, freqs->new);
-               break;
        }
 
        return 0;
index 5ae65c1..5f67843 100644 (file)
@@ -27,8 +27,6 @@ config ACER_WMI
        depends on ACPI_WMI
        select INPUT_SPARSEKMAP
        # Acer WMI depends on ACPI_VIDEO when ACPI is enabled
-       # but for select to work, need to select ACPI_VIDEO's dependencies, ick
-        select VIDEO_OUTPUT_CONTROL if ACPI
         select ACPI_VIDEO if ACPI
        ---help---
          This is a driver for newer Acer (and Wistron) laptops. It adds
index be02bcc..e6f3362 100644 (file)
@@ -66,7 +66,6 @@
 #include <linux/backlight.h>
 #include <linux/input.h>
 #include <linux/kfifo.h>
-#include <linux/video_output.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
index 3c67683..61b51e1 100644 (file)
@@ -834,7 +834,7 @@ static int rapl_write_data_raw(struct rapl_domain *rd,
 }
 
 static const struct x86_cpu_id energy_unit_quirk_ids[] = {
-       { X86_VENDOR_INTEL, 6, 0x37},/* VLV */
+       { X86_VENDOR_INTEL, 6, 0x37},/* Valleyview */
        {}
 };
 
@@ -947,11 +947,11 @@ static void package_power_limit_irq_restore(int package_id)
 }
 
 static const struct x86_cpu_id rapl_ids[] = {
-       { X86_VENDOR_INTEL, 6, 0x2a},/* SNB */
-       { X86_VENDOR_INTEL, 6, 0x2d},/* SNB EP */
-       { X86_VENDOR_INTEL, 6, 0x37},/* VLV */
-       { X86_VENDOR_INTEL, 6, 0x3a},/* IVB */
-       { X86_VENDOR_INTEL, 6, 0x45},/* HSW */
+       { X86_VENDOR_INTEL, 6, 0x2a},/* Sandy Bridge */
+       { X86_VENDOR_INTEL, 6, 0x2d},/* Sandy Bridge EP */
+       { X86_VENDOR_INTEL, 6, 0x37},/* Valleyview */
+       { X86_VENDOR_INTEL, 6, 0x3a},/* Ivy Bridge */
+       { X86_VENDOR_INTEL, 6, 0x45},/* Haswell */
        /* TODO: Add more CPU IDs after testing */
        {}
 };
@@ -1147,6 +1147,11 @@ static int rapl_check_domain(int cpu, int domain)
        if (rdmsrl_safe_on_cpu(cpu, msr, &val1))
                return -ENODEV;
 
+       /* PP1/uncore/graphics domain may not be active at the time of
+        * driver loading. So skip further checks.
+        */
+       if (domain == RAPL_DOMAIN_PP1)
+               return 0;
        /* energy counters roll slowly on some domains */
        while (++retry < 10) {
                usleep_range(10000, 15000);
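
For context, a model table like rapl_ids above is typically consumed with x86_match_cpu(), which returns the first entry matching the boot CPU or NULL if the processor is not listed. A hedged sketch of that pattern; the example_ names are hypothetical.

#include <linux/module.h>
#include <asm/cpu_device_id.h>

static const struct x86_cpu_id example_ids[] = {
        { X86_VENDOR_INTEL, 6, 0x2a },  /* Sandy Bridge */
        { X86_VENDOR_INTEL, 6, 0x3a },  /* Ivy Bridge */
        {}
};
MODULE_DEVICE_TABLE(x86cpu, example_ids);

static int __init example_init(void)
{
        if (!x86_match_cpu(example_ids))
                return -ENODEV;         /* unsupported processor model */
        return 0;
}
module_init(example_init);

MODULE_LICENSE("GPL");
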
index be33d2b..7e0b626 100644 (file)
@@ -1041,8 +1041,7 @@ static int sci_notifier(struct notifier_block *self,
 
        sci_port = container_of(self, struct sci_port, freq_transition);
 
-       if ((phase == CPUFREQ_POSTCHANGE) ||
-           (phase == CPUFREQ_RESUMECHANGE)) {
+       if (phase == CPUFREQ_POSTCHANGE) {
                struct uart_port *port = &sci_port->port;
 
                spin_lock_irqsave(&port->lock, flags);
index dade5b7..97a8f3a 100644 (file)
@@ -27,12 +27,6 @@ config VGASTATE
        tristate
        default n
 
-config VIDEO_OUTPUT_CONTROL
-       tristate "Lowlevel video output switch controls"
-       help
-         This framework adds support for low-level control of the video 
-         output switch.
-
 config VIDEOMODE_HELPERS
        bool
 
index ae17ddf..08d6a4a 100644 (file)
@@ -172,8 +172,6 @@ obj-$(CONFIG_FB_SIMPLE)           += simplefb.o
 # the test framebuffer is last
 obj-$(CONFIG_FB_VIRTUAL)          += vfb.o
 
-#video output switch sysfs driver
-obj-$(CONFIG_VIDEO_OUTPUT_CONTROL) += output.o
 obj-$(CONFIG_VIDEOMODE_HELPERS) += display_timing.o videomode.o
 ifeq ($(CONFIG_OF),y)
 obj-$(CONFIG_VIDEOMODE_HELPERS) += of_display_timing.o of_videomode.o
diff --git a/drivers/video/output.c b/drivers/video/output.c
deleted file mode 100644 (file)
index 1446c49..0000000
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- *  output.c - Display Output Switch driver
- *
- *  Copyright (C) 2006 Luming Yu <luming.yu@intel.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or (at
- *  your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful, but
- *  WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- *  General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-#include <linux/module.h>
-#include <linux/video_output.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/ctype.h>
-
-
-MODULE_DESCRIPTION("Display Output Switcher Lowlevel Control Abstraction");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Luming Yu <luming.yu@intel.com>");
-
-static ssize_t state_show(struct device *dev, struct device_attribute *attr,
-                         char *buf)
-{
-       ssize_t ret_size = 0;
-       struct output_device *od = to_output_device(dev);
-       if (od->props)
-               ret_size = sprintf(buf,"%.8x\n",od->props->get_status(od));
-       return ret_size;
-}
-
-static ssize_t state_store(struct device *dev, struct device_attribute *attr,
-                          const char *buf,size_t count)
-{
-       char *endp;
-       struct output_device *od = to_output_device(dev);
-       int request_state = simple_strtoul(buf,&endp,0);
-       size_t size = endp - buf;
-
-       if (isspace(*endp))
-               size++;
-       if (size != count)
-               return -EINVAL;
-
-       if (od->props) {
-               od->request_state = request_state;
-               od->props->set_state(od);
-       }
-       return count;
-}
-static DEVICE_ATTR_RW(state);
-
-static void video_output_release(struct device *dev)
-{
-       struct output_device *od = to_output_device(dev);
-       kfree(od);
-}
-
-static struct attribute *video_output_attrs[] = {
-       &dev_attr_state.attr,
-       NULL,
-};
-ATTRIBUTE_GROUPS(video_output);
-
-static struct class video_output_class = {
-       .name = "video_output",
-       .dev_release = video_output_release,
-       .dev_groups = video_output_groups,
-};
-
-struct output_device *video_output_register(const char *name,
-       struct device *dev,
-       void *devdata,
-       struct output_properties *op)
-{
-       struct output_device *new_dev;
-       int ret_code = 0;
-
-       new_dev = kzalloc(sizeof(struct output_device),GFP_KERNEL);
-       if (!new_dev) {
-               ret_code = -ENOMEM;
-               goto error_return;
-       }
-       new_dev->props = op;
-       new_dev->dev.class = &video_output_class;
-       new_dev->dev.parent = dev;
-       dev_set_name(&new_dev->dev, "%s", name);
-       dev_set_drvdata(&new_dev->dev, devdata);
-       ret_code = device_register(&new_dev->dev);
-       if (ret_code) {
-               kfree(new_dev);
-               goto error_return;
-       }
-       return new_dev;
-
-error_return:
-       return ERR_PTR(ret_code);
-}
-EXPORT_SYMBOL(video_output_register);
-
-void video_output_unregister(struct output_device *dev)
-{
-       if (!dev)
-               return;
-       device_unregister(&dev->dev);
-}
-EXPORT_SYMBOL(video_output_unregister);
-
-static void __exit video_output_class_exit(void)
-{
-       class_unregister(&video_output_class);
-}
-
-static int __init video_output_class_init(void)
-{
-       return class_register(&video_output_class);
-}
-
-postcore_initcall(video_output_class_init);
-module_exit(video_output_class_exit);
index 80875fb..3e62ee4 100644 (file)
@@ -313,7 +313,7 @@ static void acpi_processor_hotplug_notify(acpi_handle handle,
                goto out;
        }
 
-       (void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
+       (void) acpi_evaluate_ost(handle, event, ost_code, NULL);
 
 out:
        acpi_scan_lock_release();
index f8d1862..34e40b7 100644 (file)
@@ -285,7 +285,7 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
                return;
        }
 
-       (void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
+       (void) acpi_evaluate_ost(handle, event, ost_code, NULL);
        return;
 }
 
index 40c4bc0..f83b754 100644 (file)
@@ -77,27 +77,14 @@ static int acpi_pad_pur(acpi_handle handle)
        return num;
 }
 
-/* Notify firmware how many CPUs are idle */
-static void acpi_pad_ost(acpi_handle handle, int stat,
-       uint32_t idle_nums)
-{
-       union acpi_object params[3] = {
-               {.type = ACPI_TYPE_INTEGER,},
-               {.type = ACPI_TYPE_INTEGER,},
-               {.type = ACPI_TYPE_BUFFER,},
-       };
-       struct acpi_object_list arg_list = {3, params};
-
-       params[0].integer.value = ACPI_PROCESSOR_AGGREGATOR_NOTIFY;
-       params[1].integer.value =  stat;
-       params[2].buffer.length = 4;
-       params[2].buffer.pointer = (void *)&idle_nums;
-       acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
-}
-
 static void acpi_pad_handle_notify(acpi_handle handle)
 {
        int idle_nums;
+       struct acpi_buffer param = {
+               .length = 4,
+               .pointer = (void *)&idle_nums,
+       };
+
 
        mutex_lock(&xen_cpu_lock);
        idle_nums = acpi_pad_pur(handle);
@@ -109,7 +96,8 @@ static void acpi_pad_handle_notify(acpi_handle handle)
        idle_nums = xen_acpi_pad_idle_cpus(idle_nums)
                    ?: xen_acpi_pad_idle_cpus_num();
        if (idle_nums >= 0)
-               acpi_pad_ost(handle, 0, idle_nums);
+               acpi_evaluate_ost(handle, ACPI_PROCESSOR_AGGREGATOR_NOTIFY,
+                                 0, &param);
        mutex_unlock(&xen_cpu_lock);
 }
 
index c927a0b..88cb477 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 3ea214c..932a60d 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 4e280bd..8b06e4c 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 1f36777..3dd6e83 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 4607b02..1baae6e 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 6187877..ca0cb60 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -62,5 +62,8 @@
 #include <acpi/acrestyp.h>             /* Resource Descriptor structs */
 #include <acpi/acpiosxf.h>             /* OSL interfaces (ACPICA-to-OS) */
 #include <acpi/acpixf.h>               /* ACPI core subsystem external interfaces */
+#ifdef ACPI_NATIVE_INTERFACE_HEADER
+#include ACPI_NATIVE_INTERFACE_HEADER
+#endif
 
 #endif                         /* __ACPI_H__ */
index 8256eb4..84a2e29 100644 (file)
@@ -49,8 +49,8 @@ acpi_evaluate_reference(acpi_handle handle,
                        struct acpi_object_list *arguments,
                        struct acpi_handle_list *list);
 acpi_status
-acpi_evaluate_hotplug_ost(acpi_handle handle, u32 source_event,
-                       u32 status_code, struct acpi_buffer *status_buf);
+acpi_evaluate_ost(acpi_handle handle, u32 source_event, u32 status_code,
+                 struct acpi_buffer *status_buf);
 
 acpi_status
 acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld);
@@ -133,9 +133,23 @@ struct acpi_scan_handler {
        struct list_head list_node;
        int (*attach)(struct acpi_device *dev, const struct acpi_device_id *id);
        void (*detach)(struct acpi_device *dev);
+       void (*bind)(struct device *phys_dev);
+       void (*unbind)(struct device *phys_dev);
        struct acpi_hotplug_profile hotplug;
 };
 
+/*
+ * ACPI Hotplug Context
+ * --------------------
+ */
+
+struct acpi_hotplug_context {
+       struct acpi_device *self;
+       int (*notify)(struct acpi_device *, u32);
+       void (*uevent)(struct acpi_device *, u32);
+       void (*fixup)(struct acpi_device *);
+};
+
 /*
  * ACPI Driver
  * -----------
@@ -190,7 +204,9 @@ struct acpi_device_flags {
        u32 initialized:1;
        u32 visited:1;
        u32 no_hotplug:1;
-       u32 reserved:24;
+       u32 hotplug_notify:1;
+       u32 is_dock_station:1;
+       u32 reserved:22;
 };
 
 /* File System */
@@ -329,6 +345,7 @@ struct acpi_device {
        struct acpi_device_perf performance;
        struct acpi_device_dir dir;
        struct acpi_scan_handler *handler;
+       struct acpi_hotplug_context *hp;
        struct acpi_driver *driver;
        void *driver_data;
        struct device dev;
@@ -351,6 +368,24 @@ static inline void acpi_set_device_status(struct acpi_device *adev, u32 sta)
        *((u32 *)&adev->status) = sta;
 }
 
+static inline void acpi_set_hp_context(struct acpi_device *adev,
+                                      struct acpi_hotplug_context *hp,
+                                      int (*notify)(struct acpi_device *, u32),
+                                      void (*uevent)(struct acpi_device *, u32),
+                                      void (*fixup)(struct acpi_device *))
+{
+       hp->self = adev;
+       hp->notify = notify;
+       hp->uevent = uevent;
+       hp->fixup = fixup;
+       adev->hp = hp;
+}
+
+void acpi_initialize_hp_context(struct acpi_device *adev,
+                               struct acpi_hotplug_context *hp,
+                               int (*notify)(struct acpi_device *, u32),
+                               void (*uevent)(struct acpi_device *, u32));
+
 /* acpi_device.dev.bus == &acpi_bus_type */
 extern struct bus_type acpi_bus_type;
 
@@ -381,6 +416,8 @@ extern int unregister_acpi_notifier(struct notifier_block *);
  */
 
 int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device);
+struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle);
+void acpi_bus_put_acpi_device(struct acpi_device *adev);
 acpi_status acpi_bus_get_status_handle(acpi_handle handle,
                                       unsigned long long *sta);
 int acpi_bus_get_status(struct acpi_device *device);
@@ -402,6 +439,8 @@ static inline bool acpi_bus_can_wakeup(acpi_handle handle) { return false; }
 
 void acpi_scan_lock_acquire(void);
 void acpi_scan_lock_release(void);
+void acpi_lock_hp_context(void);
+void acpi_unlock_hp_context(void);
 int acpi_scan_add_handler(struct acpi_scan_handler *handler);
 int acpi_bus_register_driver(struct acpi_driver *driver);
 void acpi_bus_unregister_driver(struct acpi_driver *driver);
@@ -418,10 +457,6 @@ static inline bool acpi_device_enumerated(struct acpi_device *adev)
        return adev && adev->flags.initialized && adev->flags.visited;
 }
 
-typedef void (*acpi_hp_callback)(void *data, u32 src);
-
-acpi_status acpi_hotplug_execute(acpi_hp_callback func, void *data, u32 src);
-
 /**
  * module_acpi_driver(acpi_driver) - Helper macro for registering an ACPI driver
  * @__acpi_driver: acpi_driver struct
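
For illustration, here is a minimal sketch of how a subsystem might hook into the consolidated hotplug notification path using the struct acpi_hotplug_context and helpers added above. Every foo_* name is hypothetical and not part of this series, and error handling is reduced to the bare minimum.

#include <linux/acpi.h>

struct foo_context {
	struct acpi_hotplug_context hp;	/* must stay valid while installed */
	struct acpi_device *adev;	/* reference held until unbind */
};

/* Called by the ACPI core for hotplug notifications targeting the device
 * this context is installed on. */
static int foo_hotplug_notify(struct acpi_device *adev, u32 type)
{
	/* e.g. rescan or eject the corresponding physical device */
	return 0;
}

static void foo_hotplug_uevent(struct acpi_device *adev, u32 type)
{
	/* optionally forward the event to user space */
}

static int foo_bind_hotplug(struct foo_context *ctx, acpi_handle handle)
{
	/* Reference-counted lookup; unlike acpi_bus_get_device(), this keeps
	 * the struct acpi_device pinned until the matching put. */
	struct acpi_device *adev = acpi_bus_get_acpi_device(handle);

	if (!adev)
		return -ENODEV;

	acpi_lock_hp_context();
	acpi_set_hp_context(adev, &ctx->hp, foo_hotplug_notify,
			    foo_hotplug_uevent, NULL);
	acpi_unlock_hp_context();

	ctx->adev = adev;
	return 0;
}

static void foo_unbind_hotplug(struct foo_context *ctx)
{
	acpi_lock_hp_context();
	ctx->adev->hp = NULL;
	acpi_unlock_hp_context();
	acpi_bus_put_acpi_device(ctx->adev);	/* drop the reference */
}

The non-inline acpi_initialize_hp_context() declared above covers the common case of a context without a fixup callback.
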
index b124fdb..d504613 100644 (file)
@@ -109,36 +109,14 @@ void pci_acpi_crs_quirks(void);
 /*--------------------------------------------------------------------------
                                   Dock Station
   -------------------------------------------------------------------------- */
-struct acpi_dock_ops {
-       acpi_notify_handler fixup;
-       acpi_notify_handler handler;
-       acpi_notify_handler uevent;
-};
 
 #ifdef CONFIG_ACPI_DOCK
-extern int is_dock_device(acpi_handle handle);
-extern int register_hotplug_dock_device(acpi_handle handle,
-                                       const struct acpi_dock_ops *ops,
-                                       void *context,
-                                       void (*init)(void *),
-                                       void (*release)(void *));
-extern void unregister_hotplug_dock_device(acpi_handle handle);
+extern int is_dock_device(struct acpi_device *adev);
 #else
-static inline int is_dock_device(acpi_handle handle)
+static inline int is_dock_device(struct acpi_device *adev)
 {
        return 0;
 }
-static inline int register_hotplug_dock_device(acpi_handle handle,
-                                              const struct acpi_dock_ops *ops,
-                                              void *context,
-                                              void (*init)(void *),
-                                              void (*release)(void *))
-{
-       return -ENODEV;
-}
-static inline void unregister_hotplug_dock_device(acpi_handle handle)
-{
-}
 #endif /* CONFIG_ACPI_DOCK */
 
 #endif /*__ACPI_DRIVERS_H__*/
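
With acpi_dock_ops and the register/unregister_hotplug_dock_device() pair gone (dock events now travel through the hotplug context shown earlier), callers simply pass the struct acpi_device they already own. A trivial hypothetical example:

#include <linux/acpi.h>

static void foo_check_dock(struct acpi_device *adev)
{
	if (is_dock_device(adev))
		dev_info(&adev->dev,
			 "dock station or dock-dependent device\n");
}
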
index 01e6c6d..f6f5f8a 100644 (file)
@@ -7,7 +7,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index fea6773..b0b01b1 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -46,7 +46,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20131218
+#define ACPI_CA_VERSION                 0x20140214
 
 #include <acpi/acconfig.h>
 #include <acpi/actypes.h>
@@ -229,6 +229,10 @@ acpi_attach_data(acpi_handle object, acpi_object_handler handler, void *data);
 
 acpi_status acpi_detach_data(acpi_handle object, acpi_object_handler handler);
 
+acpi_status
+acpi_get_data_full(acpi_handle object, acpi_object_handler handler, void **data,
+                  void (*callback)(void *));
+
 acpi_status
 acpi_get_data(acpi_handle object, acpi_object_handler handler, void **data);
 
index cbf4bf9..eb760ca 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 325aeae..3b30e36 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 4ec8c19..212c65d 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 094a906..f337244 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 01c2a90..c2295cc 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 68a3ada..e763565 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #error ACPI_MACHINE_WIDTH not defined
 #endif
 
-/*! [Begin] no source code translation */
-
 /*
  * Data type ranges
  * Note: These macros are designed to be compiler independent as well as
  * working around problems that some 32-bit compilers have with 64-bit
  * constants.
  */
-#define ACPI_UINT8_MAX                  (UINT8) (~((UINT8)  0))        /* 0xFF               */
-#define ACPI_UINT16_MAX                 (UINT16)(~((UINT16) 0))        /* 0xFFFF             */
-#define ACPI_UINT32_MAX                 (UINT32)(~((UINT32) 0))        /* 0xFFFFFFFF         */
-#define ACPI_UINT64_MAX                 (UINT64)(~((UINT64) 0))        /* 0xFFFFFFFFFFFFFFFF */
+#define ACPI_UINT8_MAX                  (u8) (~((u8)  0))      /* 0xFF               */
+#define ACPI_UINT16_MAX                 (u16)(~((u16) 0))      /* 0xFFFF             */
+#define ACPI_UINT32_MAX                 (u32)(~((u32) 0))      /* 0xFFFFFFFF         */
+#define ACPI_UINT64_MAX                 (u64)(~((u64) 0))      /* 0xFFFFFFFFFFFFFFFF */
 #define ACPI_ASCII_MAX                  0x7F
 
 /*
  *
  * 1) The following types are of fixed size for all targets (16/32/64):
  *
- * BOOLEAN      Logical boolean
+ * u8           Logical boolean
  *
- * UINT8        8-bit  (1 byte) unsigned value
- * UINT16       16-bit (2 byte) unsigned value
- * UINT32       32-bit (4 byte) unsigned value
- * UINT64       64-bit (8 byte) unsigned value
+ * u8           8-bit  (1 byte) unsigned value
+ * u16          16-bit (2 byte) unsigned value
+ * u32          32-bit (4 byte) unsigned value
+ * u64          64-bit (8 byte) unsigned value
  *
- * INT16        16-bit (2 byte) signed value
- * INT32        32-bit (4 byte) signed value
- * INT64        64-bit (8 byte) signed value
+ * s16          16-bit (2 byte) signed value
+ * s32          32-bit (4 byte) signed value
+ * s64          64-bit (8 byte) signed value
  *
- * COMPILER_DEPENDENT_UINT64/INT64 - These types are defined in the
+ * COMPILER_DEPENDENT_UINT64/s64 - These types are defined in the
  * compiler-dependent header(s) and were introduced because there is no common
  * 64-bit integer type across the various compilation models, as shown in
  * the table below.
  * usually used for memory allocation, efficient loop counters, and array
  * indexes. The types are similar to the size_t type in the C library and are
  * required because there is no C type that consistently represents the native
- * data width. ACPI_SIZE is needed because there is no guarantee that a
+ * data width. acpi_size is needed because there is no guarantee that a
  * kernel-level C library is present.
  *
- * ACPI_SIZE        16/32/64-bit unsigned value
- * ACPI_NATIVE_INT  16/32/64-bit signed value
+ * acpi_size        16/32/64-bit unsigned value
+ * acpi_native_int  16/32/64-bit signed value
  */
 
 /*******************************************************************************
  *
  ******************************************************************************/
 
-typedef unsigned char BOOLEAN;
-typedef unsigned char UINT8;
-typedef unsigned short UINT16;
-typedef COMPILER_DEPENDENT_UINT64 UINT64;
-typedef COMPILER_DEPENDENT_INT64 INT64;
+#ifndef ACPI_USE_SYSTEM_INTTYPES
+
+typedef unsigned char u8;
+typedef unsigned char u8;
+typedef unsigned short u16;
+typedef COMPILER_DEPENDENT_UINT64 u64;
+typedef COMPILER_DEPENDENT_INT64 s64;
 
-/*! [End] no source code translation !*/
+#endif                         /* ACPI_USE_SYSTEM_INTTYPES */
 
 /*
  * Value returned by acpi_os_get_thread_id. There is no standard "thread_id"
@@ -149,12 +149,12 @@ typedef COMPILER_DEPENDENT_INT64 INT64;
 
 #if ACPI_MACHINE_WIDTH == 64
 
-/*! [Begin] no source code translation (keep the typedefs as-is) */
+#ifndef ACPI_USE_SYSTEM_INTTYPES
 
-typedef unsigned int UINT32;
-typedef int INT32;
+typedef unsigned int u32;
+typedef int s32;
 
-/*! [End] no source code translation !*/
+#endif                         /* ACPI_USE_SYSTEM_INTTYPES */
 
 typedef s64 acpi_native_int;
 
@@ -188,12 +188,12 @@ typedef u64 acpi_physical_address;
 
 #elif ACPI_MACHINE_WIDTH == 32
 
-/*! [Begin] no source code translation (keep the typedefs as-is) */
+#ifndef ACPI_USE_SYSTEM_INTTYPES
 
-typedef unsigned int UINT32;
-typedef int INT32;
+typedef unsigned int u32;
+typedef int s32;
 
-/*! [End] no source code translation !*/
+#endif                         /* ACPI_USE_SYSTEM_INTTYPES */
 
 typedef s32 acpi_native_int;
 
index b402eb6..e863dd5 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index e077ce6..a476b91 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 008aa28..93c55ed 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 
 #ifdef __KERNEL__
 
+#define ACPI_USE_SYSTEM_INTTYPES
+
+/* Compile for reduced hardware mode only with this kernel config */
+
+#ifdef CONFIG_ACPI_REDUCED_HARDWARE_ONLY
+#define ACPI_REDUCED_HARDWARE 1
+#endif
+
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/ctype.h>
 #include <ctype.h>
 #include <unistd.h>
 
+/* Disable kernel specific declarators */
+
+#ifndef __init
+#define __init
+#endif
+
+#ifndef __iomem
+#define __iomem
+#endif
+
 /* Host-dependent types and defines for user-space ACPICA */
 
 #define ACPI_FLUSH_CPU_CACHE()
index 1151a1d..6a15ddd 100644 (file)
@@ -108,6 +108,10 @@ static inline void acpi_initrd_override(void *data, size_t size)
 }
 #endif
 
+#define BAD_MADT_ENTRY(entry, end) (                                       \
+               (!entry) || (unsigned long)entry + sizeof(*entry) > end ||  \
+               ((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
+
 char * __acpi_map_table (unsigned long phys_addr, unsigned long size);
 void __acpi_unmap_table(char *map, unsigned long size);
 int early_acpi_boot_init(void);
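
BAD_MADT_ENTRY() centralizes the sanity check that every MADT subtable handler performs before touching an entry. A hypothetical handler (the names are examples only), typically registered via acpi_table_parse_madt(), would use it like this:

#include <linux/acpi.h>
#include <linux/kernel.h>

static int __init foo_parse_lapic(struct acpi_subtable_header *header,
				  const unsigned long end)
{
	struct acpi_madt_local_apic *lapic =
			(struct acpi_madt_local_apic *)header;

	/* Rejects NULL, truncated, and impossibly short subtables. */
	if (BAD_MADT_ENTRY(lapic, end))
		return -EINVAL;

	if (lapic->lapic_flags & ACPI_MADT_ENABLED)
		pr_debug("LAPIC id 0x%02x enabled\n", lapic->id);

	return 0;
}

/* e.g.: acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, foo_parse_lapic, 0); */
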
index 4d89e0e..2d2e62c 100644 (file)
@@ -74,6 +74,8 @@ struct cpufreq_policy {
        unsigned int            max;    /* in kHz */
        unsigned int            cur;    /* in kHz, only needed if cpufreq
                                         * governors are used */
+       unsigned int            suspend_freq; /* freq to set during suspend */
+
        unsigned int            policy; /* see above */
        struct cpufreq_governor *governor; /* see below */
        void                    *governor_data;
@@ -83,6 +85,7 @@ struct cpufreq_policy {
                                         * called, but you're in IRQ context */
 
        struct cpufreq_real_policy      user_policy;
+       struct cpufreq_frequency_table  *freq_table;
 
        struct list_head        policy_list;
        struct kobject          kobj;
@@ -224,6 +227,7 @@ struct cpufreq_driver {
        int     (*bios_limit)   (int cpu, unsigned int *limit);
 
        int     (*exit)         (struct cpufreq_policy *policy);
+       void    (*stop_cpu)     (struct cpufreq_policy *policy);
        int     (*suspend)      (struct cpufreq_policy *policy);
        int     (*resume)       (struct cpufreq_policy *policy);
        struct freq_attr        **attr;
@@ -296,6 +300,15 @@ cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy)
                        policy->cpuinfo.max_freq);
 }
 
+#ifdef CONFIG_CPU_FREQ
+void cpufreq_suspend(void);
+void cpufreq_resume(void);
+int cpufreq_generic_suspend(struct cpufreq_policy *policy);
+#else
+static inline void cpufreq_suspend(void) {}
+static inline void cpufreq_resume(void) {}
+#endif
+
 /*********************************************************************
  *                     CPUFREQ NOTIFIER INTERFACE                    *
  *********************************************************************/
@@ -306,8 +319,6 @@ cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy)
 /* Transition notifiers */
 #define CPUFREQ_PRECHANGE              (0)
 #define CPUFREQ_POSTCHANGE             (1)
-#define CPUFREQ_RESUMECHANGE           (8)
-#define CPUFREQ_SUSPENDCHANGE          (9)
 
 /* Policy Notifiers  */
 #define CPUFREQ_ADJUST                 (0)
@@ -463,7 +474,6 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
 int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
                unsigned int freq);
 
-void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy);
 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
 
 #ifdef CONFIG_CPU_FREQ
@@ -490,9 +500,6 @@ struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
 /* the following are really really optional */
 extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
 extern struct freq_attr *cpufreq_generic_attr[];
-void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
-                                     unsigned int cpu);
-void cpufreq_frequency_table_put_attr(unsigned int cpu);
 int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
                                      struct cpufreq_frequency_table *table);
 
@@ -500,10 +507,4 @@ unsigned int cpufreq_generic_get(unsigned int cpu);
 int cpufreq_generic_init(struct cpufreq_policy *policy,
                struct cpufreq_frequency_table *table,
                unsigned int transition_latency);
-static inline int cpufreq_generic_exit(struct cpufreq_policy *policy)
-{
-       cpufreq_frequency_table_put_attr(policy->cpu);
-       return 0;
-}
-
 #endif /* _LINUX_CPUFREQ_H */
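
The new suspend_freq field plus cpufreq_generic_suspend() let a driver that only needs to pin a known-safe frequency across system suspend avoid open-coding a ->suspend() callback, and with cpufreq_generic_exit() removed, drivers without extra teardown can drop ->exit() entirely. A hypothetical driver wiring this up (all foo_* names and the 400/800 MHz table are made up for the sketch):

#include <linux/cpufreq.h>

static struct cpufreq_frequency_table foo_freq_table[] = {
	{ .frequency = 400000 },		/* kHz */
	{ .frequency = 800000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int foo_target_index(struct cpufreq_policy *policy, unsigned int index)
{
	/* program clocks for foo_freq_table[index].frequency */
	return 0;
}

static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	/* Frequency forced by cpufreq_generic_suspend() during suspend. */
	policy->suspend_freq = 400000;
	return cpufreq_generic_init(policy, foo_freq_table, 300 * 1000);
}

static struct cpufreq_driver foo_cpufreq_driver = {
	.name		= "foo-cpufreq",
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= foo_target_index,
	.init		= foo_cpufreq_init,
	.suspend	= cpufreq_generic_suspend,
	.attr		= cpufreq_generic_attr,
};
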
index 5a462c4..637a608 100644 (file)
@@ -59,12 +59,12 @@ static inline void acpi_pci_slot_remove(struct pci_bus *bus) { }
 void acpiphp_init(void);
 void acpiphp_enumerate_slots(struct pci_bus *bus);
 void acpiphp_remove_slots(struct pci_bus *bus);
-void acpiphp_check_host_bridge(acpi_handle handle);
+void acpiphp_check_host_bridge(struct acpi_device *adev);
 #else
 static inline void acpiphp_init(void) { }
 static inline void acpiphp_enumerate_slots(struct pci_bus *bus) { }
 static inline void acpiphp_remove_slots(struct pci_bus *bus) { }
-static inline void acpiphp_check_host_bridge(acpi_handle handle) { }
+static inline void acpiphp_check_host_bridge(struct acpi_device *adev) { }
 #endif
 
 #else  /* CONFIG_ACPI */
index 8c6583a..d915d03 100644 (file)
@@ -264,9 +264,9 @@ typedef struct pm_message {
  *     registers, so that it is fully operational.
  *
  * @runtime_idle: Device appears to be inactive and it might be put into a
- *     low-power state if all of the necessary conditions are satisfied.  Check
- *     these conditions and handle the device as appropriate, possibly queueing
- *     a suspend request for it.  The return value is ignored by the PM core.
+ *     low-power state if all of the necessary conditions are satisfied.
+ *     Check these conditions, and return 0 if it's appropriate to let the PM
+ *     core queue a suspend request for the device.
  *
  * Refer to Documentation/power/runtime_pm.txt for more information about the
  * role of the above callbacks in device runtime power management.
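
A minimal ->runtime_idle() callback following the convention described above; foo_device_busy() is a stand-in for whatever activity check the driver actually needs:

static int foo_runtime_idle(struct device *dev)
{
	if (foo_device_busy(dev))
		return -EBUSY;	/* stay active; no suspend request is queued */

	return 0;	/* let the PM core queue a runtime-suspend request */
}
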
@@ -352,7 +352,7 @@ const struct dev_pm_ops name = { \
 
 /*
  * Use this for defining a set of PM operations to be used in all situations
- * (sustem suspend, hibernation or runtime PM).
+ * (system suspend, hibernation or runtime PM).
  * NOTE: In general, system suspend callbacks, .suspend() and .resume(), should
  * be different from the corresponding runtime PM callbacks, .runtime_suspend(),
  * and .runtime_resume(), because .runtime_suspend() always works on an already
@@ -379,7 +379,7 @@ const struct dev_pm_ops name = { \
  *
  * ON          No transition.
  *
- * FREEZE      System is going to hibernate, call ->prepare() and ->freeze()
+ * FREEZE      System is going to hibernate, call ->prepare() and ->freeze()
  *             for all devices.
  *
  * SUSPEND     System is going to suspend, call ->prepare() and ->suspend()
@@ -423,7 +423,7 @@ const struct dev_pm_ops name = { \
 
 #define PM_EVENT_INVALID       (-1)
 #define PM_EVENT_ON            0x0000
-#define PM_EVENT_FREEZE        0x0001
+#define PM_EVENT_FREEZE                0x0001
 #define PM_EVENT_SUSPEND       0x0002
 #define PM_EVENT_HIBERNATE     0x0004
 #define PM_EVENT_QUIESCE       0x0008
@@ -542,6 +542,8 @@ struct dev_pm_info {
        unsigned int            async_suspend:1;
        bool                    is_prepared:1;  /* Owned by the PM core */
        bool                    is_suspended:1; /* Ditto */
+       bool                    is_noirq_suspended:1;
+       bool                    is_late_suspended:1;
        bool                    ignore_children:1;
        bool                    early_init:1;   /* Owned by the PM core */
        spinlock_t              lock;
@@ -582,6 +584,7 @@ struct dev_pm_info {
        unsigned long           accounting_timestamp;
 #endif
        struct pm_subsys_data   *subsys_data;  /* Owned by the subsystem. */
+       void (*set_latency_tolerance)(struct device *, s32);
        struct dev_pm_qos       *qos;
 };
 
@@ -612,11 +615,11 @@ struct dev_pm_domain {
  * message is implicit:
  *
  * ON          Driver starts working again, responding to hardware events
- *             and software requests.  The hardware may have gone through
- *             a power-off reset, or it may have maintained state from the
- *             previous suspend() which the driver will rely on while
- *             resuming.  On most platforms, there are no restrictions on
- *             availability of resources like clocks during resume().
+ *             and software requests.  The hardware may have gone through
+ *             a power-off reset, or it may have maintained state from the
+ *             previous suspend() which the driver will rely on while
+ *             resuming.  On most platforms, there are no restrictions on
+ *             availability of resources like clocks during resume().
  *
  * Other transitions are triggered by messages sent using suspend().  All
  * these transitions quiesce the driver, so that I/O queues are inactive.
@@ -626,21 +629,21 @@ struct dev_pm_domain {
  * differ according to the message:
  *
  * SUSPEND     Quiesce, enter a low power device state appropriate for
- *             the upcoming system state (such as PCI_D3hot), and enable
- *             wakeup events as appropriate.
+ *             the upcoming system state (such as PCI_D3hot), and enable
+ *             wakeup events as appropriate.
  *
  * HIBERNATE   Enter a low power device state appropriate for the hibernation
- *             state (eg. ACPI S4) and enable wakeup events as appropriate.
+ *             state (eg. ACPI S4) and enable wakeup events as appropriate.
  *
  * FREEZE      Quiesce operations so that a consistent image can be saved;
- *             but do NOT otherwise enter a low power device state, and do
- *             NOT emit system wakeup events.
+ *             but do NOT otherwise enter a low power device state, and do
+ *             NOT emit system wakeup events.
  *
  * PRETHAW     Quiesce as if for FREEZE; additionally, prepare for restoring
- *             the system from a snapshot taken after an earlier FREEZE.
- *             Some drivers will need to reset their hardware state instead
- *             of preserving it, to ensure that it's never mistaken for the
- *             state which that earlier snapshot had set up.
+ *             the system from a snapshot taken after an earlier FREEZE.
+ *             Some drivers will need to reset their hardware state instead
+ *             of preserving it, to ensure that it's never mistaken for the
+ *             state which that earlier snapshot had set up.
  *
  * A minimally power-aware driver treats all messages as SUSPEND, fully
  * reinitializes its device during resume() -- whether or not it was reset
@@ -717,14 +720,26 @@ static inline void dpm_for_each_dev(void *data, void (*fn)(struct device *, void
 {
 }
 
-#define pm_generic_prepare     NULL
-#define pm_generic_suspend     NULL
-#define pm_generic_resume      NULL
-#define pm_generic_freeze      NULL
-#define pm_generic_thaw                NULL
-#define pm_generic_restore     NULL
-#define pm_generic_poweroff    NULL
-#define pm_generic_complete    NULL
+#define pm_generic_prepare             NULL
+#define pm_generic_suspend_late                NULL
+#define pm_generic_suspend_noirq       NULL
+#define pm_generic_suspend             NULL
+#define pm_generic_resume_early                NULL
+#define pm_generic_resume_noirq                NULL
+#define pm_generic_resume              NULL
+#define pm_generic_freeze_noirq                NULL
+#define pm_generic_freeze_late         NULL
+#define pm_generic_freeze              NULL
+#define pm_generic_thaw_noirq          NULL
+#define pm_generic_thaw_early          NULL
+#define pm_generic_thaw                        NULL
+#define pm_generic_restore_noirq       NULL
+#define pm_generic_restore_early       NULL
+#define pm_generic_restore             NULL
+#define pm_generic_poweroff_noirq      NULL
+#define pm_generic_poweroff_late       NULL
+#define pm_generic_poweroff            NULL
+#define pm_generic_complete            NULL
 #endif /* !CONFIG_PM_SLEEP */
 
 /* How to reorder dpm_list after device_move() */
index 5a95013..9ab4bf7 100644 (file)
@@ -32,7 +32,10 @@ enum pm_qos_flags_status {
 #define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE       (2000 * USEC_PER_SEC)
 #define PM_QOS_NETWORK_LAT_DEFAULT_VALUE       (2000 * USEC_PER_SEC)
 #define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE        0
-#define PM_QOS_DEV_LAT_DEFAULT_VALUE           0
+#define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE    0
+#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0
+#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1)
+#define PM_QOS_LATENCY_ANY                     ((s32)(~(__u32)0 >> 1))
 
 #define PM_QOS_FLAG_NO_POWER_OFF       (1 << 0)
 #define PM_QOS_FLAG_REMOTE_WAKEUP      (1 << 1)
@@ -49,7 +52,8 @@ struct pm_qos_flags_request {
 };
 
 enum dev_pm_qos_req_type {
-       DEV_PM_QOS_LATENCY = 1,
+       DEV_PM_QOS_RESUME_LATENCY = 1,
+       DEV_PM_QOS_LATENCY_TOLERANCE,
        DEV_PM_QOS_FLAGS,
 };
 
@@ -77,6 +81,7 @@ struct pm_qos_constraints {
        struct plist_head list;
        s32 target_value;       /* Do not change to 64 bit */
        s32 default_value;
+       s32 no_constraint_value;
        enum pm_qos_type type;
        struct blocking_notifier_head *notifiers;
 };
@@ -87,9 +92,11 @@ struct pm_qos_flags {
 };
 
 struct dev_pm_qos {
-       struct pm_qos_constraints latency;
+       struct pm_qos_constraints resume_latency;
+       struct pm_qos_constraints latency_tolerance;
        struct pm_qos_flags flags;
-       struct dev_pm_qos_request *latency_req;
+       struct dev_pm_qos_request *resume_latency_req;
+       struct dev_pm_qos_request *latency_tolerance_req;
        struct dev_pm_qos_request *flags_req;
 };
 
@@ -142,7 +149,8 @@ int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier);
 void dev_pm_qos_constraints_init(struct device *dev);
 void dev_pm_qos_constraints_destroy(struct device *dev);
 int dev_pm_qos_add_ancestor_request(struct device *dev,
-                                   struct dev_pm_qos_request *req, s32 value);
+                                   struct dev_pm_qos_request *req,
+                                   enum dev_pm_qos_req_type type, s32 value);
 #else
 static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev,
                                                          s32 mask)
@@ -185,7 +193,9 @@ static inline void dev_pm_qos_constraints_destroy(struct device *dev)
        dev->power.power_state = PMSG_INVALID;
 }
 static inline int dev_pm_qos_add_ancestor_request(struct device *dev,
-                                   struct dev_pm_qos_request *req, s32 value)
+                                                 struct dev_pm_qos_request *req,
+                                                 enum dev_pm_qos_req_type type,
+                                                 s32 value)
                        { return 0; }
 #endif
 
@@ -195,10 +205,12 @@ void dev_pm_qos_hide_latency_limit(struct device *dev);
 int dev_pm_qos_expose_flags(struct device *dev, s32 value);
 void dev_pm_qos_hide_flags(struct device *dev);
 int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set);
+s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev);
+int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val);
 
-static inline s32 dev_pm_qos_requested_latency(struct device *dev)
+static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
 {
-       return dev->power.qos->latency_req->data.pnode.prio;
+       return dev->power.qos->resume_latency_req->data.pnode.prio;
 }
 
 static inline s32 dev_pm_qos_requested_flags(struct device *dev)
@@ -214,8 +226,12 @@ static inline int dev_pm_qos_expose_flags(struct device *dev, s32 value)
 static inline void dev_pm_qos_hide_flags(struct device *dev) {}
 static inline int dev_pm_qos_update_flags(struct device *dev, s32 m, bool set)
                        { return 0; }
+static inline s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
+                       { return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; }
+static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
+                       { return 0; }
 
-static inline s32 dev_pm_qos_requested_latency(struct device *dev) { return 0; }
+static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; }
 static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
 #endif
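
For reference, a hypothetical leaf driver using the extended dev_pm_qos_add_ancestor_request() to propagate a latency-tolerance requirement up to an ancestor able to act on it (one providing ->set_latency_tolerance()); the 50 us value and the foo_* names are assumptions for this sketch, and DEV_PM_QOS_RESUME_LATENCY preserves the behaviour of former DEV_PM_QOS_LATENCY users.

#include <linux/pm_qos.h>

static struct dev_pm_qos_request foo_lat_req;

static int foo_probe(struct device *dev)
{
	int ret;

	ret = dev_pm_qos_add_ancestor_request(dev, &foo_lat_req,
					      DEV_PM_QOS_LATENCY_TOLERANCE,
					      50);	/* usecs, assumed */
	return ret < 0 ? ret : 0;
}

static void foo_remove(struct device *dev)
{
	dev_pm_qos_remove_request(&foo_lat_req);
}
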
 
index 16c9a62..2a5897a 100644 (file)
 #ifdef CONFIG_PM
 extern int pm_generic_runtime_suspend(struct device *dev);
 extern int pm_generic_runtime_resume(struct device *dev);
+extern int pm_runtime_force_suspend(struct device *dev);
+extern int pm_runtime_force_resume(struct device *dev);
 #else
 static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; }
 static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
+static inline int pm_runtime_force_suspend(struct device *dev) { return 0; }
+static inline int pm_runtime_force_resume(struct device *dev) { return 0; }
 #endif
 
 #ifdef CONFIG_PM_RUNTIME
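
pm_runtime_force_suspend() and pm_runtime_force_resume() are intended to be plugged in directly as system sleep callbacks by drivers whose runtime PM callbacks already do everything needed for a system-wide transition. A hypothetical driver would wire them up like this:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* gate clocks, put the device into a low-power state */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* ungate clocks, restore device state */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};
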
diff --git a/include/linux/video_output.h b/include/linux/video_output.h
deleted file mode 100644 (file)
index ed5cdeb..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- *
- *  Copyright (C) 2006 Luming Yu <luming.yu@intel.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or (at
- *  your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful, but
- *  WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- *  General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-#ifndef _LINUX_VIDEO_OUTPUT_H
-#define _LINUX_VIDEO_OUTPUT_H
-#include <linux/device.h>
-#include <linux/err.h>
-struct output_device;
-struct output_properties {
-       int (*set_state)(struct output_device *);
-       int (*get_status)(struct output_device *);
-};
-struct output_device {
-       int request_state;
-       struct output_properties *props;
-       struct device dev;
-};
-#define to_output_device(obj) container_of(obj, struct output_device, dev)
-#if    defined(CONFIG_VIDEO_OUTPUT_CONTROL) || defined(CONFIG_VIDEO_OUTPUT_CONTROL_MODULE)
-struct output_device *video_output_register(const char *name,
-       struct device *dev,
-       void *devdata,
-       struct output_properties *op);
-void video_output_unregister(struct output_device *dev);
-#else
-static struct output_device *video_output_register(const char *name,
-        struct device *dev,
-        void *devdata,
-        struct output_properties *op)
-{
-       return ERR_PTR(-ENODEV);
-}
-static void video_output_unregister(struct output_device *dev)
-{
-       return;
-}
-#endif
-#endif
index e5bf9a7..9a7e08d 100644 (file)
@@ -407,8 +407,8 @@ DECLARE_EVENT_CLASS(dev_pm_qos_request,
        TP_printk("device=%s type=%s new_value=%d",
                  __get_str(name),
                  __print_symbolic(__entry->type,
-                       { DEV_PM_QOS_LATENCY,   "DEV_PM_QOS_LATENCY" },
-                       { DEV_PM_QOS_FLAGS,     "DEV_PM_QOS_FLAGS" }),
+                       { DEV_PM_QOS_RESUME_LATENCY, "DEV_PM_QOS_RESUME_LATENCY" },
+                       { DEV_PM_QOS_FLAGS, "DEV_PM_QOS_FLAGS" }),
                  __entry->new_value)
 );
 
index 37170d4..f4f2073 100644 (file)
@@ -973,16 +973,20 @@ static ssize_t resume_show(struct kobject *kobj, struct kobj_attribute *attr,
 static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
                            const char *buf, size_t n)
 {
-       unsigned int maj, min;
        dev_t res;
-       int ret = -EINVAL;
+       int len = n;
+       char *name;
 
-       if (sscanf(buf, "%u:%u", &maj, &min) != 2)
-               goto out;
+       if (len && buf[len-1] == '\n')
+               len--;
+       name = kstrndup(buf, len, GFP_KERNEL);
+       if (!name)
+               return -ENOMEM;
 
-       res = MKDEV(maj,min);
-       if (maj != MAJOR(res) || min != MINOR(res))
-               goto out;
+       res = name_to_dev_t(name);
+       kfree(name);
+       if (!res)
+               return -EINVAL;
 
        lock_system_sleep();
        swsusp_resume_device = res;
@@ -990,9 +994,7 @@ static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
        printk(KERN_INFO "PM: Starting manual resume from disk\n");
        noresume = 0;
        software_resume();
-       ret = n;
- out:
-       return ret;
+       return n;
 }
 
 power_attr(resume);
index 1d1bf63..6271bc4 100644 (file)
@@ -282,8 +282,8 @@ struct kobject *power_kobj;
  *     state - control system power state.
  *
  *     show() returns what states are supported, which is hard-coded to
- *     'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and
- *     'disk' (Suspend-to-Disk).
+ *     'freeze' (Low-Power Idle), 'standby' (Power-On Suspend),
+ *     'mem' (Suspend-to-RAM), and 'disk' (Suspend-to-Disk).
  *
  *     store() accepts one of those strings, translates it into the
  *     proper enumerated value, and initiates a suspend transition.
index 7d4b7ff..1ca7531 100644 (file)
@@ -49,6 +49,8 @@ static inline char *check_image_kernel(struct swsusp_info *info)
  */
 #define SPARE_PAGES    ((1024 * 1024) >> PAGE_SHIFT)
 
+asmlinkage int swsusp_save(void);
+
 /* kernel/power/hibernate.c */
 extern bool freezer_test_done;
 
index 8dff9b4..884b770 100644 (file)
@@ -66,6 +66,7 @@ static struct pm_qos_constraints cpu_dma_constraints = {
        .list = PLIST_HEAD_INIT(cpu_dma_constraints.list),
        .target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
        .default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
+       .no_constraint_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
        .type = PM_QOS_MIN,
        .notifiers = &cpu_dma_lat_notifier,
 };
@@ -79,6 +80,7 @@ static struct pm_qos_constraints network_lat_constraints = {
        .list = PLIST_HEAD_INIT(network_lat_constraints.list),
        .target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
        .default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
+       .no_constraint_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
        .type = PM_QOS_MIN,
        .notifiers = &network_lat_notifier,
 };
@@ -93,6 +95,7 @@ static struct pm_qos_constraints network_tput_constraints = {
        .list = PLIST_HEAD_INIT(network_tput_constraints.list),
        .target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
        .default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
+       .no_constraint_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
        .type = PM_QOS_MAX,
        .notifiers = &network_throughput_notifier,
 };
@@ -128,7 +131,7 @@ static const struct file_operations pm_qos_power_fops = {
 static inline int pm_qos_get_value(struct pm_qos_constraints *c)
 {
        if (plist_head_empty(&c->list))
-               return c->default_value;
+               return c->no_constraint_value;
 
        switch (c->type) {
        case PM_QOS_MIN:
@@ -170,6 +173,7 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
 {
        unsigned long flags;
        int prev_value, curr_value, new_value;
+       int ret;
 
        spin_lock_irqsave(&pm_qos_lock, flags);
        prev_value = pm_qos_get_value(c);
@@ -205,13 +209,15 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
 
        trace_pm_qos_update_target(action, prev_value, curr_value);
        if (prev_value != curr_value) {
-               blocking_notifier_call_chain(c->notifiers,
-                                            (unsigned long)curr_value,
-                                            NULL);
-               return 1;
+               ret = 1;
+               if (c->notifiers)
+                       blocking_notifier_call_chain(c->notifiers,
+                                                    (unsigned long)curr_value,
+                                                    NULL);
        } else {
-               return 0;
+               ret = 0;
        }
+       return ret;
 }
 
 /**
index d9f61a1..149e745 100644 (file)
@@ -1268,7 +1268,7 @@ static void free_unnecessary_pages(void)
  * [number of saveable pages] - [number of pages that can be freed in theory]
  *
  * where the second term is the sum of (1) reclaimable slab pages, (2) active
- * and (3) inactive anonymouns pages, (4) active and (5) inactive file pages,
+ * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
  * minus mapped file pages.
  */
 static unsigned long minimum_image_size(unsigned long saveable)
index 62ee437..90b3d93 100644 (file)
@@ -39,7 +39,7 @@ static const struct platform_suspend_ops *suspend_ops;
 
 static bool need_suspend_ops(suspend_state_t state)
 {
-       return !!(state > PM_SUSPEND_FREEZE);
+       return state > PM_SUSPEND_FREEZE;
 }
 
 static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head);
index 8f50de3..019069c 100644 (file)
@@ -18,6 +18,8 @@
 #include <linux/rbtree.h>
 #include <linux/slab.h>
 
+#include "power.h"
+
 static DEFINE_MUTEX(wakelocks_lock);
 
 struct wakelock {