/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <trace/events/rpm.h>
#include "power.h"

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	unsigned long delta;

	delta = now - dev->power.accounting_timestamp;

	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	long elapsed;
	unsigned long last_busy;
	unsigned long expires = 0;

	if (!dev->power.use_autosuspend)
		goto out;

	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		goto out;

	last_busy = ACCESS_ONCE(dev->power.last_busy);
	elapsed = jiffies - last_busy;
	if (elapsed < 0)
		goto out;	/* jiffies has wrapped around. */

	/*
	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
	 * up to the nearest second.
	 */
	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
	if (autosuspend_delay >= 1000)
		expires = round_jiffies(expires);
	expires += !expires;
	if (elapsed >= expires - last_busy)
		expires = 0;	/* Already expired. */

 out:
	return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
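
/*
 * Illustrative sketch (not part of this file): a driver that uses
 * autosuspend typically refreshes power.last_busy and drops its usage count
 * when an I/O request completes, so that the expiration time computed above
 * keeps moving into the future while the device is busy.  The
 * foo_complete_request() name below is hypothetical.
 *
 *	static void foo_complete_request(struct device *dev)
 *	{
 *		pm_runtime_mark_last_busy(dev);
 *		pm_runtime_put_autosuspend(dev);
 *	}
 */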

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval;

	if (dev->power.irq_safe)
		spin_unlock(&dev->power.lock);
	else
		spin_unlock_irq(&dev->power.lock);

	retval = cb(dev);

	if (dev->power.irq_safe)
		spin_lock(&dev->power.lock);
	else
		spin_lock_irq(&dev->power.lock);

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks) {
		/* Assume ->runtime_idle() callback would have suspended. */
		retval = rpm_suspend(dev, rpmflags);
		goto out;
	}

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	dev->power.idle_notification = true;

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_idle;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_idle;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_idle;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_idle;
	else
		callback = NULL;

	if (callback)
		__rpm_callback(callback, dev);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);
	return retval;
}
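
/*
 * Illustrative sketch (not part of this file): the ->runtime_idle() callback
 * chosen above is normally supplied through a struct dev_pm_ops at the
 * subsystem or driver level.  The foo_* names are hypothetical; a common
 * pattern is an idle callback that simply requests a suspend.
 *
 *	static int foo_runtime_idle(struct device *dev)
 *	{
 *		pm_runtime_suspend(dev);
 *		return 0;
 *	}
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume,
 *				   foo_runtime_idle)
 *	};
 */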

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	retval = __rpm_callback(cb, dev);

	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  If
 * ->runtime_suspend() succeeds and a deferred resume was requested while
 * the callback was running, carry it out; otherwise send an idle
 * notification for the device's parent (if the suspend succeeded and both
 * ignore_children of parent->power and irq_safe of dev->power are not set).
 * If ->runtime_suspend() failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 * flag is set and the next autosuspend-delay expiration time is in the
 * future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		unsigned long expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires && time_before_eq(
			    dev->power.timer_expires, expires))) {
				dev->power.timer_expires = expires;
				mod_timer(&dev->power.suspend_timer, expires);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_suspend;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_suspend;
	else
		callback = NULL;

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_ACTIVE);
		dev->power.deferred_resume = false;
		if (retval == -EAGAIN || retval == -EBUSY) {
			dev->power.runtime_error = 0;

			/*
			 * If the callback routine failed an autosuspend, and
			 * if the last_busy time has been updated so that there
			 * is a new autosuspend expiration time, automatically
			 * reschedule another autosuspend.
			 */
			if ((rpmflags & RPM_AUTO) &&
			    pm_runtime_autosuspend_expiration(dev) != 0)
				goto repeat;
		} else {
			pm_runtime_cancel_pending(dev);
		}
		wake_up_all(&dev->power.wait_queue);
		goto out;
	}

 no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;
}

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			retval = 1;
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's runtime PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_resume;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_resume;
	else
		callback = NULL;

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (retval >= 0)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;
}
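
/*
 * Illustrative sketch (not part of this file): drivers normally reach
 * rpm_resume() and rpm_suspend() indirectly, by bracketing hardware accesses
 * with the get/put helpers declared in include/linux/pm_runtime.h.  The
 * foo_do_io() name is hypothetical.
 *
 *	static int foo_do_io(struct device *dev)
 *	{
 *		int ret;
 *
 *		ret = pm_runtime_get_sync(dev);
 *		if (ret < 0) {
 *			pm_runtime_put_noidle(dev);
 *			return ret;
 *		}
 *
 *		... access the hardware ...
 *
 *		pm_runtime_mark_last_busy(dev);
 *		pm_runtime_put_autosuspend(dev);
 *		return 0;
 *	}
 */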

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine
 * what is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	dev->power.timer_expires += !dev->power.timer_expires;
	dev->power.timer_autosuspends = 0;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
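
/*
 * Illustrative sketch (not part of this file): a driver that expects the
 * device to stay idle for a while can queue a suspend request after a delay
 * instead of suspending right away; the 5000 ms value below is an arbitrary
 * example.  A nonzero return value means that no new suspend was set up
 * (an error occurred or the device is already suspended).
 *
 *	if (pm_schedule_suspend(dev, 5000))
 *		dev_dbg(dev, "runtime suspend not scheduled\n");
 */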

/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
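
/*
 * For reference (a rough paraphrase of include/linux/pm_runtime.h, not a
 * copy): the helpers most drivers call are thin wrappers around the three
 * entry points above, approximately:
 *
 *	pm_runtime_get(dev)		-> __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC)
 *	pm_runtime_get_sync(dev)	-> __pm_runtime_resume(dev, RPM_GET_PUT)
 *	pm_runtime_put(dev)		-> __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC)
 *	pm_runtime_put_sync(dev)	-> __pm_runtime_idle(dev, RPM_GET_PUT)
 *	pm_runtime_put_autosuspend(dev)	-> __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_ASYNC | RPM_AUTO)
 */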

/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It always is possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
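
/*
 * Illustrative sketch (not part of this file): a driver or bus that powers
 * the device up by hand before enabling runtime PM reports that to the core
 * with pm_runtime_set_active(), a wrapper around
 * __pm_runtime_set_status(dev, RPM_ACTIVE).  The foo_power_on() helper is
 * hypothetical.
 *
 *	foo_power_on(dev);
 *	pm_runtime_set_active(dev);
 *	pm_runtime_enable(dev);
 */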

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and, if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and, if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
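
/*
 * Illustrative sketch (not part of this file): pm_runtime_enable() calls are
 * expected to be balanced by pm_runtime_disable(), a wrapper around
 * __pm_runtime_disable(dev, true), typically in ->remove() or in a probe
 * error path.  The foo_remove() name is hypothetical.
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		pm_runtime_disable(&pdev->dev);
 *		return 0;
 *	}
 */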

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is
 * called for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);
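
/*
 * Note: pm_runtime_forbid() and pm_runtime_allow() back the per-device
 * power/control attribute in sysfs; writing "on" to it forbids runtime
 * suspend and writing "auto" allows it again, e.g. (from user space):
 *
 *	echo auto > /sys/devices/.../power/control
 */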

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
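
/*
 * Illustrative sketch (not part of this file): a driver whose runtime PM
 * callbacks can safely run with interrupts disabled declares that once,
 * typically at probe time, and may then call the synchronous helpers such as
 * pm_runtime_get_sync() from atomic context:
 *
 *	pm_runtime_irq_safe(dev);
 */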

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
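
/*
 * Illustrative sketch (not part of this file): typical probe-time setup of
 * the autosuspend machinery configured by the two helpers above.  The
 * 2000 ms delay is an arbitrary example; pm_runtime_use_autosuspend() is the
 * wrapper for __pm_runtime_use_autosuspend(dev, true).
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_set_active(dev);
 *	pm_runtime_enable(dev);
 */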

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
	if (dev->power.irq_safe && dev->parent)
		pm_runtime_put_sync(dev->parent);
}