/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <trace/events/rpm.h>
#include "power.h"

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
        unsigned long now = jiffies;
        unsigned long delta;

        delta = now - dev->power.accounting_timestamp;

        dev->power.accounting_timestamp = now;

        if (dev->power.disable_depth > 0)
                return;

        if (dev->power.runtime_status == RPM_SUSPENDED)
                dev->power.suspended_jiffies += delta;
        else
                dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
        update_pm_runtime_accounting(dev);
        dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
        if (dev->power.timer_expires > 0) {
                del_timer(&dev->power.suspend_timer);
                dev->power.timer_expires = 0;
        }
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);
        /*
         * In case there's a request pending, make sure its work function will
         * return without doing anything.
         */
        dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
        int autosuspend_delay;
        long elapsed;
        unsigned long last_busy;
        unsigned long expires = 0;

        if (!dev->power.use_autosuspend)
                goto out;

        autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
        if (autosuspend_delay < 0)
                goto out;

        last_busy = ACCESS_ONCE(dev->power.last_busy);
        elapsed = jiffies - last_busy;
        if (elapsed < 0)
                goto out;       /* jiffies has wrapped around. */

        /*
         * If the autosuspend_delay is >= 1 second, align the timer by rounding
         * up to the nearest second.
         */
        expires = last_busy + msecs_to_jiffies(autosuspend_delay);
        if (autosuspend_delay >= 1000)
                expires = round_jiffies(expires);
        expires += !expires;    /* Make sure a nonzero value is returned. */
        if (elapsed >= expires - last_busy)
                expires = 0;    /* Already expired. */

 out:
        return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);

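/*
 * Illustrative sketch (not part of this file): power.last_busy, which the
 * computation above is based on, is normally refreshed by drivers through the
 * static inline helpers in <linux/pm_runtime.h>.  A hypothetical driver "foo"
 * would typically do something like this at the end of an I/O request:
 *
 *      pm_runtime_mark_last_busy(foo->dev);    // update power.last_busy
 *      pm_runtime_put_autosuspend(foo->dev);   // drop usage count, queue autosuspend
 *
 * so that the expiration time computed here keeps moving into the future for
 * as long as the device stays busy.
 */
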
/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
        int retval = 0;

        if (dev->power.runtime_error)
                retval = -EINVAL;
        else if (dev->power.disable_depth > 0)
                retval = -EACCES;
        else if (atomic_read(&dev->power.usage_count) > 0)
                retval = -EAGAIN;
        else if (!pm_children_suspended(dev))
                retval = -EBUSY;
        /* Pending resume requests take precedence over suspends. */
        else if ((dev->power.deferred_resume
                        && dev->power.runtime_status == RPM_SUSPENDING)
            || (dev->power.request_pending
                        && dev->power.request == RPM_REQ_RESUME))
                retval = -EAGAIN;
        else if (dev->power.runtime_status == RPM_SUSPENDED)
                retval = 1;

        return retval;
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int retval;

        if (dev->power.irq_safe)
                spin_unlock(&dev->power.lock);
        else
                spin_unlock_irq(&dev->power.lock);

        retval = cb(dev);

        if (dev->power.irq_safe)
                spin_lock(&dev->power.lock);
        else
                spin_lock_irq(&dev->power.lock);

        return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
        int (*callback)(struct device *);
        int retval;

        trace_rpm_idle(dev, rpmflags);
        retval = rpm_check_suspend_allowed(dev);
        if (retval < 0)
                ;       /* Conditions are wrong. */

        /* Idle notifications are allowed only in the RPM_ACTIVE state. */
        else if (dev->power.runtime_status != RPM_ACTIVE)
                retval = -EAGAIN;

        /*
         * Any pending request other than an idle notification takes
         * precedence over us, except that the timer may be running.
         */
        else if (dev->power.request_pending &&
            dev->power.request > RPM_REQ_IDLE)
                retval = -EAGAIN;

        /* Act as though RPM_NOWAIT is always set. */
        else if (dev->power.idle_notification)
                retval = -EINPROGRESS;
        if (retval)
                goto out;

        /* Pending requests need to be canceled. */
        dev->power.request = RPM_REQ_NONE;

        if (dev->power.no_callbacks) {
                /* Assume ->runtime_idle() callback would have suspended. */
                retval = rpm_suspend(dev, rpmflags);
                goto out;
        }

        /* Carry out an asynchronous or a synchronous idle notification. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = RPM_REQ_IDLE;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                goto out;
        }

        dev->power.idle_notification = true;

        if (dev->pm_domain)
                callback = dev->pm_domain->ops.runtime_idle;
        else if (dev->type && dev->type->pm)
                callback = dev->type->pm->runtime_idle;
        else if (dev->class && dev->class->pm)
                callback = dev->class->pm->runtime_idle;
        else if (dev->bus && dev->bus->pm)
                callback = dev->bus->pm->runtime_idle;
        else
                callback = NULL;

        if (callback)
                __rpm_callback(callback, dev);

        dev->power.idle_notification = false;
        wake_up_all(&dev->power.wait_queue);

 out:
        trace_rpm_return_int(dev, _THIS_IP_, retval);
        return retval;
}

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
        int retval;

        if (!cb)
                return -ENOSYS;

        retval = __rpm_callback(cb, dev);

        dev->power.runtime_error = retval;
        return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend. If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags. If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly. Once
 * ->runtime_suspend() has succeeded, if a deferred resume was requested
 * while the callback was running, carry it out; otherwise send an idle
 * notification for the device's parent (but only if the suspend succeeded
 * and both ignore_children of parent->power and irq_safe of dev->power are
 * not set). If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the
 * RPM_AUTO flag is set and the next autosuspend-delay expiration time is in
 * the future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int (*callback)(struct device *);
        struct device *parent = NULL;
        int retval;

        trace_rpm_suspend(dev, rpmflags);

 repeat:
        retval = rpm_check_suspend_allowed(dev);

        if (retval < 0)
                ;       /* Conditions are wrong. */

        /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
        else if (dev->power.runtime_status == RPM_RESUMING &&
            !(rpmflags & RPM_ASYNC))
                retval = -EAGAIN;
        if (retval)
                goto out;

        /* If the autosuspend_delay time hasn't expired yet, reschedule. */
        if ((rpmflags & RPM_AUTO)
            && dev->power.runtime_status != RPM_SUSPENDING) {
                unsigned long expires = pm_runtime_autosuspend_expiration(dev);

                if (expires != 0) {
                        /* Pending requests need to be canceled. */
                        dev->power.request = RPM_REQ_NONE;

                        /*
                         * Optimization: If the timer is already running and is
                         * set to expire at or before the autosuspend delay,
                         * avoid the overhead of resetting it.  Just let it
                         * expire; pm_suspend_timer_fn() will take care of the
                         * rest.
                         */
                        if (!(dev->power.timer_expires && time_before_eq(
                            dev->power.timer_expires, expires))) {
                                dev->power.timer_expires = expires;
                                mod_timer(&dev->power.suspend_timer, expires);
                        }
                        dev->power.timer_autosuspends = 1;
                        goto out;
                }
        }

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        if (dev->power.runtime_status == RPM_SUSPENDING) {
                DEFINE_WAIT(wait);

                if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
                        retval = -EINPROGRESS;
                        goto out;
                }

                if (dev->power.irq_safe) {
                        spin_unlock(&dev->power.lock);

                        cpu_relax();

                        spin_lock(&dev->power.lock);
                        goto repeat;
                }

                /* Wait for the other suspend running in parallel with us. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_SUSPENDING)
                                break;

                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
                goto repeat;
        }

        dev->power.deferred_resume = false;
        if (dev->power.no_callbacks)
                goto no_callback;       /* Assume success. */

        /* Carry out an asynchronous or a synchronous suspend. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = (rpmflags & RPM_AUTO) ?
                    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                goto out;
        }

        __update_runtime_status(dev, RPM_SUSPENDING);

        if (dev->pm_domain)
                callback = dev->pm_domain->ops.runtime_suspend;
        else if (dev->type && dev->type->pm)
                callback = dev->type->pm->runtime_suspend;
        else if (dev->class && dev->class->pm)
                callback = dev->class->pm->runtime_suspend;
        else if (dev->bus && dev->bus->pm)
                callback = dev->bus->pm->runtime_suspend;
        else
                callback = NULL;

        retval = rpm_callback(callback, dev);
        if (retval) {
                __update_runtime_status(dev, RPM_ACTIVE);
                dev->power.deferred_resume = false;
                if (retval == -EAGAIN || retval == -EBUSY) {
                        dev->power.runtime_error = 0;

                        /*
                         * If the callback routine failed an autosuspend, and
                         * if the last_busy time has been updated so that there
                         * is a new autosuspend expiration time, automatically
                         * reschedule another autosuspend.
                         */
                        if ((rpmflags & RPM_AUTO) &&
                            pm_runtime_autosuspend_expiration(dev) != 0)
                                goto repeat;
                } else {
                        pm_runtime_cancel_pending(dev);
                }
                wake_up_all(&dev->power.wait_queue);
                goto out;
        }
 no_callback:
        __update_runtime_status(dev, RPM_SUSPENDED);
        pm_runtime_deactivate_timer(dev);

        if (dev->parent) {
                parent = dev->parent;
                atomic_add_unless(&parent->power.child_count, -1, 0);
        }
        wake_up_all(&dev->power.wait_queue);

        if (dev->power.deferred_resume) {
                rpm_resume(dev, 0);
                retval = -EAGAIN;
                goto out;
        }

        /* Maybe the parent is now able to suspend. */
        if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
                spin_unlock(&dev->power.lock);

                spin_lock(&parent->power.lock);
                rpm_idle(parent, RPM_ASYNC);
                spin_unlock(&parent->power.lock);

                spin_lock(&dev->power.lock);
        }

 out:
        trace_rpm_return_int(dev, _THIS_IP_, retval);

        return retval;
}

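/*
 * Illustrative sketch (an assumption, not code from this file): the
 * -EAGAIN/-EBUSY handling above means a driver's ->runtime_suspend() callback
 * can veto an autosuspend attempt and have it retried later.  All foo_* names
 * below are made up; only dev_get_drvdata() and the PM helpers are real:
 *
 *      static int foo_runtime_suspend(struct device *dev)
 *      {
 *              struct foo_device *foo = dev_get_drvdata(dev);
 *
 *              if (foo_transfer_in_progress(foo)) {
 *                      pm_runtime_mark_last_busy(dev);
 *                      return -EBUSY;  // rpm_suspend() reschedules if RPM_AUTO
 *              }
 *              foo_power_down(foo);
 *              return 0;
 *      }
 */
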
/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on
 * the RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running
 * in parallel with this function, either tell the other process to resume
 * after suspending (deferred_resume) or wait for it to finish.  If the
 * RPM_ASYNC flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int (*callback)(struct device *);
        struct device *parent = NULL;
        int retval = 0;

        trace_rpm_resume(dev, rpmflags);

 repeat:
        if (dev->power.runtime_error)
                retval = -EINVAL;
        else if (dev->power.disable_depth > 0)
                retval = -EACCES;
        if (retval)
                goto out;

        /*
         * Other scheduled or pending requests need to be canceled.  Small
         * optimization: If an autosuspend timer is running, leave it running
         * rather than cancelling it now only to restart it again in the near
         * future.
         */
        dev->power.request = RPM_REQ_NONE;
        if (!dev->power.timer_autosuspends)
                pm_runtime_deactivate_timer(dev);

        if (dev->power.runtime_status == RPM_ACTIVE) {
                retval = 1;
                goto out;
        }

        if (dev->power.runtime_status == RPM_RESUMING
            || dev->power.runtime_status == RPM_SUSPENDING) {
                DEFINE_WAIT(wait);

                if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
                        if (dev->power.runtime_status == RPM_SUSPENDING)
                                dev->power.deferred_resume = true;
                        else
                                retval = -EINPROGRESS;
                        goto out;
                }

                if (dev->power.irq_safe) {
                        spin_unlock(&dev->power.lock);

                        cpu_relax();

                        spin_lock(&dev->power.lock);
                        goto repeat;
                }

                /* Wait for the operation carried out in parallel with us. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_RESUMING
                            && dev->power.runtime_status != RPM_SUSPENDING)
                                break;

                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
                goto repeat;
        }

        /*
         * See if we can skip waking up the parent.  This is safe only if
         * power.no_callbacks is set, because otherwise we don't know whether
         * the resume will actually succeed.
         */
        if (dev->power.no_callbacks && !parent && dev->parent) {
                spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
                if (dev->parent->power.disable_depth > 0
                    || dev->parent->power.ignore_children
                    || dev->parent->power.runtime_status == RPM_ACTIVE) {
                        atomic_inc(&dev->parent->power.child_count);
                        spin_unlock(&dev->parent->power.lock);
                        goto no_callback;       /* Assume success. */
                }
                spin_unlock(&dev->parent->power.lock);
        }

        /* Carry out an asynchronous or a synchronous resume. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = RPM_REQ_RESUME;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                retval = 0;
                goto out;
        }

        if (!parent && dev->parent) {
                /*
                 * Increment the parent's usage counter and resume it if
                 * necessary.  Not needed if dev is irq-safe; then the
                 * parent is permanently resumed.
                 */
                parent = dev->parent;
                if (dev->power.irq_safe)
                        goto skip_parent;
                spin_unlock(&dev->power.lock);

                pm_runtime_get_noresume(parent);

                spin_lock(&parent->power.lock);
                /*
                 * We can resume if the parent's runtime PM is disabled or it
                 * is set to ignore children.
                 */
                if (!parent->power.disable_depth
                    && !parent->power.ignore_children) {
                        rpm_resume(parent, 0);
                        if (parent->power.runtime_status != RPM_ACTIVE)
                                retval = -EBUSY;
                }
                spin_unlock(&parent->power.lock);

                spin_lock(&dev->power.lock);
                if (retval)
                        goto out;
                goto repeat;
        }
 skip_parent:

        if (dev->power.no_callbacks)
                goto no_callback;       /* Assume success. */

        __update_runtime_status(dev, RPM_RESUMING);

        if (dev->pm_domain)
                callback = dev->pm_domain->ops.runtime_resume;
        else if (dev->type && dev->type->pm)
                callback = dev->type->pm->runtime_resume;
        else if (dev->class && dev->class->pm)
                callback = dev->class->pm->runtime_resume;
        else if (dev->bus && dev->bus->pm)
                callback = dev->bus->pm->runtime_resume;
        else
                callback = NULL;

        retval = rpm_callback(callback, dev);
        if (retval) {
                __update_runtime_status(dev, RPM_SUSPENDED);
                pm_runtime_cancel_pending(dev);
        } else {
 no_callback:
                __update_runtime_status(dev, RPM_ACTIVE);
                if (parent)
                        atomic_inc(&parent->power.child_count);
        }
        wake_up_all(&dev->power.wait_queue);

        if (!retval)
                rpm_idle(dev, RPM_ASYNC);

 out:
        if (parent && !dev->power.irq_safe) {
                spin_unlock_irq(&dev->power.lock);

                pm_runtime_put(parent);

                spin_lock_irq(&dev->power.lock);
        }

        trace_rpm_return_int(dev, _THIS_IP_, retval);

        return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine
 * what is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
        struct device *dev = container_of(work, struct device, power.work);
        enum rpm_request req;

        spin_lock_irq(&dev->power.lock);

        if (!dev->power.request_pending)
                goto out;

        req = dev->power.request;
        dev->power.request = RPM_REQ_NONE;
        dev->power.request_pending = false;

        switch (req) {
        case RPM_REQ_NONE:
                break;
        case RPM_REQ_IDLE:
                rpm_idle(dev, RPM_NOWAIT);
                break;
        case RPM_REQ_SUSPEND:
                rpm_suspend(dev, RPM_NOWAIT);
                break;
        case RPM_REQ_AUTOSUSPEND:
                rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
                break;
        case RPM_REQ_RESUME:
                rpm_resume(dev, RPM_NOWAIT);
                break;
        }

 out:
        spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
        struct device *dev = (struct device *)data;
        unsigned long flags;
        unsigned long expires;

        spin_lock_irqsave(&dev->power.lock, flags);

        expires = dev->power.timer_expires;
        /* If 'expires' is after 'jiffies' we've been called too early. */
        if (expires > 0 && !time_after(expires, jiffies)) {
                dev->power.timer_expires = 0;
                rpm_suspend(dev, dev->power.timer_autosuspends ?
                    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
        }

        spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
        unsigned long flags;
        int retval;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (!delay) {
                retval = rpm_suspend(dev, RPM_ASYNC);
                goto out;
        }

        retval = rpm_check_suspend_allowed(dev);
        if (retval)
                goto out;

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
        dev->power.timer_expires += !dev->power.timer_expires;
        dev->power.timer_autosuspends = 0;
        mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);

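/*
 * Illustrative sketch (not part of this file): a driver that wants a fixed
 * grace period, rather than the autosuspend machinery, could call for example
 * from its request-completion path:
 *
 *      pm_schedule_suspend(dev, 250);  // ask for a suspend attempt in ~250 ms
 *
 * A runtime resume of the device in the meantime deactivates the timer again
 * (see the pm_runtime_deactivate_timer() call in rpm_resume() above).
 */
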
/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

        if (rpmflags & RPM_GET_PUT) {
                if (!atomic_dec_and_test(&dev->power.usage_count))
                        return 0;
        }

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_idle(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);

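/*
 * For reference (these wrappers live in <linux/pm_runtime.h>, not here): the
 * commonly used helpers map onto __pm_runtime_idle() roughly as follows:
 *
 *      pm_runtime_idle(dev)            __pm_runtime_idle(dev, 0)
 *      pm_request_idle(dev)            __pm_runtime_idle(dev, RPM_ASYNC)
 *      pm_runtime_put(dev)             __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC)
 *      pm_runtime_put_sync(dev)        __pm_runtime_idle(dev, RPM_GET_PUT)
 */
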
/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

        if (rpmflags & RPM_GET_PUT) {
                if (!atomic_dec_and_test(&dev->power.usage_count))
                        return 0;
        }

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_suspend(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

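/*
 * For reference (wrappers declared in <linux/pm_runtime.h>): the suspend-side
 * helpers map onto __pm_runtime_suspend() roughly as follows:
 *
 *      pm_runtime_suspend(dev)         __pm_runtime_suspend(dev, 0)
 *      pm_runtime_autosuspend(dev)     __pm_runtime_suspend(dev, RPM_AUTO)
 *      pm_request_autosuspend(dev)     __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO)
 *      pm_runtime_put_autosuspend(dev) __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_ASYNC | RPM_AUTO)
 */
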
/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

        if (rpmflags & RPM_GET_PUT)
                atomic_inc(&dev->power.usage_count);

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_resume(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);

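/*
 * For reference (wrappers declared in <linux/pm_runtime.h>): the resume-side
 * helpers map onto __pm_runtime_resume() roughly as follows:
 *
 *      pm_runtime_resume(dev)          __pm_runtime_resume(dev, 0)
 *      pm_request_resume(dev)          __pm_runtime_resume(dev, RPM_ASYNC)
 *      pm_runtime_get(dev)             __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC)
 *      pm_runtime_get_sync(dev)        __pm_runtime_resume(dev, RPM_GET_PUT)
 *
 * The usual driver pattern is therefore (sketch only, not from this file):
 *
 *      pm_runtime_get_sync(dev);       // make sure the device is powered
 *      ... access the hardware ...
 *      pm_runtime_put(dev);            // drop the reference, maybe idle it
 */
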
/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error
 * field and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
        struct device *parent = dev->parent;
        unsigned long flags;
        bool notify_parent = false;
        int error = 0;

        if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
                return -EINVAL;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (!dev->power.runtime_error && !dev->power.disable_depth) {
                error = -EAGAIN;
                goto out;
        }

        if (dev->power.runtime_status == status)
                goto out_set;

        if (status == RPM_SUSPENDED) {
                /* It always is possible to set the status to 'suspended'. */
                if (parent) {
                        atomic_add_unless(&parent->power.child_count, -1, 0);
                        notify_parent = !parent->power.ignore_children;
                }
                goto out_set;
        }

        if (parent) {
                spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

                /*
                 * It is invalid to put an active child under a parent that is
                 * not active, has runtime PM enabled and the
                 * 'power.ignore_children' flag unset.
                 */
                if (!parent->power.disable_depth
                    && !parent->power.ignore_children
                    && parent->power.runtime_status != RPM_ACTIVE)
                        error = -EBUSY;
                else if (dev->power.runtime_status == RPM_SUSPENDED)
                        atomic_inc(&parent->power.child_count);

                spin_unlock(&parent->power.lock);

                if (error)
                        goto out;
        }

 out_set:
        __update_runtime_status(dev, status);
        dev->power.runtime_error = 0;
 out:
        spin_unlock_irqrestore(&dev->power.lock, flags);

        if (notify_parent)
                pm_request_idle(parent);

        return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);

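/*
 * Illustrative sketch (not part of this file): buses and drivers normally call
 * this through the pm_runtime_set_active() / pm_runtime_set_suspended()
 * wrappers from <linux/pm_runtime.h>, typically while runtime PM is still
 * disabled during probe, e.g.:
 *
 *      pm_runtime_set_active(dev);     // hardware was left powered on
 *      pm_runtime_enable(dev);
 */
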
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);

        if (dev->power.request_pending) {
                dev->power.request = RPM_REQ_NONE;
                spin_unlock_irq(&dev->power.lock);

                cancel_work_sync(&dev->power.work);

                spin_lock_irq(&dev->power.lock);
                dev->power.request_pending = false;
        }

        if (dev->power.runtime_status == RPM_SUSPENDING
            || dev->power.runtime_status == RPM_RESUMING
            || dev->power.idle_notification) {
                DEFINE_WAIT(wait);

                /* Suspend, wake-up or idle notification in progress. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_SUSPENDING
                            && dev->power.runtime_status != RPM_RESUMING
                            && !dev->power.idle_notification)
                                break;
                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
        }
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and, if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
        int retval = 0;

        pm_runtime_get_noresume(dev);
        spin_lock_irq(&dev->power.lock);

        if (dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                rpm_resume(dev, 0);
                retval = 1;
        }

        __pm_runtime_barrier(dev);

        spin_unlock_irq(&dev->power.lock);
        pm_runtime_put_noidle(dev);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
        spin_lock_irq(&dev->power.lock);

        if (dev->power.disable_depth > 0) {
                dev->power.disable_depth++;
                goto out;
        }

        /*
         * Wake up the device if there's a resume request pending, because that
         * means there probably is some I/O to process and disabling runtime PM
         * shouldn't prevent the device from processing the I/O.
         */
        if (check_resume && dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                /*
                 * Prevent suspends and idle notifications from being carried
                 * out after we have woken up the device.
                 */
                pm_runtime_get_noresume(dev);

                rpm_resume(dev, 0);

                pm_runtime_put_noidle(dev);
        }

        if (!dev->power.disable_depth++)
                __pm_runtime_barrier(dev);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (dev->power.disable_depth > 0)
                dev->power.disable_depth--;
        else
                dev_warn(dev, "Unbalanced %s!\n", __func__);

        spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

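/*
 * Illustrative sketch (not part of this file): pm_runtime_enable() is expected
 * to be balanced with pm_runtime_disable(), which is a wrapper around
 * __pm_runtime_disable(dev, true) in <linux/pm_runtime.h>.  All foo_* names
 * below are made up:
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              pm_runtime_enable(&pdev->dev);
 *              return 0;
 *      }
 *
 *      static int foo_remove(struct platform_device *pdev)
 *      {
 *              pm_runtime_disable(&pdev->dev);
 *              return 0;
 *      }
 */
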
/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is
 * called for it.
 */
void pm_runtime_forbid(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        if (!dev->power.runtime_auto)
                goto out;

        dev->power.runtime_auto = false;
        atomic_inc(&dev->power.usage_count);
        rpm_resume(dev, 0);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        if (dev->power.runtime_auto)
                goto out;

        dev->power.runtime_auto = true;
        if (atomic_dec_and_test(&dev->power.usage_count))
                rpm_idle(dev, RPM_AUTO);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

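/*
 * Note (an assumption based on drivers/base/power/sysfs.c, not on this file):
 * pm_runtime_forbid() and pm_runtime_allow() are what the per-device
 * power/control sysfs attribute ends up calling, roughly:
 *
 *      echo on   > /sys/devices/.../power/control      -> pm_runtime_forbid()
 *      echo auto > /sys/devices/.../power/control      -> pm_runtime_allow()
 */
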
/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        dev->power.no_callbacks = 1;
        spin_unlock_irq(&dev->power.lock);
        if (device_is_registered(dev))
                rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
        if (dev->parent)
                pm_runtime_get_sync(dev->parent);
        spin_lock_irq(&dev->power.lock);
        dev->power.irq_safe = 1;
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);

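/*
 * Illustrative sketch (not part of this file): a driver whose runtime PM
 * callbacks only touch registers, and which needs to resume the device from
 * interrupt context, would opt in once during probe:
 *
 *      pm_runtime_irq_safe(dev);
 *
 * after which calls such as pm_runtime_get_sync(dev) become legal in atomic
 * context for that device.
 */
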
/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
        int delay = dev->power.autosuspend_delay;

        /* Should runtime suspend be prevented now? */
        if (dev->power.use_autosuspend && delay < 0) {

                /* If it used to be allowed then prevent it. */
                if (!old_use || old_delay >= 0) {
                        atomic_inc(&dev->power.usage_count);
                        rpm_resume(dev, 0);
                }
        }

        /* Runtime suspend should be allowed now. */
        else {

                /* If it used to be prevented then allow it. */
                if (old_use && old_delay < 0)
                        atomic_dec(&dev->power.usage_count);

                /* Maybe we can autosuspend now. */
                rpm_idle(dev, RPM_AUTO);
        }
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
        int old_delay, old_use;

        spin_lock_irq(&dev->power.lock);
        old_delay = dev->power.autosuspend_delay;
        old_use = dev->power.use_autosuspend;
        dev->power.autosuspend_delay = delay;
        update_autosuspend(dev, old_delay, old_use);
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
        int old_delay, old_use;

        spin_lock_irq(&dev->power.lock);
        old_delay = dev->power.autosuspend_delay;
        old_use = dev->power.use_autosuspend;
        dev->power.use_autosuspend = use;
        update_autosuspend(dev, old_delay, old_use);
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);

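/*
 * For reference (wrappers declared in <linux/pm_runtime.h>):
 * pm_runtime_use_autosuspend(dev) and pm_runtime_dont_use_autosuspend(dev) are
 * thin wrappers around __pm_runtime_use_autosuspend(dev, true/false).  A
 * typical autosuspend setup in a driver's probe path is (sketch only):
 *
 *      pm_runtime_set_autosuspend_delay(dev, 2000);    // 2 s of inactivity
 *      pm_runtime_use_autosuspend(dev);
 *      pm_runtime_enable(dev);
 */
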
/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
        dev->power.runtime_status = RPM_SUSPENDED;
        dev->power.idle_notification = false;

        dev->power.disable_depth = 1;
        atomic_set(&dev->power.usage_count, 0);

        dev->power.runtime_error = 0;

        atomic_set(&dev->power.child_count, 0);
        pm_suspend_ignore_children(dev, false);
        dev->power.runtime_auto = true;

        dev->power.request_pending = false;
        dev->power.request = RPM_REQ_NONE;
        dev->power.deferred_resume = false;
        dev->power.accounting_timestamp = jiffies;
        INIT_WORK(&dev->power.work, pm_runtime_work);

        dev->power.timer_expires = 0;
        setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
                        (unsigned long)dev);

        init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
        __pm_runtime_disable(dev, false);

        /* Change the status back to 'suspended' to match the initial status. */
        if (dev->power.runtime_status == RPM_ACTIVE)
                pm_runtime_set_suspended(dev);
        if (dev->power.irq_safe && dev->parent)
                pm_runtime_put_sync(dev->parent);
}