/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <trace/events/rpm.h>
#include "power.h"

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	int delta;

	delta = now - dev->power.accounting_timestamp;
	if (delta < 0)
		delta = 0;

	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	long elapsed;
	unsigned long last_busy;
	unsigned long expires = 0;

	if (!dev->power.use_autosuspend)
		goto out;

	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		goto out;

	last_busy = ACCESS_ONCE(dev->power.last_busy);
	elapsed = jiffies - last_busy;
	if (elapsed < 0)
		goto out;	/* jiffies has wrapped around. */

	/*
	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
	 * up to the nearest second.
	 */
	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
	if (autosuspend_delay >= 1000)
		expires = round_jiffies(expires);
	expires += !expires;
	if (elapsed >= expires - last_busy)
		expires = 0;	/* Already expired. */

 out:
	return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
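
/*
 * Illustrative sketch (editor's example, not part of the original file; the
 * foo_* names are hypothetical): a driver that relies on autosuspend usually
 * records activity with pm_runtime_mark_last_busy() and drops its usage count
 * with pm_runtime_put_autosuspend(), so the expiration time computed above
 * reflects the last I/O:
 *
 *	static ssize_t foo_read(struct foo_device *foo, char *buf, size_t len)
 *	{
 *		ssize_t ret;
 *
 *		ret = pm_runtime_get_sync(foo->dev);
 *		if (ret < 0) {
 *			pm_runtime_put_noidle(foo->dev);
 *			return ret;
 *		}
 *
 *		ret = foo_hw_read(foo, buf, len);
 *
 *		pm_runtime_mark_last_busy(foo->dev);
 *		pm_runtime_put_autosuspend(foo->dev);
 *		return ret;
 *	}
 */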

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval;

	if (dev->power.irq_safe)
		spin_unlock(&dev->power.lock);
	else
		spin_unlock_irq(&dev->power.lock);

	retval = cb(dev);

	if (dev->power.irq_safe)
		spin_lock(&dev->power.lock);
	else
		spin_lock_irq(&dev->power.lock);

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks) {
		/* Assume ->runtime_idle() callback would have suspended. */
		retval = rpm_suspend(dev, rpmflags);
		goto out;
	}

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	dev->power.idle_notification = true;

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_idle;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_idle;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_idle;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_idle;
	else
		callback = NULL;

	if (callback)
		__rpm_callback(callback, dev);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);
	return retval;
}
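
/*
 * Illustrative sketch (editor's example; foo_* names are hypothetical): the
 * ->runtime_idle() callback invoked above is supplied by a bus type, class,
 * PM domain or driver.  A common pattern is to check device-specific
 * conditions and then request a suspend itself:
 *
 *	static int foo_runtime_idle(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *
 *		if (foo_hw_busy(foo))
 *			return -EBUSY;
 *
 *		pm_runtime_autosuspend(dev);
 *		return 0;
 *	}
 */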

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	retval = __rpm_callback(cb, dev);

	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another suspend has been started earlier, either return immediately or wait
 * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags.  Cancel a
 * pending idle notification.  If the RPM_ASYNC flag is set then queue a
 * suspend request; otherwise run the ->runtime_suspend() callback directly.
 * If a deferred resume was requested while the callback was running then carry
 * it out; otherwise send an idle notification for the device (if the suspend
 * failed) or for its parent (if the suspend succeeded).
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		unsigned long expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires && time_before_eq(
			    dev->power.timer_expires, expires))) {
				dev->power.timer_expires = expires;
				mod_timer(&dev->power.suspend_timer, expires);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	dev->power.deferred_resume = false;
	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_suspend;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_suspend;
	else
		callback = NULL;

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_ACTIVE);
		dev->power.deferred_resume = false;
		if (retval == -EAGAIN || retval == -EBUSY)
			dev->power.runtime_error = 0;
		else
			pm_runtime_cancel_pending(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_deactivate_timer(dev);

		if (dev->parent) {
			parent = dev->parent;
			atomic_add_unless(&parent->power.child_count, -1, 0);
		}
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;
}
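
/*
 * Illustrative sketch (editor's example; foo_* names are hypothetical): the
 * ->runtime_suspend() callback run through rpm_callback() above may return
 * -EAGAIN or -EBUSY to abort the suspend without flagging a runtime PM error,
 * which is exactly what the retval checks in rpm_suspend() handle:
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *
 *		if (foo_transfer_in_progress(foo))
 *			return -EBUSY;
 *
 *		foo_save_registers(foo);
 *		foo_power_down(foo);
 *		return 0;
 *	}
 */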

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's runtime PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_resume;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_resume;
	else
		callback = NULL;

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (!retval)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;
}
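
/*
 * Illustrative sketch (editor's example; foo_* names are hypothetical): the
 * matching ->runtime_resume() callback restores whatever ->runtime_suspend()
 * saved; a nonzero return value puts the device back into RPM_SUSPENDED in
 * the error path above.
 *
 *	static int foo_runtime_resume(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *		int ret;
 *
 *		ret = foo_power_up(foo);
 *		if (ret)
 *			return ret;
 *
 *		foo_restore_registers(foo);
 *		return 0;
 *	}
 */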

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine
 * what is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	dev->power.timer_expires += !dev->power.timer_expires;
	dev->power.timer_autosuspends = 0;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
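
/*
 * Illustrative sketch (editor's example; foo_* names are hypothetical): a
 * driver that knows its device will be unused for a while can schedule a
 * suspend request instead of suspending synchronously, for instance when a
 * transfer completes:
 *
 *	static void foo_transfer_done(struct foo_device *foo)
 *	{
 *		foo_complete_requests(foo);
 *		pm_schedule_suspend(foo->dev, 100);
 *	}
 */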

/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);
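
/*
 * Editor's illustration: this is the backend for several static inline
 * helpers, assuming the usual wrappers in include/linux/pm_runtime.h, e.g.:
 *
 *	pm_runtime_idle(dev)      ->  __pm_runtime_idle(dev, 0);
 *	pm_request_idle(dev)      ->  __pm_runtime_idle(dev, RPM_ASYNC);
 *	pm_runtime_put(dev)       ->  __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
 *	pm_runtime_put_sync(dev)  ->  __pm_runtime_idle(dev, RPM_GET_PUT);
 */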

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
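
/*
 * Editor's illustration, assuming the usual pm_runtime.h wrappers:
 *
 *	pm_runtime_suspend(dev)          ->  __pm_runtime_suspend(dev, 0);
 *	pm_runtime_autosuspend(dev)      ->  __pm_runtime_suspend(dev, RPM_AUTO);
 *	pm_request_autosuspend(dev)      ->  __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO);
 *	pm_runtime_put_autosuspend(dev)  ->  __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_ASYNC | RPM_AUTO);
 */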

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
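
/*
 * Editor's illustration, assuming the usual pm_runtime.h wrappers:
 *
 *	pm_runtime_resume(dev)    ->  __pm_runtime_resume(dev, 0);
 *	pm_request_resume(dev)    ->  __pm_runtime_resume(dev, RPM_ASYNC);
 *	pm_runtime_get(dev)       ->  __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC);
 *	pm_runtime_get_sync(dev)  ->  __pm_runtime_resume(dev, RPM_GET_PUT);
 */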

/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It always is possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
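
/*
 * Illustrative sketch (editor's example; foo_* names are hypothetical): a
 * typical probe path for a device that comes up powered declares that fact
 * with pm_runtime_set_active(), a wrapper around this function, before
 * enabling runtime PM:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_device *foo = foo_setup(pdev);
 *
 *		if (IS_ERR(foo))
 *			return PTR_ERR(foo);
 *
 *		pm_runtime_set_active(&pdev->dev);
 *		pm_runtime_enable(&pdev->dev);
 *		return 0;
 *	}
 */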

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and, if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is
 * called for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
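
/*
 * Illustrative sketch (editor's example; foo_* names are hypothetical):
 * pm_runtime_no_callbacks() is typically called for a child device that has
 * no power state of its own, e.g. a logical sub-device whose power is
 * entirely controlled by its parent:
 *
 *	static void foo_setup_port(struct foo_device *foo, struct device *port)
 *	{
 *		pm_runtime_no_callbacks(port);
 *		pm_runtime_set_active(port);
 *		pm_runtime_enable(port);
 *	}
 */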

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
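
/*
 * Illustrative sketch (editor's example; hypothetical names): a driver whose
 * ->runtime_suspend()/->runtime_resume() callbacks are safe to run with
 * interrupts disabled, and which needs to resume the device from atomic
 * context, declares that before enabling runtime PM:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		pm_runtime_irq_safe(&pdev->dev);
 *		pm_runtime_enable(&pdev->dev);
 *		return 0;
 *	}
 */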

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
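
/*
 * Illustrative sketch (editor's example; hypothetical probe context): drivers
 * normally configure autosuspend through the pm_runtime_use_autosuspend() and
 * pm_runtime_set_autosuspend_delay() wrappers at probe time, e.g.:
 *
 *	pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
 *	pm_runtime_use_autosuspend(&pdev->dev);
 *	pm_runtime_set_active(&pdev->dev);
 *	pm_runtime_enable(&pdev->dev);
 */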

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
	if (dev->power.irq_safe && dev->parent)
		pm_runtime_put_sync(dev->parent);
}