/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <trace/events/rpm.h>
#include "power.h"

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
        unsigned long now = jiffies;
        unsigned long delta;

        delta = now - dev->power.accounting_timestamp;

        dev->power.accounting_timestamp = now;

        if (dev->power.disable_depth > 0)
                return;

        if (dev->power.runtime_status == RPM_SUSPENDED)
                dev->power.suspended_jiffies += delta;
        else
                dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
        update_pm_runtime_accounting(dev);
        dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
        if (dev->power.timer_expires > 0) {
                del_timer(&dev->power.suspend_timer);
                dev->power.timer_expires = 0;
        }
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);
        /*
         * In case there's a request pending, make sure its work function will
         * return without doing anything.
         */
        dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
        int autosuspend_delay;
        long elapsed;
        unsigned long last_busy;
        unsigned long expires = 0;

        if (!dev->power.use_autosuspend)
                goto out;

        autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
        if (autosuspend_delay < 0)
                goto out;

        last_busy = ACCESS_ONCE(dev->power.last_busy);
        elapsed = jiffies - last_busy;
        if (elapsed < 0)
                goto out;       /* jiffies has wrapped around. */

        /*
         * If the autosuspend_delay is >= 1 second, align the timer by rounding
         * up to the nearest second.
         */
        expires = last_busy + msecs_to_jiffies(autosuspend_delay);
        if (autosuspend_delay >= 1000)
                expires = round_jiffies(expires);
        expires += !expires;
        if (elapsed >= expires - last_busy)
                expires = 0;    /* Already expired. */

 out:
        return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
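
/*
 * Illustrative sketch, not part of the original file: how a driver typically
 * feeds power.last_busy so that the expiration time computed above keeps
 * moving forward.  The "foo" names are hypothetical; the pm_runtime_* helpers
 * come from include/linux/pm_runtime.h, and a matching pm_runtime_get_sync()
 * is assumed to have been done before the I/O.
 *
 *      static ssize_t foo_xfer(struct foo_dev *foo, void *buf, size_t len)
 *      {
 *              ssize_t ret = foo_do_io(foo, buf, len); // hypothetical I/O
 *
 *              pm_runtime_mark_last_busy(foo->dev);    // refresh last_busy
 *              pm_runtime_put_autosuspend(foo->dev);   // drop ref, arm delay
 *              return ret;
 *      }
 */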

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
        int retval = 0;

        if (dev->power.runtime_error)
                retval = -EINVAL;
        else if (dev->power.disable_depth > 0)
                retval = -EACCES;
        else if (atomic_read(&dev->power.usage_count) > 0)
                retval = -EAGAIN;
        else if (!pm_children_suspended(dev))
                retval = -EBUSY;

        /* Pending resume requests take precedence over suspends. */
        else if ((dev->power.deferred_resume
                        && dev->power.runtime_status == RPM_SUSPENDING)
            || (dev->power.request_pending
                        && dev->power.request == RPM_REQ_RESUME))
                retval = -EAGAIN;
        else if (dev->power.runtime_status == RPM_SUSPENDED)
                retval = 1;

        return retval;
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int retval;

        if (dev->power.irq_safe)
                spin_unlock(&dev->power.lock);
        else
                spin_unlock_irq(&dev->power.lock);

        retval = cb(dev);

        if (dev->power.irq_safe)
                spin_lock(&dev->power.lock);
        else
                spin_lock_irq(&dev->power.lock);

        return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
        int (*callback)(struct device *);
        int retval;

        trace_rpm_idle(dev, rpmflags);
        retval = rpm_check_suspend_allowed(dev);
        if (retval < 0)
                ;       /* Conditions are wrong. */

        /* Idle notifications are allowed only in the RPM_ACTIVE state. */
        else if (dev->power.runtime_status != RPM_ACTIVE)
                retval = -EAGAIN;

        /*
         * Any pending request other than an idle notification takes
         * precedence over us, except that the timer may be running.
         */
        else if (dev->power.request_pending &&
            dev->power.request > RPM_REQ_IDLE)
                retval = -EAGAIN;

        /* Act as though RPM_NOWAIT is always set. */
        else if (dev->power.idle_notification)
                retval = -EINPROGRESS;
        if (retval)
                goto out;

        /* Pending requests need to be canceled. */
        dev->power.request = RPM_REQ_NONE;

        if (dev->power.no_callbacks) {
                /* Assume ->runtime_idle() callback would have suspended. */
                retval = rpm_suspend(dev, rpmflags);
                goto out;
        }

        /* Carry out an asynchronous or a synchronous idle notification. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = RPM_REQ_IDLE;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                goto out;
        }

        dev->power.idle_notification = true;

        if (dev->pm_domain)
                callback = dev->pm_domain->ops.runtime_idle;
        else if (dev->type && dev->type->pm)
                callback = dev->type->pm->runtime_idle;
        else if (dev->class && dev->class->pm)
                callback = dev->class->pm->runtime_idle;
        else if (dev->bus && dev->bus->pm)
                callback = dev->bus->pm->runtime_idle;
        else
                callback = NULL;

        if (callback)
                __rpm_callback(callback, dev);

        dev->power.idle_notification = false;
        wake_up_all(&dev->power.wait_queue);

 out:
        trace_rpm_return_int(dev, _THIS_IP_, retval);
        return retval;
}
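
/*
 * Illustrative sketch, not part of the original file: a minimal
 * ->runtime_idle() callback as invoked through __rpm_callback() above.
 * Since rpm_idle() ignores the callback's return value, a typical
 * implementation simply requests a suspend; "foo_runtime_idle" is
 * hypothetical.
 *
 *      static int foo_runtime_idle(struct device *dev)
 *      {
 *              pm_runtime_suspend(dev); // or pm_runtime_autosuspend(dev)
 *              return 0;
 *      }
 */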

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
        int retval;

        if (!cb)
                return -ENOSYS;

        retval = __rpm_callback(cb, dev);

        dev->power.runtime_error = retval;
        return retval != -EACCES ? retval : -EIO;
}
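
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * ->runtime_suspend() callback whose return value flows back through
 * rpm_callback().  -EAGAIN/-EBUSY are treated as transient (rpm_suspend()
 * clears runtime_error again); any other error leaves the device marked as
 * failed until its status is reset.
 *
 *      static int foo_runtime_suspend(struct device *dev)
 *      {
 *              struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *              if (foo_dma_active(foo))        // hypothetical helper
 *                      return -EBUSY;          // transient: try again later
 *
 *              foo_save_state(foo);
 *              foo_power_off(foo);
 *              return 0;
 *      }
 */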

struct rpm_qos_data {
        ktime_t time_now;
        s64 constraint_ns;
};

/**
 * rpm_update_qos_constraint - Update a given PM QoS constraint data.
 * @dev: Device whose timing data to use.
 * @data: PM QoS constraint data to update.
 *
 * Use the suspend timing data of @dev to update PM QoS constraint data pointed
 * to by @data.
 */
static int rpm_update_qos_constraint(struct device *dev, void *data)
{
        struct rpm_qos_data *qos = data;
        unsigned long flags;
        s64 delta_ns;
        int ret = 0;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (dev->power.max_time_suspended_ns < 0)
                goto out;

        delta_ns = dev->power.max_time_suspended_ns -
                ktime_to_ns(ktime_sub(qos->time_now, dev->power.suspend_time));
        if (delta_ns <= 0) {
                ret = -EBUSY;
                goto out;
        }

        if (qos->constraint_ns > delta_ns || qos->constraint_ns == 0)
                qos->constraint_ns = delta_ns;

 out:
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return ret;
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  If
 * ->runtime_suspend() succeeds and a deferred resume was requested while
 * the callback was running, carry out the resume; otherwise send an idle
 * notification to the device's parent (provided that neither
 * parent->power.ignore_children nor dev->power.irq_safe is set).
 * If ->runtime_suspend() fails with -EAGAIN or -EBUSY, and the RPM_AUTO
 * flag is set and the next autosuspend-delay expiration time is in the
 * future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int (*callback)(struct device *);
        struct device *parent = NULL;
        struct rpm_qos_data qos;
        int retval;

        trace_rpm_suspend(dev, rpmflags);

 repeat:
        retval = rpm_check_suspend_allowed(dev);

        if (retval < 0)
                ;       /* Conditions are wrong. */

        /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
        else if (dev->power.runtime_status == RPM_RESUMING &&
            !(rpmflags & RPM_ASYNC))
                retval = -EAGAIN;
        if (retval)
                goto out;

        /* If the autosuspend_delay time hasn't expired yet, reschedule. */
        if ((rpmflags & RPM_AUTO)
            && dev->power.runtime_status != RPM_SUSPENDING) {
                unsigned long expires = pm_runtime_autosuspend_expiration(dev);

                if (expires != 0) {
                        /* Pending requests need to be canceled. */
                        dev->power.request = RPM_REQ_NONE;

                        /*
                         * Optimization: If the timer is already running and is
                         * set to expire at or before the autosuspend delay,
                         * avoid the overhead of resetting it.  Just let it
                         * expire; pm_suspend_timer_fn() will take care of the
                         * rest.
                         */
                        if (!(dev->power.timer_expires && time_before_eq(
                            dev->power.timer_expires, expires))) {
                                dev->power.timer_expires = expires;
                                mod_timer(&dev->power.suspend_timer, expires);
                        }
                        dev->power.timer_autosuspends = 1;
                        goto out;
                }
        }

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        if (dev->power.runtime_status == RPM_SUSPENDING) {
                DEFINE_WAIT(wait);

                if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
                        retval = -EINPROGRESS;
                        goto out;
                }

                if (dev->power.irq_safe) {
                        spin_unlock(&dev->power.lock);

                        cpu_relax();

                        spin_lock(&dev->power.lock);
                        goto repeat;
                }

                /* Wait for the other suspend running in parallel with us. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_SUSPENDING)
                                break;

                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
                goto repeat;
        }

        dev->power.deferred_resume = false;
        if (dev->power.no_callbacks)
                goto no_callback;       /* Assume success. */

        /* Carry out an asynchronous or a synchronous suspend. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = (rpmflags & RPM_AUTO) ?
                    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                goto out;
        }

        qos.constraint_ns = __dev_pm_qos_read_value(dev);
        if (qos.constraint_ns < 0) {
                /* Negative constraint means "never suspend". */
                retval = -EPERM;
                goto out;
        }
        qos.constraint_ns *= NSEC_PER_USEC;
        qos.time_now = ktime_get();

        __update_runtime_status(dev, RPM_SUSPENDING);

        if (!dev->power.ignore_children) {
                if (dev->power.irq_safe)
                        spin_unlock(&dev->power.lock);
                else
                        spin_unlock_irq(&dev->power.lock);

                retval = device_for_each_child(dev, &qos,
                                               rpm_update_qos_constraint);

                if (dev->power.irq_safe)
                        spin_lock(&dev->power.lock);
                else
                        spin_lock_irq(&dev->power.lock);

                if (retval)
                        goto fail;
        }

        dev->power.suspend_time = qos.time_now;
        dev->power.max_time_suspended_ns = qos.constraint_ns ? : -1;

        if (dev->pm_domain)
                callback = dev->pm_domain->ops.runtime_suspend;
        else if (dev->type && dev->type->pm)
                callback = dev->type->pm->runtime_suspend;
        else if (dev->class && dev->class->pm)
                callback = dev->class->pm->runtime_suspend;
        else if (dev->bus && dev->bus->pm)
                callback = dev->bus->pm->runtime_suspend;
        else
                callback = NULL;

        retval = rpm_callback(callback, dev);
        if (retval)
                goto fail;

 no_callback:
        __update_runtime_status(dev, RPM_SUSPENDED);
        pm_runtime_deactivate_timer(dev);

        if (dev->parent) {
                parent = dev->parent;
                atomic_add_unless(&parent->power.child_count, -1, 0);
        }
        wake_up_all(&dev->power.wait_queue);

        if (dev->power.deferred_resume) {
                rpm_resume(dev, 0);
                retval = -EAGAIN;
                goto out;
        }

        /* Maybe the parent is now able to suspend. */
        if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
                spin_unlock(&dev->power.lock);

                spin_lock(&parent->power.lock);
                rpm_idle(parent, RPM_ASYNC);
                spin_unlock(&parent->power.lock);

                spin_lock(&dev->power.lock);
        }

 out:
        trace_rpm_return_int(dev, _THIS_IP_, retval);

        return retval;

 fail:
        __update_runtime_status(dev, RPM_ACTIVE);
        dev->power.suspend_time = ktime_set(0, 0);
        dev->power.max_time_suspended_ns = -1;
        dev->power.deferred_resume = false;
        if (retval == -EAGAIN || retval == -EBUSY) {
                dev->power.runtime_error = 0;

                /*
                 * If the callback routine failed an autosuspend, and
                 * if the last_busy time has been updated so that there
                 * is a new autosuspend expiration time, automatically
                 * reschedule another autosuspend.
                 */
                if ((rpmflags & RPM_AUTO) &&
                    pm_runtime_autosuspend_expiration(dev) != 0)
                        goto repeat;
        } else {
                pm_runtime_cancel_pending(dev);
        }
        wake_up_all(&dev->power.wait_queue);
        goto out;
}

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int (*callback)(struct device *);
        struct device *parent = NULL;
        int retval = 0;

        trace_rpm_resume(dev, rpmflags);

 repeat:
        if (dev->power.runtime_error)
                retval = -EINVAL;
        else if (dev->power.disable_depth > 0)
                retval = -EACCES;
        if (retval)
                goto out;

        /*
         * Other scheduled or pending requests need to be canceled.  Small
         * optimization: If an autosuspend timer is running, leave it running
         * rather than cancelling it now only to restart it again in the near
         * future.
         */
        dev->power.request = RPM_REQ_NONE;
        if (!dev->power.timer_autosuspends)
                pm_runtime_deactivate_timer(dev);

        if (dev->power.runtime_status == RPM_ACTIVE) {
                retval = 1;
                goto out;
        }

        if (dev->power.runtime_status == RPM_RESUMING
            || dev->power.runtime_status == RPM_SUSPENDING) {
                DEFINE_WAIT(wait);

                if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
                        if (dev->power.runtime_status == RPM_SUSPENDING)
                                dev->power.deferred_resume = true;
                        else
                                retval = -EINPROGRESS;
                        goto out;
                }

                if (dev->power.irq_safe) {
                        spin_unlock(&dev->power.lock);

                        cpu_relax();

                        spin_lock(&dev->power.lock);
                        goto repeat;
                }

                /* Wait for the operation carried out in parallel with us. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_RESUMING
                            && dev->power.runtime_status != RPM_SUSPENDING)
                                break;

                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
                goto repeat;
        }

        /*
         * See if we can skip waking up the parent.  This is safe only if
         * power.no_callbacks is set, because otherwise we don't know whether
         * the resume will actually succeed.
         */
        if (dev->power.no_callbacks && !parent && dev->parent) {
                spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
                if (dev->parent->power.disable_depth > 0
                    || dev->parent->power.ignore_children
                    || dev->parent->power.runtime_status == RPM_ACTIVE) {
                        atomic_inc(&dev->parent->power.child_count);
                        spin_unlock(&dev->parent->power.lock);
                        goto no_callback;       /* Assume success. */
                }
                spin_unlock(&dev->parent->power.lock);
        }

        /* Carry out an asynchronous or a synchronous resume. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = RPM_REQ_RESUME;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                retval = 0;
                goto out;
        }

        if (!parent && dev->parent) {
                /*
                 * Increment the parent's usage counter and resume it if
                 * necessary.  Not needed if dev is irq-safe; then the
                 * parent is permanently resumed.
                 */
                parent = dev->parent;
                if (dev->power.irq_safe)
                        goto skip_parent;
                spin_unlock(&dev->power.lock);

                pm_runtime_get_noresume(parent);

                spin_lock(&parent->power.lock);
                /*
                 * We can resume if the parent's runtime PM is disabled or it
                 * is set to ignore children.
                 */
                if (!parent->power.disable_depth
                    && !parent->power.ignore_children) {
                        rpm_resume(parent, 0);
                        if (parent->power.runtime_status != RPM_ACTIVE)
                                retval = -EBUSY;
                }
                spin_unlock(&parent->power.lock);

                spin_lock(&dev->power.lock);
                if (retval)
                        goto out;
                goto repeat;
        }
 skip_parent:

        if (dev->power.no_callbacks)
                goto no_callback;       /* Assume success. */

        dev->power.suspend_time = ktime_set(0, 0);
        dev->power.max_time_suspended_ns = -1;

        __update_runtime_status(dev, RPM_RESUMING);

        if (dev->pm_domain)
                callback = dev->pm_domain->ops.runtime_resume;
        else if (dev->type && dev->type->pm)
                callback = dev->type->pm->runtime_resume;
        else if (dev->class && dev->class->pm)
                callback = dev->class->pm->runtime_resume;
        else if (dev->bus && dev->bus->pm)
                callback = dev->bus->pm->runtime_resume;
        else
                callback = NULL;

        retval = rpm_callback(callback, dev);
        if (retval) {
                __update_runtime_status(dev, RPM_SUSPENDED);
                pm_runtime_cancel_pending(dev);
        } else {
 no_callback:
                __update_runtime_status(dev, RPM_ACTIVE);
                if (parent)
                        atomic_inc(&parent->power.child_count);
        }
        wake_up_all(&dev->power.wait_queue);

        if (!retval)
                rpm_idle(dev, RPM_ASYNC);

 out:
        if (parent && !dev->power.irq_safe) {
                spin_unlock_irq(&dev->power.lock);

                pm_runtime_put(parent);

                spin_lock_irq(&dev->power.lock);
        }

        trace_rpm_return_int(dev, _THIS_IP_, retval);

        return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine
 * what is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
        struct device *dev = container_of(work, struct device, power.work);
        enum rpm_request req;

        spin_lock_irq(&dev->power.lock);

        if (!dev->power.request_pending)
                goto out;

        req = dev->power.request;
        dev->power.request = RPM_REQ_NONE;
        dev->power.request_pending = false;

        switch (req) {
        case RPM_REQ_NONE:
                break;
        case RPM_REQ_IDLE:
                rpm_idle(dev, RPM_NOWAIT);
                break;
        case RPM_REQ_SUSPEND:
                rpm_suspend(dev, RPM_NOWAIT);
                break;
        case RPM_REQ_AUTOSUSPEND:
                rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
                break;
        case RPM_REQ_RESUME:
                rpm_resume(dev, RPM_NOWAIT);
                break;
        }

 out:
        spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
        struct device *dev = (struct device *)data;
        unsigned long flags;
        unsigned long expires;

        spin_lock_irqsave(&dev->power.lock, flags);

        expires = dev->power.timer_expires;
        /* If 'expires' is after 'jiffies' we've been called too early. */
        if (expires > 0 && !time_after(expires, jiffies)) {
                dev->power.timer_expires = 0;
                rpm_suspend(dev, dev->power.timer_autosuspends ?
                    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
        }

        spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
        unsigned long flags;
        int retval;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (!delay) {
                retval = rpm_suspend(dev, RPM_ASYNC);
                goto out;
        }

        retval = rpm_check_suspend_allowed(dev);
        if (retval)
                goto out;

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
        dev->power.timer_expires += !dev->power.timer_expires;
        dev->power.timer_autosuspends = 0;
        mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
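
/*
 * Illustrative usage sketch, not part of the original file: submit a suspend
 * request after a fixed grace period instead of suspending synchronously.
 *
 *      pm_schedule_suspend(dev, 5000);         // suspend request in ~5 s
 */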

/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

        if (rpmflags & RPM_GET_PUT) {
                if (!atomic_dec_and_test(&dev->power.usage_count))
                        return 0;
        }

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_idle(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);
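
/*
 * For reference, a sketch of the driver-facing helpers from this kernel
 * generation's include/linux/pm_runtime.h; they are thin wrappers around
 * this entry point:
 *
 *      pm_runtime_idle(dev)     == __pm_runtime_idle(dev, 0)
 *      pm_request_idle(dev)     == __pm_runtime_idle(dev, RPM_ASYNC)
 *      pm_runtime_put(dev)      == __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC)
 *      pm_runtime_put_sync(dev) == __pm_runtime_idle(dev, RPM_GET_PUT)
 */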

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

        if (rpmflags & RPM_GET_PUT) {
                if (!atomic_dec_and_test(&dev->power.usage_count))
                        return 0;
        }

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_suspend(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

        if (rpmflags & RPM_GET_PUT)
                atomic_inc(&dev->power.usage_count);

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_resume(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
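
/*
 * Illustrative sketch, not part of the original file: the canonical get/put
 * pattern built on these entry points (pm_runtime_get_sync() maps to
 * __pm_runtime_resume(dev, RPM_GET_PUT)).  "foo_hw_read" is hypothetical.
 *
 *      static int foo_read_reg(struct device *dev, u32 *val)
 *      {
 *              int ret = pm_runtime_get_sync(dev);
 *              if (ret < 0) {
 *                      pm_runtime_put_noidle(dev); // keep counter balanced
 *                      return ret;
 *              }
 *              *val = foo_hw_read(dev);
 *              pm_runtime_put(dev);
 *              return 0;
 *      }
 */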

/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * nonzero, the status may be changed either to RPM_ACTIVE or to RPM_SUSPENDED,
 * as long as that reflects the actual state of the device.  However, if the
 * device has a parent and the parent is not active, and the parent's
 * power.ignore_children flag is unset, the device's status cannot be set to
 * RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
        struct device *parent = dev->parent;
        unsigned long flags;
        bool notify_parent = false;
        int error = 0;

        if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
                return -EINVAL;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (!dev->power.runtime_error && !dev->power.disable_depth) {
                error = -EAGAIN;
                goto out;
        }

        if (dev->power.runtime_status == status)
                goto out_set;

        if (status == RPM_SUSPENDED) {
                /* It always is possible to set the status to 'suspended'. */
                if (parent) {
                        atomic_add_unless(&parent->power.child_count, -1, 0);
                        notify_parent = !parent->power.ignore_children;
                }
                goto out_set;
        }

        if (parent) {
                spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

                /*
                 * It is invalid to put an active child under a parent that is
                 * not active, has runtime PM enabled and the
                 * 'power.ignore_children' flag unset.
                 */
                if (!parent->power.disable_depth
                    && !parent->power.ignore_children
                    && parent->power.runtime_status != RPM_ACTIVE)
                        error = -EBUSY;
                else if (dev->power.runtime_status == RPM_SUSPENDED)
                        atomic_inc(&parent->power.child_count);

                spin_unlock(&parent->power.lock);

                if (error)
                        goto out;
        }

 out_set:
        __update_runtime_status(dev, status);
        dev->power.runtime_error = 0;
 out:
        spin_unlock_irqrestore(&dev->power.lock, flags);

        if (notify_parent)
                pm_request_idle(parent);

        return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
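
/*
 * Illustrative sketch, not part of the original file: telling the core at
 * probe time that the hardware is already powered up before enabling runtime
 * PM (pm_runtime_set_active() maps to __pm_runtime_set_status(dev, RPM_ACTIVE)).
 *
 *      pm_runtime_set_active(dev);
 *      pm_runtime_enable(dev);
 *      ...
 *      // and in .remove(), the mirror image:
 *      pm_runtime_disable(dev);
 *      pm_runtime_set_suspended(dev);
 */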

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);

        if (dev->power.request_pending) {
                dev->power.request = RPM_REQ_NONE;
                spin_unlock_irq(&dev->power.lock);

                cancel_work_sync(&dev->power.work);

                spin_lock_irq(&dev->power.lock);
                dev->power.request_pending = false;
        }

        if (dev->power.runtime_status == RPM_SUSPENDING
            || dev->power.runtime_status == RPM_RESUMING
            || dev->power.idle_notification) {
                DEFINE_WAIT(wait);

                /* Suspend, wake-up or idle notification in progress. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_SUSPENDING
                            && dev->power.runtime_status != RPM_RESUMING
                            && !dev->power.idle_notification)
                                break;
                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
        }
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and, if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
        int retval = 0;

        pm_runtime_get_noresume(dev);
        spin_lock_irq(&dev->power.lock);

        if (dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                rpm_resume(dev, 0);
                retval = 1;
        }

        __pm_runtime_barrier(dev);

        spin_unlock_irq(&dev->power.lock);
        pm_runtime_put_noidle(dev);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and, if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
        spin_lock_irq(&dev->power.lock);

        if (dev->power.disable_depth > 0) {
                dev->power.disable_depth++;
                goto out;
        }

        /*
         * Wake up the device if there's a resume request pending, because that
         * means there probably is some I/O to process and disabling runtime PM
         * shouldn't prevent the device from processing the I/O.
         */
        if (check_resume && dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                /*
                 * Prevent suspends and idle notifications from being carried
                 * out after we have woken up the device.
                 */
                pm_runtime_get_noresume(dev);

                rpm_resume(dev, 0);

                pm_runtime_put_noidle(dev);
        }

        if (!dev->power.disable_depth++)
                __pm_runtime_barrier(dev);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (dev->power.disable_depth > 0)
                dev->power.disable_depth--;
        else
                dev_warn(dev, "Unbalanced %s!\n", __func__);

        spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is
 * called for it.
 */
void pm_runtime_forbid(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        if (!dev->power.runtime_auto)
                goto out;

        dev->power.runtime_auto = false;
        atomic_inc(&dev->power.usage_count);
        rpm_resume(dev, 0);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        if (dev->power.runtime_auto)
                goto out;

        dev->power.runtime_auto = true;
        if (atomic_dec_and_test(&dev->power.usage_count))
                rpm_idle(dev, RPM_AUTO);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);
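
/*
 * For reference: pm_runtime_forbid() and pm_runtime_allow() back the
 * per-device "control" sysfs attribute, so user space can toggle them:
 *
 *      echo on   > /sys/devices/.../power/control      # pm_runtime_forbid()
 *      echo auto > /sys/devices/.../power/control      # pm_runtime_allow()
 */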

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        dev->power.no_callbacks = 1;
        spin_unlock_irq(&dev->power.lock);
        if (device_is_registered(dev))
                rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
        if (dev->parent)
                pm_runtime_get_sync(dev->parent);
        spin_lock_irq(&dev->power.lock);
        dev->power.irq_safe = 1;
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
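
/*
 * Illustrative sketch, not part of the original file: declaring irq-safe
 * runtime PM in a hypothetical probe routine, so that synchronous calls such
 * as pm_runtime_get_sync() become legal in the driver's interrupt handler.
 * The cost described above applies: the parent stays resumed permanently.
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              ...
 *              pm_runtime_irq_safe(&pdev->dev);
 *              pm_runtime_enable(&pdev->dev);
 *              ...
 *      }
 */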

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
        int delay = dev->power.autosuspend_delay;

        /* Should runtime suspend be prevented now? */
        if (dev->power.use_autosuspend && delay < 0) {

                /* If it used to be allowed then prevent it. */
                if (!old_use || old_delay >= 0) {
                        atomic_inc(&dev->power.usage_count);
                        rpm_resume(dev, 0);
                }
        }

        /* Runtime suspend should be allowed now. */
        else {

                /* If it used to be prevented then allow it. */
                if (old_use && old_delay < 0)
                        atomic_dec(&dev->power.usage_count);

                /* Maybe we can autosuspend now. */
                rpm_idle(dev, RPM_AUTO);
        }
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
        int old_delay, old_use;

        spin_lock_irq(&dev->power.lock);
        old_delay = dev->power.autosuspend_delay;
        old_use = dev->power.use_autosuspend;
        dev->power.autosuspend_delay = delay;
        update_autosuspend(dev, old_delay, old_use);
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
        int old_delay, old_use;

        spin_lock_irq(&dev->power.lock);
        old_delay = dev->power.autosuspend_delay;
        old_use = dev->power.use_autosuspend;
        dev->power.use_autosuspend = use;
        update_autosuspend(dev, old_delay, old_use);
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
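
/*
 * Illustrative sketch, not part of the original file: the usual probe-time
 * autosuspend setup that ends up in the two helpers above
 * (pm_runtime_use_autosuspend() maps to __pm_runtime_use_autosuspend(dev, true)).
 *
 *      pm_runtime_set_autosuspend_delay(dev, 2000);    // 2 s of idleness
 *      pm_runtime_use_autosuspend(dev);
 *      pm_runtime_enable(dev);
 */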

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
        dev->power.runtime_status = RPM_SUSPENDED;
        dev->power.idle_notification = false;

        dev->power.disable_depth = 1;
        atomic_set(&dev->power.usage_count, 0);

        dev->power.runtime_error = 0;

        atomic_set(&dev->power.child_count, 0);
        pm_suspend_ignore_children(dev, false);
        dev->power.runtime_auto = true;

        dev->power.request_pending = false;
        dev->power.request = RPM_REQ_NONE;
        dev->power.deferred_resume = false;
        dev->power.accounting_timestamp = jiffies;
        INIT_WORK(&dev->power.work, pm_runtime_work);

        dev->power.timer_expires = 0;
        setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
                        (unsigned long)dev);

        dev->power.suspend_time = ktime_set(0, 0);
        dev->power.max_time_suspended_ns = -1;

        init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
        __pm_runtime_disable(dev, false);

        /* Change the status back to 'suspended' to match the initial status. */
        if (dev->power.runtime_status == RPM_ACTIVE)
                pm_runtime_set_suspended(dev);
        if (dev->power.irq_safe && dev->parent)
                pm_runtime_put_sync(dev->parent);
}

/**
 * pm_runtime_update_max_time_suspended - Update device's suspend time data.
 * @dev: Device to handle.
 * @delta_ns: Value to subtract from the device's max_time_suspended_ns field.
 *
 * Update the device's power.max_time_suspended_ns field by subtracting
 * @delta_ns from it.  The resulting value of power.max_time_suspended_ns is
 * never negative.
 */
void pm_runtime_update_max_time_suspended(struct device *dev, s64 delta_ns)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (delta_ns > 0 && dev->power.max_time_suspended_ns > 0) {
                if (dev->power.max_time_suspended_ns > delta_ns)
                        dev->power.max_time_suspended_ns -= delta_ns;
                else
                        dev->power.max_time_suspended_ns = 0;
        }

        spin_unlock_irqrestore(&dev->power.lock, flags);
}