/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <trace/events/rpm.h>
#include "power.h"

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
        unsigned long now = jiffies;
        int delta;

        delta = now - dev->power.accounting_timestamp;

        if (delta < 0)
                delta = 0;

        dev->power.accounting_timestamp = now;

        if (dev->power.disable_depth > 0)
                return;

        if (dev->power.runtime_status == RPM_SUSPENDED)
                dev->power.suspended_jiffies += delta;
        else
                dev->power.active_jiffies += delta;
}
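
/*
 * For reference (a sketch, not code from this file): the runtime_active_time
 * and runtime_suspended_time sysfs attributes in drivers/base/power/sysfs.c
 * report these counters essentially as
 *
 *        spin_lock_irq(&dev->power.lock);
 *        update_pm_runtime_accounting(dev);
 *        ret = sprintf(buf, "%i\n", jiffies_to_msecs(dev->power.active_jiffies));
 *        spin_unlock_irq(&dev->power.lock);
 *
 * calling update_pm_runtime_accounting() first, under dev->power.lock, so
 * that the counters are brought up to date before being read.
 */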

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
        update_pm_runtime_accounting(dev);
        dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
        if (dev->power.timer_expires > 0) {
                del_timer(&dev->power.suspend_timer);
                dev->power.timer_expires = 0;
        }
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);
        /*
         * In case there's a request pending, make sure its work function will
         * return without doing anything.
         */
        dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
        int autosuspend_delay;
        long elapsed;
        unsigned long last_busy;
        unsigned long expires = 0;

        if (!dev->power.use_autosuspend)
                goto out;

        autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
        if (autosuspend_delay < 0)
                goto out;

        last_busy = ACCESS_ONCE(dev->power.last_busy);
        elapsed = jiffies - last_busy;
        if (elapsed < 0)
                goto out;       /* jiffies has wrapped around. */

        /*
         * If the autosuspend_delay is >= 1 second, align the timer by rounding
         * up to the nearest second.
         */
        expires = last_busy + msecs_to_jiffies(autosuspend_delay);
        if (autosuspend_delay >= 1000)
                expires = round_jiffies(expires);
        expires += !expires;
        if (elapsed >= expires - last_busy)
                expires = 0;    /* Already expired. */

 out:
        return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
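
/*
 * Illustrative use only (foo_runtime_suspend and foo_power_down are
 * hypothetical): a driver's ->runtime_suspend() callback might re-check the
 * expiration time and back off if the device became busy again after the
 * suspend was queued:
 *
 *        static int foo_runtime_suspend(struct device *dev)
 *        {
 *                if (pm_runtime_autosuspend_expiration(dev))
 *                        return -EAGAIN;
 *                return foo_power_down(dev);
 *        }
 *
 * Returning -EAGAIN is a transient failure: rpm_suspend() clears
 * power.runtime_error for it, so a later suspend attempt is still possible.
 */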

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
        int retval = 0;

        if (dev->power.runtime_error)
                retval = -EINVAL;
        else if (dev->power.disable_depth > 0)
                retval = -EACCES;
        else if (atomic_read(&dev->power.usage_count) > 0)
                retval = -EAGAIN;
        else if (!pm_children_suspended(dev))
                retval = -EBUSY;

        /* Pending resume requests take precedence over suspends. */
        else if ((dev->power.deferred_resume
                        && dev->power.runtime_status == RPM_SUSPENDING)
            || (dev->power.request_pending
                        && dev->power.request == RPM_REQ_RESUME))
                retval = -EAGAIN;
        else if (dev->power.runtime_status == RPM_SUSPENDED)
                retval = 1;

        return retval;
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int retval;

        if (dev->power.irq_safe)
                spin_unlock(&dev->power.lock);
        else
                spin_unlock_irq(&dev->power.lock);

        retval = cb(dev);

        if (dev->power.irq_safe)
                spin_lock(&dev->power.lock);
        else
                spin_lock_irq(&dev->power.lock);

        return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
        int (*callback)(struct device *);
        int retval;

        trace_rpm_idle(dev, rpmflags);
        retval = rpm_check_suspend_allowed(dev);
        if (retval < 0)
                ;       /* Conditions are wrong. */

        /* Idle notifications are allowed only in the RPM_ACTIVE state. */
        else if (dev->power.runtime_status != RPM_ACTIVE)
                retval = -EAGAIN;

        /*
         * Any pending request other than an idle notification takes
         * precedence over us, except that the timer may be running.
         */
        else if (dev->power.request_pending &&
            dev->power.request > RPM_REQ_IDLE)
                retval = -EAGAIN;

        /* Act as though RPM_NOWAIT is always set. */
        else if (dev->power.idle_notification)
                retval = -EINPROGRESS;
        if (retval)
                goto out;

        /* Pending requests need to be canceled. */
        dev->power.request = RPM_REQ_NONE;

        if (dev->power.no_callbacks) {
                /* Assume ->runtime_idle() callback would have suspended. */
                retval = rpm_suspend(dev, rpmflags);
                goto out;
        }

        /* Carry out an asynchronous or a synchronous idle notification. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = RPM_REQ_IDLE;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                goto out;
        }

        dev->power.idle_notification = true;

        if (dev->pm_domain)
                callback = dev->pm_domain->ops.runtime_idle;
        else if (dev->type && dev->type->pm)
                callback = dev->type->pm->runtime_idle;
        else if (dev->class && dev->class->pm)
                callback = dev->class->pm->runtime_idle;
        else if (dev->bus && dev->bus->pm)
                callback = dev->bus->pm->runtime_idle;
        else
                callback = NULL;

        if (callback)
                __rpm_callback(callback, dev);

        dev->power.idle_notification = false;
        wake_up_all(&dev->power.wait_queue);

 out:
        trace_rpm_return_int(dev, _THIS_IP_, retval);
        return retval;
}

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
        int retval;

        if (!cb)
                return -ENOSYS;

        retval = __rpm_callback(cb, dev);

        dev->power.runtime_error = retval;
        return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another suspend has been started earlier, either return immediately or wait
 * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags.  Cancel a
 * pending idle notification.  If the RPM_ASYNC flag is set then queue a
 * suspend request; otherwise run the ->runtime_suspend() callback directly.
 * If a deferred resume was requested while the callback was running then carry
 * it out; otherwise send an idle notification for the device (if the suspend
 * failed) or for its parent (if the suspend succeeded).
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int (*callback)(struct device *);
        struct device *parent = NULL;
        int retval;

        trace_rpm_suspend(dev, rpmflags);

 repeat:
        retval = rpm_check_suspend_allowed(dev);

        if (retval < 0)
                ;       /* Conditions are wrong. */

        /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
        else if (dev->power.runtime_status == RPM_RESUMING &&
            !(rpmflags & RPM_ASYNC))
                retval = -EAGAIN;
        if (retval)
                goto out;

        /* If the autosuspend_delay time hasn't expired yet, reschedule. */
        if ((rpmflags & RPM_AUTO)
            && dev->power.runtime_status != RPM_SUSPENDING) {
                unsigned long expires = pm_runtime_autosuspend_expiration(dev);

                if (expires != 0) {
                        /* Pending requests need to be canceled. */
                        dev->power.request = RPM_REQ_NONE;

                        /*
                         * Optimization: If the timer is already running and is
                         * set to expire at or before the autosuspend delay,
                         * avoid the overhead of resetting it.  Just let it
                         * expire; pm_suspend_timer_fn() will take care of the
                         * rest.
                         */
                        if (!(dev->power.timer_expires && time_before_eq(
                            dev->power.timer_expires, expires))) {
                                dev->power.timer_expires = expires;
                                mod_timer(&dev->power.suspend_timer, expires);
                        }
                        dev->power.timer_autosuspends = 1;
                        goto out;
                }
        }

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        if (dev->power.runtime_status == RPM_SUSPENDING) {
                DEFINE_WAIT(wait);

                if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
                        retval = -EINPROGRESS;
                        goto out;
                }

                if (dev->power.irq_safe) {
                        spin_unlock(&dev->power.lock);

                        cpu_relax();

                        spin_lock(&dev->power.lock);
                        goto repeat;
                }

                /* Wait for the other suspend running in parallel with us. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_SUSPENDING)
                                break;

                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
                goto repeat;
        }

        dev->power.deferred_resume = false;
        if (dev->power.no_callbacks)
                goto no_callback;       /* Assume success. */

        /* Carry out an asynchronous or a synchronous suspend. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = (rpmflags & RPM_AUTO) ?
                    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                goto out;
        }

        __update_runtime_status(dev, RPM_SUSPENDING);

        if (dev->pm_domain)
                callback = dev->pm_domain->ops.runtime_suspend;
        else if (dev->type && dev->type->pm)
                callback = dev->type->pm->runtime_suspend;
        else if (dev->class && dev->class->pm)
                callback = dev->class->pm->runtime_suspend;
        else if (dev->bus && dev->bus->pm)
                callback = dev->bus->pm->runtime_suspend;
        else
                callback = NULL;

        retval = rpm_callback(callback, dev);
        if (retval) {
                __update_runtime_status(dev, RPM_ACTIVE);
                dev->power.deferred_resume = false;
                if (retval == -EAGAIN || retval == -EBUSY)
                        dev->power.runtime_error = 0;
                else
                        pm_runtime_cancel_pending(dev);
        } else {
 no_callback:
                __update_runtime_status(dev, RPM_SUSPENDED);
                pm_runtime_deactivate_timer(dev);

                if (dev->parent) {
                        parent = dev->parent;
                        atomic_add_unless(&parent->power.child_count, -1, 0);
                }
        }
        wake_up_all(&dev->power.wait_queue);

        if (dev->power.deferred_resume) {
                rpm_resume(dev, 0);
                retval = -EAGAIN;
                goto out;
        }

        /* Maybe the parent is now able to suspend. */
        if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
                spin_unlock(&dev->power.lock);

                spin_lock(&parent->power.lock);
                rpm_idle(parent, RPM_ASYNC);
                spin_unlock(&parent->power.lock);

                spin_lock(&dev->power.lock);
        }

 out:
        trace_rpm_return_int(dev, _THIS_IP_, retval);

        return retval;
}

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int (*callback)(struct device *);
        struct device *parent = NULL;
        int retval = 0;

        trace_rpm_resume(dev, rpmflags);

 repeat:
        if (dev->power.runtime_error)
                retval = -EINVAL;
        else if (dev->power.disable_depth > 0)
                retval = -EACCES;
        if (retval)
                goto out;

        /*
         * Other scheduled or pending requests need to be canceled.  Small
         * optimization: If an autosuspend timer is running, leave it running
         * rather than cancelling it now only to restart it again in the near
         * future.
         */
        dev->power.request = RPM_REQ_NONE;
        if (!dev->power.timer_autosuspends)
                pm_runtime_deactivate_timer(dev);

        if (dev->power.runtime_status == RPM_ACTIVE) {
                retval = 1;
                goto out;
        }

        if (dev->power.runtime_status == RPM_RESUMING
            || dev->power.runtime_status == RPM_SUSPENDING) {
                DEFINE_WAIT(wait);

                if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
                        if (dev->power.runtime_status == RPM_SUSPENDING)
                                dev->power.deferred_resume = true;
                        else
                                retval = -EINPROGRESS;
                        goto out;
                }

                if (dev->power.irq_safe) {
                        spin_unlock(&dev->power.lock);

                        cpu_relax();

                        spin_lock(&dev->power.lock);
                        goto repeat;
                }

                /* Wait for the operation carried out in parallel with us. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_RESUMING
                            && dev->power.runtime_status != RPM_SUSPENDING)
                                break;

                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
                goto repeat;
        }

        /*
         * See if we can skip waking up the parent.  This is safe only if
         * power.no_callbacks is set, because otherwise we don't know whether
         * the resume will actually succeed.
         */
        if (dev->power.no_callbacks && !parent && dev->parent) {
                spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
                if (dev->parent->power.disable_depth > 0
                    || dev->parent->power.ignore_children
                    || dev->parent->power.runtime_status == RPM_ACTIVE) {
                        atomic_inc(&dev->parent->power.child_count);
                        spin_unlock(&dev->parent->power.lock);
                        goto no_callback;       /* Assume success. */
                }
                spin_unlock(&dev->parent->power.lock);
        }

        /* Carry out an asynchronous or a synchronous resume. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = RPM_REQ_RESUME;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                retval = 0;
                goto out;
        }

        if (!parent && dev->parent) {
                /*
                 * Increment the parent's usage counter and resume it if
                 * necessary.  Not needed if dev is irq-safe; then the
                 * parent is permanently resumed.
                 */
                parent = dev->parent;
                if (dev->power.irq_safe)
                        goto skip_parent;
                spin_unlock(&dev->power.lock);

                pm_runtime_get_noresume(parent);

                spin_lock(&parent->power.lock);
                /*
                 * We can resume if the parent's runtime PM is disabled or it
                 * is set to ignore children.
                 */
                if (!parent->power.disable_depth
                    && !parent->power.ignore_children) {
                        rpm_resume(parent, 0);
                        if (parent->power.runtime_status != RPM_ACTIVE)
                                retval = -EBUSY;
                }
                spin_unlock(&parent->power.lock);

                spin_lock(&dev->power.lock);
                if (retval)
                        goto out;
                goto repeat;
        }
 skip_parent:

        if (dev->power.no_callbacks)
                goto no_callback;       /* Assume success. */

        __update_runtime_status(dev, RPM_RESUMING);

        if (dev->pm_domain)
                callback = dev->pm_domain->ops.runtime_resume;
        else if (dev->type && dev->type->pm)
                callback = dev->type->pm->runtime_resume;
        else if (dev->class && dev->class->pm)
                callback = dev->class->pm->runtime_resume;
        else if (dev->bus && dev->bus->pm)
                callback = dev->bus->pm->runtime_resume;
        else
                callback = NULL;

        retval = rpm_callback(callback, dev);
        if (retval) {
                __update_runtime_status(dev, RPM_SUSPENDED);
                pm_runtime_cancel_pending(dev);
        } else {
 no_callback:
                __update_runtime_status(dev, RPM_ACTIVE);
                if (parent)
                        atomic_inc(&parent->power.child_count);
        }
        wake_up_all(&dev->power.wait_queue);

        if (!retval)
                rpm_idle(dev, RPM_ASYNC);

 out:
        if (parent && !dev->power.irq_safe) {
                spin_unlock_irq(&dev->power.lock);

                pm_runtime_put(parent);

                spin_lock_irq(&dev->power.lock);
        }

        trace_rpm_return_int(dev, _THIS_IP_, retval);

        return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
        struct device *dev = container_of(work, struct device, power.work);
        enum rpm_request req;

        spin_lock_irq(&dev->power.lock);

        if (!dev->power.request_pending)
                goto out;

        req = dev->power.request;
        dev->power.request = RPM_REQ_NONE;
        dev->power.request_pending = false;

        switch (req) {
        case RPM_REQ_NONE:
                break;
        case RPM_REQ_IDLE:
                rpm_idle(dev, RPM_NOWAIT);
                break;
        case RPM_REQ_SUSPEND:
                rpm_suspend(dev, RPM_NOWAIT);
                break;
        case RPM_REQ_AUTOSUSPEND:
                rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
                break;
        case RPM_REQ_RESUME:
                rpm_resume(dev, RPM_NOWAIT);
                break;
        }

 out:
        spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
        struct device *dev = (struct device *)data;
        unsigned long flags;
        unsigned long expires;

        spin_lock_irqsave(&dev->power.lock, flags);

        expires = dev->power.timer_expires;
        /* If 'expires' is after 'jiffies' we've been called too early. */
        if (expires > 0 && !time_after(expires, jiffies)) {
                dev->power.timer_expires = 0;
                rpm_suspend(dev, dev->power.timer_autosuspends ?
                    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
        }

        spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
        unsigned long flags;
        int retval;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (!delay) {
                retval = rpm_suspend(dev, RPM_ASYNC);
                goto out;
        }

        retval = rpm_check_suspend_allowed(dev);
        if (retval)
                goto out;

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
        dev->power.timer_expires += !dev->power.timer_expires;
        dev->power.timer_autosuspends = 0;
        mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
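
/*
 * Usage sketch (hypothetical driver code, not from this file): ask for the
 * device to be suspended two seconds from now, tolerating the benign cases
 * where suspending is temporarily not allowed:
 *
 *        error = pm_schedule_suspend(dev, 2000);
 *        if (error < 0 && error != -EAGAIN && error != -EBUSY)
 *                dev_warn(dev, "cannot schedule suspend: %d\n", error);
 *
 * A return value of 1 means the device is already suspended, in which case
 * no timer is armed.
 */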

/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

        if (rpmflags & RPM_GET_PUT) {
                if (!atomic_dec_and_test(&dev->power.usage_count))
                        return 0;
        }

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_idle(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);
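
/*
 * Drivers normally reach this through the static inline wrappers in
 * <linux/pm_runtime.h>; as of this kernel version they expand to:
 *
 *        pm_runtime_idle(dev)      ->  __pm_runtime_idle(dev, 0)
 *        pm_request_idle(dev)      ->  __pm_runtime_idle(dev, RPM_ASYNC)
 *        pm_runtime_put(dev)       ->  __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC)
 *        pm_runtime_put_sync(dev)  ->  __pm_runtime_idle(dev, RPM_GET_PUT)
 */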

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

        if (rpmflags & RPM_GET_PUT) {
                if (!atomic_dec_and_test(&dev->power.usage_count))
                        return 0;
        }

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_suspend(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
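
/*
 * The corresponding <linux/pm_runtime.h> wrappers, as of this kernel version:
 *
 *        pm_runtime_suspend(dev)           ->  __pm_runtime_suspend(dev, 0)
 *        pm_runtime_autosuspend(dev)       ->  __pm_runtime_suspend(dev, RPM_AUTO)
 *        pm_request_autosuspend(dev)       ->  __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO)
 *        pm_runtime_put_sync_suspend(dev)  ->  __pm_runtime_suspend(dev, RPM_GET_PUT)
 *        pm_runtime_put_autosuspend(dev)   ->  __pm_runtime_suspend(dev,
 *                                                  RPM_GET_PUT | RPM_ASYNC | RPM_AUTO)
 */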

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

        if (rpmflags & RPM_GET_PUT)
                atomic_inc(&dev->power.usage_count);

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_resume(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
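
/*
 * Wrappers from <linux/pm_runtime.h>, as of this kernel version:
 *
 *        pm_runtime_resume(dev)    ->  __pm_runtime_resume(dev, 0)
 *        pm_request_resume(dev)    ->  __pm_runtime_resume(dev, RPM_ASYNC)
 *        pm_runtime_get(dev)       ->  __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC)
 *        pm_runtime_get_sync(dev)  ->  __pm_runtime_resume(dev, RPM_GET_PUT)
 *
 * The classic driver I/O pattern built on these (sketch):
 *
 *        pm_runtime_get_sync(dev);
 *        ... talk to the hardware ...
 *        pm_runtime_put(dev);
 */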

/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
        struct device *parent = dev->parent;
        unsigned long flags;
        bool notify_parent = false;
        int error = 0;

        if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
                return -EINVAL;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (!dev->power.runtime_error && !dev->power.disable_depth) {
                error = -EAGAIN;
                goto out;
        }

        if (dev->power.runtime_status == status)
                goto out_set;

        if (status == RPM_SUSPENDED) {
                /* It is always possible to set the status to 'suspended'. */
                if (parent) {
                        atomic_add_unless(&parent->power.child_count, -1, 0);
                        notify_parent = !parent->power.ignore_children;
                }
                goto out_set;
        }

        if (parent) {
                spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

                /*
                 * It is invalid to put an active child under a parent that is
                 * not active, has runtime PM enabled and the
                 * 'power.ignore_children' flag unset.
                 */
                if (!parent->power.disable_depth
                    && !parent->power.ignore_children
                    && parent->power.runtime_status != RPM_ACTIVE)
                        error = -EBUSY;
                else if (dev->power.runtime_status == RPM_SUSPENDED)
                        atomic_inc(&parent->power.child_count);

                spin_unlock(&parent->power.lock);

                if (error)
                        goto out;
        }

 out_set:
        __update_runtime_status(dev, status);
        dev->power.runtime_error = 0;
 out:
        spin_unlock_irqrestore(&dev->power.lock, flags);

        if (notify_parent)
                pm_request_idle(parent);

        return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
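
/*
 * Typical use is through the pm_runtime_set_active() and
 * pm_runtime_set_suspended() wrappers, which pass RPM_ACTIVE and
 * RPM_SUSPENDED respectively.  For example, in the probe routine of a driver
 * whose hardware powers up in the active state (sketch, error handling
 * omitted):
 *
 *        pm_runtime_set_active(dev);
 *        pm_runtime_enable(dev);
 */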

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);

        if (dev->power.request_pending) {
                dev->power.request = RPM_REQ_NONE;
                spin_unlock_irq(&dev->power.lock);

                cancel_work_sync(&dev->power.work);

                spin_lock_irq(&dev->power.lock);
                dev->power.request_pending = false;
        }

        if (dev->power.runtime_status == RPM_SUSPENDING
            || dev->power.runtime_status == RPM_RESUMING
            || dev->power.idle_notification) {
                DEFINE_WAIT(wait);

                /* Suspend, wake-up or idle notification in progress. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_SUSPENDING
                            && dev->power.runtime_status != RPM_RESUMING
                            && !dev->power.idle_notification)
                                break;
                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
        }
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
        int retval = 0;

        pm_runtime_get_noresume(dev);
        spin_lock_irq(&dev->power.lock);

        if (dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                rpm_resume(dev, 0);
                retval = 1;
        }

        __pm_runtime_barrier(dev);

        spin_unlock_irq(&dev->power.lock);
        pm_runtime_put_noidle(dev);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
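
/*
 * As of this kernel version the system suspend core uses this in
 * dpm_prepare() (drivers/base/power/main.c) roughly as follows, treating a
 * flushed resume request as a possible wakeup event (sketch):
 *
 *        pm_runtime_get_noresume(dev);
 *        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
 *                pm_wakeup_event(dev, 0);
 */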

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
        spin_lock_irq(&dev->power.lock);

        if (dev->power.disable_depth > 0) {
                dev->power.disable_depth++;
                goto out;
        }

        /*
         * Wake up the device if there's a resume request pending, because that
         * means there probably is some I/O to process and disabling runtime PM
         * shouldn't prevent the device from processing the I/O.
         */
        if (check_resume && dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                /*
                 * Prevent suspends and idle notifications from being carried
                 * out after we have woken up the device.
                 */
                pm_runtime_get_noresume(dev);

                rpm_resume(dev, 0);

                pm_runtime_put_noidle(dev);
        }

        if (!dev->power.disable_depth++)
                __pm_runtime_barrier(dev);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (dev->power.disable_depth > 0)
                dev->power.disable_depth--;
        else
                dev_warn(dev, "Unbalanced %s!\n", __func__);

        spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
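
/*
 * pm_runtime_disable(dev) in <linux/pm_runtime.h> is
 * __pm_runtime_disable(dev, true), and calls must be balanced with
 * pm_runtime_enable().  A common pattern around code that must not race with
 * runtime PM transitions (sketch):
 *
 *        pm_runtime_disable(dev);
 *        ... reconfigure the device ...
 *        pm_runtime_enable(dev);
 */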

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is called
 * for it.
 */
void pm_runtime_forbid(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        if (!dev->power.runtime_auto)
                goto out;

        dev->power.runtime_auto = false;
        atomic_inc(&dev->power.usage_count);
        rpm_resume(dev, 0);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        if (dev->power.runtime_auto)
                goto out;

        dev->power.runtime_auto = true;
        if (atomic_dec_and_test(&dev->power.usage_count))
                rpm_idle(dev, RPM_AUTO);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);
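
/*
 * pm_runtime_forbid() and pm_runtime_allow() back the device's power/control
 * attribute in sysfs (see drivers/base/power/sysfs.c): writing "on" calls
 * pm_runtime_forbid(), writing "auto" calls pm_runtime_allow().  From user
 * space:
 *
 *        echo auto > /sys/devices/.../power/control
 */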

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        dev->power.no_callbacks = 1;
        spin_unlock_irq(&dev->power.lock);
        if (device_is_registered(dev))
                rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
        if (dev->parent)
                pm_runtime_get_sync(dev->parent);
        spin_lock_irq(&dev->power.lock);
        dev->power.irq_safe = 1;
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
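
/*
 * Usage sketch (hypothetical driver code): this is a one-way, probe-time
 * decision, since there is no interface for clearing power.irq_safe again:
 *
 *        pm_runtime_irq_safe(dev);
 *        pm_runtime_enable(dev);
 *
 * Afterwards, synchronous helpers such as pm_runtime_get_sync() may be
 * called for this device from atomic context.
 */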

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
        int delay = dev->power.autosuspend_delay;

        /* Should runtime suspend be prevented now? */
        if (dev->power.use_autosuspend && delay < 0) {

                /* If it used to be allowed then prevent it. */
                if (!old_use || old_delay >= 0) {
                        atomic_inc(&dev->power.usage_count);
                        rpm_resume(dev, 0);
                }
        }

        /* Runtime suspend should be allowed now. */
        else {

                /* If it used to be prevented then allow it. */
                if (old_use && old_delay < 0)
                        atomic_dec(&dev->power.usage_count);

                /* Maybe we can autosuspend now. */
                rpm_idle(dev, RPM_AUTO);
        }
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
        int old_delay, old_use;

        spin_lock_irq(&dev->power.lock);
        old_delay = dev->power.autosuspend_delay;
        old_use = dev->power.use_autosuspend;
        dev->power.autosuspend_delay = delay;
        update_autosuspend(dev, old_delay, old_use);
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
        int old_delay, old_use;

        spin_lock_irq(&dev->power.lock);
        old_delay = dev->power.autosuspend_delay;
        old_use = dev->power.use_autosuspend;
        dev->power.use_autosuspend = use;
        update_autosuspend(dev, old_delay, old_use);
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
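
/*
 * Typical autosuspend setup in a driver's probe path, via the
 * <linux/pm_runtime.h> wrappers (sketch; the 2000 ms delay is an arbitrary
 * example value):
 *
 *        pm_runtime_set_autosuspend_delay(dev, 2000);
 *        pm_runtime_use_autosuspend(dev);
 *        pm_runtime_enable(dev);
 *
 * and after each burst of I/O:
 *
 *        pm_runtime_mark_last_busy(dev);
 *        pm_runtime_put_autosuspend(dev);
 */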

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
        dev->power.runtime_status = RPM_SUSPENDED;
        dev->power.idle_notification = false;

        dev->power.disable_depth = 1;
        atomic_set(&dev->power.usage_count, 0);

        dev->power.runtime_error = 0;

        atomic_set(&dev->power.child_count, 0);
        pm_suspend_ignore_children(dev, false);
        dev->power.runtime_auto = true;

        dev->power.request_pending = false;
        dev->power.request = RPM_REQ_NONE;
        dev->power.deferred_resume = false;
        dev->power.accounting_timestamp = jiffies;
        INIT_WORK(&dev->power.work, pm_runtime_work);

        dev->power.timer_expires = 0;
        setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
                        (unsigned long)dev);

        init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
        __pm_runtime_disable(dev, false);

        /* Change the status back to 'suspended' to match the initial status. */
        if (dev->power.runtime_status == RPM_ACTIVE)
                pm_runtime_set_suspended(dev);
        if (dev->power.irq_safe && dev->parent)
                pm_runtime_put_sync(dev->parent);
}