/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <trace/events/rpm.h>
#include "power.h"

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting() must be called each time before the
 * runtime_status field is updated, so that the time spent in the old state
 * is accounted for correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
        unsigned long now = jiffies;
        unsigned long delta;

        delta = now - dev->power.accounting_timestamp;

        dev->power.accounting_timestamp = now;

        if (dev->power.disable_depth > 0)
                return;

        if (dev->power.runtime_status == RPM_SUSPENDED)
                dev->power.suspended_jiffies += delta;
        else
                dev->power.active_jiffies += delta;
}
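
/**
 * __update_runtime_status - Change the runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status to set.
 *
 * Account the time spent in the old status before switching to @status,
 * as update_pm_runtime_accounting() requires.
 */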
static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
        update_pm_runtime_accounting(dev);
        dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
        if (dev->power.timer_expires > 0) {
                del_timer(&dev->power.suspend_timer);
                dev->power.timer_expires = 0;
        }
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);
        /*
         * In case there's a request pending, make sure its work function will
         * return without doing anything.
         */
        dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
        int autosuspend_delay;
        long elapsed;
        unsigned long last_busy;
        unsigned long expires = 0;

        if (!dev->power.use_autosuspend)
                goto out;

        autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
        if (autosuspend_delay < 0)
                goto out;

        last_busy = ACCESS_ONCE(dev->power.last_busy);
        elapsed = jiffies - last_busy;
        if (elapsed < 0)
                goto out;       /* jiffies has wrapped around. */

        /*
         * If the autosuspend_delay is >= 1 second, align the timer by rounding
         * up to the nearest second.
         */
        expires = last_busy + msecs_to_jiffies(autosuspend_delay);
        if (autosuspend_delay >= 1000)
                expires = round_jiffies(expires);
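        /* A return value of 0 means "no expiration", so keep it nonzero. */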
        expires += !expires;
        if (elapsed >= expires - last_busy)
                expires = 0;    /* Already expired. */

 out:
        return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
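
/*
 * Illustrative sketch (not part of the original file): a driver that uses
 * autosuspend refreshes power.last_busy on each I/O completion, which feeds
 * the expiration computed above.  my_dev_io_done() is hypothetical.
 */
#if 0
static void my_dev_io_done(struct device *dev)
{
        /* Record the activity; pm_runtime_autosuspend_expiration() uses it. */
        pm_runtime_mark_last_busy(dev);

        /* Drop our reference and let the autosuspend delay run down. */
        pm_runtime_put_autosuspend(dev);
}
#endif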

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
        int retval = 0;

        if (dev->power.runtime_error)
                retval = -EINVAL;
        else if (dev->power.disable_depth > 0)
                retval = -EACCES;
        else if (atomic_read(&dev->power.usage_count) > 0)
                retval = -EAGAIN;
        else if (!pm_children_suspended(dev))
                retval = -EBUSY;

        /* Pending resume requests take precedence over suspends. */
        else if ((dev->power.deferred_resume
                        && dev->power.runtime_status == RPM_SUSPENDING)
            || (dev->power.request_pending
                        && dev->power.request == RPM_REQ_RESUME))
                retval = -EAGAIN;
        else if (dev->power.runtime_status == RPM_SUSPENDED)
                retval = 1;

        return retval;
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int retval;

        if (dev->power.irq_safe)
                spin_unlock(&dev->power.lock);
        else
                spin_unlock_irq(&dev->power.lock);

        retval = cb(dev);

        if (dev->power.irq_safe)
                spin_lock(&dev->power.lock);
        else
                spin_lock_irq(&dev->power.lock);

        return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
        int (*callback)(struct device *);
        int retval;

        trace_rpm_idle(dev, rpmflags);
        retval = rpm_check_suspend_allowed(dev);
        if (retval < 0)
                ;       /* Conditions are wrong. */

        /* Idle notifications are allowed only in the RPM_ACTIVE state. */
        else if (dev->power.runtime_status != RPM_ACTIVE)
                retval = -EAGAIN;

        /*
         * Any pending request other than an idle notification takes
         * precedence over us, except that the timer may be running.
         */
        else if (dev->power.request_pending &&
            dev->power.request > RPM_REQ_IDLE)
                retval = -EAGAIN;

        /* Act as though RPM_NOWAIT is always set. */
        else if (dev->power.idle_notification)
                retval = -EINPROGRESS;
        if (retval)
                goto out;

        /* Pending requests need to be canceled. */
        dev->power.request = RPM_REQ_NONE;

        if (dev->power.no_callbacks) {
                /* Assume ->runtime_idle() callback would have suspended. */
                retval = rpm_suspend(dev, rpmflags);
                goto out;
        }

        /* Carry out an asynchronous or a synchronous idle notification. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = RPM_REQ_IDLE;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                goto out;
        }

        dev->power.idle_notification = true;

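        /*
         * Look the callback up in order of precedence: PM domain first,
         * then device type, device class and bus type.
         */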
        if (dev->pm_domain)
                callback = dev->pm_domain->ops.runtime_idle;
        else if (dev->type && dev->type->pm)
                callback = dev->type->pm->runtime_idle;
        else if (dev->class && dev->class->pm)
                callback = dev->class->pm->runtime_idle;
        else if (dev->bus && dev->bus->pm)
                callback = dev->bus->pm->runtime_idle;
        else
                callback = NULL;

        if (callback)
                __rpm_callback(callback, dev);

        dev->power.idle_notification = false;
        wake_up_all(&dev->power.wait_queue);

 out:
        trace_rpm_return_int(dev, _THIS_IP_, retval);
        return retval;
}

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
        int retval;

        if (!cb)
                return -ENOSYS;

        retval = __rpm_callback(cb, dev);

        dev->power.runtime_error = retval;
        return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  If
 * ->runtime_suspend() succeeds and a deferred resume was requested while
 * the callback was running, carry out that resume; otherwise send an idle
 * notification for the device's parent (unless the parent's
 * power.ignore_children flag or the device's power.irq_safe flag is set).
 * If ->runtime_suspend() failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 * flag is set and the next autosuspend-delay expiration time is in the
 * future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int (*callback)(struct device *);
        struct device *parent = NULL;
        int retval;

        trace_rpm_suspend(dev, rpmflags);

 repeat:
        retval = rpm_check_suspend_allowed(dev);

        if (retval < 0)
                ;       /* Conditions are wrong. */

        /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
        else if (dev->power.runtime_status == RPM_RESUMING &&
            !(rpmflags & RPM_ASYNC))
                retval = -EAGAIN;
        if (retval)
                goto out;

        /* If the autosuspend_delay time hasn't expired yet, reschedule. */
        if ((rpmflags & RPM_AUTO)
            && dev->power.runtime_status != RPM_SUSPENDING) {
                unsigned long expires = pm_runtime_autosuspend_expiration(dev);

                if (expires != 0) {
                        /* Pending requests need to be canceled. */
                        dev->power.request = RPM_REQ_NONE;

                        /*
                         * Optimization: If the timer is already running and is
                         * set to expire at or before the autosuspend delay,
                         * avoid the overhead of resetting it.  Just let it
                         * expire; pm_suspend_timer_fn() will take care of the
                         * rest.
                         */
                        if (!(dev->power.timer_expires && time_before_eq(
                            dev->power.timer_expires, expires))) {
                                dev->power.timer_expires = expires;
                                mod_timer(&dev->power.suspend_timer, expires);
                        }
                        dev->power.timer_autosuspends = 1;
                        goto out;
                }
        }

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        if (dev->power.runtime_status == RPM_SUSPENDING) {
                DEFINE_WAIT(wait);

                if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
                        retval = -EINPROGRESS;
                        goto out;
                }

                if (dev->power.irq_safe) {
                        spin_unlock(&dev->power.lock);

                        cpu_relax();

                        spin_lock(&dev->power.lock);
                        goto repeat;
                }

                /* Wait for the other suspend running in parallel with us. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_SUSPENDING)
                                break;

                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
                goto repeat;
        }

        dev->power.deferred_resume = false;
        if (dev->power.no_callbacks)
                goto no_callback;       /* Assume success. */

        /* Carry out an asynchronous or a synchronous suspend. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = (rpmflags & RPM_AUTO) ?
                    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                goto out;
        }

        __update_runtime_status(dev, RPM_SUSPENDING);

        if (dev->pm_domain)
                callback = dev->pm_domain->ops.runtime_suspend;
        else if (dev->type && dev->type->pm)
                callback = dev->type->pm->runtime_suspend;
        else if (dev->class && dev->class->pm)
                callback = dev->class->pm->runtime_suspend;
        else if (dev->bus && dev->bus->pm)
                callback = dev->bus->pm->runtime_suspend;
        else
                callback = NULL;

        retval = rpm_callback(callback, dev);
        if (retval) {
                __update_runtime_status(dev, RPM_ACTIVE);
                dev->power.deferred_resume = false;
                if (retval == -EAGAIN || retval == -EBUSY) {
                        dev->power.runtime_error = 0;

                        /*
                         * If the callback routine failed an autosuspend, and
                         * if the last_busy time has been updated so that there
                         * is a new autosuspend expiration time, automatically
                         * reschedule another autosuspend.
                         */
                        if ((rpmflags & RPM_AUTO) &&
                            pm_runtime_autosuspend_expiration(dev) != 0)
                                goto repeat;
                } else {
                        pm_runtime_cancel_pending(dev);
                }
                wake_up_all(&dev->power.wait_queue);
                goto out;
        }
 no_callback:
        __update_runtime_status(dev, RPM_SUSPENDED);
        pm_runtime_deactivate_timer(dev);

        if (dev->parent) {
                parent = dev->parent;
                atomic_add_unless(&parent->power.child_count, -1, 0);
        }
        wake_up_all(&dev->power.wait_queue);

        if (dev->power.deferred_resume) {
                rpm_resume(dev, 0);
                retval = -EAGAIN;
                goto out;
        }

        /* Maybe the parent is now able to suspend. */
        if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
                spin_unlock(&dev->power.lock);

                spin_lock(&parent->power.lock);
                rpm_idle(parent, RPM_ASYNC);
                spin_unlock(&parent->power.lock);

                spin_lock(&dev->power.lock);
        }

 out:
        trace_rpm_return_int(dev, _THIS_IP_, retval);

        return retval;
}

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int (*callback)(struct device *);
        struct device *parent = NULL;
        int retval = 0;

        trace_rpm_resume(dev, rpmflags);

 repeat:
        if (dev->power.runtime_error)
                retval = -EINVAL;
        else if (dev->power.disable_depth > 0)
                retval = -EACCES;
        if (retval)
                goto out;

        /*
         * Other scheduled or pending requests need to be canceled.  Small
         * optimization: If an autosuspend timer is running, leave it running
         * rather than cancelling it now only to restart it again in the near
         * future.
         */
        dev->power.request = RPM_REQ_NONE;
        if (!dev->power.timer_autosuspends)
                pm_runtime_deactivate_timer(dev);

        if (dev->power.runtime_status == RPM_ACTIVE) {
                retval = 1;
                goto out;
        }

        if (dev->power.runtime_status == RPM_RESUMING
            || dev->power.runtime_status == RPM_SUSPENDING) {
                DEFINE_WAIT(wait);

                if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
                        if (dev->power.runtime_status == RPM_SUSPENDING)
                                dev->power.deferred_resume = true;
                        else
                                retval = -EINPROGRESS;
                        goto out;
                }

                if (dev->power.irq_safe) {
                        spin_unlock(&dev->power.lock);

                        cpu_relax();

                        spin_lock(&dev->power.lock);
                        goto repeat;
                }

                /* Wait for the operation carried out in parallel with us. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_RESUMING
                            && dev->power.runtime_status != RPM_SUSPENDING)
                                break;

                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
                goto repeat;
        }

        /*
         * See if we can skip waking up the parent.  This is safe only if
         * power.no_callbacks is set, because otherwise we don't know whether
         * the resume will actually succeed.
         */
        if (dev->power.no_callbacks && !parent && dev->parent) {
                spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
                if (dev->parent->power.disable_depth > 0
                    || dev->parent->power.ignore_children
                    || dev->parent->power.runtime_status == RPM_ACTIVE) {
                        atomic_inc(&dev->parent->power.child_count);
                        spin_unlock(&dev->parent->power.lock);
                        goto no_callback;       /* Assume success. */
                }
                spin_unlock(&dev->parent->power.lock);
        }

        /* Carry out an asynchronous or a synchronous resume. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = RPM_REQ_RESUME;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                retval = 0;
                goto out;
        }

        if (!parent && dev->parent) {
                /*
                 * Increment the parent's usage counter and resume it if
                 * necessary.  Not needed if dev is irq-safe; then the
                 * parent is permanently resumed.
                 */
                parent = dev->parent;
                if (dev->power.irq_safe)
                        goto skip_parent;
                spin_unlock(&dev->power.lock);

                pm_runtime_get_noresume(parent);

                spin_lock(&parent->power.lock);
                /*
                 * We can resume if the parent's runtime PM is disabled or it
                 * is set to ignore children.
                 */
                if (!parent->power.disable_depth
                    && !parent->power.ignore_children) {
                        rpm_resume(parent, 0);
                        if (parent->power.runtime_status != RPM_ACTIVE)
                                retval = -EBUSY;
                }
                spin_unlock(&parent->power.lock);

                spin_lock(&dev->power.lock);
                if (retval)
                        goto out;
                goto repeat;
        }
 skip_parent:

        if (dev->power.no_callbacks)
                goto no_callback;       /* Assume success. */

        __update_runtime_status(dev, RPM_RESUMING);

        if (dev->pm_domain)
                callback = dev->pm_domain->ops.runtime_resume;
        else if (dev->type && dev->type->pm)
                callback = dev->type->pm->runtime_resume;
        else if (dev->class && dev->class->pm)
                callback = dev->class->pm->runtime_resume;
        else if (dev->bus && dev->bus->pm)
                callback = dev->bus->pm->runtime_resume;
        else
                callback = NULL;

        retval = rpm_callback(callback, dev);
        if (retval) {
                __update_runtime_status(dev, RPM_SUSPENDED);
                pm_runtime_cancel_pending(dev);
        } else {
 no_callback:
                __update_runtime_status(dev, RPM_ACTIVE);
                if (parent)
                        atomic_inc(&parent->power.child_count);
        }
        wake_up_all(&dev->power.wait_queue);

        if (!retval)
                rpm_idle(dev, RPM_ASYNC);

 out:
        if (parent && !dev->power.irq_safe) {
                spin_unlock_irq(&dev->power.lock);

                pm_runtime_put(parent);

                spin_lock_irq(&dev->power.lock);
        }

        trace_rpm_return_int(dev, _THIS_IP_, retval);

        return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
        struct device *dev = container_of(work, struct device, power.work);
        enum rpm_request req;

        spin_lock_irq(&dev->power.lock);

        if (!dev->power.request_pending)
                goto out;

        req = dev->power.request;
        dev->power.request = RPM_REQ_NONE;
        dev->power.request_pending = false;

        switch (req) {
        case RPM_REQ_NONE:
                break;
        case RPM_REQ_IDLE:
                rpm_idle(dev, RPM_NOWAIT);
                break;
        case RPM_REQ_SUSPEND:
                rpm_suspend(dev, RPM_NOWAIT);
                break;
        case RPM_REQ_AUTOSUSPEND:
                rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
                break;
        case RPM_REQ_RESUME:
                rpm_resume(dev, RPM_NOWAIT);
                break;
        }

 out:
        spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
        struct device *dev = (struct device *)data;
        unsigned long flags;
        unsigned long expires;

        spin_lock_irqsave(&dev->power.lock, flags);

        expires = dev->power.timer_expires;
        /* If 'expires' is after 'jiffies' we've been called too early. */
        if (expires > 0 && !time_after(expires, jiffies)) {
                dev->power.timer_expires = 0;
                rpm_suspend(dev, dev->power.timer_autosuspends ?
                    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
        }

        spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
        unsigned long flags;
        int retval;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (!delay) {
                retval = rpm_suspend(dev, RPM_ASYNC);
                goto out;
        }

        retval = rpm_check_suspend_allowed(dev);
        if (retval)
                goto out;

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
        dev->power.timer_expires += !dev->power.timer_expires;
        dev->power.timer_autosuspends = 0;
        mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
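
/*
 * Illustrative sketch (not part of the original file): submitting a delayed
 * suspend request from a hypothetical driver.  The resulting suspend is
 * asynchronous; pm_suspend_timer_fn() queues it once the delay elapses.
 */
#if 0
static void my_dev_request_suspend_later(struct device *dev)
{
        int error;

        /* Ask for a suspend attempt in 500 ms, unless activity intervenes. */
        error = pm_schedule_suspend(dev, 500);
        if (error < 0)
                dev_dbg(dev, "suspend not scheduled: %d\n", error);
}
#endif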

/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

        if (rpmflags & RPM_GET_PUT) {
                if (!atomic_dec_and_test(&dev->power.usage_count))
                        return 0;
        }

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_idle(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);
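
/*
 * For reference (a sketch, not a quote of the header): the pm_runtime.h
 * helpers are thin wrappers around the entry points in this file, roughly
 * along these lines.  See include/linux/pm_runtime.h for the authoritative
 * definitions.
 */
#if 0
static inline int pm_runtime_idle(struct device *dev)
{
        return __pm_runtime_idle(dev, 0);
}

static inline int pm_request_idle(struct device *dev)
{
        return __pm_runtime_idle(dev, RPM_ASYNC);
}

static inline int pm_runtime_put(struct device *dev)
{
        return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
}
#endif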

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

        if (rpmflags & RPM_GET_PUT) {
                if (!atomic_dec_and_test(&dev->power.usage_count))
                        return 0;
        }

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_suspend(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

        if (rpmflags & RPM_GET_PUT)
                atomic_inc(&dev->power.usage_count);

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_resume(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
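
/*
 * Illustrative sketch (not part of the original file): the canonical get/put
 * pairing around device use, via the pm_runtime.h wrappers of the entry
 * points above.  my_dev_xmit() is hypothetical.
 */
#if 0
static int my_dev_xmit(struct device *dev)
{
        int error;

        /* Take a usage reference and resume the device synchronously. */
        error = pm_runtime_get_sync(dev);
        if (error < 0) {
                /* Resume failed; drop the reference we just took. */
                pm_runtime_put_noidle(dev);
                return error;
        }

        /* ... perform the I/O while the device is RPM_ACTIVE ... */

        /* Drop the reference; an async idle notification may follow. */
        pm_runtime_put(dev);
        return 0;
}
#endif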

/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
        struct device *parent = dev->parent;
        unsigned long flags;
        bool notify_parent = false;
        int error = 0;

        if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
                return -EINVAL;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (!dev->power.runtime_error && !dev->power.disable_depth) {
                error = -EAGAIN;
                goto out;
        }

        if (dev->power.runtime_status == status)
                goto out_set;

        if (status == RPM_SUSPENDED) {
                /* It is always possible to set the status to 'suspended'. */
                if (parent) {
                        atomic_add_unless(&parent->power.child_count, -1, 0);
                        notify_parent = !parent->power.ignore_children;
                }
                goto out_set;
        }

        if (parent) {
                spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

                /*
                 * It is invalid to put an active child under a parent that is
                 * not active, has runtime PM enabled and the
                 * 'power.ignore_children' flag unset.
                 */
                if (!parent->power.disable_depth
                    && !parent->power.ignore_children
                    && parent->power.runtime_status != RPM_ACTIVE)
                        error = -EBUSY;
                else if (dev->power.runtime_status == RPM_SUSPENDED)
                        atomic_inc(&parent->power.child_count);

                spin_unlock(&parent->power.lock);

                if (error)
                        goto out;
        }

 out_set:
        __update_runtime_status(dev, status);
        dev->power.runtime_error = 0;
 out:
        spin_unlock_irqrestore(&dev->power.lock, flags);

        if (notify_parent)
                pm_request_idle(parent);

        return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
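
/*
 * Illustrative sketch (not part of the original file): a probe routine for
 * hardware that powers up active usually declares that state before enabling
 * runtime PM, so the core's bookkeeping matches reality.  my_dev_probe() is
 * hypothetical.
 */
#if 0
static int my_dev_probe(struct device *dev)
{
        /* The hardware comes out of reset powered on. */
        pm_runtime_set_active(dev);     /* __pm_runtime_set_status(dev, RPM_ACTIVE) */
        pm_runtime_enable(dev);
        return 0;
}
#endif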

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);

        if (dev->power.request_pending) {
                dev->power.request = RPM_REQ_NONE;
                spin_unlock_irq(&dev->power.lock);

                cancel_work_sync(&dev->power.work);

                spin_lock_irq(&dev->power.lock);
                dev->power.request_pending = false;
        }

        if (dev->power.runtime_status == RPM_SUSPENDING
            || dev->power.runtime_status == RPM_RESUMING
            || dev->power.idle_notification) {
                DEFINE_WAIT(wait);

                /* Suspend, wake-up or idle notification in progress. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_SUSPENDING
                            && dev->power.runtime_status != RPM_RESUMING
                            && !dev->power.idle_notification)
                                break;
                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
        }
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and, if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
        int retval = 0;

        pm_runtime_get_noresume(dev);
        spin_lock_irq(&dev->power.lock);

        if (dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                rpm_resume(dev, 0);
                retval = 1;
        }

        __pm_runtime_barrier(dev);

        spin_unlock_irq(&dev->power.lock);
        pm_runtime_put_noidle(dev);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and, if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
        spin_lock_irq(&dev->power.lock);

        if (dev->power.disable_depth > 0) {
                dev->power.disable_depth++;
                goto out;
        }

        /*
         * Wake up the device if there's a resume request pending, because that
         * means there probably is some I/O to process and disabling runtime PM
         * shouldn't prevent the device from processing the I/O.
         */
        if (check_resume && dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                /*
                 * Prevent suspends and idle notifications from being carried
                 * out after we have woken up the device.
                 */
                pm_runtime_get_noresume(dev);

                rpm_resume(dev, 0);

                pm_runtime_put_noidle(dev);
        }

        if (!dev->power.disable_depth++)
                __pm_runtime_barrier(dev);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (dev->power.disable_depth > 0)
                dev->power.disable_depth--;
        else
                dev_warn(dev, "Unbalanced %s!\n", __func__);

        spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
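
/*
 * Illustrative sketch (not part of the original file): disable_depth is a
 * counter, so every pm_runtime_disable() must be balanced by exactly one
 * pm_runtime_enable().  A typical remove() undoes the probe-time enable.
 * my_dev_remove() is hypothetical.
 */
#if 0
static int my_dev_remove(struct device *dev)
{
        pm_runtime_disable(dev);        /* __pm_runtime_disable(dev, true) */
        /* ... tear the device down ... */
        return 0;
}
#endif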

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is called
 * for it.
 */
void pm_runtime_forbid(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        if (!dev->power.runtime_auto)
                goto out;

        dev->power.runtime_auto = false;
        atomic_inc(&dev->power.usage_count);
        rpm_resume(dev, 0);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        if (dev->power.runtime_auto)
                goto out;

        dev->power.runtime_auto = true;
        if (atomic_dec_and_test(&dev->power.usage_count))
                rpm_idle(dev, RPM_AUTO);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        dev->power.no_callbacks = 1;
        spin_unlock_irq(&dev->power.lock);
        if (device_is_registered(dev))
                rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle.
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
        if (dev->parent)
                pm_runtime_get_sync(dev->parent);
        spin_lock_irq(&dev->power.lock);
        dev->power.irq_safe = 1;
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
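
/*
 * Illustrative sketch (not part of the original file): a driver whose
 * ->runtime_suspend()/->runtime_resume() callbacks only touch registers can
 * declare itself irq-safe and then use the synchronous calls from its
 * interrupt handler.  Both functions below are hypothetical.
 */
#if 0
static irqreturn_t my_dev_irq(int irq, void *data)
{
        struct device *dev = data;

        pm_runtime_get_sync(dev);       /* legal in atomic context here */
        /* ... handle the interrupt ... */
        pm_runtime_put(dev);
        return IRQ_HANDLED;
}

static int my_dev_probe(struct device *dev)
{
        pm_runtime_irq_safe(dev);       /* callbacks run with the lock held */
        pm_runtime_enable(dev);
        return 0;
}
#endif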

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
        int delay = dev->power.autosuspend_delay;

        /* Should runtime suspend be prevented now? */
        if (dev->power.use_autosuspend && delay < 0) {

                /* If it used to be allowed then prevent it. */
                if (!old_use || old_delay >= 0) {
                        atomic_inc(&dev->power.usage_count);
                        rpm_resume(dev, 0);
                }
        }

        /* Runtime suspend should be allowed now. */
        else {

                /* If it used to be prevented then allow it. */
                if (old_use && old_delay < 0)
                        atomic_dec(&dev->power.usage_count);

                /* Maybe we can autosuspend now. */
                rpm_idle(dev, RPM_AUTO);
        }
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
        int old_delay, old_use;

        spin_lock_irq(&dev->power.lock);
        old_delay = dev->power.autosuspend_delay;
        old_use = dev->power.use_autosuspend;
        dev->power.autosuspend_delay = delay;
        update_autosuspend(dev, old_delay, old_use);
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
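
/*
 * Illustrative sketch (not part of the original file): the usual autosuspend
 * setup in a hypothetical probe routine.  Afterwards the device suspends
 * roughly two seconds after it was last marked busy.
 */
#if 0
static int my_dev_probe(struct device *dev)
{
        pm_runtime_set_autosuspend_delay(dev, 2000);    /* delay in ms */
        pm_runtime_use_autosuspend(dev);
        pm_runtime_set_active(dev);
        pm_runtime_enable(dev);
        return 0;
}
#endif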

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
        int old_delay, old_use;

        spin_lock_irq(&dev->power.lock);
        old_delay = dev->power.autosuspend_delay;
        old_use = dev->power.use_autosuspend;
        dev->power.use_autosuspend = use;
        update_autosuspend(dev, old_delay, old_use);
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
        dev->power.runtime_status = RPM_SUSPENDED;
        dev->power.idle_notification = false;

        dev->power.disable_depth = 1;
        atomic_set(&dev->power.usage_count, 0);

        dev->power.runtime_error = 0;

        atomic_set(&dev->power.child_count, 0);
        pm_suspend_ignore_children(dev, false);
        dev->power.runtime_auto = true;

        dev->power.request_pending = false;
        dev->power.request = RPM_REQ_NONE;
        dev->power.deferred_resume = false;
        dev->power.accounting_timestamp = jiffies;
        INIT_WORK(&dev->power.work, pm_runtime_work);

        dev->power.timer_expires = 0;
        setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
                        (unsigned long)dev);

        init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
        __pm_runtime_disable(dev, false);

        /* Change the status back to 'suspended' to match the initial status. */
        if (dev->power.runtime_status == RPM_ACTIVE)
                pm_runtime_set_suspended(dev);
        if (dev->power.irq_safe && dev->parent)
                pm_runtime_put_sync(dev->parent);
}