/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <trace/events/rpm.h>
#include "power.h"

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
        unsigned long now = jiffies;
        unsigned long delta;

        delta = now - dev->power.accounting_timestamp;

        dev->power.accounting_timestamp = now;

        if (dev->power.disable_depth > 0)
                return;

        if (dev->power.runtime_status == RPM_SUSPENDED)
                dev->power.suspended_jiffies += delta;
        else
                dev->power.active_jiffies += delta;
}
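
/*
 * Illustrative sketch (not part of this file): the active_jiffies and
 * suspended_jiffies counters maintained above are what the
 * runtime_active_time and runtime_suspended_time sysfs attributes report,
 * converted to milliseconds roughly along these lines:
 *
 *	unsigned int ms;
 *
 *	spin_lock_irq(&dev->power.lock);
 *	update_pm_runtime_accounting(dev);
 *	ms = jiffies_to_msecs(dev->power.active_jiffies);
 *	spin_unlock_irq(&dev->power.lock);
 */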

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
        update_pm_runtime_accounting(dev);
        dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
        if (dev->power.timer_expires > 0) {
                del_timer(&dev->power.suspend_timer);
                dev->power.timer_expires = 0;
        }
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);
        /*
         * In case there's a request pending, make sure its work function will
         * return without doing anything.
         */
        dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
        int autosuspend_delay;
        long elapsed;
        unsigned long last_busy;
        unsigned long expires = 0;

        if (!dev->power.use_autosuspend)
                goto out;

        autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
        if (autosuspend_delay < 0)
                goto out;

        last_busy = ACCESS_ONCE(dev->power.last_busy);
        elapsed = jiffies - last_busy;
        if (elapsed < 0)
                goto out;       /* jiffies has wrapped around. */

        /*
         * If the autosuspend_delay is >= 1 second, align the timer by rounding
         * up to the nearest second.
         */
        expires = last_busy + msecs_to_jiffies(autosuspend_delay);
        if (autosuspend_delay >= 1000)
                expires = round_jiffies(expires);
        expires += !expires;
        if (elapsed >= expires - last_busy)
                expires = 0;    /* Already expired. */

 out:
        return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
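
/*
 * Typical driver-side pattern feeding this calculation (hypothetical
 * example): mark the device busy when an I/O operation completes, then
 * drop the usage count with the autosuspend variant, so the actual
 * suspend is deferred until the expiration time computed above:
 *
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 */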

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
        int retval = 0;

        if (dev->power.runtime_error)
                retval = -EINVAL;
        else if (dev->power.disable_depth > 0)
                retval = -EACCES;
        else if (atomic_read(&dev->power.usage_count) > 0)
                retval = -EAGAIN;
        else if (!pm_children_suspended(dev))
                retval = -EBUSY;

        /* Pending resume requests take precedence over suspends. */
        else if ((dev->power.deferred_resume
                        && dev->power.runtime_status == RPM_SUSPENDING)
            || (dev->power.request_pending
                        && dev->power.request == RPM_REQ_RESUME))
                retval = -EAGAIN;
        else if (dev->power.runtime_status == RPM_SUSPENDED)
                retval = 1;

        return retval;
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int retval;

        if (dev->power.irq_safe)
                spin_unlock(&dev->power.lock);
        else
                spin_unlock_irq(&dev->power.lock);

        retval = cb(dev);

        if (dev->power.irq_safe)
                spin_lock(&dev->power.lock);
        else
                spin_lock_irq(&dev->power.lock);

        return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
        int (*callback)(struct device *);
        int retval;

        trace_rpm_idle(dev, rpmflags);
        retval = rpm_check_suspend_allowed(dev);
        if (retval < 0)
                ;       /* Conditions are wrong. */

        /* Idle notifications are allowed only in the RPM_ACTIVE state. */
        else if (dev->power.runtime_status != RPM_ACTIVE)
                retval = -EAGAIN;

        /*
         * Any pending request other than an idle notification takes
         * precedence over us, except that the timer may be running.
         */
        else if (dev->power.request_pending &&
            dev->power.request > RPM_REQ_IDLE)
                retval = -EAGAIN;

        /* Act as though RPM_NOWAIT is always set. */
        else if (dev->power.idle_notification)
                retval = -EINPROGRESS;
        if (retval)
                goto out;

        /* Pending requests need to be canceled. */
        dev->power.request = RPM_REQ_NONE;

        if (dev->power.no_callbacks) {
                /* Assume ->runtime_idle() callback would have suspended. */
                retval = rpm_suspend(dev, rpmflags);
                goto out;
        }

        /* Carry out an asynchronous or a synchronous idle notification. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = RPM_REQ_IDLE;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                goto out;
        }

        dev->power.idle_notification = true;

        if (dev->pm_domain)
                callback = dev->pm_domain->ops.runtime_idle;
        else if (dev->type && dev->type->pm)
                callback = dev->type->pm->runtime_idle;
        else if (dev->class && dev->class->pm)
                callback = dev->class->pm->runtime_idle;
        else if (dev->bus && dev->bus->pm)
                callback = dev->bus->pm->runtime_idle;
        else
                callback = NULL;

        if (callback)
                __rpm_callback(callback, dev);

        dev->power.idle_notification = false;
        wake_up_all(&dev->power.wait_queue);

 out:
        trace_rpm_return_int(dev, _THIS_IP_, retval);
        return retval;
}
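
/*
 * A minimal ->runtime_idle() callback of the kind looked up above
 * (hypothetical driver code; the "foo" name is illustrative): the usual
 * convention is to request a suspend, either immediately or after a
 * delay, e.g. 100 ms here:
 *
 *	static int foo_runtime_idle(struct device *dev)
 *	{
 *		pm_schedule_suspend(dev, 100);
 *		return 0;
 *	}
 */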

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
        int retval;

        if (!cb)
                return -ENOSYS;

        retval = __rpm_callback(cb, dev);

        dev->power.runtime_error = retval;
        return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  If
 * ->runtime_suspend() succeeds and a deferred resume was requested while
 * the callback was running, carry the resume out; otherwise send an idle
 * notification to the device's parent (unless the parent's
 * power.ignore_children flag or the device's power.irq_safe flag is set).
 * If ->runtime_suspend() fails with -EAGAIN or -EBUSY, and the RPM_AUTO
 * flag is set and the next autosuspend-delay expiration time is in the
 * future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int (*callback)(struct device *);
        struct device *parent = NULL;
        int retval;

        trace_rpm_suspend(dev, rpmflags);

 repeat:
        retval = rpm_check_suspend_allowed(dev);

        if (retval < 0)
                ;       /* Conditions are wrong. */

        /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
        else if (dev->power.runtime_status == RPM_RESUMING &&
            !(rpmflags & RPM_ASYNC))
                retval = -EAGAIN;
        if (retval)
                goto out;

        /* If the autosuspend_delay time hasn't expired yet, reschedule. */
        if ((rpmflags & RPM_AUTO)
            && dev->power.runtime_status != RPM_SUSPENDING) {
                unsigned long expires = pm_runtime_autosuspend_expiration(dev);

                if (expires != 0) {
                        /* Pending requests need to be canceled. */
                        dev->power.request = RPM_REQ_NONE;

                        /*
                         * Optimization: If the timer is already running and is
                         * set to expire at or before the autosuspend delay,
                         * avoid the overhead of resetting it.  Just let it
                         * expire; pm_suspend_timer_fn() will take care of the
                         * rest.
                         */
                        if (!(dev->power.timer_expires && time_before_eq(
                            dev->power.timer_expires, expires))) {
                                dev->power.timer_expires = expires;
                                mod_timer(&dev->power.suspend_timer, expires);
                        }
                        dev->power.timer_autosuspends = 1;
                        goto out;
                }
        }

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        if (dev->power.runtime_status == RPM_SUSPENDING) {
                DEFINE_WAIT(wait);

                if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
                        retval = -EINPROGRESS;
                        goto out;
                }

                if (dev->power.irq_safe) {
                        spin_unlock(&dev->power.lock);

                        cpu_relax();

                        spin_lock(&dev->power.lock);
                        goto repeat;
                }

                /* Wait for the other suspend running in parallel with us. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_SUSPENDING)
                                break;

                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
                goto repeat;
        }

        if (dev->power.no_callbacks)
                goto no_callback;       /* Assume success. */

        /* Carry out an asynchronous or a synchronous suspend. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = (rpmflags & RPM_AUTO) ?
                    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                goto out;
        }

        __update_runtime_status(dev, RPM_SUSPENDING);

        if (dev->pm_domain)
                callback = dev->pm_domain->ops.runtime_suspend;
        else if (dev->type && dev->type->pm)
                callback = dev->type->pm->runtime_suspend;
        else if (dev->class && dev->class->pm)
                callback = dev->class->pm->runtime_suspend;
        else if (dev->bus && dev->bus->pm)
                callback = dev->bus->pm->runtime_suspend;
        else
                callback = NULL;

        retval = rpm_callback(callback, dev);
        if (retval) {
                __update_runtime_status(dev, RPM_ACTIVE);
                dev->power.deferred_resume = false;
                if (retval == -EAGAIN || retval == -EBUSY) {
                        dev->power.runtime_error = 0;

                        /*
                         * If the callback routine failed an autosuspend, and
                         * if the last_busy time has been updated so that there
                         * is a new autosuspend expiration time, automatically
                         * reschedule another autosuspend.
                         */
                        if ((rpmflags & RPM_AUTO) &&
                            pm_runtime_autosuspend_expiration(dev) != 0)
                                goto repeat;
                } else {
                        pm_runtime_cancel_pending(dev);
                }
                wake_up_all(&dev->power.wait_queue);
                goto out;
        }
 no_callback:
        __update_runtime_status(dev, RPM_SUSPENDED);
        pm_runtime_deactivate_timer(dev);

        if (dev->parent) {
                parent = dev->parent;
                atomic_add_unless(&parent->power.child_count, -1, 0);
        }
        wake_up_all(&dev->power.wait_queue);

        if (dev->power.deferred_resume) {
                dev->power.deferred_resume = false;
                rpm_resume(dev, 0);
                retval = -EAGAIN;
                goto out;
        }

        /* Maybe the parent is now able to suspend. */
        if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
                spin_unlock(&dev->power.lock);

                spin_lock(&parent->power.lock);
                rpm_idle(parent, RPM_ASYNC);
                spin_unlock(&parent->power.lock);

                spin_lock(&dev->power.lock);
        }

 out:
        trace_rpm_return_int(dev, _THIS_IP_, retval);

        return retval;
}
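
/*
 * Sketch of how a ->runtime_suspend() callback interacts with the error
 * handling above (hypothetical driver code; foo_hw_busy() and
 * foo_hw_power_down() are assumed helpers): returning -EBUSY or -EAGAIN
 * leaves the device RPM_ACTIVE and, for RPM_AUTO requests, lets a new
 * autosuspend be scheduled once last_busy has been updated:
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		if (foo_hw_busy(dev))
 *			return -EBUSY;
 *		foo_hw_power_down(dev);
 *		return 0;
 *	}
 */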

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int (*callback)(struct device *);
        struct device *parent = NULL;
        int retval = 0;

        trace_rpm_resume(dev, rpmflags);

 repeat:
        if (dev->power.runtime_error)
                retval = -EINVAL;
        else if (dev->power.disable_depth > 0)
                retval = -EACCES;
        if (retval)
                goto out;

        /*
         * Other scheduled or pending requests need to be canceled.  Small
         * optimization: If an autosuspend timer is running, leave it running
         * rather than cancelling it now only to restart it again in the near
         * future.
         */
        dev->power.request = RPM_REQ_NONE;
        if (!dev->power.timer_autosuspends)
                pm_runtime_deactivate_timer(dev);

        if (dev->power.runtime_status == RPM_ACTIVE) {
                retval = 1;
                goto out;
        }

        if (dev->power.runtime_status == RPM_RESUMING
            || dev->power.runtime_status == RPM_SUSPENDING) {
                DEFINE_WAIT(wait);

                if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
                        if (dev->power.runtime_status == RPM_SUSPENDING)
                                dev->power.deferred_resume = true;
                        else
                                retval = -EINPROGRESS;
                        goto out;
                }

                if (dev->power.irq_safe) {
                        spin_unlock(&dev->power.lock);

                        cpu_relax();

                        spin_lock(&dev->power.lock);
                        goto repeat;
                }

                /* Wait for the operation carried out in parallel with us. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_RESUMING
                            && dev->power.runtime_status != RPM_SUSPENDING)
                                break;

                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
                goto repeat;
        }

        /*
         * See if we can skip waking up the parent.  This is safe only if
         * power.no_callbacks is set, because otherwise we don't know whether
         * the resume will actually succeed.
         */
        if (dev->power.no_callbacks && !parent && dev->parent) {
                spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
                if (dev->parent->power.disable_depth > 0
                    || dev->parent->power.ignore_children
                    || dev->parent->power.runtime_status == RPM_ACTIVE) {
                        atomic_inc(&dev->parent->power.child_count);
                        spin_unlock(&dev->parent->power.lock);
                        retval = 1;
                        goto no_callback;       /* Assume success. */
                }
                spin_unlock(&dev->parent->power.lock);
        }

        /* Carry out an asynchronous or a synchronous resume. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = RPM_REQ_RESUME;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                retval = 0;
                goto out;
        }

        if (!parent && dev->parent) {
                /*
                 * Increment the parent's usage counter and resume it if
                 * necessary.  Not needed if dev is irq-safe; then the
                 * parent is permanently resumed.
                 */
                parent = dev->parent;
                if (dev->power.irq_safe)
                        goto skip_parent;
                spin_unlock(&dev->power.lock);

                pm_runtime_get_noresume(parent);

                spin_lock(&parent->power.lock);
                /*
                 * We can resume if the parent's runtime PM is disabled or it
                 * is set to ignore children.
                 */
                if (!parent->power.disable_depth
                    && !parent->power.ignore_children) {
                        rpm_resume(parent, 0);
                        if (parent->power.runtime_status != RPM_ACTIVE)
                                retval = -EBUSY;
                }
                spin_unlock(&parent->power.lock);

                spin_lock(&dev->power.lock);
                if (retval)
                        goto out;
                goto repeat;
        }
 skip_parent:

        if (dev->power.no_callbacks)
                goto no_callback;       /* Assume success. */

        __update_runtime_status(dev, RPM_RESUMING);

        if (dev->pm_domain)
                callback = dev->pm_domain->ops.runtime_resume;
        else if (dev->type && dev->type->pm)
                callback = dev->type->pm->runtime_resume;
        else if (dev->class && dev->class->pm)
                callback = dev->class->pm->runtime_resume;
        else if (dev->bus && dev->bus->pm)
                callback = dev->bus->pm->runtime_resume;
        else
                callback = NULL;

        retval = rpm_callback(callback, dev);
        if (retval) {
                __update_runtime_status(dev, RPM_SUSPENDED);
                pm_runtime_cancel_pending(dev);
        } else {
 no_callback:
                __update_runtime_status(dev, RPM_ACTIVE);
                if (parent)
                        atomic_inc(&parent->power.child_count);
        }
        wake_up_all(&dev->power.wait_queue);

        if (retval >= 0)
                rpm_idle(dev, RPM_ASYNC);

 out:
        if (parent && !dev->power.irq_safe) {
                spin_unlock_irq(&dev->power.lock);

                pm_runtime_put(parent);

                spin_lock_irq(&dev->power.lock);
        }

        trace_rpm_return_int(dev, _THIS_IP_, retval);

        return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
        struct device *dev = container_of(work, struct device, power.work);
        enum rpm_request req;

        spin_lock_irq(&dev->power.lock);

        if (!dev->power.request_pending)
                goto out;

        req = dev->power.request;
        dev->power.request = RPM_REQ_NONE;
        dev->power.request_pending = false;

        switch (req) {
        case RPM_REQ_NONE:
                break;
        case RPM_REQ_IDLE:
                rpm_idle(dev, RPM_NOWAIT);
                break;
        case RPM_REQ_SUSPEND:
                rpm_suspend(dev, RPM_NOWAIT);
                break;
        case RPM_REQ_AUTOSUSPEND:
                rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
                break;
        case RPM_REQ_RESUME:
                rpm_resume(dev, RPM_NOWAIT);
                break;
        }

 out:
        spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
        struct device *dev = (struct device *)data;
        unsigned long flags;
        unsigned long expires;

        spin_lock_irqsave(&dev->power.lock, flags);

        expires = dev->power.timer_expires;
        /* If 'expires' is after 'jiffies' we've been called too early. */
        if (expires > 0 && !time_after(expires, jiffies)) {
                dev->power.timer_expires = 0;
                rpm_suspend(dev, dev->power.timer_autosuspends ?
                    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
        }

        spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
        unsigned long flags;
        int retval;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (!delay) {
                retval = rpm_suspend(dev, RPM_ASYNC);
                goto out;
        }

        retval = rpm_check_suspend_allowed(dev);
        if (retval)
                goto out;

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
        dev->power.timer_expires += !dev->power.timer_expires;
        dev->power.timer_autosuspends = 0;
        mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
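
/*
 * Hypothetical usage: queue a suspend request 250 ms from now.  Note that
 * a positive return value (1) means the device is already suspended, so
 * only negative values are errors:
 *
 *	int error = pm_schedule_suspend(dev, 250);
 *
 *	if (error < 0)
 *		dev_warn(dev, "suspend not scheduled: %d\n", error);
 */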

/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

        if (rpmflags & RPM_GET_PUT) {
                if (!atomic_dec_and_test(&dev->power.usage_count))
                        return 0;
        }

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_idle(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);
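
/*
 * For reference, the helpers in include/linux/pm_runtime.h of this era
 * are thin wrappers around this entry point (approximate mapping):
 *
 *	pm_runtime_idle(dev)	 == __pm_runtime_idle(dev, 0)
 *	pm_request_idle(dev)	 == __pm_runtime_idle(dev, RPM_ASYNC)
 *	pm_runtime_put(dev)	 == __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC)
 *	pm_runtime_put_sync(dev) == __pm_runtime_idle(dev, RPM_GET_PUT)
 */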

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

        if (rpmflags & RPM_GET_PUT) {
                if (!atomic_dec_and_test(&dev->power.usage_count))
                        return 0;
        }

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_suspend(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
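
/*
 * Likewise, the suspend-side wrappers map onto this entry point
 * (approximate mapping, per include/linux/pm_runtime.h of this era):
 *
 *	pm_runtime_suspend(dev)          == __pm_runtime_suspend(dev, 0)
 *	pm_runtime_autosuspend(dev)      == __pm_runtime_suspend(dev, RPM_AUTO)
 *	pm_request_autosuspend(dev)      == __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO)
 *	pm_runtime_put_sync_suspend(dev) == __pm_runtime_suspend(dev, RPM_GET_PUT)
 *	pm_runtime_put_autosuspend(dev)  == __pm_runtime_suspend(dev,
 *						RPM_GET_PUT | RPM_ASYNC | RPM_AUTO)
 */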

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

        if (rpmflags & RPM_GET_PUT)
                atomic_inc(&dev->power.usage_count);

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_resume(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
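
/*
 * The canonical driver pattern built on these entry points (hypothetical
 * example; foo_do_io() is an assumed helper): take a reference and resume
 * synchronously before touching the hardware, then drop the reference:
 *
 *	pm_runtime_get_sync(dev);    == __pm_runtime_resume(dev, RPM_GET_PUT)
 *	foo_do_io(dev);
 *	pm_runtime_put(dev);         == __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC)
 */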

/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
        struct device *parent = dev->parent;
        unsigned long flags;
        bool notify_parent = false;
        int error = 0;

        if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
                return -EINVAL;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (!dev->power.runtime_error && !dev->power.disable_depth) {
                error = -EAGAIN;
                goto out;
        }

        if (dev->power.runtime_status == status)
                goto out_set;

        if (status == RPM_SUSPENDED) {
                /* It is always possible to set the status to 'suspended'. */
                if (parent) {
                        atomic_add_unless(&parent->power.child_count, -1, 0);
                        notify_parent = !parent->power.ignore_children;
                }
                goto out_set;
        }

        if (parent) {
                spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

                /*
                 * It is invalid to put an active child under a parent that is
                 * not active, has runtime PM enabled and the
                 * 'power.ignore_children' flag unset.
                 */
                if (!parent->power.disable_depth
                    && !parent->power.ignore_children
                    && parent->power.runtime_status != RPM_ACTIVE)
                        error = -EBUSY;
                else if (dev->power.runtime_status == RPM_SUSPENDED)
                        atomic_inc(&parent->power.child_count);

                spin_unlock(&parent->power.lock);

                if (error)
                        goto out;
        }

 out_set:
        __update_runtime_status(dev, status);
        dev->power.runtime_error = 0;
 out:
        spin_unlock_irqrestore(&dev->power.lock, flags);

        if (notify_parent)
                pm_request_idle(parent);

        return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
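
/*
 * Typical probe-time use, via the pm_runtime_set_active() and
 * pm_runtime_set_suspended() wrappers (hypothetical driver code): declare
 * the hardware's initial state while runtime PM is still disabled, then
 * enable it:
 *
 *	pm_runtime_set_active(dev);  == __pm_runtime_set_status(dev, RPM_ACTIVE)
 *	pm_runtime_enable(dev);
 */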

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);

        if (dev->power.request_pending) {
                dev->power.request = RPM_REQ_NONE;
                spin_unlock_irq(&dev->power.lock);

                cancel_work_sync(&dev->power.work);

                spin_lock_irq(&dev->power.lock);
                dev->power.request_pending = false;
        }

        if (dev->power.runtime_status == RPM_SUSPENDING
            || dev->power.runtime_status == RPM_RESUMING
            || dev->power.idle_notification) {
                DEFINE_WAIT(wait);

                /* Suspend, wake-up or idle notification in progress. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_SUSPENDING
                            && dev->power.runtime_status != RPM_RESUMING
                            && !dev->power.idle_notification)
                                break;
                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
        }
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and, if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
        int retval = 0;

        pm_runtime_get_noresume(dev);
        spin_lock_irq(&dev->power.lock);

        if (dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                rpm_resume(dev, 0);
                retval = 1;
        }

        __pm_runtime_barrier(dev);

        spin_unlock_irq(&dev->power.lock);
        pm_runtime_put_noidle(dev);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
        spin_lock_irq(&dev->power.lock);

        if (dev->power.disable_depth > 0) {
                dev->power.disable_depth++;
                goto out;
        }

        /*
         * Wake up the device if there's a resume request pending, because that
         * means there probably is some I/O to process and disabling runtime PM
         * shouldn't prevent the device from processing the I/O.
         */
        if (check_resume && dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                /*
                 * Prevent suspends and idle notifications from being carried
                 * out after we have woken up the device.
                 */
                pm_runtime_get_noresume(dev);

                rpm_resume(dev, 0);

                pm_runtime_put_noidle(dev);
        }

        if (!dev->power.disable_depth++)
                __pm_runtime_barrier(dev);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (dev->power.disable_depth > 0)
                dev->power.disable_depth--;
        else
                dev_warn(dev, "Unbalanced %s!\n", __func__);

        spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is called
 * for it.
 */
void pm_runtime_forbid(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        if (!dev->power.runtime_auto)
                goto out;

        dev->power.runtime_auto = false;
        atomic_inc(&dev->power.usage_count);
        rpm_resume(dev, 0);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        if (dev->power.runtime_auto)
                goto out;

        dev->power.runtime_auto = true;
        if (atomic_dec_and_test(&dev->power.usage_count))
                rpm_idle(dev, RPM_AUTO);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        dev->power.no_callbacks = 1;
        spin_unlock_irq(&dev->power.lock);
        if (device_is_registered(dev))
                rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
        if (dev->parent)
                pm_runtime_get_sync(dev->parent);
        spin_lock_irq(&dev->power.lock);
        dev->power.irq_safe = 1;
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
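
/*
 * Hypothetical use: a driver whose ->runtime_suspend() and
 * ->runtime_resume() callbacks are safe to run with interrupts off can
 * declare that once, after which even the synchronous entry points may
 * be called from atomic context:
 *
 *	pm_runtime_irq_safe(dev);
 *	...
 *	pm_runtime_get_sync(dev);	now legal in atomic context
 */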

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
        int delay = dev->power.autosuspend_delay;

        /* Should runtime suspend be prevented now? */
        if (dev->power.use_autosuspend && delay < 0) {

                /* If it used to be allowed then prevent it. */
                if (!old_use || old_delay >= 0) {
                        atomic_inc(&dev->power.usage_count);
                        rpm_resume(dev, 0);
                }
        }

        /* Runtime suspend should be allowed now. */
        else {

                /* If it used to be prevented then allow it. */
                if (old_use && old_delay < 0)
                        atomic_dec(&dev->power.usage_count);

                /* Maybe we can autosuspend now. */
                rpm_idle(dev, RPM_AUTO);
        }
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
        int old_delay, old_use;

        spin_lock_irq(&dev->power.lock);
        old_delay = dev->power.autosuspend_delay;
        old_use = dev->power.use_autosuspend;
        dev->power.autosuspend_delay = delay;
        update_autosuspend(dev, old_delay, old_use);
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
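
/*
 * Hypothetical probe-time setup, combining this with
 * __pm_runtime_use_autosuspend() below (usually via the
 * pm_runtime_use_autosuspend() wrapper): autosuspend two seconds after
 * the last reported activity:
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);
 *	pm_runtime_use_autosuspend(dev);
 */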

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
        int old_delay, old_use;

        spin_lock_irq(&dev->power.lock);
        old_delay = dev->power.autosuspend_delay;
        old_use = dev->power.use_autosuspend;
        dev->power.use_autosuspend = use;
        update_autosuspend(dev, old_delay, old_use);
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
        dev->power.runtime_status = RPM_SUSPENDED;
        dev->power.idle_notification = false;

        dev->power.disable_depth = 1;
        atomic_set(&dev->power.usage_count, 0);

        dev->power.runtime_error = 0;

        atomic_set(&dev->power.child_count, 0);
        pm_suspend_ignore_children(dev, false);
        dev->power.runtime_auto = true;

        dev->power.request_pending = false;
        dev->power.request = RPM_REQ_NONE;
        dev->power.deferred_resume = false;
        dev->power.accounting_timestamp = jiffies;
        INIT_WORK(&dev->power.work, pm_runtime_work);

        dev->power.timer_expires = 0;
        setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
                        (unsigned long)dev);

        init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
        __pm_runtime_disable(dev, false);

        /* Change the status back to 'suspended' to match the initial status. */
        if (dev->power.runtime_status == RPM_ACTIVE)
                pm_runtime_set_suspended(dev);
        if (dev->power.irq_safe && dev->parent)
                pm_runtime_put_sync(dev->parent);
}