PM / Runtime: Implement autosuspend support
drivers/base/power/runtime.c (pandora-kernel.git)
/*
 * drivers/base/power/runtime.c - Helper functions for device run-time PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include "power.h"

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
        unsigned long now = jiffies;
        int delta;

        delta = now - dev->power.accounting_timestamp;

        if (delta < 0)
                delta = 0;

        dev->power.accounting_timestamp = now;

        if (dev->power.disable_depth > 0)
                return;

        if (dev->power.runtime_status == RPM_SUSPENDED)
                dev->power.suspended_jiffies += delta;
        else
                dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
        update_pm_runtime_accounting(dev);
        dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
        if (dev->power.timer_expires > 0) {
                del_timer(&dev->power.suspend_timer);
                dev->power.timer_expires = 0;
        }
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);
        /*
         * In case there's a request pending, make sure its work function will
         * return without doing anything.
         */
        dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
        int autosuspend_delay;
        long elapsed;
        unsigned long last_busy;
        unsigned long expires = 0;

        if (!dev->power.use_autosuspend)
                goto out;

        autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
        if (autosuspend_delay < 0)
                goto out;

        last_busy = ACCESS_ONCE(dev->power.last_busy);
        elapsed = jiffies - last_busy;
        if (elapsed < 0)
                goto out;       /* jiffies has wrapped around. */

        /*
         * If the autosuspend_delay is >= 1 second, align the timer by rounding
         * up to the nearest second.
         */
        expires = last_busy + msecs_to_jiffies(autosuspend_delay);
        if (autosuspend_delay >= 1000)
                expires = round_jiffies_up(expires);
        expires += !expires;
        if (elapsed >= expires - last_busy)
                expires = 0;    /* Already expired. */

 out:
        return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
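
/*
 * Example (an illustrative sketch, assuming the pm_runtime_mark_last_busy()
 * and pm_runtime_put_autosuspend() helpers declared in <linux/pm_runtime.h>)
 * of how a driver is expected to feed the expiration logic above:
 *
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 *
 * The first call stores jiffies in power.last_busy, the timestamp read via
 * ACCESS_ONCE() above; the second drops the usage count and requests an
 * RPM_AUTO suspend, which is deferred until the computed expiration time.
 */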

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
        int retval = 0;

        if (dev->power.runtime_error)
                retval = -EINVAL;
        else if (atomic_read(&dev->power.usage_count) > 0
            || dev->power.disable_depth > 0)
                retval = -EAGAIN;
        else if (!pm_children_suspended(dev))
                retval = -EBUSY;

        /* Pending resume requests take precedence over suspends. */
        else if ((dev->power.deferred_resume
                        && dev->power.runtime_status == RPM_SUSPENDING)
            || (dev->power.request_pending
                        && dev->power.request == RPM_REQ_RESUME))
                retval = -EAGAIN;
        else if (dev->power.runtime_status == RPM_SUSPENDED)
                retval = 1;

        return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int retval;

        retval = rpm_check_suspend_allowed(dev);
        if (retval < 0)
                ;       /* Conditions are wrong. */

        /* Idle notifications are allowed only in the RPM_ACTIVE state. */
        else if (dev->power.runtime_status != RPM_ACTIVE)
                retval = -EAGAIN;

        /*
         * Any pending request other than an idle notification takes
         * precedence over us, except that the timer may be running.
         */
        else if (dev->power.request_pending &&
            dev->power.request > RPM_REQ_IDLE)
                retval = -EAGAIN;

        /* Act as though RPM_NOWAIT is always set. */
        else if (dev->power.idle_notification)
                retval = -EINPROGRESS;
        if (retval)
                goto out;

        /* Pending requests need to be canceled. */
        dev->power.request = RPM_REQ_NONE;

        if (dev->power.no_callbacks) {
                /* Assume ->runtime_idle() callback would have suspended. */
                retval = rpm_suspend(dev, rpmflags);
                goto out;
        }

        /* Carry out an asynchronous or a synchronous idle notification. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = RPM_REQ_IDLE;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                goto out;
        }

        dev->power.idle_notification = true;

        if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) {
                spin_unlock_irq(&dev->power.lock);

                dev->bus->pm->runtime_idle(dev);

                spin_lock_irq(&dev->power.lock);
        } else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) {
                spin_unlock_irq(&dev->power.lock);

                dev->type->pm->runtime_idle(dev);

                spin_lock_irq(&dev->power.lock);
        } else if (dev->class && dev->class->pm
            && dev->class->pm->runtime_idle) {
                spin_unlock_irq(&dev->power.lock);

                dev->class->pm->runtime_idle(dev);

                spin_lock_irq(&dev->power.lock);
        }

        dev->power.idle_notification = false;
        wake_up_all(&dev->power.wait_queue);

 out:
        return retval;
}

/**
 * rpm_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be suspended.  If
 * another suspend has been started earlier, either return immediately or wait
 * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags.  Cancel a
 * pending idle notification.  If the RPM_ASYNC flag is set then queue a
 * suspend request; otherwise run the ->runtime_suspend() callback directly.
 * If a deferred resume was requested while the callback was running then carry
 * it out; otherwise send an idle notification for the device (if the suspend
 * failed) or for its parent (if the suspend succeeded).
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        struct device *parent = NULL;
        bool notify = false;
        int retval;

        dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
        retval = rpm_check_suspend_allowed(dev);

        if (retval < 0)
                ;       /* Conditions are wrong. */

        /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
        else if (dev->power.runtime_status == RPM_RESUMING &&
            !(rpmflags & RPM_ASYNC))
                retval = -EAGAIN;
        if (retval)
                goto out;

        /* If the autosuspend_delay time hasn't expired yet, reschedule. */
        if ((rpmflags & RPM_AUTO)
            && dev->power.runtime_status != RPM_SUSPENDING) {
                unsigned long expires = pm_runtime_autosuspend_expiration(dev);

                if (expires != 0) {
                        /* Pending requests need to be canceled. */
                        dev->power.request = RPM_REQ_NONE;

                        /*
                         * Optimization: If the timer is already running and is
                         * set to expire at or before the autosuspend delay,
                         * avoid the overhead of resetting it.  Just let it
                         * expire; pm_suspend_timer_fn() will take care of the
                         * rest.
                         */
                        if (!(dev->power.timer_expires && time_before_eq(
                            dev->power.timer_expires, expires))) {
                                dev->power.timer_expires = expires;
                                mod_timer(&dev->power.suspend_timer, expires);
                        }
                        dev->power.timer_autosuspends = 1;
                        goto out;
                }
        }

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        if (dev->power.runtime_status == RPM_SUSPENDING) {
                DEFINE_WAIT(wait);

                if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
                        retval = -EINPROGRESS;
                        goto out;
                }

                /* Wait for the other suspend running in parallel with us. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_SUSPENDING)
                                break;

                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
                goto repeat;
        }

        dev->power.deferred_resume = false;
        if (dev->power.no_callbacks)
                goto no_callback;       /* Assume success. */

        /* Carry out an asynchronous or a synchronous suspend. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = (rpmflags & RPM_AUTO) ?
                    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                goto out;
        }

        __update_runtime_status(dev, RPM_SUSPENDING);

        if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
                spin_unlock_irq(&dev->power.lock);

                retval = dev->bus->pm->runtime_suspend(dev);

                spin_lock_irq(&dev->power.lock);
                dev->power.runtime_error = retval;
        } else if (dev->type && dev->type->pm
            && dev->type->pm->runtime_suspend) {
                spin_unlock_irq(&dev->power.lock);

                retval = dev->type->pm->runtime_suspend(dev);

                spin_lock_irq(&dev->power.lock);
                dev->power.runtime_error = retval;
        } else if (dev->class && dev->class->pm
            && dev->class->pm->runtime_suspend) {
                spin_unlock_irq(&dev->power.lock);

                retval = dev->class->pm->runtime_suspend(dev);

                spin_lock_irq(&dev->power.lock);
                dev->power.runtime_error = retval;
        } else {
                retval = -ENOSYS;
        }

        if (retval) {
                __update_runtime_status(dev, RPM_ACTIVE);
                dev->power.deferred_resume = false;
                if (retval == -EAGAIN || retval == -EBUSY) {
                        if (dev->power.timer_expires == 0)
                                notify = true;
                        dev->power.runtime_error = 0;
                } else {
                        pm_runtime_cancel_pending(dev);
                }
        } else {
 no_callback:
                __update_runtime_status(dev, RPM_SUSPENDED);
                pm_runtime_deactivate_timer(dev);

                if (dev->parent) {
                        parent = dev->parent;
                        atomic_add_unless(&parent->power.child_count, -1, 0);
                }
        }
        wake_up_all(&dev->power.wait_queue);

        if (dev->power.deferred_resume) {
                rpm_resume(dev, 0);
                retval = -EAGAIN;
                goto out;
        }

        if (notify)
                rpm_idle(dev, 0);

        if (parent && !parent->power.ignore_children) {
                spin_unlock_irq(&dev->power.lock);

                pm_request_idle(parent);

                spin_lock_irq(&dev->power.lock);
        }

 out:
        dev_dbg(dev, "%s returns %d\n", __func__, retval);

        return retval;
}

/**
 * rpm_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        struct device *parent = NULL;
        int retval = 0;

        dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
        if (dev->power.runtime_error)
                retval = -EINVAL;
        else if (dev->power.disable_depth > 0)
                retval = -EAGAIN;
        if (retval)
                goto out;

        /*
         * Other scheduled or pending requests need to be canceled.  Small
         * optimization: If an autosuspend timer is running, leave it running
         * rather than cancelling it now only to restart it again in the near
         * future.
         */
        dev->power.request = RPM_REQ_NONE;
        if (!dev->power.timer_autosuspends)
                pm_runtime_deactivate_timer(dev);

        if (dev->power.runtime_status == RPM_ACTIVE) {
                retval = 1;
                goto out;
        }

        if (dev->power.runtime_status == RPM_RESUMING
            || dev->power.runtime_status == RPM_SUSPENDING) {
                DEFINE_WAIT(wait);

                if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
                        if (dev->power.runtime_status == RPM_SUSPENDING)
                                dev->power.deferred_resume = true;
                        else
                                retval = -EINPROGRESS;
                        goto out;
                }

                /* Wait for the operation carried out in parallel with us. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_RESUMING
                            && dev->power.runtime_status != RPM_SUSPENDING)
                                break;

                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
                goto repeat;
        }

        /*
         * See if we can skip waking up the parent.  This is safe only if
         * power.no_callbacks is set, because otherwise we don't know whether
         * the resume will actually succeed.
         */
        if (dev->power.no_callbacks && !parent && dev->parent) {
                spin_lock(&dev->parent->power.lock);
                if (dev->parent->power.disable_depth > 0
                    || dev->parent->power.ignore_children
                    || dev->parent->power.runtime_status == RPM_ACTIVE) {
                        atomic_inc(&dev->parent->power.child_count);
                        spin_unlock(&dev->parent->power.lock);
                        goto no_callback;       /* Assume success. */
                }
                spin_unlock(&dev->parent->power.lock);
        }

        /* Carry out an asynchronous or a synchronous resume. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = RPM_REQ_RESUME;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                retval = 0;
                goto out;
        }

        if (!parent && dev->parent) {
                /*
                 * Increment the parent's resume counter and resume it if
                 * necessary.
                 */
                parent = dev->parent;
                spin_unlock(&dev->power.lock);

                pm_runtime_get_noresume(parent);

                spin_lock(&parent->power.lock);
                /*
                 * We can resume if the parent's run-time PM is disabled or it
                 * is set to ignore children.
                 */
                if (!parent->power.disable_depth
                    && !parent->power.ignore_children) {
                        rpm_resume(parent, 0);
                        if (parent->power.runtime_status != RPM_ACTIVE)
                                retval = -EBUSY;
                }
                spin_unlock(&parent->power.lock);

                spin_lock(&dev->power.lock);
                if (retval)
                        goto out;
                goto repeat;
        }

        if (dev->power.no_callbacks)
                goto no_callback;       /* Assume success. */

        __update_runtime_status(dev, RPM_RESUMING);

        if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
                spin_unlock_irq(&dev->power.lock);

                retval = dev->bus->pm->runtime_resume(dev);

                spin_lock_irq(&dev->power.lock);
                dev->power.runtime_error = retval;
        } else if (dev->type && dev->type->pm
            && dev->type->pm->runtime_resume) {
                spin_unlock_irq(&dev->power.lock);

                retval = dev->type->pm->runtime_resume(dev);

                spin_lock_irq(&dev->power.lock);
                dev->power.runtime_error = retval;
        } else if (dev->class && dev->class->pm
            && dev->class->pm->runtime_resume) {
                spin_unlock_irq(&dev->power.lock);

                retval = dev->class->pm->runtime_resume(dev);

                spin_lock_irq(&dev->power.lock);
                dev->power.runtime_error = retval;
        } else {
                retval = -ENOSYS;
        }

        if (retval) {
                __update_runtime_status(dev, RPM_SUSPENDED);
                pm_runtime_cancel_pending(dev);
        } else {
 no_callback:
                __update_runtime_status(dev, RPM_ACTIVE);
                if (parent)
                        atomic_inc(&parent->power.child_count);
        }
        wake_up_all(&dev->power.wait_queue);

        if (!retval)
                rpm_idle(dev, RPM_ASYNC);

 out:
        if (parent) {
                spin_unlock_irq(&dev->power.lock);

                pm_runtime_put(parent);

                spin_lock_irq(&dev->power.lock);
        }

        dev_dbg(dev, "%s returns %d\n", __func__, retval);

        return retval;
}

/**
 * pm_runtime_work - Universal run-time PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate run-time PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
        struct device *dev = container_of(work, struct device, power.work);
        enum rpm_request req;

        spin_lock_irq(&dev->power.lock);

        if (!dev->power.request_pending)
                goto out;

        req = dev->power.request;
        dev->power.request = RPM_REQ_NONE;
        dev->power.request_pending = false;

        switch (req) {
        case RPM_REQ_NONE:
                break;
        case RPM_REQ_IDLE:
                rpm_idle(dev, RPM_NOWAIT);
                break;
        case RPM_REQ_SUSPEND:
                rpm_suspend(dev, RPM_NOWAIT);
                break;
        case RPM_REQ_AUTOSUSPEND:
                rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
                break;
        case RPM_REQ_RESUME:
                rpm_resume(dev, RPM_NOWAIT);
                break;
        }

 out:
        spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
        struct device *dev = (struct device *)data;
        unsigned long flags;
        unsigned long expires;

        spin_lock_irqsave(&dev->power.lock, flags);

        expires = dev->power.timer_expires;
        /* If 'expires' is after 'jiffies' we've been called too early. */
        if (expires > 0 && !time_after(expires, jiffies)) {
                dev->power.timer_expires = 0;
                rpm_suspend(dev, dev->power.timer_autosuspends ?
                    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
        }

        spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
        unsigned long flags;
        int retval;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (!delay) {
                retval = rpm_suspend(dev, RPM_ASYNC);
                goto out;
        }

        retval = rpm_check_suspend_allowed(dev);
        if (retval)
                goto out;

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
        dev->power.timer_expires += !dev->power.timer_expires;
        dev->power.timer_autosuspends = 0;
        mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
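
/*
 * Example (an illustrative sketch of a caller of pm_schedule_suspend()):
 *
 *	error = pm_schedule_suspend(dev, 5000);
 *
 * queues a suspend request roughly five seconds from now.  The request is
 * asynchronous, so a later resume or a new request simply supersedes it.
 * A return value of 1 means the device is already suspended, which callers
 * normally treat as success; -EAGAIN means the device is busy (its usage
 * count is held).
 */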

/**
 * __pm_runtime_idle - Entry point for run-time idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        if (rpmflags & RPM_GET_PUT) {
                if (!atomic_dec_and_test(&dev->power.usage_count))
                        return 0;
        }

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_idle(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);

/**
 * __pm_runtime_suspend - Entry point for run-time put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        if (rpmflags & RPM_GET_PUT) {
                if (!atomic_dec_and_test(&dev->power.usage_count))
                        return 0;
        }

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_suspend(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for run-time resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        if (rpmflags & RPM_GET_PUT)
                atomic_inc(&dev->power.usage_count);

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_resume(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
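
/*
 * Example (an illustrative sketch of the reference-counted pattern these
 * three entry points back; pm_runtime_get_sync() and pm_runtime_put() are
 * the wrappers in <linux/pm_runtime.h> that pass RPM_GET_PUT here):
 *
 *	pm_runtime_get_sync(dev);
 *	... access the hardware ...
 *	pm_runtime_put(dev);
 *
 * The usage count taken by the get keeps rpm_check_suspend_allowed()
 * returning -EAGAIN, so the device cannot suspend until the matching put.
 */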

/**
 * __pm_runtime_set_status - Set run-time PM status of a device.
 * @dev: Device to handle.
 * @status: New run-time PM status of the device.
 *
 * If run-time PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
        struct device *parent = dev->parent;
        unsigned long flags;
        bool notify_parent = false;
        int error = 0;

        if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
                return -EINVAL;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (!dev->power.runtime_error && !dev->power.disable_depth) {
                error = -EAGAIN;
                goto out;
        }

        if (dev->power.runtime_status == status)
                goto out_set;

        if (status == RPM_SUSPENDED) {
                /* It is always possible to set the status to 'suspended'. */
                if (parent) {
                        atomic_add_unless(&parent->power.child_count, -1, 0);
                        notify_parent = !parent->power.ignore_children;
                }
                goto out_set;
        }

        if (parent) {
                spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

                /*
                 * It is invalid to put an active child under a parent that is
                 * not active, has run-time PM enabled, and has the
                 * 'power.ignore_children' flag unset.
                 */
                if (!parent->power.disable_depth
                    && !parent->power.ignore_children
                    && parent->power.runtime_status != RPM_ACTIVE)
                        error = -EBUSY;
                else if (dev->power.runtime_status == RPM_SUSPENDED)
                        atomic_inc(&parent->power.child_count);

                spin_unlock(&parent->power.lock);

                if (error)
                        goto out;
        }

 out_set:
        __update_runtime_status(dev, status);
        dev->power.runtime_error = 0;
 out:
        spin_unlock_irqrestore(&dev->power.lock, flags);

        if (notify_parent)
                pm_request_idle(parent);

        return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * run-time PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);

        if (dev->power.request_pending) {
                dev->power.request = RPM_REQ_NONE;
                spin_unlock_irq(&dev->power.lock);

                cancel_work_sync(&dev->power.work);

                spin_lock_irq(&dev->power.lock);
                dev->power.request_pending = false;
        }

        if (dev->power.runtime_status == RPM_SUSPENDING
            || dev->power.runtime_status == RPM_RESUMING
            || dev->power.idle_notification) {
                DEFINE_WAIT(wait);

                /* Suspend, wake-up or idle notification in progress. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_SUSPENDING
                            && dev->power.runtime_status != RPM_RESUMING
                            && !dev->power.idle_notification)
                                break;
                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
        }
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and,
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all run-time PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
        int retval = 0;

        pm_runtime_get_noresume(dev);
        spin_lock_irq(&dev->power.lock);

        if (dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                rpm_resume(dev, 0);
                retval = 1;
        }

        __pm_runtime_barrier(dev);

        spin_unlock_irq(&dev->power.lock);
        pm_runtime_put_noidle(dev);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable run-time PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending run-time PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its run-time PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its run-time PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
        spin_lock_irq(&dev->power.lock);

        if (dev->power.disable_depth > 0) {
                dev->power.disable_depth++;
                goto out;
        }

        /*
         * Wake up the device if there's a resume request pending, because that
         * means there probably is some I/O to process and disabling run-time PM
         * shouldn't prevent the device from processing the I/O.
         */
        if (check_resume && dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                /*
                 * Prevent suspends and idle notifications from being carried
                 * out after we have woken up the device.
                 */
                pm_runtime_get_noresume(dev);

                rpm_resume(dev, 0);

                pm_runtime_put_noidle(dev);
        }

        if (!dev->power.disable_depth++)
                __pm_runtime_barrier(dev);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable run-time PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (dev->power.disable_depth > 0)
                dev->power.disable_depth--;
        else
                dev_warn(dev, "Unbalanced %s!\n", __func__);

        spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

/**
 * pm_runtime_forbid - Block run-time PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is called
 * for it.
 */
void pm_runtime_forbid(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        if (!dev->power.runtime_auto)
                goto out;

        dev->power.runtime_auto = false;
        atomic_inc(&dev->power.usage_count);
        rpm_resume(dev, 0);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock run-time PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        if (dev->power.runtime_auto)
                goto out;

        dev->power.runtime_auto = true;
        if (atomic_dec_and_test(&dev->power.usage_count))
                rpm_idle(dev, RPM_AUTO);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_no_callbacks - Ignore run-time PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no run-time PM
 * callbacks of its own.  The run-time sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        dev->power.no_callbacks = 1;
        spin_unlock_irq(&dev->power.lock);
        if (device_is_registered(dev))
                rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
        int delay = dev->power.autosuspend_delay;

        /* Should runtime suspend be prevented now? */
        if (dev->power.use_autosuspend && delay < 0) {

                /* If it used to be allowed then prevent it. */
                if (!old_use || old_delay >= 0) {
                        atomic_inc(&dev->power.usage_count);
                        rpm_resume(dev, 0);
                }
        }

        /* Runtime suspend should be allowed now. */
        else {

                /* If it used to be prevented then allow it. */
                if (old_use && old_delay < 0)
                        atomic_dec(&dev->power.usage_count);

                /* Maybe we can autosuspend now. */
                rpm_idle(dev, RPM_AUTO);
        }
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent run-time suspends.  If it
 * changes the other way, allow run-time suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
        int old_delay, old_use;

        spin_lock_irq(&dev->power.lock);
        old_delay = dev->power.autosuspend_delay;
        old_use = dev->power.use_autosuspend;
        dev->power.autosuspend_delay = delay;
        update_autosuspend(dev, old_delay, old_use);
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent run-time
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
        int old_delay, old_use;

        spin_lock_irq(&dev->power.lock);
        old_delay = dev->power.autosuspend_delay;
        old_use = dev->power.use_autosuspend;
        dev->power.use_autosuspend = use;
        update_autosuspend(dev, old_delay, old_use);
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
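
/*
 * Example (an illustrative sketch of a driver probe path;
 * pm_runtime_use_autosuspend() is the inline wrapper around
 * __pm_runtime_use_autosuspend() in <linux/pm_runtime.h>):
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_enable(dev);
 *
 * After this, RPM_AUTO suspends are carried out only once the device has
 * been idle (per power.last_busy) for at least two seconds.
 */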

/**
 * pm_runtime_init - Initialize run-time PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
        dev->power.runtime_status = RPM_SUSPENDED;
        dev->power.idle_notification = false;

        dev->power.disable_depth = 1;
        atomic_set(&dev->power.usage_count, 0);

        dev->power.runtime_error = 0;

        atomic_set(&dev->power.child_count, 0);
        pm_suspend_ignore_children(dev, false);
        dev->power.runtime_auto = true;

        dev->power.request_pending = false;
        dev->power.request = RPM_REQ_NONE;
        dev->power.deferred_resume = false;
        dev->power.accounting_timestamp = jiffies;
        INIT_WORK(&dev->power.work, pm_runtime_work);

        dev->power.timer_expires = 0;
        setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
                        (unsigned long)dev);

        init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
        __pm_runtime_disable(dev, false);

        /* Change the status back to 'suspended' to match the initial status. */
        if (dev->power.runtime_status == RPM_ACTIVE)
                pm_runtime_set_suspended(dev);
}