/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */
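
/*
 * Illustrative overview (a reader's sketch, not kernel-doc): over one
 * complete system sleep transition, the routines below invoke a
 * device's callbacks in this order:
 *
 *      dpm_prepare()           ->prepare()
 *      dpm_suspend()           ->suspend()
 *      dpm_suspend_noirq()     ->suspend_noirq()
 *        ...the system sleeps...
 *      dpm_resume_noirq()      ->resume_noirq()
 *      dpm_resume()            ->resume()
 *      dpm_complete()          ->complete()
 *
 * Devices are suspended from the tail of the list (children before
 * parents) and resumed from the head (parents before children).
 */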

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_noirq_list);

static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
        dev->power.in_suspend = false;
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
        spin_lock_init(&dev->power.lock);
        pm_runtime_init(dev);
        INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
        mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
        pr_debug("PM: Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        mutex_lock(&dpm_list_mtx);
        if (dev->parent && dev->parent->power.in_suspend)
                dev_warn(dev, "parent %s should not be sleeping\n",
                        dev_name(dev->parent));
        list_add_tail(&dev->power.entry, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
        pr_debug("PM: Removing info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        complete_all(&dev->power.completion);
        mutex_lock(&dpm_list_mtx);
        list_del_init(&dev->power.entry);
        mutex_unlock(&dpm_list_mtx);
        device_wakeup_disable(dev);
        pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s before %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert before devb. */
        list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s after %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert after devb. */
        list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
        pr_debug("PM: Moving %s:%s to end of list\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
        ktime_t calltime = ktime_set(0, 0);

        if (initcall_debug) {
                pr_info("calling  %s+ @ %i\n",
                                dev_name(dev), task_pid_nr(current));
                calltime = ktime_get();
        }

        return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
                                  int error)
{
        ktime_t delta, rettime;

        if (initcall_debug) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
                pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
                        error, (unsigned long long)ktime_to_ns(delta) >> 10);
        }
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
        if (!dev)
                return;

        if (async || (pm_async_enabled && dev->power.async_suspend))
                wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
        dpm_wait(dev, *((bool *)async_ptr));
        return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
        device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
                 const struct dev_pm_ops *ops,
                 pm_message_t state)
{
        int error = 0;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                if (ops->suspend) {
                        error = ops->suspend(dev);
                        suspend_report_result(ops->suspend, error);
                }
                break;
        case PM_EVENT_RESUME:
                if (ops->resume) {
                        error = ops->resume(dev);
                        suspend_report_result(ops->resume, error);
                }
                break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                if (ops->freeze) {
                        error = ops->freeze(dev);
                        suspend_report_result(ops->freeze, error);
                }
                break;
        case PM_EVENT_HIBERNATE:
                if (ops->poweroff) {
                        error = ops->poweroff(dev);
                        suspend_report_result(ops->poweroff, error);
                }
                break;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                if (ops->thaw) {
                        error = ops->thaw(dev);
                        suspend_report_result(ops->thaw, error);
                }
                break;
        case PM_EVENT_RESTORE:
                if (ops->restore) {
                        error = ops->restore(dev);
                        suspend_report_result(ops->restore, error);
                }
                break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        default:
                error = -EINVAL;
        }

        initcall_debug_report(dev, calltime, error);

        return error;
}
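
/*
 * Illustrative sketch of what pm_op() dispatches to: a driver (or its
 * bus, class or type) supplies a dev_pm_ops structure whose callbacks
 * are selected by the PM event above.  The foo_* names below are
 * hypothetical; SIMPLE_DEV_PM_OPS() from <linux/pm.h> reuses one pair
 * of handlers for both the suspend/resume and hibernation events.
 */
#if 0
static int foo_suspend(struct device *dev)
{
        /* quiesce the hardware and save any volatile context */
        return 0;
}

static int foo_resume(struct device *dev)
{
        /* restore the saved context and reactivate the hardware */
        return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
#endif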

/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
                        const struct dev_pm_ops *ops,
                        pm_message_t state)
{
        int error = 0;
        ktime_t calltime = ktime_set(0, 0), delta, rettime;

        if (initcall_debug) {
                pr_info("calling  %s+ @ %i, parent: %s\n",
                                dev_name(dev), task_pid_nr(current),
                                dev->parent ? dev_name(dev->parent) : "none");
                calltime = ktime_get();
        }

        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                if (ops->suspend_noirq) {
                        error = ops->suspend_noirq(dev);
                        suspend_report_result(ops->suspend_noirq, error);
                }
                break;
        case PM_EVENT_RESUME:
                if (ops->resume_noirq) {
                        error = ops->resume_noirq(dev);
                        suspend_report_result(ops->resume_noirq, error);
                }
                break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                if (ops->freeze_noirq) {
                        error = ops->freeze_noirq(dev);
                        suspend_report_result(ops->freeze_noirq, error);
                }
                break;
        case PM_EVENT_HIBERNATE:
                if (ops->poweroff_noirq) {
                        error = ops->poweroff_noirq(dev);
                        suspend_report_result(ops->poweroff_noirq, error);
                }
                break;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                if (ops->thaw_noirq) {
                        error = ops->thaw_noirq(dev);
                        suspend_report_result(ops->thaw_noirq, error);
                }
                break;
        case PM_EVENT_RESTORE:
                if (ops->restore_noirq) {
                        error = ops->restore_noirq(dev);
                        suspend_report_result(ops->restore_noirq, error);
                }
                break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        default:
                error = -EINVAL;
        }

        if (initcall_debug) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
                printk("initcall %s_i+ returned %d after %Ld usecs\n",
                        dev_name(dev), error,
                        (unsigned long long)ktime_to_ns(delta) >> 10);
        }

        return error;
}

static char *pm_verb(int event)
{
        switch (event) {
        case PM_EVENT_SUSPEND:
                return "suspend";
        case PM_EVENT_RESUME:
                return "resume";
        case PM_EVENT_FREEZE:
                return "freeze";
        case PM_EVENT_QUIESCE:
                return "quiesce";
        case PM_EVENT_HIBERNATE:
                return "hibernate";
        case PM_EVENT_THAW:
                return "thaw";
        case PM_EVENT_RESTORE:
                return "restore";
        case PM_EVENT_RECOVER:
                return "recover";
        default:
                return "(unknown PM event)";
        }
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
                ", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
                        int error)
{
        printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
                dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
        ktime_t calltime;
        u64 usecs64;
        int usecs;

        calltime = ktime_get();
        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
        do_div(usecs64, NSEC_PER_USEC);
        usecs = usecs64;
        if (usecs == 0)
                usecs = 1;
        pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
                info ?: "", info ? " " : "", pm_verb(state.event),
                usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->pwr_domain) {
                pm_dev_dbg(dev, state, "EARLY power domain ");
                error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                pm_dev_dbg(dev, state, "EARLY type ");
                error = pm_noirq_op(dev, dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                pm_dev_dbg(dev, state, "EARLY class ");
                error = pm_noirq_op(dev, dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                pm_dev_dbg(dev, state, "EARLY ");
                error = pm_noirq_op(dev, dev->bus->pm, state);
        }

        TRACE_RESUME(error);
        return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_noirq_list)) {
                struct device *dev = to_device(dpm_noirq_list.next);
                int error;

                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_suspended_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_noirq(dev, state);
                if (error)
                        pm_dev_err(dev, state, " early", error);

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "early");
        resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        error = cb(dev);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        dpm_wait(dev->parent, async);
        device_lock(dev);

        dev->power.in_suspend = false;

        if (dev->pwr_domain) {
                pm_dev_dbg(dev, state, "power domain ");
                error = pm_op(dev, &dev->pwr_domain->ops, state);
                goto End;
        }

        if (dev->type && dev->type->pm) {
                pm_dev_dbg(dev, state, "type ");
                error = pm_op(dev, dev->type->pm, state);
                goto End;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        pm_dev_dbg(dev, state, "class ");
                        error = pm_op(dev, dev->class->pm, state);
                        goto End;
                } else if (dev->class->resume) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_resume(dev, dev->class->resume);
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        pm_dev_dbg(dev, state, "");
                        error = pm_op(dev, dev->bus->pm, state);
                } else if (dev->bus->resume) {
                        pm_dev_dbg(dev, state, "legacy ");
                        error = legacy_resume(dev, dev->bus->resume);
                }
        }

 End:
        device_unlock(dev);
        complete_all(&dev->power.completion);

        TRACE_RESUME(error);
        return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);
        put_device(dev);
}

static bool is_async(struct device *dev)
{
        return dev->power.async_suspend && pm_async_enabled
                && !pm_trace_is_enabled();
}
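
/*
 * Illustrative sketch: a driver opts a device into the asynchronous
 * path that is_async() checks for by calling
 * device_enable_async_suspend(), typically at probe time.  foo_probe()
 * is a hypothetical example.
 */
#if 0
static int foo_probe(struct device *dev)
{
        /* let this device suspend/resume concurrently with others */
        device_enable_async_suspend(dev);
        return 0;
}
#endif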

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
static void dpm_resume(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
                INIT_COMPLETION(dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume, dev);
                }
        }

        while (!list_empty(&dpm_suspended_list)) {
                dev = to_device(dpm_suspended_list.next);
                get_device(dev);
                if (!is_async(dev)) {
                        int error;

                        mutex_unlock(&dpm_list_mtx);

                        error = device_resume(dev, state, false);
                        if (error)
                                pm_dev_err(dev, state, "", error);

                        mutex_lock(&dpm_list_mtx);
                }
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
        device_lock(dev);

        if (dev->pwr_domain) {
                pm_dev_dbg(dev, state, "completing power domain ");
                if (dev->pwr_domain->ops.complete)
                        dev->pwr_domain->ops.complete(dev);
        } else if (dev->type && dev->type->pm) {
                pm_dev_dbg(dev, state, "completing type ");
                if (dev->type->pm->complete)
                        dev->type->pm->complete(dev);
        } else if (dev->class && dev->class->pm) {
                pm_dev_dbg(dev, state, "completing class ");
                if (dev->class->pm->complete)
                        dev->class->pm->complete(dev);
        } else if (dev->bus && dev->bus->pm) {
                pm_dev_dbg(dev, state, "completing ");
                if (dev->bus->pm->complete)
                        dev->bus->pm->complete(dev);
        }

        device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
static void dpm_complete(pm_message_t state)
{
        struct list_head list;

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                dev->power.in_suspend = false;
                list_move(&dev->power.entry, &list);
                mutex_unlock(&dpm_list_mtx);

                device_complete(dev, state);

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
        might_sleep();
        dpm_resume(state);
        dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
        switch (sleep_state.event) {
        case PM_EVENT_SUSPEND:
                return PMSG_RESUME;
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return PMSG_RECOVER;
        case PM_EVENT_HIBERNATE:
                return PMSG_RESTORE;
        }
        return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
        int error;

        if (dev->pwr_domain) {
                pm_dev_dbg(dev, state, "LATE power domain ");
                error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
                if (error)
                        return error;
        } else if (dev->type && dev->type->pm) {
                pm_dev_dbg(dev, state, "LATE type ");
                error = pm_noirq_op(dev, dev->type->pm, state);
                if (error)
                        return error;
        } else if (dev->class && dev->class->pm) {
                pm_dev_dbg(dev, state, "LATE class ");
                error = pm_noirq_op(dev, dev->class->pm, state);
                if (error)
                        return error;
        } else if (dev->bus && dev->bus->pm) {
                pm_dev_dbg(dev, state, "LATE ");
                error = pm_noirq_op(dev, dev->bus->pm, state);
                if (error)
                        return error;
        }

        return 0;
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        suspend_device_irqs();
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_suspended_list)) {
                struct device *dev = to_device(dpm_suspended_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_noirq(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " late", error);
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_noirq_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_noirq(resume_event(state));
        else
                dpm_show_time(starttime, state, "late");
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
                          int (*cb)(struct device *dev, pm_message_t state))
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        error = cb(dev, state);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
        int error = 0;

        dpm_wait_for_children(dev, async);
        device_lock(dev);

        if (async_error)
                goto End;

        if (pm_wakeup_pending()) {
                async_error = -EBUSY;
                goto End;
        }

        if (dev->pwr_domain) {
                pm_dev_dbg(dev, state, "power domain ");
                error = pm_op(dev, &dev->pwr_domain->ops, state);
                goto End;
        }

        if (dev->type && dev->type->pm) {
                pm_dev_dbg(dev, state, "type ");
                error = pm_op(dev, dev->type->pm, state);
                goto End;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        pm_dev_dbg(dev, state, "class ");
                        error = pm_op(dev, dev->class->pm, state);
                        goto End;
                } else if (dev->class->suspend) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_suspend(dev, state, dev->class->suspend);
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        pm_dev_dbg(dev, state, "");
                        error = pm_op(dev, dev->bus->pm, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy ");
                        error = legacy_suspend(dev, state, dev->bus->suspend);
                }
        }

 End:
        device_unlock(dev);
        complete_all(&dev->power.completion);

        if (error)
                async_error = error;

        return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);

        put_device(dev);
}

static int device_suspend(struct device *dev)
{
        INIT_COMPLETION(dev->power.completion);

        if (pm_async_enabled && dev->power.async_suspend) {
                get_device(dev);
                async_schedule(async_suspend, dev);
                return 0;
        }

        return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, "", error);
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_suspended_list);
                put_device(dev);
                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (!error)
                dpm_show_time(starttime, state, NULL);
        return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
        int error = 0;

        device_lock(dev);

        if (dev->pwr_domain) {
                pm_dev_dbg(dev, state, "preparing power domain ");
                if (dev->pwr_domain->ops.prepare)
                        error = dev->pwr_domain->ops.prepare(dev);
                suspend_report_result(dev->pwr_domain->ops.prepare, error);
                if (error)
                        goto End;
        } else if (dev->type && dev->type->pm) {
                pm_dev_dbg(dev, state, "preparing type ");
                if (dev->type->pm->prepare)
                        error = dev->type->pm->prepare(dev);
                suspend_report_result(dev->type->pm->prepare, error);
                if (error)
                        goto End;
        } else if (dev->class && dev->class->pm) {
                pm_dev_dbg(dev, state, "preparing class ");
                if (dev->class->pm->prepare)
                        error = dev->class->pm->prepare(dev);
                suspend_report_result(dev->class->pm->prepare, error);
                if (error)
                        goto End;
        } else if (dev->bus && dev->bus->pm) {
                pm_dev_dbg(dev, state, "preparing ");
                if (dev->bus->pm->prepare)
                        error = dev->bus->pm->prepare(dev);
                suspend_report_result(dev->bus->pm->prepare, error);
        }

 End:
        device_unlock(dev);

        return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
static int dpm_prepare(pm_message_t state)
{
        int error = 0;

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                pm_runtime_get_noresume(dev);
                if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                        pm_wakeup_event(dev, 0);

                pm_runtime_put_sync(dev);
                error = pm_wakeup_pending() ?
                                -EBUSY : device_prepare(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        if (error == -EAGAIN) {
                                put_device(dev);
                                error = 0;
                                continue;
                        }
                        printk(KERN_INFO "PM: Device %s not prepared "
                                "for power transition: code %d\n",
                                dev_name(dev), error);
                        put_device(dev);
                        break;
                }
                dev->power.in_suspend = true;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
        int error;

        might_sleep();
        error = dpm_prepare(state);
        if (!error)
                error = dpm_suspend(state);
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
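
/*
 * Illustrative sketch of how the entry points exported here pair up in
 * a platform's sleep sequence (modeled on the system sleep core;
 * foo_enter_sleep() is hypothetical).  Note that dpm_suspend_noirq()
 * already runs dpm_resume_noirq() itself on failure.
 */
#if 0
static int foo_enter_sleep(void)
{
        int error;

        error = dpm_suspend_start(PMSG_SUSPEND);
        if (error)
                goto resume;

        error = dpm_suspend_noirq(PMSG_SUSPEND);
        if (error)
                goto resume;

        /* ... program the platform into the target sleep state ... */

        dpm_resume_noirq(PMSG_RESUME);
 resume:
        dpm_resume_end(PMSG_RESUME);
        return error;
}
#endif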

void __suspend_report_result(const char *function, void *fn, int ret)
{
        if (ret)
                printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
        dpm_wait(dev, subordinate->power.async_suspend);
        return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
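
/*
 * Illustrative sketch: a driver whose device must wait for some other,
 * non-ancestral device during a transition can call
 * device_pm_wait_for_dev() from a suspend or resume callback.  The
 * foo_* names and the ->partner pointer are hypothetical.
 */
#if 0
static int foo_resume(struct device *dev)
{
        struct foo *foo = dev_get_drvdata(dev);
        int error;

        /* don't touch the hardware until the partner device is back */
        error = device_pm_wait_for_dev(dev, foo->partner);
        if (error)
                return error;

        /* ...the partner has resumed; reactivate this device... */
        return 0;
}
#endif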