/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in dpm_list are in depth-first order, simply because
 * children are guaranteed to be discovered after parents, and are
 * inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

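/*
 * Devices migrate between these lists as a system sleep transition
 * progresses: dpm_prepare() moves them from dpm_list to dpm_prepared_list,
 * dpm_suspend() to dpm_suspended_list and dpm_suspend_noirq() to
 * dpm_noirq_list; the resume path walks them back the other way.
 */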
LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_noirq_list);

static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
        dev->power.in_suspend = false;
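        /*
         * Initialize the completion as "done" so that waiters need not
         * block before the device has taken part in any PM transition.
         */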
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
        spin_lock_init(&dev->power.lock);
        pm_runtime_init(dev);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
        mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
        pr_debug("PM: Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        mutex_lock(&dpm_list_mtx);
        if (dev->parent && dev->parent->power.in_suspend)
                dev_warn(dev, "parent %s should not be sleeping\n",
                        dev_name(dev->parent));
        list_add_tail(&dev->power.entry, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
        pr_debug("PM: Removing info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        complete_all(&dev->power.completion);
        mutex_lock(&dpm_list_mtx);
        list_del_init(&dev->power.entry);
        mutex_unlock(&dpm_list_mtx);
        device_wakeup_disable(dev);
        pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s before %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert before devb. */
        list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s after %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert after devb. */
        list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
        pr_debug("PM: Moving %s:%s to end of list\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
        ktime_t calltime = ktime_set(0, 0);

        if (initcall_debug) {
                pr_info("calling  %s+ @ %i\n",
                                dev_name(dev), task_pid_nr(current));
                calltime = ktime_get();
        }

        return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
                                  int error)
{
        ktime_t delta, rettime;

        if (initcall_debug) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
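                /* Shifting ns right by 10 approximates division by 1000. */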
                pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
                        error, (unsigned long long)ktime_to_ns(delta) >> 10);
        }
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if async PM is enabled and the device's
 *         power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
        if (!dev)
                return;

        if (async || (pm_async_enabled && dev->power.async_suspend))
                wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
        dpm_wait(dev, *((bool *)async_ptr));
        return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
        device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
                 const struct dev_pm_ops *ops,
                 pm_message_t state)
{
        int error = 0;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                if (ops->suspend) {
                        error = ops->suspend(dev);
                        suspend_report_result(ops->suspend, error);
                }
                break;
        case PM_EVENT_RESUME:
                if (ops->resume) {
                        error = ops->resume(dev);
                        suspend_report_result(ops->resume, error);
                }
                break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                if (ops->freeze) {
                        error = ops->freeze(dev);
                        suspend_report_result(ops->freeze, error);
                }
                break;
        case PM_EVENT_HIBERNATE:
                if (ops->poweroff) {
                        error = ops->poweroff(dev);
                        suspend_report_result(ops->poweroff, error);
                }
                break;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                if (ops->thaw) {
                        error = ops->thaw(dev);
                        suspend_report_result(ops->thaw, error);
                }
                break;
        case PM_EVENT_RESTORE:
                if (ops->restore) {
                        error = ops->restore(dev);
                        suspend_report_result(ops->restore, error);
                }
                break;
#endif /* CONFIG_HIBERNATION */
        default:
                error = -EINVAL;
        }

        initcall_debug_report(dev, calltime, error);

        return error;
}

/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
                        const struct dev_pm_ops *ops,
                        pm_message_t state)
{
        int error = 0;
        ktime_t calltime = ktime_set(0, 0), delta, rettime;

        if (initcall_debug) {
                pr_info("calling  %s+ @ %i, parent: %s\n",
                                dev_name(dev), task_pid_nr(current),
                                dev->parent ? dev_name(dev->parent) : "none");
                calltime = ktime_get();
        }

        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                if (ops->suspend_noirq) {
                        error = ops->suspend_noirq(dev);
                        suspend_report_result(ops->suspend_noirq, error);
                }
                break;
        case PM_EVENT_RESUME:
                if (ops->resume_noirq) {
                        error = ops->resume_noirq(dev);
                        suspend_report_result(ops->resume_noirq, error);
                }
                break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                if (ops->freeze_noirq) {
                        error = ops->freeze_noirq(dev);
                        suspend_report_result(ops->freeze_noirq, error);
                }
                break;
        case PM_EVENT_HIBERNATE:
                if (ops->poweroff_noirq) {
                        error = ops->poweroff_noirq(dev);
                        suspend_report_result(ops->poweroff_noirq, error);
                }
                break;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                if (ops->thaw_noirq) {
                        error = ops->thaw_noirq(dev);
                        suspend_report_result(ops->thaw_noirq, error);
                }
                break;
        case PM_EVENT_RESTORE:
                if (ops->restore_noirq) {
                        error = ops->restore_noirq(dev);
                        suspend_report_result(ops->restore_noirq, error);
                }
                break;
#endif /* CONFIG_HIBERNATION */
        default:
                error = -EINVAL;
        }

        if (initcall_debug) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
                printk("initcall %s_i+ returned %d after %Ld usecs\n",
                        dev_name(dev), error,
                        (unsigned long long)ktime_to_ns(delta) >> 10);
        }

        return error;
}

static char *pm_verb(int event)
{
        switch (event) {
        case PM_EVENT_SUSPEND:
                return "suspend";
        case PM_EVENT_RESUME:
                return "resume";
        case PM_EVENT_FREEZE:
                return "freeze";
        case PM_EVENT_QUIESCE:
                return "quiesce";
        case PM_EVENT_HIBERNATE:
                return "hibernate";
        case PM_EVENT_THAW:
                return "thaw";
        case PM_EVENT_RESTORE:
                return "restore";
        case PM_EVENT_RECOVER:
                return "recover";
        default:
                return "(unknown PM event)";
        }
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
                ", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
                        int error)
{
        printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
                dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
        ktime_t calltime;
        u64 usecs64;
        int usecs;

        calltime = ktime_get();
        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
        do_div(usecs64, NSEC_PER_USEC);
        usecs = usecs64;
        if (usecs == 0)
                usecs = 1;
        pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
                info ?: "", info ? " " : "", pm_verb(state.event),
                usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

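        /* Resume-side callbacks run bus first, then type, then class. */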
        if (dev->bus && dev->bus->pm) {
                pm_dev_dbg(dev, state, "EARLY ");
                error = pm_noirq_op(dev, dev->bus->pm, state);
                if (error)
                        goto End;
        }

        if (dev->type && dev->type->pm) {
                pm_dev_dbg(dev, state, "EARLY type ");
                error = pm_noirq_op(dev, dev->type->pm, state);
                if (error)
                        goto End;
        }

        if (dev->class && dev->class->pm) {
                pm_dev_dbg(dev, state, "EARLY class ");
                error = pm_noirq_op(dev, dev->class->pm, state);
        }

End:
        TRACE_RESUME(error);
        return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_noirq_list)) {
                struct device *dev = to_device(dpm_noirq_list.next);
                int error;

                get_device(dev);
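                /*
                 * Advance the device to dpm_suspended_list before dropping
                 * the mutex, so the full resume phase will pick it up even
                 * if its noirq callback fails.
                 */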
                list_move_tail(&dev->power.entry, &dpm_suspended_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_noirq(dev, state);
                if (error)
                        pm_dev_err(dev, state, " early", error);

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "early");
        resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        error = cb(dev);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

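        /* A device must not start resuming before its parent has finished. */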
        dpm_wait(dev->parent, async);
        device_lock(dev);

        dev->power.in_suspend = false;

        if (dev->bus) {
                if (dev->bus->pm) {
                        pm_dev_dbg(dev, state, "");
                        error = pm_op(dev, dev->bus->pm, state);
                } else if (dev->bus->resume) {
                        pm_dev_dbg(dev, state, "legacy ");
                        error = legacy_resume(dev, dev->bus->resume);
                }
                if (error)
                        goto End;
        }

        if (dev->type) {
                if (dev->type->pm) {
                        pm_dev_dbg(dev, state, "type ");
                        error = pm_op(dev, dev->type->pm, state);
                }
                if (error)
                        goto End;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        pm_dev_dbg(dev, state, "class ");
                        error = pm_op(dev, dev->class->pm, state);
                } else if (dev->class->resume) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_resume(dev, dev->class->resume);
                }
        }
 End:
        device_unlock(dev);
        complete_all(&dev->power.completion);

        TRACE_RESUME(error);
        return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);
        put_device(dev);
}

static bool is_async(struct device *dev)
{
        return dev->power.async_suspend && pm_async_enabled
                && !pm_trace_is_enabled();
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
static void dpm_resume(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

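        /*
         * Schedule all async-capable resumes up front, so they can run in
         * parallel with the synchronous resumes carried out below.
         */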
        list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
                INIT_COMPLETION(dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume, dev);
                }
        }

        while (!list_empty(&dpm_suspended_list)) {
                dev = to_device(dpm_suspended_list.next);
                get_device(dev);
                if (!is_async(dev)) {
                        int error;

                        mutex_unlock(&dpm_list_mtx);

                        error = device_resume(dev, state, false);
                        if (error)
                                pm_dev_err(dev, state, "", error);

                        mutex_lock(&dpm_list_mtx);
                }
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
        device_lock(dev);

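        /* ->complete() callbacks run in reverse order of ->prepare(). */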
        if (dev->class && dev->class->pm && dev->class->pm->complete) {
                pm_dev_dbg(dev, state, "completing class ");
                dev->class->pm->complete(dev);
        }

        if (dev->type && dev->type->pm && dev->type->pm->complete) {
                pm_dev_dbg(dev, state, "completing type ");
                dev->type->pm->complete(dev);
        }

        if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
                pm_dev_dbg(dev, state, "completing ");
                dev->bus->pm->complete(dev);
        }

        device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
static void dpm_complete(pm_message_t state)
{
        struct list_head list;

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
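        /*
         * Collect the devices on a local list while the mutex is dropped
         * around each ->complete() call; the list is spliced back into
         * dpm_list at the end, restoring the original ordering.
         */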
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                dev->power.in_suspend = false;
                list_move(&dev->power.entry, &list);
                mutex_unlock(&dpm_list_mtx);

                device_complete(dev, state);
                pm_runtime_put_sync(dev);

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
        might_sleep();
        dpm_resume(state);
        dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
        switch (sleep_state.event) {
        case PM_EVENT_SUSPEND:
                return PMSG_RESUME;
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return PMSG_RECOVER;
        case PM_EVENT_HIBERNATE:
                return PMSG_RESTORE;
        }
        return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
        int error = 0;

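        /* Suspend-side callbacks run in reverse order: class, type, then bus. */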
        if (dev->class && dev->class->pm) {
                pm_dev_dbg(dev, state, "LATE class ");
                error = pm_noirq_op(dev, dev->class->pm, state);
                if (error)
                        goto End;
        }

        if (dev->type && dev->type->pm) {
                pm_dev_dbg(dev, state, "LATE type ");
                error = pm_noirq_op(dev, dev->type->pm, state);
                if (error)
                        goto End;
        }

        if (dev->bus && dev->bus->pm) {
                pm_dev_dbg(dev, state, "LATE ");
                error = pm_noirq_op(dev, dev->bus->pm, state);
        }

End:
        return error;
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        suspend_device_irqs();
        mutex_lock(&dpm_list_mtx);
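        /* Take devices from the tail, so children go down before parents. */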
        while (!list_empty(&dpm_suspended_list)) {
                struct device *dev = to_device(dpm_suspended_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_noirq(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " late", error);
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_noirq_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_noirq(resume_event(state));
        else
                dpm_show_time(starttime, state, "late");
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
                          int (*cb)(struct device *dev, pm_message_t state))
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        error = cb(dev, state);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
        int error = 0;

        dpm_wait_for_children(dev, async);
        device_lock(dev);

        if (async_error)
                goto End;

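        /* Abort the suspend right away if a wakeup event is already pending. */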
        if (pm_wakeup_pending()) {
                async_error = -EBUSY;
                goto End;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        pm_dev_dbg(dev, state, "class ");
                        error = pm_op(dev, dev->class->pm, state);
                } else if (dev->class->suspend) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_suspend(dev, state, dev->class->suspend);
                }
                if (error)
                        goto End;
        }

        if (dev->type) {
                if (dev->type->pm) {
                        pm_dev_dbg(dev, state, "type ");
                        error = pm_op(dev, dev->type->pm, state);
                }
                if (error)
                        goto End;
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        pm_dev_dbg(dev, state, "");
                        error = pm_op(dev, dev->bus->pm, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy ");
                        error = legacy_suspend(dev, state, dev->bus->suspend);
                }
        }

 End:
        device_unlock(dev);
        complete_all(&dev->power.completion);

        if (error)
                async_error = error;

        return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);

        put_device(dev);
}

static int device_suspend(struct device *dev)
{
        INIT_COMPLETION(dev->power.completion);

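        /* Suspend asynchronously if the device opted in and async PM is enabled. */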
        if (pm_async_enabled && dev->power.async_suspend) {
                get_device(dev);
                async_schedule(async_suspend, dev);
                return 0;
        }

        return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
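        /* As in the noirq phase, walk the list tail first: children before parents. */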
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, "", error);
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_suspended_list);
                put_device(dev);
                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (!error)
                dpm_show_time(starttime, state, NULL);
        return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
        int error = 0;

        device_lock(dev);

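        /* ->prepare() callbacks run bus first, then type, then class. */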
        if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
                pm_dev_dbg(dev, state, "preparing ");
                error = dev->bus->pm->prepare(dev);
                suspend_report_result(dev->bus->pm->prepare, error);
                if (error)
                        goto End;
        }

        if (dev->type && dev->type->pm && dev->type->pm->prepare) {
                pm_dev_dbg(dev, state, "preparing type ");
                error = dev->type->pm->prepare(dev);
                suspend_report_result(dev->type->pm->prepare, error);
                if (error)
                        goto End;
        }

        if (dev->class && dev->class->pm && dev->class->pm->prepare) {
                pm_dev_dbg(dev, state, "preparing class ");
                error = dev->class->pm->prepare(dev);
                suspend_report_result(dev->class->pm->prepare, error);
        }
 End:
        device_unlock(dev);

        return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
static int dpm_prepare(pm_message_t state)
{
        int error = 0;

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

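                /*
                 * Block runtime PM for the device; if the barrier found a
                 * pending runtime resume request and the device may wake up
                 * the system, record it as a wakeup event.
                 */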
                pm_runtime_get_noresume(dev);
                if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                        pm_wakeup_event(dev, 0);

                if (pm_wakeup_pending()) {
                        pm_runtime_put_sync(dev);
                        error = -EBUSY;
                } else {
                        error = device_prepare(dev, state);
                }

                mutex_lock(&dpm_list_mtx);
                if (error) {
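                        /* -EAGAIN means "skip this device", not a fatal error. */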
                        if (error == -EAGAIN) {
                                put_device(dev);
                                error = 0;
                                continue;
                        }
                        printk(KERN_INFO "PM: Device %s not prepared "
                                "for power transition: code %d\n",
                                dev_name(dev), error);
                        put_device(dev);
                        break;
                }
                dev->power.in_suspend = true;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
        int error;

        might_sleep();
        error = dpm_prepare(state);
        if (!error)
                error = dpm_suspend(state);
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
        if (ret)
                printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
        dpm_wait(dev, subordinate->power.async_suspend);
        return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);