1 /*
2  * drivers/base/power/main.c - Where the driver meets power management.
3  *
4  * Copyright (c) 2003 Patrick Mochel
5  * Copyright (c) 2003 Open Source Development Lab
6  *
7  * This file is released under the GPLv2
8  *
9  *
10  * The driver model core calls device_pm_add() when a device is registered.
11  * This will initialize the embedded device_pm_info object in the device
12  * and add it to the list of power-controlled devices. sysfs entries for
13  * controlling device power management will also be added.
14  *
15  * A separate list is used for keeping track of power info, because the power
16  * domain dependencies may differ from the ancestral dependencies that the
17  * subsystem list maintains.
18  */
19
20 #define pr_fmt(fmt) "PM: " fmt
21
22 #include <linux/device.h>
23 #include <linux/export.h>
24 #include <linux/mutex.h>
25 #include <linux/pm.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/pm-trace.h>
28 #include <linux/pm_wakeirq.h>
29 #include <linux/interrupt.h>
30 #include <linux/sched.h>
31 #include <linux/sched/debug.h>
32 #include <linux/async.h>
33 #include <linux/suspend.h>
34 #include <trace/events/power.h>
35 #include <linux/cpufreq.h>
36 #include <linux/cpuidle.h>
37 #include <linux/devfreq.h>
38 #include <linux/timer.h>
39
40 #include "../base.h"
41 #include "power.h"
42
43 typedef int (*pm_callback_t)(struct device *);
44
45 /*
46  * The entries in dpm_list are in depth-first order, simply
47  * because children are guaranteed to be discovered after parents, and
48  * are inserted at the back of the list on discovery.
49  *
50  * Since device_pm_add() may be called with a device lock held,
51  * we must never try to acquire a device lock while holding
52  * dpm_list_mutex.
53  */
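/*
 * In lock-ordering terms: a device lock may already be held when dpm_list_mtx
 * is taken, so dpm_list_mtx nests inside device locks and device_lock() must
 * never be called with dpm_list_mtx held.
 */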
54
55 LIST_HEAD(dpm_list);
56 static LIST_HEAD(dpm_prepared_list);
57 static LIST_HEAD(dpm_suspended_list);
58 static LIST_HEAD(dpm_late_early_list);
59 static LIST_HEAD(dpm_noirq_list);
60
61 struct suspend_stats suspend_stats;
62 static DEFINE_MUTEX(dpm_list_mtx);
63 static pm_message_t pm_transition;
64
65 static int async_error;
66
67 static const char *pm_verb(int event)
68 {
69         switch (event) {
70         case PM_EVENT_SUSPEND:
71                 return "suspend";
72         case PM_EVENT_RESUME:
73                 return "resume";
74         case PM_EVENT_FREEZE:
75                 return "freeze";
76         case PM_EVENT_QUIESCE:
77                 return "quiesce";
78         case PM_EVENT_HIBERNATE:
79                 return "hibernate";
80         case PM_EVENT_THAW:
81                 return "thaw";
82         case PM_EVENT_RESTORE:
83                 return "restore";
84         case PM_EVENT_RECOVER:
85                 return "recover";
86         default:
87                 return "(unknown PM event)";
88         }
89 }
90
91 /**
92  * device_pm_sleep_init - Initialize system suspend-related device fields.
93  * @dev: Device object being initialized.
94  */
95 void device_pm_sleep_init(struct device *dev)
96 {
97         dev->power.is_prepared = false;
98         dev->power.is_suspended = false;
99         dev->power.is_noirq_suspended = false;
100         dev->power.is_late_suspended = false;
101         init_completion(&dev->power.completion);
102         complete_all(&dev->power.completion);
103         dev->power.wakeup = NULL;
104         INIT_LIST_HEAD(&dev->power.entry);
105 }
106
107 /**
108  * device_pm_lock - Lock the list of active devices used by the PM core.
109  */
110 void device_pm_lock(void)
111 {
112         mutex_lock(&dpm_list_mtx);
113 }
114
115 /**
116  * device_pm_unlock - Unlock the list of active devices used by the PM core.
117  */
118 void device_pm_unlock(void)
119 {
120         mutex_unlock(&dpm_list_mtx);
121 }
122
123 /**
124  * device_pm_add - Add a device to the PM core's list of active devices.
125  * @dev: Device to add to the list.
126  */
127 void device_pm_add(struct device *dev)
128 {
129         /* Skip PM setup/initialization. */
130         if (device_pm_not_required(dev))
131                 return;
132
133         pr_debug("Adding info for %s:%s\n",
134                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
135         device_pm_check_callbacks(dev);
136         mutex_lock(&dpm_list_mtx);
137         if (dev->parent && dev->parent->power.is_prepared)
138                 dev_warn(dev, "parent %s should not be sleeping\n",
139                         dev_name(dev->parent));
140         list_add_tail(&dev->power.entry, &dpm_list);
141         dev->power.in_dpm_list = true;
142         mutex_unlock(&dpm_list_mtx);
143 }
144
145 /**
146  * device_pm_remove - Remove a device from the PM core's list of active devices.
147  * @dev: Device to be removed from the list.
148  */
149 void device_pm_remove(struct device *dev)
150 {
151         if (device_pm_not_required(dev))
152                 return;
153
154         pr_debug("Removing info for %s:%s\n",
155                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
156         complete_all(&dev->power.completion);
157         mutex_lock(&dpm_list_mtx);
158         list_del_init(&dev->power.entry);
159         dev->power.in_dpm_list = false;
160         mutex_unlock(&dpm_list_mtx);
161         device_wakeup_disable(dev);
162         pm_runtime_remove(dev);
163         device_pm_check_callbacks(dev);
164 }
165
166 /**
167  * device_pm_move_before - Move device in the PM core's list of active devices.
168  * @deva: Device to move in dpm_list.
169  * @devb: Device @deva should come before.
170  */
171 void device_pm_move_before(struct device *deva, struct device *devb)
172 {
173         pr_debug("Moving %s:%s before %s:%s\n",
174                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
175                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
176         /* Delete deva from dpm_list and reinsert before devb. */
177         list_move_tail(&deva->power.entry, &devb->power.entry);
178 }
179
180 /**
181  * device_pm_move_after - Move device in the PM core's list of active devices.
182  * @deva: Device to move in dpm_list.
183  * @devb: Device @deva should come after.
184  */
185 void device_pm_move_after(struct device *deva, struct device *devb)
186 {
187         pr_debug("Moving %s:%s after %s:%s\n",
188                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
189                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
190         /* Delete deva from dpm_list and reinsert after devb. */
191         list_move(&deva->power.entry, &devb->power.entry);
192 }
193
194 /**
195  * device_pm_move_last - Move device to end of the PM core's list of devices.
196  * @dev: Device to move in dpm_list.
197  */
198 void device_pm_move_last(struct device *dev)
199 {
200         pr_debug("Moving %s:%s to end of list\n",
201                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
202         list_move_tail(&dev->power.entry, &dpm_list);
203 }
204
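/*
 * initcall_debug_start() and initcall_debug_report() bracket a PM callback
 * when pm_print_times_enabled is set (e.g. via the initcall_debug boot option
 * or /sys/power/pm_print_times), logging the callback address, the calling
 * task and an approximate duration (nsecs >> 10, i.e. roughly microseconds).
 */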
205 static ktime_t initcall_debug_start(struct device *dev, void *cb)
206 {
207         if (!pm_print_times_enabled)
208                 return 0;
209
210         dev_info(dev, "calling %pF @ %i, parent: %s\n", cb,
211                  task_pid_nr(current),
212                  dev->parent ? dev_name(dev->parent) : "none");
213         return ktime_get();
214 }
215
216 static void initcall_debug_report(struct device *dev, ktime_t calltime,
217                                   void *cb, int error)
218 {
219         ktime_t rettime;
220         s64 nsecs;
221
222         if (!pm_print_times_enabled)
223                 return;
224
225         rettime = ktime_get();
226         nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
227
228         dev_info(dev, "%pF returned %d after %Ld usecs\n", cb, error,
229                  (unsigned long long)nsecs >> 10);
230 }
231
232 /**
233  * dpm_wait - Wait for a PM operation to complete.
234  * @dev: Device to wait for.
235  * @async: If unset, wait only if the device's power.async_suspend flag is set.
236  */
237 static void dpm_wait(struct device *dev, bool async)
238 {
239         if (!dev)
240                 return;
241
242         if (async || (pm_async_enabled && dev->power.async_suspend))
243                 wait_for_completion(&dev->power.completion);
244 }
245
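/*
 * dpm_wait_fn() is the device_for_each_child() callback used by
 * dpm_wait_for_children() to wait, one by one, for all children of a device.
 */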
246 static int dpm_wait_fn(struct device *dev, void *async_ptr)
247 {
248         dpm_wait(dev, *((bool *)async_ptr));
249         return 0;
250 }
251
252 static void dpm_wait_for_children(struct device *dev, bool async)
253 {
254         device_for_each_child(dev, &async, dpm_wait_fn);
255 }
256
257 static void dpm_wait_for_suppliers(struct device *dev, bool async)
258 {
259         struct device_link *link;
260         int idx;
261
262         idx = device_links_read_lock();
263
264         /*
265          * If the supplier goes away right after we've checked the link to it,
266          * we'll wait for its completion to change the state, but that's fine,
267          * because the only things that will block as a result are the SRCU
268          * callbacks freeing the link objects for the links in the list we're
269          * walking.
270          */
271         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
272                 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
273                         dpm_wait(link->supplier, async);
274
275         device_links_read_unlock(idx);
276 }
277
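/*
 * "Superiors" are the devices this one depends on (its parent and suppliers);
 * "subordinates" are the devices depending on it (its children and consumers).
 * The resume paths wait for superiors and the suspend paths for subordinates,
 * so that dependencies are always handled in the correct order.
 */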
278 static void dpm_wait_for_superior(struct device *dev, bool async)
279 {
280         dpm_wait(dev->parent, async);
281         dpm_wait_for_suppliers(dev, async);
282 }
283
284 static void dpm_wait_for_consumers(struct device *dev, bool async)
285 {
286         struct device_link *link;
287         int idx;
288
289         idx = device_links_read_lock();
290
291         /*
292          * The status of a device link can only be changed from "dormant" by a
293          * probe, but that cannot happen during system suspend/resume.  In
294          * theory it can change to "dormant" at that time, but then it is
295          * reasonable to wait for the target device anyway (e.g. if it goes
296          * away, it's better to wait for it to go away completely and then
297          * continue instead of trying to continue in parallel with its
298          * unregistration).
299          */
300         list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
301                 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
302                         dpm_wait(link->consumer, async);
303
304         device_links_read_unlock(idx);
305 }
306
307 static void dpm_wait_for_subordinate(struct device *dev, bool async)
308 {
309         dpm_wait_for_children(dev, async);
310         dpm_wait_for_consumers(dev, async);
311 }
312
313 /**
314  * pm_op - Return the PM operation appropriate for given PM event.
315  * @ops: PM operations to choose from.
316  * @state: PM transition of the system being carried out.
317  */
318 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
319 {
320         switch (state.event) {
321 #ifdef CONFIG_SUSPEND
322         case PM_EVENT_SUSPEND:
323                 return ops->suspend;
324         case PM_EVENT_RESUME:
325                 return ops->resume;
326 #endif /* CONFIG_SUSPEND */
327 #ifdef CONFIG_HIBERNATE_CALLBACKS
328         case PM_EVENT_FREEZE:
329         case PM_EVENT_QUIESCE:
330                 return ops->freeze;
331         case PM_EVENT_HIBERNATE:
332                 return ops->poweroff;
333         case PM_EVENT_THAW:
334         case PM_EVENT_RECOVER:
335                 return ops->thaw;
337         case PM_EVENT_RESTORE:
338                 return ops->restore;
339 #endif /* CONFIG_HIBERNATE_CALLBACKS */
340         }
341
342         return NULL;
343 }
344
345 /**
346  * pm_late_early_op - Return the PM operation appropriate for given PM event.
347  * @ops: PM operations to choose from.
348  * @state: PM transition of the system being carried out.
349  *
350  * Runtime PM is disabled for the device while the returned callback runs.
351  */
352 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
353                                       pm_message_t state)
354 {
355         switch (state.event) {
356 #ifdef CONFIG_SUSPEND
357         case PM_EVENT_SUSPEND:
358                 return ops->suspend_late;
359         case PM_EVENT_RESUME:
360                 return ops->resume_early;
361 #endif /* CONFIG_SUSPEND */
362 #ifdef CONFIG_HIBERNATE_CALLBACKS
363         case PM_EVENT_FREEZE:
364         case PM_EVENT_QUIESCE:
365                 return ops->freeze_late;
366         case PM_EVENT_HIBERNATE:
367                 return ops->poweroff_late;
368         case PM_EVENT_THAW:
369         case PM_EVENT_RECOVER:
370                 return ops->thaw_early;
371         case PM_EVENT_RESTORE:
372                 return ops->restore_early;
373 #endif /* CONFIG_HIBERNATE_CALLBACKS */
374         }
375
376         return NULL;
377 }
378
379 /**
380  * pm_noirq_op - Return the PM operation appropriate for given PM event.
381  * @ops: PM operations to choose from.
382  * @state: PM transition of the system being carried out.
383  *
384  * The driver of the target device will not receive interrupts while the
385  * returned callback is being executed.
386  */
387 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
388 {
389         switch (state.event) {
390 #ifdef CONFIG_SUSPEND
391         case PM_EVENT_SUSPEND:
392                 return ops->suspend_noirq;
393         case PM_EVENT_RESUME:
394                 return ops->resume_noirq;
395 #endif /* CONFIG_SUSPEND */
396 #ifdef CONFIG_HIBERNATE_CALLBACKS
397         case PM_EVENT_FREEZE:
398         case PM_EVENT_QUIESCE:
399                 return ops->freeze_noirq;
400         case PM_EVENT_HIBERNATE:
401                 return ops->poweroff_noirq;
402         case PM_EVENT_THAW:
403         case PM_EVENT_RECOVER:
404                 return ops->thaw_noirq;
405         case PM_EVENT_RESTORE:
406                 return ops->restore_noirq;
407 #endif /* CONFIG_HIBERNATE_CALLBACKS */
408         }
409
410         return NULL;
411 }
412
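/*
 * Small logging helpers: pm_dev_dbg() is emitted before a callback is run,
 * pm_dev_err() when a callback fails, and dpm_show_time() summarizes how long
 * a whole phase took.
 */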
413 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
414 {
415         dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
416                 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
417                 ", may wakeup" : "");
418 }
419
420 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
421                         int error)
422 {
423         pr_err("Device %s failed to %s%s: error %d\n",
424                dev_name(dev), pm_verb(state.event), info, error);
425 }
426
427 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
428                           const char *info)
429 {
430         ktime_t calltime;
431         u64 usecs64;
432         int usecs;
433
434         calltime = ktime_get();
435         usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
436         do_div(usecs64, NSEC_PER_USEC);
437         usecs = usecs64;
438         if (usecs == 0)
439                 usecs = 1;
440
441         pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
442                   info ?: "", info ? " " : "", pm_verb(state.event),
443                   error ? "aborted" : "complete",
444                   usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
445 }
446
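/*
 * dpm_run_callback() wraps the invocation of a device PM callback with
 * tracing, initcall_debug timing and suspend_report_result() accounting, and
 * treats a NULL callback as success.
 */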
447 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
448                             pm_message_t state, const char *info)
449 {
450         ktime_t calltime;
451         int error;
452
453         if (!cb)
454                 return 0;
455
456         calltime = initcall_debug_start(dev, cb);
457
458         pm_dev_dbg(dev, state, info);
459         trace_device_pm_callback_start(dev, info, state.event);
460         error = cb(dev);
461         trace_device_pm_callback_end(dev, error);
462         suspend_report_result(cb, error);
463
464         initcall_debug_report(dev, calltime, cb, error);
465
466         return error;
467 }
468
469 #ifdef CONFIG_DPM_WATCHDOG
470 struct dpm_watchdog {
471         struct device           *dev;
472         struct task_struct      *tsk;
473         struct timer_list       timer;
474 };
475
476 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
477         struct dpm_watchdog wd
478
479 /**
480  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
481  * @t: The timer that PM watchdog depends on.
482  *
483  * Called when a driver has timed out suspending or resuming.
484  * There's not much we can do here to recover so panic() to
485  * capture a crash-dump in pstore.
486  */
487 static void dpm_watchdog_handler(struct timer_list *t)
488 {
489         struct dpm_watchdog *wd = from_timer(wd, t, timer);
490
491         dev_emerg(wd->dev, "**** DPM device timeout ****\n");
492         show_stack(wd->tsk, NULL);
493         panic("%s %s: unrecoverable failure\n",
494                 dev_driver_string(wd->dev), dev_name(wd->dev));
495 }
496
497 /**
498  * dpm_watchdog_set - Enable pm watchdog for given device.
499  * @wd: Watchdog. Must be allocated on the stack.
500  * @dev: Device to handle.
501  */
502 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
503 {
504         struct timer_list *timer = &wd->timer;
505
506         wd->dev = dev;
507         wd->tsk = current;
508
509         timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
510         /* use same timeout value for both suspend and resume */
511         timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
512         add_timer(timer);
513 }
514
515 /**
516  * dpm_watchdog_clear - Disable suspend/resume watchdog.
517  * @wd: Watchdog to disable.
518  */
519 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
520 {
521         struct timer_list *timer = &wd->timer;
522
523         del_timer_sync(timer);
524         destroy_timer_on_stack(timer);
525 }
526 #else
527 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
528 #define dpm_watchdog_set(x, y)
529 #define dpm_watchdog_clear(x)
530 #endif
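/*
 * The DPM watchdog is a debugging aid: with CONFIG_DPM_WATCHDOG enabled, a
 * callback that has not returned after CONFIG_DPM_WATCHDOG_TIMEOUT seconds
 * (an integer Kconfig value, e.g. 120 in a typical debug build) triggers the
 * panic above so that the hang can be captured, e.g. in pstore.
 */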
531
532 /*------------------------- Resume routines -------------------------*/
533
534 /**
535  * dev_pm_skip_next_resume_phases - Skip next system resume phases for device.
536  * @dev: Target device.
537  *
538  * Make the core skip the "early resume" and "resume" phases for @dev.
539  *
540  * This function can be called by middle-layer code during the "noirq" phase of
541  * system resume if necessary, but not by device drivers.
542  */
543 void dev_pm_skip_next_resume_phases(struct device *dev)
544 {
545         dev->power.is_late_suspended = false;
546         dev->power.is_suspended = false;
547 }
548
549 /**
550  * suspend_event - Return a "suspend" message for given "resume" one.
551  * @resume_msg: PM message representing a system-wide resume transition.
552  */
553 static pm_message_t suspend_event(pm_message_t resume_msg)
554 {
555         switch (resume_msg.event) {
556         case PM_EVENT_RESUME:
557                 return PMSG_SUSPEND;
558         case PM_EVENT_THAW:
559         case PM_EVENT_RESTORE:
560                 return PMSG_FREEZE;
561         case PM_EVENT_RECOVER:
562                 return PMSG_HIBERNATE;
563         }
564         return PMSG_ON;
565 }
566
567 /**
568  * dev_pm_may_skip_resume - System-wide device resume optimization check.
569  * @dev: Target device.
570  *
571  * Checks whether or not the device may be left in suspend after a system-wide
572  * transition to the working state.
573  */
574 bool dev_pm_may_skip_resume(struct device *dev)
575 {
576         return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
577 }
578
579 static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
580                                                 pm_message_t state,
581                                                 const char **info_p)
582 {
583         pm_callback_t callback;
584         const char *info;
585
586         if (dev->pm_domain) {
587                 info = "noirq power domain ";
588                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
589         } else if (dev->type && dev->type->pm) {
590                 info = "noirq type ";
591                 callback = pm_noirq_op(dev->type->pm, state);
592         } else if (dev->class && dev->class->pm) {
593                 info = "noirq class ";
594                 callback = pm_noirq_op(dev->class->pm, state);
595         } else if (dev->bus && dev->bus->pm) {
596                 info = "noirq bus ";
597                 callback = pm_noirq_op(dev->bus->pm, state);
598         } else {
599                 return NULL;
600         }
601
602         if (info_p)
603                 *info_p = info;
604
605         return callback;
606 }
607
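/*
 * Forward declarations: these helpers are defined with the suspend routines
 * below, but device_resume_noirq() needs them for its smart-suspend checks.
 */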
608 static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
609                                                  pm_message_t state,
610                                                  const char **info_p);
611
612 static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
613                                                 pm_message_t state,
614                                                 const char **info_p);
615
616 /**
617  * device_resume_noirq - Execute a "noirq resume" callback for given device.
618  * @dev: Device to handle.
619  * @state: PM transition of the system being carried out.
620  * @async: If true, the device is being resumed asynchronously.
621  *
622  * The driver of @dev will not receive interrupts while this function is being
623  * executed.
624  */
625 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
626 {
627         pm_callback_t callback;
628         const char *info;
629         bool skip_resume;
630         int error = 0;
631
632         TRACE_DEVICE(dev);
633         TRACE_RESUME(0);
634
635         if (dev->power.syscore || dev->power.direct_complete)
636                 goto Out;
637
638         if (!dev->power.is_noirq_suspended)
639                 goto Out;
640
641         dpm_wait_for_superior(dev, async);
642
643         skip_resume = dev_pm_may_skip_resume(dev);
644
645         callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
646         if (callback)
647                 goto Run;
648
649         if (skip_resume)
650                 goto Skip;
651
652         if (dev_pm_smart_suspend_and_suspended(dev)) {
653                 pm_message_t suspend_msg = suspend_event(state);
654
655                 /*
656                  * If "freeze" callbacks have been skipped during a transition
657                  * related to hibernation, the subsequent "thaw" callbacks must
658                  * be skipped too or bad things may happen.  Otherwise, resume
659                  * callbacks are going to be run for the device, so its runtime
660                  * PM status must be changed to reflect the new state after the
661                  * transition under way.
662                  */
663                 if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
664                     !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
665                         if (state.event == PM_EVENT_THAW) {
666                                 skip_resume = true;
667                                 goto Skip;
668                         } else {
669                                 pm_runtime_set_active(dev);
670                         }
671                 }
672         }
673
674         if (dev->driver && dev->driver->pm) {
675                 info = "noirq driver ";
676                 callback = pm_noirq_op(dev->driver->pm, state);
677         }
678
679 Run:
680         error = dpm_run_callback(callback, dev, state, info);
681
682 Skip:
683         dev->power.is_noirq_suspended = false;
684
685         if (skip_resume) {
686                 /*
687                  * The device is going to be left in suspend, but it might not
688                  * have been in runtime suspend before the system suspended, so
689                  * its runtime PM status needs to be updated to avoid confusing
690                  * the runtime PM framework when runtime PM is enabled for the
691                  * device again.
692                  */
693                 pm_runtime_set_suspended(dev);
694                 dev_pm_skip_next_resume_phases(dev);
695         }
696
697 Out:
698         complete_all(&dev->power.completion);
699         TRACE_RESUME(error);
700         return error;
701 }
702
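/*
 * A device is handled in an async thread when it has opted in (typically via
 * device_enable_async_suspend()), "/sys/power/pm_async" is enabled and PM
 * tracing is not active.  dpm_async_fn() schedules the given handler for such
 * devices and takes a device reference that the handler drops via put_device().
 */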
703 static bool is_async(struct device *dev)
704 {
705         return dev->power.async_suspend && pm_async_enabled
706                 && !pm_trace_is_enabled();
707 }
708
709 static bool dpm_async_fn(struct device *dev, async_func_t func)
710 {
711         reinit_completion(&dev->power.completion);
712
713         if (is_async(dev)) {
714                 get_device(dev);
715                 async_schedule(func, dev);
716                 return true;
717         }
718
719         return false;
720 }
721
722 static void async_resume_noirq(void *data, async_cookie_t cookie)
723 {
724         struct device *dev = (struct device *)data;
725         int error;
726
727         error = device_resume_noirq(dev, pm_transition, true);
728         if (error)
729                 pm_dev_err(dev, pm_transition, " async", error);
730
731         put_device(dev);
732 }
733
734 void dpm_noirq_resume_devices(pm_message_t state)
735 {
736         struct device *dev;
737         ktime_t starttime = ktime_get();
738
739         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
740         mutex_lock(&dpm_list_mtx);
741         pm_transition = state;
742
743         /*
744          * Start the async threads upfront, in case their start
745          * would otherwise be delayed by devices that resume
746          * synchronously.
747          */
748         list_for_each_entry(dev, &dpm_noirq_list, power.entry)
749                 dpm_async_fn(dev, async_resume_noirq);
750
751         while (!list_empty(&dpm_noirq_list)) {
752                 dev = to_device(dpm_noirq_list.next);
753                 get_device(dev);
754                 list_move_tail(&dev->power.entry, &dpm_late_early_list);
755                 mutex_unlock(&dpm_list_mtx);
756
757                 if (!is_async(dev)) {
758                         int error;
759
760                         error = device_resume_noirq(dev, state, false);
761                         if (error) {
762                                 suspend_stats.failed_resume_noirq++;
763                                 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
764                                 dpm_save_failed_dev(dev_name(dev));
765                                 pm_dev_err(dev, state, " noirq", error);
766                         }
767                 }
768
769                 mutex_lock(&dpm_list_mtx);
770                 put_device(dev);
771         }
772         mutex_unlock(&dpm_list_mtx);
773         async_synchronize_full();
774         dpm_show_time(starttime, state, 0, "noirq");
775         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
776 }
777
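/**
 * dpm_noirq_end - End the "noirq" phase of a system resume transition.
 *
 * Re-enable device interrupt handlers, disarm wakeup interrupts and let
 * cpuidle run again; this undoes dpm_noirq_begin().
 */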
778 void dpm_noirq_end(void)
779 {
780         resume_device_irqs();
781         device_wakeup_disarm_wake_irqs();
782         cpuidle_resume();
783 }
784
785 /**
786  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
787  * @state: PM transition of the system being carried out.
788  *
789  * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
790  * allow device drivers' interrupt handlers to be called.
791  */
792 void dpm_resume_noirq(pm_message_t state)
793 {
794         dpm_noirq_resume_devices(state);
795         dpm_noirq_end();
796 }
797
798 static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
799                                                 pm_message_t state,
800                                                 const char **info_p)
801 {
802         pm_callback_t callback;
803         const char *info;
804
805         if (dev->pm_domain) {
806                 info = "early power domain ";
807                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
808         } else if (dev->type && dev->type->pm) {
809                 info = "early type ";
810                 callback = pm_late_early_op(dev->type->pm, state);
811         } else if (dev->class && dev->class->pm) {
812                 info = "early class ";
813                 callback = pm_late_early_op(dev->class->pm, state);
814         } else if (dev->bus && dev->bus->pm) {
815                 info = "early bus ";
816                 callback = pm_late_early_op(dev->bus->pm, state);
817         } else {
818                 return NULL;
819         }
820
821         if (info_p)
822                 *info_p = info;
823
824         return callback;
825 }
826
827 /**
828  * device_resume_early - Execute an "early resume" callback for given device.
829  * @dev: Device to handle.
830  * @state: PM transition of the system being carried out.
831  * @async: If true, the device is being resumed asynchronously.
832  *
833  * Runtime PM is disabled for @dev while this function is being executed.
834  */
835 static int device_resume_early(struct device *dev, pm_message_t state, bool async)
836 {
837         pm_callback_t callback;
838         const char *info;
839         int error = 0;
840
841         TRACE_DEVICE(dev);
842         TRACE_RESUME(0);
843
844         if (dev->power.syscore || dev->power.direct_complete)
845                 goto Out;
846
847         if (!dev->power.is_late_suspended)
848                 goto Out;
849
850         dpm_wait_for_superior(dev, async);
851
852         callback = dpm_subsys_resume_early_cb(dev, state, &info);
853
854         if (!callback && dev->driver && dev->driver->pm) {
855                 info = "early driver ";
856                 callback = pm_late_early_op(dev->driver->pm, state);
857         }
858
859         error = dpm_run_callback(callback, dev, state, info);
860         dev->power.is_late_suspended = false;
861
862  Out:
863         TRACE_RESUME(error);
864
865         pm_runtime_enable(dev);
866         complete_all(&dev->power.completion);
867         return error;
868 }
869
870 static void async_resume_early(void *data, async_cookie_t cookie)
871 {
872         struct device *dev = (struct device *)data;
873         int error;
874
875         error = device_resume_early(dev, pm_transition, true);
876         if (error)
877                 pm_dev_err(dev, pm_transition, " async", error);
878
879         put_device(dev);
880 }
881
882 /**
883  * dpm_resume_early - Execute "early resume" callbacks for all devices.
884  * @state: PM transition of the system being carried out.
885  */
886 void dpm_resume_early(pm_message_t state)
887 {
888         struct device *dev;
889         ktime_t starttime = ktime_get();
890
891         trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
892         mutex_lock(&dpm_list_mtx);
893         pm_transition = state;
894
895         /*
896          * Start the async threads upfront, in case their start
897          * would otherwise be delayed by devices that resume
898          * synchronously.
899          */
900         list_for_each_entry(dev, &dpm_late_early_list, power.entry)
901                 dpm_async_fn(dev, async_resume_early);
902
903         while (!list_empty(&dpm_late_early_list)) {
904                 dev = to_device(dpm_late_early_list.next);
905                 get_device(dev);
906                 list_move_tail(&dev->power.entry, &dpm_suspended_list);
907                 mutex_unlock(&dpm_list_mtx);
908
909                 if (!is_async(dev)) {
910                         int error;
911
912                         error = device_resume_early(dev, state, false);
913                         if (error) {
914                                 suspend_stats.failed_resume_early++;
915                                 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
916                                 dpm_save_failed_dev(dev_name(dev));
917                                 pm_dev_err(dev, state, " early", error);
918                         }
919                 }
920                 mutex_lock(&dpm_list_mtx);
921                 put_device(dev);
922         }
923         mutex_unlock(&dpm_list_mtx);
924         async_synchronize_full();
925         dpm_show_time(starttime, state, 0, "early");
926         trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
927 }
928
929 /**
930  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
931  * @state: PM transition of the system being carried out.
932  */
933 void dpm_resume_start(pm_message_t state)
934 {
935         dpm_resume_noirq(state);
936         dpm_resume_early(state);
937 }
938 EXPORT_SYMBOL_GPL(dpm_resume_start);
939
940 /**
941  * device_resume - Execute "resume" callbacks for given device.
942  * @dev: Device to handle.
943  * @state: PM transition of the system being carried out.
944  * @async: If true, the device is being resumed asynchronously.
945  */
946 static int device_resume(struct device *dev, pm_message_t state, bool async)
947 {
948         pm_callback_t callback = NULL;
949         const char *info = NULL;
950         int error = 0;
951         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
952
953         TRACE_DEVICE(dev);
954         TRACE_RESUME(0);
955
956         if (dev->power.syscore)
957                 goto Complete;
958
959         if (dev->power.direct_complete) {
960                 /* Match the pm_runtime_disable() in __device_suspend(). */
961                 pm_runtime_enable(dev);
962                 goto Complete;
963         }
964
965         dpm_wait_for_superior(dev, async);
966         dpm_watchdog_set(&wd, dev);
967         device_lock(dev);
968
969         /*
970          * This is a fib.  But we'll allow new children to be added below
971          * a resumed device, even if the device hasn't been completed yet.
972          */
973         dev->power.is_prepared = false;
974
975         if (!dev->power.is_suspended)
976                 goto Unlock;
977
978         if (dev->pm_domain) {
979                 info = "power domain ";
980                 callback = pm_op(&dev->pm_domain->ops, state);
981                 goto Driver;
982         }
983
984         if (dev->type && dev->type->pm) {
985                 info = "type ";
986                 callback = pm_op(dev->type->pm, state);
987                 goto Driver;
988         }
989
990         if (dev->class && dev->class->pm) {
991                 info = "class ";
992                 callback = pm_op(dev->class->pm, state);
993                 goto Driver;
994         }
995
996         if (dev->bus) {
997                 if (dev->bus->pm) {
998                         info = "bus ";
999                         callback = pm_op(dev->bus->pm, state);
1000                 } else if (dev->bus->resume) {
1001                         info = "legacy bus ";
1002                         callback = dev->bus->resume;
1003                         goto End;
1004                 }
1005         }
1006
1007  Driver:
1008         if (!callback && dev->driver && dev->driver->pm) {
1009                 info = "driver ";
1010                 callback = pm_op(dev->driver->pm, state);
1011         }
1012
1013  End:
1014         error = dpm_run_callback(callback, dev, state, info);
1015         dev->power.is_suspended = false;
1016
1017  Unlock:
1018         device_unlock(dev);
1019         dpm_watchdog_clear(&wd);
1020
1021  Complete:
1022         complete_all(&dev->power.completion);
1023
1024         TRACE_RESUME(error);
1025
1026         return error;
1027 }
1028
1029 static void async_resume(void *data, async_cookie_t cookie)
1030 {
1031         struct device *dev = (struct device *)data;
1032         int error;
1033
1034         error = device_resume(dev, pm_transition, true);
1035         if (error)
1036                 pm_dev_err(dev, pm_transition, " async", error);
1037         put_device(dev);
1038 }
1039
1040 /**
1041  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
1042  * @state: PM transition of the system being carried out.
1043  *
1044  * Execute the appropriate "resume" callback for all devices whose status
1045  * indicates that they are suspended.
1046  */
1047 void dpm_resume(pm_message_t state)
1048 {
1049         struct device *dev;
1050         ktime_t starttime = ktime_get();
1051
1052         trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1053         might_sleep();
1054
1055         mutex_lock(&dpm_list_mtx);
1056         pm_transition = state;
1057         async_error = 0;
1058
1059         list_for_each_entry(dev, &dpm_suspended_list, power.entry)
1060                 dpm_async_fn(dev, async_resume);
1061
1062         while (!list_empty(&dpm_suspended_list)) {
1063                 dev = to_device(dpm_suspended_list.next);
1064                 get_device(dev);
1065                 if (!is_async(dev)) {
1066                         int error;
1067
1068                         mutex_unlock(&dpm_list_mtx);
1069
1070                         error = device_resume(dev, state, false);
1071                         if (error) {
1072                                 suspend_stats.failed_resume++;
1073                                 dpm_save_failed_step(SUSPEND_RESUME);
1074                                 dpm_save_failed_dev(dev_name(dev));
1075                                 pm_dev_err(dev, state, "", error);
1076                         }
1077
1078                         mutex_lock(&dpm_list_mtx);
1079                 }
1080                 if (!list_empty(&dev->power.entry))
1081                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
1082                 put_device(dev);
1083         }
1084         mutex_unlock(&dpm_list_mtx);
1085         async_synchronize_full();
1086         dpm_show_time(starttime, state, 0, NULL);
1087
1088         cpufreq_resume();
1089         devfreq_resume();
1090         trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1091 }
1092
1093 /**
1094  * device_complete - Complete a PM transition for given device.
1095  * @dev: Device to handle.
1096  * @state: PM transition of the system being carried out.
1097  */
1098 static void device_complete(struct device *dev, pm_message_t state)
1099 {
1100         void (*callback)(struct device *) = NULL;
1101         const char *info = NULL;
1102
1103         if (dev->power.syscore)
1104                 return;
1105
1106         device_lock(dev);
1107
1108         if (dev->pm_domain) {
1109                 info = "completing power domain ";
1110                 callback = dev->pm_domain->ops.complete;
1111         } else if (dev->type && dev->type->pm) {
1112                 info = "completing type ";
1113                 callback = dev->type->pm->complete;
1114         } else if (dev->class && dev->class->pm) {
1115                 info = "completing class ";
1116                 callback = dev->class->pm->complete;
1117         } else if (dev->bus && dev->bus->pm) {
1118                 info = "completing bus ";
1119                 callback = dev->bus->pm->complete;
1120         }
1121
1122         if (!callback && dev->driver && dev->driver->pm) {
1123                 info = "completing driver ";
1124                 callback = dev->driver->pm->complete;
1125         }
1126
1127         if (callback) {
1128                 pm_dev_dbg(dev, state, info);
1129                 callback(dev);
1130         }
1131
1132         device_unlock(dev);
1133
1134         pm_runtime_put(dev);
1135 }
1136
1137 /**
1138  * dpm_complete - Complete a PM transition for all non-sysdev devices.
1139  * @state: PM transition of the system being carried out.
1140  *
1141  * Execute the ->complete() callbacks for all devices whose PM status is not
1142  * DPM_ON (this allows new devices to be registered).
1143  */
1144 void dpm_complete(pm_message_t state)
1145 {
1146         struct list_head list;
1147
1148         trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1149         might_sleep();
1150
1151         INIT_LIST_HEAD(&list);
1152         mutex_lock(&dpm_list_mtx);
1153         while (!list_empty(&dpm_prepared_list)) {
1154                 struct device *dev = to_device(dpm_prepared_list.prev);
1155
1156                 get_device(dev);
1157                 dev->power.is_prepared = false;
1158                 list_move(&dev->power.entry, &list);
1159                 mutex_unlock(&dpm_list_mtx);
1160
1161                 trace_device_pm_callback_start(dev, "", state.event);
1162                 device_complete(dev, state);
1163                 trace_device_pm_callback_end(dev, 0);
1164
1165                 mutex_lock(&dpm_list_mtx);
1166                 put_device(dev);
1167         }
1168         list_splice(&list, &dpm_list);
1169         mutex_unlock(&dpm_list_mtx);
1170
1171         /* Allow device probing and trigger re-probing of deferred devices */
1172         device_unblock_probing();
1173         trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1174 }
1175
1176 /**
1177  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1178  * @state: PM transition of the system being carried out.
1179  *
1180  * Execute "resume" callbacks for all devices and complete the PM transition of
1181  * the system.
1182  */
1183 void dpm_resume_end(pm_message_t state)
1184 {
1185         dpm_resume(state);
1186         dpm_complete(state);
1187 }
1188 EXPORT_SYMBOL_GPL(dpm_resume_end);
1189
1190
1191 /*------------------------- Suspend routines -------------------------*/
1192
1193 /**
1194  * resume_event - Return a "resume" message for given "suspend" sleep state.
1195  * @sleep_state: PM message representing a sleep state.
1196  *
1197  * Return a PM message representing the resume event corresponding to given
1198  * sleep state.
1199  */
1200 static pm_message_t resume_event(pm_message_t sleep_state)
1201 {
1202         switch (sleep_state.event) {
1203         case PM_EVENT_SUSPEND:
1204                 return PMSG_RESUME;
1205         case PM_EVENT_FREEZE:
1206         case PM_EVENT_QUIESCE:
1207                 return PMSG_RECOVER;
1208         case PM_EVENT_HIBERNATE:
1209                 return PMSG_RESTORE;
1210         }
1211         return PMSG_ON;
1212 }
1213
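/*
 * Mark the parent and all suppliers of @dev as "must resume" so that none of
 * them is left in suspend while a device depending on them is resumed.
 */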
1214 static void dpm_superior_set_must_resume(struct device *dev)
1215 {
1216         struct device_link *link;
1217         int idx;
1218
1219         if (dev->parent)
1220                 dev->parent->power.must_resume = true;
1221
1222         idx = device_links_read_lock();
1223
1224         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
1225                 link->supplier->power.must_resume = true;
1226
1227         device_links_read_unlock(idx);
1228 }
1229
1230 static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
1231                                                  pm_message_t state,
1232                                                  const char **info_p)
1233 {
1234         pm_callback_t callback;
1235         const char *info;
1236
1237         if (dev->pm_domain) {
1238                 info = "noirq power domain ";
1239                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
1240         } else if (dev->type && dev->type->pm) {
1241                 info = "noirq type ";
1242                 callback = pm_noirq_op(dev->type->pm, state);
1243         } else if (dev->class && dev->class->pm) {
1244                 info = "noirq class ";
1245                 callback = pm_noirq_op(dev->class->pm, state);
1246         } else if (dev->bus && dev->bus->pm) {
1247                 info = "noirq bus ";
1248                 callback = pm_noirq_op(dev->bus->pm, state);
1249         } else {
1250                 return NULL;
1251         }
1252
1253         if (info_p)
1254                 *info_p = info;
1255
1256         return callback;
1257 }
1258
1259 static bool device_must_resume(struct device *dev, pm_message_t state,
1260                                bool no_subsys_suspend_noirq)
1261 {
1262         pm_message_t resume_msg = resume_event(state);
1263
1264         /*
1265          * If all of the device driver's "noirq", "late" and "early" callbacks
1266          * are invoked directly by the core, the decision to allow the device to
1267          * stay in suspend can be based on its current runtime PM status and its
1268          * wakeup settings.
1269          */
1270         if (no_subsys_suspend_noirq &&
1271             !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
1272             !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
1273             !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
1274                 return !pm_runtime_status_suspended(dev) &&
1275                         (resume_msg.event != PM_EVENT_RESUME ||
1276                          (device_can_wakeup(dev) && !device_may_wakeup(dev)));
1277
1278         /*
1279          * The only safe strategy here is to require that if the device may not
1280          * be left in suspend, resume callbacks must be invoked for it.
1281          */
1282         return !dev->power.may_skip_resume;
1283 }
1284
1285 /**
1286  * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1287  * @dev: Device to handle.
1288  * @state: PM transition of the system being carried out.
1289  * @async: If true, the device is being suspended asynchronously.
1290  *
1291  * The driver of @dev will not receive interrupts while this function is being
1292  * executed.
1293  */
1294 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1295 {
1296         pm_callback_t callback;
1297         const char *info;
1298         bool no_subsys_cb = false;
1299         int error = 0;
1300
1301         TRACE_DEVICE(dev);
1302         TRACE_SUSPEND(0);
1303
1304         dpm_wait_for_subordinate(dev, async);
1305
1306         if (async_error)
1307                 goto Complete;
1308
1309         if (pm_wakeup_pending()) {
1310                 async_error = -EBUSY;
1311                 goto Complete;
1312         }
1313
1314         if (dev->power.syscore || dev->power.direct_complete)
1315                 goto Complete;
1316
1317         callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
1318         if (callback)
1319                 goto Run;
1320
1321         no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);
1322
1323         if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
1324                 goto Skip;
1325
1326         if (dev->driver && dev->driver->pm) {
1327                 info = "noirq driver ";
1328                 callback = pm_noirq_op(dev->driver->pm, state);
1329         }
1330
1331 Run:
1332         error = dpm_run_callback(callback, dev, state, info);
1333         if (error) {
1334                 async_error = error;
1335                 goto Complete;
1336         }
1337
1338 Skip:
1339         dev->power.is_noirq_suspended = true;
1340
1341         if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
1342                 dev->power.must_resume = dev->power.must_resume ||
1343                                 atomic_read(&dev->power.usage_count) > 1 ||
1344                                 device_must_resume(dev, state, no_subsys_cb);
1345         } else {
1346                 dev->power.must_resume = true;
1347         }
1348
1349         if (dev->power.must_resume)
1350                 dpm_superior_set_must_resume(dev);
1351
1352 Complete:
1353         complete_all(&dev->power.completion);
1354         TRACE_SUSPEND(error);
1355         return error;
1356 }
1357
1358 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1359 {
1360         struct device *dev = (struct device *)data;
1361         int error;
1362
1363         error = __device_suspend_noirq(dev, pm_transition, true);
1364         if (error) {
1365                 dpm_save_failed_dev(dev_name(dev));
1366                 pm_dev_err(dev, pm_transition, " async", error);
1367         }
1368
1369         put_device(dev);
1370 }
1371
1372 static int device_suspend_noirq(struct device *dev)
1373 {
1374         if (dpm_async_fn(dev, async_suspend_noirq))
1375                 return 0;
1376
1377         return __device_suspend_noirq(dev, pm_transition, false);
1378 }
1379
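/**
 * dpm_noirq_begin - Start the "noirq" phase of a system suspend transition.
 *
 * Pause cpuidle, arm wakeup interrupts and disable device interrupt handlers;
 * dpm_noirq_end() reverses these steps during resume.
 */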
1380 void dpm_noirq_begin(void)
1381 {
1382         cpuidle_pause();
1383         device_wakeup_arm_wake_irqs();
1384         suspend_device_irqs();
1385 }
1386
1387 int dpm_noirq_suspend_devices(pm_message_t state)
1388 {
1389         ktime_t starttime = ktime_get();
1390         int error = 0;
1391
1392         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1393         mutex_lock(&dpm_list_mtx);
1394         pm_transition = state;
1395         async_error = 0;
1396
1397         while (!list_empty(&dpm_late_early_list)) {
1398                 struct device *dev = to_device(dpm_late_early_list.prev);
1399
1400                 get_device(dev);
1401                 mutex_unlock(&dpm_list_mtx);
1402
1403                 error = device_suspend_noirq(dev);
1404
1405                 mutex_lock(&dpm_list_mtx);
1406                 if (error) {
1407                         pm_dev_err(dev, state, " noirq", error);
1408                         dpm_save_failed_dev(dev_name(dev));
1409                         put_device(dev);
1410                         break;
1411                 }
1412                 if (!list_empty(&dev->power.entry))
1413                         list_move(&dev->power.entry, &dpm_noirq_list);
1414                 put_device(dev);
1415
1416                 if (async_error)
1417                         break;
1418         }
1419         mutex_unlock(&dpm_list_mtx);
1420         async_synchronize_full();
1421         if (!error)
1422                 error = async_error;
1423
1424         if (error) {
1425                 suspend_stats.failed_suspend_noirq++;
1426                 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1427         }
1428         dpm_show_time(starttime, state, error, "noirq");
1429         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1430         return error;
1431 }
1432
1433 /**
1434  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1435  * @state: PM transition of the system being carried out.
1436  *
1437  * Prevent device drivers' interrupt handlers from being called and invoke
1438  * "noirq" suspend callbacks for all non-sysdev devices.
1439  */
1440 int dpm_suspend_noirq(pm_message_t state)
1441 {
1442         int ret;
1443
1444         dpm_noirq_begin();
1445         ret = dpm_noirq_suspend_devices(state);
1446         if (ret)
1447                 dpm_resume_noirq(resume_event(state));
1448
1449         return ret;
1450 }
1451
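/*
 * If @dev lies in a wakeup path, propagate that fact to its parent (unless
 * the parent ignores its children), so that the parent is not powered down
 * in a way that would break the wakeup path.
 */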
1452 static void dpm_propagate_wakeup_to_parent(struct device *dev)
1453 {
1454         struct device *parent = dev->parent;
1455
1456         if (!parent)
1457                 return;
1458
1459         spin_lock_irq(&parent->power.lock);
1460
1461         if (dev->power.wakeup_path && !parent->power.ignore_children)
1462                 parent->power.wakeup_path = true;
1463
1464         spin_unlock_irq(&parent->power.lock);
1465 }
1466
1467 static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
1468                                                 pm_message_t state,
1469                                                 const char **info_p)
1470 {
1471         pm_callback_t callback;
1472         const char *info;
1473
1474         if (dev->pm_domain) {
1475                 info = "late power domain ";
1476                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
1477         } else if (dev->type && dev->type->pm) {
1478                 info = "late type ";
1479                 callback = pm_late_early_op(dev->type->pm, state);
1480         } else if (dev->class && dev->class->pm) {
1481                 info = "late class ";
1482                 callback = pm_late_early_op(dev->class->pm, state);
1483         } else if (dev->bus && dev->bus->pm) {
1484                 info = "late bus ";
1485                 callback = pm_late_early_op(dev->bus->pm, state);
1486         } else {
1487                 return NULL;
1488         }
1489
1490         if (info_p)
1491                 *info_p = info;
1492
1493         return callback;
1494 }
1495
1496 /**
1497  * __device_suspend_late - Execute a "late suspend" callback for given device.
1498  * @dev: Device to handle.
1499  * @state: PM transition of the system being carried out.
1500  * @async: If true, the device is being suspended asynchronously.
1501  *
1502  * Runtime PM is disabled for @dev while this function is being executed.
1503  */
1504 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1505 {
1506         pm_callback_t callback;
1507         const char *info;
1508         int error = 0;
1509
1510         TRACE_DEVICE(dev);
1511         TRACE_SUSPEND(0);
1512
1513         __pm_runtime_disable(dev, false);
1514
1515         dpm_wait_for_subordinate(dev, async);
1516
1517         if (async_error)
1518                 goto Complete;
1519
1520         if (pm_wakeup_pending()) {
1521                 async_error = -EBUSY;
1522                 goto Complete;
1523         }
1524
1525         if (dev->power.syscore || dev->power.direct_complete)
1526                 goto Complete;
1527
1528         callback = dpm_subsys_suspend_late_cb(dev, state, &info);
1529         if (callback)
1530                 goto Run;
1531
1532         if (dev_pm_smart_suspend_and_suspended(dev) &&
1533             !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
1534                 goto Skip;
1535
1536         if (dev->driver && dev->driver->pm) {
1537                 info = "late driver ";
1538                 callback = pm_late_early_op(dev->driver->pm, state);
1539         }
1540
1541 Run:
1542         error = dpm_run_callback(callback, dev, state, info);
1543         if (error) {
1544                 async_error = error;
1545                 goto Complete;
1546         }
1547         dpm_propagate_wakeup_to_parent(dev);
1548
1549 Skip:
1550         dev->power.is_late_suspended = true;
1551
1552 Complete:
1553         TRACE_SUSPEND(error);
1554         complete_all(&dev->power.completion);
1555         return error;
1556 }
1557
1558 static void async_suspend_late(void *data, async_cookie_t cookie)
1559 {
1560         struct device *dev = (struct device *)data;
1561         int error;
1562
1563         error = __device_suspend_late(dev, pm_transition, true);
1564         if (error) {
1565                 dpm_save_failed_dev(dev_name(dev));
1566                 pm_dev_err(dev, pm_transition, " async", error);
1567         }
1568         put_device(dev);
1569 }
1570
1571 static int device_suspend_late(struct device *dev)
1572 {
1573         if (dpm_async_fn(dev, async_suspend_late))
1574                 return 0;
1575
1576         return __device_suspend_late(dev, pm_transition, false);
1577 }
1578
1579 /**
1580  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1581  * @state: PM transition of the system being carried out.
1582  */
1583 int dpm_suspend_late(pm_message_t state)
1584 {
1585         ktime_t starttime = ktime_get();
1586         int error = 0;
1587
1588         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1589         mutex_lock(&dpm_list_mtx);
1590         pm_transition = state;
1591         async_error = 0;
1592
1593         while (!list_empty(&dpm_suspended_list)) {
1594                 struct device *dev = to_device(dpm_suspended_list.prev);
1595
1596                 get_device(dev);
1597                 mutex_unlock(&dpm_list_mtx);
1598
1599                 error = device_suspend_late(dev);
1600
1601                 mutex_lock(&dpm_list_mtx);
1602                 if (!list_empty(&dev->power.entry))
1603                         list_move(&dev->power.entry, &dpm_late_early_list);
1604
1605                 if (error) {
1606                         pm_dev_err(dev, state, " late", error);
1607                         dpm_save_failed_dev(dev_name(dev));
1608                         put_device(dev);
1609                         break;
1610                 }
1611                 put_device(dev);
1612
1613                 if (async_error)
1614                         break;
1615         }
1616         mutex_unlock(&dpm_list_mtx);
1617         async_synchronize_full();
1618         if (!error)
1619                 error = async_error;
1620         if (error) {
1621                 suspend_stats.failed_suspend_late++;
1622                 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1623                 dpm_resume_early(resume_event(state));
1624         }
1625         dpm_show_time(starttime, state, error, "late");
1626         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1627         return error;
1628 }
1629
1630 /**
1631  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1632  * @state: PM transition of the system being carried out.
1633  */
1634 int dpm_suspend_end(pm_message_t state)
1635 {
1636         int error = dpm_suspend_late(state);
1637         if (error)
1638                 return error;
1639
1640         error = dpm_suspend_noirq(state);
1641         if (error) {
1642                 dpm_resume_early(resume_event(state));
1643                 return error;
1644         }
1645
1646         return 0;
1647 }
1648 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1649
1650 /**
1651  * legacy_suspend - Execute a legacy (bus or class) suspend callback for a device.
1652  * @dev: Device to suspend.
1653  * @state: PM transition of the system being carried out.
1654  * @cb: Suspend callback to execute.
1655  * @info: string description of caller.
1656  */
1657 static int legacy_suspend(struct device *dev, pm_message_t state,
1658                           int (*cb)(struct device *dev, pm_message_t state),
1659                           const char *info)
1660 {
1661         int error;
1662         ktime_t calltime;
1663
1664         calltime = initcall_debug_start(dev, cb);
1665
1666         trace_device_pm_callback_start(dev, info, state.event);
1667         error = cb(dev, state);
1668         trace_device_pm_callback_end(dev, error);
1669         suspend_report_result(cb, error);
1670
1671         initcall_debug_report(dev, calltime, cb, error);
1672
1673         return error;
1674 }
1675
1676 static void dpm_clear_superiors_direct_complete(struct device *dev)
1677 {
1678         struct device_link *link;
1679         int idx;
1680
1681         if (dev->parent) {
1682                 spin_lock_irq(&dev->parent->power.lock);
1683                 dev->parent->power.direct_complete = false;
1684                 spin_unlock_irq(&dev->parent->power.lock);
1685         }
1686
1687         idx = device_links_read_lock();
1688
1689         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
1690                 spin_lock_irq(&link->supplier->power.lock);
1691                 link->supplier->power.direct_complete = false;
1692                 spin_unlock_irq(&link->supplier->power.lock);
1693         }
1694
1695         device_links_read_unlock(idx);
1696 }
1697
1698 /**
1699  * __device_suspend - Execute "suspend" callbacks for the given device.
1700  * @dev: Device to handle.
1701  * @state: PM transition of the system being carried out.
1702  * @async: If true, the device is being suspended asynchronously.
1703  */
1704 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1705 {
1706         pm_callback_t callback = NULL;
1707         const char *info = NULL;
1708         int error = 0;
1709         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1710
1711         TRACE_DEVICE(dev);
1712         TRACE_SUSPEND(0);
1713
1714         dpm_wait_for_subordinate(dev, async);
1715
1716         if (async_error) {
1717                 dev->power.direct_complete = false;
1718                 goto Complete;
1719         }
1720
1721         /*
1722          * If a device configured to wake up the system from sleep states
1723          * has been suspended at run time and there's a resume request pending
1724          * for it, this is equivalent to the device signaling wakeup, so the
1725          * system suspend operation should be aborted.
1726          */
1727         if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
1728                 pm_wakeup_event(dev, 0);
1729
1730         if (pm_wakeup_pending()) {
1731                 dev->power.direct_complete = false;
1732                 async_error = -EBUSY;
1733                 goto Complete;
1734         }
1735
1736         if (dev->power.syscore)
1737                 goto Complete;
1738
1739         /* Avoid direct_complete to let wakeup_path propagate. */
1740         if (device_may_wakeup(dev) || dev->power.wakeup_path)
1741                 dev->power.direct_complete = false;
1742
1743         if (dev->power.direct_complete) {
1744                 if (pm_runtime_status_suspended(dev)) {
1745                         pm_runtime_disable(dev);
1746                         if (pm_runtime_status_suspended(dev)) {
1747                                 pm_dev_dbg(dev, state, "direct-complete ");
1748                                 goto Complete;
1749                         }
1750
1751                         pm_runtime_enable(dev);
1752                 }
1753                 dev->power.direct_complete = false;
1754         }
1755
1756         dev->power.may_skip_resume = false;
1757         dev->power.must_resume = false;
1758
1759         dpm_watchdog_set(&wd, dev);
1760         device_lock(dev);
1761
1762         if (dev->pm_domain) {
1763                 info = "power domain ";
1764                 callback = pm_op(&dev->pm_domain->ops, state);
1765                 goto Run;
1766         }
1767
1768         if (dev->type && dev->type->pm) {
1769                 info = "type ";
1770                 callback = pm_op(dev->type->pm, state);
1771                 goto Run;
1772         }
1773
1774         if (dev->class && dev->class->pm) {
1775                 info = "class ";
1776                 callback = pm_op(dev->class->pm, state);
1777                 goto Run;
1778         }
1779
1780         if (dev->bus) {
1781                 if (dev->bus->pm) {
1782                         info = "bus ";
1783                         callback = pm_op(dev->bus->pm, state);
1784                 } else if (dev->bus->suspend) {
1785                         pm_dev_dbg(dev, state, "legacy bus ");
1786                         error = legacy_suspend(dev, state, dev->bus->suspend,
1787                                                 "legacy bus ");
1788                         goto End;
1789                 }
1790         }
1791
1792  Run:
1793         if (!callback && dev->driver && dev->driver->pm) {
1794                 info = "driver ";
1795                 callback = pm_op(dev->driver->pm, state);
1796         }
1797
1798         error = dpm_run_callback(callback, dev, state, info);
1799
1800  End:
1801         if (!error) {
1802                 dev->power.is_suspended = true;
1803                 if (device_may_wakeup(dev))
1804                         dev->power.wakeup_path = true;
1805
1806                 dpm_propagate_wakeup_to_parent(dev);
1807                 dpm_clear_superiors_direct_complete(dev);
1808         }
1809
1810         device_unlock(dev);
1811         dpm_watchdog_clear(&wd);
1812
1813  Complete:
1814         if (error)
1815                 async_error = error;
1816
1817         complete_all(&dev->power.completion);
1818         TRACE_SUSPEND(error);
1819         return error;
1820 }
1821
1822 static void async_suspend(void *data, async_cookie_t cookie)
1823 {
1824         struct device *dev = (struct device *)data;
1825         int error;
1826
1827         error = __device_suspend(dev, pm_transition, true);
1828         if (error) {
1829                 dpm_save_failed_dev(dev_name(dev));
1830                 pm_dev_err(dev, pm_transition, " async", error);
1831         }
1832
1833         put_device(dev);
1834 }
1835
1836 static int device_suspend(struct device *dev)
1837 {
1838         if (dpm_async_fn(dev, async_suspend))
1839                 return 0;
1840
1841         return __device_suspend(dev, pm_transition, false);
1842 }
1843
1844 /**
1845  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1846  * @state: PM transition of the system being carried out.
1847  */
1848 int dpm_suspend(pm_message_t state)
1849 {
1850         ktime_t starttime = ktime_get();
1851         int error = 0;
1852
1853         trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1854         might_sleep();
1855
1856         devfreq_suspend();
1857         cpufreq_suspend();
1858
1859         mutex_lock(&dpm_list_mtx);
1860         pm_transition = state;
1861         async_error = 0;
1862         while (!list_empty(&dpm_prepared_list)) {
1863                 struct device *dev = to_device(dpm_prepared_list.prev);
1864
1865                 get_device(dev);
1866                 mutex_unlock(&dpm_list_mtx);
1867
1868                 error = device_suspend(dev);
1869
1870                 mutex_lock(&dpm_list_mtx);
1871                 if (error) {
1872                         pm_dev_err(dev, state, "", error);
1873                         dpm_save_failed_dev(dev_name(dev));
1874                         put_device(dev);
1875                         break;
1876                 }
1877                 if (!list_empty(&dev->power.entry))
1878                         list_move(&dev->power.entry, &dpm_suspended_list);
1879                 put_device(dev);
1880                 if (async_error)
1881                         break;
1882         }
1883         mutex_unlock(&dpm_list_mtx);
1884         async_synchronize_full();
1885         if (!error)
1886                 error = async_error;
1887         if (error) {
1888                 suspend_stats.failed_suspend++;
1889                 dpm_save_failed_step(SUSPEND_SUSPEND);
1890         }
1891         dpm_show_time(starttime, state, error, NULL);
1892         trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1893         return error;
1894 }
1895
1896 /**
1897  * device_prepare - Prepare a device for system power transition.
1898  * @dev: Device to handle.
1899  * @state: PM transition of the system being carried out.
1900  *
1901  * Execute the ->prepare() callback(s) for the given device.  No new children of the
1902  * device may be registered after this function has returned.
1903  */
1904 static int device_prepare(struct device *dev, pm_message_t state)
1905 {
1906         int (*callback)(struct device *) = NULL;
1907         int ret = 0;
1908
1909         if (dev->power.syscore)
1910                 return 0;
1911
1912         WARN_ON(!pm_runtime_enabled(dev) &&
1913                 dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
1914                                               DPM_FLAG_LEAVE_SUSPENDED));
1915
1916         /*
1917          * If a device's parent goes into runtime suspend at the wrong time,
1918          * it won't be possible to resume the device.  To prevent this we
1919          * block runtime suspend here, during the prepare phase, and allow
1920          * it again during the complete phase.
1921          */
1922         pm_runtime_get_noresume(dev);
1923
1924         device_lock(dev);
1925
1926         dev->power.wakeup_path = false;
1927
1928         if (dev->power.no_pm_callbacks)
1929                 goto unlock;
1930
1931         if (dev->pm_domain)
1932                 callback = dev->pm_domain->ops.prepare;
1933         else if (dev->type && dev->type->pm)
1934                 callback = dev->type->pm->prepare;
1935         else if (dev->class && dev->class->pm)
1936                 callback = dev->class->pm->prepare;
1937         else if (dev->bus && dev->bus->pm)
1938                 callback = dev->bus->pm->prepare;
1939
1940         if (!callback && dev->driver && dev->driver->pm)
1941                 callback = dev->driver->pm->prepare;
1942
1943         if (callback)
1944                 ret = callback(dev);
1945
1946 unlock:
1947         device_unlock(dev);
1948
1949         if (ret < 0) {
1950                 suspend_report_result(callback, ret);
1951                 pm_runtime_put(dev);
1952                 return ret;
1953         }
1954         /*
1955          * A positive return value from ->prepare() means "this device appears
1956          * to be runtime-suspended and its state is fine, so if it really is
1957          * runtime-suspended, you can leave it in that state provided that you
1958          * will do the same thing with all of its descendants".  This only
1959          * applies to suspend transitions, however.
1960          */
1961         spin_lock_irq(&dev->power.lock);
1962         dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1963                 ((pm_runtime_suspended(dev) && ret > 0) ||
1964                  dev->power.no_pm_callbacks) &&
1965                 !dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
1966         spin_unlock_irq(&dev->power.lock);
1967         return 0;
1968 }
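
/*
 * For illustration only (not part of the PM core): a driver can use the
 * positive-return convention described in the comment above to opt in to
 * the direct_complete optimization.  A minimal sketch, with "foo" as a
 * hypothetical driver:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_suspended(dev) ? 1 : 0;
 *	}
 *
 * Returning 0 means "run the regular suspend callbacks"; a negative value
 * aborts the transition.
 */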
1969
1970 /**
1971  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1972  * @state: PM transition of the system being carried out.
1973  *
1974  * Execute the ->prepare() callback(s) for all devices.
1975  */
1976 int dpm_prepare(pm_message_t state)
1977 {
1978         int error = 0;
1979
1980         trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1981         might_sleep();
1982
1983         /*
1984          * Give the known devices a chance to complete their probes before
1985          * device probing is disabled.  This sync point is important at least
1986          * at boot time and during hibernation restore.
1987          */
1988         wait_for_device_probe();
1989         /*
1990          * Probing devices during suspend or hibernation is unsafe and would
1991          * make system behavior unpredictable, so prohibit probing here and
1992          * defer the probes instead.  The normal behavior will be restored in
1993          * dpm_complete().
1994          */
1995         device_block_probing();
1996
1997         mutex_lock(&dpm_list_mtx);
1998         while (!list_empty(&dpm_list)) {
1999                 struct device *dev = to_device(dpm_list.next);
2000
2001                 get_device(dev);
2002                 mutex_unlock(&dpm_list_mtx);
2003
2004                 trace_device_pm_callback_start(dev, "", state.event);
2005                 error = device_prepare(dev, state);
2006                 trace_device_pm_callback_end(dev, error);
2007
2008                 mutex_lock(&dpm_list_mtx);
2009                 if (error) {
2010                         if (error == -EAGAIN) {
2011                                 put_device(dev);
2012                                 error = 0;
2013                                 continue;
2014                         }
2015                         pr_info("Device %s not prepared for power transition: code %d\n",
2016                                 dev_name(dev), error);
2017                         put_device(dev);
2018                         break;
2019                 }
2020                 dev->power.is_prepared = true;
2021                 if (!list_empty(&dev->power.entry))
2022                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
2023                 put_device(dev);
2024         }
2025         mutex_unlock(&dpm_list_mtx);
2026         trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
2027         return error;
2028 }
2029
2030 /**
2031  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
2032  * @state: PM transition of the system being carried out.
2033  *
2034  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
2035  * callbacks for them.
2036  */
2037 int dpm_suspend_start(pm_message_t state)
2038 {
2039         int error;
2040
2041         error = dpm_prepare(state);
2042         if (error) {
2043                 suspend_stats.failed_prepare++;
2044                 dpm_save_failed_step(SUSPEND_PREPARE);
2045         } else
2046                 error = dpm_suspend(state);
2047         return error;
2048 }
2049 EXPORT_SYMBOL_GPL(dpm_suspend_start);
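
/*
 * A simplified sketch of how a system sleep core is expected to drive these
 * entry points (the real sequences, e.g. in kernel/power/, carry more error
 * handling and use PMSG_RECOVER on failure):
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);	prepare + suspend
 *	if (!error) {
 *		error = dpm_suspend_end(PMSG_SUSPEND);	late + noirq
 *		if (!error) {
 *			... enter the sleep state ...
 *			dpm_resume_start(PMSG_RESUME);	noirq + early
 *		}
 *	}
 *	dpm_resume_end(PMSG_RESUME);			resume + complete
 */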
2050
2051 void __suspend_report_result(const char *function, void *fn, int ret)
2052 {
2053         if (ret)
2054                 pr_err("%s(): %pF returns %d\n", function, fn, ret);
2055 }
2056 EXPORT_SYMBOL_GPL(__suspend_report_result);
2057
2058 /**
2059  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
2060  * @subordinate: Device that needs to wait for @dev.
2061  * @dev: Device to wait for.
2062  */
2063 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
2064 {
2065         dpm_wait(dev, subordinate->power.async_suspend);
2066         return async_error;
2067 }
2068 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
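
/*
 * Illustrative use only, with a hypothetical "foo" driver and helper: a
 * suspend callback that must not run before some other, otherwise unrelated
 * device has finished suspending can wait for it explicitly:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int error;
 *
 *		error = device_pm_wait_for_dev(dev, foo->companion);
 *		if (error)
 *			return error;
 *
 *		return foo_quiesce_hw(foo);
 *	}
 *
 * Parent/child and device-link ordering is handled by the PM core itself;
 * this helper is only needed for dependencies the core cannot see.
 */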
2069
2070 /**
2071  * dpm_for_each_dev - device iterator.
2072  * @data: data for the callback.
2073  * @fn: function to be called for each device.
2074  *
2075  * Iterate over devices in dpm_list, and call @fn for each device,
2076  * passing it @data.
2077  */
2078 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
2079 {
2080         struct device *dev;
2081
2082         if (!fn)
2083                 return;
2084
2085         device_pm_lock();
2086         list_for_each_entry(dev, &dpm_list, power.entry)
2087                 fn(dev, data);
2088         device_pm_unlock();
2089 }
2090 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
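
/*
 * Illustrative (hypothetical) caller: count the devices currently on
 * dpm_list by threading an accumulator through @data.  Note that @fn is
 * invoked with dpm_list_mtx held.
 *
 *	static void foo_count_dev(struct device *dev, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *	}
 *
 *	unsigned int n = 0;
 *
 *	dpm_for_each_dev(&n, foo_count_dev);
 */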
2091
2092 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
2093 {
2094         if (!ops)
2095                 return true;
2096
2097         return !ops->prepare &&
2098                !ops->suspend &&
2099                !ops->suspend_late &&
2100                !ops->suspend_noirq &&
2101                !ops->resume_noirq &&
2102                !ops->resume_early &&
2103                !ops->resume &&
2104                !ops->complete;
2105 }
2106
2107 void device_pm_check_callbacks(struct device *dev)
2108 {
2109         spin_lock_irq(&dev->power.lock);
2110         dev->power.no_pm_callbacks =
2111                 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2112                  !dev->bus->suspend && !dev->bus->resume)) &&
2113                 (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2114                 (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2115                 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2116                 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2117                  !dev->driver->suspend && !dev->driver->resume));
2118         spin_unlock_irq(&dev->power.lock);
2119 }
2120
2121 bool dev_pm_smart_suspend_and_suspended(struct device *dev)
2122 {
2123         return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2124                 pm_runtime_status_suspended(dev);
2125 }
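
/*
 * This helper can only return true for drivers that opt in; a hypothetical
 * "foo" driver would typically do so at probe time:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_SUSPEND);
 *		...
 *		return 0;
 *	}
 *
 * With the flag set and the device already runtime-suspended, the "late" and
 * "noirq" suspend phases above may skip the driver's callbacks entirely.
 */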