1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/main.c - Where the driver meets power management.
4  *
5  * Copyright (c) 2003 Patrick Mochel
6  * Copyright (c) 2003 Open Source Development Lab
7  *
8  * The driver model core calls device_pm_add() when a device is registered.
9  * This will initialize the embedded device_pm_info object in the device
10  * and add it to the list of power-controlled devices. sysfs entries for
11  * controlling device power management will also be added.
12  *
13  * A separate list is used for keeping track of power info, because the power
14  * domain dependencies may differ from the ancestral dependencies that the
15  * subsystem list maintains.
16  */
17
18 #define pr_fmt(fmt) "PM: " fmt
19
20 #include <linux/device.h>
21 #include <linux/export.h>
22 #include <linux/mutex.h>
23 #include <linux/pm.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/pm-trace.h>
26 #include <linux/pm_wakeirq.h>
27 #include <linux/interrupt.h>
28 #include <linux/sched.h>
29 #include <linux/sched/debug.h>
30 #include <linux/async.h>
31 #include <linux/suspend.h>
32 #include <trace/events/power.h>
33 #include <linux/cpufreq.h>
34 #include <linux/cpuidle.h>
35 #include <linux/devfreq.h>
36 #include <linux/timer.h>
37
38 #include "../base.h"
39 #include "power.h"
40
41 typedef int (*pm_callback_t)(struct device *);
42
43 /*
44  * The entries in dpm_list are in depth-first order, simply because
45  * children are guaranteed to be discovered after their parents, and
46  * are inserted at the back of the list on discovery.
47  *
48  * Since device_pm_add() may be called with a device lock held,
49  * we must never try to acquire a device lock while holding
50  * dpm_list_mutex.
51  */
52
53 LIST_HEAD(dpm_list);
54 static LIST_HEAD(dpm_prepared_list);
55 static LIST_HEAD(dpm_suspended_list);
56 static LIST_HEAD(dpm_late_early_list);
57 static LIST_HEAD(dpm_noirq_list);
58
59 struct suspend_stats suspend_stats;
60 static DEFINE_MUTEX(dpm_list_mtx);
61 static pm_message_t pm_transition;
62
63 static int async_error;
64
65 static const char *pm_verb(int event)
66 {
67         switch (event) {
68         case PM_EVENT_SUSPEND:
69                 return "suspend";
70         case PM_EVENT_RESUME:
71                 return "resume";
72         case PM_EVENT_FREEZE:
73                 return "freeze";
74         case PM_EVENT_QUIESCE:
75                 return "quiesce";
76         case PM_EVENT_HIBERNATE:
77                 return "hibernate";
78         case PM_EVENT_THAW:
79                 return "thaw";
80         case PM_EVENT_RESTORE:
81                 return "restore";
82         case PM_EVENT_RECOVER:
83                 return "recover";
84         default:
85                 return "(unknown PM event)";
86         }
87 }
88
89 /**
90  * device_pm_sleep_init - Initialize system suspend-related device fields.
91  * @dev: Device object being initialized.
92  */
93 void device_pm_sleep_init(struct device *dev)
94 {
95         dev->power.is_prepared = false;
96         dev->power.is_suspended = false;
97         dev->power.is_noirq_suspended = false;
98         dev->power.is_late_suspended = false;
99         init_completion(&dev->power.completion);
100         complete_all(&dev->power.completion);
101         dev->power.wakeup = NULL;
102         INIT_LIST_HEAD(&dev->power.entry);
103 }
104
105 /**
106  * device_pm_lock - Lock the list of active devices used by the PM core.
107  */
108 void device_pm_lock(void)
109 {
110         mutex_lock(&dpm_list_mtx);
111 }
112
113 /**
114  * device_pm_unlock - Unlock the list of active devices used by the PM core.
115  */
116 void device_pm_unlock(void)
117 {
118         mutex_unlock(&dpm_list_mtx);
119 }
120
121 /**
122  * device_pm_add - Add a device to the PM core's list of active devices.
123  * @dev: Device to add to the list.
124  */
125 void device_pm_add(struct device *dev)
126 {
127         /* Skip PM setup/initialization. */
128         if (device_pm_not_required(dev))
129                 return;
130
131         pr_debug("Adding info for %s:%s\n",
132                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
133         device_pm_check_callbacks(dev);
134         mutex_lock(&dpm_list_mtx);
135         if (dev->parent && dev->parent->power.is_prepared)
136                 dev_warn(dev, "parent %s should not be sleeping\n",
137                         dev_name(dev->parent));
138         list_add_tail(&dev->power.entry, &dpm_list);
139         dev->power.in_dpm_list = true;
140         mutex_unlock(&dpm_list_mtx);
141 }
142
143 /**
144  * device_pm_remove - Remove a device from the PM core's list of active devices.
145  * @dev: Device to be removed from the list.
146  */
147 void device_pm_remove(struct device *dev)
148 {
149         if (device_pm_not_required(dev))
150                 return;
151
152         pr_debug("Removing info for %s:%s\n",
153                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
154         complete_all(&dev->power.completion);
155         mutex_lock(&dpm_list_mtx);
156         list_del_init(&dev->power.entry);
157         dev->power.in_dpm_list = false;
158         mutex_unlock(&dpm_list_mtx);
159         device_wakeup_disable(dev);
160         pm_runtime_remove(dev);
161         device_pm_check_callbacks(dev);
162 }
163
164 /**
165  * device_pm_move_before - Move device in the PM core's list of active devices.
166  * @deva: Device to move in dpm_list.
167  * @devb: Device @deva should come before.
168  */
169 void device_pm_move_before(struct device *deva, struct device *devb)
170 {
171         pr_debug("Moving %s:%s before %s:%s\n",
172                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
173                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
174         /* Delete deva from dpm_list and reinsert before devb. */
175         list_move_tail(&deva->power.entry, &devb->power.entry);
176 }
177
178 /**
179  * device_pm_move_after - Move device in the PM core's list of active devices.
180  * @deva: Device to move in dpm_list.
181  * @devb: Device @deva should come after.
182  */
183 void device_pm_move_after(struct device *deva, struct device *devb)
184 {
185         pr_debug("Moving %s:%s after %s:%s\n",
186                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
187                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
188         /* Delete deva from dpm_list and reinsert after devb. */
189         list_move(&deva->power.entry, &devb->power.entry);
190 }
191
192 /**
193  * device_pm_move_last - Move device to end of the PM core's list of devices.
194  * @dev: Device to move in dpm_list.
195  */
196 void device_pm_move_last(struct device *dev)
197 {
198         pr_debug("Moving %s:%s to end of list\n",
199                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
200         list_move_tail(&dev->power.entry, &dpm_list);
201 }
202
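/*
 * If initcall debugging (pm_print_times) is enabled, log the callback that is
 * about to be invoked for @dev and return the start timestamp to be passed to
 * initcall_debug_report() below, which logs the result and the elapsed time.
 */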
203 static ktime_t initcall_debug_start(struct device *dev, void *cb)
204 {
205         if (!pm_print_times_enabled)
206                 return 0;
207
208         dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
209                  task_pid_nr(current),
210                  dev->parent ? dev_name(dev->parent) : "none");
211         return ktime_get();
212 }
213
214 static void initcall_debug_report(struct device *dev, ktime_t calltime,
215                                   void *cb, int error)
216 {
217         ktime_t rettime;
218         s64 nsecs;
219
220         if (!pm_print_times_enabled)
221                 return;
222
223         rettime = ktime_get();
224         nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
225
226         dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
227                  (unsigned long long)nsecs >> 10);
228 }
229
230 /**
231  * dpm_wait - Wait for a PM operation to complete.
232  * @dev: Device to wait for.
233  * @async: If unset, wait only if the device's power.async_suspend flag is set.
234  */
235 static void dpm_wait(struct device *dev, bool async)
236 {
237         if (!dev)
238                 return;
239
240         if (async || (pm_async_enabled && dev->power.async_suspend))
241                 wait_for_completion(&dev->power.completion);
242 }
243
244 static int dpm_wait_fn(struct device *dev, void *async_ptr)
245 {
246         dpm_wait(dev, *((bool *)async_ptr));
247         return 0;
248 }
249
250 static void dpm_wait_for_children(struct device *dev, bool async)
251 {
252         device_for_each_child(dev, &async, dpm_wait_fn);
253 }
254
255 static void dpm_wait_for_suppliers(struct device *dev, bool async)
256 {
257         struct device_link *link;
258         int idx;
259
260         idx = device_links_read_lock();
261
262         /*
263          * If the supplier goes away right after we've checked the link to it,
264          * we'll wait for its completion to change the state, but that's fine,
265          * because the only things that will block as a result are the SRCU
266          * callbacks freeing the link objects for the links in the list we're
267          * walking.
268          */
269         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
270                 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
271                         dpm_wait(link->supplier, async);
272
273         device_links_read_unlock(idx);
274 }
275
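/* Wait for the parent and all suppliers of @dev to complete their transitions. */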
276 static void dpm_wait_for_superior(struct device *dev, bool async)
277 {
278         dpm_wait(dev->parent, async);
279         dpm_wait_for_suppliers(dev, async);
280 }
281
282 static void dpm_wait_for_consumers(struct device *dev, bool async)
283 {
284         struct device_link *link;
285         int idx;
286
287         idx = device_links_read_lock();
288
289         /*
290          * The status of a device link can only be changed from "dormant" by a
291          * probe, but that cannot happen during system suspend/resume.  In
292          * theory it can change to "dormant" at that time, but then it is
293          * reasonable to wait for the target device anyway (eg. if it goes
294          * away, it's better to wait for it to go away completely and then
295          * continue instead of trying to continue in parallel with its
296          * unregistration).
297          */
298         list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
299                 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
300                         dpm_wait(link->consumer, async);
301
302         device_links_read_unlock(idx);
303 }
304
305 static void dpm_wait_for_subordinate(struct device *dev, bool async)
306 {
307         dpm_wait_for_children(dev, async);
308         dpm_wait_for_consumers(dev, async);
309 }
310
311 /**
312  * pm_op - Return the PM operation appropriate for given PM event.
313  * @ops: PM operations to choose from.
314  * @state: PM transition of the system being carried out.
315  */
316 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
317 {
318         switch (state.event) {
319 #ifdef CONFIG_SUSPEND
320         case PM_EVENT_SUSPEND:
321                 return ops->suspend;
322         case PM_EVENT_RESUME:
323                 return ops->resume;
324 #endif /* CONFIG_SUSPEND */
325 #ifdef CONFIG_HIBERNATE_CALLBACKS
326         case PM_EVENT_FREEZE:
327         case PM_EVENT_QUIESCE:
328                 return ops->freeze;
329         case PM_EVENT_HIBERNATE:
330                 return ops->poweroff;
331         case PM_EVENT_THAW:
332         case PM_EVENT_RECOVER:
333                 return ops->thaw;
335         case PM_EVENT_RESTORE:
336                 return ops->restore;
337 #endif /* CONFIG_HIBERNATE_CALLBACKS */
338         }
339
340         return NULL;
341 }
342
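/*
 * Illustrative sketch (not part of this file): a driver typically exposes its
 * callbacks through a struct dev_pm_ops, for instance via the helper macros
 * from <linux/pm.h>, and pm_op()/pm_late_early_op()/pm_noirq_op() pick the
 * member matching the current transition.  The foo_* names below are
 * hypothetical:
 *
 *        static const struct dev_pm_ops foo_pm_ops = {
 *                SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *                SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_suspend_noirq, foo_resume_noirq)
 *        };
 *
 * With state.event == PM_EVENT_SUSPEND, pm_op(&foo_pm_ops, state) returns
 * foo_suspend, while pm_noirq_op(&foo_pm_ops, state) returns foo_suspend_noirq.
 */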
343 /**
344  * pm_late_early_op - Return the PM operation appropriate for given PM event.
345  * @ops: PM operations to choose from.
346  * @state: PM transition of the system being carried out.
347  *
348  * Runtime PM is disabled for @dev while this function is being executed.
349  */
350 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
351                                       pm_message_t state)
352 {
353         switch (state.event) {
354 #ifdef CONFIG_SUSPEND
355         case PM_EVENT_SUSPEND:
356                 return ops->suspend_late;
357         case PM_EVENT_RESUME:
358                 return ops->resume_early;
359 #endif /* CONFIG_SUSPEND */
360 #ifdef CONFIG_HIBERNATE_CALLBACKS
361         case PM_EVENT_FREEZE:
362         case PM_EVENT_QUIESCE:
363                 return ops->freeze_late;
364         case PM_EVENT_HIBERNATE:
365                 return ops->poweroff_late;
366         case PM_EVENT_THAW:
367         case PM_EVENT_RECOVER:
368                 return ops->thaw_early;
369         case PM_EVENT_RESTORE:
370                 return ops->restore_early;
371 #endif /* CONFIG_HIBERNATE_CALLBACKS */
372         }
373
374         return NULL;
375 }
376
377 /**
378  * pm_noirq_op - Return the PM operation appropriate for given PM event.
379  * @ops: PM operations to choose from.
380  * @state: PM transition of the system being carried out.
381  *
382  * The driver of @dev will not receive interrupts while this function is being
383  * executed.
384  */
385 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
386 {
387         switch (state.event) {
388 #ifdef CONFIG_SUSPEND
389         case PM_EVENT_SUSPEND:
390                 return ops->suspend_noirq;
391         case PM_EVENT_RESUME:
392                 return ops->resume_noirq;
393 #endif /* CONFIG_SUSPEND */
394 #ifdef CONFIG_HIBERNATE_CALLBACKS
395         case PM_EVENT_FREEZE:
396         case PM_EVENT_QUIESCE:
397                 return ops->freeze_noirq;
398         case PM_EVENT_HIBERNATE:
399                 return ops->poweroff_noirq;
400         case PM_EVENT_THAW:
401         case PM_EVENT_RECOVER:
402                 return ops->thaw_noirq;
403         case PM_EVENT_RESTORE:
404                 return ops->restore_noirq;
405 #endif /* CONFIG_HIBERNATE_CALLBACKS */
406         }
407
408         return NULL;
409 }
410
411 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
412 {
413         dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
414                 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
415                 ", may wakeup" : "");
416 }
417
418 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
419                         int error)
420 {
421         pr_err("Device %s failed to %s%s: error %d\n",
422                dev_name(dev), pm_verb(state.event), info, error);
423 }
424
425 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
426                           const char *info)
427 {
428         ktime_t calltime;
429         u64 usecs64;
430         int usecs;
431
432         calltime = ktime_get();
433         usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
434         do_div(usecs64, NSEC_PER_USEC);
435         usecs = usecs64;
436         if (usecs == 0)
437                 usecs = 1;
438
439         pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
440                   info ?: "", info ? " " : "", pm_verb(state.event),
441                   error ? "aborted" : "complete",
442                   usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
443 }
444
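/*
 * Invoke a single PM callback @cb for @dev, with tracing and optional initcall
 * debug timing around it, and feed the result to suspend_report_result().
 */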
445 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
446                             pm_message_t state, const char *info)
447 {
448         ktime_t calltime;
449         int error;
450
451         if (!cb)
452                 return 0;
453
454         calltime = initcall_debug_start(dev, cb);
455
456         pm_dev_dbg(dev, state, info);
457         trace_device_pm_callback_start(dev, info, state.event);
458         error = cb(dev);
459         trace_device_pm_callback_end(dev, error);
460         suspend_report_result(cb, error);
461
462         initcall_debug_report(dev, calltime, cb, error);
463
464         return error;
465 }
466
467 #ifdef CONFIG_DPM_WATCHDOG
468 struct dpm_watchdog {
469         struct device           *dev;
470         struct task_struct      *tsk;
471         struct timer_list       timer;
472 };
473
474 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
475         struct dpm_watchdog wd
476
477 /**
478  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
479  * @t: The timer that PM watchdog depends on.
480  *
481  * Called when a driver has timed out suspending or resuming.
482  * There's not much we can do here to recover so panic() to
483  * capture a crash-dump in pstore.
484  */
485 static void dpm_watchdog_handler(struct timer_list *t)
486 {
487         struct dpm_watchdog *wd = from_timer(wd, t, timer);
488
489         dev_emerg(wd->dev, "**** DPM device timeout ****\n");
490         show_stack(wd->tsk, NULL);
491         panic("%s %s: unrecoverable failure\n",
492                 dev_driver_string(wd->dev), dev_name(wd->dev));
493 }
494
495 /**
496  * dpm_watchdog_set - Enable pm watchdog for given device.
497  * @wd: Watchdog. Must be allocated on the stack.
498  * @dev: Device to handle.
499  */
500 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
501 {
502         struct timer_list *timer = &wd->timer;
503
504         wd->dev = dev;
505         wd->tsk = current;
506
507         timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
508         /* use same timeout value for both suspend and resume */
509         timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
510         add_timer(timer);
511 }
512
513 /**
514  * dpm_watchdog_clear - Disable suspend/resume watchdog.
515  * @wd: Watchdog to disable.
516  */
517 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
518 {
519         struct timer_list *timer = &wd->timer;
520
521         del_timer_sync(timer);
522         destroy_timer_on_stack(timer);
523 }
524 #else
525 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
526 #define dpm_watchdog_set(x, y)
527 #define dpm_watchdog_clear(x)
528 #endif
529
530 /*------------------------- Resume routines -------------------------*/
531
532 /**
533  * dev_pm_skip_next_resume_phases - Skip next system resume phases for device.
534  * @dev: Target device.
535  *
536  * Make the core skip the "early resume" and "resume" phases for @dev.
537  *
538  * This function can be called by middle-layer code during the "noirq" phase of
539  * system resume if necessary, but not by device drivers.
540  */
541 void dev_pm_skip_next_resume_phases(struct device *dev)
542 {
543         dev->power.is_late_suspended = false;
544         dev->power.is_suspended = false;
545 }
546
547 /**
548  * suspend_event - Return a "suspend" message for given "resume" one.
549  * @resume_msg: PM message representing a system-wide resume transition.
550  */
551 static pm_message_t suspend_event(pm_message_t resume_msg)
552 {
553         switch (resume_msg.event) {
554         case PM_EVENT_RESUME:
555                 return PMSG_SUSPEND;
556         case PM_EVENT_THAW:
557         case PM_EVENT_RESTORE:
558                 return PMSG_FREEZE;
559         case PM_EVENT_RECOVER:
560                 return PMSG_HIBERNATE;
561         }
562         return PMSG_ON;
563 }
564
565 /**
566  * dev_pm_may_skip_resume - System-wide device resume optimization check.
567  * @dev: Target device.
568  *
569  * Checks whether or not the device may be left in suspend after a system-wide
570  * transition to the working state.
571  */
572 bool dev_pm_may_skip_resume(struct device *dev)
573 {
574         return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
575 }
576
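/*
 * Return the "noirq" resume callback provided by the subsystem of @dev (power
 * domain, device type, class or bus, checked in that order), or NULL if there
 * is none.  If @info_p is not NULL, *info_p is set to a matching description.
 */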
577 static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
578                                                 pm_message_t state,
579                                                 const char **info_p)
580 {
581         pm_callback_t callback;
582         const char *info;
583
584         if (dev->pm_domain) {
585                 info = "noirq power domain ";
586                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
587         } else if (dev->type && dev->type->pm) {
588                 info = "noirq type ";
589                 callback = pm_noirq_op(dev->type->pm, state);
590         } else if (dev->class && dev->class->pm) {
591                 info = "noirq class ";
592                 callback = pm_noirq_op(dev->class->pm, state);
593         } else if (dev->bus && dev->bus->pm) {
594                 info = "noirq bus ";
595                 callback = pm_noirq_op(dev->bus->pm, state);
596         } else {
597                 return NULL;
598         }
599
600         if (info_p)
601                 *info_p = info;
602
603         return callback;
604 }
605
606 static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
607                                                  pm_message_t state,
608                                                  const char **info_p);
609
610 static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
611                                                 pm_message_t state,
612                                                 const char **info_p);
613
614 /**
615  * device_resume_noirq - Execute a "noirq resume" callback for given device.
616  * @dev: Device to handle.
617  * @state: PM transition of the system being carried out.
618  * @async: If true, the device is being resumed asynchronously.
619  *
620  * The driver of @dev will not receive interrupts while this function is being
621  * executed.
622  */
623 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
624 {
625         pm_callback_t callback;
626         const char *info;
627         bool skip_resume;
628         int error = 0;
629
630         TRACE_DEVICE(dev);
631         TRACE_RESUME(0);
632
633         if (dev->power.syscore || dev->power.direct_complete)
634                 goto Out;
635
636         if (!dev->power.is_noirq_suspended)
637                 goto Out;
638
639         dpm_wait_for_superior(dev, async);
640
641         skip_resume = dev_pm_may_skip_resume(dev);
642
643         callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
644         if (callback)
645                 goto Run;
646
647         if (skip_resume)
648                 goto Skip;
649
650         if (dev_pm_smart_suspend_and_suspended(dev)) {
651                 pm_message_t suspend_msg = suspend_event(state);
652
653                 /*
654                  * If "freeze" callbacks have been skipped during a transition
655                  * related to hibernation, the subsequent "thaw" callbacks must
656                  * be skipped too or bad things may happen.  Otherwise, resume
657                  * callbacks are going to be run for the device, so its runtime
658                  * PM status must be changed to reflect the new state after the
659                  * transition under way.
660                  */
661                 if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
662                     !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
663                         if (state.event == PM_EVENT_THAW) {
664                                 skip_resume = true;
665                                 goto Skip;
666                         } else {
667                                 pm_runtime_set_active(dev);
668                         }
669                 }
670         }
671
672         if (dev->driver && dev->driver->pm) {
673                 info = "noirq driver ";
674                 callback = pm_noirq_op(dev->driver->pm, state);
675         }
676
677 Run:
678         error = dpm_run_callback(callback, dev, state, info);
679
680 Skip:
681         dev->power.is_noirq_suspended = false;
682
683         if (skip_resume) {
684                 /*
685                  * The device is going to be left in suspend, but it might not
686                  * have been in runtime suspend before the system suspended, so
687                  * its runtime PM status needs to be updated to avoid confusing
688                  * the runtime PM framework when runtime PM is enabled for the
689                  * device again.
690                  */
691                 pm_runtime_set_suspended(dev);
692                 dev_pm_skip_next_resume_phases(dev);
693         }
694
695 Out:
696         complete_all(&dev->power.completion);
697         TRACE_RESUME(error);
698         return error;
699 }
700
701 static bool is_async(struct device *dev)
702 {
703         return dev->power.async_suspend && pm_async_enabled
704                 && !pm_trace_is_enabled();
705 }
706
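/*
 * Reinitialize the completion of @dev and, if it may be handled asynchronously,
 * take a reference to it and schedule @func.  Returns true if async execution
 * was scheduled, false if the caller has to run the callback synchronously.
 */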
707 static bool dpm_async_fn(struct device *dev, async_func_t func)
708 {
709         reinit_completion(&dev->power.completion);
710
711         if (is_async(dev)) {
712                 get_device(dev);
713                 async_schedule(func, dev);
714                 return true;
715         }
716
717         return false;
718 }
719
720 static void async_resume_noirq(void *data, async_cookie_t cookie)
721 {
722         struct device *dev = (struct device *)data;
723         int error;
724
725         error = device_resume_noirq(dev, pm_transition, true);
726         if (error)
727                 pm_dev_err(dev, pm_transition, " async", error);
728
729         put_device(dev);
730 }
731
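/*
 * Run the "noirq" resume callbacks for all devices in dpm_noirq_list and move
 * them to dpm_late_early_list; async-capable devices are scheduled upfront and
 * the rest are handled synchronously in list order.
 */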
732 void dpm_noirq_resume_devices(pm_message_t state)
733 {
734         struct device *dev;
735         ktime_t starttime = ktime_get();
736
737         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
738         mutex_lock(&dpm_list_mtx);
739         pm_transition = state;
740
741         /*
742          * Schedule the async threads upfront, so that their start is not
743          * delayed by the devices that are resumed synchronously ahead of
744          * them in the list.
745          */
746         list_for_each_entry(dev, &dpm_noirq_list, power.entry)
747                 dpm_async_fn(dev, async_resume_noirq);
748
749         while (!list_empty(&dpm_noirq_list)) {
750                 dev = to_device(dpm_noirq_list.next);
751                 get_device(dev);
752                 list_move_tail(&dev->power.entry, &dpm_late_early_list);
753                 mutex_unlock(&dpm_list_mtx);
754
755                 if (!is_async(dev)) {
756                         int error;
757
758                         error = device_resume_noirq(dev, state, false);
759                         if (error) {
760                                 suspend_stats.failed_resume_noirq++;
761                                 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
762                                 dpm_save_failed_dev(dev_name(dev));
763                                 pm_dev_err(dev, state, " noirq", error);
764                         }
765                 }
766
767                 mutex_lock(&dpm_list_mtx);
768                 put_device(dev);
769         }
770         mutex_unlock(&dpm_list_mtx);
771         async_synchronize_full();
772         dpm_show_time(starttime, state, 0, "noirq");
773         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
774 }
775
776 void dpm_noirq_end(void)
777 {
778         resume_device_irqs();
779         device_wakeup_disarm_wake_irqs();
780         cpuidle_resume();
781 }
782
783 /**
784  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
785  * @state: PM transition of the system being carried out.
786  *
787  * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
788  * allow device drivers' interrupt handlers to be called.
789  */
790 void dpm_resume_noirq(pm_message_t state)
791 {
792         dpm_noirq_resume_devices(state);
793         dpm_noirq_end();
794 }
795
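/*
 * Return the "early" resume callback provided by the subsystem of @dev (power
 * domain, device type, class or bus), or NULL if there is none.
 */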
796 static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
797                                                 pm_message_t state,
798                                                 const char **info_p)
799 {
800         pm_callback_t callback;
801         const char *info;
802
803         if (dev->pm_domain) {
804                 info = "early power domain ";
805                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
806         } else if (dev->type && dev->type->pm) {
807                 info = "early type ";
808                 callback = pm_late_early_op(dev->type->pm, state);
809         } else if (dev->class && dev->class->pm) {
810                 info = "early class ";
811                 callback = pm_late_early_op(dev->class->pm, state);
812         } else if (dev->bus && dev->bus->pm) {
813                 info = "early bus ";
814                 callback = pm_late_early_op(dev->bus->pm, state);
815         } else {
816                 return NULL;
817         }
818
819         if (info_p)
820                 *info_p = info;
821
822         return callback;
823 }
824
825 /**
826  * device_resume_early - Execute an "early resume" callback for given device.
827  * @dev: Device to handle.
828  * @state: PM transition of the system being carried out.
829  * @async: If true, the device is being resumed asynchronously.
830  *
831  * Runtime PM is disabled for @dev while this function is being executed.
832  */
833 static int device_resume_early(struct device *dev, pm_message_t state, bool async)
834 {
835         pm_callback_t callback;
836         const char *info;
837         int error = 0;
838
839         TRACE_DEVICE(dev);
840         TRACE_RESUME(0);
841
842         if (dev->power.syscore || dev->power.direct_complete)
843                 goto Out;
844
845         if (!dev->power.is_late_suspended)
846                 goto Out;
847
848         dpm_wait_for_superior(dev, async);
849
850         callback = dpm_subsys_resume_early_cb(dev, state, &info);
851
852         if (!callback && dev->driver && dev->driver->pm) {
853                 info = "early driver ";
854                 callback = pm_late_early_op(dev->driver->pm, state);
855         }
856
857         error = dpm_run_callback(callback, dev, state, info);
858         dev->power.is_late_suspended = false;
859
860  Out:
861         TRACE_RESUME(error);
862
863         pm_runtime_enable(dev);
864         complete_all(&dev->power.completion);
865         return error;
866 }
867
868 static void async_resume_early(void *data, async_cookie_t cookie)
869 {
870         struct device *dev = (struct device *)data;
871         int error;
872
873         error = device_resume_early(dev, pm_transition, true);
874         if (error)
875                 pm_dev_err(dev, pm_transition, " async", error);
876
877         put_device(dev);
878 }
879
880 /**
881  * dpm_resume_early - Execute "early resume" callbacks for all devices.
882  * @state: PM transition of the system being carried out.
883  */
884 void dpm_resume_early(pm_message_t state)
885 {
886         struct device *dev;
887         ktime_t starttime = ktime_get();
888
889         trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
890         mutex_lock(&dpm_list_mtx);
891         pm_transition = state;
892
893         /*
894          * Schedule the async threads upfront, so that their start is not
895          * delayed by the devices that are resumed synchronously ahead of
896          * them in the list.
897          */
898         list_for_each_entry(dev, &dpm_late_early_list, power.entry)
899                 dpm_async_fn(dev, async_resume_early);
900
901         while (!list_empty(&dpm_late_early_list)) {
902                 dev = to_device(dpm_late_early_list.next);
903                 get_device(dev);
904                 list_move_tail(&dev->power.entry, &dpm_suspended_list);
905                 mutex_unlock(&dpm_list_mtx);
906
907                 if (!is_async(dev)) {
908                         int error;
909
910                         error = device_resume_early(dev, state, false);
911                         if (error) {
912                                 suspend_stats.failed_resume_early++;
913                                 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
914                                 dpm_save_failed_dev(dev_name(dev));
915                                 pm_dev_err(dev, state, " early", error);
916                         }
917                 }
918                 mutex_lock(&dpm_list_mtx);
919                 put_device(dev);
920         }
921         mutex_unlock(&dpm_list_mtx);
922         async_synchronize_full();
923         dpm_show_time(starttime, state, 0, "early");
924         trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
925 }
926
927 /**
928  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
929  * @state: PM transition of the system being carried out.
930  */
931 void dpm_resume_start(pm_message_t state)
932 {
933         dpm_resume_noirq(state);
934         dpm_resume_early(state);
935 }
936 EXPORT_SYMBOL_GPL(dpm_resume_start);
937
938 /**
939  * device_resume - Execute "resume" callbacks for given device.
940  * @dev: Device to handle.
941  * @state: PM transition of the system being carried out.
942  * @async: If true, the device is being resumed asynchronously.
943  */
944 static int device_resume(struct device *dev, pm_message_t state, bool async)
945 {
946         pm_callback_t callback = NULL;
947         const char *info = NULL;
948         int error = 0;
949         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
950
951         TRACE_DEVICE(dev);
952         TRACE_RESUME(0);
953
954         if (dev->power.syscore)
955                 goto Complete;
956
957         if (dev->power.direct_complete) {
958                 /* Match the pm_runtime_disable() in __device_suspend(). */
959                 pm_runtime_enable(dev);
960                 goto Complete;
961         }
962
963         dpm_wait_for_superior(dev, async);
964         dpm_watchdog_set(&wd, dev);
965         device_lock(dev);
966
967         /*
968          * This is a fib.  But we'll allow new children to be added below
969          * a resumed device, even if the device hasn't been completed yet.
970          */
971         dev->power.is_prepared = false;
972
973         if (!dev->power.is_suspended)
974                 goto Unlock;
975
976         if (dev->pm_domain) {
977                 info = "power domain ";
978                 callback = pm_op(&dev->pm_domain->ops, state);
979                 goto Driver;
980         }
981
982         if (dev->type && dev->type->pm) {
983                 info = "type ";
984                 callback = pm_op(dev->type->pm, state);
985                 goto Driver;
986         }
987
988         if (dev->class && dev->class->pm) {
989                 info = "class ";
990                 callback = pm_op(dev->class->pm, state);
991                 goto Driver;
992         }
993
994         if (dev->bus) {
995                 if (dev->bus->pm) {
996                         info = "bus ";
997                         callback = pm_op(dev->bus->pm, state);
998                 } else if (dev->bus->resume) {
999                         info = "legacy bus ";
1000                         callback = dev->bus->resume;
1001                         goto End;
1002                 }
1003         }
1004
1005  Driver:
1006         if (!callback && dev->driver && dev->driver->pm) {
1007                 info = "driver ";
1008                 callback = pm_op(dev->driver->pm, state);
1009         }
1010
1011  End:
1012         error = dpm_run_callback(callback, dev, state, info);
1013         dev->power.is_suspended = false;
1014
1015  Unlock:
1016         device_unlock(dev);
1017         dpm_watchdog_clear(&wd);
1018
1019  Complete:
1020         complete_all(&dev->power.completion);
1021
1022         TRACE_RESUME(error);
1023
1024         return error;
1025 }
1026
1027 static void async_resume(void *data, async_cookie_t cookie)
1028 {
1029         struct device *dev = (struct device *)data;
1030         int error;
1031
1032         error = device_resume(dev, pm_transition, true);
1033         if (error)
1034                 pm_dev_err(dev, pm_transition, " async", error);
1035         put_device(dev);
1036 }
1037
1038 /**
1039  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
1040  * @state: PM transition of the system being carried out.
1041  *
1042  * Execute the appropriate "resume" callback for all devices whose status
1043  * indicates that they are suspended.
1044  */
1045 void dpm_resume(pm_message_t state)
1046 {
1047         struct device *dev;
1048         ktime_t starttime = ktime_get();
1049
1050         trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1051         might_sleep();
1052
1053         mutex_lock(&dpm_list_mtx);
1054         pm_transition = state;
1055         async_error = 0;
1056
1057         list_for_each_entry(dev, &dpm_suspended_list, power.entry)
1058                 dpm_async_fn(dev, async_resume);
1059
1060         while (!list_empty(&dpm_suspended_list)) {
1061                 dev = to_device(dpm_suspended_list.next);
1062                 get_device(dev);
1063                 if (!is_async(dev)) {
1064                         int error;
1065
1066                         mutex_unlock(&dpm_list_mtx);
1067
1068                         error = device_resume(dev, state, false);
1069                         if (error) {
1070                                 suspend_stats.failed_resume++;
1071                                 dpm_save_failed_step(SUSPEND_RESUME);
1072                                 dpm_save_failed_dev(dev_name(dev));
1073                                 pm_dev_err(dev, state, "", error);
1074                         }
1075
1076                         mutex_lock(&dpm_list_mtx);
1077                 }
1078                 if (!list_empty(&dev->power.entry))
1079                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
1080                 put_device(dev);
1081         }
1082         mutex_unlock(&dpm_list_mtx);
1083         async_synchronize_full();
1084         dpm_show_time(starttime, state, 0, NULL);
1085
1086         cpufreq_resume();
1087         devfreq_resume();
1088         trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1089 }
1090
1091 /**
1092  * device_complete - Complete a PM transition for given device.
1093  * @dev: Device to handle.
1094  * @state: PM transition of the system being carried out.
1095  */
1096 static void device_complete(struct device *dev, pm_message_t state)
1097 {
1098         void (*callback)(struct device *) = NULL;
1099         const char *info = NULL;
1100
1101         if (dev->power.syscore)
1102                 return;
1103
1104         device_lock(dev);
1105
1106         if (dev->pm_domain) {
1107                 info = "completing power domain ";
1108                 callback = dev->pm_domain->ops.complete;
1109         } else if (dev->type && dev->type->pm) {
1110                 info = "completing type ";
1111                 callback = dev->type->pm->complete;
1112         } else if (dev->class && dev->class->pm) {
1113                 info = "completing class ";
1114                 callback = dev->class->pm->complete;
1115         } else if (dev->bus && dev->bus->pm) {
1116                 info = "completing bus ";
1117                 callback = dev->bus->pm->complete;
1118         }
1119
1120         if (!callback && dev->driver && dev->driver->pm) {
1121                 info = "completing driver ";
1122                 callback = dev->driver->pm->complete;
1123         }
1124
1125         if (callback) {
1126                 pm_dev_dbg(dev, state, info);
1127                 callback(dev);
1128         }
1129
1130         device_unlock(dev);
1131
1132         pm_runtime_put(dev);
1133 }
1134
1135 /**
1136  * dpm_complete - Complete a PM transition for all non-sysdev devices.
1137  * @state: PM transition of the system being carried out.
1138  *
1139  * Execute the ->complete() callbacks for all devices whose PM status is not
1140  * DPM_ON (this allows new devices to be registered).
1141  */
1142 void dpm_complete(pm_message_t state)
1143 {
1144         struct list_head list;
1145
1146         trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1147         might_sleep();
1148
1149         INIT_LIST_HEAD(&list);
1150         mutex_lock(&dpm_list_mtx);
1151         while (!list_empty(&dpm_prepared_list)) {
1152                 struct device *dev = to_device(dpm_prepared_list.prev);
1153
1154                 get_device(dev);
1155                 dev->power.is_prepared = false;
1156                 list_move(&dev->power.entry, &list);
1157                 mutex_unlock(&dpm_list_mtx);
1158
1159                 trace_device_pm_callback_start(dev, "", state.event);
1160                 device_complete(dev, state);
1161                 trace_device_pm_callback_end(dev, 0);
1162
1163                 mutex_lock(&dpm_list_mtx);
1164                 put_device(dev);
1165         }
1166         list_splice(&list, &dpm_list);
1167         mutex_unlock(&dpm_list_mtx);
1168
1169         /* Allow device probing and trigger re-probing of deferred devices */
1170         device_unblock_probing();
1171         trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1172 }
1173
1174 /**
1175  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1176  * @state: PM transition of the system being carried out.
1177  *
1178  * Execute "resume" callbacks for all devices and complete the PM transition of
1179  * the system.
1180  */
1181 void dpm_resume_end(pm_message_t state)
1182 {
1183         dpm_resume(state);
1184         dpm_complete(state);
1185 }
1186 EXPORT_SYMBOL_GPL(dpm_resume_end);
1187
1188
1189 /*------------------------- Suspend routines -------------------------*/
1190
1191 /**
1192  * resume_event - Return a "resume" message for given "suspend" sleep state.
1193  * @sleep_state: PM message representing a sleep state.
1194  *
1195  * Return a PM message representing the resume event corresponding to given
1196  * sleep state.
1197  */
1198 static pm_message_t resume_event(pm_message_t sleep_state)
1199 {
1200         switch (sleep_state.event) {
1201         case PM_EVENT_SUSPEND:
1202                 return PMSG_RESUME;
1203         case PM_EVENT_FREEZE:
1204         case PM_EVENT_QUIESCE:
1205                 return PMSG_RECOVER;
1206         case PM_EVENT_HIBERNATE:
1207                 return PMSG_RESTORE;
1208         }
1209         return PMSG_ON;
1210 }
1211
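/* Mark the parent and all suppliers of @dev as having to be resumed. */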
1212 static void dpm_superior_set_must_resume(struct device *dev)
1213 {
1214         struct device_link *link;
1215         int idx;
1216
1217         if (dev->parent)
1218                 dev->parent->power.must_resume = true;
1219
1220         idx = device_links_read_lock();
1221
1222         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
1223                 link->supplier->power.must_resume = true;
1224
1225         device_links_read_unlock(idx);
1226 }
1227
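/*
 * Return the "noirq" suspend callback provided by the subsystem of @dev (power
 * domain, device type, class or bus), or NULL if there is none.
 */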
1228 static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
1229                                                  pm_message_t state,
1230                                                  const char **info_p)
1231 {
1232         pm_callback_t callback;
1233         const char *info;
1234
1235         if (dev->pm_domain) {
1236                 info = "noirq power domain ";
1237                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
1238         } else if (dev->type && dev->type->pm) {
1239                 info = "noirq type ";
1240                 callback = pm_noirq_op(dev->type->pm, state);
1241         } else if (dev->class && dev->class->pm) {
1242                 info = "noirq class ";
1243                 callback = pm_noirq_op(dev->class->pm, state);
1244         } else if (dev->bus && dev->bus->pm) {
1245                 info = "noirq bus ";
1246                 callback = pm_noirq_op(dev->bus->pm, state);
1247         } else {
1248                 return NULL;
1249         }
1250
1251         if (info_p)
1252                 *info_p = info;
1253
1254         return callback;
1255 }
1256
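/*
 * Check whether @dev has to be resumed in spite of DPM_FLAG_LEAVE_SUSPENDED,
 * based on the subsystem callbacks present for it, its runtime PM status and
 * its wakeup settings.
 */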
1257 static bool device_must_resume(struct device *dev, pm_message_t state,
1258                                bool no_subsys_suspend_noirq)
1259 {
1260         pm_message_t resume_msg = resume_event(state);
1261
1262         /*
1263          * If all of the device driver's "noirq", "late" and "early" callbacks
1264          * are invoked directly by the core, the decision to allow the device to
1265          * stay in suspend can be based on its current runtime PM status and its
1266          * wakeup settings.
1267          */
1268         if (no_subsys_suspend_noirq &&
1269             !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
1270             !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
1271             !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
1272                 return !pm_runtime_status_suspended(dev) &&
1273                         (resume_msg.event != PM_EVENT_RESUME ||
1274                          (device_can_wakeup(dev) && !device_may_wakeup(dev)));
1275
1276         /*
1277          * The only safe strategy here is to require that if the device may not
1278          * be left in suspend, resume callbacks must be invoked for it.
1279          */
1280         return !dev->power.may_skip_resume;
1281 }
1282
1283 /**
1284  * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1285  * @dev: Device to handle.
1286  * @state: PM transition of the system being carried out.
1287  * @async: If true, the device is being suspended asynchronously.
1288  *
1289  * The driver of @dev will not receive interrupts while this function is being
1290  * executed.
1291  */
1292 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1293 {
1294         pm_callback_t callback;
1295         const char *info;
1296         bool no_subsys_cb = false;
1297         int error = 0;
1298
1299         TRACE_DEVICE(dev);
1300         TRACE_SUSPEND(0);
1301
1302         dpm_wait_for_subordinate(dev, async);
1303
1304         if (async_error)
1305                 goto Complete;
1306
1307         if (pm_wakeup_pending()) {
1308                 async_error = -EBUSY;
1309                 goto Complete;
1310         }
1311
1312         if (dev->power.syscore || dev->power.direct_complete)
1313                 goto Complete;
1314
1315         callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
1316         if (callback)
1317                 goto Run;
1318
1319         no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);
1320
1321         if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
1322                 goto Skip;
1323
1324         if (dev->driver && dev->driver->pm) {
1325                 info = "noirq driver ";
1326                 callback = pm_noirq_op(dev->driver->pm, state);
1327         }
1328
1329 Run:
1330         error = dpm_run_callback(callback, dev, state, info);
1331         if (error) {
1332                 async_error = error;
1333                 goto Complete;
1334         }
1335
1336 Skip:
1337         dev->power.is_noirq_suspended = true;
1338
1339         if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
1340                 dev->power.must_resume = dev->power.must_resume ||
1341                                 atomic_read(&dev->power.usage_count) > 1 ||
1342                                 device_must_resume(dev, state, no_subsys_cb);
1343         } else {
1344                 dev->power.must_resume = true;
1345         }
1346
1347         if (dev->power.must_resume)
1348                 dpm_superior_set_must_resume(dev);
1349
1350 Complete:
1351         complete_all(&dev->power.completion);
1352         TRACE_SUSPEND(error);
1353         return error;
1354 }
1355
1356 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1357 {
1358         struct device *dev = (struct device *)data;
1359         int error;
1360
1361         error = __device_suspend_noirq(dev, pm_transition, true);
1362         if (error) {
1363                 dpm_save_failed_dev(dev_name(dev));
1364                 pm_dev_err(dev, pm_transition, " async", error);
1365         }
1366
1367         put_device(dev);
1368 }
1369
1370 static int device_suspend_noirq(struct device *dev)
1371 {
1372         if (dpm_async_fn(dev, async_suspend_noirq))
1373                 return 0;
1374
1375         return __device_suspend_noirq(dev, pm_transition, false);
1376 }
1377
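/* Pause cpuidle, arm wakeup interrupts and suspend device interrupts. */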
1378 void dpm_noirq_begin(void)
1379 {
1380         cpuidle_pause();
1381         device_wakeup_arm_wake_irqs();
1382         suspend_device_irqs();
1383 }
1384
1385 int dpm_noirq_suspend_devices(pm_message_t state)
1386 {
1387         ktime_t starttime = ktime_get();
1388         int error = 0;
1389
1390         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1391         mutex_lock(&dpm_list_mtx);
1392         pm_transition = state;
1393         async_error = 0;
1394
1395         while (!list_empty(&dpm_late_early_list)) {
1396                 struct device *dev = to_device(dpm_late_early_list.prev);
1397
1398                 get_device(dev);
1399                 mutex_unlock(&dpm_list_mtx);
1400
1401                 error = device_suspend_noirq(dev);
1402
1403                 mutex_lock(&dpm_list_mtx);
1404                 if (error) {
1405                         pm_dev_err(dev, state, " noirq", error);
1406                         dpm_save_failed_dev(dev_name(dev));
1407                         put_device(dev);
1408                         break;
1409                 }
1410                 if (!list_empty(&dev->power.entry))
1411                         list_move(&dev->power.entry, &dpm_noirq_list);
1412                 put_device(dev);
1413
1414                 if (async_error)
1415                         break;
1416         }
1417         mutex_unlock(&dpm_list_mtx);
1418         async_synchronize_full();
1419         if (!error)
1420                 error = async_error;
1421
1422         if (error) {
1423                 suspend_stats.failed_suspend_noirq++;
1424                 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1425         }
1426         dpm_show_time(starttime, state, error, "noirq");
1427         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1428         return error;
1429 }
1430
1431 /**
1432  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1433  * @state: PM transition of the system being carried out.
1434  *
1435  * Prevent device drivers' interrupt handlers from being called and invoke
1436  * "noirq" suspend callbacks for all non-sysdev devices.
1437  */
1438 int dpm_suspend_noirq(pm_message_t state)
1439 {
1440         int ret;
1441
1442         dpm_noirq_begin();
1443         ret = dpm_noirq_suspend_devices(state);
1444         if (ret)
1445                 dpm_resume_noirq(resume_event(state));
1446
1447         return ret;
1448 }
1449
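/*
 * If @dev is part of a wakeup path, propagate that to its parent, unless the
 * parent ignores its children for power management purposes.
 */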
1450 static void dpm_propagate_wakeup_to_parent(struct device *dev)
1451 {
1452         struct device *parent = dev->parent;
1453
1454         if (!parent)
1455                 return;
1456
1457         spin_lock_irq(&parent->power.lock);
1458
1459         if (dev->power.wakeup_path && !parent->power.ignore_children)
1460                 parent->power.wakeup_path = true;
1461
1462         spin_unlock_irq(&parent->power.lock);
1463 }
1464
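/*
 * Return the "late" suspend callback provided by the subsystem of @dev (power
 * domain, device type, class or bus), or NULL if there is none.
 */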
1465 static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
1466                                                 pm_message_t state,
1467                                                 const char **info_p)
1468 {
1469         pm_callback_t callback;
1470         const char *info;
1471
1472         if (dev->pm_domain) {
1473                 info = "late power domain ";
1474                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
1475         } else if (dev->type && dev->type->pm) {
1476                 info = "late type ";
1477                 callback = pm_late_early_op(dev->type->pm, state);
1478         } else if (dev->class && dev->class->pm) {
1479                 info = "late class ";
1480                 callback = pm_late_early_op(dev->class->pm, state);
1481         } else if (dev->bus && dev->bus->pm) {
1482                 info = "late bus ";
1483                 callback = pm_late_early_op(dev->bus->pm, state);
1484         } else {
1485                 return NULL;
1486         }
1487
1488         if (info_p)
1489                 *info_p = info;
1490
1491         return callback;
1492 }
1493
1494 /**
1495  * __device_suspend_late - Execute a "late suspend" callback for given device.
1496  * @dev: Device to handle.
1497  * @state: PM transition of the system being carried out.
1498  * @async: If true, the device is being suspended asynchronously.
1499  *
1500  * Runtime PM is disabled for @dev while this function is being executed.
1501  */
1502 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1503 {
1504         pm_callback_t callback;
1505         const char *info;
1506         int error = 0;
1507
1508         TRACE_DEVICE(dev);
1509         TRACE_SUSPEND(0);
1510
1511         __pm_runtime_disable(dev, false);
1512
1513         dpm_wait_for_subordinate(dev, async);
1514
1515         if (async_error)
1516                 goto Complete;
1517
1518         if (pm_wakeup_pending()) {
1519                 async_error = -EBUSY;
1520                 goto Complete;
1521         }
1522
1523         if (dev->power.syscore || dev->power.direct_complete)
1524                 goto Complete;
1525
1526         callback = dpm_subsys_suspend_late_cb(dev, state, &info);
1527         if (callback)
1528                 goto Run;
1529
1530         if (dev_pm_smart_suspend_and_suspended(dev) &&
1531             !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
1532                 goto Skip;
1533
1534         if (dev->driver && dev->driver->pm) {
1535                 info = "late driver ";
1536                 callback = pm_late_early_op(dev->driver->pm, state);
1537         }
1538
1539 Run:
1540         error = dpm_run_callback(callback, dev, state, info);
1541         if (error) {
1542                 async_error = error;
1543                 goto Complete;
1544         }
1545         dpm_propagate_wakeup_to_parent(dev);
1546
1547 Skip:
1548         dev->power.is_late_suspended = true;
1549
1550 Complete:
1551         TRACE_SUSPEND(error);
1552         complete_all(&dev->power.completion);
1553         return error;
1554 }
1555
1556 static void async_suspend_late(void *data, async_cookie_t cookie)
1557 {
1558         struct device *dev = (struct device *)data;
1559         int error;
1560
1561         error = __device_suspend_late(dev, pm_transition, true);
1562         if (error) {
1563                 dpm_save_failed_dev(dev_name(dev));
1564                 pm_dev_err(dev, pm_transition, " async", error);
1565         }
1566         put_device(dev);
1567 }
1568
1569 static int device_suspend_late(struct device *dev)
1570 {
1571         if (dpm_async_fn(dev, async_suspend_late))
1572                 return 0;
1573
1574         return __device_suspend_late(dev, pm_transition, false);
1575 }
1576
1577 /**
1578  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1579  * @state: PM transition of the system being carried out.
1580  */
1581 int dpm_suspend_late(pm_message_t state)
1582 {
1583         ktime_t starttime = ktime_get();
1584         int error = 0;
1585
1586         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1587         mutex_lock(&dpm_list_mtx);
1588         pm_transition = state;
1589         async_error = 0;
1590
1591         while (!list_empty(&dpm_suspended_list)) {
1592                 struct device *dev = to_device(dpm_suspended_list.prev);
1593
1594                 get_device(dev);
1595                 mutex_unlock(&dpm_list_mtx);
1596
1597                 error = device_suspend_late(dev);
1598
1599                 mutex_lock(&dpm_list_mtx);
1600                 if (!list_empty(&dev->power.entry))
1601                         list_move(&dev->power.entry, &dpm_late_early_list);
1602
1603                 if (error) {
1604                         pm_dev_err(dev, state, " late", error);
1605                         dpm_save_failed_dev(dev_name(dev));
1606                         put_device(dev);
1607                         break;
1608                 }
1609                 put_device(dev);
1610
1611                 if (async_error)
1612                         break;
1613         }
1614         mutex_unlock(&dpm_list_mtx);
1615         async_synchronize_full();
1616         if (!error)
1617                 error = async_error;
1618         if (error) {
1619                 suspend_stats.failed_suspend_late++;
1620                 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1621                 dpm_resume_early(resume_event(state));
1622         }
1623         dpm_show_time(starttime, state, error, "late");
1624         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1625         return error;
1626 }
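
/*
 * For illustration, a minimal sketch (hypothetical foo_* names) of how a
 * driver hooks into the "late suspend" phase driven by dpm_suspend_late()
 * above.  Callbacks installed with SET_LATE_SYSTEM_SLEEP_PM_OPS() run after
 * the regular ->suspend() phase, with runtime PM already disabled for the
 * device by __device_suspend_late().
 */
#include <linux/device.h>
#include <linux/pm.h>

static int __maybe_unused foo_suspend_late(struct device *dev)
{
	/* Quiesce the hardware; interrupts are still enabled here. */
	return 0;
}

static int __maybe_unused foo_resume_early(struct device *dev)
{
	/* Undo whatever foo_suspend_late() did. */
	return 0;
}

static const struct dev_pm_ops foo_late_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(foo_suspend_late, foo_resume_early)
};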
1627
1628 /**
1629  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1630  * @state: PM transition of the system being carried out.
1631  */
1632 int dpm_suspend_end(pm_message_t state)
1633 {
1634         int error = dpm_suspend_late(state);
1635         if (error)
1636                 return error;
1637
1638         error = dpm_suspend_noirq(state);
1639         if (error) {
1640                 dpm_resume_early(resume_event(state));
1641                 return error;
1642         }
1643
1644         return 0;
1645 }
1646 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1647
1648 /**
1649  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1650  * @dev: Device to suspend.
1651  * @state: PM transition of the system being carried out.
1652  * @cb: Suspend callback to execute.
1653  * @info: string description of caller.
1654  */
1655 static int legacy_suspend(struct device *dev, pm_message_t state,
1656                           int (*cb)(struct device *dev, pm_message_t state),
1657                           const char *info)
1658 {
1659         int error;
1660         ktime_t calltime;
1661
1662         calltime = initcall_debug_start(dev, cb);
1663
1664         trace_device_pm_callback_start(dev, info, state.event);
1665         error = cb(dev, state);
1666         trace_device_pm_callback_end(dev, error);
1667         suspend_report_result(cb, error);
1668
1669         initcall_debug_report(dev, calltime, cb, error);
1670
1671         return error;
1672 }
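
/*
 * For illustration, the shape of the callback that legacy_suspend() above
 * wraps: a bus ->suspend() hook taking the pm_message_t directly rather than
 * providing a struct dev_pm_ops.  The oldbus_* names are hypothetical, and
 * new code should use dev_pm_ops instead of this legacy interface.
 */
#include <linux/device.h>

static int oldbus_legacy_suspend(struct device *dev, pm_message_t state)
{
	dev_dbg(dev, "legacy suspend, PM event %d\n", state.event);
	return 0;
}

static struct bus_type oldbus_bus_type = {
	.name    = "oldbus",
	.suspend = oldbus_legacy_suspend,
};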
1673
1674 static void dpm_clear_superiors_direct_complete(struct device *dev)
1675 {
1676         struct device_link *link;
1677         int idx;
1678
1679         if (dev->parent) {
1680                 spin_lock_irq(&dev->parent->power.lock);
1681                 dev->parent->power.direct_complete = false;
1682                 spin_unlock_irq(&dev->parent->power.lock);
1683         }
1684
1685         idx = device_links_read_lock();
1686
1687         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
1688                 spin_lock_irq(&link->supplier->power.lock);
1689                 link->supplier->power.direct_complete = false;
1690                 spin_unlock_irq(&link->supplier->power.lock);
1691         }
1692
1693         device_links_read_unlock(idx);
1694 }
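
/*
 * For illustration: the suppliers walked above are devices connected to @dev
 * by device links.  A hypothetical sketch of a consumer driver creating such
 * a link at probe time; the system-wide suspend ordering (and the
 * direct_complete handling here) follows from the link itself, while
 * DL_FLAG_PM_RUNTIME additionally ties the two devices' runtime PM together.
 */
#include <linux/device.h>
#include <linux/errno.h>

static int consumer_bind_supplier(struct device *consumer,
				  struct device *supplier)
{
	struct device_link *link;

	link = device_link_add(consumer, supplier, DL_FLAG_PM_RUNTIME);
	if (!link)
		return -ENODEV;

	return 0;
}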
1695
1696 /**
1697  * __device_suspend - Execute "suspend" callbacks for given device.
1698  * @dev: Device to handle.
1699  * @state: PM transition of the system being carried out.
1700  * @async: If true, the device is being suspended asynchronously.
1701  */
1702 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1703 {
1704         pm_callback_t callback = NULL;
1705         const char *info = NULL;
1706         int error = 0;
1707         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1708
1709         TRACE_DEVICE(dev);
1710         TRACE_SUSPEND(0);
1711
1712         dpm_wait_for_subordinate(dev, async);
1713
1714         if (async_error) {
1715                 dev->power.direct_complete = false;
1716                 goto Complete;
1717         }
1718
1719         /*
1720          * If a device configured to wake up the system from sleep states
1721          * has been suspended at run time and there's a resume request pending
1722          * for it, this is equivalent to the device signaling wakeup, so the
1723          * system suspend operation should be aborted.
1724          */
1725         if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
1726                 pm_wakeup_event(dev, 0);
1727
1728         if (pm_wakeup_pending()) {
1729                 dev->power.direct_complete = false;
1730                 async_error = -EBUSY;
1731                 goto Complete;
1732         }
1733
1734         if (dev->power.syscore)
1735                 goto Complete;
1736
1737         /* Avoid direct_complete to let wakeup_path propagate. */
1738         if (device_may_wakeup(dev) || dev->power.wakeup_path)
1739                 dev->power.direct_complete = false;
1740
1741         if (dev->power.direct_complete) {
1742                 if (pm_runtime_status_suspended(dev)) {
1743                         pm_runtime_disable(dev);
1744                         if (pm_runtime_status_suspended(dev)) {
1745                                 pm_dev_dbg(dev, state, "direct-complete ");
1746                                 goto Complete;
1747                         }
1748
1749                         pm_runtime_enable(dev);
1750                 }
1751                 dev->power.direct_complete = false;
1752         }
1753
1754         dev->power.may_skip_resume = false;
1755         dev->power.must_resume = false;
1756
1757         dpm_watchdog_set(&wd, dev);
1758         device_lock(dev);
1759
1760         if (dev->pm_domain) {
1761                 info = "power domain ";
1762                 callback = pm_op(&dev->pm_domain->ops, state);
1763                 goto Run;
1764         }
1765
1766         if (dev->type && dev->type->pm) {
1767                 info = "type ";
1768                 callback = pm_op(dev->type->pm, state);
1769                 goto Run;
1770         }
1771
1772         if (dev->class && dev->class->pm) {
1773                 info = "class ";
1774                 callback = pm_op(dev->class->pm, state);
1775                 goto Run;
1776         }
1777
1778         if (dev->bus) {
1779                 if (dev->bus->pm) {
1780                         info = "bus ";
1781                         callback = pm_op(dev->bus->pm, state);
1782                 } else if (dev->bus->suspend) {
1783                         pm_dev_dbg(dev, state, "legacy bus ");
1784                         error = legacy_suspend(dev, state, dev->bus->suspend,
1785                                                 "legacy bus ");
1786                         goto End;
1787                 }
1788         }
1789
1790  Run:
1791         if (!callback && dev->driver && dev->driver->pm) {
1792                 info = "driver ";
1793                 callback = pm_op(dev->driver->pm, state);
1794         }
1795
1796         error = dpm_run_callback(callback, dev, state, info);
1797
1798  End:
1799         if (!error) {
1800                 dev->power.is_suspended = true;
1801                 if (device_may_wakeup(dev))
1802                         dev->power.wakeup_path = true;
1803
1804                 dpm_propagate_wakeup_to_parent(dev);
1805                 dpm_clear_superiors_direct_complete(dev);
1806         }
1807
1808         device_unlock(dev);
1809         dpm_watchdog_clear(&wd);
1810
1811  Complete:
1812         if (error)
1813                 async_error = error;
1814
1815         complete_all(&dev->power.completion);
1816         TRACE_SUSPEND(error);
1817         return error;
1818 }
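
/*
 * For illustration: __device_suspend() looks for a callback in the order
 * power domain, device type, class, bus, and only falls back to the driver's
 * own dev_pm_ops when none of those provide one.  A hypothetical platform
 * driver (bar_* names assumed) supplying that "driver " callback:
 */
#include <linux/platform_device.h>
#include <linux/pm.h>

static int __maybe_unused bar_suspend(struct device *dev)
{
	/* Save context and put the device into a low-power state. */
	return 0;
}

static int __maybe_unused bar_resume(struct device *dev)
{
	/* Restore the context saved by bar_suspend(). */
	return 0;
}

static SIMPLE_DEV_PM_OPS(bar_pm_ops, bar_suspend, bar_resume);

static struct platform_driver bar_driver = {
	.driver = {
		.name = "bar",
		.pm   = &bar_pm_ops,
	},
};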
1819
1820 static void async_suspend(void *data, async_cookie_t cookie)
1821 {
1822         struct device *dev = (struct device *)data;
1823         int error;
1824
1825         error = __device_suspend(dev, pm_transition, true);
1826         if (error) {
1827                 dpm_save_failed_dev(dev_name(dev));
1828                 pm_dev_err(dev, pm_transition, " async", error);
1829         }
1830
1831         put_device(dev);
1832 }
1833
1834 static int device_suspend(struct device *dev)
1835 {
1836         if (dpm_async_fn(dev, async_suspend))
1837                 return 0;
1838
1839         return __device_suspend(dev, pm_transition, false);
1840 }
1841
1842 /**
1843  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1844  * @state: PM transition of the system being carried out.
1845  */
1846 int dpm_suspend(pm_message_t state)
1847 {
1848         ktime_t starttime = ktime_get();
1849         int error = 0;
1850
1851         trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1852         might_sleep();
1853
1854         devfreq_suspend();
1855         cpufreq_suspend();
1856
1857         mutex_lock(&dpm_list_mtx);
1858         pm_transition = state;
1859         async_error = 0;
1860         while (!list_empty(&dpm_prepared_list)) {
1861                 struct device *dev = to_device(dpm_prepared_list.prev);
1862
1863                 get_device(dev);
1864                 mutex_unlock(&dpm_list_mtx);
1865
1866                 error = device_suspend(dev);
1867
1868                 mutex_lock(&dpm_list_mtx);
1869                 if (error) {
1870                         pm_dev_err(dev, state, "", error);
1871                         dpm_save_failed_dev(dev_name(dev));
1872                         put_device(dev);
1873                         break;
1874                 }
1875                 if (!list_empty(&dev->power.entry))
1876                         list_move(&dev->power.entry, &dpm_suspended_list);
1877                 put_device(dev);
1878                 if (async_error)
1879                         break;
1880         }
1881         mutex_unlock(&dpm_list_mtx);
1882         async_synchronize_full();
1883         if (!error)
1884                 error = async_error;
1885         if (error) {
1886                 suspend_stats.failed_suspend++;
1887                 dpm_save_failed_step(SUSPEND_SUSPEND);
1888         }
1889         dpm_show_time(starttime, state, error, NULL);
1890         trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1891         return error;
1892 }
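
/*
 * For illustration: dpm_suspend() processes dpm_prepared_list in reverse
 * registration order (children before parents), and devices whose
 * power.async_suspend flag is set are handed to async_suspend() so unrelated
 * devices can suspend concurrently.  A hypothetical probe-time opt-in
 * (baz_* name assumed):
 */
#include <linux/device.h>
#include <linux/pm.h>

static void baz_enable_async(struct device *dev)
{
	/*
	 * Suspend/resume this device in an async thread, ordered only by
	 * parent/child relationships and device links.
	 */
	device_enable_async_suspend(dev);
}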
1893
1894 /**
1895  * device_prepare - Prepare a device for system power transition.
1896  * @dev: Device to handle.
1897  * @state: PM transition of the system being carried out.
1898  *
1899  * Execute the ->prepare() callback(s) for given device.  No new children of the
1900  * device may be registered after this function has returned.
1901  */
1902 static int device_prepare(struct device *dev, pm_message_t state)
1903 {
1904         int (*callback)(struct device *) = NULL;
1905         int ret = 0;
1906
1907         if (dev->power.syscore)
1908                 return 0;
1909
1910         WARN_ON(!pm_runtime_enabled(dev) &&
1911                 dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
1912                                               DPM_FLAG_LEAVE_SUSPENDED));
1913
1914         /*
1915          * If a device's parent goes into runtime suspend at the wrong time,
1916          * it won't be possible to resume the device.  To prevent this we
1917          * block runtime suspend here, during the prepare phase, and allow
1918          * it again during the complete phase.
1919          */
1920         pm_runtime_get_noresume(dev);
1921
1922         device_lock(dev);
1923
1924         dev->power.wakeup_path = false;
1925
1926         if (dev->power.no_pm_callbacks)
1927                 goto unlock;
1928
1929         if (dev->pm_domain)
1930                 callback = dev->pm_domain->ops.prepare;
1931         else if (dev->type && dev->type->pm)
1932                 callback = dev->type->pm->prepare;
1933         else if (dev->class && dev->class->pm)
1934                 callback = dev->class->pm->prepare;
1935         else if (dev->bus && dev->bus->pm)
1936                 callback = dev->bus->pm->prepare;
1937
1938         if (!callback && dev->driver && dev->driver->pm)
1939                 callback = dev->driver->pm->prepare;
1940
1941         if (callback)
1942                 ret = callback(dev);
1943
1944 unlock:
1945         device_unlock(dev);
1946
1947         if (ret < 0) {
1948                 suspend_report_result(callback, ret);
1949                 pm_runtime_put(dev);
1950                 return ret;
1951         }
1952         /*
1953          * A positive return value from ->prepare() means "this device appears
1954          * to be runtime-suspended and its state is fine, so if it really is
1955          * runtime-suspended, you can leave it in that state provided that you
1956          * will do the same thing with all of its descendants".  This only
1957          * applies to suspend transitions, however.
1958          */
1959         spin_lock_irq(&dev->power.lock);
1960         dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1961                 ((pm_runtime_suspended(dev) && ret > 0) ||
1962                  dev->power.no_pm_callbacks) &&
1963                 !dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
1964         spin_unlock_irq(&dev->power.lock);
1965         return 0;
1966 }
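
/*
 * For illustration, a hypothetical ->prepare() implementation (qux_* name
 * assumed) that opts into the "direct complete" optimization evaluated
 * above: a positive return value tells the core that the runtime-suspended
 * state is also adequate for the target sleep state, provided the same holds
 * for all descendants and DPM_FLAG_NEVER_SKIP is not set.
 */
#include <linux/pm_runtime.h>

static int qux_prepare(struct device *dev)
{
	/*
	 * If the device is already runtime-suspended and nothing about the
	 * target sleep state requires reprogramming it, let the core skip
	 * the remaining suspend and resume callbacks for it.
	 */
	return pm_runtime_suspended(dev);
}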
1967
1968 /**
1969  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1970  * @state: PM transition of the system being carried out.
1971  *
1972  * Execute the ->prepare() callback(s) for all devices.
1973  */
1974 int dpm_prepare(pm_message_t state)
1975 {
1976         int error = 0;
1977
1978         trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1979         might_sleep();
1980
1981         /*
1982          * Give the known devices a chance to complete their probes before
1983          * probing is disabled below. This sync point is important at least
1984          * at boot time and during hibernation restore.
1985          */
1986         wait_for_device_probe();
1987         /*
1988          * Probing devices during suspend or hibernation is unsafe and makes
1989          * system behavior unpredictable, so prohibit device probing here and
1990          * defer any probes instead.  Normal behavior will be restored in
1991          * dpm_complete().
1992          */
1993         device_block_probing();
1994
1995         mutex_lock(&dpm_list_mtx);
1996         while (!list_empty(&dpm_list)) {
1997                 struct device *dev = to_device(dpm_list.next);
1998
1999                 get_device(dev);
2000                 mutex_unlock(&dpm_list_mtx);
2001
2002                 trace_device_pm_callback_start(dev, "", state.event);
2003                 error = device_prepare(dev, state);
2004                 trace_device_pm_callback_end(dev, error);
2005
2006                 mutex_lock(&dpm_list_mtx);
2007                 if (error) {
2008                         if (error == -EAGAIN) {
2009                                 put_device(dev);
2010                                 error = 0;
2011                                 continue;
2012                         }
2013                         pr_info("Device %s not prepared for power transition: code %d\n",
2014                                 dev_name(dev), error);
2015                         put_device(dev);
2016                         break;
2017                 }
2018                 dev->power.is_prepared = true;
2019                 if (!list_empty(&dev->power.entry))
2020                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
2021                 put_device(dev);
2022         }
2023         mutex_unlock(&dpm_list_mtx);
2024         trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
2025         return error;
2026 }
2027
2028 /**
2029  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
2030  * @state: PM transition of the system being carried out.
2031  *
2032  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
2033  * callbacks for them.
2034  */
2035 int dpm_suspend_start(pm_message_t state)
2036 {
2037         int error;
2038
2039         error = dpm_prepare(state);
2040         if (error) {
2041                 suspend_stats.failed_prepare++;
2042                 dpm_save_failed_step(SUSPEND_PREPARE);
2043         } else
2044                 error = dpm_suspend(state);
2045         return error;
2046 }
2047 EXPORT_SYMBOL_GPL(dpm_suspend_start);
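
/*
 * For illustration, a condensed sketch of how a platform suspend path can
 * drive the exported phase entry points (hypothetical function name,
 * simplified error handling): dpm_suspend_start() covers the prepare and
 * suspend phases, dpm_suspend_end() the late and noirq phases, and the
 * resume side mirrors them.
 */
#include <linux/pm.h>

static int example_platform_suspend(void)
{
	int error;

	error = dpm_suspend_start(PMSG_SUSPEND);
	if (error)
		goto resume_end;

	error = dpm_suspend_end(PMSG_SUSPEND);
	if (error)
		goto resume_end;

	/* ... program wakeup sources and enter the platform sleep state ... */

	dpm_resume_start(PMSG_RESUME);
resume_end:
	dpm_resume_end(PMSG_RESUME);
	return error;
}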
2048
2049 void __suspend_report_result(const char *function, void *fn, int ret)
2050 {
2051         if (ret)
2052                 pr_err("%s(): %pS returns %d\n", function, fn, ret);
2053 }
2054 EXPORT_SYMBOL_GPL(__suspend_report_result);
2055
2056 /**
2057  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
2058  * @subordinate: Device that needs to wait for @dev.
2059  * @dev: Device to wait for.
2060  */
2061 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
2062 {
2063         dpm_wait(dev, subordinate->power.async_suspend);
2064         return async_error;
2065 }
2066 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
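
/*
 * For illustration, a hypothetical use of device_pm_wait_for_dev() (quux_*
 * name and the drvdata-based companion handle are assumptions): a driver
 * whose device must not suspend until an unrelated companion device, not its
 * parent or child, has been handled.  The wait only has an effect when the
 * calling device uses asynchronous suspend/resume.
 */
#include <linux/device.h>
#include <linux/pm.h>

static int quux_suspend(struct device *dev)
{
	struct device *companion = dev_get_drvdata(dev);
	int error;

	/* Block until the companion has completed its own suspend. */
	error = device_pm_wait_for_dev(dev, companion);
	if (error)
		return error;

	/* ... the companion is quiesced; now suspend this device ... */
	return 0;
}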
2067
2068 /**
2069  * dpm_for_each_dev - device iterator.
2070  * @data: data for the callback.
2071  * @fn: function to be called for each device.
2072  *
2073  * Iterate over devices in dpm_list, and call @fn for each device,
2074  * passing it @data.
2075  */
2076 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
2077 {
2078         struct device *dev;
2079
2080         if (!fn)
2081                 return;
2082
2083         device_pm_lock();
2084         list_for_each_entry(dev, &dpm_list, power.entry)
2085                 fn(dev, data);
2086         device_pm_unlock();
2087 }
2088 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
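
/*
 * For illustration, a hypothetical use of dpm_for_each_dev(): counting the
 * devices currently on dpm_list.  The iterator takes device_pm_lock() around
 * the walk on the caller's behalf.
 */
#include <linux/device.h>
#include <linux/pm.h>

static void count_one_dev(struct device *dev, void *data)
{
	unsigned int *count = data;

	(*count)++;
}

static unsigned int count_pm_devices(void)
{
	unsigned int count = 0;

	dpm_for_each_dev(&count, count_one_dev);
	return count;
}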
2089
2090 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
2091 {
2092         if (!ops)
2093                 return true;
2094
2095         return !ops->prepare &&
2096                !ops->suspend &&
2097                !ops->suspend_late &&
2098                !ops->suspend_noirq &&
2099                !ops->resume_noirq &&
2100                !ops->resume_early &&
2101                !ops->resume &&
2102                !ops->complete;
2103 }
2104
2105 void device_pm_check_callbacks(struct device *dev)
2106 {
2107         spin_lock_irq(&dev->power.lock);
2108         dev->power.no_pm_callbacks =
2109                 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2110                  !dev->bus->suspend && !dev->bus->resume)) &&
2111                 (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2112                 (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2113                 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2114                 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2115                  !dev->driver->suspend && !dev->driver->resume));
2116         spin_unlock_irq(&dev->power.lock);
2117 }
2118
2119 bool dev_pm_smart_suspend_and_suspended(struct device *dev)
2120 {
2121         return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2122                 pm_runtime_status_suspended(dev);
2123 }
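
/*
 * For illustration, a hypothetical probe-time opt-in (grault_* name assumed)
 * to the "smart suspend" handling checked above: with DPM_FLAG_SMART_SUSPEND
 * set, the core may skip the late and noirq suspend callbacks for a device
 * that is already runtime-suspended when the system transition starts.
 * Runtime PM must be enabled for the device, as device_prepare() warns
 * otherwise.
 */
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static void grault_setup_pm(struct device *dev)
{
	pm_runtime_enable(dev);
	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
}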