/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H

#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/stackdepot.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
#else
#define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

struct intel_runtime_pm;
struct intel_wakeref;

typedef depot_stack_handle_t intel_wakeref_t;

struct intel_wakeref_ops {
	int (*get)(struct intel_wakeref *wf);
	int (*put)(struct intel_wakeref *wf);
};

struct intel_wakeref {
	atomic_t count;
	struct mutex mutex;

	intel_wakeref_t wakeref;

	struct intel_runtime_pm *rpm;
	const struct intel_wakeref_ops *ops;

	struct work_struct work;
};

void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct intel_runtime_pm *rpm,
			  const struct intel_wakeref_ops *ops,
			  struct lock_class_key *key);
#define intel_wakeref_init(wf, rpm, ops) do {				\
	static struct lock_class_key __key;				\
									\
	__intel_wakeref_init((wf), (rpm), (ops), &__key);		\
} while (0)

int __intel_wakeref_get_first(struct intel_wakeref *wf);
void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);

/**
 * intel_wakeref_get: Acquire the wakeref
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref. The first user to do so will also acquire
 * the runtime-pm wakeref and then call @wf->ops->get() underneath the
 * wakeref mutex.
 *
 * Note that @wf->ops->get() is allowed to fail, in which case the
 * runtime-pm wakeref will be released, the acquisition unwound, and an
 * error reported.
 *
 * Returns: 0 if the wakeref was acquired successfully, or a negative error
 * code otherwise.
 */
static inline int
intel_wakeref_get(struct intel_wakeref *wf)
{
	if (unlikely(!atomic_inc_not_zero(&wf->count)))
		return __intel_wakeref_get_first(wf);

	return 0;
}

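/*
 * Example (illustrative sketch, not part of this header): callers bracket
 * hardware access with a get/put pair. "my_engine_work" and the ownership
 * of the wakeref below are hypothetical.
 *
 *	static int my_engine_work(struct intel_wakeref *wf)
 *	{
 *		int err;
 *
 *		err = intel_wakeref_get(wf);
 *		if (err)
 *			return err;
 *
 *		... touch the hardware while the wakeref is held ...
 *
 *		intel_wakeref_put(wf);
 *		return 0;
 *	}
 */
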
/**
 * intel_wakeref_get_if_active: Acquire the wakeref
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref, but only if the wakeref is already
 * active.
 *
 * Returns: true if the wakeref was acquired, false otherwise.
 */
static inline bool
intel_wakeref_get_if_active(struct intel_wakeref *wf)
{
	return atomic_inc_not_zero(&wf->count);
}

/**
 * __intel_wakeref_put: Release the wakeref
 * @wf: the wakeref
 * @flags: control flags
 *
 * Release our hold on the wakeref. When there are no more users, the
 * runtime-pm wakeref will be released after the @wf->ops->put() callback
 * is called underneath the wakeref mutex.
 *
 * Note that @wf->ops->put() is allowed to fail, in which case the
 * runtime-pm wakeref is retained and an error reported.
 */
static inline void
__intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags)
#define INTEL_WAKEREF_PUT_ASYNC BIT(0)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
		__intel_wakeref_put_last(wf, flags);
}

static inline void
intel_wakeref_put(struct intel_wakeref *wf)
{
	might_sleep();
	__intel_wakeref_put(wf, 0);
}

static inline void
intel_wakeref_put_async(struct intel_wakeref *wf)
{
	__intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC);
}

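/*
 * Example (illustrative sketch): intel_wakeref_put() may sleep to run the
 * ops->put() callback, so a release from atomic context should use
 * intel_wakeref_put_async(), which hands the final release off to a worker.
 * "my_irq_done" below is hypothetical.
 *
 *	static void my_irq_done(struct intel_wakeref *wf)
 *	{
 *		intel_wakeref_put_async(wf);
 *	}
 */
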
/**
 * intel_wakeref_lock: Lock the wakeref (mutex)
 * @wf: the wakeref
 *
 * Locks the wakeref to prevent it being acquired or released. New users
 * can still adjust the counter, but the wakeref itself (and callback)
 * cannot be acquired or released.
 */
static inline void
intel_wakeref_lock(struct intel_wakeref *wf)
	__acquires(wf->mutex)
{
	mutex_lock(&wf->mutex);
}

/**
 * intel_wakeref_unlock: Unlock the wakeref
 * @wf: the wakeref
 *
 * Releases a previously acquired intel_wakeref_lock().
 */
static inline void
intel_wakeref_unlock(struct intel_wakeref *wf)
	__releases(wf->mutex)
{
	mutex_unlock(&wf->mutex);
}

/**
 * intel_wakeref_unlock_wait: Wait until the active callback is complete
 * @wf: the wakeref
 *
 * Waits until the active callback (running underneath @wf->mutex, possibly
 * on another CPU) has completed.
 */
static inline void
intel_wakeref_unlock_wait(struct intel_wakeref *wf)
{
	/* Wait out any callback still running under the mutex... */
	mutex_lock(&wf->mutex);
	mutex_unlock(&wf->mutex);

	/* ...and then flush the deferred-put worker. */
	flush_work(&wf->work);
}

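/*
 * Example (illustrative sketch): before tearing down state referenced by
 * the wakeref callbacks, make sure no callback is still in flight.
 * "my_state" below is hypothetical.
 *
 *	intel_wakeref_unlock_wait(&my_state->wakeref);
 *	... no ops->get()/ops->put() callback is running past this point ...
 */
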
/**
 * intel_wakeref_is_active: Query whether the wakeref is currently held
 * @wf: the wakeref
 *
 * Returns: true if the wakeref is currently held.
 */
static inline bool
intel_wakeref_is_active(const struct intel_wakeref *wf)
{
	return READ_ONCE(wf->wakeref);
}

/**
 * __intel_wakeref_defer_park: Defer the current park callback
 * @wf: the wakeref
 */
static inline void
__intel_wakeref_defer_park(struct intel_wakeref *wf)
{
	lockdep_assert_held(&wf->mutex);
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count));
	atomic_set_release(&wf->count, 1);
}

/**
 * intel_wakeref_wait_for_idle: Wait until the wakeref is idle
 * @wf: the wakeref
 *
 * Wait for an earlier asynchronous release of the wakeref. Note that this
 * will wait for any third party as well, so make sure you only wait when
 * you have control over the wakeref and trust no one else is acquiring it.
 *
 * Return: 0 on success, error code if killed.
 */
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);

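/*
 * Example (illustrative sketch): flush a pending asynchronous release
 * before suspending, assuming no other users can take the wakeref.
 * "my_state" below is hypothetical.
 *
 *	err = intel_wakeref_wait_for_idle(&my_state->wakeref);
 *	if (err)
 *		return err;
 */
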
struct intel_wakeref_auto {
	struct intel_runtime_pm *rpm;
	struct timer_list timer;
	intel_wakeref_t wakeref;
	spinlock_t lock;
	refcount_t count;
};

/**
 * intel_wakeref_auto: Delay the runtime-pm autosuspend
 * @wf: the wakeref
 * @timeout: relative timeout in jiffies
 *
 * The runtime-pm core uses a suspend delay after the last wakeref
 * is released before triggering runtime suspend of the device. That
 * delay is configurable via sysfs with little regard to the device
 * characteristics. Instead, we want to tune the autosuspend based on our
 * HW knowledge. intel_wakeref_auto() delays the sleep by the supplied
 * timeout.
 *
 * Pass @timeout = 0 to cancel a previous autosuspend by executing the
 * suspend immediately.
 */
void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout);

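/*
 * Example (illustrative sketch): hold off runtime suspend for roughly 250ms
 * after finishing user-visible work. "my_auto" is a hypothetical
 * struct intel_wakeref_auto previously set up with intel_wakeref_auto_init().
 *
 *	intel_wakeref_auto(&my_auto, msecs_to_jiffies(250));
 */
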
void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct intel_runtime_pm *rpm);
void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf);

#endif /* INTEL_WAKEREF_H */