/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H

#include <drm/drm_print.h>

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/ref_tracker.h>
#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

typedef unsigned long intel_wakeref_t;

#define INTEL_REFTRACK_DEAD_COUNT 16
#define INTEL_REFTRACK_PRINT_LIMIT 16

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
#else
#define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

struct intel_runtime_pm;
struct intel_wakeref;

struct intel_wakeref_ops {
	int (*get)(struct intel_wakeref *wf);
	int (*put)(struct intel_wakeref *wf);
};

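/*
 * Example (illustrative sketch, not part of this header): a minimal pair of
 * ops callbacks. The my_gt type and the my_gt_unpark()/my_gt_park() helpers
 * are hypothetical; real users embed struct intel_wakeref in a larger object
 * and recover it with container_of(). Both callbacks return 0 on success or
 * a negative errno.
 *
 *	static int my_gt_get(struct intel_wakeref *wf)
 *	{
 *		struct my_gt *gt = container_of(wf, struct my_gt, wakeref);
 *
 *		return my_gt_unpark(gt);
 *	}
 *
 *	static int my_gt_put(struct intel_wakeref *wf)
 *	{
 *		struct my_gt *gt = container_of(wf, struct my_gt, wakeref);
 *
 *		return my_gt_park(gt);
 *	}
 *
 *	static const struct intel_wakeref_ops my_gt_wakeref_ops = {
 *		.get = my_gt_get,
 *		.put = my_gt_put,
 *	};
 */
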
struct intel_wakeref {
	atomic_t count;
	struct mutex mutex;

	intel_wakeref_t wakeref;

	struct drm_i915_private *i915;
	const struct intel_wakeref_ops *ops;

	struct delayed_work work;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_WAKEREF)
	struct ref_tracker_dir debug;
#endif
};

struct intel_wakeref_lockclass {
	struct lock_class_key mutex;
	struct lock_class_key work;
};

void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct drm_i915_private *i915,
			  const struct intel_wakeref_ops *ops,
			  struct intel_wakeref_lockclass *key,
			  const char *name);
#define intel_wakeref_init(wf, i915, ops, name) do {			\
	static struct intel_wakeref_lockclass __key;			\
									\
	__intel_wakeref_init((wf), (i915), (ops), &__key, name);	\
} while (0)

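/*
 * Illustrative usage sketch (not part of this header), reusing the
 * hypothetical my_gt and my_gt_wakeref_ops names from the example above:
 *
 *	intel_wakeref_init(&gt->wakeref, gt->i915, &my_gt_wakeref_ops, "my_gt");
 */
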
int __intel_wakeref_get_first(struct intel_wakeref *wf);
void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);

/**
 * intel_wakeref_get: Acquire the wakeref
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref. The first user to do so will acquire
 * the runtime pm wakeref and then call the intel_wakeref_ops->get()
 * callback underneath the wakeref mutex.
 *
 * Note that intel_wakeref_ops->get() is allowed to fail, in which case
 * the runtime-pm wakeref will be released and the acquisition unwound,
 * and an error reported.
 *
 * Returns: 0 if the wakeref was acquired successfully, or a negative error
 * code otherwise.
 */
static inline int
intel_wakeref_get(struct intel_wakeref *wf)
{
	might_sleep();
	if (unlikely(!atomic_inc_not_zero(&wf->count)))
		return __intel_wakeref_get_first(wf);

	return 0;
}

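/*
 * Illustrative sketch (hypothetical my_gt user, not part of this header):
 * the return value must be checked, since ops->get() is allowed to fail.
 *
 *	err = intel_wakeref_get(&gt->wakeref);
 *	if (err)
 *		return err;
 *	... hardware is awake here ...
 *	intel_wakeref_put(&gt->wakeref);
 */
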
/**
 * __intel_wakeref_get: Acquire the wakeref, again
 * @wf: the wakeref
 *
 * Increment the wakeref counter, only valid if it is already held by
 * the caller.
 *
 * See intel_wakeref_get().
 */
static inline void
__intel_wakeref_get(struct intel_wakeref *wf)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	atomic_inc(&wf->count);
}

/**
 * intel_wakeref_get_if_active: Acquire the wakeref
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref, but only if the wakeref is already
 * active.
 *
 * Returns: true if the wakeref was acquired, false otherwise.
 */
static inline bool
intel_wakeref_get_if_active(struct intel_wakeref *wf)
{
	return atomic_inc_not_zero(&wf->count);
}

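/*
 * Illustrative sketch (hypothetical my_gt user): act opportunistically only
 * while the device is already awake, without forcing a wakeup:
 *
 *	if (intel_wakeref_get_if_active(&gt->wakeref)) {
 *		... touch hardware known to be awake ...
 *		intel_wakeref_put_async(&gt->wakeref);
 *	}
 */
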
enum {
	INTEL_WAKEREF_PUT_ASYNC_BIT = 0,
	__INTEL_WAKEREF_PUT_LAST_BIT__
};

static inline void
intel_wakeref_might_get(struct intel_wakeref *wf)
{
	might_lock(&wf->mutex);
}

#define INTEL_WAKEREF_PUT_ASYNC BIT(INTEL_WAKEREF_PUT_ASYNC_BIT)
#define INTEL_WAKEREF_PUT_DELAY \
	GENMASK(BITS_PER_LONG - 1, __INTEL_WAKEREF_PUT_LAST_BIT__)

/**
 * __intel_wakeref_put: Release the wakeref
 * @wf: the wakeref
 * @flags: control flags
 *
 * Release our hold on the wakeref. When there are no more users,
 * the runtime pm wakeref will be released after the intel_wakeref_ops->put()
 * callback is called underneath the wakeref mutex.
 *
 * Note that intel_wakeref_ops->put() is allowed to fail, in which case the
 * runtime-pm wakeref is retained.
 */
static inline void
__intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
		__intel_wakeref_put_last(wf, flags);
}

static inline void
intel_wakeref_put(struct intel_wakeref *wf)
{
	might_sleep();
	__intel_wakeref_put(wf, 0);
}

static inline void
intel_wakeref_put_async(struct intel_wakeref *wf)
{
	__intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC);
}

static inline void
intel_wakeref_put_delay(struct intel_wakeref *wf, unsigned long delay)
{
	__intel_wakeref_put(wf,
			    INTEL_WAKEREF_PUT_ASYNC |
			    FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, delay));
}

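/*
 * Illustrative comparison of the put variants (hypothetical my_gt user):
 * intel_wakeref_put() may invoke ops->put() inline and can sleep, while the
 * async and delay forms defer the final release to a worker and so suit
 * contexts that must not sleep. The delay is in jiffies.
 *
 *	intel_wakeref_put(&gt->wakeref);		park inline, may sleep
 *	intel_wakeref_put_async(&gt->wakeref);		park from a worker
 *	intel_wakeref_put_delay(&gt->wakeref, HZ);	park ~1 second later
 */
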
static inline void
intel_wakeref_might_put(struct intel_wakeref *wf)
{
	might_lock(&wf->mutex);
}

/**
 * intel_wakeref_lock: Lock the wakeref (mutex)
 * @wf: the wakeref
 *
 * Locks the wakeref to prevent it being acquired or released. New users
 * can still adjust the counter, but the wakeref itself (and callback)
 * cannot be acquired or released.
 */
static inline void
intel_wakeref_lock(struct intel_wakeref *wf)
	__acquires(wf->mutex)
{
	mutex_lock(&wf->mutex);
}

/**
 * intel_wakeref_unlock: Unlock the wakeref
 * @wf: the wakeref
 *
 * Releases a previously acquired intel_wakeref_lock().
 */
static inline void
intel_wakeref_unlock(struct intel_wakeref *wf)
	__releases(wf->mutex)
{
	mutex_unlock(&wf->mutex);
}

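/*
 * Illustrative sketch (hypothetical my_gt user): hold the mutex to sample
 * the wakeref state without racing against the get/put callbacks:
 *
 *	intel_wakeref_lock(&gt->wakeref);
 *	active = intel_wakeref_is_active(&gt->wakeref);
 *	intel_wakeref_unlock(&gt->wakeref);
 */
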
/**
 * intel_wakeref_unlock_wait: Wait until the active callback is complete
 * @wf: the wakeref
 *
 * Waits until the active callback (running under @wf->mutex, possibly on
 * another CPU) is complete.
 */
static inline void
intel_wakeref_unlock_wait(struct intel_wakeref *wf)
{
	mutex_lock(&wf->mutex);
	mutex_unlock(&wf->mutex);
	flush_delayed_work(&wf->work);
}

/**
 * intel_wakeref_is_active: Query whether the wakeref is currently held
 * @wf: the wakeref
 *
 * Returns: true if the wakeref is currently held.
 */
static inline bool
intel_wakeref_is_active(const struct intel_wakeref *wf)
{
	return READ_ONCE(wf->wakeref);
}

/**
 * __intel_wakeref_defer_park: Defer the current park callback
 * @wf: the wakeref
 */
static inline void
__intel_wakeref_defer_park(struct intel_wakeref *wf)
{
	lockdep_assert_held(&wf->mutex);
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count));
	atomic_set_release(&wf->count, 1);
}

/**
 * intel_wakeref_wait_for_idle: Wait until the wakeref is idle
 * @wf: the wakeref
 *
 * Wait for the earlier asynchronous release of the wakeref. Note
 * this will wait for any third party as well, so make sure you only wait
 * when you have control over the wakeref and trust no one else is acquiring
 * it.
 *
 * Returns: 0 on success, error code if killed.
 */
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);

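/*
 * Illustrative sketch (hypothetical my_gt user): flush an earlier
 * asynchronous release before suspend, assuming we have exclusive control
 * of the wakeref at this point:
 *
 *	intel_wakeref_put_async(&gt->wakeref);
 *	err = intel_wakeref_wait_for_idle(&gt->wakeref);
 *	if (err)
 *		return err;
 */
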
#define INTEL_WAKEREF_DEF ((intel_wakeref_t)(-1))

static inline intel_wakeref_t intel_ref_tracker_alloc(struct ref_tracker_dir *dir)
{
	struct ref_tracker *user = NULL;

	ref_tracker_alloc(dir, &user, GFP_NOWAIT);

	return (intel_wakeref_t)user ?: INTEL_WAKEREF_DEF;
}

static inline void intel_ref_tracker_free(struct ref_tracker_dir *dir,
					  intel_wakeref_t handle)
{
	struct ref_tracker *user;

	user = (handle == INTEL_WAKEREF_DEF) ? NULL : (void *)handle;

	ref_tracker_free(dir, &user);
}

void intel_ref_tracker_show(struct ref_tracker_dir *dir,
			    struct drm_printer *p);

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_WAKEREF)

static inline intel_wakeref_t intel_wakeref_track(struct intel_wakeref *wf)
{
	return intel_ref_tracker_alloc(&wf->debug);
}

static inline void intel_wakeref_untrack(struct intel_wakeref *wf,
					 intel_wakeref_t handle)
{
	intel_ref_tracker_free(&wf->debug, handle);
}

#else

static inline intel_wakeref_t intel_wakeref_track(struct intel_wakeref *wf)
{
	return -1;
}

static inline void intel_wakeref_untrack(struct intel_wakeref *wf,
					 intel_wakeref_t handle)
{
}

#endif

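/*
 * Illustrative sketch (hypothetical my_gt user): pair track/untrack around
 * a held wakeref so leaked references can be attributed when
 * CONFIG_DRM_I915_DEBUG_WAKEREF is enabled; with it disabled, both calls
 * collapse to no-ops.
 *
 *	intel_wakeref_t handle;
 *
 *	handle = intel_wakeref_track(&gt->wakeref);
 *	... the wakeref is held ...
 *	intel_wakeref_untrack(&gt->wakeref, handle);
 */
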
struct intel_wakeref_auto {
	struct drm_i915_private *i915;
	struct timer_list timer;
	intel_wakeref_t wakeref;
	spinlock_t lock;
	refcount_t count;
};

/**
 * intel_wakeref_auto: Delay the runtime-pm autosuspend
 * @wf: the wakeref
 * @timeout: relative timeout in jiffies
 *
 * The runtime-pm core uses a suspend delay after the last wakeref
 * is released before triggering runtime suspend of the device. That
 * delay is configurable via sysfs with little regard to the device
 * characteristics. Instead, we want to tune the autosuspend based on our
 * HW knowledge. intel_wakeref_auto() delays the sleep by the supplied
 * timeout.
 *
 * Pass @timeout = 0 to cancel a previous autosuspend by executing the
 * suspend immediately.
 */
void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout);

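/*
 * Illustrative sketch (the user_wakeref field name and the 250-jiffy
 * timeout are hypothetical): keep the device awake a little longer after
 * user activity, or flush immediately with a zero timeout:
 *
 *	intel_wakeref_auto(&i915->user_wakeref, 250);
 *	...
 *	intel_wakeref_auto(&i915->user_wakeref, 0);
 */
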
void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct drm_i915_private *i915);
void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf);

#endif /* INTEL_WAKEREF_H */