/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013-2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_FENCE_H__
#define __MSM_FENCE_H__

#include "msm_drv.h"
/**
 * struct msm_fence_context - fence context for gpu
 *
 * Each ringbuffer has a single fence context, with the GPU writing an
 * incrementing fence seqno at the end of each submit
 */
struct msm_fence_context {
	struct drm_device *dev;
	/** name: human readable name for fence timeline */
	char name[32];
	/** context: see dma_fence_context_alloc() */
	unsigned context;

	/**
	 * last_fence:
	 *
	 * Last assigned fence, incremented each time a fence is created
	 * on this fence context. If last_fence == completed_fence,
	 * there is no remaining pending work
	 */
	uint32_t last_fence;

	/**
	 * completed_fence:
	 *
	 * The last completed fence, updated from the CPU after interrupt
	 * from GPU
	 */
	uint32_t completed_fence;

	/**
	 * fenceptr:
	 *
	 * The address that the GPU directly writes with completed fence
	 * seqno. This can be ahead of completed_fence. We can peek at
	 * this to see if a fence has already signaled but the CPU hasn't
	 * gotten around to handling the irq and updating completed_fence
	 */
	volatile uint32_t *fenceptr;

	spinlock_t spinlock;
};
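
/*
 * Illustrative sketch (not part of the upstream header; the helper name is
 * hypothetical): given the fields above, a seqno can be treated as signaled
 * once either the CPU-maintained completed_fence or the GPU-written
 * *fenceptr has caught up with it.  Signed subtraction keeps the comparison
 * correct across seqno wraparound.
 */
static inline bool example_fence_signaled(struct msm_fence_context *fctx,
		uint32_t fence)
{
	/* Check the CPU-updated value first, then peek at the seqno the
	 * GPU wrote directly, which may be ahead of completed_fence.
	 */
	return ((int32_t)(fctx->completed_fence - fence) >= 0) ||
	       ((int32_t)(*fctx->fenceptr - fence) >= 0);
}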

struct msm_fence_context * msm_fence_context_alloc(struct drm_device *dev,
		volatile uint32_t *fenceptr, const char *name);
void msm_fence_context_free(struct msm_fence_context *fctx);

void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence);

struct dma_fence * msm_fence_alloc(struct msm_fence_context *fctx);
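
/*
 * Illustrative usage sketch (not upstream code; the "example_" helpers and
 * the "example-ring" name are assumptions): one fence context is created per
 * ring around the GPU-written seqno location, each submit allocates the next
 * fence on that timeline, and the completion interrupt path feeds the
 * GPU-written seqno back via msm_update_fence().
 */
static inline struct msm_fence_context *
example_ring_timeline_init(struct drm_device *dev, volatile uint32_t *seqno_slot)
{
	/* one timeline per ringbuffer, backed by the GPU-written seqno slot */
	return msm_fence_context_alloc(dev, seqno_slot, "example-ring");
}

static inline struct dma_fence *
example_submit_fence(struct msm_fence_context *fctx)
{
	/* each submit gets the next fence on this ring's timeline */
	return msm_fence_alloc(fctx);
}

static inline void
example_retire_irq(struct msm_fence_context *fctx)
{
	/* propagate the seqno the GPU has written out to completed_fence */
	msm_update_fence(fctx, *fctx->fenceptr);
}

#endif /* __MSM_FENCE_H__ */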