#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

struct intel_hw_status_page {
	u32		*page_addr;
	unsigned int	gfx_addr;
	struct drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))

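/*
 * Illustrative expansion of the accessors above (the RING_* register
 * helpers come from i915_reg.h):
 *
 *	I915_READ_TAIL(ring)
 *		=> I915_READ(RING_TAIL((ring)->mmio_base))
 *		=> an MMIO read of the TAIL register inside the register
 *		   block that starts at this ring's mmio_base
 *
 * The same ring code can therefore drive any engine simply by being
 * given a different mmio_base.
 */
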
struct intel_ring_buffer {
	const char	*name;
	enum intel_ring_id {
		RCS = 0x0,
		VCS,
		BCS,
	} id;
#define I915_NUM_RINGS 3
	u32		mmio_base;
	void		__iomem *virtual_start;
	struct drm_device *dev;
	struct drm_i915_gem_object *obj;

	u32		head;
	u32		tail;
	int		space;
	int		size;
	int		effective_size;
	struct intel_hw_status_page status_page;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32		last_retired_head;

	u32		irq_refcount;	/* protected by dev_priv->irq_lock */
	u32		irq_enable_mask; /* bitmask to enable ring interrupt */
	u32		trace_irq_seqno;
	u32		sync_seqno[I915_NUM_RINGS-1];
	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
	void		(*irq_put)(struct intel_ring_buffer *ring);

	int		(*init)(struct intel_ring_buffer *ring);

	void		(*write_tail)(struct intel_ring_buffer *ring,
				      u32 value);
	int __must_check (*flush)(struct intel_ring_buffer *ring,
				  u32 invalidate_domains,
				  u32 flush_domains);
	int		(*add_request)(struct intel_ring_buffer *ring,
				       u32 *seqno);
	u32		(*get_seqno)(struct intel_ring_buffer *ring);
	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
					       u32 offset, u32 length);
	void		(*cleanup)(struct intel_ring_buffer *ring);
	int		(*sync_to)(struct intel_ring_buffer *ring,
				   struct intel_ring_buffer *to,
				   u32 seqno);

	u32		semaphore_register[3]; /* our mbox written by others */
	u32		signal_mbox[2]; /* mboxes this ring signals to */
	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	u32		outstanding_lazy_request;
	bool		gpu_caches_dirty;

	wait_queue_head_t irq_queue;

	/**
	 * Do an explicit TLB flush before MI_SET_CONTEXT
	 */
	bool		itlb_before_ctx_switch;
	struct i915_hw_context *default_context;
	struct drm_i915_gem_object *last_context_obj;

	void		*private;
};

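/*
 * How head, tail, space and size relate (an illustrative sketch of the
 * driver's free-space accounting, simplified from intel_ringbuffer.c;
 * the "+ 8" holds back the last 8 bytes so that tail never catches up
 * with head exactly):
 *
 *	space = head - (tail + 8);
 *	if (space < 0)
 *		space += size;
 *
 * When a request is retired, last_retired_head lets us move head
 * forward to that request's position before redoing this computation.
 */
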
static inline bool
intel_ring_initialized(struct intel_ring_buffer *ring)
{
	return ring->obj != NULL;
}

static inline unsigned
intel_ring_flag(struct intel_ring_buffer *ring)
{
	return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_ring_buffer *ring,
		      struct intel_ring_buffer *other)
{
	int idx;

	/*
	 * cs  -> 0 = vcs, 1 = bcs
	 * vcs -> 0 = bcs, 1 = cs
	 * bcs -> 0 = cs,  1 = vcs
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}
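
/*
 * Worked example for the table above (illustrative): the rings are
 * consecutive elements of one array indexed RCS, VCS, BCS, so for
 * ring == rcs and other == bcs, idx = (2 - 0) - 1 = 1; bcs is indeed
 * sync index 1 from the render ring's point of view.
 */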
151 | ||
8187a2b7 ZN |
152 | static inline u32 |
153 | intel_read_status_page(struct intel_ring_buffer *ring, | |
78501eac | 154 | int reg) |
8187a2b7 | 155 | { |
4225d0f2 SV |
156 | /* Ensure that the compiler doesn't optimize away the load. */ |
157 | barrier(); | |
158 | return ring->status_page.page_addr[reg]; | |
8187a2b7 ZN |
159 | } |
160 | ||
311bd68e CW |
161 | /** |
162 | * Reads a dword out of the status page, which is written to from the command | |
163 | * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or | |
164 | * MI_STORE_DATA_IMM. | |
165 | * | |
166 | * The following dwords have a reserved meaning: | |
167 | * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes. | |
168 | * 0x04: ring 0 head pointer | |
169 | * 0x05: ring 1 head pointer (915-class) | |
170 | * 0x06: ring 2 head pointer (915-class) | |
171 | * 0x10-0x1b: Context status DWords (GM45) | |
172 | * 0x1f: Last written status offset. (GM45) | |
173 | * | |
174 | * The area from dword 0x20 to 0x3ff is available for driver usage. | |
175 | */ | |
311bd68e | 176 | #define I915_GEM_HWS_INDEX 0x20 |
311bd68e | 177 | |
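/*
 * Illustrative use: the seqno breadcrumb that add_request writes lands
 * at I915_GEM_HWS_INDEX, so a get_seqno implementation can be as
 * simple as:
 *
 *	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 */
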
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);

int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
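/*
 * Since the free-space accounting never reports the final 8 bytes (see
 * the sketch above), waiting for size - 8 bytes of space is equivalent
 * to waiting for the ring to drain completely.
 */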
static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
{
	return intel_wait_ring_buffer(ring, ring->size - 8);
}

int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);

static inline void intel_ring_emit(struct intel_ring_buffer *ring,
				   u32 data)
{
	iowrite32(data, ring->virtual_start + ring->tail);
	ring->tail += 4;
}

void intel_ring_advance(struct intel_ring_buffer *ring);

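/*
 * Illustrative emission pattern (the dwords below are arbitrary):
 * reserve space, write the commands, then publish the new tail so the
 * GPU can execute them:
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 *
 * intel_ring_begin() takes the number of dwords about to be emitted
 * and waits for sufficient free space; intel_ring_advance() is what
 * makes the hardware see the new commands.
 */
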
u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);

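/*
 * Per-engine constructors; each one initializes the corresponding
 * dev_priv->ring[] entry (RCS, VCS and BCS respectively).
 */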
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);

static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
{
	return ring->tail;
}

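/*
 * Used by the request tracepoints: take an irq reference on first use
 * and record the seqno whose retirement the tracer is waiting to see.
 */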
static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
		ring->trace_irq_seqno = seqno;
}

/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);

#endif /* _INTEL_RINGBUFFER_H_ */