[linux.git] / drivers/gpu/drm/i915/intel_ringbuffer.c
1 /*
2  * Copyright © 2008-2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <[email protected]>
25  *    Zou Nan hai <[email protected]>
26  *    Xiang Hai hao<[email protected]>
27  *
28  */
29
30 #include <drm/drmP.h>
31 #include "i915_drv.h"
32 #include <drm/i915_drm.h>
33 #include "i915_trace.h"
34 #include "intel_drv.h"
35
36 bool
37 intel_ring_initialized(struct intel_engine_cs *ring)
38 {
39         struct drm_device *dev = ring->dev;
40
41         if (!dev)
42                 return false;
43
44         if (i915.enable_execlists) {
45                 struct intel_context *dctx = ring->default_context;
46                 struct intel_ringbuffer *ringbuf = dctx->engine[ring->id].ringbuf;
47
48                 return ringbuf->obj;
49         } else
50                 return ring->buffer && ring->buffer->obj;
51 }
52
53 int __intel_ring_space(int head, int tail, int size)
54 {
55         int space = head - (tail + I915_RING_FREE_SPACE);
56         if (space < 0)
57                 space += size;
58         return space;
59 }
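/*
 * Worked example for the computation above: with a 4096 byte ring,
 * head == 512 and tail == 3584, the raw difference
 * 512 - (3584 + I915_RING_FREE_SPACE) is negative, so the ring size is
 * added back and the usable space is 1024 - I915_RING_FREE_SPACE bytes.
 * The I915_RING_FREE_SPACE slack is held in reserve so that the software
 * tail never runs right up to the hardware head.
 */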
60
61 int intel_ring_space(struct intel_ringbuffer *ringbuf)
62 {
63         return __intel_ring_space(ringbuf->head & HEAD_ADDR,
64                                   ringbuf->tail, ringbuf->size);
65 }
66
67 bool intel_ring_stopped(struct intel_engine_cs *ring)
68 {
69         struct drm_i915_private *dev_priv = ring->dev->dev_private;
70         return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
71 }
72
73 void __intel_ring_advance(struct intel_engine_cs *ring)
74 {
75         struct intel_ringbuffer *ringbuf = ring->buffer;
76         ringbuf->tail &= ringbuf->size - 1;
77         if (intel_ring_stopped(ring))
78                 return;
79         ring->write_tail(ring, ringbuf->tail);
80 }
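/*
 * The masking above relies on ringbuf->size being a power of two, so
 * "tail &= size - 1" is a cheap modulo: e.g. a tail of 4100 in a
 * 4096 byte ring wraps to 4.  write_tail() then hands the new tail to
 * the hardware unless intel_ring_stopped() reports the ring as stopped.
 */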
81
82 static int
83 gen2_render_ring_flush(struct intel_engine_cs *ring,
84                        u32      invalidate_domains,
85                        u32      flush_domains)
86 {
87         u32 cmd;
88         int ret;
89
90         cmd = MI_FLUSH;
91         if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
92                 cmd |= MI_NO_WRITE_FLUSH;
93
94         if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
95                 cmd |= MI_READ_FLUSH;
96
97         ret = intel_ring_begin(ring, 2);
98         if (ret)
99                 return ret;
100
101         intel_ring_emit(ring, cmd);
102         intel_ring_emit(ring, MI_NOOP);
103         intel_ring_advance(ring);
104
105         return 0;
106 }
107
108 static int
109 gen4_render_ring_flush(struct intel_engine_cs *ring,
110                        u32      invalidate_domains,
111                        u32      flush_domains)
112 {
113         struct drm_device *dev = ring->dev;
114         u32 cmd;
115         int ret;
116
117         /*
118          * read/write caches:
119          *
120          * I915_GEM_DOMAIN_RENDER is always invalidated, but is
121          * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
122          * also flushed at 2d versus 3d pipeline switches.
123          *
124          * read-only caches:
125          *
126          * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
127          * MI_READ_FLUSH is set, and is always flushed on 965.
128          *
129          * I915_GEM_DOMAIN_COMMAND may not exist?
130          *
131          * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
132          * invalidated when MI_EXE_FLUSH is set.
133          *
134          * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
135          * invalidated with every MI_FLUSH.
136          *
137          * TLBs:
138          *
139          * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
140          * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
141          * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
142          * are flushed at any MI_FLUSH.
143          */
144
145         cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
146         if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
147                 cmd &= ~MI_NO_WRITE_FLUSH;
148         if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
149                 cmd |= MI_EXE_FLUSH;
150
151         if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
152             (IS_G4X(dev) || IS_GEN5(dev)))
153                 cmd |= MI_INVALIDATE_ISP;
154
155         ret = intel_ring_begin(ring, 2);
156         if (ret)
157                 return ret;
158
159         intel_ring_emit(ring, cmd);
160         intel_ring_emit(ring, MI_NOOP);
161         intel_ring_advance(ring);
162
163         return 0;
164 }
165
166 /**
167  * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
168  * implementing two workarounds on gen6.  From section 1.4.7.1
169  * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
170  *
171  * [DevSNB-C+{W/A}] Before any depth stall flush (including those
172  * produced by non-pipelined state commands), software needs to first
173  * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
174  * 0.
175  *
176  * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
177  * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
178  *
179  * And the workaround for these two requires this workaround first:
180  *
181  * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
182  * BEFORE the pipe-control with a post-sync op and no write-cache
183  * flushes.
184  *
185  * And this last workaround is tricky because of the requirements on
186  * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
187  * volume 2 part 1:
188  *
189  *     "1 of the following must also be set:
190  *      - Render Target Cache Flush Enable ([12] of DW1)
191  *      - Depth Cache Flush Enable ([0] of DW1)
192  *      - Stall at Pixel Scoreboard ([1] of DW1)
193  *      - Depth Stall ([13] of DW1)
194  *      - Post-Sync Operation ([13] of DW1)
195  *      - Notify Enable ([8] of DW1)"
196  *
197  * The cache flushes require the workaround flush that triggered this
198  * one, so we can't use it.  Depth stall would trigger the same.
199  * Post-sync nonzero is what triggered this second workaround, so we
200  * can't use that one either.  Notify enable is IRQs, which aren't
201  * really our business.  That leaves only stall at scoreboard.
202  */
203 static int
204 intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
205 {
206         u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
207         int ret;
208
209
210         ret = intel_ring_begin(ring, 6);
211         if (ret)
212                 return ret;
213
214         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
215         intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
216                         PIPE_CONTROL_STALL_AT_SCOREBOARD);
217         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
218         intel_ring_emit(ring, 0); /* low dword */
219         intel_ring_emit(ring, 0); /* high dword */
220         intel_ring_emit(ring, MI_NOOP);
221         intel_ring_advance(ring);
222
223         ret = intel_ring_begin(ring, 6);
224         if (ret)
225                 return ret;
226
227         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
228         intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
229         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
230         intel_ring_emit(ring, 0);
231         intel_ring_emit(ring, 0);
232         intel_ring_emit(ring, MI_NOOP);
233         intel_ring_advance(ring);
234
235         return 0;
236 }
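/*
 * Of the two PIPE_CONTROLs above, the first provides the CS-stall /
 * stall-at-scoreboard prerequisite described in the comment preceding
 * this function, and the second is the post-sync (QW write) flush that
 * the gen6 workarounds actually require.
 */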
237
238 static int
239 gen6_render_ring_flush(struct intel_engine_cs *ring,
240                          u32 invalidate_domains, u32 flush_domains)
241 {
242         u32 flags = 0;
243         u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
244         int ret;
245
246         /* Force SNB workarounds for PIPE_CONTROL flushes */
247         ret = intel_emit_post_sync_nonzero_flush(ring);
248         if (ret)
249                 return ret;
250
251         /* Just flush everything.  Experiments have shown that reducing the
252          * number of bits based on the write domains has little performance
253          * impact.
254          */
255         if (flush_domains) {
256                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
257                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
258                 /*
259                  * Ensure that any following seqno writes only happen
260                  * when the render cache is indeed flushed.
261                  */
262                 flags |= PIPE_CONTROL_CS_STALL;
263         }
264         if (invalidate_domains) {
265                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
266                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
267                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
268                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
269                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
270                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
271                 /*
272                  * TLB invalidate requires a post-sync write.
273                  */
274                 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
275         }
276
277         ret = intel_ring_begin(ring, 4);
278         if (ret)
279                 return ret;
280
281         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
282         intel_ring_emit(ring, flags);
283         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
284         intel_ring_emit(ring, 0);
285         intel_ring_advance(ring);
286
287         return 0;
288 }
289
290 static int
291 gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
292 {
293         int ret;
294
295         ret = intel_ring_begin(ring, 4);
296         if (ret)
297                 return ret;
298
299         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
300         intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
301                               PIPE_CONTROL_STALL_AT_SCOREBOARD);
302         intel_ring_emit(ring, 0);
303         intel_ring_emit(ring, 0);
304         intel_ring_advance(ring);
305
306         return 0;
307 }
308
309 static int gen7_ring_fbc_flush(struct intel_engine_cs *ring, u32 value)
310 {
311         int ret;
312
313         if (!ring->fbc_dirty)
314                 return 0;
315
316         ret = intel_ring_begin(ring, 6);
317         if (ret)
318                 return ret;
319         /* WaFbcNukeOn3DBlt:ivb/hsw */
320         intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
321         intel_ring_emit(ring, MSG_FBC_REND_STATE);
322         intel_ring_emit(ring, value);
323         intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT);
324         intel_ring_emit(ring, MSG_FBC_REND_STATE);
325         intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
326         intel_ring_advance(ring);
327
328         ring->fbc_dirty = false;
329         return 0;
330 }
331
332 static int
333 gen7_render_ring_flush(struct intel_engine_cs *ring,
334                        u32 invalidate_domains, u32 flush_domains)
335 {
336         u32 flags = 0;
337         u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
338         int ret;
339
340         /*
341          * Ensure that any following seqno writes only happen when the render
342          * cache is indeed flushed.
343          *
344          * Workaround: 4th PIPE_CONTROL command (except the ones with only
345          * read-cache invalidate bits set) must have the CS_STALL bit set. We
346          * don't try to be clever and just set it unconditionally.
347          */
348         flags |= PIPE_CONTROL_CS_STALL;
349
350         /* Just flush everything.  Experiments have shown that reducing the
351          * number of bits based on the write domains has little performance
352          * impact.
353          */
354         if (flush_domains) {
355                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
356                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
357         }
358         if (invalidate_domains) {
359                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
360                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
361                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
362                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
363                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
364                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
365                 /*
366                  * TLB invalidate requires a post-sync write.
367                  */
368                 flags |= PIPE_CONTROL_QW_WRITE;
369                 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
370
371                 /* Workaround: we must issue a pipe_control with CS-stall bit
372                  * set before a pipe_control command that has the state cache
373                  * invalidate bit set. */
374                 gen7_render_ring_cs_stall_wa(ring);
375         }
376
377         ret = intel_ring_begin(ring, 4);
378         if (ret)
379                 return ret;
380
381         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
382         intel_ring_emit(ring, flags);
383         intel_ring_emit(ring, scratch_addr);
384         intel_ring_emit(ring, 0);
385         intel_ring_advance(ring);
386
387         if (!invalidate_domains && flush_domains)
388                 return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
389
390         return 0;
391 }
392
393 static int
394 gen8_emit_pipe_control(struct intel_engine_cs *ring,
395                        u32 flags, u32 scratch_addr)
396 {
397         int ret;
398
399         ret = intel_ring_begin(ring, 6);
400         if (ret)
401                 return ret;
402
403         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
404         intel_ring_emit(ring, flags);
405         intel_ring_emit(ring, scratch_addr);
406         intel_ring_emit(ring, 0);
407         intel_ring_emit(ring, 0);
408         intel_ring_emit(ring, 0);
409         intel_ring_advance(ring);
410
411         return 0;
412 }
413
414 static int
415 gen8_render_ring_flush(struct intel_engine_cs *ring,
416                        u32 invalidate_domains, u32 flush_domains)
417 {
418         u32 flags = 0;
419         u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
420         int ret;
421
422         flags |= PIPE_CONTROL_CS_STALL;
423
424         if (flush_domains) {
425                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
426                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
427         }
428         if (invalidate_domains) {
429                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
430                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
431                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
432                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
433                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
434                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
435                 flags |= PIPE_CONTROL_QW_WRITE;
436                 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
437
438                 /* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
439                 ret = gen8_emit_pipe_control(ring,
440                                              PIPE_CONTROL_CS_STALL |
441                                              PIPE_CONTROL_STALL_AT_SCOREBOARD,
442                                              0);
443                 if (ret)
444                         return ret;
445         }
446
447         return gen8_emit_pipe_control(ring, flags, scratch_addr);
448 }
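/*
 * WaCsStallBeforeStateCacheInvalidate above follows the same pattern as
 * gen7_render_ring_cs_stall_wa(): a stalling PIPE_CONTROL is emitted
 * before the PIPE_CONTROL that carries the invalidate bits.
 */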
449
450 static void ring_write_tail(struct intel_engine_cs *ring,
451                             u32 value)
452 {
453         struct drm_i915_private *dev_priv = ring->dev->dev_private;
454         I915_WRITE_TAIL(ring, value);
455 }
456
457 u64 intel_ring_get_active_head(struct intel_engine_cs *ring)
458 {
459         struct drm_i915_private *dev_priv = ring->dev->dev_private;
460         u64 acthd;
461
462         if (INTEL_INFO(ring->dev)->gen >= 8)
463                 acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base),
464                                          RING_ACTHD_UDW(ring->mmio_base));
465         else if (INTEL_INFO(ring->dev)->gen >= 4)
466                 acthd = I915_READ(RING_ACTHD(ring->mmio_base));
467         else
468                 acthd = I915_READ(ACTHD);
469
470         return acthd;
471 }
472
473 static void ring_setup_phys_status_page(struct intel_engine_cs *ring)
474 {
475         struct drm_i915_private *dev_priv = ring->dev->dev_private;
476         u32 addr;
477
478         addr = dev_priv->status_page_dmah->busaddr;
479         if (INTEL_INFO(ring->dev)->gen >= 4)
480                 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
481         I915_WRITE(HWS_PGA, addr);
482 }
483
484 static bool stop_ring(struct intel_engine_cs *ring)
485 {
486         struct drm_i915_private *dev_priv = to_i915(ring->dev);
487
488         if (!IS_GEN2(ring->dev)) {
489                 I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
490                 if (wait_for((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
491                         DRM_ERROR("%s : timed out trying to stop ring\n", ring->name);
492                         /* Sometimes we observe that the idle flag is not
493                          * set even though the ring is empty. So double
494                          * check before giving up.
495                          */
496                         if (I915_READ_HEAD(ring) != I915_READ_TAIL(ring))
497                                 return false;
498                 }
499         }
500
501         I915_WRITE_CTL(ring, 0);
502         I915_WRITE_HEAD(ring, 0);
503         ring->write_tail(ring, 0);
504
505         if (!IS_GEN2(ring->dev)) {
506                 (void)I915_READ_CTL(ring);
507                 I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
508         }
509
510         return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
511 }
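/*
 * stop_ring() only reports success once HEAD has actually latched zero;
 * init_ring_common() below retries the stop once and gives up with -EIO
 * if the ring still refuses to park.
 */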
512
513 static int init_ring_common(struct intel_engine_cs *ring)
514 {
515         struct drm_device *dev = ring->dev;
516         struct drm_i915_private *dev_priv = dev->dev_private;
517         struct intel_ringbuffer *ringbuf = ring->buffer;
518         struct drm_i915_gem_object *obj = ringbuf->obj;
519         int ret = 0;
520
521         gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
522
523         if (!stop_ring(ring)) {
524                 /* G45 ring initialization often fails to reset head to zero */
525                 DRM_DEBUG_KMS("%s head not reset to zero "
526                               "ctl %08x head %08x tail %08x start %08x\n",
527                               ring->name,
528                               I915_READ_CTL(ring),
529                               I915_READ_HEAD(ring),
530                               I915_READ_TAIL(ring),
531                               I915_READ_START(ring));
532
533                 if (!stop_ring(ring)) {
534                         DRM_ERROR("failed to set %s head to zero "
535                                   "ctl %08x head %08x tail %08x start %08x\n",
536                                   ring->name,
537                                   I915_READ_CTL(ring),
538                                   I915_READ_HEAD(ring),
539                                   I915_READ_TAIL(ring),
540                                   I915_READ_START(ring));
541                         ret = -EIO;
542                         goto out;
543                 }
544         }
545
546         if (I915_NEED_GFX_HWS(dev))
547                 intel_ring_setup_status_page(ring);
548         else
549                 ring_setup_phys_status_page(ring);
550
551         /* Enforce ordering by reading HEAD register back */
552         I915_READ_HEAD(ring);
553
554         /* Initialize the ring. This must happen _after_ we've cleared the ring
555          * registers with the above sequence (the readback of the HEAD registers
556          * also enforces ordering), otherwise the hw might lose the new ring
557          * register values. */
558         I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
559         I915_WRITE_CTL(ring,
560                         ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
561                         | RING_VALID);
562
563         /* If the head is still not zero, the ring is dead */
564         if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
565                      I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
566                      (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
567                 DRM_ERROR("%s initialization failed "
568                           "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
569                           ring->name,
570                           I915_READ_CTL(ring), I915_READ_CTL(ring) & RING_VALID,
571                           I915_READ_HEAD(ring), I915_READ_TAIL(ring),
572                           I915_READ_START(ring), (unsigned long)i915_gem_obj_ggtt_offset(obj));
573                 ret = -EIO;
574                 goto out;
575         }
576
577         if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
578                 i915_kernel_lost_context(ring->dev);
579         else {
580                 ringbuf->head = I915_READ_HEAD(ring);
581                 ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
582                 ringbuf->space = intel_ring_space(ringbuf);
583                 ringbuf->last_retired_head = -1;
584         }
585
586         memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
587
588 out:
589         gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
590
591         return ret;
592 }
593
594 void
595 intel_fini_pipe_control(struct intel_engine_cs *ring)
596 {
597         struct drm_device *dev = ring->dev;
598
599         if (ring->scratch.obj == NULL)
600                 return;
601
602         if (INTEL_INFO(dev)->gen >= 5) {
603                 kunmap(sg_page(ring->scratch.obj->pages->sgl));
604                 i915_gem_object_ggtt_unpin(ring->scratch.obj);
605         }
606
607         drm_gem_object_unreference(&ring->scratch.obj->base);
608         ring->scratch.obj = NULL;
609 }
610
611 int
612 intel_init_pipe_control(struct intel_engine_cs *ring)
613 {
614         int ret;
615
616         if (ring->scratch.obj)
617                 return 0;
618
619         ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
620         if (ring->scratch.obj == NULL) {
621                 DRM_ERROR("Failed to allocate seqno page\n");
622                 ret = -ENOMEM;
623                 goto err;
624         }
625
626         ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
627         if (ret)
628                 goto err_unref;
629
630         ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
631         if (ret)
632                 goto err_unref;
633
634         ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
635         ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
636         if (ring->scratch.cpu_page == NULL) {
637                 ret = -ENOMEM;
638                 goto err_unpin;
639         }
640
641         DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
642                          ring->name, ring->scratch.gtt_offset);
643         return 0;
644
645 err_unpin:
646         i915_gem_object_ggtt_unpin(ring->scratch.obj);
647 err_unref:
648         drm_gem_object_unreference(&ring->scratch.obj->base);
649 err:
650         return ret;
651 }
652
653 static int init_render_ring(struct intel_engine_cs *ring)
654 {
655         struct drm_device *dev = ring->dev;
656         struct drm_i915_private *dev_priv = dev->dev_private;
657         int ret = init_ring_common(ring);
658         if (ret)
659                 return ret;
660
661         /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
662         if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
663                 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
664
665         /* We need to disable the AsyncFlip performance optimisations in order
666          * to use MI_WAIT_FOR_EVENT within the CS. It should already be
667          * programmed to '1' on all products.
668          *
669          * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
670          */
671         if (INTEL_INFO(dev)->gen >= 6)
672                 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
673
674         /* Required for the hardware to program scanline values for waiting */
675         /* WaEnableFlushTlbInvalidationMode:snb */
676         if (INTEL_INFO(dev)->gen == 6)
677                 I915_WRITE(GFX_MODE,
678                            _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
679
680         /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
681         if (IS_GEN7(dev))
682                 I915_WRITE(GFX_MODE_GEN7,
683                            _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
684                            _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
685
686         if (INTEL_INFO(dev)->gen >= 5) {
687                 ret = intel_init_pipe_control(ring);
688                 if (ret)
689                         return ret;
690         }
691
692         if (IS_GEN6(dev)) {
693                 /* From the Sandybridge PRM, volume 1 part 3, page 24:
694                  * "If this bit is set, STCunit will have LRA as replacement
695                  *  policy. [...] This bit must be reset.  LRA replacement
696                  *  policy is not supported."
697                  */
698                 I915_WRITE(CACHE_MODE_0,
699                            _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
700         }
701
702         if (INTEL_INFO(dev)->gen >= 6)
703                 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
704
705         if (HAS_L3_DPF(dev))
706                 I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
707
708         return ret;
709 }
710
711 static void render_ring_cleanup(struct intel_engine_cs *ring)
712 {
713         struct drm_device *dev = ring->dev;
714         struct drm_i915_private *dev_priv = dev->dev_private;
715
716         if (dev_priv->semaphore_obj) {
717                 i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
718                 drm_gem_object_unreference(&dev_priv->semaphore_obj->base);
719                 dev_priv->semaphore_obj = NULL;
720         }
721
722         intel_fini_pipe_control(ring);
723 }
724
725 static int gen8_rcs_signal(struct intel_engine_cs *signaller,
726                            unsigned int num_dwords)
727 {
728 #define MBOX_UPDATE_DWORDS 8
729         struct drm_device *dev = signaller->dev;
730         struct drm_i915_private *dev_priv = dev->dev_private;
731         struct intel_engine_cs *waiter;
732         int i, ret, num_rings;
733
734         num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
735         num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
736 #undef MBOX_UPDATE_DWORDS
737
738         ret = intel_ring_begin(signaller, num_dwords);
739         if (ret)
740                 return ret;
741
742         for_each_ring(waiter, dev_priv, i) {
743                 u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
744                 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
745                         continue;
746
747                 intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
748                 intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
749                                            PIPE_CONTROL_QW_WRITE |
750                                            PIPE_CONTROL_FLUSH_ENABLE);
751                 intel_ring_emit(signaller, lower_32_bits(gtt_offset));
752                 intel_ring_emit(signaller, upper_32_bits(gtt_offset));
753                 intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
754                 intel_ring_emit(signaller, 0);
755                 intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
756                                            MI_SEMAPHORE_TARGET(waiter->id));
757                 intel_ring_emit(signaller, 0);
758         }
759
760         return 0;
761 }
762
763 static int gen8_xcs_signal(struct intel_engine_cs *signaller,
764                            unsigned int num_dwords)
765 {
766 #define MBOX_UPDATE_DWORDS 6
767         struct drm_device *dev = signaller->dev;
768         struct drm_i915_private *dev_priv = dev->dev_private;
769         struct intel_engine_cs *waiter;
770         int i, ret, num_rings;
771
772         num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
773         num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
774 #undef MBOX_UPDATE_DWORDS
775
776         ret = intel_ring_begin(signaller, num_dwords);
777         if (ret)
778                 return ret;
779
780         for_each_ring(waiter, dev_priv, i) {
781                 u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
782                 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
783                         continue;
784
785                 intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
786                                            MI_FLUSH_DW_OP_STOREDW);
787                 intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
788                                            MI_FLUSH_DW_USE_GTT);
789                 intel_ring_emit(signaller, upper_32_bits(gtt_offset));
790                 intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
791                 intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
792                                            MI_SEMAPHORE_TARGET(waiter->id));
793                 intel_ring_emit(signaller, 0);
794         }
795
796         return 0;
797 }
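/*
 * The render ring signals with an 8-dword PIPE_CONTROL QW write per
 * waiter (gen8_rcs_signal above), whereas the other rings use the
 * 6-dword MI_FLUSH_DW form here; MBOX_UPDATE_DWORDS is sized to match
 * in each case.
 */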
798
799 static int gen6_signal(struct intel_engine_cs *signaller,
800                        unsigned int num_dwords)
801 {
802         struct drm_device *dev = signaller->dev;
803         struct drm_i915_private *dev_priv = dev->dev_private;
804         struct intel_engine_cs *useless;
805         int i, ret, num_rings;
806
807 #define MBOX_UPDATE_DWORDS 3
808         num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
809         num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
810 #undef MBOX_UPDATE_DWORDS
811
812         ret = intel_ring_begin(signaller, num_dwords);
813         if (ret)
814                 return ret;
815
816         for_each_ring(useless, dev_priv, i) {
817                 u32 mbox_reg = signaller->semaphore.mbox.signal[i];
818                 if (mbox_reg != GEN6_NOSYNC) {
819                         intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
820                         intel_ring_emit(signaller, mbox_reg);
821                         intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
822                 }
823         }
824
825         /* If num_dwords was rounded, make sure the tail pointer is correct */
826         if (num_rings % 2 == 0)
827                 intel_ring_emit(signaller, MI_NOOP);
828
829         return 0;
830 }
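/*
 * Dword accounting for gen6_signal(): every mailbox update above costs
 * MBOX_UPDATE_DWORDS (3) dwords and the total is rounded up to an even
 * number, e.g. four rings give round_up((4 - 1) * 3, 2) = 10.  The odd
 * slot left over by the rounding is what the trailing MI_NOOP fills in.
 */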
831
832 /**
833  * gen6_add_request - Update the semaphore mailbox registers
834  * 
835  * @ring - ring that is adding a request
836  * (the seqno written is ring->outstanding_lazy_seqno)
837  *
838  * Update the mailbox registers in the *other* rings with the current seqno.
839  * This acts like a signal in the canonical semaphore.
840  */
841 static int
842 gen6_add_request(struct intel_engine_cs *ring)
843 {
844         int ret;
845
846         if (ring->semaphore.signal)
847                 ret = ring->semaphore.signal(ring, 4);
848         else
849                 ret = intel_ring_begin(ring, 4);
850
851         if (ret)
852                 return ret;
853
854         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
855         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
856         intel_ring_emit(ring, ring->outstanding_lazy_seqno);
857         intel_ring_emit(ring, MI_USER_INTERRUPT);
858         __intel_ring_advance(ring);
859
860         return 0;
861 }
862
863 static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
864                                               u32 seqno)
865 {
866         struct drm_i915_private *dev_priv = dev->dev_private;
867         return dev_priv->last_seqno < seqno;
868 }
869
870 /**
871  * intel_ring_sync - sync the waiter to the signaller on seqno
872  *
873  * @waiter - ring that is waiting
874  * @signaller - ring which has, or will signal
875  * @seqno - seqno which the waiter will block on
876  */
877
878 static int
879 gen8_ring_sync(struct intel_engine_cs *waiter,
880                struct intel_engine_cs *signaller,
881                u32 seqno)
882 {
883         struct drm_i915_private *dev_priv = waiter->dev->dev_private;
884         int ret;
885
886         ret = intel_ring_begin(waiter, 4);
887         if (ret)
888                 return ret;
889
890         intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
891                                 MI_SEMAPHORE_GLOBAL_GTT |
892                                 MI_SEMAPHORE_POLL |
893                                 MI_SEMAPHORE_SAD_GTE_SDD);
894         intel_ring_emit(waiter, seqno);
895         intel_ring_emit(waiter,
896                         lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
897         intel_ring_emit(waiter,
898                         upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
899         intel_ring_advance(waiter);
900         return 0;
901 }
902
903 static int
904 gen6_ring_sync(struct intel_engine_cs *waiter,
905                struct intel_engine_cs *signaller,
906                u32 seqno)
907 {
908         u32 dw1 = MI_SEMAPHORE_MBOX |
909                   MI_SEMAPHORE_COMPARE |
910                   MI_SEMAPHORE_REGISTER;
911         u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
912         int ret;
913
914         /* Throughout all of the GEM code, seqno passed implies our current
915          * seqno is >= the last seqno executed. However for hardware the
916          * comparison is strictly greater than.
917          */
918         seqno -= 1;
919
920         WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
921
922         ret = intel_ring_begin(waiter, 4);
923         if (ret)
924                 return ret;
925
926         /* If seqno wrap happened, omit the wait with no-ops */
927         if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
928                 intel_ring_emit(waiter, dw1 | wait_mbox);
929                 intel_ring_emit(waiter, seqno);
930                 intel_ring_emit(waiter, 0);
931                 intel_ring_emit(waiter, MI_NOOP);
932         } else {
933                 intel_ring_emit(waiter, MI_NOOP);
934                 intel_ring_emit(waiter, MI_NOOP);
935                 intel_ring_emit(waiter, MI_NOOP);
936                 intel_ring_emit(waiter, MI_NOOP);
937         }
938         intel_ring_advance(waiter);
939
940         return 0;
941 }
942
943 #define PIPE_CONTROL_FLUSH(ring__, addr__)                                      \
944 do {                                                                    \
945         intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |                \
946                  PIPE_CONTROL_DEPTH_STALL);                             \
947         intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);                    \
948         intel_ring_emit(ring__, 0);                                                     \
949         intel_ring_emit(ring__, 0);                                                     \
950 } while (0)
951
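/*
 * Each PIPE_CONTROL_FLUSH() expansion is four dwords, which is where the
 * intel_ring_begin(ring, 32) in pc_render_add_request() below comes
 * from: 4 (seqno write) + 6 * 4 (scratch flushes) + 4 (notify) = 32.
 */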
952 static int
953 pc_render_add_request(struct intel_engine_cs *ring)
954 {
955         u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
956         int ret;
957
958         /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
959          * incoherent with writes to memory, i.e. completely fubar,
960          * so we need to use PIPE_NOTIFY instead.
961          *
962          * However, we also need to workaround the qword write
963          * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
964          * memory before requesting an interrupt.
965          */
966         ret = intel_ring_begin(ring, 32);
967         if (ret)
968                 return ret;
969
970         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
971                         PIPE_CONTROL_WRITE_FLUSH |
972                         PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
973         intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
974         intel_ring_emit(ring, ring->outstanding_lazy_seqno);
975         intel_ring_emit(ring, 0);
976         PIPE_CONTROL_FLUSH(ring, scratch_addr);
977         scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
978         PIPE_CONTROL_FLUSH(ring, scratch_addr);
979         scratch_addr += 2 * CACHELINE_BYTES;
980         PIPE_CONTROL_FLUSH(ring, scratch_addr);
981         scratch_addr += 2 * CACHELINE_BYTES;
982         PIPE_CONTROL_FLUSH(ring, scratch_addr);
983         scratch_addr += 2 * CACHELINE_BYTES;
984         PIPE_CONTROL_FLUSH(ring, scratch_addr);
985         scratch_addr += 2 * CACHELINE_BYTES;
986         PIPE_CONTROL_FLUSH(ring, scratch_addr);
987
988         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
989                         PIPE_CONTROL_WRITE_FLUSH |
990                         PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
991                         PIPE_CONTROL_NOTIFY);
992         intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
993         intel_ring_emit(ring, ring->outstanding_lazy_seqno);
994         intel_ring_emit(ring, 0);
995         __intel_ring_advance(ring);
996
997         return 0;
998 }
999
1000 static u32
1001 gen6_ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
1002 {
1003         /* Workaround to force correct ordering between irq and seqno writes on
1004          * ivb (and maybe also on snb) by reading from a CS register (like
1005          * ACTHD) before reading the status page. */
1006         if (!lazy_coherency) {
1007                 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1008                 POSTING_READ(RING_ACTHD(ring->mmio_base));
1009         }
1010
1011         return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
1012 }
1013
1014 static u32
1015 ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
1016 {
1017         return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
1018 }
1019
1020 static void
1021 ring_set_seqno(struct intel_engine_cs *ring, u32 seqno)
1022 {
1023         intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
1024 }
1025
1026 static u32
1027 pc_render_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
1028 {
1029         return ring->scratch.cpu_page[0];
1030 }
1031
1032 static void
1033 pc_render_set_seqno(struct intel_engine_cs *ring, u32 seqno)
1034 {
1035         ring->scratch.cpu_page[0] = seqno;
1036 }
1037
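/*
 * All of the *_get_irq()/*_put_irq() pairs below follow the same scheme:
 * the first reference (0 -> 1) unmasks the ring's interrupt under
 * dev_priv->irq_lock and the last reference (1 -> 0) masks it again;
 * only the register that gets written differs between generations.
 */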
1038 static bool
1039 gen5_ring_get_irq(struct intel_engine_cs *ring)
1040 {
1041         struct drm_device *dev = ring->dev;
1042         struct drm_i915_private *dev_priv = dev->dev_private;
1043         unsigned long flags;
1044
1045         if (!dev->irq_enabled)
1046                 return false;
1047
1048         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1049         if (ring->irq_refcount++ == 0)
1050                 gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
1051         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1052
1053         return true;
1054 }
1055
1056 static void
1057 gen5_ring_put_irq(struct intel_engine_cs *ring)
1058 {
1059         struct drm_device *dev = ring->dev;
1060         struct drm_i915_private *dev_priv = dev->dev_private;
1061         unsigned long flags;
1062
1063         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1064         if (--ring->irq_refcount == 0)
1065                 gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
1066         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1067 }
1068
1069 static bool
1070 i9xx_ring_get_irq(struct intel_engine_cs *ring)
1071 {
1072         struct drm_device *dev = ring->dev;
1073         struct drm_i915_private *dev_priv = dev->dev_private;
1074         unsigned long flags;
1075
1076         if (!dev->irq_enabled)
1077                 return false;
1078
1079         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1080         if (ring->irq_refcount++ == 0) {
1081                 dev_priv->irq_mask &= ~ring->irq_enable_mask;
1082                 I915_WRITE(IMR, dev_priv->irq_mask);
1083                 POSTING_READ(IMR);
1084         }
1085         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1086
1087         return true;
1088 }
1089
1090 static void
1091 i9xx_ring_put_irq(struct intel_engine_cs *ring)
1092 {
1093         struct drm_device *dev = ring->dev;
1094         struct drm_i915_private *dev_priv = dev->dev_private;
1095         unsigned long flags;
1096
1097         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1098         if (--ring->irq_refcount == 0) {
1099                 dev_priv->irq_mask |= ring->irq_enable_mask;
1100                 I915_WRITE(IMR, dev_priv->irq_mask);
1101                 POSTING_READ(IMR);
1102         }
1103         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1104 }
1105
1106 static bool
1107 i8xx_ring_get_irq(struct intel_engine_cs *ring)
1108 {
1109         struct drm_device *dev = ring->dev;
1110         struct drm_i915_private *dev_priv = dev->dev_private;
1111         unsigned long flags;
1112
1113         if (!dev->irq_enabled)
1114                 return false;
1115
1116         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1117         if (ring->irq_refcount++ == 0) {
1118                 dev_priv->irq_mask &= ~ring->irq_enable_mask;
1119                 I915_WRITE16(IMR, dev_priv->irq_mask);
1120                 POSTING_READ16(IMR);
1121         }
1122         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1123
1124         return true;
1125 }
1126
1127 static void
1128 i8xx_ring_put_irq(struct intel_engine_cs *ring)
1129 {
1130         struct drm_device *dev = ring->dev;
1131         struct drm_i915_private *dev_priv = dev->dev_private;
1132         unsigned long flags;
1133
1134         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1135         if (--ring->irq_refcount == 0) {
1136                 dev_priv->irq_mask |= ring->irq_enable_mask;
1137                 I915_WRITE16(IMR, dev_priv->irq_mask);
1138                 POSTING_READ16(IMR);
1139         }
1140         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1141 }
1142
1143 void intel_ring_setup_status_page(struct intel_engine_cs *ring)
1144 {
1145         struct drm_device *dev = ring->dev;
1146         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1147         u32 mmio = 0;
1148
1149         /* The ring status page addresses are no longer next to the rest of
1150          * the ring registers as of gen7.
1151          */
1152         if (IS_GEN7(dev)) {
1153                 switch (ring->id) {
1154                 case RCS:
1155                         mmio = RENDER_HWS_PGA_GEN7;
1156                         break;
1157                 case BCS:
1158                         mmio = BLT_HWS_PGA_GEN7;
1159                         break;
1160                 /*
1161                  * VCS2 doesn't actually exist on Gen7; this case is only
1162                  * here to silence gcc's switch check warning.
1163                  */
1164                 case VCS2:
1165                 case VCS:
1166                         mmio = BSD_HWS_PGA_GEN7;
1167                         break;
1168                 case VECS:
1169                         mmio = VEBOX_HWS_PGA_GEN7;
1170                         break;
1171                 }
1172         } else if (IS_GEN6(ring->dev)) {
1173                 mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
1174         } else {
1175                 /* XXX: gen8 returns to sanity */
1176                 mmio = RING_HWS_PGA(ring->mmio_base);
1177         }
1178
1179         I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
1180         POSTING_READ(mmio);
1181
1182         /*
1183          * Flush the TLB for this page
1184          *
1185          * FIXME: These two bits have disappeared on gen8, so a question
1186          * arises: do we still need this and if so how should we go about
1187          * invalidating the TLB?
1188          */
1189         if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
1190                 u32 reg = RING_INSTPM(ring->mmio_base);
1191
1192                 /* ring should be idle before issuing a sync flush */
1193                 WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
1194
1195                 I915_WRITE(reg,
1196                            _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
1197                                               INSTPM_SYNC_FLUSH));
1198                 if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
1199                              1000))
1200                         DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
1201                                   ring->name);
1202         }
1203 }
1204
1205 static int
1206 bsd_ring_flush(struct intel_engine_cs *ring,
1207                u32     invalidate_domains,
1208                u32     flush_domains)
1209 {
1210         int ret;
1211
1212         ret = intel_ring_begin(ring, 2);
1213         if (ret)
1214                 return ret;
1215
1216         intel_ring_emit(ring, MI_FLUSH);
1217         intel_ring_emit(ring, MI_NOOP);
1218         intel_ring_advance(ring);
1219         return 0;
1220 }
1221
1222 static int
1223 i9xx_add_request(struct intel_engine_cs *ring)
1224 {
1225         int ret;
1226
1227         ret = intel_ring_begin(ring, 4);
1228         if (ret)
1229                 return ret;
1230
1231         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
1232         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1233         intel_ring_emit(ring, ring->outstanding_lazy_seqno);
1234         intel_ring_emit(ring, MI_USER_INTERRUPT);
1235         __intel_ring_advance(ring);
1236
1237         return 0;
1238 }
1239
1240 static bool
1241 gen6_ring_get_irq(struct intel_engine_cs *ring)
1242 {
1243         struct drm_device *dev = ring->dev;
1244         struct drm_i915_private *dev_priv = dev->dev_private;
1245         unsigned long flags;
1246
1247         if (!dev->irq_enabled)
1248                 return false;
1249
1250         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1251         if (ring->irq_refcount++ == 0) {
1252                 if (HAS_L3_DPF(dev) && ring->id == RCS)
1253                         I915_WRITE_IMR(ring,
1254                                        ~(ring->irq_enable_mask |
1255                                          GT_PARITY_ERROR(dev)));
1256                 else
1257                         I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1258                 gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
1259         }
1260         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1261
1262         return true;
1263 }
1264
1265 static void
1266 gen6_ring_put_irq(struct intel_engine_cs *ring)
1267 {
1268         struct drm_device *dev = ring->dev;
1269         struct drm_i915_private *dev_priv = dev->dev_private;
1270         unsigned long flags;
1271
1272         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1273         if (--ring->irq_refcount == 0) {
1274                 if (HAS_L3_DPF(dev) && ring->id == RCS)
1275                         I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
1276                 else
1277                         I915_WRITE_IMR(ring, ~0);
1278                 gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
1279         }
1280         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1281 }
1282
1283 static bool
1284 hsw_vebox_get_irq(struct intel_engine_cs *ring)
1285 {
1286         struct drm_device *dev = ring->dev;
1287         struct drm_i915_private *dev_priv = dev->dev_private;
1288         unsigned long flags;
1289
1290         if (!dev->irq_enabled)
1291                 return false;
1292
1293         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1294         if (ring->irq_refcount++ == 0) {
1295                 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1296                 gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask);
1297         }
1298         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1299
1300         return true;
1301 }
1302
1303 static void
1304 hsw_vebox_put_irq(struct intel_engine_cs *ring)
1305 {
1306         struct drm_device *dev = ring->dev;
1307         struct drm_i915_private *dev_priv = dev->dev_private;
1308         unsigned long flags;
1309
1310         if (!dev->irq_enabled)
1311                 return;
1312
1313         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1314         if (--ring->irq_refcount == 0) {
1315                 I915_WRITE_IMR(ring, ~0);
1316                 gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask);
1317         }
1318         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1319 }
1320
1321 static bool
1322 gen8_ring_get_irq(struct intel_engine_cs *ring)
1323 {
1324         struct drm_device *dev = ring->dev;
1325         struct drm_i915_private *dev_priv = dev->dev_private;
1326         unsigned long flags;
1327
1328         if (!dev->irq_enabled)
1329                 return false;
1330
1331         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1332         if (ring->irq_refcount++ == 0) {
1333                 if (HAS_L3_DPF(dev) && ring->id == RCS) {
1334                         I915_WRITE_IMR(ring,
1335                                        ~(ring->irq_enable_mask |
1336                                          GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
1337                 } else {
1338                         I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1339                 }
1340                 POSTING_READ(RING_IMR(ring->mmio_base));
1341         }
1342         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1343
1344         return true;
1345 }
1346
1347 static void
1348 gen8_ring_put_irq(struct intel_engine_cs *ring)
1349 {
1350         struct drm_device *dev = ring->dev;
1351         struct drm_i915_private *dev_priv = dev->dev_private;
1352         unsigned long flags;
1353
1354         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1355         if (--ring->irq_refcount == 0) {
1356                 if (HAS_L3_DPF(dev) && ring->id == RCS) {
1357                         I915_WRITE_IMR(ring,
1358                                        ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
1359                 } else {
1360                         I915_WRITE_IMR(ring, ~0);
1361                 }
1362                 POSTING_READ(RING_IMR(ring->mmio_base));
1363         }
1364         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1365 }
1366
1367 static int
1368 i965_dispatch_execbuffer(struct intel_engine_cs *ring,
1369                          u64 offset, u32 length,
1370                          unsigned flags)
1371 {
1372         int ret;
1373
1374         ret = intel_ring_begin(ring, 2);
1375         if (ret)
1376                 return ret;
1377
1378         intel_ring_emit(ring,
1379                         MI_BATCH_BUFFER_START |
1380                         MI_BATCH_GTT |
1381                         (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
1382         intel_ring_emit(ring, offset);
1383         intel_ring_advance(ring);
1384
1385         return 0;
1386 }
1387
1388 /* Just a userspace ABI convention to limit the wa batch bo to a reasonable size */
1389 #define I830_BATCH_LIMIT (256*1024)
1390 static int
1391 i830_dispatch_execbuffer(struct intel_engine_cs *ring,
1392                                 u64 offset, u32 len,
1393                                 unsigned flags)
1394 {
1395         int ret;
1396
1397         if (flags & I915_DISPATCH_PINNED) {
1398                 ret = intel_ring_begin(ring, 4);
1399                 if (ret)
1400                         return ret;
1401
1402                 intel_ring_emit(ring, MI_BATCH_BUFFER);
1403                 intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
1404                 intel_ring_emit(ring, offset + len - 8);
1405                 intel_ring_emit(ring, MI_NOOP);
1406                 intel_ring_advance(ring);
1407         } else {
1408                 u32 cs_offset = ring->scratch.gtt_offset;
1409
1410                 if (len > I830_BATCH_LIMIT)
1411                         return -ENOSPC;
1412
1413                 ret = intel_ring_begin(ring, 9+3);
1414                 if (ret)
1415                         return ret;
1416                 /* Blit the batch (which now has all relocs applied) to the stable batch
1417                  * scratch bo area (so that the CS never stumbles over its tlb
1418                  * invalidation bug) ... */
1419                 intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
1420                                 XY_SRC_COPY_BLT_WRITE_ALPHA |
1421                                 XY_SRC_COPY_BLT_WRITE_RGB);
1422                 intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
1423                 intel_ring_emit(ring, 0);
1424                 intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
1425                 intel_ring_emit(ring, cs_offset);
1426                 intel_ring_emit(ring, 0);
1427                 intel_ring_emit(ring, 4096);
1428                 intel_ring_emit(ring, offset);
1429                 intel_ring_emit(ring, MI_FLUSH);
1430
1431                 /* ... and execute it. */
1432                 intel_ring_emit(ring, MI_BATCH_BUFFER);
1433                 intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
1434                 intel_ring_emit(ring, cs_offset + len - 8);
1435                 intel_ring_advance(ring);
1436         }
1437
1438         return 0;
1439 }
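/*
 * The 9+3 reservation above covers the nine dwords of the XY_SRC_COPY
 * blit that copies the batch into the stable scratch bo and the three
 * dwords of the MI_BATCH_BUFFER that then executes it from there.
 */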
1440
1441 static int
1442 i915_dispatch_execbuffer(struct intel_engine_cs *ring,
1443                          u64 offset, u32 len,
1444                          unsigned flags)
1445 {
1446         int ret;
1447
1448         ret = intel_ring_begin(ring, 2);
1449         if (ret)
1450                 return ret;
1451
1452         intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
1453         intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
1454         intel_ring_advance(ring);
1455
1456         return 0;
1457 }
1458
1459 static void cleanup_status_page(struct intel_engine_cs *ring)
1460 {
1461         struct drm_i915_gem_object *obj;
1462
1463         obj = ring->status_page.obj;
1464         if (obj == NULL)
1465                 return;
1466
1467         kunmap(sg_page(obj->pages->sgl));
1468         i915_gem_object_ggtt_unpin(obj);
1469         drm_gem_object_unreference(&obj->base);
1470         ring->status_page.obj = NULL;
1471 }
1472
1473 static int init_status_page(struct intel_engine_cs *ring)
1474 {
1475         struct drm_i915_gem_object *obj;
1476
1477         if ((obj = ring->status_page.obj) == NULL) {
1478                 unsigned flags;
1479                 int ret;
1480
1481                 obj = i915_gem_alloc_object(ring->dev, 4096);
1482                 if (obj == NULL) {
1483                         DRM_ERROR("Failed to allocate status page\n");
1484                         return -ENOMEM;
1485                 }
1486
1487                 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
1488                 if (ret)
1489                         goto err_unref;
1490
1491                 flags = 0;
1492                 if (!HAS_LLC(ring->dev))
1493                         /* On g33, we cannot place HWS above 256MiB, so
1494                          * restrict its pinning to the low mappable arena.
1495                          * Though this restriction is not documented for
1496                          * gen4, gen5, or byt, they also behave similarly
1497                          * and hang if the HWS is placed at the top of the
1498                          * GTT. To generalise, it appears that all !llc
1499                          * platforms have issues with us placing the HWS
1500                          * above the mappable region (even though we never
1501                  * actually map it).
1502                          */
1503                         flags |= PIN_MAPPABLE;
1504                 ret = i915_gem_obj_ggtt_pin(obj, 4096, flags);
1505                 if (ret) {
1506 err_unref:
1507                         drm_gem_object_unreference(&obj->base);
1508                         return ret;
1509                 }
1510
1511                 ring->status_page.obj = obj;
1512         }
1513
1514         ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
1515         ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
1516         memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1517
1518         DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
1519                         ring->name, ring->status_page.gfx_addr);
1520
1521         return 0;
1522 }
1523
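/*
 * Chipsets without a GTT-addressable hardware status page fall back to a
 * coherent PCI allocation, so the hardware can be pointed at the page's
 * physical address instead of a GGTT offset.
 */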
1524 static int init_phys_status_page(struct intel_engine_cs *ring)
1525 {
1526         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1527
1528         if (!dev_priv->status_page_dmah) {
1529                 dev_priv->status_page_dmah =
1530                         drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
1531                 if (!dev_priv->status_page_dmah)
1532                         return -ENOMEM;
1533         }
1534
1535         ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1536         memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1537
1538         return 0;
1539 }
1540
1541 void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
1542 {
1543         if (!ringbuf->obj)
1544                 return;
1545
1546         iounmap(ringbuf->virtual_start);
1547         i915_gem_object_ggtt_unpin(ringbuf->obj);
1548         drm_gem_object_unreference(&ringbuf->obj->base);
1549         ringbuf->obj = NULL;
1550 }
1551
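/*
 * The ring is only ever written by the CPU through the GTT aperture, so on
 * !LLC platforms prefer stolen memory for the backing object, pin it into
 * the mappable GGTT and map it write-combined for cheap tail writes.
 */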
1552 int intel_alloc_ringbuffer_obj(struct drm_device *dev,
1553                                struct intel_ringbuffer *ringbuf)
1554 {
1555         struct drm_i915_private *dev_priv = to_i915(dev);
1556         struct drm_i915_gem_object *obj;
1557         int ret;
1558
1559         if (ringbuf->obj)
1560                 return 0;
1561
1562         obj = NULL;
1563         if (!HAS_LLC(dev))
1564                 obj = i915_gem_object_create_stolen(dev, ringbuf->size);
1565         if (obj == NULL)
1566                 obj = i915_gem_alloc_object(dev, ringbuf->size);
1567         if (obj == NULL)
1568                 return -ENOMEM;
1569
1570         /* mark ring buffers as read-only from GPU side by default */
1571         obj->gt_ro = 1;
1572
1573         ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
1574         if (ret)
1575                 goto err_unref;
1576
1577         ret = i915_gem_object_set_to_gtt_domain(obj, true);
1578         if (ret)
1579                 goto err_unpin;
1580
1581         ringbuf->virtual_start =
1582                 ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
1583                                 ringbuf->size);
1584         if (ringbuf->virtual_start == NULL) {
1585                 ret = -EINVAL;
1586                 goto err_unpin;
1587         }
1588
1589         ringbuf->obj = obj;
1590         return 0;
1591
1592 err_unpin:
1593         i915_gem_object_ggtt_unpin(obj);
1594 err_unref:
1595         drm_gem_object_unreference(&obj->base);
1596         return ret;
1597 }
1598
1599 static int intel_init_ring_buffer(struct drm_device *dev,
1600                                   struct intel_engine_cs *ring)
1601 {
1602         struct intel_ringbuffer *ringbuf = ring->buffer;
1603         int ret;
1604
1605         if (ringbuf == NULL) {
1606                 ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
1607                 if (!ringbuf)
1608                         return -ENOMEM;
1609                 ring->buffer = ringbuf;
1610         }
1611
1612         ring->dev = dev;
1613         INIT_LIST_HEAD(&ring->active_list);
1614         INIT_LIST_HEAD(&ring->request_list);
1615         INIT_LIST_HEAD(&ring->execlist_queue);
1616         ringbuf->size = 32 * PAGE_SIZE;
1617         ringbuf->ring = ring;
1618         memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
1619
1620         init_waitqueue_head(&ring->irq_queue);
1621
1622         if (I915_NEED_GFX_HWS(dev)) {
1623                 ret = init_status_page(ring);
1624                 if (ret)
1625                         goto error;
1626         } else {
1627                 BUG_ON(ring->id != RCS);
1628                 ret = init_phys_status_page(ring);
1629                 if (ret)
1630                         goto error;
1631         }
1632
1633         ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
1634         if (ret) {
1635                 DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
1636                 goto error;
1637         }
1638
1639         /* Workaround an erratum on the i830 which causes a hang if
1640          * the TAIL pointer points to within the last 2 cachelines
1641          * of the buffer.
1642          */
1643         ringbuf->effective_size = ringbuf->size;
1644         if (IS_I830(dev) || IS_845G(dev))
1645                 ringbuf->effective_size -= 2 * CACHELINE_BYTES;
1646
1647         ret = i915_cmd_parser_init_ring(ring);
1648         if (ret)
1649                 goto error;
1650
1651         ret = ring->init(ring);
1652         if (ret)
1653                 goto error;
1654
1655         return 0;
1656
1657 error:
1658         kfree(ringbuf);
1659         ring->buffer = NULL;
1660         return ret;
1661 }
1662
1663 void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
1664 {
1665         struct drm_i915_private *dev_priv = to_i915(ring->dev);
1666         struct intel_ringbuffer *ringbuf = ring->buffer;
1667
1668         if (!intel_ring_initialized(ring))
1669                 return;
1670
1671         intel_stop_ring_buffer(ring);
1672         WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
1673
1674         intel_destroy_ringbuffer_obj(ringbuf);
1675         ring->preallocated_lazy_request = NULL;
1676         ring->outstanding_lazy_seqno = 0;
1677
1678         if (ring->cleanup)
1679                 ring->cleanup(ring);
1680
1681         cleanup_status_page(ring);
1682
1683         i915_cmd_parser_fini_ring(ring);
1684
1685         kfree(ringbuf);
1686         ring->buffer = NULL;
1687 }
1688
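/*
 * Free up at least @n bytes of ring space by retiring requests: reuse any
 * space released by the last retirement first, otherwise wait on the oldest
 * request whose completion frees enough space.
 */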
1689 static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
1690 {
1691         struct intel_ringbuffer *ringbuf = ring->buffer;
1692         struct drm_i915_gem_request *request;
1693         u32 seqno = 0;
1694         int ret;
1695
1696         if (ringbuf->last_retired_head != -1) {
1697                 ringbuf->head = ringbuf->last_retired_head;
1698                 ringbuf->last_retired_head = -1;
1699
1700                 ringbuf->space = intel_ring_space(ringbuf);
1701                 if (ringbuf->space >= n)
1702                         return 0;
1703         }
1704
1705         list_for_each_entry(request, &ring->request_list, list) {
1706                 if (__intel_ring_space(request->tail, ringbuf->tail,
1707                                        ringbuf->size) >= n) {
1708                         seqno = request->seqno;
1709                         break;
1710                 }
1711         }
1712
1713         if (seqno == 0)
1714                 return -ENOSPC;
1715
1716         ret = i915_wait_seqno(ring, seqno);
1717         if (ret)
1718                 return ret;
1719
1720         i915_gem_retire_requests_ring(ring);
1721         ringbuf->head = ringbuf->last_retired_head;
1722         ringbuf->last_retired_head = -1;
1723
1724         ringbuf->space = intel_ring_space(ringbuf);
1725         return 0;
1726 }
1727
1728 static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
1729 {
1730         struct drm_device *dev = ring->dev;
1731         struct drm_i915_private *dev_priv = dev->dev_private;
1732         struct intel_ringbuffer *ringbuf = ring->buffer;
1733         unsigned long end;
1734         int ret;
1735
1736         ret = intel_ring_wait_request(ring, n);
1737         if (ret != -ENOSPC)
1738                 return ret;
1739
1740         /* force the tail write in case we have been skipping them */
1741         __intel_ring_advance(ring);
1742
1743         /* With GEM the hangcheck timer should kick us out of the loop;
1744          * leaving it early runs the risk of corrupting GEM state (due
1745          * to running on almost untested codepaths). But on resume
1746          * timers don't work yet, so prevent a complete hang in that
1747          * case by choosing an insanely large timeout. */
1748         end = jiffies + 60 * HZ;
1749
1750         trace_i915_ring_wait_begin(ring);
1751         do {
1752                 ringbuf->head = I915_READ_HEAD(ring);
1753                 ringbuf->space = intel_ring_space(ringbuf);
1754                 if (ringbuf->space >= n) {
1755                         ret = 0;
1756                         break;
1757                 }
1758
1759                 if (!drm_core_check_feature(dev, DRIVER_MODESET) &&
1760                     dev->primary->master) {
1761                         struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1762                         if (master_priv->sarea_priv)
1763                                 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1764                 }
1765
1766                 msleep(1);
1767
1768                 if (dev_priv->mm.interruptible && signal_pending(current)) {
1769                         ret = -ERESTARTSYS;
1770                         break;
1771                 }
1772
1773                 ret = i915_gem_check_wedge(&dev_priv->gpu_error,
1774                                            dev_priv->mm.interruptible);
1775                 if (ret)
1776                         break;
1777
1778                 if (time_after(jiffies, end)) {
1779                         ret = -EBUSY;
1780                         break;
1781                 }
1782         } while (1);
1783         trace_i915_ring_wait_end(ring);
1784         return ret;
1785 }
1786
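/*
 * Pad the remainder of the ring with MI_NOOPs so that the next command
 * sequence starts again at offset 0, waiting for that tail space to drain
 * first if necessary.
 */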
1787 static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
1788 {
1789         uint32_t __iomem *virt;
1790         struct intel_ringbuffer *ringbuf = ring->buffer;
1791         int rem = ringbuf->size - ringbuf->tail;
1792
1793         if (ringbuf->space < rem) {
1794                 int ret = ring_wait_for_space(ring, rem);
1795                 if (ret)
1796                         return ret;
1797         }
1798
1799         virt = ringbuf->virtual_start + ringbuf->tail;
1800         rem /= 4;
1801         while (rem--)
1802                 iowrite32(MI_NOOP, virt++);
1803
1804         ringbuf->tail = 0;
1805         ringbuf->space = intel_ring_space(ringbuf);
1806
1807         return 0;
1808 }
1809
1810 int intel_ring_idle(struct intel_engine_cs *ring)
1811 {
1812         u32 seqno;
1813         int ret;
1814
1815         /* We need to add any requests required to flush the objects and ring */
1816         if (ring->outstanding_lazy_seqno) {
1817                 ret = i915_add_request(ring, NULL);
1818                 if (ret)
1819                         return ret;
1820         }
1821
1822         /* Wait upon the last request to be completed */
1823         if (list_empty(&ring->request_list))
1824                 return 0;
1825
1826         seqno = list_entry(ring->request_list.prev,
1827                            struct drm_i915_gem_request,
1828                            list)->seqno;
1829
1830         return i915_wait_seqno(ring, seqno);
1831 }
1832
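/*
 * Reserve the lazy seqno (and preallocate the request that will carry it)
 * before any commands are emitted, so that completing the request later
 * cannot fail on a memory allocation after ring space has been consumed.
 */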
1833 static int
1834 intel_ring_alloc_seqno(struct intel_engine_cs *ring)
1835 {
1836         if (ring->outstanding_lazy_seqno)
1837                 return 0;
1838
1839         if (ring->preallocated_lazy_request == NULL) {
1840                 struct drm_i915_gem_request *request;
1841
1842                 request = kmalloc(sizeof(*request), GFP_KERNEL);
1843                 if (request == NULL)
1844                         return -ENOMEM;
1845
1846                 ring->preallocated_lazy_request = request;
1847         }
1848
1849         return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
1850 }
1851
1852 static int __intel_ring_prepare(struct intel_engine_cs *ring,
1853                                 int bytes)
1854 {
1855         struct intel_ringbuffer *ringbuf = ring->buffer;
1856         int ret;
1857
1858         if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
1859                 ret = intel_wrap_ring_buffer(ring);
1860                 if (unlikely(ret))
1861                         return ret;
1862         }
1863
1864         if (unlikely(ringbuf->space < bytes)) {
1865                 ret = ring_wait_for_space(ring, bytes);
1866                 if (unlikely(ret))
1867                         return ret;
1868         }
1869
1870         return 0;
1871 }
1872
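/*
 * intel_ring_begin() reserves space for @num_dwords and is expected to be
 * followed by that many intel_ring_emit() calls and an intel_ring_advance().
 * An illustrative caller:
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */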
1873 int intel_ring_begin(struct intel_engine_cs *ring,
1874                      int num_dwords)
1875 {
1876         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1877         int ret;
1878
1879         ret = i915_gem_check_wedge(&dev_priv->gpu_error,
1880                                    dev_priv->mm.interruptible);
1881         if (ret)
1882                 return ret;
1883
1884         ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
1885         if (ret)
1886                 return ret;
1887
1888         /* Preallocate the olr before touching the ring */
1889         ret = intel_ring_alloc_seqno(ring);
1890         if (ret)
1891                 return ret;
1892
1893         ring->buffer->space -= num_dwords * sizeof(uint32_t);
1894         return 0;
1895 }
1896
1897 /* Align the ring tail to a cacheline boundary */
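/* (For example, with 64-byte cachelines and the tail 36 bytes into a line,
 * nine dwords are already used and seven MI_NOOPs are emitted to reach the
 * next boundary.)
 */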
1898 int intel_ring_cacheline_align(struct intel_engine_cs *ring)
1899 {
1900         int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
1901         int ret;
1902
1903         if (num_dwords == 0)
1904                 return 0;
1905
1906         num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
1907         ret = intel_ring_begin(ring, num_dwords);
1908         if (ret)
1909                 return ret;
1910
1911         while (num_dwords--)
1912                 intel_ring_emit(ring, MI_NOOP);
1913
1914         intel_ring_advance(ring);
1915
1916         return 0;
1917 }
1918
1919 void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
1920 {
1921         struct drm_device *dev = ring->dev;
1922         struct drm_i915_private *dev_priv = dev->dev_private;
1923
1924         BUG_ON(ring->outstanding_lazy_seqno);
1925
1926         if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
1927                 I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
1928                 I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
1929                 if (HAS_VEBOX(dev))
1930                         I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
1931         }
1932
1933         ring->set_seqno(ring, seqno);
1934         ring->hangcheck.seqno = seqno;
1935 }
1936
1937 static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
1938                                      u32 value)
1939 {
1940         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1941
1942         /* Every tail move must follow the sequence below */
1943
1944         /* Disable notification that the ring is IDLE. The GT
1945          * will then assume that it is busy and bring it out of rc6.
1946          */
1947         I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1948                    _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1949
1950         /* Clear the context id. Here be magic! */
1951         I915_WRITE64(GEN6_BSD_RNCID, 0x0);
1952
1953         /* Wait for the ring not to be idle, i.e. for it to wake up. */
1954         if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
1955                       GEN6_BSD_SLEEP_INDICATOR) == 0,
1956                      50))
1957                 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
1958
1959         /* Now that the ring is fully powered up, update the tail */
1960         I915_WRITE_TAIL(ring, value);
1961         POSTING_READ(RING_TAIL(ring->mmio_base));
1962
1963         /* Let the ring send IDLE messages to the GT again,
1964          * and so let it sleep to conserve power when idle.
1965          */
1966         I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1967                    _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1968 }
1969
1970 static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
1971                                u32 invalidate, u32 flush)
1972 {
1973         uint32_t cmd;
1974         int ret;
1975
1976         ret = intel_ring_begin(ring, 4);
1977         if (ret)
1978                 return ret;
1979
1980         cmd = MI_FLUSH_DW;
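        /* Gen8 emits a 64-bit post-sync address (an extra dword), so the
         * command length field grows by one.
         */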
1981         if (INTEL_INFO(ring->dev)->gen >= 8)
1982                 cmd += 1;
1983         /*
1984          * Bspec vol 1c.5 - video engine command streamer:
1985          * "If ENABLED, all TLBs will be invalidated once the flush
1986          * operation is complete. This bit is only valid when the
1987          * Post-Sync Operation field is a value of 1h or 3h."
1988          */
1989         if (invalidate & I915_GEM_GPU_DOMAINS)
1990                 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
1991                         MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1992         intel_ring_emit(ring, cmd);
1993         intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
1994         if (INTEL_INFO(ring->dev)->gen >= 8) {
1995                 intel_ring_emit(ring, 0); /* upper addr */
1996                 intel_ring_emit(ring, 0); /* value */
1997         } else  {
1998                 intel_ring_emit(ring, 0);
1999                 intel_ring_emit(ring, MI_NOOP);
2000         }
2001         intel_ring_advance(ring);
2002         return 0;
2003 }
2004
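/*
 * On gen8 MI_BATCH_BUFFER_START carries a 48-bit address split across two
 * dwords, and bit 8 selects the PPGTT (rather than the GGTT) as the address
 * space for the batch.
 */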
2005 static int
2006 gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
2007                               u64 offset, u32 len,
2008                               unsigned flags)
2009 {
2010         bool ppgtt = USES_PPGTT(ring->dev) && !(flags & I915_DISPATCH_SECURE);
2011         int ret;
2012
2013         ret = intel_ring_begin(ring, 4);
2014         if (ret)
2015                 return ret;
2016
2017         /* FIXME(BDW): Address space and security selectors. */
2018         intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
2019         intel_ring_emit(ring, lower_32_bits(offset));
2020         intel_ring_emit(ring, upper_32_bits(offset));
2021         intel_ring_emit(ring, MI_NOOP);
2022         intel_ring_advance(ring);
2023
2024         return 0;
2025 }
2026
2027 static int
2028 hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
2029                               u64 offset, u32 len,
2030                               unsigned flags)
2031 {
2032         int ret;
2033
2034         ret = intel_ring_begin(ring, 2);
2035         if (ret)
2036                 return ret;
2037
2038         intel_ring_emit(ring,
2039                         MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
2040                         (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
2041         /* bits 0-7 are the length on GEN6+ */
2042         intel_ring_emit(ring, offset);
2043         intel_ring_advance(ring);
2044
2045         return 0;
2046 }
2047
2048 static int
2049 gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
2050                               u64 offset, u32 len,
2051                               unsigned flags)
2052 {
2053         int ret;
2054
2055         ret = intel_ring_begin(ring, 2);
2056         if (ret)
2057                 return ret;
2058
2059         intel_ring_emit(ring,
2060                         MI_BATCH_BUFFER_START |
2061                         (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
2062         /* bits 0-7 are the length on GEN6+ */
2063         intel_ring_emit(ring, offset);
2064         intel_ring_advance(ring);
2065
2066         return 0;
2067 }
2068
2069 /* Blitter support (SandyBridge+) */
2070
2071 static int gen6_ring_flush(struct intel_engine_cs *ring,
2072                            u32 invalidate, u32 flush)
2073 {
2074         struct drm_device *dev = ring->dev;
2075         uint32_t cmd;
2076         int ret;
2077
2078         ret = intel_ring_begin(ring, 4);
2079         if (ret)
2080                 return ret;
2081
2082         cmd = MI_FLUSH_DW;
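        /* As on the video ring, gen8 adds an upper-address dword to
         * MI_FLUSH_DW, so bump the command length by one.
         */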
2083         if (INTEL_INFO(ring->dev)->gen >= 8)
2084                 cmd += 1;
2085         /*
2086          * Bspec vol 1c.3 - blitter engine command streamer:
2087          * "If ENABLED, all TLBs will be invalidated once the flush
2088          * operation is complete. This bit is only valid when the
2089          * Post-Sync Operation field is a value of 1h or 3h."
2090          */
2091         if (invalidate & I915_GEM_DOMAIN_RENDER)
2092                 cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
2093                         MI_FLUSH_DW_OP_STOREDW;
2094         intel_ring_emit(ring, cmd);
2095         intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
2096         if (INTEL_INFO(ring->dev)->gen >= 8) {
2097                 intel_ring_emit(ring, 0); /* upper addr */
2098                 intel_ring_emit(ring, 0); /* value */
2099         } else  {
2100                 intel_ring_emit(ring, 0);
2101                 intel_ring_emit(ring, MI_NOOP);
2102         }
2103         intel_ring_advance(ring);
2104
2105         if (IS_GEN7(dev) && !invalidate && flush)
2106                 return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
2107
2108         return 0;
2109 }
2110
2111 int intel_init_render_ring_buffer(struct drm_device *dev)
2112 {
2113         struct drm_i915_private *dev_priv = dev->dev_private;
2114         struct intel_engine_cs *ring = &dev_priv->ring[RCS];
2115         struct drm_i915_gem_object *obj;
2116         int ret;
2117
2118         ring->name = "render ring";
2119         ring->id = RCS;
2120         ring->mmio_base = RENDER_RING_BASE;
2121
2122         if (INTEL_INFO(dev)->gen >= 8) {
2123                 if (i915_semaphore_is_enabled(dev)) {
2124                         obj = i915_gem_alloc_object(dev, 4096);
2125                         if (obj == NULL) {
2126                                 DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
2127                                 i915.semaphores = 0;
2128                         } else {
2129                                 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
2130                                 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
2131                                 if (ret != 0) {
2132                                         drm_gem_object_unreference(&obj->base);
2133                                         DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
2134                                         i915.semaphores = 0;
2135                                 } else
2136                                         dev_priv->semaphore_obj = obj;
2137                         }
2138                 }
2139                 ring->add_request = gen6_add_request;
2140                 ring->flush = gen8_render_ring_flush;
2141                 ring->irq_get = gen8_ring_get_irq;
2142                 ring->irq_put = gen8_ring_put_irq;
2143                 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
2144                 ring->get_seqno = gen6_ring_get_seqno;
2145                 ring->set_seqno = ring_set_seqno;
2146                 if (i915_semaphore_is_enabled(dev)) {
2147                         WARN_ON(!dev_priv->semaphore_obj);
2148                         ring->semaphore.sync_to = gen8_ring_sync;
2149                         ring->semaphore.signal = gen8_rcs_signal;
2150                         GEN8_RING_SEMAPHORE_INIT;
2151                 }
2152         } else if (INTEL_INFO(dev)->gen >= 6) {
2153                 ring->add_request = gen6_add_request;
2154                 ring->flush = gen7_render_ring_flush;
2155                 if (INTEL_INFO(dev)->gen == 6)
2156                         ring->flush = gen6_render_ring_flush;
2157                 ring->irq_get = gen6_ring_get_irq;
2158                 ring->irq_put = gen6_ring_put_irq;
2159                 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
2160                 ring->get_seqno = gen6_ring_get_seqno;
2161                 ring->set_seqno = ring_set_seqno;
2162                 if (i915_semaphore_is_enabled(dev)) {
2163                         ring->semaphore.sync_to = gen6_ring_sync;
2164                         ring->semaphore.signal = gen6_signal;
2165                         /*
2166                          * The current semaphore is only applied on pre-gen8
2167                          * platform.  And there is no VCS2 ring on the pre-gen8
2168                          * platform. So the semaphore between RCS and VCS2 is
2169                          * initialized as INVALID.  Gen8 will initialize the
2170                          * sema between VCS2 and RCS later.
2171                          */
2172                         ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
2173                         ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
2174                         ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
2175                         ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
2176                         ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2177                         ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
2178                         ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
2179                         ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
2180                         ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
2181                         ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2182                 }
2183         } else if (IS_GEN5(dev)) {
2184                 ring->add_request = pc_render_add_request;
2185                 ring->flush = gen4_render_ring_flush;
2186                 ring->get_seqno = pc_render_get_seqno;
2187                 ring->set_seqno = pc_render_set_seqno;
2188                 ring->irq_get = gen5_ring_get_irq;
2189                 ring->irq_put = gen5_ring_put_irq;
2190                 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
2191                                         GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
2192         } else {
2193                 ring->add_request = i9xx_add_request;
2194                 if (INTEL_INFO(dev)->gen < 4)
2195                         ring->flush = gen2_render_ring_flush;
2196                 else
2197                         ring->flush = gen4_render_ring_flush;
2198                 ring->get_seqno = ring_get_seqno;
2199                 ring->set_seqno = ring_set_seqno;
2200                 if (IS_GEN2(dev)) {
2201                         ring->irq_get = i8xx_ring_get_irq;
2202                         ring->irq_put = i8xx_ring_put_irq;
2203                 } else {
2204                         ring->irq_get = i9xx_ring_get_irq;
2205                         ring->irq_put = i9xx_ring_put_irq;
2206                 }
2207                 ring->irq_enable_mask = I915_USER_INTERRUPT;
2208         }
2209         ring->write_tail = ring_write_tail;
2210
2211         if (IS_HASWELL(dev))
2212                 ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
2213         else if (IS_GEN8(dev))
2214                 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2215         else if (INTEL_INFO(dev)->gen >= 6)
2216                 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2217         else if (INTEL_INFO(dev)->gen >= 4)
2218                 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
2219         else if (IS_I830(dev) || IS_845G(dev))
2220                 ring->dispatch_execbuffer = i830_dispatch_execbuffer;
2221         else
2222                 ring->dispatch_execbuffer = i915_dispatch_execbuffer;
2223         ring->init = init_render_ring;
2224         ring->cleanup = render_ring_cleanup;
2225
2226         /* Workaround batchbuffer to combat CS tlb bug. */
2227         if (HAS_BROKEN_CS_TLB(dev)) {
2228                 obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
2229                 if (obj == NULL) {
2230                         DRM_ERROR("Failed to allocate batch bo\n");
2231                         return -ENOMEM;
2232                 }
2233
2234                 ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
2235                 if (ret != 0) {
2236                         drm_gem_object_unreference(&obj->base);
2237                         DRM_ERROR("Failed to pin batch bo\n");
2238                         return ret;
2239                 }
2240
2241                 ring->scratch.obj = obj;
2242                 ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
2243         }
2244
2245         return intel_init_ring_buffer(dev, ring);
2246 }
2247
2248 int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
2249 {
2250         struct drm_i915_private *dev_priv = dev->dev_private;
2251         struct intel_engine_cs *ring = &dev_priv->ring[RCS];
2252         struct intel_ringbuffer *ringbuf = ring->buffer;
2253         int ret;
2254
2255         if (ringbuf == NULL) {
2256                 ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
2257                 if (!ringbuf)
2258                         return -ENOMEM;
2259                 ring->buffer = ringbuf;
2260         }
2261
2262         ring->name = "render ring";
2263         ring->id = RCS;
2264         ring->mmio_base = RENDER_RING_BASE;
2265
2266         if (INTEL_INFO(dev)->gen >= 6) {
2267                 /* non-kms not supported on gen6+ */
2268                 ret = -ENODEV;
2269                 goto err_ringbuf;
2270         }
2271
2272         /* Note: gem is not supported on gen5/ilk without kms (the corresponding
2273          * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
2274          * the special gen5 functions. */
2275         ring->add_request = i9xx_add_request;
2276         if (INTEL_INFO(dev)->gen < 4)
2277                 ring->flush = gen2_render_ring_flush;
2278         else
2279                 ring->flush = gen4_render_ring_flush;
2280         ring->get_seqno = ring_get_seqno;
2281         ring->set_seqno = ring_set_seqno;
2282         if (IS_GEN2(dev)) {
2283                 ring->irq_get = i8xx_ring_get_irq;
2284                 ring->irq_put = i8xx_ring_put_irq;
2285         } else {
2286                 ring->irq_get = i9xx_ring_get_irq;
2287                 ring->irq_put = i9xx_ring_put_irq;
2288         }
2289         ring->irq_enable_mask = I915_USER_INTERRUPT;
2290         ring->write_tail = ring_write_tail;
2291         if (INTEL_INFO(dev)->gen >= 4)
2292                 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
2293         else if (IS_I830(dev) || IS_845G(dev))
2294                 ring->dispatch_execbuffer = i830_dispatch_execbuffer;
2295         else
2296                 ring->dispatch_execbuffer = i915_dispatch_execbuffer;
2297         ring->init = init_render_ring;
2298         ring->cleanup = render_ring_cleanup;
2299
2300         ring->dev = dev;
2301         INIT_LIST_HEAD(&ring->active_list);
2302         INIT_LIST_HEAD(&ring->request_list);
2303
2304         ringbuf->size = size;
2305         ringbuf->effective_size = ringbuf->size;
2306         if (IS_I830(ring->dev) || IS_845G(ring->dev))
2307                 ringbuf->effective_size -= 2 * CACHELINE_BYTES;
2308
2309         ringbuf->virtual_start = ioremap_wc(start, size);
2310         if (ringbuf->virtual_start == NULL) {
2311                 DRM_ERROR("cannot ioremap virtual address for"
2312                           " ring buffer\n");
2313                 ret = -ENOMEM;
2314                 goto err_ringbuf;
2315         }
2316
2317         if (!I915_NEED_GFX_HWS(dev)) {
2318                 ret = init_phys_status_page(ring);
2319                 if (ret)
2320                         goto err_vstart;
2321         }
2322
2323         return 0;
2324
2325 err_vstart:
2326         iounmap(ringbuf->virtual_start);
2327 err_ringbuf:
2328         kfree(ringbuf);
2329         ring->buffer = NULL;
2330         return ret;
2331 }
2332
2333 int intel_init_bsd_ring_buffer(struct drm_device *dev)
2334 {
2335         struct drm_i915_private *dev_priv = dev->dev_private;
2336         struct intel_engine_cs *ring = &dev_priv->ring[VCS];
2337
2338         ring->name = "bsd ring";
2339         ring->id = VCS;
2340
2341         ring->write_tail = ring_write_tail;
2342         if (INTEL_INFO(dev)->gen >= 6) {
2343                 ring->mmio_base = GEN6_BSD_RING_BASE;
2344                 /* gen6 bsd needs a special wa for tail updates */
2345                 if (IS_GEN6(dev))
2346                         ring->write_tail = gen6_bsd_ring_write_tail;
2347                 ring->flush = gen6_bsd_ring_flush;
2348                 ring->add_request = gen6_add_request;
2349                 ring->get_seqno = gen6_ring_get_seqno;
2350                 ring->set_seqno = ring_set_seqno;
2351                 if (INTEL_INFO(dev)->gen >= 8) {
2352                         ring->irq_enable_mask =
2353                                 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
2354                         ring->irq_get = gen8_ring_get_irq;
2355                         ring->irq_put = gen8_ring_put_irq;
2356                         ring->dispatch_execbuffer =
2357                                 gen8_ring_dispatch_execbuffer;
2358                         if (i915_semaphore_is_enabled(dev)) {
2359                                 ring->semaphore.sync_to = gen8_ring_sync;
2360                                 ring->semaphore.signal = gen8_xcs_signal;
2361                                 GEN8_RING_SEMAPHORE_INIT;
2362                         }
2363                 } else {
2364                         ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
2365                         ring->irq_get = gen6_ring_get_irq;
2366                         ring->irq_put = gen6_ring_put_irq;
2367                         ring->dispatch_execbuffer =
2368                                 gen6_ring_dispatch_execbuffer;
2369                         if (i915_semaphore_is_enabled(dev)) {
2370                                 ring->semaphore.sync_to = gen6_ring_sync;
2371                                 ring->semaphore.signal = gen6_signal;
2372                                 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
2373                                 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
2374                                 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
2375                                 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
2376                                 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2377                                 ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
2378                                 ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
2379                                 ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
2380                                 ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
2381                                 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2382                         }
2383                 }
2384         } else {
2385                 ring->mmio_base = BSD_RING_BASE;
2386                 ring->flush = bsd_ring_flush;
2387                 ring->add_request = i9xx_add_request;
2388                 ring->get_seqno = ring_get_seqno;
2389                 ring->set_seqno = ring_set_seqno;
2390                 if (IS_GEN5(dev)) {
2391                         ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
2392                         ring->irq_get = gen5_ring_get_irq;
2393                         ring->irq_put = gen5_ring_put_irq;
2394                 } else {
2395                         ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
2396                         ring->irq_get = i9xx_ring_get_irq;
2397                         ring->irq_put = i9xx_ring_put_irq;
2398                 }
2399                 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
2400         }
2401         ring->init = init_ring_common;
2402
2403         return intel_init_ring_buffer(dev, ring);
2404 }
2405
2406 /**
2407  * Initialize the second BSD ring for Broadwell GT3.
2408  * Note that this ring only exists on Broadwell GT3.
2409  */
2410 int intel_init_bsd2_ring_buffer(struct drm_device *dev)
2411 {
2412         struct drm_i915_private *dev_priv = dev->dev_private;
2413         struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
2414
2415         if (INTEL_INFO(dev)->gen != 8) {
2416                 DRM_ERROR("No dual-BSD ring on non-BDW machine\n");
2417                 return -EINVAL;
2418         }
2419
2420         ring->name = "bsd2 ring";
2421         ring->id = VCS2;
2422
2423         ring->write_tail = ring_write_tail;
2424         ring->mmio_base = GEN8_BSD2_RING_BASE;
2425         ring->flush = gen6_bsd_ring_flush;
2426         ring->add_request = gen6_add_request;
2427         ring->get_seqno = gen6_ring_get_seqno;
2428         ring->set_seqno = ring_set_seqno;
2429         ring->irq_enable_mask =
2430                         GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
2431         ring->irq_get = gen8_ring_get_irq;
2432         ring->irq_put = gen8_ring_put_irq;
2433         ring->dispatch_execbuffer =
2434                         gen8_ring_dispatch_execbuffer;
2435         if (i915_semaphore_is_enabled(dev)) {
2436                 ring->semaphore.sync_to = gen8_ring_sync;
2437                 ring->semaphore.signal = gen8_xcs_signal;
2438                 GEN8_RING_SEMAPHORE_INIT;
2439         }
2440         ring->init = init_ring_common;
2441
2442         return intel_init_ring_buffer(dev, ring);
2443 }
2444
2445 int intel_init_blt_ring_buffer(struct drm_device *dev)
2446 {
2447         struct drm_i915_private *dev_priv = dev->dev_private;
2448         struct intel_engine_cs *ring = &dev_priv->ring[BCS];
2449
2450         ring->name = "blitter ring";
2451         ring->id = BCS;
2452
2453         ring->mmio_base = BLT_RING_BASE;
2454         ring->write_tail = ring_write_tail;
2455         ring->flush = gen6_ring_flush;
2456         ring->add_request = gen6_add_request;
2457         ring->get_seqno = gen6_ring_get_seqno;
2458         ring->set_seqno = ring_set_seqno;
2459         if (INTEL_INFO(dev)->gen >= 8) {
2460                 ring->irq_enable_mask =
2461                         GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
2462                 ring->irq_get = gen8_ring_get_irq;
2463                 ring->irq_put = gen8_ring_put_irq;
2464                 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2465                 if (i915_semaphore_is_enabled(dev)) {
2466                         ring->semaphore.sync_to = gen8_ring_sync;
2467                         ring->semaphore.signal = gen8_xcs_signal;
2468                         GEN8_RING_SEMAPHORE_INIT;
2469                 }
2470         } else {
2471                 ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
2472                 ring->irq_get = gen6_ring_get_irq;
2473                 ring->irq_put = gen6_ring_put_irq;
2474                 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2475                 if (i915_semaphore_is_enabled(dev)) {
2476                         ring->semaphore.signal = gen6_signal;
2477                         ring->semaphore.sync_to = gen6_ring_sync;
2478                         /*
2479                          * The current semaphore is only applied on pre-gen8
2480                          * platform.  And there is no VCS2 ring on the pre-gen8
2481                          * platform. So the semaphore between BCS and VCS2 is
2482                          * initialized as INVALID.  Gen8 will initialize the
2483                          * sema between BCS and VCS2 later.
2484                          */
2485                         ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
2486                         ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
2487                         ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
2488                         ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
2489                         ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2490                         ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
2491                         ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
2492                         ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
2493                         ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
2494                         ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2495                 }
2496         }
2497         ring->init = init_ring_common;
2498
2499         return intel_init_ring_buffer(dev, ring);
2500 }
2501
2502 int intel_init_vebox_ring_buffer(struct drm_device *dev)
2503 {
2504         struct drm_i915_private *dev_priv = dev->dev_private;
2505         struct intel_engine_cs *ring = &dev_priv->ring[VECS];
2506
2507         ring->name = "video enhancement ring";
2508         ring->id = VECS;
2509
2510         ring->mmio_base = VEBOX_RING_BASE;
2511         ring->write_tail = ring_write_tail;
2512         ring->flush = gen6_ring_flush;
2513         ring->add_request = gen6_add_request;
2514         ring->get_seqno = gen6_ring_get_seqno;
2515         ring->set_seqno = ring_set_seqno;
2516
2517         if (INTEL_INFO(dev)->gen >= 8) {
2518                 ring->irq_enable_mask =
2519                         GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
2520                 ring->irq_get = gen8_ring_get_irq;
2521                 ring->irq_put = gen8_ring_put_irq;
2522                 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2523                 if (i915_semaphore_is_enabled(dev)) {
2524                         ring->semaphore.sync_to = gen8_ring_sync;
2525                         ring->semaphore.signal = gen8_xcs_signal;
2526                         GEN8_RING_SEMAPHORE_INIT;
2527                 }
2528         } else {
2529                 ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
2530                 ring->irq_get = hsw_vebox_get_irq;
2531                 ring->irq_put = hsw_vebox_put_irq;
2532                 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2533                 if (i915_semaphore_is_enabled(dev)) {
2534                         ring->semaphore.sync_to = gen6_ring_sync;
2535                         ring->semaphore.signal = gen6_signal;
2536                         ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
2537                         ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
2538                         ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
2539                         ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
2540                         ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2541                         ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
2542                         ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
2543                         ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
2544                         ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
2545                         ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2546                 }
2547         }
2548         ring->init = init_ring_common;
2549
2550         return intel_init_ring_buffer(dev, ring);
2551 }
2552
2553 int
2554 intel_ring_flush_all_caches(struct intel_engine_cs *ring)
2555 {
2556         int ret;
2557
2558         if (!ring->gpu_caches_dirty)
2559                 return 0;
2560
2561         ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
2562         if (ret)
2563                 return ret;
2564
2565         trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
2566
2567         ring->gpu_caches_dirty = false;
2568         return 0;
2569 }
2570
2571 int
2572 intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
2573 {
2574         uint32_t flush_domains;
2575         int ret;
2576
2577         flush_domains = 0;
2578         if (ring->gpu_caches_dirty)
2579                 flush_domains = I915_GEM_GPU_DOMAINS;
2580
2581         ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
2582         if (ret)
2583                 return ret;
2584
2585         trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
2586
2587         ring->gpu_caches_dirty = false;
2588         return 0;
2589 }
2590
2591 void
2592 intel_stop_ring_buffer(struct intel_engine_cs *ring)
2593 {
2594         int ret;
2595
2596         if (!intel_ring_initialized(ring))
2597                 return;
2598
2599         ret = intel_ring_idle(ring);
2600         if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
2601                 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
2602                           ring->name, ret);
2603
2604         stop_ring(ring);
2605 }