1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24
25 #include <linux/circ_buf.h>
26
27 #include "gt/intel_engine_pm.h"
28 #include "gt/intel_lrc_reg.h"
29
30 #include "intel_guc_submission.h"
31 #include "i915_drv.h"
32
33 #define GUC_PREEMPT_FINISHED            0x1
34 #define GUC_PREEMPT_BREADCRUMB_DWORDS   0x8
35 #define GUC_PREEMPT_BREADCRUMB_BYTES    \
36         (sizeof(u32) * GUC_PREEMPT_BREADCRUMB_DWORDS)
37
38 /**
39  * DOC: GuC-based command submission
40  *
41  * GuC client:
42  * An intel_guc_client refers to a submission path through GuC. Currently, there
43  * are two clients. One of them (the execbuf_client) is charged with all
44  * submissions to the GuC, the other one (preempt_client) is responsible for
45  * preempting the execbuf_client. This struct is the owner of a doorbell, a
46  * process descriptor and a workqueue (all of them inside a single gem object
47  * that contains all required pages for these elements).
48  *
49  * GuC stage descriptor:
50  * During initialization, the driver allocates a static pool of 1024 such
51  * descriptors, and shares them with the GuC.
52  * Currently, there exists a 1:1 mapping between an intel_guc_client and a
53  * guc_stage_desc (via the client's stage_id), so effectively only one
54  * descriptor gets used. This descriptor lets the GuC know about the doorbell,
55  * workqueue and process descriptor. Theoretically, it also lets the GuC
56  * know about our HW contexts (context ID, etc...), but we actually
57  * employ a kind of submission where the GuC uses the LRCA sent via the work
58  * item instead (the single guc_stage_desc associated with the execbuf client
59  * contains information about the default kernel context only, but this is
60  * essentially unused). This is called a "proxy" submission.
61  *
62  * The Scratch registers:
63  * There are 16 MMIO-based registers starting at 0xC180. The kernel driver writes
64  * a value to the action register (SOFT_SCRATCH_0) along with any data. It then
65  * triggers an interrupt on the GuC via another register write (0xC4C8).
66  * Firmware writes a success/fail code back to the action register after
67  * processing the request. The kernel driver polls waiting for this update and
68  * then proceeds.
69  * See intel_guc_send()
70  *
71  * Doorbells:
72  * Doorbells are interrupts to the uKernel. A doorbell is a single cache line (QW)
73  * mapped into process space.
74  *
75  * Work Items:
76  * There are several types of work items that the host may place into a
77  * workqueue, each with its own requirements and limitations. Currently only
78  * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
79  * represents an in-order queue. The kernel driver packs the ring tail pointer
80  * and an ELSP context descriptor dword into a Work Item.
81  * See guc_add_request()
82  *
83  */
84
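/*
 * Illustrative sketch only (compiled out): a host-to-GuC request over the
 * scratch registers is just an array of u32s, with the action code in the
 * first dword and any parameters in the following dwords, handed to
 * intel_guc_send(). The doorbell (de)allocation helpers further down follow
 * exactly this pattern; the action and parameter below are only an example.
 */
#if 0
static int example_send_h2g_action(struct intel_guc *guc, u32 stage_id)
{
        u32 action[] = {
                INTEL_GUC_ACTION_ALLOCATE_DOORBELL, /* written to SOFT_SCRATCH_0 */
                stage_id,                           /* additional data dword(s) */
        };

        return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
#endif
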
85 static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
86 {
87         return (i915_ggtt_offset(engine->status_page.vma) +
88                 I915_GEM_HWS_PREEMPT_ADDR);
89 }
90
91 static inline struct i915_priolist *to_priolist(struct rb_node *rb)
92 {
93         return rb_entry(rb, struct i915_priolist, node);
94 }
95
96 static inline bool is_high_priority(struct intel_guc_client *client)
97 {
98         return (client->priority == GUC_CLIENT_PRIORITY_KMD_HIGH ||
99                 client->priority == GUC_CLIENT_PRIORITY_HIGH);
100 }
101
102 static int reserve_doorbell(struct intel_guc_client *client)
103 {
104         unsigned long offset;
105         unsigned long end;
106         u16 id;
107
108         GEM_BUG_ON(client->doorbell_id != GUC_DOORBELL_INVALID);
109
110         /*
111          * The bitmap tracks which doorbell registers are currently in use.
112          * It is split into two halves; the first half is used for normal
113          * priority contexts, the second half for high-priority ones.
114          */
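        /*
         * For example, a normal-priority client searches for a free id in
         * [0, GUC_NUM_DOORBELLS / 2), while a high-priority client searches
         * in [GUC_NUM_DOORBELLS / 2, GUC_NUM_DOORBELLS).
         */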
115         offset = 0;
116         end = GUC_NUM_DOORBELLS / 2;
117         if (is_high_priority(client)) {
118                 offset = end;
119                 end += offset;
120         }
121
122         id = find_next_zero_bit(client->guc->doorbell_bitmap, end, offset);
123         if (id == end)
124                 return -ENOSPC;
125
126         __set_bit(id, client->guc->doorbell_bitmap);
127         client->doorbell_id = id;
128         DRM_DEBUG_DRIVER("client %u (high prio=%s) reserved doorbell: %d\n",
129                          client->stage_id, yesno(is_high_priority(client)),
130                          id);
131         return 0;
132 }
133
134 static bool has_doorbell(struct intel_guc_client *client)
135 {
136         if (client->doorbell_id == GUC_DOORBELL_INVALID)
137                 return false;
138
139         return test_bit(client->doorbell_id, client->guc->doorbell_bitmap);
140 }
141
142 static void unreserve_doorbell(struct intel_guc_client *client)
143 {
144         GEM_BUG_ON(!has_doorbell(client));
145
146         __clear_bit(client->doorbell_id, client->guc->doorbell_bitmap);
147         client->doorbell_id = GUC_DOORBELL_INVALID;
148 }
149
150 /*
151  * Tell the GuC to allocate or deallocate a specific doorbell
152  */
153
154 static int __guc_allocate_doorbell(struct intel_guc *guc, u32 stage_id)
155 {
156         u32 action[] = {
157                 INTEL_GUC_ACTION_ALLOCATE_DOORBELL,
158                 stage_id
159         };
160
161         return intel_guc_send(guc, action, ARRAY_SIZE(action));
162 }
163
164 static int __guc_deallocate_doorbell(struct intel_guc *guc, u32 stage_id)
165 {
166         u32 action[] = {
167                 INTEL_GUC_ACTION_DEALLOCATE_DOORBELL,
168                 stage_id
169         };
170
171         return intel_guc_send(guc, action, ARRAY_SIZE(action));
172 }
173
174 static struct guc_stage_desc *__get_stage_desc(struct intel_guc_client *client)
175 {
176         struct guc_stage_desc *base = client->guc->stage_desc_pool_vaddr;
177
178         return &base[client->stage_id];
179 }
180
181 /*
182  * Initialise, update, or clear doorbell data shared with the GuC
183  *
184  * These functions modify shared data and so need access to the mapped
185  * client object which contains the page being used for the doorbell
186  */
187
188 static void __update_doorbell_desc(struct intel_guc_client *client, u16 new_id)
189 {
190         struct guc_stage_desc *desc;
191
192         /* Update the GuC's idea of the doorbell ID */
193         desc = __get_stage_desc(client);
194         desc->db_id = new_id;
195 }
196
197 static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client)
198 {
199         return client->vaddr + client->doorbell_offset;
200 }
201
202 static bool __doorbell_valid(struct intel_guc *guc, u16 db_id)
203 {
204         struct drm_i915_private *dev_priv = guc_to_i915(guc);
205
206         GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
207         return I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID;
208 }
209
210 static void __init_doorbell(struct intel_guc_client *client)
211 {
212         struct guc_doorbell_info *doorbell;
213
214         doorbell = __get_doorbell(client);
215         doorbell->db_status = GUC_DOORBELL_ENABLED;
216         doorbell->cookie = 0;
217 }
218
219 static void __fini_doorbell(struct intel_guc_client *client)
220 {
221         struct guc_doorbell_info *doorbell;
222         u16 db_id = client->doorbell_id;
223
224         doorbell = __get_doorbell(client);
225         doorbell->db_status = GUC_DOORBELL_DISABLED;
226
227         /* Doorbell release flow requires that we wait for GEN8_DRB_VALID bit
228          * to go to zero after updating db_status before we call the GuC to
229          * release the doorbell
230          */
231         if (wait_for_us(!__doorbell_valid(client->guc, db_id), 10))
232                 WARN_ONCE(true, "Doorbell never became invalid after disable\n");
233 }
234
235 static int create_doorbell(struct intel_guc_client *client)
236 {
237         int ret;
238
239         if (WARN_ON(!has_doorbell(client)))
240                 return -ENODEV; /* internal setup error, should never happen */
241
242         __update_doorbell_desc(client, client->doorbell_id);
243         __init_doorbell(client);
244
245         ret = __guc_allocate_doorbell(client->guc, client->stage_id);
246         if (ret) {
247                 __fini_doorbell(client);
248                 __update_doorbell_desc(client, GUC_DOORBELL_INVALID);
249                 DRM_DEBUG_DRIVER("Couldn't create client %u doorbell: %d\n",
250                                  client->stage_id, ret);
251                 return ret;
252         }
253
254         return 0;
255 }
256
257 static int destroy_doorbell(struct intel_guc_client *client)
258 {
259         int ret;
260
261         GEM_BUG_ON(!has_doorbell(client));
262
263         __fini_doorbell(client);
264         ret = __guc_deallocate_doorbell(client->guc, client->stage_id);
265         if (ret)
266                 DRM_ERROR("Couldn't destroy client %u doorbell: %d\n",
267                           client->stage_id, ret);
268
269         __update_doorbell_desc(client, GUC_DOORBELL_INVALID);
270
271         return ret;
272 }
273
274 static unsigned long __select_cacheline(struct intel_guc *guc)
275 {
276         unsigned long offset;
277
278         /* Doorbell uses a single cache line within a page */
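        /*
         * e.g. with 64-byte cache lines (a typical value, not guaranteed),
         * successive clients get offsets 0x00, 0x40, 0x80, ... within the
         * doorbell page.
         */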
279         offset = offset_in_page(guc->db_cacheline);
280
281         /* Moving to next cache line to reduce contention */
282         guc->db_cacheline += cache_line_size();
283
284         DRM_DEBUG_DRIVER("reserved cacheline 0x%lx, next 0x%x, linesize %u\n",
285                          offset, guc->db_cacheline, cache_line_size());
286         return offset;
287 }
288
289 static inline struct guc_process_desc *
290 __get_process_desc(struct intel_guc_client *client)
291 {
292         return client->vaddr + client->proc_desc_offset;
293 }
294
295 /*
296  * Initialise the process descriptor shared with the GuC firmware.
297  */
298 static void guc_proc_desc_init(struct intel_guc_client *client)
299 {
300         struct guc_process_desc *desc;
301
302         desc = memset(__get_process_desc(client), 0, sizeof(*desc));
303
304         /*
305          * XXX: pDoorbell and WQVBaseAddress are pointers in process address
306          * space for ring3 clients (set them as in mmap_ioctl) or kernel
307          * space for kernel clients (map on demand instead? May make debug
308          * easier to have it mapped).
309          */
310         desc->wq_base_addr = 0;
311         desc->db_base_addr = 0;
312
313         desc->stage_id = client->stage_id;
314         desc->wq_size_bytes = GUC_WQ_SIZE;
315         desc->wq_status = WQ_STATUS_ACTIVE;
316         desc->priority = client->priority;
317 }
318
319 static void guc_proc_desc_fini(struct intel_guc_client *client)
320 {
321         struct guc_process_desc *desc;
322
323         desc = __get_process_desc(client);
324         memset(desc, 0, sizeof(*desc));
325 }
326
327 static int guc_stage_desc_pool_create(struct intel_guc *guc)
328 {
329         struct i915_vma *vma;
330         void *vaddr;
331
332         vma = intel_guc_allocate_vma(guc,
333                                      PAGE_ALIGN(sizeof(struct guc_stage_desc) *
334                                      GUC_MAX_STAGE_DESCRIPTORS));
335         if (IS_ERR(vma))
336                 return PTR_ERR(vma);
337
338         vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
339         if (IS_ERR(vaddr)) {
340                 i915_vma_unpin_and_release(&vma, 0);
341                 return PTR_ERR(vaddr);
342         }
343
344         guc->stage_desc_pool = vma;
345         guc->stage_desc_pool_vaddr = vaddr;
346         ida_init(&guc->stage_ids);
347
348         return 0;
349 }
350
351 static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
352 {
353         ida_destroy(&guc->stage_ids);
354         i915_vma_unpin_and_release(&guc->stage_desc_pool, I915_VMA_RELEASE_MAP);
355 }
356
357 /*
358  * Initialise/clear the stage descriptor shared with the GuC firmware.
359  *
360  * This descriptor tells the GuC where (in GGTT space) to find the important
361  * data structures relating to this client (doorbell, process descriptor,
362  * write queue, etc).
363  */
364 static void guc_stage_desc_init(struct intel_guc_client *client)
365 {
366         struct intel_guc *guc = client->guc;
367         struct i915_gem_context *ctx = client->owner;
368         struct i915_gem_engines_iter it;
369         struct guc_stage_desc *desc;
370         struct intel_context *ce;
371         u32 gfx_addr;
372
373         desc = __get_stage_desc(client);
374         memset(desc, 0, sizeof(*desc));
375
376         desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE |
377                           GUC_STAGE_DESC_ATTR_KERNEL;
378         if (is_high_priority(client))
379                 desc->attribute |= GUC_STAGE_DESC_ATTR_PREEMPT;
380         desc->stage_id = client->stage_id;
381         desc->priority = client->priority;
382         desc->db_id = client->doorbell_id;
383
384         for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
385                 struct guc_execlist_context *lrc;
386
387                 if (!(ce->engine->mask & client->engines))
388                         continue;
389
390                 /* TODO: We have a design issue to be solved here. Only when we
391                  * receive the first batch do we know which engine the user is
392                  * using. But here the GuC expects the lrc and ring to be pinned.
393                  * This is not an issue for the default context, which is the
394                  * only GuC client owner for now, but any future owner must make
395                  * sure the lrc is pinned before getting here.
396                  */
397                 if (!ce->state)
398                         break;  /* XXX: continue? */
399
400                 /*
401                  * XXX: When this is a GUC_STAGE_DESC_ATTR_KERNEL client (proxy
402                  * submission or, in other words, not using a direct submission
403                  * model) the KMD's LRCA is not used for any work submission.
404                  * Instead, the GuC uses the LRCA of the user mode context (see
405                  * guc_add_request below).
406                  */
407                 lrc = &desc->lrc[ce->engine->guc_id];
408                 lrc->context_desc = lower_32_bits(ce->lrc_desc);
409
410                 /* The state page is after PPHWSP */
411                 lrc->ring_lrca = intel_guc_ggtt_offset(guc, ce->state) +
412                                  LRC_STATE_PN * PAGE_SIZE;
413
414                 /* XXX: In direct submission, the GuC wants the HW context id
415                  * here. In proxy submission, it wants the stage id
416                  */
417                 lrc->context_id = (client->stage_id << GUC_ELC_CTXID_OFFSET) |
418                                 (ce->engine->guc_id << GUC_ELC_ENGINE_OFFSET);
419
420                 lrc->ring_begin = intel_guc_ggtt_offset(guc, ce->ring->vma);
421                 lrc->ring_end = lrc->ring_begin + ce->ring->size - 1;
422                 lrc->ring_next_free_location = lrc->ring_begin;
423                 lrc->ring_current_tail_pointer_value = 0;
424
425                 desc->engines_used |= BIT(ce->engine->guc_id);
426         }
427         i915_gem_context_unlock_engines(ctx);
428
429         DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n",
430                          client->engines, desc->engines_used);
431         WARN_ON(desc->engines_used == 0);
432
433         /*
434          * The doorbell, process descriptor, and workqueue are all parts
435          * of the client object, which the GuC will reference via the GGTT
436          */
437         gfx_addr = intel_guc_ggtt_offset(guc, client->vma);
438         desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
439                                 client->doorbell_offset;
440         desc->db_trigger_cpu = ptr_to_u64(__get_doorbell(client));
441         desc->db_trigger_uk = gfx_addr + client->doorbell_offset;
442         desc->process_desc = gfx_addr + client->proc_desc_offset;
443         desc->wq_addr = gfx_addr + GUC_DB_SIZE;
444         desc->wq_size = GUC_WQ_SIZE;
445
446         desc->desc_private = ptr_to_u64(client);
447 }
448
449 static void guc_stage_desc_fini(struct intel_guc_client *client)
450 {
451         struct guc_stage_desc *desc;
452
453         desc = __get_stage_desc(client);
454         memset(desc, 0, sizeof(*desc));
455 }
456
457 /* Construct a Work Item and append it to the GuC's Work Queue */
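/*
 * For the in-order proxy submission done here, the resulting 4-dword item
 * looks like this (see guc_add_request() for the values passed in):
 *
 *   dw0: WQ_TYPE_INORDER | (wqi_len << WQ_LEN_SHIFT) |
 *        (target_engine << WQ_TARGET_SHIFT) | WQ_NO_WCFLUSH_WAIT
 *   dw1: lower 32 bits of the context (LRC) descriptor
 *   dw2: ring tail, in qwords, shifted by WQ_RING_TAIL_SHIFT
 *   dw3: fence id (the request's seqno)
 */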
458 static void guc_wq_item_append(struct intel_guc_client *client,
459                                u32 target_engine, u32 context_desc,
460                                u32 ring_tail, u32 fence_id)
461 {
462         /* wqi_len is in DWords, and does not include the one-word header */
463         const size_t wqi_size = sizeof(struct guc_wq_item);
464         const u32 wqi_len = wqi_size / sizeof(u32) - 1;
465         struct guc_process_desc *desc = __get_process_desc(client);
466         struct guc_wq_item *wqi;
467         u32 wq_off;
468
469         lockdep_assert_held(&client->wq_lock);
470
471         /* For now the workqueue item is 4 DWs and the workqueue buffer is 2
472          * pages, so a wqi can neither straddle a page boundary nor wrap back
473          * to the beginning of the buffer. This simplifies the implementation.
474          *
475          * XXX: if that ever changes, we need to build the wqi in a temporary
476          * buffer and copy it into the workqueue dword by dword.
477          */
478         BUILD_BUG_ON(wqi_size != 16);
479
480         /* We expect the WQ to be active if we're appending items to it */
481         GEM_BUG_ON(desc->wq_status != WQ_STATUS_ACTIVE);
482
483         /* Free space is guaranteed. */
484         wq_off = READ_ONCE(desc->tail);
485         GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head),
486                               GUC_WQ_SIZE) < wqi_size);
487         GEM_BUG_ON(wq_off & (wqi_size - 1));
488
489         /* WQ starts from the page after doorbell / process_desc */
490         wqi = client->vaddr + wq_off + GUC_DB_SIZE;
491
492         if (I915_SELFTEST_ONLY(client->use_nop_wqi)) {
493                 wqi->header = WQ_TYPE_NOOP | (wqi_len << WQ_LEN_SHIFT);
494         } else {
495                 /* Now fill in the 4-word work queue item */
496                 wqi->header = WQ_TYPE_INORDER |
497                               (wqi_len << WQ_LEN_SHIFT) |
498                               (target_engine << WQ_TARGET_SHIFT) |
499                               WQ_NO_WCFLUSH_WAIT;
500                 wqi->context_desc = context_desc;
501                 wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
502                 GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
503                 wqi->fence_id = fence_id;
504         }
505
506         /* Make the update visible to GuC */
507         WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
508 }
509
510 static void guc_ring_doorbell(struct intel_guc_client *client)
511 {
512         struct guc_doorbell_info *db;
513         u32 cookie;
514
515         lockdep_assert_held(&client->wq_lock);
516
517         /* pointer to the current doorbell cacheline */
518         db = __get_doorbell(client);
519
520         /*
521          * We're not expecting the doorbell cookie to change behind our back,
522          * we also need to treat 0 as a reserved value.
523          */
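        /*
         * e.g. if the cookie is 0xffffffff, cookie + 1 would wrap to the
         * reserved value 0, so the xchg below writes cookie + 2 == 1 instead.
         */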
524         cookie = READ_ONCE(db->cookie);
525         WARN_ON_ONCE(xchg(&db->cookie, cookie + 1 ?: cookie + 2) != cookie);
526
527         /* XXX: the doorbell was lost and we need to acquire it again */
528         GEM_BUG_ON(db->db_status != GUC_DOORBELL_ENABLED);
529 }
530
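/*
 * Proxy submission of a single request: pack the lower 32 bits of its LRC
 * descriptor and the new ring tail (in qwords) into a work queue item, then
 * ring the execbuf client's doorbell so the GuC picks it up. The caller is
 * expected to have flushed the GGTT writes for the ring (see guc_submit()).
 */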
531 static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
532 {
533         struct intel_guc_client *client = guc->execbuf_client;
534         struct intel_engine_cs *engine = rq->engine;
535         u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc);
536         u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
537
538         spin_lock(&client->wq_lock);
539
540         guc_wq_item_append(client, engine->guc_id, ctx_desc,
541                            ring_tail, rq->fence.seqno);
542         guc_ring_doorbell(client);
543
544         client->submissions[engine->id] += 1;
545
546         spin_unlock(&client->wq_lock);
547 }
548
549 /*
550  * When we're doing submissions using the regular execlists backend, writing
551  * to ELSP from the CPU side is enough to make sure that writes to ringbuffer
552  * pages pinned in the mappable aperture portion of the GGTT are visible to
553  * the command streamer. Writes done by the GuC on our behalf do not guarantee
554  * such ordering, so to ensure the flush we issue a POSTING READ.
555  */
556 static void flush_ggtt_writes(struct i915_vma *vma)
557 {
558         struct drm_i915_private *dev_priv = vma->vm->i915;
559
560         if (i915_vma_is_map_and_fenceable(vma))
561                 POSTING_READ_FW(GUC_STATUS);
562 }
563
564 static void inject_preempt_context(struct work_struct *work)
565 {
566         struct guc_preempt_work *preempt_work =
567                 container_of(work, typeof(*preempt_work), work);
568         struct intel_engine_cs *engine = preempt_work->engine;
569         struct intel_guc *guc = container_of(preempt_work, typeof(*guc),
570                                              preempt_work[engine->id]);
571         struct intel_guc_client *client = guc->preempt_client;
572         struct guc_stage_desc *stage_desc = __get_stage_desc(client);
573         struct intel_context *ce = engine->preempt_context;
574         u32 data[7];
575
576         if (!ce->ring->emit) { /* recreate upon load/resume */
577                 u32 addr = intel_hws_preempt_done_address(engine);
578                 u32 *cs;
579
580                 cs = ce->ring->vaddr;
581                 if (engine->class == RENDER_CLASS) {
582                         cs = gen8_emit_ggtt_write_rcs(cs,
583                                                       GUC_PREEMPT_FINISHED,
584                                                       addr,
585                                                       PIPE_CONTROL_CS_STALL);
586                 } else {
587                         cs = gen8_emit_ggtt_write(cs,
588                                                   GUC_PREEMPT_FINISHED,
589                                                   addr,
590                                                   0);
591                         *cs++ = MI_NOOP;
592                         *cs++ = MI_NOOP;
593                 }
594                 *cs++ = MI_USER_INTERRUPT;
595                 *cs++ = MI_NOOP;
596
597                 ce->ring->emit = GUC_PREEMPT_BREADCRUMB_BYTES;
598                 GEM_BUG_ON((void *)cs - ce->ring->vaddr != ce->ring->emit);
599
600                 flush_ggtt_writes(ce->ring->vma);
601         }
602
603         spin_lock_irq(&client->wq_lock);
604         guc_wq_item_append(client, engine->guc_id, lower_32_bits(ce->lrc_desc),
605                            GUC_PREEMPT_BREADCRUMB_BYTES / sizeof(u64), 0);
606         spin_unlock_irq(&client->wq_lock);
607
608         /*
609          * If GuC firmware performs an engine reset while that engine had
610          * a preemption pending, it will set the terminated attribute bit
611          * on our preemption stage descriptor. GuC firmware retains all
612          * pending work items for a high-priority GuC client, unlike the
613          * normal-priority GuC client where work items are dropped. It
614          * wants to make sure the preempt-to-idle work doesn't run when
615          * scheduling resumes, and uses this bit to inform its scheduler
616          * and presumably us as well. Our job is to clear it for the next
617          * preemption after reset, otherwise that and future preemptions
618          * will never complete. We'll just clear it every time.
619          */
620         stage_desc->attribute &= ~GUC_STAGE_DESC_ATTR_TERMINATED;
621
622         data[0] = INTEL_GUC_ACTION_REQUEST_PREEMPTION;
623         data[1] = client->stage_id;
624         data[2] = INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q |
625                   INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q;
626         data[3] = engine->guc_id;
627         data[4] = guc->execbuf_client->priority;
628         data[5] = guc->execbuf_client->stage_id;
629         data[6] = intel_guc_ggtt_offset(guc, guc->shared_data);
630
631         if (WARN_ON(intel_guc_send(guc, data, ARRAY_SIZE(data)))) {
632                 execlists_clear_active(&engine->execlists,
633                                        EXECLISTS_ACTIVE_PREEMPT);
634                 tasklet_schedule(&engine->execlists.tasklet);
635         }
636
637         (void)I915_SELFTEST_ONLY(engine->execlists.preempt_hang.count++);
638 }
639
640 /*
641  * We're using a user interrupt and the HWSP value to mark that preemption
642  * has finished and the GPU is idle. Normally, we could unwind and continue,
643  * as in the execlists submission path. Unfortunately, with GuC we also need
644  * to wait for it to finish its own postprocessing before attempting to
645  * submit, otherwise the GuC may silently ignore our submissions and we risk
646  * losing a request at best, or executing out of order and panicking at worst.
647  */
648 #define GUC_PREEMPT_POSTPROCESS_DELAY_MS 10
649 static void wait_for_guc_preempt_report(struct intel_engine_cs *engine)
650 {
651         struct intel_guc *guc = &engine->i915->guc;
652         struct guc_shared_ctx_data *data = guc->shared_data_vaddr;
653         struct guc_ctx_report *report =
654                 &data->preempt_ctx_report[engine->guc_id];
655
656         if (wait_for_atomic(report->report_return_status ==
657                             INTEL_GUC_REPORT_STATUS_COMPLETE,
658                             GUC_PREEMPT_POSTPROCESS_DELAY_MS))
659                 DRM_ERROR("Timed out waiting for GuC preemption report\n");
660         /*
661          * GuC is expecting that we're also going to clear the affected context
662          * counter; let's also reset the return status so that we don't depend
663          * on GuC resetting it after receiving another preempt action
664          */
665         report->affected_count = 0;
666         report->report_return_status = INTEL_GUC_REPORT_STATUS_UNKNOWN;
667 }
668
669 static void complete_preempt_context(struct intel_engine_cs *engine)
670 {
671         struct intel_engine_execlists *execlists = &engine->execlists;
672
673         GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT));
674
675         if (inject_preempt_hang(execlists))
676                 return;
677
678         execlists_cancel_port_requests(execlists);
679         execlists_unwind_incomplete_requests(execlists);
680
681         wait_for_guc_preempt_report(engine);
682         intel_write_status_page(engine, I915_GEM_HWS_PREEMPT, 0);
683 }
684
685 /**
686  * guc_submit() - Submit commands through GuC
687  * @engine: engine associated with the commands
688  *
689  * The only error here arises if the doorbell hardware isn't functioning
690  * as expected, which really shouldn't happen.
691  */
692 static void guc_submit(struct intel_engine_cs *engine)
693 {
694         struct intel_guc *guc = &engine->i915->guc;
695         struct intel_engine_execlists * const execlists = &engine->execlists;
696         struct execlist_port *port = execlists->port;
697         unsigned int n;
698
699         for (n = 0; n < execlists_num_ports(execlists); n++) {
700                 struct i915_request *rq;
701                 unsigned int count;
702
703                 rq = port_unpack(&port[n], &count);
704                 if (rq && count == 0) {
705                         port_set(&port[n], port_pack(rq, ++count));
706
707                         flush_ggtt_writes(rq->ring->vma);
708
709                         guc_add_request(guc, rq);
710                 }
711         }
712 }
713
714 static void port_assign(struct execlist_port *port, struct i915_request *rq)
715 {
716         GEM_BUG_ON(port_isset(port));
717
718         port_set(port, i915_request_get(rq));
719 }
720
721 static inline int rq_prio(const struct i915_request *rq)
722 {
723         return rq->sched.attr.priority;
724 }
725
726 static inline int port_prio(const struct execlist_port *port)
727 {
728         return rq_prio(port_request(port)) | __NO_PREEMPTION;
729 }
730
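/*
 * Fill the execlist ports from the priority tree, coalescing consecutive
 * requests of the same context into a single port entry. If the ports are
 * busy and a higher-priority request is waiting on an engine that supports
 * preemption, kick the preempt worker instead and bail out; dequeuing
 * resumes once the preemption completes. Returns true if new work was
 * placed in the ports and should be submitted to the GuC.
 */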
731 static bool __guc_dequeue(struct intel_engine_cs *engine)
732 {
733         struct intel_engine_execlists * const execlists = &engine->execlists;
734         struct execlist_port *port = execlists->port;
735         struct i915_request *last = NULL;
736         const struct execlist_port * const last_port =
737                 &execlists->port[execlists->port_mask];
738         bool submit = false;
739         struct rb_node *rb;
740
741         lockdep_assert_held(&engine->timeline.lock);
742
743         if (port_isset(port)) {
744                 if (intel_engine_has_preemption(engine)) {
745                         struct guc_preempt_work *preempt_work =
746                                 &engine->i915->guc.preempt_work[engine->id];
747                         int prio = execlists->queue_priority_hint;
748
749                         if (i915_scheduler_need_preempt(prio,
750                                                         port_prio(port))) {
751                                 execlists_set_active(execlists,
752                                                      EXECLISTS_ACTIVE_PREEMPT);
753                                 queue_work(engine->i915->guc.preempt_wq,
754                                            &preempt_work->work);
755                                 return false;
756                         }
757                 }
758
759                 port++;
760                 if (port_isset(port))
761                         return false;
762         }
763         GEM_BUG_ON(port_isset(port));
764
765         while ((rb = rb_first_cached(&execlists->queue))) {
766                 struct i915_priolist *p = to_priolist(rb);
767                 struct i915_request *rq, *rn;
768                 int i;
769
770                 priolist_for_each_request_consume(rq, rn, p, i) {
771                         if (last && rq->hw_context != last->hw_context) {
772                                 if (port == last_port)
773                                         goto done;
774
775                                 if (submit)
776                                         port_assign(port, last);
777                                 port++;
778                         }
779
780                         list_del_init(&rq->sched.link);
781
782                         __i915_request_submit(rq);
783                         trace_i915_request_in(rq, port_index(port, execlists));
784
785                         last = rq;
786                         submit = true;
787                 }
788
789                 rb_erase_cached(&p->node, &execlists->queue);
790                 i915_priolist_free(p);
791         }
792 done:
793         execlists->queue_priority_hint =
794                 rb ? to_priolist(rb)->priority : INT_MIN;
795         if (submit)
796                 port_assign(port, last);
797         if (last)
798                 execlists_user_begin(execlists, execlists->port);
799
800         /* We must always keep the beast fed if we have work piled up */
801         GEM_BUG_ON(port_isset(execlists->port) &&
802                    !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
803         GEM_BUG_ON(rb_first_cached(&execlists->queue) &&
804                    !port_isset(execlists->port));
805
806         return submit;
807 }
808
809 static void guc_dequeue(struct intel_engine_cs *engine)
810 {
811         if (__guc_dequeue(engine))
812                 guc_submit(engine);
813 }
814
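/*
 * Submission tasklet for GuC mode: pop completed requests off the execlist
 * ports, complete a pending preempt-to-idle cycle once the HWSP reports
 * GUC_PREEMPT_FINISHED, and then dequeue more work unless a preemption is
 * still in flight.
 */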
815 static void guc_submission_tasklet(unsigned long data)
816 {
817         struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
818         struct intel_engine_execlists * const execlists = &engine->execlists;
819         struct execlist_port *port = execlists->port;
820         struct i915_request *rq;
821         unsigned long flags;
822
823         spin_lock_irqsave(&engine->timeline.lock, flags);
824
825         rq = port_request(port);
826         while (rq && i915_request_completed(rq)) {
827                 trace_i915_request_out(rq);
828                 i915_request_put(rq);
829
830                 port = execlists_port_complete(execlists, port);
831                 if (port_isset(port)) {
832                         execlists_user_begin(execlists, port);
833                         rq = port_request(port);
834                 } else {
835                         execlists_user_end(execlists);
836                         rq = NULL;
837                 }
838         }
839
840         if (execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT) &&
841             intel_read_status_page(engine, I915_GEM_HWS_PREEMPT) ==
842             GUC_PREEMPT_FINISHED)
843                 complete_preempt_context(engine);
844
845         if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
846                 guc_dequeue(engine);
847
848         spin_unlock_irqrestore(&engine->timeline.lock, flags);
849 }
850
851 static void guc_reset_prepare(struct intel_engine_cs *engine)
852 {
853         struct intel_engine_execlists * const execlists = &engine->execlists;
854
855         GEM_TRACE("%s\n", engine->name);
856
857         /*
858          * Prevent request submission to the hardware until we have
859          * completed the reset in i915_gem_reset_finish(). If a request
860          * is completed by one engine, it may then queue a request
861          * to a second via its execlists->tasklet *just* as we are
862          * calling engine->init_hw() and also writing the ELSP.
863          * Turning off the execlists->tasklet until the reset is over
864          * prevents the race.
865          */
866         __tasklet_disable_sync_once(&execlists->tasklet);
867
868         /*
869          * We're using a worker to queue preemption requests from the tasklet
870          * in GuC submission mode.
871          * Even though the tasklet was disabled, we may still have a worker queued.
872          * Let's make sure that all workers scheduled before disabling the
873          * tasklet are completed before continuing with the reset.
874          */
875         if (engine->i915->guc.preempt_wq)
876                 flush_workqueue(engine->i915->guc.preempt_wq);
877 }
878
879 static void guc_reset(struct intel_engine_cs *engine, bool stalled)
880 {
881         struct intel_engine_execlists * const execlists = &engine->execlists;
882         struct i915_request *rq;
883         unsigned long flags;
884
885         spin_lock_irqsave(&engine->timeline.lock, flags);
886
887         execlists_cancel_port_requests(execlists);
888
889         /* Push back any incomplete requests for replay after the reset. */
890         rq = execlists_unwind_incomplete_requests(execlists);
891         if (!rq)
892                 goto out_unlock;
893
894         if (!i915_request_started(rq))
895                 stalled = false;
896
897         i915_reset_request(rq, stalled);
898         intel_lr_context_reset(engine, rq->hw_context, rq->head, stalled);
899
900 out_unlock:
901         spin_unlock_irqrestore(&engine->timeline.lock, flags);
902 }
903
904 static void guc_cancel_requests(struct intel_engine_cs *engine)
905 {
906         struct intel_engine_execlists * const execlists = &engine->execlists;
907         struct i915_request *rq, *rn;
908         struct rb_node *rb;
909         unsigned long flags;
910
911         GEM_TRACE("%s\n", engine->name);
912
913         /*
914          * Before we call engine->cancel_requests(), we should have exclusive
915          * access to the submission state. This is arranged for us by the
916          * caller disabling the interrupt generation, the tasklet and other
917          * threads that may then access the same state, giving us a free hand
918          * to reset state. However, we still need to let lockdep be aware that
919          * we know this state may be accessed in hardirq context, so we
920          * disable the irq around this manipulation and we want to keep
921          * the spinlock focused on its duties and not accidentally conflate
922          * coverage to the submission's irq state. (Similarly, although we
923          * shouldn't need to disable irq around the manipulation of the
924          * submission's irq state, we also wish to remind ourselves that
925          * it is irq state.)
926          */
927         spin_lock_irqsave(&engine->timeline.lock, flags);
928
929         /* Cancel the requests on the HW and clear the ELSP tracker. */
930         execlists_cancel_port_requests(execlists);
931
932         /* Mark all executing requests as skipped. */
933         list_for_each_entry(rq, &engine->timeline.requests, link) {
934                 if (!i915_request_signaled(rq))
935                         dma_fence_set_error(&rq->fence, -EIO);
936
937                 i915_request_mark_complete(rq);
938         }
939
940         /* Flush the queued requests to the timeline list (for retiring). */
941         while ((rb = rb_first_cached(&execlists->queue))) {
942                 struct i915_priolist *p = to_priolist(rb);
943                 int i;
944
945                 priolist_for_each_request_consume(rq, rn, p, i) {
946                         list_del_init(&rq->sched.link);
947                         __i915_request_submit(rq);
948                         dma_fence_set_error(&rq->fence, -EIO);
949                         i915_request_mark_complete(rq);
950                 }
951
952                 rb_erase_cached(&p->node, &execlists->queue);
953                 i915_priolist_free(p);
954         }
955
956         /* Remaining _unready_ requests will be nop'ed when submitted */
957
958         execlists->queue_priority_hint = INT_MIN;
959         execlists->queue = RB_ROOT_CACHED;
960         GEM_BUG_ON(port_isset(execlists->port));
961
962         spin_unlock_irqrestore(&engine->timeline.lock, flags);
963 }
964
965 static void guc_reset_finish(struct intel_engine_cs *engine)
966 {
967         struct intel_engine_execlists * const execlists = &engine->execlists;
968
969         if (__tasklet_enable(&execlists->tasklet))
970                 /* And kick in case we missed a new request submission. */
971                 tasklet_hi_schedule(&execlists->tasklet);
972
973         GEM_TRACE("%s: depth->%d\n", engine->name,
974                   atomic_read(&execlists->tasklet.count));
975 }
976
977 /*
978  * Everything below here is concerned with setup & teardown, and is
979  * therefore not part of the somewhat time-critical batch-submission
980  * path of guc_submit() above.
981  */
982
983 /* Check that a doorbell register is in the expected state */
984 static bool doorbell_ok(struct intel_guc *guc, u16 db_id)
985 {
986         bool valid;
987
988         GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
989
990         valid = __doorbell_valid(guc, db_id);
991
992         if (test_bit(db_id, guc->doorbell_bitmap) == valid)
993                 return true;
994
995         DRM_DEBUG_DRIVER("Doorbell %u has unexpected state: valid=%s\n",
996                          db_id, yesno(valid));
997
998         return false;
999 }
1000
1001 static bool guc_verify_doorbells(struct intel_guc *guc)
1002 {
1003         bool doorbells_ok = true;
1004         u16 db_id;
1005
1006         for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id)
1007                 if (!doorbell_ok(guc, db_id))
1008                         doorbells_ok = false;
1009
1010         return doorbells_ok;
1011 }
1012
1013 /**
1014  * guc_client_alloc() - Allocate an intel_guc_client
1015  * @dev_priv:   driver private data structure
1016  * @engines:    The set of engines to enable for this client
1017  * @priority:   one of the four priority levels: _CRITICAL, _HIGH, _NORMAL or
1018  *              _LOW. The kernel client that replaces execlist submission is
1019  *              created with NORMAL priority. A client used by the scheduler
1020  *              can be HIGH, while a preemption context can use CRITICAL.
1021  * @ctx:        the context that owns the client (we use the default render
1022  *              context)
1023  *
1024  * Return:      An intel_guc_client object on success, else an ERR_PTR.
1025  */
1026 static struct intel_guc_client *
1027 guc_client_alloc(struct drm_i915_private *dev_priv,
1028                  u32 engines,
1029                  u32 priority,
1030                  struct i915_gem_context *ctx)
1031 {
1032         struct intel_guc_client *client;
1033         struct intel_guc *guc = &dev_priv->guc;
1034         struct i915_vma *vma;
1035         void *vaddr;
1036         int ret;
1037
1038         client = kzalloc(sizeof(*client), GFP_KERNEL);
1039         if (!client)
1040                 return ERR_PTR(-ENOMEM);
1041
1042         client->guc = guc;
1043         client->owner = ctx;
1044         client->engines = engines;
1045         client->priority = priority;
1046         client->doorbell_id = GUC_DOORBELL_INVALID;
1047         spin_lock_init(&client->wq_lock);
1048
1049         ret = ida_simple_get(&guc->stage_ids, 0, GUC_MAX_STAGE_DESCRIPTORS,
1050                              GFP_KERNEL);
1051         if (ret < 0)
1052                 goto err_client;
1053
1054         client->stage_id = ret;
1055
1056         /* The first page is doorbell/proc_desc. The following two pages are the wq. */
1057         vma = intel_guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
1058         if (IS_ERR(vma)) {
1059                 ret = PTR_ERR(vma);
1060                 goto err_id;
1061         }
1062
1063         /* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
1064         client->vma = vma;
1065
1066         vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
1067         if (IS_ERR(vaddr)) {
1068                 ret = PTR_ERR(vaddr);
1069                 goto err_vma;
1070         }
1071         client->vaddr = vaddr;
1072
1073         ret = reserve_doorbell(client);
1074         if (ret)
1075                 goto err_vaddr;
1076
1077         client->doorbell_offset = __select_cacheline(guc);
1078
1079         /*
1080          * Since the doorbell only requires a single cacheline, we can save
1081          * space by putting the application process descriptor in the same
1082          * page. Use the half of the page that doesn't include the doorbell.
1083          */
1084         if (client->doorbell_offset >= (GUC_DB_SIZE / 2))
1085                 client->proc_desc_offset = 0;
1086         else
1087                 client->proc_desc_offset = (GUC_DB_SIZE / 2);
1088
1089         DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: stage_id %u\n",
1090                          priority, client, client->engines, client->stage_id);
1091         DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n",
1092                          client->doorbell_id, client->doorbell_offset);
1093
1094         return client;
1095
1096 err_vaddr:
1097         i915_gem_object_unpin_map(client->vma->obj);
1098 err_vma:
1099         i915_vma_unpin_and_release(&client->vma, 0);
1100 err_id:
1101         ida_simple_remove(&guc->stage_ids, client->stage_id);
1102 err_client:
1103         kfree(client);
1104         return ERR_PTR(ret);
1105 }
1106
1107 static void guc_client_free(struct intel_guc_client *client)
1108 {
1109         unreserve_doorbell(client);
1110         i915_vma_unpin_and_release(&client->vma, I915_VMA_RELEASE_MAP);
1111         ida_simple_remove(&client->guc->stage_ids, client->stage_id);
1112         kfree(client);
1113 }
1114
1115 static inline bool ctx_save_restore_disabled(struct intel_context *ce)
1116 {
1117         u32 sr = ce->lrc_reg_state[CTX_CONTEXT_CONTROL + 1];
1118
1119 #define SR_DISABLED \
1120         _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | \
1121                            CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT)
1122
1123         return (sr & SR_DISABLED) == SR_DISABLED;
1124
1125 #undef SR_DISABLED
1126 }
1127
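/*
 * Create the clients described in the DOC section above: the execbuf client
 * (kernel-normal priority, all engines, owned by the default kernel context)
 * and, if a preemption context exists, the preempt client (kernel-high
 * priority, owned by the preempt context).
 */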
1128 static int guc_clients_create(struct intel_guc *guc)
1129 {
1130         struct drm_i915_private *dev_priv = guc_to_i915(guc);
1131         struct intel_guc_client *client;
1132
1133         GEM_BUG_ON(guc->execbuf_client);
1134         GEM_BUG_ON(guc->preempt_client);
1135
1136         client = guc_client_alloc(dev_priv,
1137                                   INTEL_INFO(dev_priv)->engine_mask,
1138                                   GUC_CLIENT_PRIORITY_KMD_NORMAL,
1139                                   dev_priv->kernel_context);
1140         if (IS_ERR(client)) {
1141                 DRM_ERROR("Failed to create GuC client for submission!\n");
1142                 return PTR_ERR(client);
1143         }
1144         guc->execbuf_client = client;
1145
1146         if (dev_priv->preempt_context) {
1147                 client = guc_client_alloc(dev_priv,
1148                                           INTEL_INFO(dev_priv)->engine_mask,
1149                                           GUC_CLIENT_PRIORITY_KMD_HIGH,
1150                                           dev_priv->preempt_context);
1151                 if (IS_ERR(client)) {
1152                         DRM_ERROR("Failed to create GuC client for preemption!\n");
1153                         guc_client_free(guc->execbuf_client);
1154                         guc->execbuf_client = NULL;
1155                         return PTR_ERR(client);
1156                 }
1157                 guc->preempt_client = client;
1158         }
1159
1160         return 0;
1161 }
1162
1163 static void guc_clients_destroy(struct intel_guc *guc)
1164 {
1165         struct intel_guc_client *client;
1166
1167         client = fetch_and_zero(&guc->preempt_client);
1168         if (client)
1169                 guc_client_free(client);
1170
1171         client = fetch_and_zero(&guc->execbuf_client);
1172         if (client)
1173                 guc_client_free(client);
1174 }
1175
1176 static int __guc_client_enable(struct intel_guc_client *client)
1177 {
1178         int ret;
1179
1180         guc_proc_desc_init(client);
1181         guc_stage_desc_init(client);
1182
1183         ret = create_doorbell(client);
1184         if (ret)
1185                 goto fail;
1186
1187         return 0;
1188
1189 fail:
1190         guc_stage_desc_fini(client);
1191         guc_proc_desc_fini(client);
1192         return ret;
1193 }
1194
1195 static void __guc_client_disable(struct intel_guc_client *client)
1196 {
1197         /*
1198          * By the time we're here, GuC may have already been reset. If that is
1199          * the case, instead of trying (in vain) to communicate with it, let's
1200          * just cleanup the doorbell HW and our internal state.
1201          */
1202         if (intel_guc_is_loaded(client->guc))
1203                 destroy_doorbell(client);
1204         else
1205                 __fini_doorbell(client);
1206
1207         guc_stage_desc_fini(client);
1208         guc_proc_desc_fini(client);
1209 }
1210
1211 static int guc_clients_enable(struct intel_guc *guc)
1212 {
1213         int ret;
1214
1215         ret = __guc_client_enable(guc->execbuf_client);
1216         if (ret)
1217                 return ret;
1218
1219         if (guc->preempt_client) {
1220                 ret = __guc_client_enable(guc->preempt_client);
1221                 if (ret) {
1222                         __guc_client_disable(guc->execbuf_client);
1223                         return ret;
1224                 }
1225         }
1226
1227         return 0;
1228 }
1229
1230 static void guc_clients_disable(struct intel_guc *guc)
1231 {
1232         if (guc->preempt_client)
1233                 __guc_client_disable(guc->preempt_client);
1234
1235         if (guc->execbuf_client)
1236                 __guc_client_disable(guc->execbuf_client);
1237 }
1238
1239 /*
1240  * Set up the memory resources to be shared with the GuC (via the GGTT)
1241  * at firmware loading time.
1242  */
1243 int intel_guc_submission_init(struct intel_guc *guc)
1244 {
1245         struct drm_i915_private *dev_priv = guc_to_i915(guc);
1246         struct intel_engine_cs *engine;
1247         enum intel_engine_id id;
1248         int ret;
1249
1250         if (guc->stage_desc_pool)
1251                 return 0;
1252
1253         ret = guc_stage_desc_pool_create(guc);
1254         if (ret)
1255                 return ret;
1256         /*
1257          * Keep static analysers happy, let them know that we allocated the
1258          * vma after testing that it didn't exist earlier.
1259          */
1260         GEM_BUG_ON(!guc->stage_desc_pool);
1261
1262         WARN_ON(!guc_verify_doorbells(guc));
1263         ret = guc_clients_create(guc);
1264         if (ret)
1265                 goto err_pool;
1266
1267         for_each_engine(engine, dev_priv, id) {
1268                 guc->preempt_work[id].engine = engine;
1269                 INIT_WORK(&guc->preempt_work[id].work, inject_preempt_context);
1270         }
1271
1272         return 0;
1273
1274 err_pool:
1275         guc_stage_desc_pool_destroy(guc);
1276         return ret;
1277 }
1278
1279 void intel_guc_submission_fini(struct intel_guc *guc)
1280 {
1281         struct drm_i915_private *dev_priv = guc_to_i915(guc);
1282         struct intel_engine_cs *engine;
1283         enum intel_engine_id id;
1284
1285         for_each_engine(engine, dev_priv, id)
1286                 cancel_work_sync(&guc->preempt_work[id].work);
1287
1288         guc_clients_destroy(guc);
1289         WARN_ON(!guc_verify_doorbells(guc));
1290
1291         if (guc->stage_desc_pool)
1292                 guc_stage_desc_pool_destroy(guc);
1293 }
1294
1295 static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
1296 {
1297         struct intel_rps *rps = &dev_priv->gt_pm.rps;
1298         struct intel_engine_cs *engine;
1299         enum intel_engine_id id;
1300         int irqs;
1301
1302         /* tell all command streamers to forward interrupts (but not vblank)
1303          * to GuC
1304          */
1305         irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
1306         for_each_engine(engine, dev_priv, id)
1307                 I915_WRITE(RING_MODE_GEN7(engine), irqs);
1308
1309         /* route USER_INTERRUPT to Host, all others are sent to GuC. */
1310         irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
1311                GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
1312         /* These three registers have the same bit definitions */
1313         I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
1314         I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
1315         I915_WRITE(GUC_WD_VECS_IER, ~irqs);
1316
1317         /*
1318          * The REDIRECT_TO_GUC bit of the PMINTRMSK register directs all
1319          * (unmasked) PM interrupts to the GuC. All other bits of this
1320          * register *disable* generation of a specific interrupt.
1321          *
1322          * 'pm_intrmsk_mbz' indicates bits that are NOT to be set when
1323          * writing to the PM interrupt mask register, i.e. interrupts
1324          * that must not be disabled.
1325          *
1326          * If the GuC is handling these interrupts, then we must not let
1327          * the PM code disable ANY interrupt that the GuC is expecting.
1328          * So for each ENABLED (0) bit in this register, we must SET the
1329          * bit in pm_intrmsk_mbz so that it's left enabled for the GuC.
1330          * The GuC needs the ARAT expired interrupt unmasked, hence it is set
1331          * in pm_intrmsk_mbz.
1332          *
1333          * Here we CLEAR REDIRECT_TO_GUC bit in pm_intrmsk_mbz, which will
1334          * result in the register bit being left SET!
1335          */
1336         rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
1337         rps->pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
1338 }
1339
1340 static void guc_interrupts_release(struct drm_i915_private *dev_priv)
1341 {
1342         struct intel_rps *rps = &dev_priv->gt_pm.rps;
1343         struct intel_engine_cs *engine;
1344         enum intel_engine_id id;
1345         int irqs;
1346
1347         /*
1348          * tell all command streamers NOT to forward interrupts or vblank
1349          * to GuC.
1350          */
1351         irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
1352         irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
1353         for_each_engine(engine, dev_priv, id)
1354                 I915_WRITE(RING_MODE_GEN7(engine), irqs);
1355
1356         /* route all GT interrupts to the host */
1357         I915_WRITE(GUC_BCS_RCS_IER, 0);
1358         I915_WRITE(GUC_VCS2_VCS1_IER, 0);
1359         I915_WRITE(GUC_WD_VECS_IER, 0);
1360
1361         rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
1362         rps->pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
1363 }
1364
1365 static void guc_submission_park(struct intel_engine_cs *engine)
1366 {
1367         intel_engine_park(engine);
1368         intel_engine_unpin_breadcrumbs_irq(engine);
1369         engine->flags &= ~I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
1370 }
1371
1372 static void guc_submission_unpark(struct intel_engine_cs *engine)
1373 {
1374         engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
1375         intel_engine_pin_breadcrumbs_irq(engine);
1376 }
1377
1378 static void guc_set_default_submission(struct intel_engine_cs *engine)
1379 {
1380         /*
1381          * We inherit a bunch of functions from execlists that we'd like
1382          * to keep using:
1383          *
1384          *    engine->submit_request = execlists_submit_request;
1385          *    engine->cancel_requests = execlists_cancel_requests;
1386          *    engine->schedule = execlists_schedule;
1387          *
1388          * But we need to override the actual submission backend in order
1389          * to talk to the GuC.
1390          */
1391         intel_execlists_set_default_submission(engine);
1392
1393         engine->execlists.tasklet.func = guc_submission_tasklet;
1394
1395         engine->park = guc_submission_park;
1396         engine->unpark = guc_submission_unpark;
1397
1398         engine->reset.prepare = guc_reset_prepare;
1399         engine->reset.reset = guc_reset;
1400         engine->reset.finish = guc_reset_finish;
1401
1402         engine->cancel_requests = guc_cancel_requests;
1403
1404         engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
1405 }
1406
1407 int intel_guc_submission_enable(struct intel_guc *guc)
1408 {
1409         struct drm_i915_private *dev_priv = guc_to_i915(guc);
1410         struct intel_engine_cs *engine;
1411         enum intel_engine_id id;
1412         int err;
1413
1414         /*
1415          * We're using GuC work items for submitting work through GuC. Since
1416          * we're coalescing multiple requests from a single context into a
1417          * single work item prior to assigning it to execlist_port, we can
1418          * never have more work items than the total number of ports (for all
1419          * engines). The GuC firmware controls the HEAD of the work queue,
1420          * and it is guaranteed that it will remove the work item from the
1421          * queue before our request is completed.
1422          */
1423         BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.port) *
1424                      sizeof(struct guc_wq_item) *
1425                      I915_NUM_ENGINES > GUC_WQ_SIZE);
1426
1427         GEM_BUG_ON(!guc->execbuf_client);
1428
1429         err = intel_guc_sample_forcewake(guc);
1430         if (err)
1431                 return err;
1432
1433         err = guc_clients_enable(guc);
1434         if (err)
1435                 return err;
1436
1437         /* Take over from manual control of ELSP (execlists) */
1438         guc_interrupts_capture(dev_priv);
1439
1440         for_each_engine(engine, dev_priv, id) {
1441                 engine->set_default_submission = guc_set_default_submission;
1442                 engine->set_default_submission(engine);
1443         }
1444
1445         return 0;
1446 }
1447
1448 void intel_guc_submission_disable(struct intel_guc *guc)
1449 {
1450         struct drm_i915_private *dev_priv = guc_to_i915(guc);
1451
1452         GEM_BUG_ON(dev_priv->gt.awake); /* GT should be parked first */
1453
1454         guc_interrupts_release(dev_priv);
1455         guc_clients_disable(guc);
1456 }
1457
1458 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1459 #include "selftests/intel_guc.c"
1460 #endif