/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/circ_buf.h>
#include <trace/events/dma_fence.h>

#include "intel_guc_submission.h"
#include "intel_lrc_reg.h"
#include "i915_drv.h"

#define GUC_PREEMPT_FINISHED		0x1
#define GUC_PREEMPT_BREADCRUMB_DWORDS	0x8
#define GUC_PREEMPT_BREADCRUMB_BYTES	\
	(sizeof(u32) * GUC_PREEMPT_BREADCRUMB_DWORDS)

/**
 * DOC: GuC-based command submission
 *
 * GuC client:
 * An intel_guc_client refers to a submission path through GuC. Currently,
 * there are two clients. One of them (the execbuf_client) is charged with all
 * submissions to the GuC, the other one (preempt_client) is responsible for
 * preempting the execbuf_client. This struct is the owner of a doorbell, a
 * process descriptor and a workqueue (all of them inside a single gem object
 * that contains all required pages for these elements).
 *
 * GuC stage descriptor:
 * During initialization, the driver allocates a static pool of 1024 such
 * descriptors, and shares them with the GuC.
 * Currently, there exists a 1:1 mapping between an intel_guc_client and a
 * guc_stage_desc (via the client's stage_id), so effectively only one
 * gets used. This stage descriptor lets the GuC know about the doorbell,
 * workqueue and process descriptor. Theoretically, it also lets the GuC
 * know about our HW contexts (context ID, etc...), but we actually employ a
 * kind of submission where the GuC uses the LRCA sent via the work item
 * instead (the single guc_stage_desc associated to the execbuf client
 * contains information about the default kernel context only, but this is
 * essentially unused). This is called a "proxy" submission.
 *
 * The Scratch registers:
 * There are 16 MMIO-based registers starting from 0xC180. The kernel driver
 * writes a value to the action register (SOFT_SCRATCH_0) along with any data.
 * It then triggers an interrupt on the GuC via another register write
 * (0xC4C8). Firmware writes a success/fail code back to the action register
 * after it processes the request. The kernel driver polls waiting for this
 * update and then proceeds.
 * See intel_guc_send()
 *
 * Doorbells:
 * Doorbells are interrupts to uKernel. A doorbell is a single cache line (QW)
 * mapped into process space.
 *
 * Work Items:
 * There are several types of work items that the host may place into a
 * workqueue, each with its own requirements and limitations. Currently only
 * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
 * represents an in-order queue. The kernel driver packs a ring tail pointer
 * and an ELSP context descriptor dword into the Work Item.
 * See guc_add_request()
 */
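
/*
 * Putting it together, the per-request flow (see guc_add_request() below) is:
 * guc_wq_item_append() packs the context descriptor and ring tail into a
 * work item in the client's workqueue, and guc_ring_doorbell() then rings
 * the client's doorbell, prompting the GuC to consume the work item and
 * write the ELSP on our behalf.
 */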

static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
	return rb_entry(rb, struct i915_priolist, node);
}

static inline bool is_high_priority(struct intel_guc_client *client)
{
	return (client->priority == GUC_CLIENT_PRIORITY_KMD_HIGH ||
		client->priority == GUC_CLIENT_PRIORITY_HIGH);
}
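
/*
 * Reserve a doorbell ID in the driver's bitmap for this client. This only
 * claims the ID on the host side; the doorbell itself is set up and handed
 * to the GuC later, in create_doorbell().
 */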

static int reserve_doorbell(struct intel_guc_client *client)
{
	unsigned long offset;
	unsigned long end;
	u16 id;

	GEM_BUG_ON(client->doorbell_id != GUC_DOORBELL_INVALID);

	/*
	 * The bitmap tracks which doorbell registers are currently in use.
	 * It is split into two halves; the first half is used for normal
	 * priority contexts, the second half for high-priority ones.
	 */
	offset = 0;
	end = GUC_NUM_DOORBELLS / 2;
	if (is_high_priority(client)) {
		offset = end;
		end += offset;
	}

	id = find_next_zero_bit(client->guc->doorbell_bitmap, end, offset);
	if (id == end)
		return -ENOSPC;

	__set_bit(id, client->guc->doorbell_bitmap);
	client->doorbell_id = id;
	DRM_DEBUG_DRIVER("client %u (high prio=%s) reserved doorbell: %d\n",
			 client->stage_id, yesno(is_high_priority(client)),
			 id);
	return 0;
}

static void unreserve_doorbell(struct intel_guc_client *client)
{
	GEM_BUG_ON(client->doorbell_id == GUC_DOORBELL_INVALID);

	__clear_bit(client->doorbell_id, client->guc->doorbell_bitmap);
	client->doorbell_id = GUC_DOORBELL_INVALID;
}

/*
 * Tell the GuC to allocate or deallocate a specific doorbell
 */

static int __guc_allocate_doorbell(struct intel_guc *guc, u32 stage_id)
{
	u32 action[] = {
		INTEL_GUC_ACTION_ALLOCATE_DOORBELL,
		stage_id
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static int __guc_deallocate_doorbell(struct intel_guc *guc, u32 stage_id)
{
	u32 action[] = {
		INTEL_GUC_ACTION_DEALLOCATE_DOORBELL,
		stage_id
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static struct guc_stage_desc *__get_stage_desc(struct intel_guc_client *client)
{
	struct guc_stage_desc *base = client->guc->stage_desc_pool_vaddr;

	return &base[client->stage_id];
}

/*
 * Initialise, update, or clear doorbell data shared with the GuC
 *
 * These functions modify shared data and so need access to the mapped
 * client object which contains the page being used for the doorbell
 */

static void __update_doorbell_desc(struct intel_guc_client *client, u16 new_id)
{
	struct guc_stage_desc *desc;

	/* Update the GuC's idea of the doorbell ID */
	desc = __get_stage_desc(client);
	desc->db_id = new_id;
}

static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client)
{
	return client->vaddr + client->doorbell_offset;
}

static bool has_doorbell(struct intel_guc_client *client)
{
	if (client->doorbell_id == GUC_DOORBELL_INVALID)
		return false;

	return test_bit(client->doorbell_id, client->guc->doorbell_bitmap);
}

static void __create_doorbell(struct intel_guc_client *client)
{
	struct guc_doorbell_info *doorbell;

	doorbell = __get_doorbell(client);
	doorbell->db_status = GUC_DOORBELL_ENABLED;
	doorbell->cookie = 0;
}

static void __destroy_doorbell(struct intel_guc_client *client)
{
	struct drm_i915_private *dev_priv = guc_to_i915(client->guc);
	struct guc_doorbell_info *doorbell;
	u16 db_id = client->doorbell_id;

	doorbell = __get_doorbell(client);
	doorbell->db_status = GUC_DOORBELL_DISABLED;
	doorbell->cookie = 0;

	/*
	 * Doorbell release flow requires that we wait for GEN8_DRB_VALID bit
	 * to go to zero after updating db_status before we call the GuC to
	 * release the doorbell
	 */
	if (wait_for_us(!(I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID), 10))
		WARN_ONCE(true, "Doorbell never became invalid after disable\n");
}
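
/*
 * create_doorbell()/destroy_doorbell() pair the local doorbell state with
 * the GuC side: they update the stage descriptor and the doorbell info page
 * first, then ask the firmware (via an H2G action) to (de)allocate the
 * doorbell, unwinding the local state if the firmware call fails.
 */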

static int create_doorbell(struct intel_guc_client *client)
{
	int ret;

	__update_doorbell_desc(client, client->doorbell_id);
	__create_doorbell(client);

	ret = __guc_allocate_doorbell(client->guc, client->stage_id);
	if (ret) {
		__destroy_doorbell(client);
		__update_doorbell_desc(client, GUC_DOORBELL_INVALID);
		DRM_ERROR("Couldn't create client %u doorbell: %d\n",
			  client->stage_id, ret);
		return ret;
	}

	return 0;
}

static int destroy_doorbell(struct intel_guc_client *client)
{
	int ret;

	GEM_BUG_ON(!has_doorbell(client));

	__destroy_doorbell(client);
	ret = __guc_deallocate_doorbell(client->guc, client->stage_id);
	if (ret)
		DRM_ERROR("Couldn't destroy client %u doorbell: %d\n",
			  client->stage_id, ret);

	__update_doorbell_desc(client, GUC_DOORBELL_INVALID);

	return ret;
}

static unsigned long __select_cacheline(struct intel_guc *guc)
{
	unsigned long offset;

	/* Doorbell uses a single cache line within a page */
	offset = offset_in_page(guc->db_cacheline);

	/* Moving to next cache line to reduce contention */
	guc->db_cacheline += cache_line_size();

	DRM_DEBUG_DRIVER("reserved cacheline 0x%lx, next 0x%x, linesize %u\n",
			 offset, guc->db_cacheline, cache_line_size());
	return offset;
}

static inline struct guc_process_desc *
__get_process_desc(struct intel_guc_client *client)
{
	return client->vaddr + client->proc_desc_offset;
}

/*
 * Initialise the process descriptor shared with the GuC firmware.
 */
static void guc_proc_desc_init(struct intel_guc *guc,
			       struct intel_guc_client *client)
{
	struct guc_process_desc *desc;

	desc = memset(__get_process_desc(client), 0, sizeof(*desc));

	/*
	 * XXX: pDoorbell and WQVBaseAddress are pointers in process address
	 * space for ring3 clients (set them as in mmap_ioctl) or kernel
	 * space for kernel clients (map on demand instead? May make debug
	 * easier to have it mapped).
	 */
	desc->wq_base_addr = 0;
	desc->db_base_addr = 0;

	desc->stage_id = client->stage_id;
	desc->wq_size_bytes = GUC_WQ_SIZE;
	desc->wq_status = WQ_STATUS_ACTIVE;
	desc->priority = client->priority;
}
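
/*
 * The stage descriptor pool is a single GGTT-mapped gem object holding the
 * static array of GUC_MAX_STAGE_DESCRIPTORS descriptors shared with the
 * firmware (see the DOC section above); it stays pinned and CPU-mapped for
 * the lifetime of GuC submission.
 */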

static int guc_stage_desc_pool_create(struct intel_guc *guc)
{
	struct i915_vma *vma;
	void *vaddr;

	vma = intel_guc_allocate_vma(guc,
				     PAGE_ALIGN(sizeof(struct guc_stage_desc) *
				     GUC_MAX_STAGE_DESCRIPTORS));
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		i915_vma_unpin_and_release(&vma);
		return PTR_ERR(vaddr);
	}

	guc->stage_desc_pool = vma;
	guc->stage_desc_pool_vaddr = vaddr;
	ida_init(&guc->stage_ids);

	return 0;
}

static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
{
	ida_destroy(&guc->stage_ids);
	i915_gem_object_unpin_map(guc->stage_desc_pool->obj);
	i915_vma_unpin_and_release(&guc->stage_desc_pool);
}

/*
 * Initialise/clear the stage descriptor shared with the GuC firmware.
 *
 * This descriptor tells the GuC where (in GGTT space) to find the important
 * data structures relating to this client (doorbell, process descriptor,
 * write queue, etc).
 */
static void guc_stage_desc_init(struct intel_guc *guc,
				struct intel_guc_client *client)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx = client->owner;
	struct guc_stage_desc *desc;
	unsigned int tmp;
	u32 gfx_addr;

	desc = __get_stage_desc(client);
	memset(desc, 0, sizeof(*desc));

	desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE |
			  GUC_STAGE_DESC_ATTR_KERNEL;
	if (is_high_priority(client))
		desc->attribute |= GUC_STAGE_DESC_ATTR_PREEMPT;
	desc->stage_id = client->stage_id;
	desc->priority = client->priority;
	desc->db_id = client->doorbell_id;

	for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
		struct intel_context *ce = &ctx->engine[engine->id];
		u32 guc_engine_id = engine->guc_id;
		struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id];

		/*
		 * TODO: We have a design issue to be solved here. Only when we
		 * receive the first batch do we know which engine is used by
		 * the user. But here GuC expects the lrc and ring to be
		 * pinned. That is not an issue for the default context, which
		 * is the only one that owns a GuC client for now. But any
		 * future owner of a GuC client needs to make sure the lrc is
		 * pinned prior to entering here.
		 */
		if (!ce->state)
			break;	/* XXX: continue? */

		/*
		 * XXX: When this is a GUC_STAGE_DESC_ATTR_KERNEL client (proxy
		 * submission or, in other words, not using a direct submission
		 * model) the KMD's LRCA is not used for any work submission.
		 * Instead, the GuC uses the LRCA of the user mode context (see
		 * guc_add_request below).
		 */
		lrc->context_desc = lower_32_bits(ce->lrc_desc);

		/* The state page is after PPHWSP */
		lrc->ring_lrca = guc_ggtt_offset(ce->state) +
				 LRC_STATE_PN * PAGE_SIZE;

		/*
		 * XXX: In direct submission, the GuC wants the HW context id
		 * here. In proxy submission, it wants the stage id
		 */
		lrc->context_id = (client->stage_id << GUC_ELC_CTXID_OFFSET) |
				  (guc_engine_id << GUC_ELC_ENGINE_OFFSET);

		lrc->ring_begin = guc_ggtt_offset(ce->ring->vma);
		lrc->ring_end = lrc->ring_begin + ce->ring->size - 1;
		lrc->ring_next_free_location = lrc->ring_begin;
		lrc->ring_current_tail_pointer_value = 0;

		desc->engines_used |= (1 << guc_engine_id);
	}

	DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n",
			 client->engines, desc->engines_used);
	WARN_ON(desc->engines_used == 0);

	/*
	 * The doorbell, process descriptor, and workqueue are all parts
	 * of the client object, which the GuC will reference via the GGTT
	 */
	gfx_addr = guc_ggtt_offset(client->vma);
	desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
				client->doorbell_offset;
	desc->db_trigger_cpu = ptr_to_u64(__get_doorbell(client));
	desc->db_trigger_uk = gfx_addr + client->doorbell_offset;
	desc->process_desc = gfx_addr + client->proc_desc_offset;
	desc->wq_addr = gfx_addr + GUC_DB_SIZE;
	desc->wq_size = GUC_WQ_SIZE;

	desc->desc_private = ptr_to_u64(client);
}

static void guc_stage_desc_fini(struct intel_guc *guc,
				struct intel_guc_client *client)
{
	struct guc_stage_desc *desc;

	desc = __get_stage_desc(client);
	memset(desc, 0, sizeof(*desc));
}

/* Construct a Work Item and append it to the GuC's Work Queue */
static void guc_wq_item_append(struct intel_guc_client *client,
			       u32 target_engine, u32 context_desc,
			       u32 ring_tail, u32 fence_id)
{
	/* wqi_len is in DWords, and does not include the one-word header */
	const size_t wqi_size = sizeof(struct guc_wq_item);
	const u32 wqi_len = wqi_size / sizeof(u32) - 1;
	struct guc_process_desc *desc = __get_process_desc(client);
	struct guc_wq_item *wqi;
	u32 wq_off;

	lockdep_assert_held(&client->wq_lock);

	/*
	 * For now the workqueue item is 4 DWs and the workqueue buffer is
	 * 2 pages, so we should never see the structure wqi cross a page
	 * boundary, nor wrap to the beginning. This simplifies the
	 * implementation below.
	 *
	 * XXX: if that ever stops being true, we would need to save the data
	 * to a temp wqi and copy it to the workqueue buffer dw by dw.
	 */
	BUILD_BUG_ON(wqi_size != 16);

	/* Free space is guaranteed. */
	wq_off = READ_ONCE(desc->tail);
	GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head),
		   GUC_WQ_SIZE) < wqi_size);
	GEM_BUG_ON(wq_off & (wqi_size - 1));

	/* WQ starts from the page after doorbell / process_desc */
	wqi = client->vaddr + wq_off + GUC_DB_SIZE;

	/* Now fill in the 4-word work queue item */
	wqi->header = WQ_TYPE_INORDER |
		      (wqi_len << WQ_LEN_SHIFT) |
		      (target_engine << WQ_TARGET_SHIFT) |
		      WQ_NO_WCFLUSH_WAIT;
	wqi->context_desc = context_desc;
	wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
	GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
	wqi->fence_id = fence_id;

	/* Make the update visible to GuC */
	WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
}
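
/*
 * Reset the client's workqueue to empty; used when (re)enabling submission,
 * see intel_guc_submission_enable().
 */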

static void guc_reset_wq(struct intel_guc_client *client)
{
	struct guc_process_desc *desc = __get_process_desc(client);

	desc->head = 0;
	desc->tail = 0;
}

static void guc_ring_doorbell(struct intel_guc_client *client)
{
	struct guc_doorbell_info *db;
	u32 cookie;

	lockdep_assert_held(&client->wq_lock);

	/* pointer of current doorbell cacheline */
	db = __get_doorbell(client);

	/*
	 * We're not expecting the doorbell cookie to change behind our back,
	 * but we also need to treat 0 as a reserved value.
	 */
	cookie = READ_ONCE(db->cookie);
	WARN_ON_ONCE(xchg(&db->cookie, cookie + 1 ?: cookie + 2) != cookie);

	/* XXX: doorbell was lost and need to acquire it again */
	GEM_BUG_ON(db->db_status != GUC_DOORBELL_ENABLED);
}
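
/*
 * Proxy submission: the stage descriptor carries only the default kernel
 * context, so the LRCA that actually gets executed is the one packed into
 * the work item here, together with the ring tail (in QWords) and the
 * request's global seqno as the fence ID.
 */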

static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{
	struct intel_guc_client *client = guc->execbuf_client;
	struct intel_engine_cs *engine = rq->engine;
	u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(rq->ctx,
								 engine));
	u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);

	spin_lock(&client->wq_lock);

	guc_wq_item_append(client, engine->guc_id, ctx_desc,
			   ring_tail, rq->global_seqno);
	guc_ring_doorbell(client);

	client->submissions[engine->id] += 1;

	spin_unlock(&client->wq_lock);
}

/*
 * When we're doing submissions using the regular execlists backend, writing
 * to ELSP from the CPU side is enough to make sure that writes to ringbuffer
 * pages pinned in the mappable aperture portion of the GGTT are visible to
 * the command streamer. Writes done by the GuC on our behalf are not
 * guaranteed to be ordered that way; therefore, to ensure the flush, we
 * issue a POSTING READ.
 */
static void flush_ggtt_writes(struct i915_vma *vma)
{
	struct drm_i915_private *dev_priv = to_i915(vma->obj->base.dev);

	if (i915_vma_is_map_and_fenceable(vma))
		POSTING_READ_FW(GUC_STATUS);
}
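
/*
 * Worker that injects a preempt-to-idle request: it queues a work item on
 * the dedicated preempt client and then sends a REQUEST_PREEMPTION action
 * to the firmware. If sending fails, preemption is abandoned and the
 * submission tasklet is kicked again.
 */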

static void inject_preempt_context(struct work_struct *work)
{
	struct guc_preempt_work *preempt_work =
		container_of(work, typeof(*preempt_work), work);
	struct intel_engine_cs *engine = preempt_work->engine;
	struct intel_guc *guc = container_of(preempt_work, typeof(*guc),
					     preempt_work[engine->id]);
	struct intel_guc_client *client = guc->preempt_client;
	struct guc_stage_desc *stage_desc = __get_stage_desc(client);
	u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(client->owner,
								 engine));
	u32 data[7];

	/*
	 * The ring contains commands to write GUC_PREEMPT_FINISHED into HWSP.
	 * See guc_fill_preempt_context().
	 */
	spin_lock_irq(&client->wq_lock);
	guc_wq_item_append(client, engine->guc_id, ctx_desc,
			   GUC_PREEMPT_BREADCRUMB_BYTES / sizeof(u64), 0);
	spin_unlock_irq(&client->wq_lock);

	/*
	 * If the GuC firmware performs an engine reset while that engine had
	 * a preemption pending, it will set the terminated attribute bit
	 * on our preemption stage descriptor. GuC firmware retains all
	 * pending work items for a high-priority GuC client, unlike the
	 * normal-priority GuC client where work items are dropped. It
	 * wants to make sure the preempt-to-idle work doesn't run when
	 * scheduling resumes, and uses this bit to inform its scheduler
	 * and presumably us as well. Our job is to clear it for the next
	 * preemption after reset, otherwise that and future preemptions
	 * will never complete. We'll just clear it every time.
	 */
	stage_desc->attribute &= ~GUC_STAGE_DESC_ATTR_TERMINATED;

	data[0] = INTEL_GUC_ACTION_REQUEST_PREEMPTION;
	data[1] = client->stage_id;
	data[2] = INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q |
		  INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q;
	data[3] = engine->guc_id;
	data[4] = guc->execbuf_client->priority;
	data[5] = guc->execbuf_client->stage_id;
	data[6] = guc_ggtt_offset(guc->shared_data);

	if (WARN_ON(intel_guc_send(guc, data, ARRAY_SIZE(data)))) {
		execlists_clear_active(&engine->execlists,
				       EXECLISTS_ACTIVE_PREEMPT);
		tasklet_schedule(&engine->execlists.tasklet);
	}
}

/*
 * We're using user interrupt and HWSP value to mark that preemption has
 * finished and the GPU is idle. Normally, we could unwind and continue
 * similar to the execlists submission path. Unfortunately, with GuC we also
 * need to wait for it to finish its own postprocessing before attempting to
 * submit. Otherwise GuC may silently ignore our submissions, and thus we
 * risk losing requests at best, executing out-of-order and causing a kernel
 * panic at worst.
 */
#define GUC_PREEMPT_POSTPROCESS_DELAY_MS 10
static void wait_for_guc_preempt_report(struct intel_engine_cs *engine)
{
	struct intel_guc *guc = &engine->i915->guc;
	struct guc_shared_ctx_data *data = guc->shared_data_vaddr;
	struct guc_ctx_report *report =
		&data->preempt_ctx_report[engine->guc_id];

	WARN_ON(wait_for_atomic(report->report_return_status ==
				INTEL_GUC_REPORT_STATUS_COMPLETE,
				GUC_PREEMPT_POSTPROCESS_DELAY_MS));
	/*
	 * GuC is expecting that we're also going to clear the affected context
	 * counter, so let's also reset the return status to not depend on GuC
	 * resetting it after receiving another preempt action
	 */
	report->affected_count = 0;
	report->report_return_status = INTEL_GUC_REPORT_STATUS_UNKNOWN;
}

/**
 * guc_submit() - Submit commands through GuC
 * @engine: engine associated with the commands
 *
 * The only error here arises if the doorbell hardware isn't functioning
 * as expected, which really shouldn't happen.
 */
static void guc_submit(struct intel_engine_cs *engine)
{
	struct intel_guc *guc = &engine->i915->guc;
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct execlist_port *port = execlists->port;
	unsigned int n;

	for (n = 0; n < execlists_num_ports(execlists); n++) {
		struct i915_request *rq;
		unsigned int count;

		rq = port_unpack(&port[n], &count);
		if (rq && count == 0) {
			port_set(&port[n], port_pack(rq, ++count));

			flush_ggtt_writes(rq->ring->vma);

			guc_add_request(guc, rq);
		}
	}
}

static void port_assign(struct execlist_port *port, struct i915_request *rq)
{
	GEM_BUG_ON(port_isset(port));

	port_set(port, i915_request_get(rq));
}
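
/*
 * Fill the available ELSP ports from the priority-sorted queue, coalescing
 * consecutive requests from the same context into a single port. If the
 * ports are busy and a higher-priority request is queued, kick the
 * preemption worker instead (when a preempt context is available).
 */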

static void guc_dequeue(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct execlist_port *port = execlists->port;
	struct i915_request *last = NULL;
	const struct execlist_port * const last_port =
		&execlists->port[execlists->port_mask];
	bool submit = false;
	struct rb_node *rb;

	spin_lock_irq(&engine->timeline->lock);
	rb = execlists->first;
	GEM_BUG_ON(rb_first(&execlists->queue) != rb);

	if (port_isset(port)) {
		if (engine->i915->preempt_context) {
			struct guc_preempt_work *preempt_work =
				&engine->i915->guc.preempt_work[engine->id];

			if (execlists->queue_priority >
			    max(port_request(port)->priotree.priority, 0)) {
				execlists_set_active(execlists,
						     EXECLISTS_ACTIVE_PREEMPT);
				queue_work(engine->i915->guc.preempt_wq,
					   &preempt_work->work);
				goto unlock;
			}
		}

		port++;
		if (port_isset(port))
			goto unlock;
	}
	GEM_BUG_ON(port_isset(port));

	while (rb) {
		struct i915_priolist *p = to_priolist(rb);
		struct i915_request *rq, *rn;

		list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
			if (last && rq->ctx != last->ctx) {
				if (port == last_port) {
					__list_del_many(&p->requests,
							&rq->priotree.link);
					goto done;
				}

				if (submit)
					port_assign(port, last);
				port++;
			}

			INIT_LIST_HEAD(&rq->priotree.link);

			__i915_request_submit(rq);
			trace_i915_request_in(rq, port_index(port, execlists));
			last = rq;
			submit = true;
		}

		rb = rb_next(rb);
		rb_erase(&p->node, &execlists->queue);
		INIT_LIST_HEAD(&p->requests);
		if (p->priority != I915_PRIORITY_NORMAL)
			kmem_cache_free(engine->i915->priorities, p);
	}
done:
	execlists->queue_priority = rb ? to_priolist(rb)->priority : INT_MIN;
	execlists->first = rb;
	if (submit) {
		port_assign(port, last);
		execlists_set_active(execlists, EXECLISTS_ACTIVE_USER);
		guc_submit(engine);
	}

	/* We must always keep the beast fed if we have work piled up */
	GEM_BUG_ON(port_isset(execlists->port) &&
		   !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
	GEM_BUG_ON(execlists->first && !port_isset(execlists->port));

unlock:
	spin_unlock_irq(&engine->timeline->lock);
}

static void guc_submission_tasklet(unsigned long data)
{
	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct execlist_port *port = execlists->port;
	struct i915_request *rq;

	rq = port_request(&port[0]);
	while (rq && i915_request_completed(rq)) {
		trace_i915_request_out(rq);
		i915_request_put(rq);

		execlists_port_complete(execlists, port);

		rq = port_request(&port[0]);
	}
	if (!rq)
		execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);

	if (execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT) &&
	    intel_read_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX) ==
	    GUC_PREEMPT_FINISHED) {
		execlists_cancel_port_requests(&engine->execlists);
		execlists_unwind_incomplete_requests(execlists);

		wait_for_guc_preempt_report(engine);

		execlists_clear_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
		intel_write_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX, 0);
	}

	if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
		guc_dequeue(engine);
}

/*
 * Everything below here is concerned with setup & teardown, and is
 * therefore not part of the somewhat time-critical batch-submission
 * path of guc_submit() above.
 */

/* Check that a doorbell register is in the expected state */
static bool doorbell_ok(struct intel_guc *guc, u16 db_id)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 drbregl;
	bool valid;

	GEM_BUG_ON(db_id >= GUC_DOORBELL_INVALID);

	drbregl = I915_READ(GEN8_DRBREGL(db_id));
	valid = drbregl & GEN8_DRB_VALID;

	if (test_bit(db_id, guc->doorbell_bitmap) == valid)
		return true;

	DRM_DEBUG_DRIVER("Doorbell %d has unexpected state (0x%x): valid=%s\n",
			 db_id, drbregl, yesno(valid));

	return false;
}

static bool guc_verify_doorbells(struct intel_guc *guc)
{
	u16 db_id;

	for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id)
		if (!doorbell_ok(guc, db_id))
			return false;

	return true;
}

static int guc_clients_doorbell_init(struct intel_guc *guc)
{
	int ret;

	ret = create_doorbell(guc->execbuf_client);
	if (ret)
		return ret;

	if (guc->preempt_client) {
		ret = create_doorbell(guc->preempt_client);
		if (ret) {
			destroy_doorbell(guc->execbuf_client);
			return ret;
		}
	}

	return 0;
}

static void guc_clients_doorbell_fini(struct intel_guc *guc)
{
	/*
	 * By the time we're here, GuC has already been reset.
	 * Instead of trying (in vain) to communicate with it, let's just
	 * cleanup the doorbell HW and our internal state.
	 */
	if (guc->preempt_client) {
		__destroy_doorbell(guc->preempt_client);
		__update_doorbell_desc(guc->preempt_client,
				       GUC_DOORBELL_INVALID);
	}
	__destroy_doorbell(guc->execbuf_client);
	__update_doorbell_desc(guc->execbuf_client, GUC_DOORBELL_INVALID);
}

/**
 * guc_client_alloc() - Allocate an intel_guc_client
 * @dev_priv:	driver private data structure
 * @engines:	The set of engines to enable for this client
 * @priority:	four levels priority _CRITICAL, _HIGH, _NORMAL and _LOW
 *		The kernel client to replace ExecList submission is created with
 *		NORMAL priority. Priority of a client for scheduler can be HIGH,
 *		while a preemption context can use CRITICAL.
 * @ctx:	the context that owns the client (we use the default render
 *		context)
 *
 * Return:	An intel_guc_client object if success, else an error pointer.
 */
static struct intel_guc_client *
guc_client_alloc(struct drm_i915_private *dev_priv,
		 u32 engines,
		 u32 priority,
		 struct i915_gem_context *ctx)
{
	struct intel_guc_client *client;
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_vma *vma;
	void *vaddr;
	int ret;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->guc = guc;
	client->owner = ctx;
	client->engines = engines;
	client->priority = priority;
	client->doorbell_id = GUC_DOORBELL_INVALID;
	spin_lock_init(&client->wq_lock);

	ret = ida_simple_get(&guc->stage_ids, 0, GUC_MAX_STAGE_DESCRIPTORS,
			     GFP_KERNEL);
	if (ret < 0)
		goto err_client;

	client->stage_id = ret;

	/* The first page is doorbell/proc_desc. Two followed pages are wq. */
	vma = intel_guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_id;
	}

	/* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
	client->vma = vma;

	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto err_vma;
	}
	client->vaddr = vaddr;

	client->doorbell_offset = __select_cacheline(guc);

	/*
	 * Since the doorbell only requires a single cacheline, we can save
	 * space by putting the application process descriptor in the same
	 * page. Use the half of the page that doesn't include the doorbell.
	 */
	if (client->doorbell_offset >= (GUC_DB_SIZE / 2))
		client->proc_desc_offset = 0;
	else
		client->proc_desc_offset = (GUC_DB_SIZE / 2);

	guc_proc_desc_init(guc, client);
	guc_stage_desc_init(guc, client);

	ret = reserve_doorbell(client);
	if (ret)
		goto err_vaddr;

	DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: stage_id %u\n",
			 priority, client, client->engines, client->stage_id);
	DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n",
			 client->doorbell_id, client->doorbell_offset);

	return client;

err_vaddr:
	i915_gem_object_unpin_map(client->vma->obj);
err_vma:
	i915_vma_unpin_and_release(&client->vma);
err_id:
	ida_simple_remove(&guc->stage_ids, client->stage_id);
err_client:
	kfree(client);
	return ERR_PTR(ret);
}

static void guc_client_free(struct intel_guc_client *client)
{
	unreserve_doorbell(client);
	guc_stage_desc_fini(client->guc, client);
	i915_gem_object_unpin_map(client->vma->obj);
	i915_vma_unpin_and_release(&client->vma);
	ida_simple_remove(&client->guc->stage_ids, client->stage_id);
	kfree(client);
}

static inline bool ctx_save_restore_disabled(struct intel_context *ce)
{
	u32 sr = ce->lrc_reg_state[CTX_CONTEXT_CONTROL + 1];

#define SR_DISABLED \
	_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | \
			   CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT)

	return (sr & SR_DISABLED) == SR_DISABLED;

#undef SR_DISABLED
}
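
/*
 * Write once into each engine's preempt context ring: a breadcrumb that
 * stores GUC_PREEMPT_FINISHED into the HWSP, followed by a user interrupt.
 * Since context save/restore is inhibited for this context, RING_HEAD and
 * RING_TAIL never move, so the same GUC_PREEMPT_BREADCRUMB_DWORDS-sized
 * sequence is replayed for every preempt-to-idle request.
 */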

static void guc_fill_preempt_context(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct intel_guc_client *client = guc->preempt_client;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id) {
		struct intel_context *ce = &client->owner->engine[id];
		u32 addr = intel_hws_preempt_done_address(engine);
		u32 *cs;

		GEM_BUG_ON(!ce->pin_count);

		/*
		 * We rely on this context image *not* being saved after
		 * preemption. This ensures that the RING_HEAD / RING_TAIL
		 * remain pointing at initial values forever.
		 */
		GEM_BUG_ON(!ctx_save_restore_disabled(ce));

		cs = ce->ring->vaddr;
		if (id == RCS) {
			cs = gen8_emit_ggtt_write_rcs(cs,
						      GUC_PREEMPT_FINISHED,
						      addr);
		} else {
			cs = gen8_emit_ggtt_write(cs,
						  GUC_PREEMPT_FINISHED,
						  addr);
			*cs++ = MI_NOOP;
			*cs++ = MI_NOOP;
		}
		*cs++ = MI_USER_INTERRUPT;
		*cs++ = MI_NOOP;

		GEM_BUG_ON((void *)cs - ce->ring->vaddr !=
			   GUC_PREEMPT_BREADCRUMB_BYTES);

		flush_ggtt_writes(ce->ring->vma);
	}
}
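
/*
 * Create the execbuf_client (normal priority, all engines) and, when a
 * preempt context exists, the preempt_client (high priority) whose ring is
 * then pre-filled with the preemption breadcrumb.
 */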

static int guc_clients_create(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct intel_guc_client *client;

	GEM_BUG_ON(guc->execbuf_client);
	GEM_BUG_ON(guc->preempt_client);

	client = guc_client_alloc(dev_priv,
				  INTEL_INFO(dev_priv)->ring_mask,
				  GUC_CLIENT_PRIORITY_KMD_NORMAL,
				  dev_priv->kernel_context);
	if (IS_ERR(client)) {
		DRM_ERROR("Failed to create GuC client for submission!\n");
		return PTR_ERR(client);
	}
	guc->execbuf_client = client;

	if (dev_priv->preempt_context) {
		client = guc_client_alloc(dev_priv,
					  INTEL_INFO(dev_priv)->ring_mask,
					  GUC_CLIENT_PRIORITY_KMD_HIGH,
					  dev_priv->preempt_context);
		if (IS_ERR(client)) {
			DRM_ERROR("Failed to create GuC client for preemption!\n");
			guc_client_free(guc->execbuf_client);
			guc->execbuf_client = NULL;
			return PTR_ERR(client);
		}
		guc->preempt_client = client;

		guc_fill_preempt_context(guc);
	}

	return 0;
}

static void guc_clients_destroy(struct intel_guc *guc)
{
	struct intel_guc_client *client;

	client = fetch_and_zero(&guc->preempt_client);
	if (client)
		guc_client_free(client);

	client = fetch_and_zero(&guc->execbuf_client);
	guc_client_free(client);
}

/*
 * Set up the memory resources to be shared with the GuC (via the GGTT)
 * at firmware loading time.
 */
int intel_guc_submission_init(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret;

	if (guc->stage_desc_pool)
		return 0;

	ret = guc_stage_desc_pool_create(guc);
	if (ret)
		return ret;
	/*
	 * Keep static analysers happy, let them know that we allocated the
	 * vma after testing that it didn't exist earlier.
	 */
	GEM_BUG_ON(!guc->stage_desc_pool);

	WARN_ON(!guc_verify_doorbells(guc));
	ret = guc_clients_create(guc);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		guc->preempt_work[id].engine = engine;
		INIT_WORK(&guc->preempt_work[id].work, inject_preempt_context);
	}

	return 0;
}

void intel_guc_submission_fini(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		cancel_work_sync(&guc->preempt_work[id].work);

	guc_clients_destroy(guc);
	WARN_ON(!guc_verify_doorbells(guc));

	guc_stage_desc_pool_destroy(guc);
}

static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int irqs;

	/*
	 * Tell all command streamers to forward interrupts (but not vblank)
	 * to GuC.
	 */
	irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MODE_GEN7(engine), irqs);

	/* route USER_INTERRUPT to Host, all others are sent to GuC. */
	irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
	       GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
	/* These three registers have the same bit definitions */
	I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
	I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
	I915_WRITE(GUC_WD_VECS_IER, ~irqs);

	/*
	 * The REDIRECT_TO_GUC bit of the PMINTRMSK register directs all
	 * (unmasked) PM interrupts to the GuC. All other bits of this
	 * register *disable* generation of a specific interrupt.
	 *
	 * 'pm_intrmsk_mbz' indicates bits that are NOT to be set when
	 * writing to the PM interrupt mask register, i.e. interrupts
	 * that must not be disabled.
	 *
	 * If the GuC is handling these interrupts, then we must not let
	 * the PM code disable ANY interrupt that the GuC is expecting.
	 * So for each ENABLED (0) bit in this register, we must SET the
	 * bit in pm_intrmsk_mbz so that it's left enabled for the GuC.
	 * GuC needs the ARAT expired interrupt unmasked, hence it is set
	 * in pm_intrmsk_mbz.
	 *
	 * Here we CLEAR the REDIRECT_TO_GUC bit in pm_intrmsk_mbz, which
	 * will result in the register bit being left SET!
	 */
	rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
	rps->pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
}

static void guc_interrupts_release(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int irqs;

	/*
	 * Tell all command streamers NOT to forward interrupts or vblank
	 * to GuC.
	 */
	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
	irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MODE_GEN7(engine), irqs);

	/* route all GT interrupts to the host */
	I915_WRITE(GUC_BCS_RCS_IER, 0);
	I915_WRITE(GUC_VCS2_VCS1_IER, 0);
	I915_WRITE(GUC_WD_VECS_IER, 0);

	rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
	rps->pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
}
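
/*
 * With GuC submission the breadcrumb interrupt is kept pinned for as long
 * as the engine is awake: it is acquired on unpark and released on park.
 */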

static void guc_submission_park(struct intel_engine_cs *engine)
{
	intel_engine_unpin_breadcrumbs_irq(engine);
}

static void guc_submission_unpark(struct intel_engine_cs *engine)
{
	intel_engine_pin_breadcrumbs_irq(engine);
}

int intel_guc_submission_enable(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	/*
	 * We're using GuC work items for submitting work through GuC. Since
	 * we're coalescing multiple requests from a single context into a
	 * single work item prior to assigning it to execlist_port, we can
	 * never have more work items than the total number of ports (for all
	 * engines). The GuC firmware is controlling the HEAD of the work
	 * queue, and it is guaranteed that it will remove the work item from
	 * the queue before our request is completed.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.port) *
		     sizeof(struct guc_wq_item) *
		     I915_NUM_ENGINES > GUC_WQ_SIZE);

	GEM_BUG_ON(!guc->execbuf_client);

	guc_reset_wq(guc->execbuf_client);
	if (guc->preempt_client)
		guc_reset_wq(guc->preempt_client);

	err = intel_guc_sample_forcewake(guc);
	if (err)
		return err;

	err = guc_clients_doorbell_init(guc);
	if (err)
		return err;

	/* Take over from manual control of ELSP (execlists) */
	guc_interrupts_capture(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		struct intel_engine_execlists * const execlists =
			&engine->execlists;

		execlists->tasklet.func = guc_submission_tasklet;
		engine->park = guc_submission_park;
		engine->unpark = guc_submission_unpark;

		engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
	}

	return 0;
}

void intel_guc_submission_disable(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	GEM_BUG_ON(dev_priv->gt.awake); /* GT should be parked first */

	guc_interrupts_release(dev_priv);
	guc_clients_doorbell_fini(guc);

	/* Revert back to manual ELSP submission */
	intel_engines_reset_default_submission(dev_priv);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_guc.c"
#endif