drivers/gpu/drm/i915/gt/uc/intel_guc.c
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2014-2019 Intel Corporation
4  */
5
6 #include "gt/intel_gt.h"
7 #include "gt/intel_gt_irq.h"
8 #include "gt/intel_gt_pm_irq.h"
9 #include "intel_guc.h"
10 #include "intel_guc_ads.h"
11 #include "intel_guc_submission.h"
12 #include "i915_drv.h"
13
14 /**
15  * DOC: GuC
16  *
17  * The GuC is a microcontroller inside the GT HW, introduced in gen9. The GuC is
18  * designed to offload some of the functionality usually performed by the host
19  * driver; currently the main operations it can take care of are:
20  *
21  * - Authentication of the HuC, which is required to fully enable HuC usage.
22  * - Low latency graphics context scheduling (a.k.a. GuC submission).
23  * - GT Power management.
24  *
25  * The enable_guc module parameter can be used to select which of those
26  * operations to enable within GuC. Note that not all the operations are
27  * supported on all gen9+ platforms.
28  *
29  * Enabling the GuC is not mandatory and therefore the firmware is only loaded
30  * if at least one of the operations is selected. However, not loading the GuC
31  * might result in the loss of some features that do require the GuC (currently
32  * just the HuC, but more are expected to land in the future).
33  */
34
35 void intel_guc_notify(struct intel_guc *guc)
36 {
37         struct intel_gt *gt = guc_to_gt(guc);
38
39         /*
40          * On Gen11+, the value written to the register is passed as a payload
41          * to the FW. However, the FW currently treats all values the same way
42          * (H2G interrupt), so we can just write the value that the HW expects
43          * on older gens.
44          */
45         intel_uncore_write(gt->uncore, guc->notify_reg, GUC_SEND_TRIGGER);
46 }
47
48 static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
49 {
50         GEM_BUG_ON(!guc->send_regs.base);
51         GEM_BUG_ON(!guc->send_regs.count);
52         GEM_BUG_ON(i >= guc->send_regs.count);
53
54         return _MMIO(guc->send_regs.base + 4 * i);
55 }
56
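    /*
     * Cache the forcewake domains covering all of the GuC send registers so
     * that intel_guc_send_mmio() can acquire them with a single
     * forcewake_get() call.
     */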
57 void intel_guc_init_send_regs(struct intel_guc *guc)
58 {
59         struct intel_gt *gt = guc_to_gt(guc);
60         enum forcewake_domains fw_domains = 0;
61         unsigned int i;
62
63         GEM_BUG_ON(!guc->send_regs.base);
64         GEM_BUG_ON(!guc->send_regs.count);
65
66         for (i = 0; i < guc->send_regs.count; i++) {
67                 fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
68                                         guc_send_reg(guc, i),
69                                         FW_REG_READ | FW_REG_WRITE);
70         }
71         guc->send_regs.fw_domains = fw_domains;
72 }
73
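    /*
     * GuC-to-host interrupt plumbing. Gen9 routes GuC events through the
     * shared GT PM interrupt registers, while gen11+ has dedicated
     * GUC_SG_INTR_* enable and mask registers; intel_guc_init_early()
     * selects the matching set of callbacks.
     */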
74 static void gen9_reset_guc_interrupts(struct intel_guc *guc)
75 {
76         struct intel_gt *gt = guc_to_gt(guc);
77
78         assert_rpm_wakelock_held(&gt->i915->runtime_pm);
79
80         spin_lock_irq(&gt->irq_lock);
81         gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
82         spin_unlock_irq(&gt->irq_lock);
83 }
84
85 static void gen9_enable_guc_interrupts(struct intel_guc *guc)
86 {
87         struct intel_gt *gt = guc_to_gt(guc);
88
89         assert_rpm_wakelock_held(&gt->i915->runtime_pm);
90
91         spin_lock_irq(&gt->irq_lock);
92         WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
93                      gt->pm_guc_events);
94         gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
95         spin_unlock_irq(&gt->irq_lock);
96 }
97
98 static void gen9_disable_guc_interrupts(struct intel_guc *guc)
99 {
100         struct intel_gt *gt = guc_to_gt(guc);
101
102         assert_rpm_wakelock_held(&gt->i915->runtime_pm);
103
104         spin_lock_irq(&gt->irq_lock);
105
106         gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);
107
108         spin_unlock_irq(&gt->irq_lock);
109         intel_synchronize_irq(gt->i915);
110
111         gen9_reset_guc_interrupts(guc);
112 }
113
114 static void gen11_reset_guc_interrupts(struct intel_guc *guc)
115 {
116         struct intel_gt *gt = guc_to_gt(guc);
117
118         spin_lock_irq(&gt->irq_lock);
119         gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
120         spin_unlock_irq(&gt->irq_lock);
121 }
122
123 static void gen11_enable_guc_interrupts(struct intel_guc *guc)
124 {
125         struct intel_gt *gt = guc_to_gt(guc);
126         u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
127
128         spin_lock_irq(&gt->irq_lock);
129         WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
130         intel_uncore_write(gt->uncore,
131                            GEN11_GUC_SG_INTR_ENABLE, events);
132         intel_uncore_write(gt->uncore,
133                            GEN11_GUC_SG_INTR_MASK, ~events);
134         spin_unlock_irq(&gt->irq_lock);
135 }
136
137 static void gen11_disable_guc_interrupts(struct intel_guc *guc)
138 {
139         struct intel_gt *gt = guc_to_gt(guc);
140
141         spin_lock_irq(&gt->irq_lock);
142
143         intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
144         intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
145
146         spin_unlock_irq(&gt->irq_lock);
147         intel_synchronize_irq(gt->i915);
148
149         gen11_reset_guc_interrupts(guc);
150 }
151
152 void intel_guc_init_early(struct intel_guc *guc)
153 {
154         struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
155
156         intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC);
157         intel_guc_ct_init_early(&guc->ct);
158         intel_guc_log_init_early(&guc->log);
159         intel_guc_submission_init_early(guc);
160
161         mutex_init(&guc->send_mutex);
162         spin_lock_init(&guc->irq_lock);
163         if (GRAPHICS_VER(i915) >= 11) {
164                 guc->notify_reg = GEN11_GUC_HOST_INTERRUPT;
165                 guc->interrupts.reset = gen11_reset_guc_interrupts;
166                 guc->interrupts.enable = gen11_enable_guc_interrupts;
167                 guc->interrupts.disable = gen11_disable_guc_interrupts;
168                 guc->send_regs.base =
169                         i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
170                 guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
171
172         } else {
173                 guc->notify_reg = GUC_SEND_INTERRUPT;
174                 guc->interrupts.reset = gen9_reset_guc_interrupts;
175                 guc->interrupts.enable = gen9_enable_guc_interrupts;
176                 guc->interrupts.disable = gen9_disable_guc_interrupts;
177                 guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
178                 guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
179                 BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
180         }
181 }
182
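    /*
     * The guc_ctl_*_flags() helpers below assemble the individual GUC_CTL_*
     * parameter dwords (debug verbosity, feature selection, log buffer layout
     * and ADS location) that guc_init_params() packs into guc->params.
     */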
183 static u32 guc_ctl_debug_flags(struct intel_guc *guc)
184 {
185         u32 level = intel_guc_log_get_level(&guc->log);
186         u32 flags = 0;
187
188         if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
189                 flags |= GUC_LOG_DISABLED;
190         else
191                 flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
192                          GUC_LOG_VERBOSITY_SHIFT;
193
194         return flags;
195 }
196
197 static u32 guc_ctl_feature_flags(struct intel_guc *guc)
198 {
199         u32 flags = 0;
200
201         if (!intel_guc_submission_is_used(guc))
202                 flags |= GUC_CTL_DISABLE_SCHEDULER;
203
204         return flags;
205 }
206
207 static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
208 {
209         u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
210         u32 flags;
211
212         #if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
213         #define UNIT SZ_1M
214         #define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
215         #else
216         #define UNIT SZ_4K
217         #define FLAG 0
218         #endif
219
220         BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
221         BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
222         BUILD_BUG_ON(!DPC_BUFFER_SIZE);
223         BUILD_BUG_ON(!IS_ALIGNED(DPC_BUFFER_SIZE, UNIT));
224         BUILD_BUG_ON(!ISR_BUFFER_SIZE);
225         BUILD_BUG_ON(!IS_ALIGNED(ISR_BUFFER_SIZE, UNIT));
226
227         BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
228                         (GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
229         BUILD_BUG_ON((DPC_BUFFER_SIZE / UNIT - 1) >
230                         (GUC_LOG_DPC_MASK >> GUC_LOG_DPC_SHIFT));
231         BUILD_BUG_ON((ISR_BUFFER_SIZE / UNIT - 1) >
232                         (GUC_LOG_ISR_MASK >> GUC_LOG_ISR_SHIFT));
233
234         flags = GUC_LOG_VALID |
235                 GUC_LOG_NOTIFY_ON_HALF_FULL |
236                 FLAG |
237                 ((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
238                 ((DPC_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DPC_SHIFT) |
239                 ((ISR_BUFFER_SIZE / UNIT - 1) << GUC_LOG_ISR_SHIFT) |
240                 (offset << GUC_LOG_BUF_ADDR_SHIFT);
241
242         #undef UNIT
243         #undef FLAG
244
245         return flags;
246 }
247
248 static u32 guc_ctl_ads_flags(struct intel_guc *guc)
249 {
250         u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
251         u32 flags = ads << GUC_ADS_ADDR_SHIFT;
252
253         return flags;
254 }
255
256 /*
257  * Initialise the GuC parameter block before starting the firmware
258  * transfer. These parameters are read by the firmware on startup
259  * and cannot be changed thereafter.
260  */
261 static void guc_init_params(struct intel_guc *guc)
262 {
263         u32 *params = guc->params;
264         int i;
265
266         BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
267
268         params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
269         params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
270         params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
271         params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
272
273         for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
274                 DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
275 }
276
277 /*
278  * Write the GuC parameter block to the SOFT_SCRATCH registers, from where
279  * the firmware reads it on startup. Once the firmware has started, these
280  * values cannot be changed.
281  */
282 void intel_guc_write_params(struct intel_guc *guc)
283 {
284         struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
285         int i;
286
287         /*
288          * All SOFT_SCRATCH registers are in FORCEWAKE_GT domain and
289          * they are power context saved so it's ok to release forcewake
290          * when we are done here and take it again at xfer time.
291          */
292         intel_uncore_forcewake_get(uncore, FORCEWAKE_GT);
293
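            /*
             * SOFT_SCRATCH(0) is cleared and the parameter dwords are written
             * to SOFT_SCRATCH(1..GUC_CTL_MAX_DWORDS); on gen9 SOFT_SCRATCH(0)
             * also serves as the first MMIO send/response register.
             */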
294         intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);
295
296         for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
297                 intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);
298
299         intel_uncore_forcewake_put(uncore, FORCEWAKE_GT);
300 }
301
302 int intel_guc_init(struct intel_guc *guc)
303 {
304         struct intel_gt *gt = guc_to_gt(guc);
305         int ret;
306
307         ret = intel_uc_fw_init(&guc->fw);
308         if (ret)
309                 goto out;
310
311         ret = intel_guc_log_create(&guc->log);
312         if (ret)
313                 goto err_fw;
314
315         ret = intel_guc_ads_create(guc);
316         if (ret)
317                 goto err_log;
318         GEM_BUG_ON(!guc->ads_vma);
319
320         ret = intel_guc_ct_init(&guc->ct);
321         if (ret)
322                 goto err_ads;
323
324         if (intel_guc_submission_is_used(guc)) {
325                 /*
326                  * This is stuff we need to have available at fw load time
327                  * if we are planning to enable submission later
328                  */
329                 ret = intel_guc_submission_init(guc);
330                 if (ret)
331                         goto err_ct;
332         }
333
334         /* now that everything is perma-pinned, initialize the parameters */
335         guc_init_params(guc);
336
337         /* We need to notify the guc whenever we change the GGTT */
338         i915_ggtt_enable_guc(gt->ggtt);
339
340         intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_LOADABLE);
341
342         return 0;
343
344 err_ct:
345         intel_guc_ct_fini(&guc->ct);
346 err_ads:
347         intel_guc_ads_destroy(guc);
348 err_log:
349         intel_guc_log_destroy(&guc->log);
350 err_fw:
351         intel_uc_fw_fini(&guc->fw);
352 out:
353         i915_probe_error(gt->i915, "failed with %d\n", ret);
354         return ret;
355 }
356
357 void intel_guc_fini(struct intel_guc *guc)
358 {
359         struct intel_gt *gt = guc_to_gt(guc);
360
361         if (!intel_uc_fw_is_loadable(&guc->fw))
362                 return;
363
364         i915_ggtt_disable_guc(gt->ggtt);
365
366         if (intel_guc_submission_is_used(guc))
367                 intel_guc_submission_fini(guc);
368
369         intel_guc_ct_fini(&guc->ct);
370
371         intel_guc_ads_destroy(guc);
372         intel_guc_log_destroy(&guc->log);
373         intel_uc_fw_fini(&guc->fw);
374 }
375
376 /*
377  * This function implements the MMIO based host to GuC interface.
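     * The action dwords are written to the send (scratch) registers, the GuC
     * is kicked via intel_guc_notify(), and the response is then polled from
     * send register 0, with any extra response data read back from the
     * remaining registers into response_buf.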
378  */
379 int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
380                         u32 *response_buf, u32 response_buf_size)
381 {
382         struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
383         u32 status;
384         int i;
385         int ret;
386
387         GEM_BUG_ON(!len);
388         GEM_BUG_ON(len > guc->send_regs.count);
389
390         /* We expect only action code */
391         GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK);
392
393         /* If CT is available, we expect to use MMIO only during init/fini */
394         GEM_BUG_ON(*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
395                    *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);
396
397         mutex_lock(&guc->send_mutex);
398         intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);
399
400         for (i = 0; i < len; i++)
401                 intel_uncore_write(uncore, guc_send_reg(guc, i), action[i]);
402
403         intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));
404
405         intel_guc_notify(guc);
406
407         /*
408          * No GuC command should ever take longer than 10ms.
409          * Fast commands should still complete in 10us.
410          */
411         ret = __intel_wait_for_register_fw(uncore,
412                                            guc_send_reg(guc, 0),
413                                            INTEL_GUC_MSG_TYPE_MASK,
414                                            INTEL_GUC_MSG_TYPE_RESPONSE <<
415                                            INTEL_GUC_MSG_TYPE_SHIFT,
416                                            10, 10, &status);
417         /* If GuC explicitly returned an error, convert it to -EIO */
418         if (!ret && !INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(status))
419                 ret = -EIO;
420
421         if (ret) {
422                 DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n",
423                           action[0], ret, status);
424                 goto out;
425         }
426
427         if (response_buf) {
428                 int count = min(response_buf_size, guc->send_regs.count - 1);
429
430                 for (i = 0; i < count; i++)
431                         response_buf[i] = intel_uncore_read(uncore,
432                                                             guc_send_reg(guc, i + 1));
433         }
434
435         /* Use data from the GuC response as our return value */
436         ret = INTEL_GUC_MSG_TO_DATA(status);
437
438 out:
439         intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
440         mutex_unlock(&guc->send_mutex);
441
442         return ret;
443 }
444
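    /*
     * Process a GuC-to-host notification payload. Only events enabled in
     * guc->msg_enabled_mask are handled; log buffer flush and crash dump
     * notifications are forwarded to the GuC log machinery.
     */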
445 int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
446                                        const u32 *payload, u32 len)
447 {
448         u32 msg;
449
450         if (unlikely(!len))
451                 return -EPROTO;
452
453         /* Make sure to handle only enabled messages */
454         msg = payload[0] & guc->msg_enabled_mask;
455
456         if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
457                    INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
458                 intel_guc_log_handle_flush_event(&guc->log);
459
460         return 0;
461 }
462
463 /**
464  * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
465  * @guc: intel_guc structure
466  * @rsa_offset: rsa offset w.r.t ggtt base of huc vma
467  *
468  * Triggers a HuC firmware authentication request to the GuC via intel_guc_send
469  * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function is invoked by
470  * intel_huc_auth().
471  *
472  * Return:      non-zero code on error
473  */
474 int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
475 {
476         u32 action[] = {
477                 INTEL_GUC_ACTION_AUTHENTICATE_HUC,
478                 rsa_offset
479         };
480
481         return intel_guc_send(guc, action, ARRAY_SIZE(action));
482 }
483
484 /**
485  * intel_guc_suspend() - notify GuC entering suspend state
486  * @guc:        the guc
487  */
488 int intel_guc_suspend(struct intel_guc *guc)
489 {
490         struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
491         int ret;
492         u32 status;
493         u32 action[] = {
494                 INTEL_GUC_ACTION_ENTER_S_STATE,
495                 GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
496         };
497
498         /*
499          * If GuC communication is enabled but submission is not supported,
500          * we do not need to suspend the GuC.
501          */
502         if (!intel_guc_submission_is_used(guc) || !intel_guc_is_ready(guc))
503                 return 0;
504
505         /*
506          * The ENTER_S_STATE action queues the save/restore operation in GuC FW
507          * and then returns, so waiting on the H2G is not enough to guarantee
508          * GuC is done. When all the processing is done, GuC writes
509          * INTEL_GUC_SLEEP_STATE_SUCCESS to scratch register 14, so we can poll
510          * on that. Note that GuC does not ensure that the value in the register
511          * is different from INTEL_GUC_SLEEP_STATE_SUCCESS while the action is
512          * in progress so we need to take care of that ourselves as well.
513          */
514
515         intel_uncore_write(uncore, SOFT_SCRATCH(14),
516                            INTEL_GUC_SLEEP_STATE_INVALID_MASK);
517
518         ret = intel_guc_send(guc, action, ARRAY_SIZE(action));
519         if (ret)
520                 return ret;
521
522         ret = __intel_wait_for_register(uncore, SOFT_SCRATCH(14),
523                                         INTEL_GUC_SLEEP_STATE_INVALID_MASK,
524                                         0, 0, 10, &status);
525         if (ret)
526                 return ret;
527
528         if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) {
529                 DRM_ERROR("GuC failed to change sleep state. "
530                           "action=0x%x, err=%u\n",
531                           action[0], status);
532                 return -EIO;
533         }
534
535         return 0;
536 }
537
538 /**
539  * intel_guc_reset_engine() - ask GuC to reset an engine
540  * @guc:        intel_guc structure
541  * @engine:     engine to be reset
542  */
543 int intel_guc_reset_engine(struct intel_guc *guc,
544                            struct intel_engine_cs *engine)
545 {
546         /* XXX: to be implemented with submission interface rework */
547
548         return -ENODEV;
549 }
550
551 /**
552  * intel_guc_resume() - notify GuC resuming from suspend state
553  * @guc:        the guc
554  */
555 int intel_guc_resume(struct intel_guc *guc)
556 {
557         /* XXX: to be implemented with submission interface rework */
558         return 0;
559 }
560
561 /**
562  * DOC: GuC Memory Management
563  *
564  * GuC can't allocate any memory for its own usage, so all the allocations must
565  * be handled by the host driver. GuC accesses the memory via the GGTT, with the
566  * exception of the top and bottom parts of the 4GB address space, which are
567  * instead re-mapped by the GuC HW to the memory location of the FW itself (WOPCM)
568  * or other parts of the HW. The driver must take care not to place objects that
569  * the GuC is going to access in these reserved ranges. The layout of the GuC
570  * address space is shown below:
571  *
572  * ::
573  *
574  *     +===========> +====================+ <== FFFF_FFFF
575  *     ^             |      Reserved      |
576  *     |             +====================+ <== GUC_GGTT_TOP
577  *     |             |                    |
578  *     |             |        DRAM        |
579  *    GuC            |                    |
580  *  Address    +===> +====================+ <== GuC ggtt_pin_bias
581  *   Space     ^     |                    |
582  *     |       |     |                    |
583  *     |      GuC    |        GuC         |
584  *     |     WOPCM   |       WOPCM        |
585  *     |      Size   |                    |
586  *     |       |     |                    |
587  *     v       v     |                    |
588  *     +=======+===> +====================+ <== 0000_0000
589  *
590  * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to GuC WOPCM
591  * while the upper part of GuC Address Space [ggtt_pin_bias, GUC_GGTT_TOP) is mapped
592  * to DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size.
593  */
594
595 /**
596  * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
597  * @guc:        the guc
598  * @size:       size of area to allocate (both virtual space and memory)
599  *
600  * This is a wrapper to create an object for use with the GuC. In order to
601  * use it inside the GuC, an object needs to be pinned for its lifetime, so we
602  * allocate both some backing storage and a range inside the Global GTT. We must
603  * pin it in the GGTT somewhere other than [0, GUC ggtt_pin_bias) because that
604  * range is reserved inside GuC.
605  *
606  * Return:      An i915_vma if successful, otherwise an ERR_PTR.
607  */
608 struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
609 {
610         struct intel_gt *gt = guc_to_gt(guc);
611         struct drm_i915_gem_object *obj;
612         struct i915_vma *vma;
613         u64 flags;
614         int ret;
615
616         obj = i915_gem_object_create_shmem(gt->i915, size);
617         if (IS_ERR(obj))
618                 return ERR_CAST(obj);
619
620         vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
621         if (IS_ERR(vma))
622                 goto err;
623
624         flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
625         ret = i915_ggtt_pin(vma, NULL, 0, flags);
626         if (ret) {
627                 vma = ERR_PTR(ret);
628                 goto err;
629         }
630
631         return i915_vma_make_unshrinkable(vma);
632
633 err:
634         i915_gem_object_put(obj);
635         return vma;
636 }
637
638 /**
639  * intel_guc_allocate_and_map_vma() - Allocate and map VMA for GuC usage
640  * @guc:        the guc
641  * @size:       size of area to allocate (both virtual space and memory)
642  * @out_vma:    return variable for the allocated vma pointer
643  * @out_vaddr:  return variable for the obj mapping
644  *
645  * This wrapper calls intel_guc_allocate_vma() and then maps the allocated
646  * object with I915_MAP_WB.
647  *
648  * Return:      0 if successful, a negative errno code otherwise.
649  */
650 int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
651                                    struct i915_vma **out_vma, void **out_vaddr)
652 {
653         struct i915_vma *vma;
654         void *vaddr;
655
656         vma = intel_guc_allocate_vma(guc, size);
657         if (IS_ERR(vma))
658                 return PTR_ERR(vma);
659
660         vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
661                                                  i915_coherent_map_type(guc_to_gt(guc)->i915,
662                                                                         vma->obj, true));
663         if (IS_ERR(vaddr)) {
664                 i915_vma_unpin_and_release(&vma, 0);
665                 return PTR_ERR(vaddr);
666         }
667
668         *out_vma = vma;
669         *out_vaddr = vaddr;
670
671         return 0;
672 }
673
674 /**
675  * intel_guc_load_status - dump information about GuC load status
676  * @guc: the GuC
677  * @p: the &drm_printer
678  *
679  * Pretty printer for GuC load status.
680  */
681 void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p)
682 {
683         struct intel_gt *gt = guc_to_gt(guc);
684         struct intel_uncore *uncore = gt->uncore;
685         intel_wakeref_t wakeref;
686
687         if (!intel_guc_is_supported(guc)) {
688                 drm_printf(p, "GuC not supported\n");
689                 return;
690         }
691
692         if (!intel_guc_is_wanted(guc)) {
693                 drm_printf(p, "GuC disabled\n");
694                 return;
695         }
696
697         intel_uc_fw_dump(&guc->fw, p);
698
699         with_intel_runtime_pm(uncore->rpm, wakeref) {
700                 u32 status = intel_uncore_read(uncore, GUC_STATUS);
701                 u32 i;
702
703                 drm_printf(p, "\nGuC status 0x%08x:\n", status);
704                 drm_printf(p, "\tBootrom status = 0x%x\n",
705                            (status & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
706                 drm_printf(p, "\tuKernel status = 0x%x\n",
707                            (status & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
708                 drm_printf(p, "\tMIA Core status = 0x%x\n",
709                            (status & GS_MIA_MASK) >> GS_MIA_SHIFT);
710                 drm_puts(p, "\nScratch registers:\n");
711                 for (i = 0; i < 16; i++) {
712                         drm_printf(p, "\t%2d: \t0x%x\n",
713                                    i, intel_uncore_read(uncore, SOFT_SCRATCH(i)));
714                 }
715         }
716 }