/*
 * Copyright © 2014-2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/debugfs.h>
#include <linux/relay.h>

#include "intel_guc_log.h"
#include "i915_drv.h"

static void guc_log_capture_logs(struct intel_guc *guc);

/**
 * DOC: GuC firmware log
 *
 * Firmware log is enabled by setting i915.guc_log_level to a positive level.
 * Log data is printed out via reading the debugfs file i915_guc_log_dump.
 * Reading from i915_guc_load_status will print out the firmware loading
 * status and scratch register values.
 */

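/*
 * Acknowledge to the GuC that the host has finished capturing the flushed
 * log buffer contents (sent after each capture in guc_log_capture_logs()).
 */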
static int guc_log_flush_complete(struct intel_guc *guc)
{
        u32 action[] = {
                INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE
        };

        return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

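/*
 * Ask the GuC to forcefully update (flush) the log buffer state, used e.g.
 * to collect leftover logs when logging is being disabled.
 */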
static int guc_log_flush(struct intel_guc *guc)
{
        u32 action[] = {
                INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH,
                0
        };

        return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

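/*
 * Send the UK_LOG_ENABLE_LOGGING action to turn GuC logging on/off and to
 * set its verbosity at runtime.
 */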
static int guc_log_control(struct intel_guc *guc, bool enable, u32 verbosity)
{
        union guc_log_control control_val = {
                {
                        .logging_enabled = enable,
                        .verbosity = verbosity,
                },
        };
        u32 action[] = {
                INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING,
                control_val.value
        };

        return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/*
 * Sub buffer switch callback. Called whenever relay has to switch to a new
 * sub buffer; relay stays on the same sub buffer if 0 is returned.
 */
static int subbuf_start_callback(struct rchan_buf *buf,
                                 void *subbuf,
                                 void *prev_subbuf,
                                 size_t prev_padding)
{
        /*
         * Use no-overwrite mode by default, where relay will stop accepting
         * new data if there are no empty sub buffers left.
         * There is no strict synchronization enforced by relay between Consumer
         * and Producer. In overwrite mode, there is a possibility of getting
         * inconsistent/garbled data, as the Producer could be writing to the
         * same sub buffer from which the Consumer is reading. This can't be
         * avoided unless the Consumer is fast enough and can always run in
         * tandem with the Producer.
         */
        if (relay_buf_full(buf))
                return 0;

        return 1;
}

/*
 * file_create() callback. Creates relay file in debugfs.
 */
static struct dentry *create_buf_file_callback(const char *filename,
                                               struct dentry *parent,
                                               umode_t mode,
                                               struct rchan_buf *buf,
                                               int *is_global)
{
        struct dentry *buf_file;

        /*
         * This is to enable the use of a single buffer for the relay channel
         * and correspondingly have a single file exposed to User, through
         * which it can collect the logs in order without any post-processing.
         * Need to set 'is_global' even if parent is NULL for early logging.
         */
        *is_global = 1;

        if (!parent)
                return NULL;

        /*
         * Not using the channel filename passed as an argument, since for each
         * channel relay appends the corresponding CPU number to the filename
         * passed in relay_open(). This should be fine as relay just needs a
         * dentry of the file associated with the channel buffer and that file's
         * name need not be the same as the filename passed as an argument.
         */
        buf_file = debugfs_create_file("guc_log", mode,
                                       parent, buf, &relay_file_operations);
        return buf_file;
}

/*
 * file_remove() default callback. Removes relay file in debugfs.
 */
static int remove_buf_file_callback(struct dentry *dentry)
{
        debugfs_remove(dentry);
        return 0;
}

/* relay channel callbacks */
static struct rchan_callbacks relay_callbacks = {
        .subbuf_start = subbuf_start_callback,
        .create_buf_file = create_buf_file_callback,
        .remove_buf_file = remove_buf_file_callback,
};

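/*
 * Link the relay channel with a "guc_log" file in debugfs. This can only be
 * done once the debugfs directory for the device is available, so it may run
 * later than the relay channel creation itself.
 */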
static int guc_log_relay_file_create(struct intel_guc *guc)
{
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        struct dentry *log_dir;
        int ret;

        if (!i915_modparams.guc_log_level)
                return 0;

        mutex_lock(&guc->log.runtime.relay_lock);

        /* For now create the log file in /sys/kernel/debug/dri/0 dir */
        log_dir = dev_priv->drm.primary->debugfs_root;

        /*
         * If the /sys/kernel/debug/dri/0 location does not exist, then debugfs
         * is not mounted and so we can't create the relay file.
         * The relay API seems to fit well with debugfs only; to use relay
         * there are 3 requirements, which can be met only by a debugfs file
         * in a straightforward/clean manner:
         * i)   Need the associated dentry pointer of the file, while opening
         *      the relay channel.
         * ii)  Should be able to use 'relay_file_operations' fops for the file.
         * iii) Set the 'i_private' field of the file's inode to the pointer of
         *      the relay channel buffer.
         */
        if (!log_dir) {
                DRM_ERROR("Debugfs dir not available yet for GuC log file\n");
                ret = -ENODEV;
                goto out_unlock;
        }

        ret = relay_late_setup_files(guc->log.runtime.relay_chan, "guc_log", log_dir);
        if (ret < 0 && ret != -EEXIST) {
                DRM_ERROR("Couldn't associate relay chan with file %d\n", ret);
                goto out_unlock;
        }

        ret = 0;

out_unlock:
        mutex_unlock(&guc->log.runtime.relay_lock);
        return ret;
}

static bool guc_log_has_relay(struct intel_guc *guc)
{
        lockdep_assert_held(&guc->log.runtime.relay_lock);

        return guc->log.runtime.relay_chan != NULL;
}

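/*
 * Mark the current relay sub buffer as fully written and make relay switch
 * to the next one, so the captured snapshot becomes visible to the reader of
 * the relay file.
 */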
static void guc_move_to_next_buf(struct intel_guc *guc)
{
        /*
         * Make sure the updates made in the sub buffer are visible when
         * Consumer sees the following update to offset inside the sub buffer.
         */
        smp_wmb();

        if (!guc_log_has_relay(guc))
                return;

        /* All data has been written, so now move the offset of sub buffer. */
        relay_reserve(guc->log.runtime.relay_chan, guc->log.vma->obj->base.size);

        /* Switch to the next sub buffer */
        relay_flush(guc->log.runtime.relay_chan);
}

static void *guc_get_write_buffer(struct intel_guc *guc)
{
        if (!guc_log_has_relay(guc))
                return NULL;

        /*
         * Just get the base address of a new sub buffer and copy data into it
         * ourselves. NULL will be returned in no-overwrite mode, if all sub
         * buffers are full. Could have used relay_write() to indirectly copy
         * the data, but that would have been a bit convoluted, as we need to
         * write to only certain locations inside a sub buffer, which cannot be
         * done without using relay_reserve() along with relay_write(). So it's
         * better to use relay_reserve() alone.
         */
        return relay_reserve(guc->log.runtime.relay_chan, 0);
}

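/*
 * Compare the buffer-full count reported by the GuC with the previously seen
 * value to detect, and account for, log buffer overflows since the last
 * capture.
 */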
static bool guc_check_log_buf_overflow(struct intel_guc *guc,
                                       enum guc_log_buffer_type type,
                                       unsigned int full_cnt)
{
        unsigned int prev_full_cnt = guc->log.prev_overflow_count[type];
        bool overflow = false;

        if (full_cnt != prev_full_cnt) {
                overflow = true;

                guc->log.prev_overflow_count[type] = full_cnt;
                guc->log.total_overflow_count[type] += full_cnt - prev_full_cnt;

                if (full_cnt < prev_full_cnt) {
                        /* buffer_full_cnt is a 4 bit counter */
                        guc->log.total_overflow_count[type] += 16;
                }
                DRM_ERROR_RATELIMITED("GuC log buffer overflow\n");
        }

        return overflow;
}

static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
{
        switch (type) {
        case GUC_ISR_LOG_BUFFER:
                return (GUC_LOG_ISR_PAGES + 1) * PAGE_SIZE;
        case GUC_DPC_LOG_BUFFER:
                return (GUC_LOG_DPC_PAGES + 1) * PAGE_SIZE;
        case GUC_CRASH_DUMP_LOG_BUFFER:
                return (GUC_LOG_CRASH_PAGES + 1) * PAGE_SIZE;
        default:
                MISSING_CASE(type);
        }

        return 0;
}

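/*
 * Snapshot the shared GuC log buffer into a relay sub buffer: for each log
 * buffer type, copy the state structure and the newly written data, then
 * update the read pointer in the shared buffer so the data is marked as
 * consumed.
 */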
static void guc_read_update_log_buffer(struct intel_guc *guc)
{
        unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
        struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
        struct guc_log_buffer_state log_buf_state_local;
        enum guc_log_buffer_type type;
        void *src_data, *dst_data;
        bool new_overflow;

        if (WARN_ON(!guc->log.runtime.buf_addr))
                return;

        /* Get the pointer to shared GuC log buffer */
        log_buf_state = src_data = guc->log.runtime.buf_addr;

        mutex_lock(&guc->log.runtime.relay_lock);

        /* Get the pointer to local buffer to store the logs */
        log_buf_snapshot_state = dst_data = guc_get_write_buffer(guc);

        if (unlikely(!log_buf_snapshot_state)) {
                /*
                 * Use rate limiting to avoid a deluge of messages; logs might
                 * be getting consumed by the User at a slow rate.
                 */
                DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
                guc->log.capture_miss_count++;
                mutex_unlock(&guc->log.runtime.relay_lock);

                return;
        }

        /* Actual logs are present from the 2nd page */
        src_data += PAGE_SIZE;
        dst_data += PAGE_SIZE;

        for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
                /*
                 * Make a copy of the state structure, inside GuC log buffer
                 * (which is uncached mapped), on the stack to avoid reading
                 * from it multiple times.
                 */
                memcpy(&log_buf_state_local, log_buf_state,
                       sizeof(struct guc_log_buffer_state));
                buffer_size = guc_get_log_buffer_size(type);
                read_offset = log_buf_state_local.read_ptr;
                write_offset = log_buf_state_local.sampled_write_ptr;
                full_cnt = log_buf_state_local.buffer_full_cnt;

                /* Bookkeeping stuff */
                guc->log.flush_count[type] += log_buf_state_local.flush_to_file;
                new_overflow = guc_check_log_buf_overflow(guc, type, full_cnt);

                /* Update the state of shared log buffer */
                log_buf_state->read_ptr = write_offset;
                log_buf_state->flush_to_file = 0;
                log_buf_state++;

                /* First copy the state structure in snapshot buffer */
                memcpy(log_buf_snapshot_state, &log_buf_state_local,
                       sizeof(struct guc_log_buffer_state));

                /*
                 * The write pointer could have been updated by the GuC
                 * firmware after sending the flush interrupt to the Host; for
                 * consistency set the write pointer value to the same value as
                 * sampled_write_ptr in the snapshot buffer.
                 */
                log_buf_snapshot_state->write_ptr = write_offset;
                log_buf_snapshot_state++;

                /* Now copy the actual logs. */
                if (unlikely(new_overflow)) {
                        /* copy the whole buffer in case of overflow */
                        read_offset = 0;
                        write_offset = buffer_size;
                } else if (unlikely((read_offset > buffer_size) ||
                                    (write_offset > buffer_size))) {
                        DRM_ERROR("invalid log buffer state\n");
                        /* copy whole buffer as offsets are unreliable */
                        read_offset = 0;
                        write_offset = buffer_size;
                }

                /* Just copy the newly written data */
                if (read_offset > write_offset) {
                        i915_memcpy_from_wc(dst_data, src_data, write_offset);
                        bytes_to_copy = buffer_size - read_offset;
                } else {
                        bytes_to_copy = write_offset - read_offset;
                }
                i915_memcpy_from_wc(dst_data + read_offset,
                                    src_data + read_offset, bytes_to_copy);

                src_data += buffer_size;
                dst_data += buffer_size;
        }

        guc_move_to_next_buf(guc);

        mutex_unlock(&guc->log.runtime.relay_lock);
}

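/*
 * Work item handler for guc->log.runtime.flush_work; captures the GuC logs
 * in process context, outside of the flush interrupt handler.
 */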
static void capture_logs_work(struct work_struct *work)
{
        struct intel_guc *guc =
                container_of(work, struct intel_guc, log.runtime.flush_work);

        guc_log_capture_logs(guc);
}

static bool guc_log_has_runtime(struct intel_guc *guc)
{
        return guc->log.runtime.buf_addr != NULL;
}

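/*
 * Set up the host-side runtime state needed to read the GuC log buffer:
 * move the log object to the WC domain and pin a WC mapping of it.
 */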
static int guc_log_runtime_create(struct intel_guc *guc)
{
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        void *vaddr;
        int ret;

        lockdep_assert_held(&dev_priv->drm.struct_mutex);

        if (!guc->log.vma)
                return -ENODEV;

        GEM_BUG_ON(guc_log_has_runtime(guc));

        ret = i915_gem_object_set_to_wc_domain(guc->log.vma->obj, true);
        if (ret)
                return ret;

        /*
         * Create a WC (Uncached for read) vmalloc mapping of log
         * buffer pages, so that we can directly get the data
         * (up-to-date) from memory.
         */
        vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC);
        if (IS_ERR(vaddr)) {
                ret = PTR_ERR(vaddr);
                DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
                return ret;
        }

        guc->log.runtime.buf_addr = vaddr;

        return 0;
}

static void guc_log_runtime_destroy(struct intel_guc *guc)
{
        /*
         * It's possible that the runtime stuff was never allocated because
         * GuC log was disabled at boot time.
         */
        if (!guc_log_has_runtime(guc))
                return;

        i915_gem_object_unpin_map(guc->log.vma->obj);
        guc->log.runtime.buf_addr = NULL;
}

void intel_guc_log_init_early(struct intel_guc *guc)
{
        mutex_init(&guc->log.runtime.relay_lock);
        INIT_WORK(&guc->log.runtime.flush_work, capture_logs_work);
}

int intel_guc_log_relay_create(struct intel_guc *guc)
{
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        struct rchan *guc_log_relay_chan;
        size_t n_subbufs, subbuf_size;
        int ret;

        if (!i915_modparams.guc_log_level)
                return 0;

        mutex_lock(&guc->log.runtime.relay_lock);

        GEM_BUG_ON(guc_log_has_relay(guc));

        /* Keep the size of sub buffers the same as the shared log buffer */
        subbuf_size = GUC_LOG_SIZE;

        /*
         * Store up to 8 snapshots, which is large enough to buffer sufficient
         * boot time logs and provides enough leeway to the User, in terms of
         * latency, for consuming the logs from relay. Also doesn't take
         * up too much memory.
         */
        n_subbufs = 8;

        /*
         * Create a relay channel, so that we have buffers for storing
         * the GuC firmware logs; the channel will be linked with a file
         * later on when debugfs is registered.
         */
        guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size,
                                        n_subbufs, &relay_callbacks, dev_priv);
        if (!guc_log_relay_chan) {
                DRM_ERROR("Couldn't create relay chan for GuC logging\n");

                ret = -ENOMEM;
                goto err;
        }

        GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
        guc->log.runtime.relay_chan = guc_log_relay_chan;

        mutex_unlock(&guc->log.runtime.relay_lock);

        return 0;

err:
        mutex_unlock(&guc->log.runtime.relay_lock);
        /* logging will be off */
        i915_modparams.guc_log_level = 0;
        return ret;
}

void intel_guc_log_relay_destroy(struct intel_guc *guc)
{
        mutex_lock(&guc->log.runtime.relay_lock);

        /*
         * It's possible that the relay was never allocated because
         * GuC log was disabled at boot time.
         */
        if (!guc_log_has_relay(guc))
                goto out_unlock;

        relay_close(guc->log.runtime.relay_chan);
        guc->log.runtime.relay_chan = NULL;

out_unlock:
        mutex_unlock(&guc->log.runtime.relay_lock);
}

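/*
 * Set up whatever is still missing for log capture (relay channel, runtime
 * mapping and the debugfs relay file), e.g. when logging is enabled only
 * after the driver has loaded.
 */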
static int guc_log_late_setup(struct intel_guc *guc)
{
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        int ret;

        if (!guc_log_has_runtime(guc)) {
                /*
                 * If log was disabled at boot time, then setup needed to handle
                 * log buffer flush interrupts would not have been done yet, so
                 * do that now.
                 */
                ret = intel_guc_log_relay_create(guc);
                if (ret)
                        goto err;

                mutex_lock(&dev_priv->drm.struct_mutex);
                intel_runtime_pm_get(dev_priv);
                ret = guc_log_runtime_create(guc);
                intel_runtime_pm_put(dev_priv);
                mutex_unlock(&dev_priv->drm.struct_mutex);

                if (ret)
                        goto err_relay;
        }

        ret = guc_log_relay_file_create(guc);
        if (ret)
                goto err_runtime;

        return 0;

err_runtime:
        mutex_lock(&dev_priv->drm.struct_mutex);
        guc_log_runtime_destroy(guc);
        mutex_unlock(&dev_priv->drm.struct_mutex);
err_relay:
        intel_guc_log_relay_destroy(guc);
err:
        /* logging will remain off */
        i915_modparams.guc_log_level = 0;
        return ret;
}

static void guc_log_capture_logs(struct intel_guc *guc)
{
        struct drm_i915_private *dev_priv = guc_to_i915(guc);

        guc_read_update_log_buffer(guc);

        /*
         * Generally device is expected to be active only at this
         * time, so get/put should be really quick.
         */
        intel_runtime_pm_get(dev_priv);
        guc_log_flush_complete(guc);
        intel_runtime_pm_put(dev_priv);
}

static void guc_flush_logs(struct intel_guc *guc)
{
        struct drm_i915_private *dev_priv = guc_to_i915(guc);

        if (!USES_GUC_SUBMISSION(dev_priv) || !i915_modparams.guc_log_level)
                return;

        /* First disable the interrupts, they will be re-enabled afterwards */
        mutex_lock(&dev_priv->drm.struct_mutex);
        intel_runtime_pm_get(dev_priv);
        gen9_disable_guc_interrupts(dev_priv);
        intel_runtime_pm_put(dev_priv);
        mutex_unlock(&dev_priv->drm.struct_mutex);

        /*
         * Before initiating the forceful flush, wait for any pending/ongoing
         * flush to complete, otherwise the forceful flush may not actually
         * happen.
         */
        flush_work(&guc->log.runtime.flush_work);

        /* Ask GuC to update the log buffer state */
        intel_runtime_pm_get(dev_priv);
        guc_log_flush(guc);
        intel_runtime_pm_put(dev_priv);

        /* GuC would have updated the log buffer by now, so capture it */
        guc_log_capture_logs(guc);
}

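/*
 * Allocate the shared log buffer object and, if logging is enabled, the
 * host-side runtime state; also compute guc->log.flags, which carries the
 * buffer offset and layout handed to the GuC firmware.
 */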
int intel_guc_log_create(struct intel_guc *guc)
{
        struct i915_vma *vma;
        unsigned long offset;
        u32 flags;
        int ret;

        GEM_BUG_ON(guc->log.vma);

        /*
         * We require SSE 4.1 for fast reads from the GuC log buffer and
         * it should be present on the chipsets supporting GuC based
         * submissions.
         */
        if (WARN_ON(!i915_has_memcpy_from_wc())) {
                ret = -EINVAL;
                goto err;
        }

        vma = intel_guc_allocate_vma(guc, GUC_LOG_SIZE);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err;
        }

        guc->log.vma = vma;

        if (i915_modparams.guc_log_level) {
                ret = guc_log_runtime_create(guc);
                if (ret < 0)
                        goto err_vma;
        }

        /* each allocated unit is a page */
        flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
                (GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
                (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
                (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);

        offset = guc_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
        guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;

        return 0;

err_vma:
        i915_vma_unpin_and_release(&guc->log.vma);
err:
        /* logging will be off */
        i915_modparams.guc_log_level = 0;
        return ret;
}

void intel_guc_log_destroy(struct intel_guc *guc)
{
        guc_log_runtime_destroy(guc);
        i915_vma_unpin_and_release(&guc->log.vma);
}

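/*
 * Runtime control of GuC logging (e.g. from debugfs): control_val == 0
 * disables logging, values 1..GUC_LOG_VERBOSITY_MAX+1 enable it with
 * verbosity control_val - 1.
 */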
int intel_guc_log_control(struct intel_guc *guc, u64 control_val)
{
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        bool enable_logging = control_val > 0;
        u32 verbosity;
        int ret;

        if (!guc->log.vma)
                return -ENODEV;

        BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN);
        if (control_val > 1 + GUC_LOG_VERBOSITY_MAX)
                return -EINVAL;

        /* This combination doesn't make sense & won't have any effect */
        if (!enable_logging && !i915_modparams.guc_log_level)
                return 0;

        verbosity = enable_logging ? control_val - 1 : 0;

        ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
        if (ret)
                return ret;
        intel_runtime_pm_get(dev_priv);
        ret = guc_log_control(guc, enable_logging, verbosity);
        intel_runtime_pm_put(dev_priv);
        mutex_unlock(&dev_priv->drm.struct_mutex);

        if (ret < 0) {
                DRM_DEBUG_DRIVER("guc_logging_control action failed %d\n", ret);
                return ret;
        }

        if (enable_logging) {
                i915_modparams.guc_log_level = 1 + verbosity;

                /*
                 * If log was disabled at boot time, then the relay channel file
                 * wouldn't have been created by now and interrupts also would
                 * not have been enabled. Try again now, just in case.
                 */
                ret = guc_log_late_setup(guc);
                if (ret < 0) {
                        DRM_DEBUG_DRIVER("GuC log late setup failed %d\n", ret);
                        return ret;
                }

                /* GuC logging is currently the only user of Guc2Host interrupts */
                mutex_lock(&dev_priv->drm.struct_mutex);
                intel_runtime_pm_get(dev_priv);
                gen9_enable_guc_interrupts(dev_priv);
                intel_runtime_pm_put(dev_priv);
                mutex_unlock(&dev_priv->drm.struct_mutex);
        } else {
                /*
                 * Once logging is disabled, GuC won't generate logs or send
                 * flush interrupts. But there could be some data in the log
                 * buffer which is yet to be captured. So request GuC to update
                 * the log buffer state and then collect the leftover logs.
                 */
                guc_flush_logs(guc);

                /* As logging is disabled, update the log level to reflect that */
                i915_modparams.guc_log_level = 0;
        }

        return ret;
}

void i915_guc_log_register(struct drm_i915_private *dev_priv)
{
        if (!USES_GUC_SUBMISSION(dev_priv) || !i915_modparams.guc_log_level)
                return;

        guc_log_late_setup(&dev_priv->guc);
}

void i915_guc_log_unregister(struct drm_i915_private *dev_priv)
{
        struct intel_guc *guc = &dev_priv->guc;

        if (!USES_GUC_SUBMISSION(dev_priv))
                return;

        mutex_lock(&dev_priv->drm.struct_mutex);
        /* GuC logging is currently the only user of Guc2Host interrupts */
        intel_runtime_pm_get(dev_priv);
        gen9_disable_guc_interrupts(dev_priv);
        intel_runtime_pm_put(dev_priv);

        guc_log_runtime_destroy(guc);
        mutex_unlock(&dev_priv->drm.struct_mutex);

        intel_guc_log_relay_destroy(guc);
}