1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright 2021 Collabora ltd. */
5 #include <linux/device.h>
6 #include <linux/devcoredump.h>
7 #include <linux/moduleparam.h>
8 #include <linux/iosys-map.h>
9 #include <drm/panfrost_drm.h>
10 #include <drm/drm_device.h>
12 #include "panfrost_job.h"
13 #include "panfrost_gem.h"
14 #include "panfrost_regs.h"
15 #include "panfrost_dump.h"
16 #include "panfrost_device.h"
/*
 * One-shot arm flag for core dumping: panfrost_core_dump() only captures a
 * dump while this is true and clears it after the first capture, so only
 * the first fault is dumped until the flag is manually re-armed via the
 * writable (0600) "dump_core" module parameter.
 */
18 static bool panfrost_dump_core = true;
19 module_param_named(dump_core, panfrost_dump_core, bool, 0600);
/*
 * Cursor state used while serializing the devcoredump file.
 * @hdr: next object header slot to fill at the front of the file.
 *
 * NOTE(review): this excerpt elides other members — at least a start
 * pointer and a data (write-cursor) pointer, used as iter.start/iter.data
 * below — confirm against the full file.
 */
21 struct panfrost_dump_iterator {
23 	struct panfrost_dump_object_header *hdr;
/*
 * GPU register offsets captured into the PANFROSTDUMP_BUF_REG payload.
 * When read back in panfrost_core_dump_registers(), entries falling in the
 * job-slot (JS_*) range get a per-slot stride added and entries in the
 * address-space (AS_*/MMU) range get a per-AS offset added.
 *
 * NOTE(review): most table entries are elided from this excerpt.
 */
27 static const unsigned short panfrost_dump_registers[] = {
46 	JS_AFFINITY_NEXT_LO(0),
47 	JS_AFFINITY_NEXT_HI(0),
56 	AS_FAULTADDRESS_LO(0),
57 	AS_FAULTADDRESS_HI(0),
/*
 * Close out the object currently being written: fill in the header slot at
 * iter->hdr with the dump magic, the payload's offset from the start of
 * the file and its size (everything between iter->data and @data_end),
 * then advance the write cursor past the payload.
 *
 * NOTE(review): lines are elided from this excerpt — the store of @type
 * into the header and the advance of iter->hdr to the next header slot are
 * not visible here; confirm against the full file.
 */
61 static void panfrost_core_dump_header(struct panfrost_dump_iterator *iter,
62 				      u32 type, void *data_end)
64 	struct panfrost_dump_object_header *hdr = iter->hdr;
66 	hdr->magic = PANFROSTDUMP_MAGIC;
	/* Payload location is recorded relative to the start of the dump file. */
68 	hdr->file_offset = iter->data - iter->start;
69 	hdr->file_size = data_end - iter->data;
	/* Move the cursor to where the next object's payload will go. */
72 	iter->data += hdr->file_size;
/*
 * Snapshot every register listed in panfrost_dump_registers[] into the
 * dump as (offset, value) pairs, then close the object with a
 * PANFROSTDUMP_BUF_REG header. Registers in the job-slot range are
 * rebased onto the slot the failed job ran on; registers in the MMU
 * address-space range are rebased onto the job's AS number.
 *
 * NOTE(review): the return-type line, the as_nr/slot parameters and some
 * loop-local declarations (i, reg) are elided from this excerpt.
 */
76 panfrost_core_dump_registers(struct panfrost_dump_iterator *iter,
77 			     struct panfrost_device *pfdev,
80 	struct panfrost_dump_registers *dumpreg = iter->data;
83 	for (i = 0; i < ARRAY_SIZE(panfrost_dump_registers); i++, dumpreg++) {
84 		unsigned int js_as_offset = 0;
		/* Job-slot registers: add the stride for the job's slot. */
87 		if (panfrost_dump_registers[i] >= JS_BASE &&
88 		    panfrost_dump_registers[i] <= JS_BASE + JS_SLOT_STRIDE)
89 			js_as_offset = slot * JS_SLOT_STRIDE;
		/* MMU/AS registers: add the offset for the job's address space. */
90 		else if (panfrost_dump_registers[i] >= MMU_BASE &&
91 			 panfrost_dump_registers[i] <= MMU_BASE + MMU_AS_STRIDE)
92 			js_as_offset = (as_nr << MMU_AS_SHIFT);
94 		reg = panfrost_dump_registers[i] + js_as_offset;
97 		dumpreg->value = gpu_read(pfdev, reg);
	/* dumpreg now points one past the last entry == end of the payload. */
100 	panfrost_core_dump_header(iter, PANFROSTDUMP_BUF_REG, dumpreg);
/*
 * panfrost_core_dump() - capture a devcoredump snapshot of a failed job.
 *
 * Builds a single vmalloc'd file with this layout (in write order):
 *   - an array of @n_obj object headers at the front,
 *   - a register-dump payload (PANFROSTDUMP_BUF_REG),
 *   - a "bomap" payload: one u64 page address per GPU page of every BO
 *     (PANFROSTDUMP_BUF_BOMAP),
 *   - the CPU-visible contents of each buffer object attached to @job
 *     (PANFROSTDUMP_BUF_BO),
 *   - a zero-length trailer marker (PANFROSTDUMP_BUF_TRAILER),
 * then hands the buffer to dev_coredumpv().
 *
 * NOTE(review): this excerpt is missing lines — declarations of as_nr,
 * slot, file_size, i, ret and several others, closing braces, the
 * early-return/continue paths and where n_obj/n_bomap_pages are
 * initialized are not visible. The comments below only describe what is
 * shown here.
 */
103 void panfrost_core_dump(struct panfrost_job *job)
105 	struct panfrost_device *pfdev = job->pfdev;
106 	struct panfrost_dump_iterator iter;
107 	struct drm_gem_object *dbo;
108 	unsigned int n_obj, n_bomap_pages;
109 	u64 *bomap, *bomap_start;
115 	as_nr = job->mmu->as;
116 	slot = panfrost_job_get_slot(job);
118 	/* Only catch the first event, or when manually re-armed */
119 	if (!panfrost_dump_core)
	/* Disarm: subsequent faults are ignored until dump_core is rewritten. */
121 	panfrost_dump_core = false;
123 	/* At least, we dump registers and end marker */
126 	file_size = ARRAY_SIZE(panfrost_dump_registers) *
127 		sizeof(struct panfrost_dump_registers);
129 	/* Add in the active buffer objects */
130 	for (i = 0; i < job->bo_count; i++) {
132 	 * Even though the CPU could be configured to use 16K or 64K pages, this
133 	 * is a very unusual situation for most kernel setups on SoCs that have
134 	 * a Panfrost device. Also many places across the driver make the somewhat
135 	 * arbitrary assumption that Panfrost's MMU page size is the same as the CPU's,
136 	 * so let's have a sanity check to ensure that's always the case
139 		WARN_ON(!IS_ALIGNED(dbo->size, PAGE_SIZE));
141 		file_size += dbo->size;
		/* One bomap u64 entry per CPU page of the BO. */
142 		n_bomap_pages += dbo->size >> PAGE_SHIFT;
146 	/* If we have any buffer objects, add a bomap object */
148 		file_size += n_bomap_pages * sizeof(*bomap);
152 	/* Add the size of the headers */
153 	file_size += sizeof(*iter.hdr) * n_obj;
156 	 * Allocate the file in vmalloc memory, it's likely to be big.
157 	 * The reason behind these GFP flags is that we don't want to trigger the
158 	 * OOM killer in the event that not enough memory could be found for our
159 	 * dump file. We also don't want the allocator to do any error reporting,
160 	 * as the right behaviour is failing gracefully if a big enough buffer
161 	 * could not be allocated.
163 	iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN |
		/* Allocation failed: warn and (presumably) bail out of the dump. */
166 		dev_warn(pfdev->dev, "failed to allocate devcoredump file\n");
170 	/* Point the data member after the headers */
171 	iter.hdr = iter.start;
172 	iter.data = &iter.hdr[n_obj];
	/* Zero the whole header array so unfinished headers read as invalid. */
174 	memset(iter.hdr, 0, iter.data - iter.start);
177 	 * For now, we write the job identifier in the register dump header,
178 	 * so that we can decode the entire dump later with pandecode
180 	iter.hdr->reghdr.jc = job->jc;
181 	iter.hdr->reghdr.major = PANFROSTDUMP_MAJOR;
182 	iter.hdr->reghdr.minor = PANFROSTDUMP_MINOR;
183 	iter.hdr->reghdr.gpu_id = pfdev->features.id;
184 	iter.hdr->reghdr.nbos = job->bo_count;
186 	panfrost_core_dump_registers(&iter, pfdev, as_nr, slot);
188 	/* Reserve space for the bomap */
190 	bomap_start = bomap = iter.data;
191 	memset(bomap, 0, sizeof(*bomap) * n_bomap_pages);
192 	panfrost_core_dump_header(&iter, PANFROSTDUMP_BUF_BOMAP,
193 				  bomap + n_bomap_pages);
196 	for (i = 0; i < job->bo_count; i++) {
197 		struct iosys_map map;
198 		struct panfrost_gem_mapping *mapping;
199 		struct panfrost_gem_object *bo;
200 		struct sg_page_iter page_iter;
203 		bo = to_panfrost_bo(job->bos[i]);
204 		mapping = job->mappings[i];
		/* Without an sg table there are no backing pages to record. */
207 			dev_err(pfdev->dev, "Panfrost Dump: BO has no sgt, cannot dump\n");
208 			iter.hdr->bomap.valid = 0;
		/* Map the BO into kernel address space to copy its contents. */
212 		ret = drm_gem_vmap_unlocked(&bo->base.base, &map);
214 			dev_err(pfdev->dev, "Panfrost Dump: couldn't map Buffer Object\n");
215 			iter.hdr->bomap.valid = 0;
219 		WARN_ON(!mapping->active);
		/* Record where this BO's page list starts within the bomap. */
221 		iter.hdr->bomap.data[0] = bomap - bomap_start;
223 		for_each_sgtable_page(bo->base.sgt, &page_iter, 0)
224 			*bomap++ = page_to_phys(sg_page_iter_page(&page_iter));
		/* mmnode.start is in GPU pages, hence the PAGE_SHIFT rebase. */
226 		iter.hdr->bomap.iova = mapping->mmnode.start << PAGE_SHIFT;
229 		memcpy(iter.data, vaddr, bo->base.base.size);
231 		drm_gem_vunmap_unlocked(&bo->base.base, &map);
233 		iter.hdr->bomap.valid = 1;
	/* Shared exit for both the success and the skip/error paths above. */
235 dump_header:	panfrost_core_dump_header(&iter, PANFROSTDUMP_BUF_BO, iter.data +
	/* Trailer payload is empty: data_end == iter.data. */
238 	panfrost_core_dump_header(&iter, PANFROSTDUMP_BUF_TRAILER, iter.data);
	/* devcoredump takes ownership of iter.start and frees it itself. */
240 	dev_coredumpv(pfdev->dev, iter.start, iter.data - iter.start, GFP_KERNEL);