1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2015-2018 Etnaviv Project
6 #include <linux/devcoredump.h>
7 #include <linux/moduleparam.h>
9 #include "etnaviv_cmdbuf.h"
10 #include "etnaviv_dump.h"
11 #include "etnaviv_gem.h"
12 #include "etnaviv_gpu.h"
13 #include "etnaviv_mmu.h"
14 #include "etnaviv_sched.h"
15 #include "state.xml.h"
16 #include "state_hi.xml.h"
/*
 * Gate for core dumping: when true, the next GPU hang produces a
 * devcoredump.  It is cleared after the first captured event (see
 * etnaviv_core_dump() below), so only the first hang is dumped until
 * re-armed.  Exposed as module parameter "dump_core" with mode 0600,
 * so root can re-arm it at runtime via sysfs.
 */
18 static bool etnaviv_dump_core = true;
19 module_param_named(dump_core, etnaviv_dump_core, bool, 0600);
/*
 * Cursor state used while serializing the core dump file.
 * @hdr walks the table of object headers placed at the front of the file.
 *
 * NOTE(review): other members referenced by the code below (->start,
 * ->data — the file base and the current payload write position) are not
 * visible in this excerpt; confirm against the full source.
 */
21 struct core_dump_iterator {
23 struct etnaviv_dump_object_header *hdr;
/*
 * Register offsets snapshotted into the ETDUMP_BUF_REG section of the
 * dump: clock/identity/cache state, power-management controls, the MMU
 * page table base registers, the MC memory base/timing registers and the
 * FE DMA debug state.  Offsets come from state.xml.h / state_hi.xml.h.
 */
27 static const unsigned short etnaviv_dump_registers[] = {
29 VIVS_HI_CLOCK_CONTROL,
33 VIVS_HI_CHIP_IDENTITY,
39 VIVS_HI_CHIP_MINOR_FEATURE_0,
40 VIVS_HI_CACHE_CONTROL,
42 VIVS_PM_POWER_CONTROLS,
43 VIVS_PM_MODULE_CONTROLS,
44 VIVS_PM_MODULE_STATUS,
46 VIVS_MC_MMU_FE_PAGE_TABLE,
47 VIVS_MC_MMU_TX_PAGE_TABLE,
48 VIVS_MC_MMU_PE_PAGE_TABLE,
49 VIVS_MC_MMU_PEZ_PAGE_TABLE,
50 VIVS_MC_MMU_RA_PAGE_TABLE,
52 VIVS_MC_MEMORY_BASE_ADDR_RA,
53 VIVS_MC_MEMORY_BASE_ADDR_FE,
54 VIVS_MC_MEMORY_BASE_ADDR_TX,
55 VIVS_MC_MEMORY_BASE_ADDR_PEZ,
56 VIVS_MC_MEMORY_BASE_ADDR_PE,
57 VIVS_MC_MEMORY_TIMING_CONTROL,
60 VIVS_FE_DMA_DEBUG_STATE,
/*
 * Finish the dump object whose payload was just written at iter->data:
 * fill in the current header (magic, @type, offset of the payload from
 * the start of the file, payload size — all stored little-endian) and
 * advance iter->data past the payload.  @data_end points one byte past
 * the payload's end.
 *
 * NOTE(review): the line advancing iter->hdr to the next header slot is
 * not visible in this excerpt — confirm against the full source.
 */
67 static void etnaviv_core_dump_header(struct core_dump_iterator *iter,
68 u32 type, void *data_end)
70 struct etnaviv_dump_object_header *hdr = iter->hdr;
72 hdr->magic = cpu_to_le32(ETDUMP_MAGIC);
73 hdr->type = cpu_to_le32(type);
74 hdr->file_offset = cpu_to_le32(iter->data - iter->start);
75 hdr->file_size = cpu_to_le32(data_end - iter->data);
/* Step the payload cursor; round-trips through le32 on purpose. */
78 iter->data += le32_to_cpu(hdr->file_size);
/*
 * Snapshot every register listed in etnaviv_dump_registers[] into an
 * ETDUMP_BUF_REG section: one (reg, value) record per entry, written at
 * iter->data.  Reads in the PM register range go through
 * gpu_fix_power_address() — presumably a fix-up for cores whose power
 * registers live at a relocated address (inferred from the helper's
 * name; TODO confirm) — while the record still carries the canonical,
 * unfixed register offset.
 */
81 static void etnaviv_core_dump_registers(struct core_dump_iterator *iter,
82 struct etnaviv_gpu *gpu)
84 struct etnaviv_dump_registers *reg = iter->data;
88 for (i = 0; i < ARRAY_SIZE(etnaviv_dump_registers); i++, reg++) {
89 read_addr = etnaviv_dump_registers[i];
90 if (read_addr >= VIVS_PM_POWER_CONTROLS &&
91 read_addr <= VIVS_PM_PULSE_EATER)
92 read_addr = gpu_fix_power_address(gpu, read_addr);
/* Record the canonical offset, but read via the fixed-up address. */
93 reg->reg = cpu_to_le32(etnaviv_dump_registers[i]);
94 reg->value = cpu_to_le32(gpu_read(gpu, read_addr));
/* 'reg' now points one past the last record == payload end. */
97 etnaviv_core_dump_header(iter, ETDUMP_BUF_REG, reg);
/*
 * Serialize the MMU state of @mmu into an ETDUMP_BUF_MMU section.
 * @mmu_size must be the size etnaviv_iommu_dump() writes (the caller
 * obtains it from etnaviv_iommu_dump_size(), see etnaviv_core_dump()).
 */
100 static void etnaviv_core_dump_mmu(struct core_dump_iterator *iter,
101 struct etnaviv_iommu_context *mmu, size_t mmu_size)
103 etnaviv_iommu_dump(mmu, iter->data);
105 etnaviv_core_dump_header(iter, ETDUMP_BUF_MMU, iter->data + mmu_size);
/*
 * Copy a CPU-visible memory region (@ptr, @size) into a dump section of
 * the given @type, recording the region's GPU virtual address @iova in
 * the section header.  Used below for the kernel ring buffer and the
 * hanging command buffer.
 */
108 static void etnaviv_core_dump_mem(struct core_dump_iterator *iter, u32 type,
109 void *ptr, size_t size, u64 iova)
111 memcpy(iter->data, ptr, size);
113 iter->hdr->iova = cpu_to_le64(iova);
115 etnaviv_core_dump_header(iter, type, iter->data + size);
/*
 * Capture a devcoredump for a hung @submit: a register snapshot, the MMU
 * state, the kernel ring buffer, the offending command buffer, a map of
 * BO physical pages (bomap) and the contents of every buffer object in
 * the submit, then hand the finished file to dev_coredumpv().
 *
 * NOTE(review): this excerpt is missing several original lines (loop
 * variable declarations, the n_obj/n_bomap_pages initialization, the
 * early returns and some brace closings) — the comments below describe
 * only what the visible code shows; verify details against the full
 * source.
 */
118 void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
120 struct etnaviv_gpu *gpu = submit->gpu;
121 struct core_dump_iterator iter;
122 struct etnaviv_gem_object *obj;
123 unsigned int n_obj, n_bomap_pages;
124 size_t file_size, mmu_size;
125 __le64 *bomap, *bomap_start;
128 /* Only catch the first event, or when manually re-armed */
129 if (!etnaviv_dump_core)
131 etnaviv_dump_core = false;
/* Hold the MMU context lock while sizing and serializing its state. */
133 mutex_lock(&submit->mmu_context->lock);
135 mmu_size = etnaviv_iommu_dump_size(submit->mmu_context);
137 /* We always dump registers, mmu, ring, hanging cmdbuf and end marker */
140 file_size = ARRAY_SIZE(etnaviv_dump_registers) *
141 sizeof(struct etnaviv_dump_registers) +
142 mmu_size + gpu->buffer.size + submit->cmdbuf.size;
144 /* Add in the active buffer objects */
145 for (i = 0; i < submit->nr_bos; i++) {
146 obj = submit->bos[i].obj;
147 file_size += obj->base.size;
148 n_bomap_pages += obj->base.size >> PAGE_SHIFT;
152 /* If we have any buffer objects, add a bomap object */
154 file_size += n_bomap_pages * sizeof(__le64);
158 /* Add the size of the headers */
159 file_size += sizeof(*iter.hdr) * n_obj;
161 /* Allocate the file in vmalloc memory, it's likely to be big */
/* GFP_NOWAIT: we may be called from a context that must not sleep. */
162 iter.start = __vmalloc(file_size, GFP_NOWAIT);
/* Allocation-failure path: drop the lock and bail with a warning. */
164 mutex_unlock(&submit->mmu_context->lock);
165 dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
169 /* Point the data member after the headers */
170 iter.hdr = iter.start;
171 iter.data = &iter.hdr[n_obj];
/* Zero the whole header table; headers are filled in as we go. */
173 memset(iter.hdr, 0, iter.data - iter.start);
175 etnaviv_core_dump_registers(&iter, gpu);
176 etnaviv_core_dump_mmu(&iter, submit->mmu_context, mmu_size);
/* Kernel ring buffer, recorded with its GPU VA in the cmdbuf mapping. */
177 etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
179 etnaviv_cmdbuf_get_va(&gpu->buffer,
180 &submit->mmu_context->cmdbuf_mapping));
/* The command buffer that was executing when the hang occurred. */
182 etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
183 submit->cmdbuf.vaddr, submit->cmdbuf.size,
184 etnaviv_cmdbuf_get_va(&submit->cmdbuf,
185 &submit->mmu_context->cmdbuf_mapping));
/* MMU state is serialized; BO contents below don't need the lock. */
187 mutex_unlock(&submit->mmu_context->lock);
189 /* Reserve space for the bomap */
191 bomap_start = bomap = iter.data;
192 memset(bomap, 0, sizeof(*bomap) * n_bomap_pages);
193 etnaviv_core_dump_header(&iter, ETDUMP_BUF_BOMAP,
194 bomap + n_bomap_pages);
/* No-BO branch (condition line not visible in this excerpt): */
196 /* Silence warning */
197 bomap_start = bomap = NULL;
/* Dump each BO: physical page list into the bomap, contents inline. */
200 for (i = 0; i < submit->nr_bos; i++) {
201 struct etnaviv_vram_mapping *vram;
205 obj = submit->bos[i].obj;
206 vram = submit->bos[i].mapping;
/* obj->lock guards the page array while we grab it. */
208 mutex_lock(&obj->lock);
209 pages = etnaviv_gem_get_pages(obj);
210 mutex_unlock(&obj->lock);
211 if (!IS_ERR(pages)) {
/* Index of this BO's first entry within the shared bomap. */
214 iter.hdr->data[0] = cpu_to_le32((bomap - bomap_start));
216 for (j = 0; j < obj->base.size >> PAGE_SHIFT; j++)
217 *bomap++ = cpu_to_le64(page_to_phys(*pages++));
220 iter.hdr->iova = cpu_to_le64(vram->iova);
222 vaddr = etnaviv_gem_vmap(&obj->base);
/* Guarded copy — the NULL-vaddr check line is not visible here. */
224 memcpy(iter.data, vaddr, obj->base.size);
226 etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data +
/* Terminator so parsers know where the dump ends. */
230 etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);
/* dev_coredumpv takes ownership of the vmalloc'd buffer. */
232 dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_NOWAIT);