/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/io-mapping.h>
#include <linux/pci.h>

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>

#include "qxl_drv.h"
#include "qxl_object.h"
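
/* Sanity-check the ROM BAR contents (magic, sizes) and log basic device info. */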
static bool qxl_check_device(struct qxl_device *qdev)
{
	struct qxl_rom *rom = qdev->rom;

	if (rom->magic != 0x4f525851) {
		DRM_ERROR("bad rom signature %x\n", rom->magic);
		return false;
	}
	DRM_INFO("Device Version %d.%d\n", rom->id, rom->update_id);
	DRM_INFO("Compression level %d log level %d\n", rom->compression_level,
		 rom->log_level);
	DRM_INFO("%d io pages at offset 0x%x\n",
		 rom->num_io_pages, rom->pages_offset);
	DRM_INFO("%d byte draw area at offset 0x%x\n",
		 rom->surface0_area_size, rom->draw_area_offset);

	qdev->vram_size = rom->surface0_area_size;
	DRM_INFO("RAM header offset: 0x%x\n", rom->ram_header_offset);
	return true;
}
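
/* Write a slot's physical range into the RAM header and register it with the device. */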
static void setup_hw_slot(struct qxl_device *qdev, struct qxl_memslot *slot)
{
	qdev->ram_header->mem_slot.mem_start = slot->start_phys_addr;
	qdev->ram_header->mem_slot.mem_end = slot->start_phys_addr + slot->size;
	qxl_io_memslot_add(qdev, qdev->rom->slots_start + slot->index);
}
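
/*
 * Fill in a struct qxl_memslot, register it with the hardware and precompute
 * the high address bits used when encoding addresses for this slot.
 */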
static void setup_slot(struct qxl_device *qdev,
		       struct qxl_memslot *slot,
		       unsigned int slot_index,
		       const char *slot_name,
		       unsigned long start_phys_addr,
		       unsigned long size)
{
	uint64_t high_bits;

	slot->index = slot_index;
	slot->name = slot_name;
	slot->start_phys_addr = start_phys_addr;
	slot->size = size;

	setup_hw_slot(qdev, slot);

	slot->generation = qdev->rom->slot_generation;
	high_bits = (qdev->rom->slots_start + slot->index)
		<< qdev->rom->slot_gen_bits;
	high_bits |= slot->generation;
	high_bits <<= (64 - (qdev->rom->slot_gen_bits + qdev->rom->slot_id_bits));
	slot->high_bits = high_bits;

	DRM_INFO("slot %d (%s): base 0x%08lx, size 0x%08lx, gpu_offset 0x%lx\n",
		 slot->index, slot->name,
		 (unsigned long)slot->start_phys_addr,
		 (unsigned long)slot->size,
		 (unsigned long)slot->gpu_offset);
}
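
/* Re-register the main and surfaces memory slots with the hardware. */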
void qxl_reinit_memslots(struct qxl_device *qdev)
{
	setup_hw_slot(qdev, &qdev->main_slot);
	setup_hw_slot(qdev, &qdev->surfaces_slot);
}
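
/* Deferred work item that runs qxl_garbage_collect() outside interrupt context. */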
static void qxl_gc_work(struct work_struct *work)
{
	struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);

	qxl_garbage_collect(qdev);
}
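
/*
 * Map the PCI BARs, validate the ROM, create the command/cursor/release rings
 * and register the main and surfaces memory slots.
 */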
int qxl_device_init(struct qxl_device *qdev,
		    struct pci_dev *pdev)
{
	int r, sb;

	qdev->ddev.pdev = pdev;
	pci_set_drvdata(pdev, &qdev->ddev);

	mutex_init(&qdev->gem.mutex);
	mutex_init(&qdev->update_area_mutex);
	mutex_init(&qdev->release_mutex);
	mutex_init(&qdev->surf_evict_mutex);
	qxl_gem_init(qdev);

	/* BAR 2: rom, BAR 0: vram, BAR 3: io ports (surface BARs handled below) */
	qdev->rom_base = pci_resource_start(pdev, 2);
	qdev->rom_size = pci_resource_len(pdev, 2);
	qdev->vram_base = pci_resource_start(pdev, 0);
	qdev->io_base = pci_resource_start(pdev, 3);

	qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
	if (!qdev->vram_mapping) {
		pr_err("Unable to create vram_mapping");
		return -ENOMEM;
	}

	if (pci_resource_len(pdev, 4) > 0) {
		/* 64bit surface bar present */
		sb = 4;
		qdev->surfaceram_base = pci_resource_start(pdev, sb);
		qdev->surfaceram_size = pci_resource_len(pdev, sb);
		qdev->surface_mapping =
			io_mapping_create_wc(qdev->surfaceram_base,
					     qdev->surfaceram_size);
	}
	if (qdev->surface_mapping == NULL) {
		/* 64bit surface bar not present (or mapping failed) */
		sb = 1;
		qdev->surfaceram_base = pci_resource_start(pdev, sb);
		qdev->surfaceram_size = pci_resource_len(pdev, sb);
		qdev->surface_mapping =
			io_mapping_create_wc(qdev->surfaceram_base,
					     qdev->surfaceram_size);
		if (!qdev->surface_mapping) {
			pr_err("Unable to create surface_mapping");
			r = -ENOMEM;
			goto vram_mapping_free;
		}
	}

	DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk, %s)\n",
		      (unsigned long long)qdev->vram_base,
		      (unsigned long long)pci_resource_end(pdev, 0),
		      (int)pci_resource_len(pdev, 0) / 1024 / 1024,
		      (int)pci_resource_len(pdev, 0) / 1024,
		      (unsigned long long)qdev->surfaceram_base,
		      (unsigned long long)pci_resource_end(pdev, sb),
		      (int)qdev->surfaceram_size / 1024 / 1024,
		      (int)qdev->surfaceram_size / 1024,
		      (sb == 4) ? "64bit" : "32bit");

	qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
	if (!qdev->rom) {
		pr_err("Unable to ioremap ROM\n");
		r = -ENOMEM;
		goto surface_mapping_free;
	}

	if (!qxl_check_device(qdev)) {
		r = -ENODEV;
		goto rom_unmap;
	}

	r = qxl_bo_init(qdev);
	if (r) {
		DRM_ERROR("bo init failed %d\n", r);
		goto rom_unmap;
	}

	qdev->ram_header = ioremap(qdev->vram_base +
				   qdev->rom->ram_header_offset,
				   sizeof(*qdev->ram_header));
	if (!qdev->ram_header) {
		DRM_ERROR("Unable to ioremap RAM header\n");
		r = -ENOMEM;
		goto bo_fini;
	}

	qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr),
					     sizeof(struct qxl_command),
					     QXL_COMMAND_RING_SIZE,
					     qdev->io_base + QXL_IO_NOTIFY_CMD,
					     false,
					     &qdev->display_event);
	if (!qdev->command_ring) {
		DRM_ERROR("Unable to create command ring\n");
		r = -ENOMEM;
		goto ram_header_unmap;
	}

	qdev->cursor_ring = qxl_ring_create(
				&(qdev->ram_header->cursor_ring_hdr),
				sizeof(struct qxl_command),
				QXL_CURSOR_RING_SIZE,
				qdev->io_base + QXL_IO_NOTIFY_CURSOR,
				false,
				&qdev->cursor_event);

	if (!qdev->cursor_ring) {
		DRM_ERROR("Unable to create cursor ring\n");
		r = -ENOMEM;
		goto command_ring_free;
	}

	qdev->release_ring = qxl_ring_create(
				&(qdev->ram_header->release_ring_hdr),
				sizeof(uint64_t),
				QXL_RELEASE_RING_SIZE, 0, true,
				NULL);

	if (!qdev->release_ring) {
		DRM_ERROR("Unable to create release ring\n");
		r = -ENOMEM;
		goto cursor_ring_free;
	}

	idr_init(&qdev->release_idr);
	spin_lock_init(&qdev->release_idr_lock);
	spin_lock_init(&qdev->release_lock);

	idr_init(&qdev->surf_id_idr);
	spin_lock_init(&qdev->surf_id_idr_lock);

	mutex_init(&qdev->async_io_mutex);

	/* reset the device into a known state - no memslots, no primary
	 * created, no surfaces. */
	qxl_io_reset(qdev);

	/* must initialize irq before first async io - slot creation */
	r = qxl_irq_init(qdev);
	if (r) {
		DRM_ERROR("Unable to init qxl irq\n");
		goto release_ring_free;
	}

	/*
	 * Note that virtual is surface0. We rely on the single ioremap done
	 * before.
	 */
	setup_slot(qdev, &qdev->main_slot, 0, "main",
		   (unsigned long)qdev->vram_base,
		   (unsigned long)qdev->rom->ram_header_offset);
	setup_slot(qdev, &qdev->surfaces_slot, 1, "surfaces",
		   (unsigned long)qdev->surfaceram_base,
		   (unsigned long)qdev->surfaceram_size);

	INIT_WORK(&qdev->gc_work, qxl_gc_work);

	return 0;
release_ring_free:
	qxl_ring_free(qdev->release_ring);
cursor_ring_free:
	qxl_ring_free(qdev->cursor_ring);
command_ring_free:
	qxl_ring_free(qdev->command_ring);
ram_header_unmap:
	iounmap(qdev->ram_header);
bo_fini:
	qxl_bo_fini(qdev);
rom_unmap:
	iounmap(qdev->rom);
surface_mapping_free:
	io_mapping_free(qdev->surface_mapping);
vram_mapping_free:
	io_mapping_free(qdev->vram_mapping);
	return r;
}
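
/* Undo qxl_device_init(): release BOs, free the rings and unmap the BARs. */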
void qxl_device_fini(struct qxl_device *qdev)
{
	qxl_bo_unref(&qdev->current_release_bo[0]);
	qxl_bo_unref(&qdev->current_release_bo[1]);
	qxl_gem_fini(qdev);
	qxl_bo_fini(qdev);
	flush_work(&qdev->gc_work);
	qxl_ring_free(qdev->command_ring);
	qxl_ring_free(qdev->cursor_ring);
	qxl_ring_free(qdev->release_ring);
	io_mapping_free(qdev->surface_mapping);
	io_mapping_free(qdev->vram_mapping);
	iounmap(qdev->ram_header);
	iounmap(qdev->rom);
	qdev->rom = NULL;
}