// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <soc/tegra/pmc.h>

#include "drm.h"
#include "falcon.h"
#include "vic.h"

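/*
 * Per-SoC configuration (firmware image, client version, stream ID support)
 * and per-device state for the VIC (Video Image Compositor) engine.
 */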
struct vic_config {
	const char *firmware;
	unsigned int version;
	bool supports_sid;
};

struct vic {
	struct falcon falcon;

	void __iomem *regs;
	struct tegra_drm_client client;
	struct host1x_channel *channel;
	struct device *dev;
	struct clk *clk;
	struct reset_control *rst;

	bool can_use_context;

	/* Platform configuration */
	const struct vic_config *config;
};

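/* Recover the driver-private struct vic from the embedded tegra_drm_client. */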
static inline struct vic *to_vic(struct tegra_drm_client *client)
{
	return container_of(client, struct vic, client);
}

static void vic_writel(struct vic *vic, u32 value, unsigned int offset)
{
	writel(value, vic->regs + offset);
}

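/*
 * Program the TFBIF stream IDs and clock gating, boot the Falcon
 * microcontroller and, for old firmware, point the engine at the FCE
 * microcode embedded in the firmware image.
 */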
static int vic_boot(struct vic *vic)
{
	u32 fce_ucode_size, fce_bin_data_offset, stream_id;
	void *hdr;
	int err = 0;

	if (vic->config->supports_sid && tegra_dev_iommu_get_stream_id(vic->dev, &stream_id)) {
		u32 value;

		value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) |
			TRANSCFG_ATT(0, TRANSCFG_SID_HW);
		vic_writel(vic, value, VIC_TFBIF_TRANSCFG);

		/*
		 * STREAMID0 is used for input/output buffers. Initialize it to SID_VIC in case
		 * context isolation is not enabled, and SID_VIC is used for both firmware and
		 * data buffers.
		 *
		 * If context isolation is enabled, it will be overridden by the SETSTREAMID
		 * opcode as part of each job.
		 */
		vic_writel(vic, stream_id, VIC_THI_STREAMID0);

		/* STREAMID1 is used for firmware loading. */
		vic_writel(vic, stream_id, VIC_THI_STREAMID1);
	}

	/* set up clock gating registers */
	vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) |
			CG_IDLE_CG_EN |
			CG_WAKEUP_DLY_CNT(4),
		   NV_PVIC_MISC_PRI_VIC_CG);

	err = falcon_boot(&vic->falcon);
	if (err < 0)
		return err;

	hdr = vic->falcon.firmware.virt;
	fce_bin_data_offset = *(u32 *)(hdr + VIC_UCODE_FCE_DATA_OFFSET);

	/* Old VIC firmware needs kernel help with setting up FCE microcode. */
	if (fce_bin_data_offset != 0x0 && fce_bin_data_offset != 0xa5a5a5a5) {
		hdr = vic->falcon.firmware.virt +
			*(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET);
		fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET);

		falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE,
				      fce_ucode_size);
		falcon_execute_method(
			&vic->falcon, VIC_SET_FCE_UCODE_OFFSET,
			(vic->falcon.firmware.iova + fce_bin_data_offset) >> 8);
	}

	err = falcon_wait_idle(&vic->falcon);
	if (err < 0) {
		dev_err(vic->dev,
			"failed to set application ID and FCE base\n");
		return err;
	}

	return 0;
}

static int vic_init(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = dev->dev_private;
	struct vic *vic = to_vic(drm);
	int err;

	err = host1x_client_iommu_attach(client);
	if (err < 0 && err != -ENODEV) {
		dev_err(vic->dev, "failed to attach to domain: %d\n", err);
		return err;
	}

	vic->channel = host1x_channel_request(client);
	if (!vic->channel) {
		err = -ENOMEM;
		goto detach;
	}

	client->syncpts[0] = host1x_syncpt_request(client, 0);
	if (!client->syncpts[0]) {
		err = -ENOMEM;
		goto free_channel;
	}

	pm_runtime_enable(client->dev);
	pm_runtime_use_autosuspend(client->dev);
	pm_runtime_set_autosuspend_delay(client->dev, 500);

	err = tegra_drm_register_client(tegra, drm);
	if (err < 0)
		goto disable_rpm;

	/*
	 * Inherit the DMA parameters (such as maximum segment size) from the
	 * parent host1x device.
	 */
	client->dev->dma_parms = client->host->dma_parms;

	return 0;

disable_rpm:
	pm_runtime_dont_use_autosuspend(client->dev);
	pm_runtime_force_suspend(client->dev);

	host1x_syncpt_put(client->syncpts[0]);
free_channel:
	host1x_channel_put(vic->channel);
detach:
	host1x_client_iommu_detach(client);

	return err;
}

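/*
 * host1x client .exit callback: undo vic_init() and release the firmware
 * buffer through whichever allocator it came from.
 */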
static int vic_exit(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = dev->dev_private;
	struct vic *vic = to_vic(drm);
	int err;

	/* avoid a dangling pointer just in case this disappears */
	client->dev->dma_parms = NULL;

	err = tegra_drm_unregister_client(tegra, drm);
	if (err < 0)
		return err;

	pm_runtime_dont_use_autosuspend(client->dev);
	pm_runtime_force_suspend(client->dev);

	host1x_syncpt_put(client->syncpts[0]);
	host1x_channel_put(vic->channel);
	host1x_client_iommu_detach(client);

	vic->channel = NULL;

	if (client->group) {
		dma_unmap_single(vic->dev, vic->falcon.firmware.phys,
				 vic->falcon.firmware.size, DMA_TO_DEVICE);
		tegra_drm_free(tegra, vic->falcon.firmware.size,
			       vic->falcon.firmware.virt,
			       vic->falcon.firmware.iova);
	} else {
		dma_free_coherent(vic->dev, vic->falcon.firmware.size,
				  vic->falcon.firmware.virt,
				  vic->falcon.firmware.iova);
	}

	return 0;
}

static const struct host1x_client_ops vic_client_ops = {
	.init = vic_init,
	.exit = vic_exit,
};

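/*
 * Read the firmware image and map it for the Falcon: via dma_alloc_coherent()
 * when no IOMMU group is shared with host1x, or from the shared domain via
 * tegra_drm_alloc() otherwise. Also records whether the firmware is new
 * enough to allow per-context stream ID isolation.
 */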
static int vic_load_firmware(struct vic *vic)
{
	struct host1x_client *client = &vic->client.base;
	struct tegra_drm *tegra = vic->client.drm;
	static DEFINE_MUTEX(lock);
	u32 fce_bin_data_offset;
	dma_addr_t iova;
	size_t size;
	void *virt;
	int err;

	mutex_lock(&lock);

	if (vic->falcon.firmware.virt) {
		err = 0;
		goto unlock;
	}

	err = falcon_read_firmware(&vic->falcon, vic->config->firmware);
	if (err < 0)
		goto unlock;

	size = vic->falcon.firmware.size;

	if (!client->group) {
		virt = dma_alloc_coherent(vic->dev, size, &iova, GFP_KERNEL);
		if (!virt) {
			err = -ENOMEM;
			goto unlock;
		}
	} else {
		virt = tegra_drm_alloc(tegra, size, &iova);
		if (IS_ERR(virt)) {
			err = PTR_ERR(virt);
			goto unlock;
		}
	}

	vic->falcon.firmware.virt = virt;
	vic->falcon.firmware.iova = iova;

	err = falcon_load_firmware(&vic->falcon);
	if (err < 0)
		goto cleanup;

	/*
	 * In this case we have received an IOVA from the shared domain, so we
	 * need to make sure to get the physical address so that the DMA API
	 * knows what memory pages to flush the cache for.
	 */
	if (client->group) {
		dma_addr_t phys;

		phys = dma_map_single(vic->dev, virt, size, DMA_TO_DEVICE);

		err = dma_mapping_error(vic->dev, phys);
		if (err < 0)
			goto cleanup;

		vic->falcon.firmware.phys = phys;
	}

	/*
	 * Check if firmware is new enough to not require mapping firmware
	 * to data buffer domains.
	 */
	fce_bin_data_offset = *(u32 *)(virt + VIC_UCODE_FCE_DATA_OFFSET);

	if (!vic->config->supports_sid) {
		vic->can_use_context = false;
	} else if (fce_bin_data_offset != 0x0 && fce_bin_data_offset != 0xa5a5a5a5) {
		/*
		 * Firmware will access FCE through STREAMID0, so context
		 * isolation cannot be used.
		 */
		vic->can_use_context = false;
		dev_warn_once(vic->dev, "context isolation disabled due to old firmware\n");
	} else {
		vic->can_use_context = true;
	}

unlock:
	mutex_unlock(&lock);
	return err;

cleanup:
	if (!client->group)
		dma_free_coherent(vic->dev, size, virt, iova);
	else
		tegra_drm_free(tegra, size, virt, iova);

	mutex_unlock(&lock);
	return err;
}

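/*
 * Runtime PM resume: ungate the clock, deassert reset, then (re)load the
 * firmware and boot the Falcon.
 */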
static int __maybe_unused vic_runtime_resume(struct device *dev)
{
	struct vic *vic = dev_get_drvdata(dev);
	int err;

	err = clk_prepare_enable(vic->clk);
	if (err < 0)
		return err;

	usleep_range(10, 20);

	err = reset_control_deassert(vic->rst);
	if (err < 0)
		goto disable;

	usleep_range(10, 20);

	err = vic_load_firmware(vic);
	if (err < 0)
		goto assert;

	err = vic_boot(vic);
	if (err < 0)
		goto assert;

	return 0;

assert:
	reset_control_assert(vic->rst);
disable:
	clk_disable_unprepare(vic->clk);
	return err;
}

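/*
 * Runtime PM suspend: stop the channel, assert reset and gate the clock.
 * The engine is booted again on the next resume.
 */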
static int __maybe_unused vic_runtime_suspend(struct device *dev)
{
	struct vic *vic = dev_get_drvdata(dev);
	int err;

	host1x_channel_stop(vic->channel);

	err = reset_control_assert(vic->rst);
	if (err < 0)
		return err;

	usleep_range(2000, 4000);

	clk_disable_unprepare(vic->clk);

	return 0;
}

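/* Hand out a reference to the VIC channel for a new userspace context. */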
static int vic_open_channel(struct tegra_drm_client *client,
			    struct tegra_drm_context *context)
{
	struct vic *vic = to_vic(client);

	context->channel = host1x_channel_get(vic->channel);
	if (!context->channel)
		return -ENOMEM;

	return 0;
}

static void vic_close_channel(struct tegra_drm_context *context)
{
	host1x_channel_put(context->channel);
}

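/*
 * Report whether memory context isolation can be used; this depends on both
 * the SoC generation and the loaded firmware version.
 */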
static int vic_can_use_memory_ctx(struct tegra_drm_client *client, bool *supported)
{
	struct vic *vic = to_vic(client);
	int err;

	/* This doesn't access HW so it's safe to call without powering up. */
	err = vic_load_firmware(vic);
	if (err < 0)
		return err;

	*supported = vic->can_use_context;

	return 0;
}

static const struct tegra_drm_client_ops vic_ops = {
	.open_channel = vic_open_channel,
	.close_channel = vic_close_channel,
	.submit = tegra_drm_submit,
	.get_streamid_offset = tegra_drm_get_streamid_offset_thi,
	.can_use_memory_ctx = vic_can_use_memory_ctx,
};

#define NVIDIA_TEGRA_124_VIC_FIRMWARE "nvidia/tegra124/vic03_ucode.bin"

static const struct vic_config vic_t124_config = {
	.firmware = NVIDIA_TEGRA_124_VIC_FIRMWARE,
	.supports_sid = false,
};

#define NVIDIA_TEGRA_210_VIC_FIRMWARE "nvidia/tegra210/vic04_ucode.bin"

static const struct vic_config vic_t210_config = {
	.firmware = NVIDIA_TEGRA_210_VIC_FIRMWARE,
	.supports_sid = false,
};

#define NVIDIA_TEGRA_186_VIC_FIRMWARE "nvidia/tegra186/vic04_ucode.bin"

static const struct vic_config vic_t186_config = {
	.firmware = NVIDIA_TEGRA_186_VIC_FIRMWARE,
	.supports_sid = true,
};

#define NVIDIA_TEGRA_194_VIC_FIRMWARE "nvidia/tegra194/vic.bin"

static const struct vic_config vic_t194_config = {
	.firmware = NVIDIA_TEGRA_194_VIC_FIRMWARE,
	.supports_sid = true,
};

#define NVIDIA_TEGRA_234_VIC_FIRMWARE "nvidia/tegra234/vic.bin"

static const struct vic_config vic_t234_config = {
	.firmware = NVIDIA_TEGRA_234_VIC_FIRMWARE,
	.supports_sid = true,
};

static const struct of_device_id tegra_vic_of_match[] = {
	{ .compatible = "nvidia,tegra124-vic", .data = &vic_t124_config },
	{ .compatible = "nvidia,tegra210-vic", .data = &vic_t210_config },
	{ .compatible = "nvidia,tegra186-vic", .data = &vic_t186_config },
	{ .compatible = "nvidia,tegra194-vic", .data = &vic_t194_config },
	{ .compatible = "nvidia,tegra234-vic", .data = &vic_t234_config },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_vic_of_match);

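/*
 * Map registers, acquire the clock and (outside a PM domain) the reset,
 * initialize the Falcon helper and register the host1x client.
 */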
static int vic_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct host1x_syncpt **syncpts;
	struct vic *vic;
	int err;

	/* inherit DMA mask from host1x parent */
	err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
		return err;
	}

	vic = devm_kzalloc(dev, sizeof(*vic), GFP_KERNEL);
	if (!vic)
		return -ENOMEM;

	vic->config = of_device_get_match_data(dev);

	syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
	if (!syncpts)
		return -ENOMEM;

	vic->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(vic->regs))
		return PTR_ERR(vic->regs);

	vic->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(vic->clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		return PTR_ERR(vic->clk);
	}

	err = clk_set_rate(vic->clk, ULONG_MAX);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to set clock rate\n");
		return err;
	}

	if (!dev->pm_domain) {
		vic->rst = devm_reset_control_get(dev, "vic");
		if (IS_ERR(vic->rst)) {
			dev_err(&pdev->dev, "failed to get reset\n");
			return PTR_ERR(vic->rst);
		}
	}

	vic->falcon.dev = dev;
	vic->falcon.regs = vic->regs;

	err = falcon_init(&vic->falcon);
	if (err < 0)
		return err;

	platform_set_drvdata(pdev, vic);

	INIT_LIST_HEAD(&vic->client.base.list);
	vic->client.base.ops = &vic_client_ops;
	vic->client.base.dev = dev;
	vic->client.base.class = HOST1X_CLASS_VIC;
	vic->client.base.syncpts = syncpts;
	vic->client.base.num_syncpts = 1;
	vic->dev = dev;

	INIT_LIST_HEAD(&vic->client.list);
	vic->client.version = vic->config->version;
	vic->client.ops = &vic_ops;

	err = host1x_client_register(&vic->client.base);
	if (err < 0) {
		dev_err(dev, "failed to register host1x client: %d\n", err);
		goto exit_falcon;
	}

	return 0;

exit_falcon:
	falcon_exit(&vic->falcon);

	return err;
}

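/* Unregister the host1x client and tear down the Falcon helper. */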
static void vic_remove(struct platform_device *pdev)
{
	struct vic *vic = platform_get_drvdata(pdev);

	host1x_client_unregister(&vic->client.base);

	falcon_exit(&vic->falcon);
}

static const struct dev_pm_ops vic_pm_ops = {
	RUNTIME_PM_OPS(vic_runtime_suspend, vic_runtime_resume, NULL)
	SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};

struct platform_driver tegra_vic_driver = {
	.driver = {
		.name = "tegra-vic",
		.of_match_table = tegra_vic_of_match,
		.pm = &vic_pm_ops
	},
	.probe = vic_probe,
	.remove_new = vic_remove,
};

#if IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_124_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_210_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_186_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_194_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_234_VIC_FIRMWARE);
#endif