// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2021, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <soc/tegra/pmc.h>

#include "drm.h"
#include "falcon.h"
#include "vic.h"

#define NVDEC_TFBIF_TRANSCFG	0x2c44
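/*
 * Per-SoC configuration (firmware image, engine version, stream ID support)
 * and per-device driver state.
 */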
struct nvdec_config {
	const char *firmware;
	unsigned int version;
	bool supports_sid;
};

struct nvdec {
	struct falcon falcon;

	void __iomem *regs;
	struct tegra_drm_client client;
	struct host1x_channel *channel;
	struct device *dev;
	struct clk *clk;

	/* Platform configuration */
	const struct nvdec_config *config;
};
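/*
 * to_nvdec() recovers the driver-private structure from the generic
 * tegra_drm_client; nvdec_writel() is a small MMIO write accessor.
 */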
static inline struct nvdec *to_nvdec(struct tegra_drm_client *client)
{
	return container_of(client, struct nvdec, client);
}

static inline void nvdec_writel(struct nvdec *nvdec, u32 value,
				unsigned int offset)
{
	writel(value, nvdec->regs + offset);
}
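/*
 * Boot the NVDEC falcon microcontroller. When the SoC supports stream IDs
 * and an IOMMU is attached, program the TFBIF transfer configuration and the
 * THI stream ID registers first so that falcon DMA is translated by the SMMU.
 */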
static int nvdec_boot(struct nvdec *nvdec)
{
#ifdef CONFIG_IOMMU_API
	struct iommu_fwspec *spec = dev_iommu_fwspec_get(nvdec->dev);
#endif
	int err;

#ifdef CONFIG_IOMMU_API
	if (nvdec->config->supports_sid && spec) {
		u32 value;

		value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) | TRANSCFG_ATT(0, TRANSCFG_SID_HW);
		nvdec_writel(nvdec, value, NVDEC_TFBIF_TRANSCFG);

		if (spec->num_ids > 0) {
			value = spec->ids[0] & 0xffff;

			nvdec_writel(nvdec, value, VIC_THI_STREAMID0);
			nvdec_writel(nvdec, value, VIC_THI_STREAMID1);
		}
	}
#endif

	err = falcon_boot(&nvdec->falcon);
	if (err < 0)
		return err;

	err = falcon_wait_idle(&nvdec->falcon);
	if (err < 0) {
		dev_err(nvdec->dev, "falcon boot timed out\n");
		return err;
	}

	return 0;
}
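/*
 * host1x client initialization: attach to the IOMMU domain, request a
 * channel and a syncpoint, enable runtime PM with a 500 ms autosuspend
 * delay, and register with the Tegra DRM core.
 */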
static int nvdec_init(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = dev->dev_private;
	struct nvdec *nvdec = to_nvdec(drm);
	int err;

	err = host1x_client_iommu_attach(client);
	if (err < 0 && err != -ENODEV) {
		dev_err(nvdec->dev, "failed to attach to domain: %d\n", err);
		return err;
	}

	nvdec->channel = host1x_channel_request(client);
	if (!nvdec->channel) {
		err = -ENOMEM;
		goto detach;
	}

	client->syncpts[0] = host1x_syncpt_request(client, 0);
	if (!client->syncpts[0]) {
		err = -ENOMEM;
		goto free_channel;
	}

	pm_runtime_enable(client->dev);
	pm_runtime_use_autosuspend(client->dev);
	pm_runtime_set_autosuspend_delay(client->dev, 500);

	err = tegra_drm_register_client(tegra, drm);
	if (err < 0)
		goto disable_rpm;

	/*
	 * Inherit the DMA parameters (such as maximum segment size) from the
	 * parent host1x device.
	 */
	client->dev->dma_parms = client->host->dma_parms;

	return 0;

disable_rpm:
	pm_runtime_dont_use_autosuspend(client->dev);
	pm_runtime_force_suspend(client->dev);

	host1x_syncpt_put(client->syncpts[0]);
free_channel:
	host1x_channel_put(nvdec->channel);
detach:
	host1x_client_iommu_detach(client);

	return err;
}
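/*
 * Tear down everything set up in nvdec_init() and release the firmware
 * memory, using the release path that matches how it was allocated in
 * nvdec_load_firmware().
 */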
static int nvdec_exit(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = dev->dev_private;
	struct nvdec *nvdec = to_nvdec(drm);
	int err;

	/* avoid a dangling pointer just in case this disappears */
	client->dev->dma_parms = NULL;

	err = tegra_drm_unregister_client(tegra, drm);
	if (err < 0)
		return err;

	pm_runtime_dont_use_autosuspend(client->dev);
	pm_runtime_force_suspend(client->dev);

	host1x_syncpt_put(client->syncpts[0]);
	host1x_channel_put(nvdec->channel);
	host1x_client_iommu_detach(client);

	nvdec->channel = NULL;

	if (client->group) {
		dma_unmap_single(nvdec->dev, nvdec->falcon.firmware.phys,
				 nvdec->falcon.firmware.size, DMA_TO_DEVICE);
		tegra_drm_free(tegra, nvdec->falcon.firmware.size,
			       nvdec->falcon.firmware.virt,
			       nvdec->falcon.firmware.iova);
	} else {
		dma_free_coherent(nvdec->dev, nvdec->falcon.firmware.size,
				  nvdec->falcon.firmware.virt,
				  nvdec->falcon.firmware.iova);
	}

	return 0;
}
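/* Callbacks invoked when the host1x client is added to / removed from the DRM device. */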
static const struct host1x_client_ops nvdec_client_ops = {
	.init = nvdec_init,
	.exit = nvdec_exit,
};
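/*
 * Read the firmware image and place it in memory that the falcon DMA engine
 * can reach: coherent DMA memory when no IOMMU group is used, or a buffer in
 * the shared IOMMU domain obtained via tegra_drm_alloc() otherwise.
 */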
static int nvdec_load_firmware(struct nvdec *nvdec)
{
	struct host1x_client *client = &nvdec->client.base;
	struct tegra_drm *tegra = nvdec->client.drm;
	dma_addr_t iova;
	size_t size;
	void *virt;
	int err;

	if (nvdec->falcon.firmware.virt)
		return 0;

	err = falcon_read_firmware(&nvdec->falcon, nvdec->config->firmware);
	if (err < 0)
		return err;

	size = nvdec->falcon.firmware.size;

	if (!client->group) {
		virt = dma_alloc_coherent(nvdec->dev, size, &iova, GFP_KERNEL);

		err = dma_mapping_error(nvdec->dev, iova);
		if (err < 0)
			return err;
	} else {
		virt = tegra_drm_alloc(tegra, size, &iova);
	}

	nvdec->falcon.firmware.virt = virt;
	nvdec->falcon.firmware.iova = iova;

	err = falcon_load_firmware(&nvdec->falcon);
	if (err < 0)
		goto cleanup;

	/*
	 * In this case we have received an IOVA from the shared domain, so we
	 * need to make sure to get the physical address so that the DMA API
	 * knows what memory pages to flush the cache for.
	 */
	if (client->group) {
		dma_addr_t phys;

		phys = dma_map_single(nvdec->dev, virt, size, DMA_TO_DEVICE);

		err = dma_mapping_error(nvdec->dev, phys);
		if (err < 0)
			goto cleanup;

		nvdec->falcon.firmware.phys = phys;
	}

	return 0;

cleanup:
	if (!client->group)
		dma_free_coherent(nvdec->dev, size, virt, iova);
	else
		tegra_drm_free(tegra, size, virt, iova);

	return err;
}
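/*
 * Runtime PM resume: enable the clock, wait briefly for the engine to
 * settle, then (re)load the firmware and boot the falcon.
 */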
static __maybe_unused int nvdec_runtime_resume(struct device *dev)
{
	struct nvdec *nvdec = dev_get_drvdata(dev);
	int err;

	err = clk_prepare_enable(nvdec->clk);
	if (err < 0)
		return err;

	usleep_range(10, 20);

	err = nvdec_load_firmware(nvdec);
	if (err < 0)
		goto disable;

	err = nvdec_boot(nvdec);
	if (err < 0)
		goto disable;

	return 0;

disable:
	clk_disable_unprepare(nvdec->clk);
	return err;
}
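/* Runtime PM suspend: stop the channel before gating the engine clock. */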
static __maybe_unused int nvdec_runtime_suspend(struct device *dev)
{
	struct nvdec *nvdec = dev_get_drvdata(dev);

	host1x_channel_stop(nvdec->channel);

	clk_disable_unprepare(nvdec->clk);

	return 0;
}
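/* Each userspace context takes its own reference to the hardware channel. */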
static int nvdec_open_channel(struct tegra_drm_client *client,
			      struct tegra_drm_context *context)
{
	struct nvdec *nvdec = to_nvdec(client);

	context->channel = host1x_channel_get(nvdec->channel);
	if (!context->channel)
		return -ENOMEM;

	return 0;
}

static void nvdec_close_channel(struct tegra_drm_context *context)
{
	host1x_channel_put(context->channel);
}
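/* Report whether the engine can use a memory context for stream ID isolation. */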
static int nvdec_can_use_memory_ctx(struct tegra_drm_client *client, bool *supported)
{
	*supported = true;

	return 0;
}
static const struct tegra_drm_client_ops nvdec_ops = {
	.open_channel = nvdec_open_channel,
	.close_channel = nvdec_close_channel,
	.submit = tegra_drm_submit,
	.get_streamid_offset = tegra_drm_get_streamid_offset_thi,
	.can_use_memory_ctx = nvdec_can_use_memory_ctx,
};
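/* Per-SoC firmware images and configuration. */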
#define NVIDIA_TEGRA_210_NVDEC_FIRMWARE "nvidia/tegra210/nvdec.bin"

static const struct nvdec_config nvdec_t210_config = {
	.firmware = NVIDIA_TEGRA_210_NVDEC_FIRMWARE,
	.version = 0x21,
	.supports_sid = false,
};

#define NVIDIA_TEGRA_186_NVDEC_FIRMWARE "nvidia/tegra186/nvdec.bin"

static const struct nvdec_config nvdec_t186_config = {
	.firmware = NVIDIA_TEGRA_186_NVDEC_FIRMWARE,
	.version = 0x18,
	.supports_sid = true,
};

#define NVIDIA_TEGRA_194_NVDEC_FIRMWARE "nvidia/tegra194/nvdec.bin"

static const struct nvdec_config nvdec_t194_config = {
	.firmware = NVIDIA_TEGRA_194_NVDEC_FIRMWARE,
	.version = 0x19,
	.supports_sid = true,
};
static const struct of_device_id tegra_nvdec_of_match[] = {
	{ .compatible = "nvidia,tegra210-nvdec", .data = &nvdec_t210_config },
	{ .compatible = "nvidia,tegra186-nvdec", .data = &nvdec_t186_config },
	{ .compatible = "nvidia,tegra194-nvdec", .data = &nvdec_t194_config },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_nvdec_of_match);
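/*
 * Probe: map registers, acquire the clock, initialize the falcon helper and
 * register the engine as a host1x client; the rest of the setup happens in
 * nvdec_init() once the client is attached to the DRM device.
 */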
static int nvdec_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct host1x_syncpt **syncpts;
	struct nvdec *nvdec;
	u32 host_class;
	int err;

	/* inherit DMA mask from host1x parent */
	err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
		return err;
	}

	nvdec = devm_kzalloc(dev, sizeof(*nvdec), GFP_KERNEL);
	if (!nvdec)
		return -ENOMEM;

	nvdec->config = of_device_get_match_data(dev);

	syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
	if (!syncpts)
		return -ENOMEM;

	nvdec->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(nvdec->regs))
		return PTR_ERR(nvdec->regs);

	nvdec->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(nvdec->clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		return PTR_ERR(nvdec->clk);
	}

	err = clk_set_rate(nvdec->clk, ULONG_MAX);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to set clock rate\n");
		return err;
	}

	err = of_property_read_u32(dev->of_node, "nvidia,host1x-class", &host_class);
	if (err < 0)
		host_class = HOST1X_CLASS_NVDEC;

	nvdec->falcon.dev = dev;
	nvdec->falcon.regs = nvdec->regs;

	err = falcon_init(&nvdec->falcon);
	if (err < 0)
		return err;

	platform_set_drvdata(pdev, nvdec);

	INIT_LIST_HEAD(&nvdec->client.base.list);
	nvdec->client.base.ops = &nvdec_client_ops;
	nvdec->client.base.dev = dev;
	nvdec->client.base.class = host_class;
	nvdec->client.base.syncpts = syncpts;
	nvdec->client.base.num_syncpts = 1;
	nvdec->dev = dev;

	INIT_LIST_HEAD(&nvdec->client.list);
	nvdec->client.version = nvdec->config->version;
	nvdec->client.ops = &nvdec_ops;

	err = host1x_client_register(&nvdec->client.base);
	if (err < 0) {
		dev_err(dev, "failed to register host1x client: %d\n", err);
		goto exit_falcon;
	}

	return 0;

exit_falcon:
	falcon_exit(&nvdec->falcon);

	return err;
}
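/* Unregister the host1x client and tear down the falcon helper. */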
static int nvdec_remove(struct platform_device *pdev)
{
	struct nvdec *nvdec = platform_get_drvdata(pdev);
	int err;

	err = host1x_client_unregister(&nvdec->client.base);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
			err);
		return err;
	}

	falcon_exit(&nvdec->falcon);

	return 0;
}
static const struct dev_pm_ops nvdec_pm_ops = {
	SET_RUNTIME_PM_OPS(nvdec_runtime_suspend, nvdec_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};
struct platform_driver tegra_nvdec_driver = {
	.driver = {
		.name = "tegra-nvdec",
		.of_match_table = tegra_nvdec_of_match,
		.pm = &nvdec_pm_ops,
	},
	.probe = nvdec_probe,
	.remove = nvdec_remove,
};
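/* Only reference firmware images for SoC generations that are enabled. */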
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_210_NVDEC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_186_NVDEC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_194_NVDEC_FIRMWARE);
#endif