// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x driver
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
#include <trace/events/host1x.h>
#undef CREATE_TRACE_POINTS

#include "bus.h"
#include "channel.h"
#include "debug.h"
#include "dev.h"
#include "intr.h"

#include "hw/host1x01.h"
#include "hw/host1x02.h"
#include "hw/host1x04.h"
#include "hw/host1x05.h"
#include "hw/host1x06.h"
#include "hw/host1x07.h"

void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r)
{
	writel(v, host1x->hv_regs + r);
}

u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r)
{
	return readl(host1x->hv_regs + r);
}

void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
{
	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

	writel(v, sync_regs + r);
}

u32 host1x_sync_readl(struct host1x *host1x, u32 r)
{
	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

	return readl(sync_regs + r);
}

void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r)
{
	writel(v, ch->regs + r);
}

u32 host1x_ch_readl(struct host1x_channel *ch, u32 r)
{
	return readl(ch->regs + r);
}

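/*
 * host1x exposes several register apertures: the hypervisor aperture
 * (hv_regs, only mapped when info->has_hypervisor is set), the syncpoint
 * block at info->sync_offset within the main aperture, and one register
 * window per channel. For example, reading the raw value of a syncpoint
 * goes through the sync aperture (HOST1X_SYNC_SYNCPT() is the
 * generation-specific register macro from the hw/ headers; shown here for
 * illustration only):
 *
 *	u32 value = host1x_sync_readl(host, HOST1X_SYNC_SYNCPT(id));
 */
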
static const struct host1x_info host1x01_info = {
	.init = host1x01_init,
	.sync_offset = 0x3000,
	.dma_mask = DMA_BIT_MASK(32),
	.has_wide_gather = false,
	.has_hypervisor = false,
};

static const struct host1x_info host1x02_info = {
	.init = host1x02_init,
	.sync_offset = 0x3000,
	.dma_mask = DMA_BIT_MASK(32),
	.has_wide_gather = false,
	.has_hypervisor = false,
};

static const struct host1x_info host1x04_info = {
	.init = host1x04_init,
	.sync_offset = 0x2100,
	.dma_mask = DMA_BIT_MASK(34),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
};

static const struct host1x_info host1x05_info = {
	.init = host1x05_init,
	.sync_offset = 0x2100,
	.dma_mask = DMA_BIT_MASK(34),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
};

static const struct host1x_sid_entry tegra186_sid_table[] = {
	{
		/* VIC */
		.base = 0x1af0,
		.offset = 0x30,
		.limit = 0x34
	},
};

static const struct host1x_info host1x06_info = {
	.init = host1x06_init,
	.dma_mask = DMA_BIT_MASK(40),
	.has_wide_gather = true,
	.has_hypervisor = true,
	.num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
	.sid_table = tegra186_sid_table,
};

static const struct host1x_sid_entry tegra194_sid_table[] = {
	{
		/* VIC */
		.base = 0x1af0,
		.offset = 0x30,
		.limit = 0x34
	},
};

static const struct host1x_info host1x07_info = {
	.init = host1x07_init,
	.dma_mask = DMA_BIT_MASK(40),
	.has_wide_gather = true,
	.has_hypervisor = true,
	.num_sid_entries = ARRAY_SIZE(tegra194_sid_table),
	.sid_table = tegra194_sid_table,
};

static const struct of_device_id host1x_of_match[] = {
	{ .compatible = "nvidia,tegra194-host1x", .data = &host1x07_info, },
	{ .compatible = "nvidia,tegra186-host1x", .data = &host1x06_info, },
	{ .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
	{ .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
	{ .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
	{ .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
	{ .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
	{ },
};
MODULE_DEVICE_TABLE(of, host1x_of_match);

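/*
 * Illustration only: the compatible strings above match device tree nodes
 * such as the following (see the nvidia,tegra*-host1x bindings for the
 * full list of required properties):
 *
 *	host1x@50000000 {
 *		compatible = "nvidia,tegra210-host1x";
 *		...
 *	};
 *
 * of_device_get_match_data() in host1x_probe() then returns the
 * host1x_info structure associated with the matched entry.
 */
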
/*
 * Set up the stream ID table in hypervisor register space: each table entry
 * occupies two consecutive 32-bit registers starting at entry->base, which
 * are programmed with the entry's offset and limit values.
 */
static void host1x_setup_sid_table(struct host1x *host)
{
	const struct host1x_info *info = host->info;
	unsigned int i;

	for (i = 0; i < info->num_sid_entries; i++) {
		const struct host1x_sid_entry *entry = &info->sid_table[i];

		host1x_hypervisor_writel(host, entry->offset, entry->base);
		host1x_hypervisor_writel(host, entry->limit, entry->base + 4);
	}
}

static bool host1x_wants_iommu(struct host1x *host1x)
{
	/*
	 * If we support addressing a maximum of 32 bits of physical memory
	 * and if the host1x firewall is enabled, there's no need to enable
	 * IOMMU support. This can happen for example on Tegra20, Tegra30
	 * and Tegra114.
	 *
	 * Tegra124 and later can address up to 34 bits of physical memory and
	 * many platforms come equipped with more than 2 GiB of system memory,
	 * which requires crossing the 4 GiB boundary. But there's a catch: on
	 * SoCs before Tegra186 (i.e. Tegra124 and Tegra210), the host1x can
	 * only address up to 32 bits of memory in GATHER opcodes, which means
	 * that command buffers need to either be in the first 2 GiB of system
	 * memory (which could quickly lead to memory exhaustion), or command
	 * buffers need to be treated differently from other buffers (which is
	 * not possible with the current ABI).
	 *
	 * A third option is to use the IOMMU in these cases to make sure all
	 * buffers will be mapped into a 32-bit IOVA space that host1x can
	 * address. This allows all of the system memory to be used and works
	 * within the limitations of the host1x on these SoCs.
	 *
	 * In summary, default to enable IOMMU on Tegra124 and later. For any
	 * of the earlier SoCs, only use the IOMMU for additional safety when
	 * the host1x firewall is disabled.
	 */
	if (host1x->info->dma_mask <= DMA_BIT_MASK(32)) {
		if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
			return false;
	}

	return true;
}

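/*
 * The decision above, summarized (DMA masks taken from the host1x_info
 * tables in this file):
 *
 *	SoC generation    dma_mask   firewall enabled   wants IOMMU
 *	Tegra20/30/114    32 bits    yes                no
 *	Tegra20/30/114    32 bits    no                 yes
 *	Tegra124 onward   34+ bits   either             yes
 */
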
static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
	int err;

	/*
	 * We may not always want to enable IOMMU support (for example if the
	 * host1x firewall is already enabled and we don't support addressing
	 * more than 32 bits of physical memory), so check for that first.
	 *
	 * Similarly, if host1x is already attached to an IOMMU (via the DMA
	 * API), don't try to attach again.
	 */
	if (!host1x_wants_iommu(host) || domain)
		return domain;

	host->group = iommu_group_get(host->dev);
	if (host->group) {
		struct iommu_domain_geometry *geometry;
		dma_addr_t start, end;
		unsigned long order;

		err = iova_cache_get();
		if (err < 0)
			goto put_group;

		host->domain = iommu_domain_alloc(&platform_bus_type);
		if (!host->domain) {
			err = -ENOMEM;
			goto put_cache;
		}

		err = iommu_attach_group(host->domain, host->group);
		if (err) {
			if (err == -ENODEV)
				err = 0;

			goto free_domain;
		}

		geometry = &host->domain->geometry;
		start = geometry->aperture_start & host->info->dma_mask;
		end = geometry->aperture_end & host->info->dma_mask;

		order = __ffs(host->domain->pgsize_bitmap);
		init_iova_domain(&host->iova, 1UL << order, start >> order);
		host->iova_end = end;

		domain = host->domain;
	}

	return domain;

free_domain:
	iommu_domain_free(host->domain);
	host->domain = NULL;
put_cache:
	iova_cache_put();
put_group:
	iommu_group_put(host->group);
	host->group = NULL;

	return ERR_PTR(err);
}

static int host1x_iommu_init(struct host1x *host)
{
	u64 mask = host->info->dma_mask;
	struct iommu_domain *domain;
	int err;

	domain = host1x_iommu_attach(host);
	if (IS_ERR(domain)) {
		err = PTR_ERR(domain);
		dev_err(host->dev, "failed to attach to IOMMU: %d\n", err);
		return err;
	}

	/*
	 * If we're not behind an IOMMU make sure we don't get push buffers
	 * that are allocated outside of the range addressable by the GATHER
	 * opcode.
	 *
	 * Newer generations of Tegra (Tegra186 and later) support a wide
	 * variant of the GATHER opcode that allows addressing more bits.
	 */
	if (!domain && !host->info->has_wide_gather)
		mask = DMA_BIT_MASK(32);

	err = dma_coerce_mask_and_coherent(host->dev, mask);
	if (err < 0) {
		dev_err(host->dev, "failed to set DMA mask: %d\n", err);
		return err;
	}

	return 0;
}

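/*
 * Effective DMA masks resulting from the logic above (a summary, assuming
 * the dma_mask values from the host1x_info tables in this file):
 *
 *	Tegra124/210 without IOMMU: clamped to 32 bits (no wide GATHER)
 *	Tegra186/194 without IOMMU: full 40-bit mask (wide GATHER)
 *	any SoC behind an IOMMU:    info->dma_mask used unchanged
 */
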
static void host1x_iommu_exit(struct host1x *host)
{
	if (host->domain) {
		put_iova_domain(&host->iova);
		iommu_detach_group(host->domain, host->group);

		iommu_domain_free(host->domain);
		host->domain = NULL;

		iova_cache_put();

		iommu_group_put(host->group);
		host->group = NULL;
	}
}

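/*
 * Probe brings the device up in dependency order: register apertures and the
 * syncpoint interrupt are looked up first, then the DMA mask and (optionally)
 * the IOMMU are configured, the channel list is initialized, clock and reset
 * are released, syncpoints and interrupts are set up, and finally the host1x
 * bus is registered so that client devices can be populated. The error path
 * unwinds these steps in reverse order.
 */
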
static int host1x_probe(struct platform_device *pdev)
{
	struct host1x *host;
	struct resource *regs, *hv_regs = NULL;
	int syncpt_irq;
	int err;

	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->info = of_device_get_match_data(&pdev->dev);

	if (host->info->has_hypervisor) {
		regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vm");
		if (!regs) {
			dev_err(&pdev->dev, "failed to get vm registers\n");
			return -ENXIO;
		}

		hv_regs = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						       "hypervisor");
		if (!hv_regs) {
			dev_err(&pdev->dev,
				"failed to get hypervisor registers\n");
			return -ENXIO;
		}
	} else {
		regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!regs) {
			dev_err(&pdev->dev, "failed to get registers\n");
			return -ENXIO;
		}
	}

	syncpt_irq = platform_get_irq(pdev, 0);
	if (syncpt_irq < 0)
		return syncpt_irq;

	mutex_init(&host->devices_lock);
	INIT_LIST_HEAD(&host->devices);
	INIT_LIST_HEAD(&host->list);
	host->dev = &pdev->dev;

	/* set common host1x device data */
	platform_set_drvdata(pdev, host);

	host->regs = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(host->regs))
		return PTR_ERR(host->regs);

	if (host->info->has_hypervisor) {
		host->hv_regs = devm_ioremap_resource(&pdev->dev, hv_regs);
		if (IS_ERR(host->hv_regs))
			return PTR_ERR(host->hv_regs);
	}

	host->dev->dma_parms = &host->dma_parms;
	dma_set_max_seg_size(host->dev, UINT_MAX);

	if (host->info->init) {
		err = host->info->init(host);
		if (err)
			return err;
	}

	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		err = PTR_ERR(host->clk);

		if (err != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to get clock: %d\n", err);

		return err;
	}

	host->rst = devm_reset_control_get(&pdev->dev, "host1x");
	if (IS_ERR(host->rst)) {
		err = PTR_ERR(host->rst);
		dev_err(&pdev->dev, "failed to get reset: %d\n", err);
		return err;
	}

	err = host1x_iommu_init(host);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to setup IOMMU: %d\n", err);
		return err;
	}

	err = host1x_channel_list_init(&host->channel_list,
				       host->info->nb_channels);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize channel list\n");
		goto iommu_exit;
	}

	err = clk_prepare_enable(host->clk);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to enable clock\n");
		goto free_channels;
	}

	err = reset_control_deassert(host->rst);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to deassert reset: %d\n", err);
		goto unprepare_disable;
	}

	err = host1x_syncpt_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize syncpts\n");
		goto reset_assert;
	}

	err = host1x_intr_init(host, syncpt_irq);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize interrupts\n");
		goto deinit_syncpt;
	}

	host1x_debug_init(host);

	if (host->info->has_hypervisor)
		host1x_setup_sid_table(host);

	err = host1x_register(host);
	if (err < 0)
		goto deinit_debugfs;

	err = devm_of_platform_populate(&pdev->dev);
	if (err < 0)
		goto unregister;

	return 0;

unregister:
	host1x_unregister(host);
deinit_debugfs:
	host1x_debug_deinit(host);
	host1x_intr_deinit(host);
deinit_syncpt:
	host1x_syncpt_deinit(host);
reset_assert:
	reset_control_assert(host->rst);
unprepare_disable:
	clk_disable_unprepare(host->clk);
free_channels:
	host1x_channel_list_free(&host->channel_list);
iommu_exit:
	host1x_iommu_exit(host);

	return err;
}

static int host1x_remove(struct platform_device *pdev)
{
	struct host1x *host = platform_get_drvdata(pdev);

	/* tear down in the reverse order of host1x_probe() */
	host1x_unregister(host);
	host1x_debug_deinit(host);
	host1x_intr_deinit(host);
	host1x_syncpt_deinit(host);
	reset_control_assert(host->rst);
	clk_disable_unprepare(host->clk);
	host1x_iommu_exit(host);

	return 0;
}

static struct platform_driver tegra_host1x_driver = {
	.driver = {
		.name = "tegra-host1x",
		.of_match_table = host1x_of_match,
	},
	.probe = host1x_probe,
	.remove = host1x_remove,
};

static struct platform_driver * const drivers[] = {
	&tegra_host1x_driver,
};

static int __init tegra_host1x_init(void)
{
	int err;

	/* the host1x bus must exist before any driver can attach to it */
	err = bus_register(&host1x_bus_type);
	if (err < 0)
		return err;

	err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
	if (err < 0)
		bus_unregister(&host1x_bus_type);

	return err;
}
module_init(tegra_host1x_init);

static void __exit tegra_host1x_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
	bus_unregister(&host1x_bus_type);
}
module_exit(tegra_host1x_exit);

/**
 * host1x_get_dma_mask() - query the supported DMA mask for host1x
 * @host1x: host1x instance
 *
 * Note that this returns the supported DMA mask for host1x, which can be
 * different from the applicable DMA mask under certain circumstances.
 */
u64 host1x_get_dma_mask(struct host1x *host1x)
{
	return host1x->info->dma_mask;
}
EXPORT_SYMBOL(host1x_get_dma_mask);

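/*
 * Usage sketch (hypothetical client code, for illustration only): a client
 * driver can query this to learn how much address space host1x itself can
 * access, for example when deciding how to configure DMA for buffers that
 * host1x will read from:
 *
 *	u64 mask = host1x_get_dma_mask(host1x);
 *
 *	err = dma_coerce_mask_and_coherent(client->dev, mask);
 */
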
MODULE_DESCRIPTION("Host1x driver for Tegra products");
MODULE_LICENSE("GPL");