// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x driver
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include <soc/tegra/common.h>

#define CREATE_TRACE_POINTS
#include <trace/events/host1x.h>
#undef CREATE_TRACE_POINTS

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#endif

#include "bus.h"
#include "channel.h"
#include "context.h"
#include "debug.h"
#include "dev.h"
#include "intr.h"

#include "hw/host1x01.h"
#include "hw/host1x02.h"
#include "hw/host1x04.h"
#include "hw/host1x05.h"
#include "hw/host1x06.h"
#include "hw/host1x07.h"
#include "hw/host1x08.h"
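/*
 * Note: each hw/host1x0*.h header above declares the per-SoC
 * host1x0*_init() entry point that is wired into the .init callback of
 * the matching host1x_info table below.
 */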
void host1x_common_writel(struct host1x *host1x, u32 v, u32 r)
{
	writel(v, host1x->common_regs + r);
}

void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r)
{
	writel(v, host1x->hv_regs + r);
}

u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r)
{
	return readl(host1x->hv_regs + r);
}

void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
{
	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

	writel(v, sync_regs + r);
}

u32 host1x_sync_readl(struct host1x *host1x, u32 r)
{
	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

	return readl(sync_regs + r);
}

void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r)
{
	writel(v, ch->regs + r);
}

u32 host1x_ch_readl(struct host1x_channel *ch, u32 r)
{
	return readl(ch->regs + r);
}
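/*
 * Illustrative sketch (not part of the original file): the hw/ backends
 * build on these accessors, e.g. reading the live value of syncpoint @id
 * through the sync aperture:
 *
 *	u32 value = host1x_sync_readl(host, HOST1X_SYNC_SYNCPT(id));
 *
 * where HOST1X_SYNC_SYNCPT() comes from the per-SoC hw/hw_host1x*_sync.h
 * headers.
 */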
static const struct host1x_info host1x01_info = {
	.nb_channels = 8,
	.nb_pts = 32,
	.nb_mlocks = 16,
	.nb_bases = 8,
	.init = host1x01_init,
	.sync_offset = 0x3000,
	.dma_mask = DMA_BIT_MASK(32),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
	.reserve_vblank_syncpts = true,
};
static const struct host1x_info host1x02_info = {
	.nb_channels = 9,
	.nb_pts = 32,
	.nb_mlocks = 16,
	.nb_bases = 12,
	.init = host1x02_init,
	.sync_offset = 0x3000,
	.dma_mask = DMA_BIT_MASK(32),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
	.reserve_vblank_syncpts = true,
};
static const struct host1x_info host1x04_info = {
	.nb_channels = 12,
	.nb_pts = 192,
	.nb_mlocks = 16,
	.nb_bases = 64,
	.init = host1x04_init,
	.sync_offset = 0x2100,
	.dma_mask = DMA_BIT_MASK(34),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
	.reserve_vblank_syncpts = false,
};
static const struct host1x_info host1x05_info = {
	.nb_channels = 14,
	.nb_pts = 192,
	.nb_mlocks = 16,
	.nb_bases = 64,
	.init = host1x05_init,
	.sync_offset = 0x2100,
	.dma_mask = DMA_BIT_MASK(34),
	.has_wide_gather = false,
	.has_hypervisor = false,
	.num_sid_entries = 0,
	.sid_table = NULL,
	.reserve_vblank_syncpts = false,
};
static const struct host1x_sid_entry tegra186_sid_table[] = {
	{
		/* VIC */
		.base = 0x1af0,
		.offset = 0x30,
		.limit = 0x34
	},
	{
		/* NVDEC */
		.base = 0x1b00,
		.offset = 0x30,
		.limit = 0x34
	},
};
static const struct host1x_info host1x06_info = {
	.nb_channels = 63,
	.nb_pts = 576,
	.nb_mlocks = 24,
	.nb_bases = 16,
	.init = host1x06_init,
	.sync_offset = 0x0,
	.dma_mask = DMA_BIT_MASK(40),
	.has_wide_gather = true,
	.has_hypervisor = true,
	.num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
	.sid_table = tegra186_sid_table,
	.reserve_vblank_syncpts = false,
	.skip_reset_assert = true,
};
static const struct host1x_sid_entry tegra194_sid_table[] = {
	{
		/* VIC */
		.base = 0x1af0,
		.offset = 0x30,
		.limit = 0x34
	},
	{
		/* NVDEC */
		.base = 0x1b00,
		.offset = 0x30,
		.limit = 0x34
	},
	{
		/* NVDEC1 */
		.base = 0x1bc0,
		.offset = 0x30,
		.limit = 0x34
	},
};
static const struct host1x_info host1x07_info = {
	.nb_channels = 63,
	.nb_pts = 704,
	.nb_mlocks = 32,
	.nb_bases = 0,
	.init = host1x07_init,
	.sync_offset = 0x0,
	.dma_mask = DMA_BIT_MASK(40),
	.has_wide_gather = true,
	.has_hypervisor = true,
	.num_sid_entries = ARRAY_SIZE(tegra194_sid_table),
	.sid_table = tegra194_sid_table,
	.reserve_vblank_syncpts = false,
};
/*
 * Tegra234 has two stream ID protection tables, one for setting stream IDs
 * through the channel path via SETSTREAMID, and one for setting them via
 * MMIO. We program each engine's data stream ID in the channel path table
 * and firmware stream ID in the MMIO path table.
 */
static const struct host1x_sid_entry tegra234_sid_table[] = {
	{
		/* VIC channel */
		.base = 0x17b8,
		.offset = 0x30,
		.limit = 0x30
	},
	{
		/* VIC MMIO */
		.base = 0x1688,
		.offset = 0x34,
		.limit = 0x34
	},
	{
		/* NVDEC channel */
		.base = 0x17c8,
		.offset = 0x30,
		.limit = 0x30
	},
	{
		/* NVDEC MMIO */
		.base = 0x1698,
		.offset = 0x34,
		.limit = 0x34
	},
};
static const struct host1x_info host1x08_info = {
	.nb_channels = 63,
	.nb_pts = 1024,
	.nb_mlocks = 24,
	.nb_bases = 0,
	.init = host1x08_init,
	.sync_offset = 0x0,
	.dma_mask = DMA_BIT_MASK(40),
	.has_wide_gather = true,
	.has_hypervisor = true,
	.has_common = true,
	.num_sid_entries = ARRAY_SIZE(tegra234_sid_table),
	.sid_table = tegra234_sid_table,
	.streamid_vm_table = { 0x1004, 128 },
	.classid_vm_table = { 0x1404, 25 },
	.mmio_vm_table = { 0x1504, 25 },
	.reserve_vblank_syncpts = false,
};
static const struct of_device_id host1x_of_match[] = {
	{ .compatible = "nvidia,tegra234-host1x", .data = &host1x08_info, },
	{ .compatible = "nvidia,tegra194-host1x", .data = &host1x07_info, },
	{ .compatible = "nvidia,tegra186-host1x", .data = &host1x06_info, },
	{ .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
	{ .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
	{ .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
	{ .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
	{ .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
	{ },
};
MODULE_DEVICE_TABLE(of, host1x_of_match);
static void host1x_setup_virtualization_tables(struct host1x *host)
{
	const struct host1x_info *info = host->info;
	unsigned int i;

	if (!info->has_hypervisor)
		return;

	for (i = 0; i < info->num_sid_entries; i++) {
		const struct host1x_sid_entry *entry = &info->sid_table[i];

		host1x_hypervisor_writel(host, entry->offset, entry->base);
		host1x_hypervisor_writel(host, entry->limit, entry->base + 4);
	}

	for (i = 0; i < info->streamid_vm_table.count; i++) {
		/* Allow access to all stream IDs to all VMs. */
		host1x_hypervisor_writel(host, 0xff, info->streamid_vm_table.base + 4 * i);
	}

	for (i = 0; i < info->classid_vm_table.count; i++) {
		/* Allow access to all classes to all VMs. */
		host1x_hypervisor_writel(host, 0xff, info->classid_vm_table.base + 4 * i);
	}

	for (i = 0; i < info->mmio_vm_table.count; i++) {
		/* Use VM1 (that's us) as originator VMID for engine MMIO accesses. */
		host1x_hypervisor_writel(host, 0x1, info->mmio_vm_table.base + 4 * i);
	}
}
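/*
 * Worked example (illustrative): for the Tegra186 VIC entry above
 * (.base = 0x1af0, .offset = 0x30, .limit = 0x34), the first loop performs
 *
 *	host1x_hypervisor_writel(host, 0x30, 0x1af0);
 *	host1x_hypervisor_writel(host, 0x34, 0x1af0 + 4);
 *
 * i.e. the hypervisor aperture is told the start and end offsets of that
 * engine's stream ID registers.
 */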
static bool host1x_wants_iommu(struct host1x *host1x)
{
	/* Our IOMMU usage policy doesn't currently play well with GART */
	if (of_machine_is_compatible("nvidia,tegra20"))
		return false;

	/*
	 * If we support addressing a maximum of 32 bits of physical memory
	 * and if the host1x firewall is enabled, there's no need to enable
	 * IOMMU support. This can happen for example on Tegra20, Tegra30
	 * and Tegra114.
	 *
	 * Tegra124 and later can address up to 34 bits of physical memory and
	 * many platforms come equipped with more than 2 GiB of system memory,
	 * which requires crossing the 4 GiB boundary. But there's a catch: on
	 * SoCs before Tegra186 (i.e. Tegra124 and Tegra210), the host1x can
	 * only address up to 32 bits of memory in GATHER opcodes, which means
	 * that command buffers need to either be in the first 2 GiB of system
	 * memory (which could quickly lead to memory exhaustion), or command
	 * buffers need to be treated differently from other buffers (which is
	 * not possible with the current ABI).
	 *
	 * A third option is to use the IOMMU in these cases to make sure all
	 * buffers will be mapped into a 32-bit IOVA space that host1x can
	 * address. This allows all of the system memory to be used and works
	 * within the limitations of the host1x on these SoCs.
	 *
	 * In summary, default to enable IOMMU on Tegra124 and later. For any
	 * of the earlier SoCs, only use the IOMMU for additional safety when
	 * the host1x firewall is disabled.
	 */
	if (host1x->info->dma_mask <= DMA_BIT_MASK(32)) {
		if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
			return false;
	}

	return true;
}
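/*
 * Example outcomes of the policy above: Tegra30 (32-bit DMA mask) with the
 * firewall enabled gets no IOMMU, while Tegra124 (34-bit DMA mask) always
 * prefers the IOMMU so buffers beyond the 4 GiB boundary stay reachable.
 */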
static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
	int err;

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
	if (host->dev->archdata.mapping) {
		struct dma_iommu_mapping *mapping =
				to_dma_iommu_mapping(host->dev);
		arm_iommu_detach_device(host->dev);
		arm_iommu_release_mapping(mapping);

		domain = iommu_get_domain_for_dev(host->dev);
	}
#endif

	/*
	 * We may not always want to enable IOMMU support (for example if the
	 * host1x firewall is already enabled and we don't support addressing
	 * more than 32 bits of physical memory), so check for that first.
	 *
	 * Similarly, if host1x is already attached to an IOMMU (via the DMA
	 * API), don't try to attach again.
	 */
	if (!host1x_wants_iommu(host) || domain)
		return domain;

	host->group = iommu_group_get(host->dev);
	if (host->group) {
		struct iommu_domain_geometry *geometry;
		dma_addr_t start, end;
		unsigned long order;

		err = iova_cache_get();
		if (err < 0)
			goto put_group;

		host->domain = iommu_domain_alloc(&platform_bus_type);
		if (!host->domain) {
			err = -ENOMEM;
			goto put_cache;
		}

		err = iommu_attach_group(host->domain, host->group);
		if (err) {
			if (err == -ENODEV)
				err = 0;

			goto free_domain;
		}

		geometry = &host->domain->geometry;
		start = geometry->aperture_start & host->info->dma_mask;
		end = geometry->aperture_end & host->info->dma_mask;

		order = __ffs(host->domain->pgsize_bitmap);
		init_iova_domain(&host->iova, 1UL << order, start >> order);
		host->iova_end = end;

		domain = host->domain;
	}

	return domain;

free_domain:
	iommu_domain_free(host->domain);
	host->domain = NULL;
put_cache:
	iova_cache_put();
put_group:
	iommu_group_put(host->group);
	host->group = NULL;

	return ERR_PTR(err);
}
static int host1x_iommu_init(struct host1x *host)
{
	u64 mask = host->info->dma_mask;
	struct iommu_domain *domain;
	int err;

	domain = host1x_iommu_attach(host);
	if (IS_ERR(domain)) {
		err = PTR_ERR(domain);
		dev_err(host->dev, "failed to attach to IOMMU: %d\n", err);
		return err;
	}

	/*
	 * If we're not behind an IOMMU make sure we don't get push buffers
	 * that are allocated outside of the range addressable by the GATHER
	 * opcode.
	 *
	 * Newer generations of Tegra (Tegra186 and later) support a wide
	 * variant of the GATHER opcode that allows addressing more bits.
	 */
	if (!domain && !host->info->has_wide_gather)
		mask = DMA_BIT_MASK(32);

	err = dma_coerce_mask_and_coherent(host->dev, mask);
	if (err < 0) {
		dev_err(host->dev, "failed to set DMA mask: %d\n", err);
		return err;
	}

	return 0;
}
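/*
 * Example of the fallback above: on Tegra124 (dma_mask = DMA_BIT_MASK(34),
 * no wide GATHER) running without an IOMMU, the mask is narrowed to 32 bits
 * so that push buffers are allocated within the range the GATHER opcode can
 * address.
 */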
static void host1x_iommu_exit(struct host1x *host)
{
	if (host->domain) {
		put_iova_domain(&host->iova);
		iommu_detach_group(host->domain, host->group);

		iommu_domain_free(host->domain);
		host->domain = NULL;

		iova_cache_put();

		iommu_group_put(host->group);
		host->group = NULL;
	}
}
static int host1x_get_resets(struct host1x *host)
{
	int err;

	host->resets[0].id = "mc";
	host->resets[1].id = "host1x";
	host->nresets = ARRAY_SIZE(host->resets);

	err = devm_reset_control_bulk_get_optional_exclusive_released(
				host->dev, host->nresets, host->resets);
	if (err) {
		dev_err(host->dev, "failed to get reset: %d\n", err);
		return err;
	}

	return 0;
}
static int host1x_probe(struct platform_device *pdev)
{
	struct host1x *host;
	int err, i;

	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->info = of_device_get_match_data(&pdev->dev);

	if (host->info->has_hypervisor) {
		host->regs = devm_platform_ioremap_resource_byname(pdev, "vm");
		if (IS_ERR(host->regs))
			return PTR_ERR(host->regs);

		host->hv_regs = devm_platform_ioremap_resource_byname(pdev, "hypervisor");
		if (IS_ERR(host->hv_regs))
			return PTR_ERR(host->hv_regs);

		if (host->info->has_common) {
			host->common_regs = devm_platform_ioremap_resource_byname(pdev, "common");
			if (IS_ERR(host->common_regs))
				return PTR_ERR(host->common_regs);
		}
	} else {
		host->regs = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(host->regs))
			return PTR_ERR(host->regs);
	}
	for (i = 0; i < ARRAY_SIZE(host->syncpt_irqs); i++) {
		char irq_name[] = "syncptX";

		sprintf(irq_name, "syncpt%d", i);

		err = platform_get_irq_byname_optional(pdev, irq_name);
		if (err == -ENXIO)
			break;
		if (err < 0)
			return err;

		host->syncpt_irqs[i] = err;
	}

	host->num_syncpt_irqs = i;

	/* Device tree without irq names */
	if (i == 0) {
		host->syncpt_irqs[0] = platform_get_irq(pdev, 0);
		if (host->syncpt_irqs[0] < 0)
			return host->syncpt_irqs[0];

		host->num_syncpt_irqs = 1;
	}
	mutex_init(&host->devices_lock);
	INIT_LIST_HEAD(&host->devices);
	INIT_LIST_HEAD(&host->list);
	host->dev = &pdev->dev;

	/* set common host1x device data */
	platform_set_drvdata(pdev, host);

	host->dev->dma_parms = &host->dma_parms;
	dma_set_max_seg_size(host->dev, UINT_MAX);

	if (host->info->init) {
		err = host->info->init(host);
		if (err)
			return err;
	}

	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		err = PTR_ERR(host->clk);

		if (err != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to get clock: %d\n", err);

		return err;
	}

	err = host1x_get_resets(host);
	if (err)
		return err;
	host1x_bo_cache_init(&host->cache);

	err = host1x_iommu_init(host);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to setup IOMMU: %d\n", err);
		goto destroy_cache;
	}

	err = host1x_channel_list_init(&host->channel_list,
				       host->info->nb_channels);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize channel list\n");
		goto iommu_exit;
	}

	err = host1x_memory_context_list_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize context list\n");
		goto free_channels;
	}

	err = host1x_syncpt_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize syncpts\n");
		goto free_contexts;
	}

	err = host1x_intr_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize interrupts\n");
		goto deinit_syncpt;
	}

	pm_runtime_enable(&pdev->dev);

	err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
	if (err)
		goto pm_disable;

	/* the driver's code isn't ready yet for the dynamic RPM */
	err = pm_runtime_resume_and_get(&pdev->dev);
	if (err)
		goto pm_disable;

	host1x_debug_init(host);

	err = host1x_register(host);
	if (err < 0)
		goto deinit_debugfs;

	err = devm_of_platform_populate(&pdev->dev);
	if (err < 0)
		goto unregister;

	return 0;
unregister:
	host1x_unregister(host);
deinit_debugfs:
	host1x_debug_deinit(host);

	pm_runtime_put_sync_suspend(&pdev->dev);
pm_disable:
	pm_runtime_disable(&pdev->dev);

	host1x_intr_deinit(host);
deinit_syncpt:
	host1x_syncpt_deinit(host);
free_contexts:
	host1x_memory_context_list_free(&host->context_list);
free_channels:
	host1x_channel_list_free(&host->channel_list);
iommu_exit:
	host1x_iommu_exit(host);
destroy_cache:
	host1x_bo_cache_destroy(&host->cache);

	return err;
}
static int host1x_remove(struct platform_device *pdev)
{
	struct host1x *host = platform_get_drvdata(pdev);

	host1x_unregister(host);
	host1x_debug_deinit(host);

	pm_runtime_force_suspend(&pdev->dev);

	host1x_intr_deinit(host);
	host1x_syncpt_deinit(host);
	host1x_memory_context_list_free(&host->context_list);
	host1x_channel_list_free(&host->channel_list);
	host1x_iommu_exit(host);
	host1x_bo_cache_destroy(&host->cache);

	return 0;
}
static int __maybe_unused host1x_runtime_suspend(struct device *dev)
{
	struct host1x *host = dev_get_drvdata(dev);
	int err;

	host1x_channel_stop_all(host);
	host1x_intr_stop(host);
	host1x_syncpt_save(host);

	if (!host->info->skip_reset_assert) {
		err = reset_control_bulk_assert(host->nresets, host->resets);
		if (err) {
			dev_err(dev, "failed to assert reset: %d\n", err);
			goto resume_host1x;
		}

		usleep_range(1000, 2000);
	}

	clk_disable_unprepare(host->clk);
	reset_control_bulk_release(host->nresets, host->resets);

	return 0;

resume_host1x:
	host1x_setup_virtualization_tables(host);
	host1x_syncpt_restore(host);
	host1x_intr_start(host);

	return err;
}
static int __maybe_unused host1x_runtime_resume(struct device *dev)
{
	struct host1x *host = dev_get_drvdata(dev);
	int err;

	err = reset_control_bulk_acquire(host->nresets, host->resets);
	if (err) {
		dev_err(dev, "failed to acquire reset: %d\n", err);
		return err;
	}

	err = clk_prepare_enable(host->clk);
	if (err) {
		dev_err(dev, "failed to enable clock: %d\n", err);
		goto release_reset;
	}

	err = reset_control_bulk_deassert(host->nresets, host->resets);
	if (err) {
		dev_err(dev, "failed to deassert reset: %d\n", err);
		goto disable_clk;
	}

	host1x_setup_virtualization_tables(host);
	host1x_syncpt_restore(host);
	host1x_intr_start(host);

	return 0;

disable_clk:
	clk_disable_unprepare(host->clk);
release_reset:
	reset_control_bulk_release(host->nresets, host->resets);

	return err;
}
static const struct dev_pm_ops host1x_pm_ops = {
	SET_RUNTIME_PM_OPS(host1x_runtime_suspend, host1x_runtime_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};
static struct platform_driver tegra_host1x_driver = {
	.driver = {
		.name = "tegra-host1x",
		.of_match_table = host1x_of_match,
		.pm = &host1x_pm_ops,
	},
	.probe = host1x_probe,
	.remove = host1x_remove,
};

static struct platform_driver * const drivers[] = {
	&tegra_host1x_driver,
	&tegra_mipi_driver,
};
static int __init tegra_host1x_init(void)
{
	int err;

	err = bus_register(&host1x_bus_type);
	if (err < 0)
		return err;

	err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
	if (err < 0)
		bus_unregister(&host1x_bus_type);

	return err;
}
module_init(tegra_host1x_init);
static void __exit tegra_host1x_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
	bus_unregister(&host1x_bus_type);
}
module_exit(tegra_host1x_exit);
/**
 * host1x_get_dma_mask() - query the supported DMA mask for host1x
 * @host1x: host1x instance
 *
 * Note that this returns the supported DMA mask for host1x, which can be
 * different from the applicable DMA mask under certain circumstances.
 */
u64 host1x_get_dma_mask(struct host1x *host1x)
{
	return host1x->info->dma_mask;
}
EXPORT_SYMBOL(host1x_get_dma_mask);
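/*
 * Illustrative usage sketch (assumption: a host1x client driver, such as the
 * Tegra DRM driver, deciding whether a buffer is directly reachable):
 *
 *	u64 mask = host1x_get_dma_mask(host1x);
 *
 * A physical address with bits set outside @mask cannot be addressed by
 * host1x directly and needs IOMMU translation.
 */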
MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>");
MODULE_DESCRIPTION("Host1x driver for Tegra products");
MODULE_LICENSE("GPL");