/*
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
#include <trace/events/host1x.h>
#undef CREATE_TRACE_POINTS

#include "bus.h"
#include "channel.h"
#include "debug.h"
#include "dev.h"
#include "intr.h"
#include "hw/host1x01.h"
#include "hw/host1x02.h"
#include "hw/host1x04.h"
#include "hw/host1x05.h"
#include "hw/host1x06.h"
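
/*
 * Register accessors. The hypervisor aperture (hv_regs) is only mapped on
 * SoC variants that set info->has_hypervisor; the syncpoint registers live
 * at a per-SoC offset (info->sync_offset) within the main aperture.
 */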
void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r)
{
	writel(v, host1x->hv_regs + r);
}

u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r)
{
	return readl(host1x->hv_regs + r);
}

void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
{
	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

	writel(v, sync_regs + r);
}

u32 host1x_sync_readl(struct host1x *host1x, u32 r)
{
	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

	return readl(sync_regs + r);
}

void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r)
{
	writel(v, ch->regs + r);
}

u32 host1x_ch_readl(struct host1x_channel *ch, u32 r)
{
	return readl(ch->regs + r);
}
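
/*
 * Per-SoC configuration: which generation-specific init routine to call,
 * where the syncpoint register block starts and how wide the DMA mask is.
 * Only the Tegra186 (host1x06) variant exposes a hypervisor aperture.
 */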
static const struct host1x_info host1x01_info = {
	.init = host1x01_init,
	.sync_offset = 0x3000,
	.dma_mask = DMA_BIT_MASK(32),
};

static const struct host1x_info host1x02_info = {
	.init = host1x02_init,
	.sync_offset = 0x3000,
	.dma_mask = DMA_BIT_MASK(32),
};

static const struct host1x_info host1x04_info = {
	.init = host1x04_init,
	.sync_offset = 0x2100,
	.dma_mask = DMA_BIT_MASK(34),
};

static const struct host1x_info host1x05_info = {
	.init = host1x05_init,
	.sync_offset = 0x2100,
	.dma_mask = DMA_BIT_MASK(34),
};

static const struct host1x_info host1x06_info = {
	.init = host1x06_init,
	.dma_mask = DMA_BIT_MASK(34),
	.has_hypervisor = true,
};
static const struct of_device_id host1x_of_match[] = {
	{ .compatible = "nvidia,tegra186-host1x", .data = &host1x06_info, },
	{ .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
	{ .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
	{ .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
	{ .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
	{ .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
	{ },
};
MODULE_DEVICE_TABLE(of, host1x_of_match);
static int host1x_probe(struct platform_device *pdev)
{
	struct host1x *host;
	struct resource *regs, *hv_regs = NULL;
	int syncpt_irq;
	int err;

	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->info = of_device_get_match_data(&pdev->dev);

	if (host->info->has_hypervisor) {
		regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vm");
		if (!regs) {
			dev_err(&pdev->dev, "failed to get vm registers\n");
			return -ENXIO;
		}

		hv_regs = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						       "hypervisor");
		if (!hv_regs) {
			dev_err(&pdev->dev,
				"failed to get hypervisor registers\n");
			return -ENXIO;
		}
	} else {
		regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!regs) {
			dev_err(&pdev->dev, "failed to get registers\n");
			return -ENXIO;
		}
	}

	syncpt_irq = platform_get_irq(pdev, 0);
	if (syncpt_irq < 0) {
		dev_err(&pdev->dev, "failed to get IRQ: %d\n", syncpt_irq);
		return syncpt_irq;
	}
	mutex_init(&host->devices_lock);
	INIT_LIST_HEAD(&host->devices);
	INIT_LIST_HEAD(&host->list);
	host->dev = &pdev->dev;

	/* set common host1x device data */
	platform_set_drvdata(pdev, host);

	host->regs = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(host->regs))
		return PTR_ERR(host->regs);

	if (host->info->has_hypervisor) {
		host->hv_regs = devm_ioremap_resource(&pdev->dev, hv_regs);
		if (IS_ERR(host->hv_regs))
			return PTR_ERR(host->hv_regs);
	}

	dma_set_mask_and_coherent(host->dev, host->info->dma_mask);
	if (host->info->init) {
		err = host->info->init(host);
		if (err)
			return err;
	}

	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		err = PTR_ERR(host->clk);
		return err;
	}

	host->rst = devm_reset_control_get(&pdev->dev, "host1x");
	if (IS_ERR(host->rst)) {
		err = PTR_ERR(host->rst);
		dev_err(&pdev->dev, "failed to get reset: %d\n", err);
		return err;
	}
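
	/*
	 * If the host1x device sits behind an IOMMU, allocate and attach an
	 * IOMMU domain and set up an IOVA allocator spanning the domain's
	 * aperture. Without an IOMMU (or if attaching reports -ENODEV) the
	 * driver falls back to physical addressing.
	 */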
	host->group = iommu_group_get(&pdev->dev);
	if (host->group) {
		struct iommu_domain_geometry *geometry;
		unsigned long order;

		err = iova_cache_get();
		if (err < 0)
			goto put_group;

		host->domain = iommu_domain_alloc(&platform_bus_type);
		if (!host->domain) {
			err = -ENOMEM;
			goto put_cache;
		}

		err = iommu_attach_group(host->domain, host->group);
		if (err) {
			if (err == -ENODEV) {
				iommu_domain_free(host->domain);
				host->domain = NULL;
				iova_cache_put();
				iommu_group_put(host->group);
				host->group = NULL;
				goto skip_iommu;
			}

			goto fail_free_domain;
		}

		geometry = &host->domain->geometry;

		order = __ffs(host->domain->pgsize_bitmap);
		init_iova_domain(&host->iova, 1UL << order,
				 geometry->aperture_start >> order);
		host->iova_end = geometry->aperture_end;
	}

skip_iommu:
	err = host1x_channel_list_init(&host->channel_list,
				       host->info->nb_channels);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize channel list\n");
		goto fail_detach_device;
	}

	err = clk_prepare_enable(host->clk);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to enable clock\n");
		goto fail_free_channels;
	}

	err = reset_control_deassert(host->rst);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to deassert reset: %d\n", err);
		goto fail_unprepare_disable;
	}

	err = host1x_syncpt_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize syncpts\n");
		goto fail_reset_assert;
	}

	err = host1x_intr_init(host, syncpt_irq);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize interrupts\n");
		goto fail_deinit_syncpt;
	}

	host1x_debug_init(host);

	err = host1x_register(host);
	if (err < 0)
		goto fail_deinit_intr;

	return 0;
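
	/* Error unwind: undo each successful step in reverse order. */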
fail_deinit_intr:
	host1x_intr_deinit(host);
fail_deinit_syncpt:
	host1x_syncpt_deinit(host);
fail_reset_assert:
	reset_control_assert(host->rst);
fail_unprepare_disable:
	clk_disable_unprepare(host->clk);
fail_free_channels:
	host1x_channel_list_free(&host->channel_list);
fail_detach_device:
	if (host->group && host->domain) {
		put_iova_domain(&host->iova);
		iommu_detach_group(host->domain, host->group);
	}
fail_free_domain:
	if (host->domain)
		iommu_domain_free(host->domain);
put_cache:
	if (host->group)
		iova_cache_put();
put_group:
	iommu_group_put(host->group);

	return err;
}
static int host1x_remove(struct platform_device *pdev)
{
	struct host1x *host = platform_get_drvdata(pdev);

	host1x_unregister(host);
	host1x_intr_deinit(host);
	host1x_syncpt_deinit(host);
	reset_control_assert(host->rst);
	clk_disable_unprepare(host->clk);

	if (host->domain) {
		put_iova_domain(&host->iova);
		iommu_detach_group(host->domain, host->group);
		iommu_domain_free(host->domain);
		iova_cache_put();
		iommu_group_put(host->group);
	}

	return 0;
}
static struct platform_driver tegra_host1x_driver = {
	.driver = {
		.name = "tegra-host1x",
		.of_match_table = host1x_of_match,
	},
	.probe = host1x_probe,
	.remove = host1x_remove,
};

static struct platform_driver * const drivers[] = {
	&tegra_host1x_driver,
};
static int __init tegra_host1x_init(void)
{
	int err;

	err = bus_register(&host1x_bus_type);
	if (err < 0)
		return err;

	err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
	if (err < 0)
		bus_unregister(&host1x_bus_type);

	return err;
}
module_init(tegra_host1x_init);

static void __exit tegra_host1x_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
	bus_unregister(&host1x_bus_type);
}
module_exit(tegra_host1x_exit);

MODULE_DESCRIPTION("Host1x driver for Tegra products");
MODULE_LICENSE("GPL");