drivers/gpu/host1x/dev.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x driver
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include <soc/tegra/common.h>

#define CREATE_TRACE_POINTS
#include <trace/events/host1x.h>
#undef CREATE_TRACE_POINTS

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#endif

#include "bus.h"
#include "channel.h"
#include "context.h"
#include "debug.h"
#include "dev.h"
#include "intr.h"

#include "hw/host1x01.h"
#include "hw/host1x02.h"
#include "hw/host1x04.h"
#include "hw/host1x05.h"
#include "hw/host1x06.h"
#include "hw/host1x07.h"
#include "hw/host1x08.h"

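/*
 * Register accessors for the host1x apertures: the "hypervisor" and
 * "common" regions only exist on SoCs that advertise them (has_hypervisor,
 * has_common), the sync space lives at an SoC-specific offset within the
 * main aperture, and each channel has its own register window.
 */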
void host1x_common_writel(struct host1x *host1x, u32 v, u32 r)
{
        writel(v, host1x->common_regs + r);
}

void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r)
{
        writel(v, host1x->hv_regs + r);
}

u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r)
{
        return readl(host1x->hv_regs + r);
}

void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
{
        void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

        writel(v, sync_regs + r);
}

u32 host1x_sync_readl(struct host1x *host1x, u32 r)
{
        void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;

        return readl(sync_regs + r);
}

void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r)
{
        writel(v, ch->regs + r);
}

u32 host1x_ch_readl(struct host1x_channel *ch, u32 r)
{
        return readl(ch->regs + r);
}

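/*
 * Per-SoC capability data: the matching entry is selected at probe time
 * via the of_device_id table below and stored in host->info.
 */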
static const struct host1x_info host1x01_info = {
        .nb_channels = 8,
        .nb_pts = 32,
        .nb_mlocks = 16,
        .nb_bases = 8,
        .init = host1x01_init,
        .sync_offset = 0x3000,
        .dma_mask = DMA_BIT_MASK(32),
        .has_wide_gather = false,
        .has_hypervisor = false,
        .num_sid_entries = 0,
        .sid_table = NULL,
        .reserve_vblank_syncpts = true,
};

static const struct host1x_info host1x02_info = {
        .nb_channels = 9,
        .nb_pts = 32,
        .nb_mlocks = 16,
        .nb_bases = 12,
        .init = host1x02_init,
        .sync_offset = 0x3000,
        .dma_mask = DMA_BIT_MASK(32),
        .has_wide_gather = false,
        .has_hypervisor = false,
        .num_sid_entries = 0,
        .sid_table = NULL,
        .reserve_vblank_syncpts = true,
};

static const struct host1x_info host1x04_info = {
        .nb_channels = 12,
        .nb_pts = 192,
        .nb_mlocks = 16,
        .nb_bases = 64,
        .init = host1x04_init,
        .sync_offset = 0x2100,
        .dma_mask = DMA_BIT_MASK(34),
        .has_wide_gather = false,
        .has_hypervisor = false,
        .num_sid_entries = 0,
        .sid_table = NULL,
        .reserve_vblank_syncpts = false,
};

static const struct host1x_info host1x05_info = {
        .nb_channels = 14,
        .nb_pts = 192,
        .nb_mlocks = 16,
        .nb_bases = 64,
        .init = host1x05_init,
        .sync_offset = 0x2100,
        .dma_mask = DMA_BIT_MASK(34),
        .has_wide_gather = false,
        .has_hypervisor = false,
        .num_sid_entries = 0,
        .sid_table = NULL,
        .reserve_vblank_syncpts = false,
};

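/*
 * Stream ID protection table entries: host1x_setup_virtualization_tables()
 * writes "offset" and "limit" to the two consecutive registers starting at
 * "base" in the hypervisor aperture.
 */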
static const struct host1x_sid_entry tegra186_sid_table[] = {
        {
                /* VIC */
                .base = 0x1af0,
                .offset = 0x30,
                .limit = 0x34
        },
        {
                /* NVDEC */
                .base = 0x1b00,
                .offset = 0x30,
                .limit = 0x34
        },
};

static const struct host1x_info host1x06_info = {
        .nb_channels = 63,
        .nb_pts = 576,
        .nb_mlocks = 24,
        .nb_bases = 16,
        .init = host1x06_init,
        .sync_offset = 0x0,
        .dma_mask = DMA_BIT_MASK(40),
        .has_wide_gather = true,
        .has_hypervisor = true,
        .num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
        .sid_table = tegra186_sid_table,
        .reserve_vblank_syncpts = false,
};

static const struct host1x_sid_entry tegra194_sid_table[] = {
        {
                /* VIC */
                .base = 0x1af0,
                .offset = 0x30,
                .limit = 0x34
        },
        {
                /* NVDEC */
                .base = 0x1b00,
                .offset = 0x30,
                .limit = 0x34
        },
        {
                /* NVDEC1 */
                .base = 0x1bc0,
                .offset = 0x30,
                .limit = 0x34
        },
};

static const struct host1x_info host1x07_info = {
        .nb_channels = 63,
        .nb_pts = 704,
        .nb_mlocks = 32,
        .nb_bases = 0,
        .init = host1x07_init,
        .sync_offset = 0x0,
        .dma_mask = DMA_BIT_MASK(40),
        .has_wide_gather = true,
        .has_hypervisor = true,
        .num_sid_entries = ARRAY_SIZE(tegra194_sid_table),
        .sid_table = tegra194_sid_table,
        .reserve_vblank_syncpts = false,
};

/*
 * Tegra234 has two stream ID protection tables, one for setting stream IDs
 * through the channel path via SETSTREAMID, and one for setting them via
 * MMIO. We program each engine's data stream ID in the channel path table
 * and firmware stream ID in the MMIO path table.
 */
static const struct host1x_sid_entry tegra234_sid_table[] = {
        {
                /* VIC channel */
                .base = 0x17b8,
                .offset = 0x30,
                .limit = 0x30
        },
        {
                /* VIC MMIO */
                .base = 0x1688,
                .offset = 0x34,
                .limit = 0x34
        },
        {
                /* NVDEC channel */
                .base = 0x17c8,
                .offset = 0x30,
                .limit = 0x30,
        },
        {
                /* NVDEC MMIO */
                .base = 0x1698,
                .offset = 0x34,
                .limit = 0x34,
        },
};

static const struct host1x_info host1x08_info = {
        .nb_channels = 63,
        .nb_pts = 1024,
        .nb_mlocks = 24,
        .nb_bases = 0,
        .init = host1x08_init,
        .sync_offset = 0x0,
        .dma_mask = DMA_BIT_MASK(40),
        .has_wide_gather = true,
        .has_hypervisor = true,
        .has_common = true,
        .num_sid_entries = ARRAY_SIZE(tegra234_sid_table),
        .sid_table = tegra234_sid_table,
        .streamid_vm_table = { 0x1004, 128 },
        .classid_vm_table = { 0x1404, 25 },
        .mmio_vm_table = { 0x1504, 25 },
        .reserve_vblank_syncpts = false,
};

static const struct of_device_id host1x_of_match[] = {
        { .compatible = "nvidia,tegra234-host1x", .data = &host1x08_info, },
        { .compatible = "nvidia,tegra194-host1x", .data = &host1x07_info, },
        { .compatible = "nvidia,tegra186-host1x", .data = &host1x06_info, },
        { .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
        { .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
        { .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
        { .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
        { .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
        { },
};
MODULE_DEVICE_TABLE(of, host1x_of_match);

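/*
 * Program the stream ID protection tables and, on SoCs that have them
 * (Tegra234), the per-VM access tables. This is a no-op on SoCs without
 * hypervisor support.
 */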
static void host1x_setup_virtualization_tables(struct host1x *host)
{
        const struct host1x_info *info = host->info;
        unsigned int i;

        if (!info->has_hypervisor)
                return;

        for (i = 0; i < info->num_sid_entries; i++) {
                const struct host1x_sid_entry *entry = &info->sid_table[i];

                host1x_hypervisor_writel(host, entry->offset, entry->base);
                host1x_hypervisor_writel(host, entry->limit, entry->base + 4);
        }

        for (i = 0; i < info->streamid_vm_table.count; i++) {
                /* Allow access to all stream IDs to all VMs. */
                host1x_hypervisor_writel(host, 0xff, info->streamid_vm_table.base + 4 * i);
        }

        for (i = 0; i < info->classid_vm_table.count; i++) {
                /* Allow access to all classes to all VMs. */
                host1x_hypervisor_writel(host, 0xff, info->classid_vm_table.base + 4 * i);
        }

        for (i = 0; i < info->mmio_vm_table.count; i++) {
                /* Use VM1 (that's us) as originator VMID for engine MMIO accesses. */
                host1x_hypervisor_writel(host, 0x1, info->mmio_vm_table.base + 4 * i);
        }
}

static bool host1x_wants_iommu(struct host1x *host1x)
{
        /* Our IOMMU usage policy doesn't currently play well with GART */
        if (of_machine_is_compatible("nvidia,tegra20"))
                return false;

        /*
         * If we support addressing a maximum of 32 bits of physical memory
         * and if the host1x firewall is enabled, there's no need to enable
         * IOMMU support. This can happen for example on Tegra20, Tegra30
         * and Tegra114.
         *
         * Tegra124 and later can address up to 34 bits of physical memory and
         * many platforms come equipped with more than 2 GiB of system memory,
         * which requires crossing the 4 GiB boundary. But there's a catch: on
         * SoCs before Tegra186 (i.e. Tegra124 and Tegra210), the host1x can
         * only address up to 32 bits of memory in GATHER opcodes, which means
         * that command buffers need to either be in the first 2 GiB of system
         * memory (which could quickly lead to memory exhaustion), or command
         * buffers need to be treated differently from other buffers (which is
         * not possible with the current ABI).
         *
         * A third option is to use the IOMMU in these cases to make sure all
         * buffers will be mapped into a 32-bit IOVA space that host1x can
         * address. This allows all of the system memory to be used and works
         * within the limitations of the host1x on these SoCs.
         *
         * In summary, default to enable IOMMU on Tegra124 and later. For any
         * of the earlier SoCs, only use the IOMMU for additional safety when
         * the host1x firewall is disabled.
         */
        if (host1x->info->dma_mask <= DMA_BIT_MASK(32)) {
                if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
                        return false;
        }

        return true;
}

static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
        int err;

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
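        /*
         * On 32-bit ARM the DMA API may already have attached the device
         * to an IOMMU mapping; detach and release it so that the driver
         * can manage its own domain and IOVA space below.
         */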
        if (host->dev->archdata.mapping) {
                struct dma_iommu_mapping *mapping =
                                to_dma_iommu_mapping(host->dev);
                arm_iommu_detach_device(host->dev);
                arm_iommu_release_mapping(mapping);

                domain = iommu_get_domain_for_dev(host->dev);
        }
#endif

        /*
         * We may not always want to enable IOMMU support (for example if the
         * host1x firewall is already enabled and we don't support addressing
         * more than 32 bits of physical memory), so check for that first.
         *
         * Similarly, if host1x is already attached to an IOMMU (via the DMA
         * API), don't try to attach again.
         */
        if (!host1x_wants_iommu(host) || domain)
                return domain;

        host->group = iommu_group_get(host->dev);
        if (host->group) {
                struct iommu_domain_geometry *geometry;
                dma_addr_t start, end;
                unsigned long order;

                err = iova_cache_get();
                if (err < 0)
                        goto put_group;

                host->domain = iommu_domain_alloc(&platform_bus_type);
                if (!host->domain) {
                        err = -ENOMEM;
                        goto put_cache;
                }

                err = iommu_attach_group(host->domain, host->group);
                if (err) {
                        if (err == -ENODEV)
                                err = 0;

                        goto free_domain;
                }

                geometry = &host->domain->geometry;
                start = geometry->aperture_start & host->info->dma_mask;
                end = geometry->aperture_end & host->info->dma_mask;

                order = __ffs(host->domain->pgsize_bitmap);
                init_iova_domain(&host->iova, 1UL << order, start >> order);
                host->iova_end = end;

                domain = host->domain;
        }

        return domain;

free_domain:
        iommu_domain_free(host->domain);
        host->domain = NULL;
put_cache:
        iova_cache_put();
put_group:
        iommu_group_put(host->group);
        host->group = NULL;

        return ERR_PTR(err);
}

static int host1x_iommu_init(struct host1x *host)
{
        u64 mask = host->info->dma_mask;
        struct iommu_domain *domain;
        int err;

        domain = host1x_iommu_attach(host);
        if (IS_ERR(domain)) {
                err = PTR_ERR(domain);
                dev_err(host->dev, "failed to attach to IOMMU: %d\n", err);
                return err;
        }

        /*
         * If we're not behind an IOMMU make sure we don't get push buffers
         * that are allocated outside of the range addressable by the GATHER
         * opcode.
         *
         * Newer generations of Tegra (Tegra186 and later) support a wide
         * variant of the GATHER opcode that allows addressing more bits.
         */
        if (!domain && !host->info->has_wide_gather)
                mask = DMA_BIT_MASK(32);

        err = dma_coerce_mask_and_coherent(host->dev, mask);
        if (err < 0) {
                dev_err(host->dev, "failed to set DMA mask: %d\n", err);
                return err;
        }

        return 0;
}

static void host1x_iommu_exit(struct host1x *host)
{
        if (host->domain) {
                put_iova_domain(&host->iova);
                iommu_detach_group(host->domain, host->group);

                iommu_domain_free(host->domain);
                host->domain = NULL;

                iova_cache_put();

                iommu_group_put(host->group);
                host->group = NULL;
        }
}

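/*
 * Both resets are optional and requested in "released" mode: the runtime
 * PM callbacks acquire them on resume and release them again on suspend.
 */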
static int host1x_get_resets(struct host1x *host)
{
        int err;

        host->resets[0].id = "mc";
        host->resets[1].id = "host1x";
        host->nresets = ARRAY_SIZE(host->resets);

        err = devm_reset_control_bulk_get_optional_exclusive_released(
                                host->dev, host->nresets, host->resets);
        if (err) {
                dev_err(host->dev, "failed to get reset: %d\n", err);
                return err;
        }

        return 0;
}

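/*
 * Probe order: map the register apertures, obtain the syncpoint interrupt,
 * clock and resets, set up the IOMMU, then bring up channels, memory
 * contexts, syncpoints and interrupts before registering on the host1x bus
 * and populating child devices.
 */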
static int host1x_probe(struct platform_device *pdev)
{
        struct host1x *host;
        int err;

        host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
        if (!host)
                return -ENOMEM;

        host->info = of_device_get_match_data(&pdev->dev);

        if (host->info->has_hypervisor) {
                host->regs = devm_platform_ioremap_resource_byname(pdev, "vm");
                if (IS_ERR(host->regs))
                        return PTR_ERR(host->regs);

                host->hv_regs = devm_platform_ioremap_resource_byname(pdev, "hypervisor");
                if (IS_ERR(host->hv_regs))
                        return PTR_ERR(host->hv_regs);

                if (host->info->has_common) {
                        host->common_regs = devm_platform_ioremap_resource_byname(pdev, "common");
                        if (IS_ERR(host->common_regs))
                                return PTR_ERR(host->common_regs);
                }
        } else {
                host->regs = devm_platform_ioremap_resource(pdev, 0);
                if (IS_ERR(host->regs))
                        return PTR_ERR(host->regs);
        }

        host->syncpt_irq = platform_get_irq(pdev, 0);
        if (host->syncpt_irq < 0)
                return host->syncpt_irq;

        mutex_init(&host->devices_lock);
        INIT_LIST_HEAD(&host->devices);
        INIT_LIST_HEAD(&host->list);
        host->dev = &pdev->dev;

        /* set common host1x device data */
        platform_set_drvdata(pdev, host);

        host->dev->dma_parms = &host->dma_parms;
        dma_set_max_seg_size(host->dev, UINT_MAX);

        if (host->info->init) {
                err = host->info->init(host);
                if (err)
                        return err;
        }

        host->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(host->clk)) {
                err = PTR_ERR(host->clk);

                if (err != -EPROBE_DEFER)
                        dev_err(&pdev->dev, "failed to get clock: %d\n", err);

                return err;
        }

        err = host1x_get_resets(host);
        if (err)
                return err;

        host1x_bo_cache_init(&host->cache);

        err = host1x_iommu_init(host);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to setup IOMMU: %d\n", err);
                goto destroy_cache;
        }

        err = host1x_channel_list_init(&host->channel_list,
                                       host->info->nb_channels);
        if (err) {
                dev_err(&pdev->dev, "failed to initialize channel list\n");
                goto iommu_exit;
        }

        err = host1x_memory_context_list_init(host);
        if (err) {
                dev_err(&pdev->dev, "failed to initialize context list\n");
                goto free_channels;
        }

        err = host1x_syncpt_init(host);
        if (err) {
                dev_err(&pdev->dev, "failed to initialize syncpts\n");
                goto free_contexts;
        }

        err = host1x_intr_init(host);
        if (err) {
                dev_err(&pdev->dev, "failed to initialize interrupts\n");
                goto deinit_syncpt;
        }

        pm_runtime_enable(&pdev->dev);

        err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
        if (err)
                goto pm_disable;

        /* the driver's code isn't ready yet for the dynamic RPM */
        err = pm_runtime_resume_and_get(&pdev->dev);
        if (err)
                goto pm_disable;

        host1x_debug_init(host);

        err = host1x_register(host);
        if (err < 0)
                goto deinit_debugfs;

        err = devm_of_platform_populate(&pdev->dev);
        if (err < 0)
                goto unregister;

        return 0;

unregister:
        host1x_unregister(host);
deinit_debugfs:
        host1x_debug_deinit(host);

        pm_runtime_put_sync_suspend(&pdev->dev);
pm_disable:
        pm_runtime_disable(&pdev->dev);

        host1x_intr_deinit(host);
deinit_syncpt:
        host1x_syncpt_deinit(host);
free_contexts:
        host1x_memory_context_list_free(&host->context_list);
free_channels:
        host1x_channel_list_free(&host->channel_list);
iommu_exit:
        host1x_iommu_exit(host);
destroy_cache:
        host1x_bo_cache_destroy(&host->cache);

        return err;
}

static int host1x_remove(struct platform_device *pdev)
{
        struct host1x *host = platform_get_drvdata(pdev);

        host1x_unregister(host);
        host1x_debug_deinit(host);

        pm_runtime_force_suspend(&pdev->dev);

        host1x_intr_deinit(host);
        host1x_syncpt_deinit(host);
        host1x_memory_context_list_free(&host->context_list);
        host1x_channel_list_free(&host->channel_list);
        host1x_iommu_exit(host);
        host1x_bo_cache_destroy(&host->cache);

        return 0;
}

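/*
 * Runtime PM: suspend quiesces interrupts, saves syncpoint state and
 * asserts the module resets; resume reverses this and reprograms the
 * virtualization tables, which do not survive reset.
 */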
static int __maybe_unused host1x_runtime_suspend(struct device *dev)
{
        struct host1x *host = dev_get_drvdata(dev);
        int err;

        host1x_intr_stop(host);
        host1x_syncpt_save(host);

        err = reset_control_bulk_assert(host->nresets, host->resets);
        if (err) {
                dev_err(dev, "failed to assert reset: %d\n", err);
                goto resume_host1x;
        }

        usleep_range(1000, 2000);

        clk_disable_unprepare(host->clk);
        reset_control_bulk_release(host->nresets, host->resets);

        return 0;

resume_host1x:
        host1x_setup_virtualization_tables(host);
        host1x_syncpt_restore(host);
        host1x_intr_start(host);

        return err;
}

static int __maybe_unused host1x_runtime_resume(struct device *dev)
{
        struct host1x *host = dev_get_drvdata(dev);
        int err;

        err = reset_control_bulk_acquire(host->nresets, host->resets);
        if (err) {
                dev_err(dev, "failed to acquire reset: %d\n", err);
                return err;
        }

        err = clk_prepare_enable(host->clk);
        if (err) {
                dev_err(dev, "failed to enable clock: %d\n", err);
                goto release_reset;
        }

        err = reset_control_bulk_deassert(host->nresets, host->resets);
        if (err < 0) {
                dev_err(dev, "failed to deassert reset: %d\n", err);
                goto disable_clk;
        }

        host1x_setup_virtualization_tables(host);
        host1x_syncpt_restore(host);
        host1x_intr_start(host);

        return 0;

disable_clk:
        clk_disable_unprepare(host->clk);
release_reset:
        reset_control_bulk_release(host->nresets, host->resets);

        return err;
}

static const struct dev_pm_ops host1x_pm_ops = {
        SET_RUNTIME_PM_OPS(host1x_runtime_suspend, host1x_runtime_resume,
                           NULL)
        /* TODO: add system suspend/resume once the driver is ready for it */
};

static struct platform_driver tegra_host1x_driver = {
        .driver = {
                .name = "tegra-host1x",
                .of_match_table = host1x_of_match,
                .pm = &host1x_pm_ops,
        },
        .probe = host1x_probe,
        .remove = host1x_remove,
};

static struct platform_driver * const drivers[] = {
        &tegra_host1x_driver,
        &tegra_mipi_driver,
};

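/*
 * The host1x bus must be registered before the drivers, since devices on
 * that bus are created when the host1x controller probes.
 */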
static int __init tegra_host1x_init(void)
{
        int err;

        err = bus_register(&host1x_bus_type);
        if (err < 0)
                return err;

        err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
        if (err < 0)
                bus_unregister(&host1x_bus_type);

        return err;
}
module_init(tegra_host1x_init);

static void __exit tegra_host1x_exit(void)
{
        platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
        bus_unregister(&host1x_bus_type);
}
module_exit(tegra_host1x_exit);

/**
 * host1x_get_dma_mask() - query the supported DMA mask for host1x
 * @host1x: host1x instance
 *
 * Note that this returns the supported DMA mask for host1x, which can be
 * different from the applicable DMA mask under certain circumstances.
 *
 * Return: the maximum DMA mask that the host1x hardware supports.
 */
u64 host1x_get_dma_mask(struct host1x *host1x)
{
        return host1x->info->dma_mask;
}
EXPORT_SYMBOL(host1x_get_dma_mask);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>");
MODULE_DESCRIPTION("Host1x driver for Tegra products");
MODULE_LICENSE("GPL");