// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Avionic Design GmbH
 * Copyright (C) 2013 NVIDIA Corporation
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <soc/tegra/common.h>
#include <soc/tegra/pmc.h>

#include "drm.h"
#include "gem.h"
#include "gr3d.h"

enum {
	RST_MC,
	RST_GR3D,
	RST_MC2,
	RST_GR3D2,
	RST_GR3D_MAX,
};

struct gr3d_soc {
	unsigned int version;
	unsigned int num_clocks;
	unsigned int num_resets;
};

struct gr3d {
	struct tegra_drm_client client;
	struct host1x_channel *channel;

	const struct gr3d_soc *soc;
	struct clk_bulk_data *clocks;
	unsigned int nclocks;
	struct reset_control_bulk_data resets[RST_GR3D_MAX];
	unsigned int nresets;

	DECLARE_BITMAP(addr_regs, GR3D_NUM_REGS);
};

static inline struct gr3d *to_gr3d(struct tegra_drm_client *client)
{
	return container_of(client, struct gr3d, client);
}
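
/*
 * host1x client init/exit callbacks: invoked when the client is added to or
 * removed from the host1x bus. init requests a channel and a syncpoint, sets
 * up runtime PM and registers the client with tegra-drm; exit undoes this.
 */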
static int gr3d_init(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
	struct gr3d *gr3d = to_gr3d(drm);
	int err;

	gr3d->channel = host1x_channel_request(client);
	if (!gr3d->channel)
		return -ENOMEM;

	client->syncpts[0] = host1x_syncpt_request(client, flags);
	if (!client->syncpts[0]) {
		err = -ENOMEM;
		dev_err(client->dev, "failed to request syncpoint: %d\n", err);
		goto put;
	}

	err = host1x_client_iommu_attach(client);
	if (err < 0) {
		dev_err(client->dev, "failed to attach to domain: %d\n", err);
		goto free;
	}

	pm_runtime_enable(client->dev);
	pm_runtime_use_autosuspend(client->dev);
	pm_runtime_set_autosuspend_delay(client->dev, 200);

	err = tegra_drm_register_client(dev->dev_private, drm);
	if (err < 0) {
		dev_err(client->dev, "failed to register client: %d\n", err);
		goto disable_rpm;
	}

	return 0;

disable_rpm:
	pm_runtime_dont_use_autosuspend(client->dev);
	pm_runtime_force_suspend(client->dev);

	host1x_client_iommu_detach(client);
free:
	host1x_syncpt_put(client->syncpts[0]);
put:
	host1x_channel_put(gr3d->channel);
	return err;
}

static int gr3d_exit(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct gr3d *gr3d = to_gr3d(drm);
	int err;

	err = tegra_drm_unregister_client(dev->dev_private, drm);
	if (err < 0)
		return err;

	pm_runtime_dont_use_autosuspend(client->dev);
	pm_runtime_force_suspend(client->dev);

	host1x_client_iommu_detach(client);
	host1x_syncpt_put(client->syncpts[0]);
	host1x_channel_put(gr3d->channel);
	gr3d->channel = NULL;

	return 0;
}

static const struct host1x_client_ops gr3d_client_ops = {
	.init = gr3d_init,
	.exit = gr3d_exit,
};

static int gr3d_open_channel(struct tegra_drm_client *client,
			     struct tegra_drm_context *context)
{
	struct gr3d *gr3d = to_gr3d(client);

	context->channel = host1x_channel_get(gr3d->channel);
	if (!context->channel)
		return -ENOMEM;

	return 0;
}

static void gr3d_close_channel(struct tegra_drm_context *context)
{
	host1x_channel_put(context->channel);
}
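
/*
 * Used by the tegra-drm command-stream firewall to decide whether a register
 * write in a submitted job carries a memory address that must be relocated
 * and validated before the job is pushed to hardware.
 */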
static int gr3d_is_addr_reg(struct device *dev, u32 class, u32 offset)
{
	struct gr3d *gr3d = dev_get_drvdata(dev);

	switch (class) {
	case HOST1X_CLASS_HOST1X:
		if (offset == 0x2b)
			return 1;
		break;

	case HOST1X_CLASS_GR3D:
		if (offset >= GR3D_NUM_REGS)
			break;
		if (test_bit(offset, gr3d->addr_regs))
			return 1;
		break;
	}

	return 0;
}

static const struct tegra_drm_client_ops gr3d_ops = {
	.open_channel = gr3d_open_channel,
	.close_channel = gr3d_close_channel,
	.is_addr_reg = gr3d_is_addr_reg,
	.submit = tegra_drm_submit,
};

static const struct gr3d_soc tegra20_gr3d_soc = {
	.version = 0x20,
	.num_clocks = 1,
	.num_resets = 2,
};

static const struct gr3d_soc tegra30_gr3d_soc = {
	.version = 0x30,
	.num_clocks = 2,
	.num_resets = 4,
};

static const struct gr3d_soc tegra114_gr3d_soc = {
	.version = 0x35,
	.num_clocks = 1,
	.num_resets = 2,
};

static const struct of_device_id tegra_gr3d_match[] = {
	{ .compatible = "nvidia,tegra114-gr3d", .data = &tegra114_gr3d_soc },
	{ .compatible = "nvidia,tegra30-gr3d", .data = &tegra30_gr3d_soc },
	{ .compatible = "nvidia,tegra20-gr3d", .data = &tegra20_gr3d_soc },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_gr3d_match);
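
/*
 * Register offsets within the GR3D class that take buffer addresses. These
 * offsets are set in the gr3d->addr_regs bitmap at probe time and consulted
 * by gr3d_is_addr_reg() above.
 */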
static const u32 gr3d_addr_regs[] = {
	GR3D_IDX_ATTRIBUTE( 0),
	GR3D_IDX_ATTRIBUTE( 1),
	GR3D_IDX_ATTRIBUTE( 2),
	GR3D_IDX_ATTRIBUTE( 3),
	GR3D_IDX_ATTRIBUTE( 4),
	GR3D_IDX_ATTRIBUTE( 5),
	GR3D_IDX_ATTRIBUTE( 6),
	GR3D_IDX_ATTRIBUTE( 7),
	GR3D_IDX_ATTRIBUTE( 8),
	GR3D_IDX_ATTRIBUTE( 9),
	GR3D_IDX_ATTRIBUTE(10),
	GR3D_IDX_ATTRIBUTE(11),
	GR3D_IDX_ATTRIBUTE(12),
	GR3D_IDX_ATTRIBUTE(13),
	GR3D_IDX_ATTRIBUTE(14),
	GR3D_IDX_ATTRIBUTE(15),
	GR3D_TEX_TEX_ADDR( 0),
	GR3D_TEX_TEX_ADDR( 1),
	GR3D_TEX_TEX_ADDR( 2),
	GR3D_TEX_TEX_ADDR( 3),
	GR3D_TEX_TEX_ADDR( 4),
	GR3D_TEX_TEX_ADDR( 5),
	GR3D_TEX_TEX_ADDR( 6),
	GR3D_TEX_TEX_ADDR( 7),
	GR3D_TEX_TEX_ADDR( 8),
	GR3D_TEX_TEX_ADDR( 9),
	GR3D_TEX_TEX_ADDR(10),
	GR3D_TEX_TEX_ADDR(11),
	GR3D_TEX_TEX_ADDR(12),
	GR3D_TEX_TEX_ADDR(13),
	GR3D_TEX_TEX_ADDR(14),
	GR3D_TEX_TEX_ADDR(15),
	GR3D_DW_MEMORY_OUTPUT_ADDRESS,
	GR3D_GLOBAL_SURFADDR( 0),
	GR3D_GLOBAL_SURFADDR( 1),
	GR3D_GLOBAL_SURFADDR( 2),
	GR3D_GLOBAL_SURFADDR( 3),
	GR3D_GLOBAL_SURFADDR( 4),
	GR3D_GLOBAL_SURFADDR( 5),
	GR3D_GLOBAL_SURFADDR( 6),
	GR3D_GLOBAL_SURFADDR( 7),
	GR3D_GLOBAL_SURFADDR( 8),
	GR3D_GLOBAL_SURFADDR( 9),
	GR3D_GLOBAL_SURFADDR(10),
	GR3D_GLOBAL_SURFADDR(11),
	GR3D_GLOBAL_SURFADDR(12),
	GR3D_GLOBAL_SURFADDR(13),
	GR3D_GLOBAL_SURFADDR(14),
	GR3D_GLOBAL_SURFADDR(15),
	GR3D_GLOBAL_SPILLSURFADDR,
	GR3D_GLOBAL_SURFOVERADDR( 0),
	GR3D_GLOBAL_SURFOVERADDR( 1),
	GR3D_GLOBAL_SURFOVERADDR( 2),
	GR3D_GLOBAL_SURFOVERADDR( 3),
	GR3D_GLOBAL_SURFOVERADDR( 4),
	GR3D_GLOBAL_SURFOVERADDR( 5),
	GR3D_GLOBAL_SURFOVERADDR( 6),
	GR3D_GLOBAL_SURFOVERADDR( 7),
	GR3D_GLOBAL_SURFOVERADDR( 8),
	GR3D_GLOBAL_SURFOVERADDR( 9),
	GR3D_GLOBAL_SURFOVERADDR(10),
	GR3D_GLOBAL_SURFOVERADDR(11),
	GR3D_GLOBAL_SURFOVERADDR(12),
	GR3D_GLOBAL_SURFOVERADDR(13),
	GR3D_GLOBAL_SURFOVERADDR(14),
	GR3D_GLOBAL_SURFOVERADDR(15),
	GR3D_GLOBAL_SAMP01SURFADDR( 0),
	GR3D_GLOBAL_SAMP01SURFADDR( 1),
	GR3D_GLOBAL_SAMP01SURFADDR( 2),
	GR3D_GLOBAL_SAMP01SURFADDR( 3),
	GR3D_GLOBAL_SAMP01SURFADDR( 4),
	GR3D_GLOBAL_SAMP01SURFADDR( 5),
	GR3D_GLOBAL_SAMP01SURFADDR( 6),
	GR3D_GLOBAL_SAMP01SURFADDR( 7),
	GR3D_GLOBAL_SAMP01SURFADDR( 8),
	GR3D_GLOBAL_SAMP01SURFADDR( 9),
	GR3D_GLOBAL_SAMP01SURFADDR(10),
	GR3D_GLOBAL_SAMP01SURFADDR(11),
	GR3D_GLOBAL_SAMP01SURFADDR(12),
	GR3D_GLOBAL_SAMP01SURFADDR(13),
	GR3D_GLOBAL_SAMP01SURFADDR(14),
	GR3D_GLOBAL_SAMP01SURFADDR(15),
	GR3D_GLOBAL_SAMP23SURFADDR( 0),
	GR3D_GLOBAL_SAMP23SURFADDR( 1),
	GR3D_GLOBAL_SAMP23SURFADDR( 2),
	GR3D_GLOBAL_SAMP23SURFADDR( 3),
	GR3D_GLOBAL_SAMP23SURFADDR( 4),
	GR3D_GLOBAL_SAMP23SURFADDR( 5),
	GR3D_GLOBAL_SAMP23SURFADDR( 6),
	GR3D_GLOBAL_SAMP23SURFADDR( 7),
	GR3D_GLOBAL_SAMP23SURFADDR( 8),
	GR3D_GLOBAL_SAMP23SURFADDR( 9),
	GR3D_GLOBAL_SAMP23SURFADDR(10),
	GR3D_GLOBAL_SAMP23SURFADDR(11),
	GR3D_GLOBAL_SAMP23SURFADDR(12),
	GR3D_GLOBAL_SAMP23SURFADDR(13),
	GR3D_GLOBAL_SAMP23SURFADDR(14),
	GR3D_GLOBAL_SAMP23SURFADDR(15),
};
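
/*
 * Legacy power-up path for device-trees that don't describe GENPD power
 * domains: powers up the 3D partition via the PMC powergate API using the
 * named clock and an individually requested reset.
 */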
static int gr3d_power_up_legacy_domain(struct device *dev, const char *name,
				       unsigned int id)
{
	struct gr3d *gr3d = dev_get_drvdata(dev);
	struct reset_control *reset;
	struct clk *clk;
	unsigned int i;
	int err;

	/*
	 * Tegra20 device-tree doesn't specify 3d clock name and there is only
	 * one clock for Tegra20. Tegra30+ device-trees always specify names
	 * for the clocks.
	 */
	if (gr3d->nclocks == 1) {
		if (id == TEGRA_POWERGATE_3D1)
			return 0;

		clk = gr3d->clocks[0].clk;
	} else {
		for (i = 0; i < gr3d->nclocks; i++) {
			if (WARN_ON(!gr3d->clocks[i].id))
				continue;

			if (!strcmp(gr3d->clocks[i].id, name)) {
				clk = gr3d->clocks[i].clk;
				break;
			}
		}

		if (WARN_ON(i == gr3d->nclocks))
			return -EINVAL;
	}

	/*
	 * We use an array of resets, which includes MC resets, and the MC
	 * reset shouldn't be asserted while hardware is gated because
	 * MC flushing will fail for gated hardware. Hence for the legacy
	 * PD we request the individual reset separately.
	 */
	reset = reset_control_get_exclusive_released(dev, name);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	err = reset_control_acquire(reset);
	if (err) {
		dev_err(dev, "failed to acquire %s reset: %d\n", name, err);
	} else {
		err = tegra_powergate_sequence_power_up(id, clk, reset);
		reset_control_release(reset);
	}

	reset_control_put(reset);
	if (err)
		return err;

	/*
	 * tegra_powergate_sequence_power_up() leaves clocks enabled,
	 * while GENPD does not. Hence keep clock-enable balanced.
	 */
	clk_disable_unprepare(clk);

	return 0;
}

static void gr3d_del_link(void *link)
{
	device_link_del(link);
}

static int gr3d_init_power(struct device *dev, struct gr3d *gr3d)
{
	static const char * const opp_genpd_names[] = { "3d0", "3d1", NULL };
	const u32 link_flags = DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME;
	struct device **opp_virt_devs, *pd_dev;
	struct device_link *link;
	unsigned int i;
	int err;

	err = of_count_phandle_with_args(dev->of_node, "power-domains",
					 "#power-domain-cells");
	if (err < 0) {
		if (err != -ENOENT)
			return err;

		/*
		 * Older device-trees don't use GENPD. In this case we should
		 * toggle the power domain manually.
		 */
		err = gr3d_power_up_legacy_domain(dev, "3d",
						  TEGRA_POWERGATE_3D);
		if (err)
			return err;

		err = gr3d_power_up_legacy_domain(dev, "3d2",
						  TEGRA_POWERGATE_3D1);
		if (err)
			return err;

		return 0;
	}

	/*
	 * The PM domain core automatically attaches a single power domain,
	 * otherwise it skips attaching completely. We have a single domain
	 * on Tegra20 and two domains on Tegra30+.
	 */
	if (dev->pm_domain)
		return 0;

	err = devm_pm_opp_attach_genpd(dev, opp_genpd_names, &opp_virt_devs);
	if (err)
		return err;

	for (i = 0; opp_genpd_names[i]; i++) {
		pd_dev = opp_virt_devs[i];
		if (!pd_dev) {
			dev_err(dev, "failed to get %s power domain\n",
				opp_genpd_names[i]);
			return -EINVAL;
		}

		link = device_link_add(dev, pd_dev, link_flags);
		if (!link) {
			dev_err(dev, "failed to link to %s\n", dev_name(pd_dev));
			return -EINVAL;
		}

		err = devm_add_action_or_reset(dev, gr3d_del_link, link);
		if (err)
			return err;
	}

	return 0;
}

static int gr3d_get_clocks(struct device *dev, struct gr3d *gr3d)
{
	int err;

	err = devm_clk_bulk_get_all(dev, &gr3d->clocks);
	if (err < 0) {
		dev_err(dev, "failed to get clock: %d\n", err);
		return err;
	}
	gr3d->nclocks = err;

	if (gr3d->nclocks != gr3d->soc->num_clocks) {
		dev_err(dev, "invalid number of clocks: %u\n", gr3d->nclocks);
		return -ENOENT;
	}

	return 0;
}

static int gr3d_get_resets(struct device *dev, struct gr3d *gr3d)
{
	int err;

	gr3d->resets[RST_MC].id = "mc";
	gr3d->resets[RST_MC2].id = "mc2";
	gr3d->resets[RST_GR3D].id = "3d";
	gr3d->resets[RST_GR3D2].id = "3d2";
	gr3d->nresets = gr3d->soc->num_resets;

	err = devm_reset_control_bulk_get_optional_exclusive_released(
				dev, gr3d->nresets, gr3d->resets);
	if (err) {
		dev_err(dev, "failed to get reset: %d\n", err);
		return err;
	}

	if (WARN_ON(!gr3d->resets[RST_GR3D].rstc) ||
	    WARN_ON(!gr3d->resets[RST_GR3D2].rstc && gr3d->nresets == 4))
		return -ENOENT;

	return 0;
}

static int gr3d_probe(struct platform_device *pdev)
{
	struct host1x_syncpt **syncpts;
	struct gr3d *gr3d;
	unsigned int i;
	int err;

	gr3d = devm_kzalloc(&pdev->dev, sizeof(*gr3d), GFP_KERNEL);
	if (!gr3d)
		return -ENOMEM;

	platform_set_drvdata(pdev, gr3d);

	gr3d->soc = of_device_get_match_data(&pdev->dev);

	syncpts = devm_kzalloc(&pdev->dev, sizeof(*syncpts), GFP_KERNEL);
	if (!syncpts)
		return -ENOMEM;

	err = gr3d_get_clocks(&pdev->dev, gr3d);
	if (err)
		return err;

	err = gr3d_get_resets(&pdev->dev, gr3d);
	if (err)
		return err;

	err = gr3d_init_power(&pdev->dev, gr3d);
	if (err)
		return err;

	INIT_LIST_HEAD(&gr3d->client.base.list);
	gr3d->client.base.ops = &gr3d_client_ops;
	gr3d->client.base.dev = &pdev->dev;
	gr3d->client.base.class = HOST1X_CLASS_GR3D;
	gr3d->client.base.syncpts = syncpts;
	gr3d->client.base.num_syncpts = 1;

	INIT_LIST_HEAD(&gr3d->client.list);
	gr3d->client.version = gr3d->soc->version;
	gr3d->client.ops = &gr3d_ops;

	err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
	if (err)
		return err;

	err = host1x_client_register(&gr3d->client.base);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
			err);
		return err;
	}

	/* initialize address register map */
	for (i = 0; i < ARRAY_SIZE(gr3d_addr_regs); i++)
		set_bit(gr3d_addr_regs[i], gr3d->addr_regs);

	return 0;
}

static void gr3d_remove(struct platform_device *pdev)
{
	struct gr3d *gr3d = platform_get_drvdata(pdev);

	host1x_client_unregister(&gr3d->client.base);
}
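
/*
 * Runtime PM: on suspend the channel is stopped, the unit is put into reset
 * and its clocks are gated; resume reverses the sequence. Actual power-gating
 * is handled by GENPD, or skipped on old device-trees without MC resets.
 */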
static int __maybe_unused gr3d_runtime_suspend(struct device *dev)
{
	struct gr3d *gr3d = dev_get_drvdata(dev);
	int err;

	host1x_channel_stop(gr3d->channel);

	err = reset_control_bulk_assert(gr3d->nresets, gr3d->resets);
	if (err) {
		dev_err(dev, "failed to assert reset: %d\n", err);
		return err;
	}

	usleep_range(10, 20);

	/*
	 * Older device-trees don't specify MC resets and power-gating can't
	 * be done safely in that case. Hence we will keep the power ungated
	 * for older DTBs. For newer DTBs, GENPD will perform the power-gating.
	 */
	clk_bulk_disable_unprepare(gr3d->nclocks, gr3d->clocks);
	reset_control_bulk_release(gr3d->nresets, gr3d->resets);

	return 0;
}

static int __maybe_unused gr3d_runtime_resume(struct device *dev)
{
	struct gr3d *gr3d = dev_get_drvdata(dev);
	int err;

	err = reset_control_bulk_acquire(gr3d->nresets, gr3d->resets);
	if (err) {
		dev_err(dev, "failed to acquire reset: %d\n", err);
		return err;
	}

	err = clk_bulk_prepare_enable(gr3d->nclocks, gr3d->clocks);
	if (err) {
		dev_err(dev, "failed to enable clock: %d\n", err);
		goto release_reset;
	}

	err = reset_control_bulk_deassert(gr3d->nresets, gr3d->resets);
	if (err) {
		dev_err(dev, "failed to deassert reset: %d\n", err);
		goto disable_clk;
	}

	return 0;

disable_clk:
	clk_bulk_disable_unprepare(gr3d->nclocks, gr3d->clocks);
release_reset:
	reset_control_bulk_release(gr3d->nresets, gr3d->resets);

	return err;
}

static const struct dev_pm_ops tegra_gr3d_pm = {
	SET_RUNTIME_PM_OPS(gr3d_runtime_suspend, gr3d_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

struct platform_driver tegra_gr3d_driver = {
	.driver = {
		.name = "tegra-gr3d",
		.of_match_table = tegra_gr3d_match,
		.pm = &tegra_gr3d_pm,
	},
	.probe = gr3d_probe,
	.remove_new = gr3d_remove,
};