drivers/remoteproc/ti_k3_r5f_rproc.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Texas Instruments' K3 R5 Remoteproc driver
 *
 * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Lokesh Vutla <[email protected]>
 */

#include <common.h>
#include <dm.h>
#include <malloc.h>
#include <remoteproc.h>
#include <errno.h>
#include <clk.h>
#include <reset.h>
#include <asm/io.h>
#include <dm/device_compat.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include "ti_sci_proc.h"

/*
 * R5F's view of this address can either be for ATCM or BTCM with the other
 * at address 0x0 based on loczrama signal.
 */
#define K3_R5_TCM_DEV_ADDR	0x41010000

/* R5 TI-SCI Processor Configuration Flags */
#define PROC_BOOT_CFG_FLAG_R5_DBG_EN		0x00000001
#define PROC_BOOT_CFG_FLAG_R5_DBG_NIDEN		0x00000002
#define PROC_BOOT_CFG_FLAG_R5_LOCKSTEP		0x00000100
#define PROC_BOOT_CFG_FLAG_R5_TEINIT		0x00000200
#define PROC_BOOT_CFG_FLAG_R5_NMFI_EN		0x00000400
#define PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE	0x00000800
#define PROC_BOOT_CFG_FLAG_R5_BTCM_EN		0x00001000
#define PROC_BOOT_CFG_FLAG_R5_ATCM_EN		0x00002000
#define PROC_BOOT_CFG_FLAG_GEN_IGN_BOOTVECTOR	0x10000000

/* R5 TI-SCI Processor Control Flags */
#define PROC_BOOT_CTRL_FLAG_R5_CORE_HALT	0x00000001

/* R5 TI-SCI Processor Status Flags */
#define PROC_BOOT_STATUS_FLAG_R5_WFE			0x00000001
#define PROC_BOOT_STATUS_FLAG_R5_WFI			0x00000002
#define PROC_BOOT_STATUS_FLAG_R5_CLK_GATED		0x00000004
#define PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED	0x00000100

#define NR_CORES	2

enum cluster_mode {
	CLUSTER_MODE_SPLIT = 0,
	CLUSTER_MODE_LOCKSTEP,
};

/**
 * struct k3_r5f_mem - internal memory structure
 * @cpu_addr: MPU virtual address of the memory region
 * @bus_addr: Bus address used to access the memory region
 * @dev_addr: Device address from remoteproc view
 * @size: Size of the memory region
 */
struct k3_r5f_mem {
	void __iomem *cpu_addr;
	phys_addr_t bus_addr;
	u32 dev_addr;
	size_t size;
};

/**
 * struct k3_r5f_core - K3 R5 core structure
 * @dev: cached device pointer
 * @cluster: pointer to the parent cluster.
 * @reset: reset control handle
 * @tsp: TI-SCI processor control handle
 * @mem: Array of available internal memories
 * @num_mems: Number of available memories
 * @atcm_enable: flag to control ATCM enablement
 * @btcm_enable: flag to control BTCM enablement
 * @loczrama: flag to dictate which TCM is at device address 0x0
 * @in_use: flag to tell if the core is already in use.
 */
struct k3_r5f_core {
	struct udevice *dev;
	struct k3_r5f_cluster *cluster;
	struct reset_ctl reset;
	struct ti_sci_proc tsp;
	struct k3_r5f_mem *mem;
	int num_mems;
	u32 atcm_enable;
	u32 btcm_enable;
	u32 loczrama;
	bool in_use;
};

/**
 * struct k3_r5f_cluster - K3 R5F Cluster structure
 * @mode: Mode to configure the Cluster - Split or LockStep
 * @cores: Array of pointers to R5 cores within the cluster
 */
struct k3_r5f_cluster {
	enum cluster_mode mode;
	struct k3_r5f_core *cores[NR_CORES];
};

static bool is_primary_core(struct k3_r5f_core *core)
{
	return core == core->cluster->cores[0];
}

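/*
 * Request exclusive TI-SCI processor control of the core(s). In lockstep
 * mode both cores of the cluster are requested, with partial requests
 * unwound on failure; in split mode only the given core is requested.
 */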
static int k3_r5f_proc_request(struct k3_r5f_core *core)
{
	struct k3_r5f_cluster *cluster = core->cluster;
	int i, ret;

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
		for (i = 0; i < NR_CORES; i++) {
			ret = ti_sci_proc_request(&cluster->cores[i]->tsp);
			if (ret)
				goto proc_release;
		}
	} else {
		ret = ti_sci_proc_request(&core->tsp);
		if (ret)
			return ret;
	}

	return 0;

proc_release:
	while (i >= 0) {
		ti_sci_proc_release(&cluster->cores[i]->tsp);
		i--;
	}
	return ret;
}

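/*
 * Release the TI-SCI processor control acquired by k3_r5f_proc_request(),
 * covering both cores in lockstep mode or just this core in split mode.
 */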
static void k3_r5f_proc_release(struct k3_r5f_core *core)
{
	struct k3_r5f_cluster *cluster = core->cluster;
	int i;

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
		for (i = 0; i < NR_CORES; i++)
			ti_sci_proc_release(&cluster->cores[i]->tsp);
	else
		ti_sci_proc_release(&core->tsp);
}

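/*
 * Power up both cores and deassert their local resets for lockstep
 * operation, rolling the sequence back if any step fails. Core 1 is
 * handled before core 0.
 */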
static int k3_r5f_lockstep_release(struct k3_r5f_cluster *cluster)
{
	int ret, c;

	dev_dbg(cluster->cores[0]->dev, "%s\n", __func__);

	for (c = NR_CORES - 1; c >= 0; c--) {
		ret = ti_sci_proc_power_domain_on(&cluster->cores[c]->tsp);
		if (ret)
			goto unroll_module_reset;
	}

	/* deassert local reset on all applicable cores */
	for (c = NR_CORES - 1; c >= 0; c--) {
		ret = reset_deassert(&cluster->cores[c]->reset);
		if (ret)
			goto unroll_local_reset;
	}

	return 0;

unroll_local_reset:
	while (c < NR_CORES) {
		reset_assert(&cluster->cores[c]->reset);
		c++;
	}
	c = 0;
unroll_module_reset:
	while (c < NR_CORES) {
		ti_sci_proc_power_domain_off(&cluster->cores[c]->tsp);
		c++;
	}

	return ret;
}

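/*
 * Power up a single core (deassert its module reset) and then deassert its
 * local reset for split-mode operation, asserting the module reset back if
 * the local reset deassert fails.
 */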
static int k3_r5f_split_release(struct k3_r5f_core *core)
{
	int ret;

	dev_dbg(core->dev, "%s\n", __func__);

	ret = ti_sci_proc_power_domain_on(&core->tsp);
	if (ret) {
		dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
			ret);
		return ret;
	}

	ret = reset_deassert(&core->reset);
	if (ret) {
		dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
			ret);
		if (ti_sci_proc_power_domain_off(&core->tsp))
			dev_warn(core->dev, "module-reset assert back failed\n");
	}

	return ret;
}

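/*
 * Release the core(s) out of reset so that the TCMs become accessible for
 * image loading; the cores themselves stay halted until k3_r5f_start().
 */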
static int k3_r5f_prepare(struct udevice *dev)
{
	struct k3_r5f_core *core = dev_get_priv(dev);
	struct k3_r5f_cluster *cluster = core->cluster;
	int ret = 0;

	dev_dbg(dev, "%s\n", __func__);

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
		ret = k3_r5f_lockstep_release(cluster);
	else
		ret = k3_r5f_split_release(core);

	if (ret)
		dev_err(dev, "Unable to enable cores for TCM loading %d\n",
			ret);

	return ret;
}

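/*
 * Validate that a load/start request is legal for the current cluster
 * mode: the core must not already be running, lockstep operations must
 * target the primary core with both cores probed, and in split mode the
 * primary core must be brought up before the secondary one.
 */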
static int k3_r5f_core_sanity_check(struct k3_r5f_core *core)
{
	struct k3_r5f_cluster *cluster = core->cluster;

	if (core->in_use) {
		dev_err(core->dev,
			"Invalid op: Trying to load/start on already running core %d\n",
			core->tsp.proc_id);
		return -EINVAL;
	}

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP && !cluster->cores[1]) {
		dev_err(core->dev,
			"Secondary core is not probed in this cluster\n");
		return -EAGAIN;
	}

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP && !is_primary_core(core)) {
		dev_err(core->dev,
			"Invalid op: Trying to start secondary core %d in lockstep mode\n",
			core->tsp.proc_id);
		return -EINVAL;
	}

	if (cluster->mode == CLUSTER_MODE_SPLIT && !is_primary_core(core)) {
		if (!core->cluster->cores[0]->in_use) {
			dev_err(core->dev,
				"Invalid seq: Enable primary core before loading secondary core\n");
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * k3_r5f_load() - Load up the Remote processor image
 * @dev: rproc device pointer
 * @addr: Address at which image is available
 * @size: Size of the image
 *
 * Return: 0 on success, else an appropriate error value.
 */
static int k3_r5f_load(struct udevice *dev, ulong addr, ulong size)
{
	struct k3_r5f_core *core = dev_get_priv(dev);
	u32 boot_vector;
	int ret;

	dev_dbg(dev, "%s addr = 0x%lx, size = 0x%lx\n", __func__, addr, size);

	ret = k3_r5f_core_sanity_check(core);
	if (ret)
		return ret;

	ret = k3_r5f_proc_request(core);
	if (ret)
		return ret;

	ret = k3_r5f_prepare(dev);
	if (ret) {
		dev_err(dev, "R5f prepare failed for core %d\n",
			core->tsp.proc_id);
		goto proc_release;
	}

	/* Zero out TCMs so that ECC can be effective on all TCM addresses */
	if (core->atcm_enable)
		memset(core->mem[0].cpu_addr, 0x00, core->mem[0].size);
	if (core->btcm_enable)
		memset(core->mem[1].cpu_addr, 0x00, core->mem[1].size);

	ret = rproc_elf_load_image(dev, addr, size);
	if (ret < 0) {
		dev_err(dev, "Loading elf failed %d\n", ret);
		goto proc_release;
	}

	boot_vector = rproc_elf_get_boot_addr(dev, addr);

	dev_dbg(dev, "%s: Boot vector = 0x%x\n", __func__, boot_vector);

	ret = ti_sci_proc_set_config(&core->tsp, boot_vector, 0, 0);

proc_release:
	k3_r5f_proc_release(core);

	return ret;
}

static int k3_r5f_core_halt(struct k3_r5f_core *core)
{
	int ret;

	ret = ti_sci_proc_set_control(&core->tsp,
				      PROC_BOOT_CTRL_FLAG_R5_CORE_HALT, 0);
	if (ret)
		dev_err(core->dev, "Core %d failed to stop\n",
			core->tsp.proc_id);

	return ret;
}

static int k3_r5f_core_run(struct k3_r5f_core *core)
{
	int ret;

	ret = ti_sci_proc_set_control(&core->tsp,
				      0, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT);
	if (ret) {
		dev_err(core->dev, "Core %d failed to start\n",
			core->tsp.proc_id);
		return ret;
	}

	return 0;
}

/**
 * k3_r5f_start() - Start the remote processor
 * @dev: rproc device pointer
 *
 * Return: 0 on success, else an appropriate error value
 */
static int k3_r5f_start(struct udevice *dev)
{
	struct k3_r5f_core *core = dev_get_priv(dev);
	struct k3_r5f_cluster *cluster = core->cluster;
	int ret, c;

	dev_dbg(dev, "%s\n", __func__);

	ret = k3_r5f_core_sanity_check(core);
	if (ret)
		return ret;

	ret = k3_r5f_proc_request(core);
	if (ret)
		return ret;

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
		if (is_primary_core(core)) {
			for (c = NR_CORES - 1; c >= 0; c--) {
				ret = k3_r5f_core_run(cluster->cores[c]);
				if (ret)
					goto unroll_core_run;
			}
		} else {
			dev_err(dev, "Invalid op: Trying to start secondary core %d in lockstep mode\n",
				core->tsp.proc_id);
			ret = -EINVAL;
			goto proc_release;
		}
	} else {
		ret = k3_r5f_core_run(core);
		if (ret)
			goto proc_release;
	}

	core->in_use = true;

	k3_r5f_proc_release(core);
	return 0;

unroll_core_run:
	while (c < NR_CORES) {
		k3_r5f_core_halt(cluster->cores[c]);
		c++;
	}
proc_release:
	k3_r5f_proc_release(core);

	return ret;
}

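/* Assert the local reset and module reset (power off) of a single core */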
static int k3_r5f_split_reset(struct k3_r5f_core *core)
{
	int ret = 0;

	dev_dbg(core->dev, "%s\n", __func__);

	if (reset_assert(&core->reset))
		ret = -EINVAL;

	if (ti_sci_proc_power_domain_off(&core->tsp))
		ret = -EINVAL;

	return ret;
}

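/*
 * Assert the local resets and module resets (power off) of both cores in
 * the cluster, continuing through failures and returning -EINVAL if any
 * step fails.
 */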
static int k3_r5f_lockstep_reset(struct k3_r5f_cluster *cluster)
{
	int ret = 0, c;

	dev_dbg(cluster->cores[0]->dev, "%s\n", __func__);

	for (c = 0; c < NR_CORES; c++)
		if (reset_assert(&cluster->cores[c]->reset))
			ret = -EINVAL;

	/* disable PSC modules on all applicable cores */
	for (c = 0; c < NR_CORES; c++)
		if (ti_sci_proc_power_domain_off(&cluster->cores[c]->tsp))
			ret = -EINVAL;

	return ret;
}

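/*
 * Counterpart of k3_r5f_prepare(): put the core(s) back into reset. In
 * lockstep mode only the primary core triggers the cluster-wide reset.
 */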
static int k3_r5f_unprepare(struct udevice *dev)
{
	struct k3_r5f_core *core = dev_get_priv(dev);
	struct k3_r5f_cluster *cluster = core->cluster;
	int ret = 0;

	dev_dbg(dev, "%s\n", __func__);

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
		if (is_primary_core(core))
			ret = k3_r5f_lockstep_reset(cluster);
	} else {
		ret = k3_r5f_split_reset(core);
	}

	if (ret)
		dev_warn(dev, "Unable to disable cores %d\n", ret);

	return 0;
}

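/**
 * k3_r5f_stop() - Halt the remote processor and return it to reset
 * @dev: rproc device pointer
 *
 * Return: 0 on success, else an appropriate error value
 */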
static int k3_r5f_stop(struct udevice *dev)
{
	struct k3_r5f_core *core = dev_get_priv(dev);
	struct k3_r5f_cluster *cluster = core->cluster;
	int c, ret;

	dev_dbg(dev, "%s\n", __func__);

	ret = k3_r5f_proc_request(core);
	if (ret)
		return ret;

	core->in_use = false;

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
		if (is_primary_core(core)) {
			for (c = 0; c < NR_CORES; c++)
				k3_r5f_core_halt(cluster->cores[c]);
		} else {
			dev_err(dev, "Invalid op: Trying to stop secondary core in lockstep mode\n");
			ret = -EINVAL;
			goto proc_release;
		}
	} else {
		k3_r5f_core_halt(core);
	}

	ret = k3_r5f_unprepare(dev);
proc_release:
	k3_r5f_proc_release(core);
	return ret;
}

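/**
 * k3_r5f_da_to_va() - Translate a device address to a host virtual address
 * @dev: rproc device pointer
 * @da: device address (R5F or bus view) of the memory region
 * @size: size of the region being looked up
 *
 * Internal TCM regions are matched against both their bus and device
 * addresses; anything else is assumed to be DDR and mapped directly.
 *
 * Return: host virtual address, or NULL for a zero-sized request
 */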
static void *k3_r5f_da_to_va(struct udevice *dev, ulong da, ulong size)
{
	struct k3_r5f_core *core = dev_get_priv(dev);
	void __iomem *va = NULL;
	phys_addr_t bus_addr;
	u32 dev_addr, offset;
	ulong mem_size;
	int i;

	dev_dbg(dev, "%s\n", __func__);

	if (size <= 0)
		return NULL;

	for (i = 0; i < core->num_mems; i++) {
		bus_addr = core->mem[i].bus_addr;
		dev_addr = core->mem[i].dev_addr;
		mem_size = core->mem[i].size;

		if (da >= bus_addr && (da + size) <= (bus_addr + mem_size)) {
			offset = da - bus_addr;
			va = core->mem[i].cpu_addr + offset;
			return (__force void *)va;
		}

		if (da >= dev_addr && (da + size) <= (dev_addr + mem_size)) {
			offset = da - dev_addr;
			va = core->mem[i].cpu_addr + offset;
			return (__force void *)va;
		}
	}

	/* Assume it is DDR region and return da */
	return map_physmem(da, size, MAP_NOCACHE);
}

static int k3_r5f_init(struct udevice *dev)
{
	return 0;
}

static int k3_r5f_reset(struct udevice *dev)
{
	return 0;
}

static const struct dm_rproc_ops k3_r5f_rproc_ops = {
	.init = k3_r5f_init,
	.reset = k3_r5f_reset,
	.start = k3_r5f_start,
	.stop = k3_r5f_stop,
	.load = k3_r5f_load,
	.device_to_virt = k3_r5f_da_to_va,
};

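/*
 * Configure the R5F core via TI-SCI before first use: select ARM mode,
 * program lockstep vs split operation on the primary core, set up the
 * ATCM/BTCM enables and which TCM sits at the reset base, and leave the
 * core halted with the boot vector to be programmed at load time.
 */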
static int k3_r5f_rproc_configure(struct k3_r5f_core *core)
{
	struct k3_r5f_cluster *cluster = core->cluster;
	u32 set_cfg = 0, clr_cfg = 0, cfg, ctrl, sts;
	u64 boot_vec = 0;
	int ret;

	dev_dbg(core->dev, "%s\n", __func__);

	ret = ti_sci_proc_request(&core->tsp);
	if (ret < 0)
		return ret;

	/* Do not touch boot vector now. Load will take care of it. */
	clr_cfg |= PROC_BOOT_CFG_FLAG_GEN_IGN_BOOTVECTOR;

	ret = ti_sci_proc_get_status(&core->tsp, &boot_vec, &cfg, &ctrl, &sts);
	if (ret)
		goto out;

	/* Sanity check for Lockstep mode */
	if (cluster->mode == CLUSTER_MODE_LOCKSTEP && is_primary_core(core) &&
	    !(sts & PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED)) {
		dev_err(core->dev, "LockStep mode not permitted on this device\n");
		ret = -EINVAL;
		goto out;
	}

	/* Primary core only configuration */
	if (is_primary_core(core)) {
		/* always enable ARM mode */
		clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TEINIT;
		if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
			set_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
		else
			clr_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
	}

	if (core->atcm_enable)
		set_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
	else
		clr_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;

	if (core->btcm_enable)
		set_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
	else
		clr_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;

	if (core->loczrama)
		set_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
	else
		clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;

	ret = k3_r5f_core_halt(core);
	if (ret)
		goto out;

	ret = ti_sci_proc_set_config(&core->tsp, boot_vec, set_cfg, clr_cfg);
out:
	ti_sci_proc_release(&core->tsp);
	return ret;
}

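/*
 * Illustrative devicetree fragment (hypothetical node names, addresses and
 * values) showing the properties consumed by ti_sci_proc_of_to_priv(),
 * k3_r5f_of_to_priv(), k3_r5f_core_of_get_memories() and
 * k3_r5f_cluster_probe() below. The SoC device trees and binding documents
 * remain the authoritative reference.
 *
 *	r5fss0: r5fss@41000000 {
 *		compatible = "ti,am654-r5fss";
 *		lockstep-mode = <1>;
 *
 *		r5f0: r5f@41000000 {
 *			compatible = "ti,am654-r5f";
 *			reg = <0x41000000 0x8000>, <0x41010000 0x8000>;
 *			reg-names = "atcm", "btcm";
 *			ti,sci = <&dmsc>;
 *			ti,sci-dev-id = <159>;
 *			ti,sci-proc-ids = <0x01 0xff>;
 *			resets = <&k3_reset 159 1>;
 *			atcm-enable = <1>;
 *			btcm-enable = <1>;
 *			loczrama = <1>;
 *		};
 *
 *		r5f1: r5f@41400000 { ... };
 *	};
 */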
static int ti_sci_proc_of_to_priv(struct udevice *dev, struct ti_sci_proc *tsp)
{
	u32 ids[2];
	int ret;

	dev_dbg(dev, "%s\n", __func__);

	tsp->sci = ti_sci_get_by_phandle(dev, "ti,sci");
	if (IS_ERR(tsp->sci)) {
		dev_err(dev, "ti_sci get failed: %ld\n", PTR_ERR(tsp->sci));
		return PTR_ERR(tsp->sci);
	}

	ret = dev_read_u32_array(dev, "ti,sci-proc-ids", ids, 2);
	if (ret) {
		dev_err(dev, "Proc IDs not populated %d\n", ret);
		return ret;
	}

	tsp->ops = &tsp->sci->ops.proc_ops;
	tsp->proc_id = ids[0];
	tsp->host_id = ids[1];
	tsp->dev_id = dev_read_u32_default(dev, "ti,sci-dev-id",
					   TI_SCI_RESOURCE_NULL);
	if (tsp->dev_id == TI_SCI_RESOURCE_NULL) {
		dev_err(dev, "Device ID not populated\n");
		return -ENODEV;
	}

	return 0;
}

static int k3_r5f_of_to_priv(struct k3_r5f_core *core)
{
	int ret;

	dev_dbg(core->dev, "%s\n", __func__);

	core->atcm_enable = dev_read_u32_default(core->dev, "atcm-enable", 0);
	core->btcm_enable = dev_read_u32_default(core->dev, "btcm-enable", 1);
	core->loczrama = dev_read_u32_default(core->dev, "loczrama", 1);

	ret = ti_sci_proc_of_to_priv(core->dev, &core->tsp);
	if (ret)
		return ret;

	ret = reset_get_by_index(core->dev, 0, &core->reset);
	if (ret) {
		dev_err(core->dev, "Reset lines not available: %d\n", ret);
		return ret;
	}

	return 0;
}

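/*
 * Look up the "atcm" and "btcm" regions from the device tree, map them and
 * record their R5F device addresses based on the loczrama setting.
 */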
static int k3_r5f_core_of_get_memories(struct k3_r5f_core *core)
{
	static const char * const mem_names[] = {"atcm", "btcm"};
	struct udevice *dev = core->dev;
	int i;

	dev_dbg(dev, "%s\n", __func__);

	core->num_mems = ARRAY_SIZE(mem_names);
	core->mem = calloc(core->num_mems, sizeof(*core->mem));
	if (!core->mem)
		return -ENOMEM;

	for (i = 0; i < core->num_mems; i++) {
		core->mem[i].bus_addr = dev_read_addr_size_name(dev,
					mem_names[i],
					(fdt_addr_t *)&core->mem[i].size);
		if (core->mem[i].bus_addr == FDT_ADDR_T_NONE) {
			dev_err(dev, "%s bus address not found\n",
				mem_names[i]);
			return -EINVAL;
		}
		core->mem[i].cpu_addr = map_physmem(core->mem[i].bus_addr,
						    core->mem[i].size,
						    MAP_NOCACHE);
		if (!strcmp(mem_names[i], "atcm")) {
			core->mem[i].dev_addr = core->loczrama ?
						0 : K3_R5_TCM_DEV_ADDR;
		} else {
			core->mem[i].dev_addr = core->loczrama ?
						K3_R5_TCM_DEV_ADDR : 0;
		}

		dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %p da 0x%x\n",
			mem_names[i], &core->mem[i].bus_addr,
			core->mem[i].size, core->mem[i].cpu_addr,
			core->mem[i].dev_addr);
	}

	return 0;
}

/**
 * k3_r5f_probe() - Basic probe
 * @dev: corresponding k3 remote processor device
 *
 * Return: 0 on success, else an appropriate error value.
 */
static int k3_r5f_probe(struct udevice *dev)
{
	struct k3_r5f_cluster *cluster = dev_get_priv(dev->parent);
	struct k3_r5f_core *core = dev_get_priv(dev);
	bool r_state;
	int ret;

	dev_dbg(dev, "%s\n", __func__);

	core->dev = dev;
	ret = k3_r5f_of_to_priv(core);
	if (ret)
		return ret;

	core->cluster = cluster;
	/* Assume Primary core gets probed first */
	if (!cluster->cores[0])
		cluster->cores[0] = core;
	else
		cluster->cores[1] = core;

	ret = k3_r5f_core_of_get_memories(core);
	if (ret) {
		dev_err(dev, "Rproc getting internal memories failed\n");
		return ret;
	}

	ret = core->tsp.sci->ops.dev_ops.is_on(core->tsp.sci, core->tsp.dev_id,
					       &r_state, &core->in_use);
	if (ret)
		return ret;

	if (core->in_use) {
		dev_info(dev, "Core %d is already in use. No rproc commands work\n",
			 core->tsp.proc_id);
		return 0;
	}

	/* Make sure Local reset is asserted. Redundant? */
	reset_assert(&core->reset);

	ret = k3_r5f_rproc_configure(core);
	if (ret) {
		dev_err(dev, "rproc configure failed %d\n", ret);
		return ret;
	}

	dev_dbg(dev, "Remoteproc successfully probed\n");

	return 0;
}

static int k3_r5f_remove(struct udevice *dev)
{
	struct k3_r5f_core *core = dev_get_priv(dev);

	free(core->mem);

	ti_sci_proc_release(&core->tsp);

	return 0;
}

static const struct udevice_id k3_r5f_rproc_ids[] = {
	{ .compatible = "ti,am654-r5f"},
	{ .compatible = "ti,j721e-r5f"},
	{}
};

U_BOOT_DRIVER(k3_r5f_rproc) = {
	.name = "k3_r5f_rproc",
	.of_match = k3_r5f_rproc_ids,
	.id = UCLASS_REMOTEPROC,
	.ops = &k3_r5f_rproc_ops,
	.probe = k3_r5f_probe,
	.remove = k3_r5f_remove,
	.priv_auto_alloc_size = sizeof(struct k3_r5f_core),
};

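/*
 * Cluster (r5fss) probe: read the lockstep/split operating mode and check
 * that exactly two R5F core child nodes are present.
 */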
static int k3_r5f_cluster_probe(struct udevice *dev)
{
	struct k3_r5f_cluster *cluster = dev_get_priv(dev);

	dev_dbg(dev, "%s\n", __func__);

	cluster->mode = dev_read_u32_default(dev, "lockstep-mode",
					     CLUSTER_MODE_LOCKSTEP);

	if (device_get_child_count(dev) != 2) {
		dev_err(dev, "Invalid number of R5 cores\n");
		return -EINVAL;
	}

	dev_dbg(dev, "%s: Cluster successfully probed in %s mode\n",
		__func__, cluster->mode ? "lockstep" : "split");

	return 0;
}

static const struct udevice_id k3_r5fss_ids[] = {
	{ .compatible = "ti,am654-r5fss"},
	{ .compatible = "ti,j721e-r5fss"},
	{}
};

U_BOOT_DRIVER(k3_r5fss) = {
	.name = "k3_r5fss",
	.of_match = k3_r5fss_ids,
	.id = UCLASS_MISC,
	.probe = k3_r5f_cluster_probe,
	.priv_auto_alloc_size = sizeof(struct k3_r5f_cluster),
};