// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2015-2016 Freescale Semiconductor, Inc.
 * Copyright 2017 NXP
 */
#include <log.h>
#include <net/pfe_eth/pfe_eth.h>
#include <net/pfe_eth/pfe/pfe_hw.h>

static struct pe_info pe[MAX_PE];

/*
 * Initializes the PFE library.
 * Must be called before using any of the library functions.
 */
void pfe_lib_init(void)
{
	int pfe_pe_id;

	for (pfe_pe_id = CLASS0_ID; pfe_pe_id <= CLASS_MAX_ID; pfe_pe_id++) {
		pe[pfe_pe_id].dmem_base_addr =
			(u32)CLASS_DMEM_BASE_ADDR(pfe_pe_id);
		pe[pfe_pe_id].pmem_base_addr =
			(u32)CLASS_IMEM_BASE_ADDR(pfe_pe_id);
		pe[pfe_pe_id].pmem_size = (u32)CLASS_IMEM_SIZE;
		pe[pfe_pe_id].mem_access_wdata =
			(void *)CLASS_MEM_ACCESS_WDATA;
		pe[pfe_pe_id].mem_access_addr = (void *)CLASS_MEM_ACCESS_ADDR;
		pe[pfe_pe_id].mem_access_rdata = (void *)CLASS_MEM_ACCESS_RDATA;
	}

	for (pfe_pe_id = TMU0_ID; pfe_pe_id <= TMU_MAX_ID; pfe_pe_id++) {
		if (pfe_pe_id == TMU2_ID)
			continue;
		pe[pfe_pe_id].dmem_base_addr =
			(u32)TMU_DMEM_BASE_ADDR(pfe_pe_id - TMU0_ID);
		pe[pfe_pe_id].pmem_base_addr =
			(u32)TMU_IMEM_BASE_ADDR(pfe_pe_id - TMU0_ID);
		pe[pfe_pe_id].pmem_size = (u32)TMU_IMEM_SIZE;
		pe[pfe_pe_id].mem_access_wdata = (void *)TMU_MEM_ACCESS_WDATA;
		pe[pfe_pe_id].mem_access_addr = (void *)TMU_MEM_ACCESS_ADDR;
		pe[pfe_pe_id].mem_access_rdata = (void *)TMU_MEM_ACCESS_RDATA;
	}
}
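
/*
 * Usage note: pfe_lib_init() only fills in the pe[] table above; it must
 * run before any pe_dmem_*()/pe_pmem_*() helper, since those index pe[]
 * by PE id.  TMU2_ID is deliberately left out of the table, matching the
 * skip in the TMU loop (that TMU instance is presumably not usable on
 * this SoC; assumption, not verified here).
 */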

/*
 * Writes a buffer to PE internal memory from the host
 * through indirect access registers.
 *
 * @param[in] id		PE identification (CLASS0_ID, ..., TMU0_ID,
 *				..., UTIL_ID)
 * @param[in] mem_access_addr	DMEM destination address (must be 32bit
 *				aligned)
 * @param[in] src		Buffer source address
 * @param[in] len		Number of bytes to copy
 */
static void pe_mem_memcpy_to32(int id, u32 mem_access_addr, const void *src,
			       unsigned int len)
{
	u32 offset = 0, val, addr;
	unsigned int len32 = len >> 2;
	int i;

	addr = mem_access_addr | PE_MEM_ACCESS_WRITE |
		PE_MEM_ACCESS_BYTE_ENABLE(0, 4);

	for (i = 0; i < len32; i++, offset += 4, src += 4) {
		val = *(u32 *)src;
		writel(cpu_to_be32(val), pe[id].mem_access_wdata);
		writel(addr + offset, pe[id].mem_access_addr);
	}

	len = (len & 0x3);
	if (len) {
		val = 0;

		addr = (mem_access_addr | PE_MEM_ACCESS_WRITE |
			PE_MEM_ACCESS_BYTE_ENABLE(0, len)) + offset;

		for (i = 0; i < len; i++, src++)
			val |= (*(u8 *)src) << (8 * i);

		writel(cpu_to_be32(val), pe[id].mem_access_wdata);
		writel(addr, pe[id].mem_access_addr);
	}
}
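
/*
 * How the indirect access above works (derived from the code, not from a
 * hardware manual): the host first latches the data word into
 * mem_access_wdata, then writes the target address together with the
 * WRITE flag and the byte-enable mask into mem_access_addr, which
 * triggers the actual store into PE memory.  A trailing 1-3 byte
 * remainder is packed into a scratch word (first byte in the least
 * significant position) and written with a narrower byte enable.  For
 * example, copying 6 bytes results in two transactions:
 *
 *	writel(cpu_to_be32(word0), pe[id].mem_access_wdata);
 *	writel(addr | PE_MEM_ACCESS_WRITE |
 *	       PE_MEM_ACCESS_BYTE_ENABLE(0, 4), pe[id].mem_access_addr);
 *	writel(cpu_to_be32(tail), pe[id].mem_access_wdata);
 *	writel((addr + 4) | PE_MEM_ACCESS_WRITE |
 *	       PE_MEM_ACCESS_BYTE_ENABLE(0, 2), pe[id].mem_access_addr);
 */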

/*
 * Writes a buffer to PE internal data memory (DMEM) from the host
 * through indirect access registers.
 * @param[in] id		PE identification (CLASS0_ID, ..., TMU0_ID,
 *				..., UTIL_ID)
 * @param[in] dst		DMEM destination address (must be 32bit
 *				aligned)
 * @param[in] src		Buffer source address
 * @param[in] len		Number of bytes to copy
 */
static void pe_dmem_memcpy_to32(int id, u32 dst, const void *src,
				unsigned int len)
{
	pe_mem_memcpy_to32(id, pe[id].dmem_base_addr | dst | PE_MEM_ACCESS_DMEM,
			   src, len);
}

/*
 * Writes a buffer to PE internal program memory (PMEM) from the host
 * through indirect access registers.
 * @param[in] id		PE identification (CLASS0_ID, ..., TMU0_ID,
 *				..., TMU3_ID)
 * @param[in] dst		PMEM destination address (must be 32bit
 *				aligned)
 * @param[in] src		Buffer source address
 * @param[in] len		Number of bytes to copy
 */
static void pe_pmem_memcpy_to32(int id, u32 dst, const void *src,
				unsigned int len)
{
	pe_mem_memcpy_to32(id, pe[id].pmem_base_addr | (dst & (pe[id].pmem_size
			   - 1)) | PE_MEM_ACCESS_IMEM, src, len);
}

/*
 * Reads PE internal program memory (IMEM) from the host
 * through indirect access registers.
 * @param[in] id		PE identification (CLASS0_ID, ..., TMU0_ID,
 *				..., TMU3_ID)
 * @param[in] addr		PMEM read address (must be aligned on size)
 * @param[in] size		Number of bytes to read (maximum 4, must not
 *				cross 32bit boundaries)
 * @return			the data read (in PE endianness, i.e. BE).
 */
u32 pe_pmem_read(int id, u32 addr, u8 size)
{
	u32 offset = addr & 0x3;
	u32 mask = 0xffffffff >> ((4 - size) << 3);
	u32 val;

	addr = pe[id].pmem_base_addr | ((addr & ~0x3) & (pe[id].pmem_size - 1))
		| PE_MEM_ACCESS_READ | PE_MEM_ACCESS_IMEM |
		PE_MEM_ACCESS_BYTE_ENABLE(offset, size);

	writel(addr, pe[id].mem_access_addr);
	val = be32_to_cpu(readl(pe[id].mem_access_rdata));

	return (val >> (offset << 3)) & mask;
}

/*
 * Writes PE internal data memory (DMEM) from the host
 * through indirect access registers.
 * @param[in] id		PE identification (CLASS0_ID, ..., TMU0_ID,
 *				..., UTIL_ID)
 * @param[in] val		Value to write (in PE endianness, i.e. BE)
 * @param[in] addr		DMEM write address (must be aligned on size)
 * @param[in] size		Number of bytes to write (maximum 4, must not
 *				cross 32bit boundaries)
 */
void pe_dmem_write(int id, u32 val, u32 addr, u8 size)
{
	u32 offset = addr & 0x3;

	addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_WRITE |
		PE_MEM_ACCESS_DMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);

	/* Indirect access interface is byte swapping data being written */
	writel(cpu_to_be32(val << (offset << 3)), pe[id].mem_access_wdata);
	writel(addr, pe[id].mem_access_addr);
}

/*
 * Reads PE internal data memory (DMEM) from the host
 * through indirect access registers.
 * @param[in] id		PE identification (CLASS0_ID, ..., TMU0_ID,
 *				..., UTIL_ID)
 * @param[in] addr		DMEM read address (must be aligned on size)
 * @param[in] size		Number of bytes to read (maximum 4, must not
 *				cross 32bit boundaries)
 * @return			the data read (in PE endianness, i.e. BE).
 */
u32 pe_dmem_read(int id, u32 addr, u8 size)
{
	u32 offset = addr & 0x3;
	u32 mask = 0xffffffff >> ((4 - size) << 3);
	u32 val;

	addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_READ |
		PE_MEM_ACCESS_DMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);

	writel(addr, pe[id].mem_access_addr);

	/* Indirect access interface is byte swapping data being read */
	val = be32_to_cpu(readl(pe[id].mem_access_rdata));

	return (val >> (offset << 3)) & mask;
}
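
/*
 * Illustrative example (sketch only, not called anywhere in this file):
 * reading a 16-bit field at DMEM offset 0x42 of CLASS PE 0 would be
 *
 *	u16 v = pe_dmem_read(CLASS0_ID, 0x42, 2);
 *
 * Here offset = 0x42 & 0x3 = 2, so the byte enables select bytes 2-3 of
 * the 32-bit word at 0x40 and the result is shifted right by 16 and
 * masked with 0xffff before being returned.
 */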

/*
 * Writes to CLASS internal bus peripherals (ccu, pe-lem) from the host
 * through indirect access registers.
 * @param[in] val		Value to write
 * @param[in] addr		Address to write to (must be aligned on size)
 * @param[in] size		Number of bytes to write (1, 2 or 4)
 */
static void class_bus_write(u32 val, u32 addr, u8 size)
{
	u32 offset = addr & 0x3;

	writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);

	addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | PE_MEM_ACCESS_WRITE |
		(size << 24);

	writel(cpu_to_be32(val << (offset << 3)), CLASS_BUS_ACCESS_WDATA);
	writel(addr, CLASS_BUS_ACCESS_ADDR);
}

/*
 * Reads from CLASS internal bus peripherals (ccu, pe-lem) on behalf of
 * the host through indirect access registers.
 * @param[in] addr		Address to read from (must be aligned on size)
 * @param[in] size		Number of bytes to read (1, 2 or 4)
 * @return			the read data
 */
static u32 class_bus_read(u32 addr, u8 size)
{
	u32 offset = addr & 0x3;
	u32 mask = 0xffffffff >> ((4 - size) << 3);
	u32 val;

	writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);

	addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | (size << 24);

	writel(addr, CLASS_BUS_ACCESS_ADDR);
	val = be32_to_cpu(readl(CLASS_BUS_ACCESS_RDATA));

	return (val >> (offset << 3)) & mask;
}
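
/*
 * Note on the CLASS bus window (inferred from the two helpers above, not
 * from documentation): the upper address bits are latched once into
 * CLASS_BUS_ACCESS_BASE, while the remaining low bits, the access size
 * (shifted into bits 24 and up) and, for writes, PE_MEM_ACCESS_WRITE go
 * into CLASS_BUS_ACCESS_ADDR.  Data travels through
 * CLASS_BUS_ACCESS_WDATA/RDATA and is byte-swapped on the way, the
 * peripherals apparently being big-endian like the PEs.
 */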

/*
 * Writes data to the cluster memory (PE_LMEM)
 * @param[in] dst		PE LMEM destination address (must be 32bit aligned)
 * @param[in] src		Buffer source address
 * @param[in] len		Number of bytes to copy
 */
static void class_pe_lmem_memcpy_to32(u32 dst, const void *src,
				      unsigned int len)
{
	u32 len32 = len >> 2;
	int i;

	for (i = 0; i < len32; i++, src += 4, dst += 4)
		class_bus_write(*(u32 *)src, dst, 4);

	if (len & 0x2) {
		class_bus_write(*(u16 *)src, dst, 2);
		src += 2;
		dst += 2;
	}

	if (len & 0x1) {
		class_bus_write(*(u8 *)src, dst, 1);
		src++;
		dst++;
	}
}

/*
 * Writes value to the cluster memory (PE_LMEM)
 * @param[in] dst		PE LMEM destination address (must be 32bit aligned)
 * @param[in] val		Value to write
 * @param[in] len		Number of bytes to write
 */
static void class_pe_lmem_memset(u32 dst, int val, unsigned int len)
{
	u32 len32 = len >> 2;
	int i;

	val = val | (val << 8) | (val << 16) | (val << 24);

	for (i = 0; i < len32; i++, dst += 4)
		class_bus_write(val, dst, 4);

	if (len & 0x2) {
		class_bus_write(val, dst, 2);
		dst += 2;
	}

	if (len & 0x1) {
		class_bus_write(val, dst, 1);
		dst++;
	}
}

/*
 * Reads data from the cluster memory (PE_LMEM)
 * @param[out] dst		pointer to the destination buffer data are copied to
 * @param[in] len		length in bytes of the amount of data to read
 *				from cluster memory
 * @param[in] offset		offset in bytes in the cluster memory where data are
 *				read from
 */
void pe_lmem_read(u32 *dst, u32 len, u32 offset)
{
	u32 len32 = len >> 2;
	int i = 0;

	for (i = 0; i < len32; dst++, i++, offset += 4)
		*dst = class_bus_read(PE_LMEM_BASE_ADDR + offset, 4);

	if (len & 0x03)
		*dst = class_bus_read(PE_LMEM_BASE_ADDR + offset, (len & 0x03));
}

/*
 * Writes data to the cluster memory (PE_LMEM)
 * @param[in] src		pointer to the source buffer data are copied from
 * @param[in] len		length in bytes of the amount of data to write to the
 *				cluster memory
 * @param[in] offset		offset in bytes in the cluster memory where data are
 *				written to
 */
void pe_lmem_write(u32 *src, u32 len, u32 offset)
{
	u32 len32 = len >> 2;
	int i = 0;

	for (i = 0; i < len32; src++, i++, offset += 4)
		class_bus_write(*src, PE_LMEM_BASE_ADDR + offset, 4);

	if (len & 0x03)
		class_bus_write(*src, PE_LMEM_BASE_ADDR + offset, (len &
				0x03));
}
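
/*
 * Illustrative usage (sketch only): mirroring a single 32-bit word into
 * cluster memory and reading it back would look like
 *
 *	u32 tmp = 0xdeadbeef;
 *
 *	pe_lmem_write(&tmp, 4, off);
 *	pe_lmem_read(&tmp, 4, off);
 *
 * Both helpers address PE_LMEM relative to PE_LMEM_BASE_ADDR and fall
 * back to a narrower class_bus access for a 1-3 byte tail.
 */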

/*
 * Loads an elf section into pmem
 * Code needs to be at least 16bit aligned and only PROGBITS sections are
 * supported
 *
 * @param[in] id		PE identification (CLASS0_ID, ..., TMU0_ID, ...,
 *				TMU3_ID)
 * @param[in] data		pointer to the elf firmware
 * @param[in] shdr		pointer to the elf section header
 */
static int pe_load_pmem_section(int id, const void *data, Elf32_Shdr *shdr)
{
	u32 offset = be32_to_cpu(shdr->sh_offset);
	u32 addr = be32_to_cpu(shdr->sh_addr);
	u32 size = be32_to_cpu(shdr->sh_size);
	u32 type = be32_to_cpu(shdr->sh_type);

	if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
		printf(
			"%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
			__func__, addr, (unsigned long)data + offset);

		return -1;
	}

	if (addr & 0x1) {
		printf("%s: load address(%x) is not 16bit aligned\n",
		       __func__, addr);
		return -1;
	}

	if (size & 0x1) {
		printf("%s: load size(%x) is not 16bit aligned\n", __func__,
		       size);
		return -1;
	}

	debug("pmem pe%d @%x len %d\n", id, addr, size);
	switch (type) {
	case SHT_PROGBITS:
		pe_pmem_memcpy_to32(id, addr, data + offset, size);
		break;

	default:
		printf("%s: unsupported section type(%x)\n", __func__, type);
		return -1;
	}

	return 0;
}

/*
 * Loads an elf section into dmem
 * Data needs to be at least 32bit aligned, NOBITS sections are correctly
 * initialized to 0
 *
 * @param[in] id		PE identification (CLASS0_ID, ..., TMU0_ID,
 *				..., UTIL_ID)
 * @param[in] data		pointer to the elf firmware
 * @param[in] shdr		pointer to the elf section header
 */
static int pe_load_dmem_section(int id, const void *data, Elf32_Shdr *shdr)
{
	u32 offset = be32_to_cpu(shdr->sh_offset);
	u32 addr = be32_to_cpu(shdr->sh_addr);
	u32 size = be32_to_cpu(shdr->sh_size);
	u32 type = be32_to_cpu(shdr->sh_type);
	u32 size32 = size >> 2;
	int i;

	if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
		printf(
			"%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
			__func__, addr, (unsigned long)data + offset);

		return -1;
	}

	if (addr & 0x3) {
		printf("%s: load address(%x) is not 32bit aligned\n",
		       __func__, addr);
		return -1;
	}

	switch (type) {
	case SHT_PROGBITS:
		debug("dmem pe%d @%x len %d\n", id, addr, size);
		pe_dmem_memcpy_to32(id, addr, data + offset, size);
		break;

	case SHT_NOBITS:
		debug("dmem zero pe%d @%x len %d\n", id, addr, size);
		for (i = 0; i < size32; i++, addr += 4)
			pe_dmem_write(id, 0, addr, 4);

		if (size & 0x3)
			pe_dmem_write(id, 0, addr, size & 0x3);

		break;

	default:
		printf("%s: unsupported section type(%x)\n", __func__, type);
		return -1;
	}

	return 0;
}

/*
 * Loads an elf section into DDR
 * Data needs to be at least 32bit aligned, NOBITS sections are correctly
 * initialized to 0
 *
 * @param[in] id		PE identification (CLASS0_ID, ..., TMU0_ID,
 *				..., UTIL_ID)
 * @param[in] data		pointer to the elf firmware
 * @param[in] shdr		pointer to the elf section header
 */
static int pe_load_ddr_section(int id, const void *data, Elf32_Shdr *shdr)
{
	u32 offset = be32_to_cpu(shdr->sh_offset);
	u32 addr = be32_to_cpu(shdr->sh_addr);
	u32 size = be32_to_cpu(shdr->sh_size);
	u32 type = be32_to_cpu(shdr->sh_type);
	u32 flags = be32_to_cpu(shdr->sh_flags);

	switch (type) {
	case SHT_PROGBITS:
		debug("ddr pe%d @%x len %d\n", id, addr, size);
		if (flags & SHF_EXECINSTR) {
			if (id <= CLASS_MAX_ID) {
				/* Do the loading only once in DDR */
				if (id == CLASS0_ID) {
					debug(
					"%s: load address(%x) and elf file address(%lx) rcvd\n"
					, __func__, addr,
					(unsigned long)data + offset);
					if (((unsigned long)(data + offset)
						& 0x3) != (addr & 0x3)) {
						printf(
						"%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
						__func__, addr,
						(unsigned long)data +
						offset);

						return -1;
					}

					if (addr & 0x1) {
						printf(
						"%s: load address(%x) is not 16bit aligned\n"
						, __func__, addr);
						return -1;
					}

					if (size & 0x1) {
						printf(
						"%s: load length(%x) is not 16bit aligned\n"
						, __func__, size);
						return -1;
					}

					memcpy((void *)DDR_PFE_TO_VIRT(addr),
					       data + offset, size);
				}
			} else {
				printf(
				"%s: unsupported ddr section type(%x) for PE(%d)\n"
				, __func__, type, id);
				return -1;
			}

		} else {
			memcpy((void *)DDR_PFE_TO_VIRT(addr), data + offset,
			       size);
		}

		break;

	case SHT_NOBITS:
		debug("ddr zero pe%d @%x len %d\n", id, addr, size);
		memset((void *)DDR_PFE_TO_VIRT(addr), 0, size);

		break;

	default:
		printf("%s: unsupported section type(%x)\n", __func__, type);
		return -1;
	}

	return 0;
}

/*
 * Loads an elf section into pe lmem
 * Data needs to be at least 32bit aligned, NOBITS sections are correctly
 * initialized to 0
 *
 * @param[in] id		PE identification (CLASS0_ID, ..., CLASS5_ID)
 * @param[in] data		pointer to the elf firmware
 * @param[in] shdr		pointer to the elf section header
 */
static int pe_load_pe_lmem_section(int id, const void *data, Elf32_Shdr *shdr)
{
	u32 offset = be32_to_cpu(shdr->sh_offset);
	u32 addr = be32_to_cpu(shdr->sh_addr);
	u32 size = be32_to_cpu(shdr->sh_size);
	u32 type = be32_to_cpu(shdr->sh_type);

	if (id > CLASS_MAX_ID) {
		printf("%s: unsupported pe-lmem section type(%x) for PE(%d)\n",
		       __func__, type, id);
		return -1;
	}

	if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
		printf(
			"%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
			__func__, addr, (unsigned long)data + offset);

		return -1;
	}

	if (addr & 0x3) {
		printf("%s: load address(%x) is not 32bit aligned\n",
		       __func__, addr);
		return -1;
	}

	debug("lmem pe%d @%x len %d\n", id, addr, size);

	switch (type) {
	case SHT_PROGBITS:
		class_pe_lmem_memcpy_to32(addr, data + offset, size);
		break;

	case SHT_NOBITS:
		class_pe_lmem_memset(addr, 0, size);
		break;

	default:
		printf("%s: unsupported section type(%x)\n", __func__, type);
		return -1;
	}

	return 0;
}

/*
 * Loads an elf section into a PE
 * For now only supports loading a section to dmem (all PEs), pmem (class and
 * tmu PEs), DDR (util PE code)
 * @param[in] id		PE identification (CLASS0_ID, ..., TMU0_ID,
 *				..., UTIL_ID)
 * @param[in] data		pointer to the elf firmware
 * @param[in] shdr		pointer to the elf section header
 */
int pe_load_elf_section(int id, const void *data, Elf32_Shdr *shdr)
{
	u32 addr = be32_to_cpu(shdr->sh_addr);
	u32 size = be32_to_cpu(shdr->sh_size);

	if (IS_DMEM(addr, size))
		return pe_load_dmem_section(id, data, shdr);
	else if (IS_PMEM(addr, size))
		return pe_load_pmem_section(id, data, shdr);
	else if (IS_PFE_LMEM(addr, size))
		return 0;
	else if (IS_PHYS_DDR(addr, size))
		return pe_load_ddr_section(id, data, shdr);
	else if (IS_PE_LMEM(addr, size))
		return pe_load_pe_lmem_section(id, data, shdr);

	printf("%s: unsupported memory range(%x)\n", __func__, addr);

	return 0;
}
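
/*
 * Typical usage (illustrative sketch only; the real caller lives in the
 * firmware-loading code and may differ): iterate over the big-endian ELF
 * section headers and hand every allocated section to
 * pe_load_elf_section(), which dispatches on the target address range:
 *
 *	Elf32_Ehdr *ehdr = (Elf32_Ehdr *)fw;
 *	Elf32_Shdr *shdr = (Elf32_Shdr *)(fw + be32_to_cpu(ehdr->e_shoff));
 *	int i;
 *
 *	for (i = 0; i < be16_to_cpu(ehdr->e_shnum); i++, shdr++)
 *		if (be32_to_cpu(shdr->sh_flags) &
 *		    (SHF_WRITE | SHF_ALLOC | SHF_EXECINSTR))
 *			pe_load_elf_section(id, fw, shdr);
 *
 * The PFE firmware images are big-endian, hence the be32/be16 conversions
 * throughout this file.
 */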

/**************************** BMU ***************************/
/*
 * Resets a BMU block.
 * @param[in] base		BMU block base address
 */
static inline void bmu_reset(void *base)
{
	writel(CORE_SW_RESET, base + BMU_CTRL);

	/* Wait for self clear */
	while (readl(base + BMU_CTRL) & CORE_SW_RESET)
		;
}

/*
 * Enables a BMU block.
 * @param[in] base		BMU block base address
 */
void bmu_enable(void *base)
{
	writel(CORE_ENABLE, base + BMU_CTRL);
}

/*
 * Disables a BMU block.
 * @param[in] base		BMU block base address
 */
static inline void bmu_disable(void *base)
{
	writel(CORE_DISABLE, base + BMU_CTRL);
}

/*
 * Sets the configuration of a BMU block.
 * @param[in] base		BMU block base address
 * @param[in] cfg		BMU configuration
 */
static inline void bmu_set_config(void *base, struct bmu_cfg *cfg)
{
	writel(cfg->baseaddr, base + BMU_UCAST_BASE_ADDR);
	writel(cfg->count & 0xffff, base + BMU_UCAST_CONFIG);
	writel(cfg->size & 0xffff, base + BMU_BUF_SIZE);

	/* Interrupts are never used */
	writel(0x0, base + BMU_INT_ENABLE);
}

/*
 * Initializes a BMU block.
 * @param[in] base		BMU block base address
 * @param[in] cfg		BMU configuration
 */
void bmu_init(void *base, struct bmu_cfg *cfg)
{
	bmu_disable(base);

	bmu_set_config(base, cfg);

	bmu_reset(base);
}
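
/*
 * The initialization order above is: disable the block, program the
 * buffer pool (base address, buffer count and size), then issue a soft
 * reset, which bmu_reset() polls until the CORE_SW_RESET bit self-clears.
 * The block stays disabled until the caller invokes bmu_enable(),
 * typically once the rest of the PFE has been brought up.
 */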

/**************************** GPI ***************************/
/*
 * Resets a GPI block.
 * @param[in] base		GPI base address
 */
static inline void gpi_reset(void *base)
{
	writel(CORE_SW_RESET, base + GPI_CTRL);
}

/*
 * Enables a GPI block.
 * @param[in] base		GPI base address
 */
void gpi_enable(void *base)
{
	writel(CORE_ENABLE, base + GPI_CTRL);
}

/*
 * Disables a GPI block.
 * @param[in] base		GPI base address
 */
void gpi_disable(void *base)
{
	writel(CORE_DISABLE, base + GPI_CTRL);
}

/*
 * Sets the configuration of a GPI block.
 * @param[in] base		GPI base address
 * @param[in] cfg		GPI configuration
 */
static inline void gpi_set_config(void *base, struct gpi_cfg *cfg)
{
	writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_ALLOC_CTRL), base
	       + GPI_LMEM_ALLOC_ADDR);
	writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_FREE_CTRL), base
	       + GPI_LMEM_FREE_ADDR);
	writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_ALLOC_CTRL), base
	       + GPI_DDR_ALLOC_ADDR);
	writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL), base
	       + GPI_DDR_FREE_ADDR);
	writel(CBUS_VIRT_TO_PFE(CLASS_INQ_PKTPTR), base + GPI_CLASS_ADDR);
	writel(DDR_HDR_SIZE, base + GPI_DDR_DATA_OFFSET);
	writel(LMEM_HDR_SIZE, base + GPI_LMEM_DATA_OFFSET);
	writel(0, base + GPI_LMEM_SEC_BUF_DATA_OFFSET);
	writel(0, base + GPI_DDR_SEC_BUF_DATA_OFFSET);
	writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE, base + GPI_HDR_SIZE);
	writel((DDR_BUF_SIZE << 16) | LMEM_BUF_SIZE, base + GPI_BUF_SIZE);

	writel(((cfg->lmem_rtry_cnt << 16) | (GPI_DDR_BUF_EN << 1) |
		GPI_LMEM_BUF_EN), base + GPI_RX_CONFIG);
	writel(cfg->tmlf_txthres, base + GPI_TMLF_TX);
	writel(cfg->aseq_len, base + GPI_DTX_ASEQ);

	/* Make GPI AXI transactions non-bufferable */
	writel(0x1, base + GPI_AXI_CTRL);
}

/*
 * Initializes a GPI block.
 * @param[in] base		GPI base address
 * @param[in] cfg		GPI configuration
 */
void gpi_init(void *base, struct gpi_cfg *cfg)
{
	gpi_reset(base);

	gpi_disable(base);

	gpi_set_config(base, cfg);
}
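
/*
 * Reading of the configuration above: each GPI pulls LMEM buffers from
 * BMU1 and DDR buffers from BMU2 through their alloc/free control
 * registers, forwards received packets to the classifier via
 * CLASS_INQ_PKTPTR, and is told the header and buffer sizes for both
 * memory targets.  gpi_init() leaves the block disabled; gpi_enable() is
 * called separately by the caller once the data path is ready.
 */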

/**************************** CLASSIFIER ***************************/
/*
 * Resets CLASSIFIER block.
 */
static inline void class_reset(void)
{
	writel(CORE_SW_RESET, CLASS_TX_CTRL);
}

/*
 * Enables all CLASS-PE cores.
 */
void class_enable(void)
{
	writel(CORE_ENABLE, CLASS_TX_CTRL);
}

/*
 * Disables all CLASS-PE cores.
 */
void class_disable(void)
{
	writel(CORE_DISABLE, CLASS_TX_CTRL);
}

/*
 * Sets the configuration of the CLASSIFIER block.
 * @param[in] cfg		CLASSIFIER configuration
 */
static inline void class_set_config(struct class_cfg *cfg)
{
	if (PLL_CLK_EN == 0) {
		/* Clock ratio: for 1:1 the value is 0 */
		writel(0x0, CLASS_PE_SYS_CLK_RATIO);
	} else {
		/* Clock ratio: for 1:2 the value is 1 */
		writel(0x1, CLASS_PE_SYS_CLK_RATIO);
	}
	writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE, CLASS_HDR_SIZE);
	writel(LMEM_BUF_SIZE, CLASS_LMEM_BUF_SIZE);
	writel(CLASS_ROUTE_ENTRY_SIZE(CLASS_ROUTE_SIZE) |
	       CLASS_ROUTE_HASH_SIZE(cfg->route_table_hash_bits),
	       CLASS_ROUTE_HASH_ENTRY_SIZE);
	writel(HASH_CRC_PORT_IP | QB2BUS_LE, CLASS_ROUTE_MULTI);

	writel(cfg->route_table_baseaddr, CLASS_ROUTE_TABLE_BASE);
	memset((void *)DDR_PFE_TO_VIRT(cfg->route_table_baseaddr), 0,
	       ROUTE_TABLE_SIZE);

	writel(CLASS_PE0_RO_DM_ADDR0_VAL, CLASS_PE0_RO_DM_ADDR0);
	writel(CLASS_PE0_RO_DM_ADDR1_VAL, CLASS_PE0_RO_DM_ADDR1);
	writel(CLASS_PE0_QB_DM_ADDR0_VAL, CLASS_PE0_QB_DM_ADDR0);
	writel(CLASS_PE0_QB_DM_ADDR1_VAL, CLASS_PE0_QB_DM_ADDR1);
	writel(CBUS_VIRT_TO_PFE(TMU_PHY_INQ_PKTPTR), CLASS_TM_INQ_ADDR);

	writel(23, CLASS_AFULL_THRES);
	writel(23, CLASS_TSQ_FIFO_THRES);

	writel(24, CLASS_MAX_BUF_CNT);
	writel(24, CLASS_TSQ_MAX_CNT);

	/* Make Class AXI transactions non-bufferable */
	writel(0x1, CLASS_AXI_CTRL);

	/* Make Util AXI transactions non-bufferable */
	/* Util is disabled in U-Boot, so do it from here */
	writel(0x1, UTIL_AXI_CTRL);
}

/*
 * Initializes CLASSIFIER block.
 * @param[in] cfg		CLASSIFIER configuration
 */
void class_init(struct class_cfg *cfg)
{
	class_reset();

	class_disable();

	class_set_config(cfg);
}

/**************************** TMU ***************************/
/*
 * Enables TMU-PE cores.
 * @param[in] pe_mask		TMU PE mask
 */
void tmu_enable(u32 pe_mask)
{
	writel(readl(TMU_TX_CTRL) | (pe_mask & 0xF), TMU_TX_CTRL);
}

/*
 * Disables TMU-PE cores.
 * @param[in] pe_mask		TMU PE mask
 */
void tmu_disable(u32 pe_mask)
{
	writel(readl(TMU_TX_CTRL) & ~(pe_mask & 0xF), TMU_TX_CTRL);
}

/*
 * Initializes TMU block.
 * @param[in] cfg		TMU configuration
 */
void tmu_init(struct tmu_cfg *cfg)
{
	int q, phyno;

	/* keep in soft reset */
	writel(SW_RESET, TMU_CTRL);

	/* Make TMU AXI transactions non-bufferable */
	writel(0x1, TMU_AXI_CTRL);

	/* enable EMAC PHY ports */
	writel(0x3, TMU_SYS_GENERIC_CONTROL);

	writel(750, TMU_INQ_WATERMARK);

	writel(CBUS_VIRT_TO_PFE(EGPI1_BASE_ADDR + GPI_INQ_PKTPTR),
	       TMU_PHY0_INQ_ADDR);
	writel(CBUS_VIRT_TO_PFE(EGPI2_BASE_ADDR + GPI_INQ_PKTPTR),
	       TMU_PHY1_INQ_ADDR);

	writel(CBUS_VIRT_TO_PFE(HGPI_BASE_ADDR + GPI_INQ_PKTPTR),
	       TMU_PHY3_INQ_ADDR);
	writel(CBUS_VIRT_TO_PFE(HIF_NOCPY_RX_INQ0_PKTPTR), TMU_PHY4_INQ_ADDR);
	writel(CBUS_VIRT_TO_PFE(UTIL_INQ_PKTPTR), TMU_PHY5_INQ_ADDR);
	writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL),
	       TMU_BMU_INQ_ADDR);

	/* enabling all 10 schedulers [9:0] of each TDQ */
	writel(0x3FF, TMU_TDQ0_SCH_CTRL);
	writel(0x3FF, TMU_TDQ1_SCH_CTRL);
	writel(0x3FF, TMU_TDQ3_SCH_CTRL);

	if (PLL_CLK_EN == 0) {
		/* Clock ratio: for 1:1 the value is 0 */
		writel(0x0, TMU_PE_SYS_CLK_RATIO);
	} else {
		/* Clock ratio: for 1:2 the value is 1 */
		writel(0x1, TMU_PE_SYS_CLK_RATIO);
	}

	/* Extra packet pointers will be stored from this address onwards */
	debug("TMU_LLM_BASE_ADDR %x\n", cfg->llm_base_addr);
	writel(cfg->llm_base_addr, TMU_LLM_BASE_ADDR);

	debug("TMU_LLM_QUE_LEN %x\n", cfg->llm_queue_len);
	writel(cfg->llm_queue_len, TMU_LLM_QUE_LEN);

	writel(5, TMU_TDQ_IIFG_CFG);
	writel(DDR_BUF_SIZE, TMU_BMU_BUF_SIZE);

	writel(0x0, TMU_CTRL);

	/* MEM init */
	writel(MEM_INIT, TMU_CTRL);

	while (!(readl(TMU_CTRL) & MEM_INIT_DONE))
		;

	/* LLM init */
	writel(LLM_INIT, TMU_CTRL);

	while (!(readl(TMU_CTRL) & LLM_INIT_DONE))
		;

	/* set up each queue for tail drop */
	for (phyno = 0; phyno < 4; phyno++) {
		if (phyno == 2)
			continue;
		for (q = 0; q < 16; q++) {
			u32 qmax;

			writel((phyno << 8) | q, TMU_TEQ_CTRL);
			writel(BIT(22), TMU_TEQ_QCFG);

			if (phyno == 3)
				qmax = DEFAULT_TMU3_QDEPTH;
			else
				qmax = (q == 0) ? DEFAULT_Q0_QDEPTH :
					DEFAULT_MAX_QDEPTH;

			writel(qmax << 18, TMU_TEQ_HW_PROB_CFG2);
			writel(qmax >> 14, TMU_TEQ_HW_PROB_CFG3);
		}
	}
	writel(0x05, TMU_TEQ_DISABLE_DROPCHK);
	writel(0, TMU_CTRL);
}
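
/*
 * Notes on the queue setup above (derived from the register writes, not
 * from documentation): the per-queue depth limit straddles two registers,
 * so the low 14 bits of qmax land in TMU_TEQ_HW_PROB_CFG2 (shifted left
 * by 18) and the remaining high bits go into TMU_TEQ_HW_PROB_CFG3.
 * PHY 2 and TDQ2 are skipped, which is consistent with TMU2 being left
 * out of the pe[] table in pfe_lib_init().
 */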

/**************************** HIF ***************************/
/*
 * Enable hif tx DMA and interrupt
 */
void hif_tx_enable(void)
{
	writel(HIF_CTRL_DMA_EN, HIF_TX_CTRL);
}

/*
 * Disable hif tx DMA and interrupt
 */
void hif_tx_disable(void)
{
	u32 hif_int;

	writel(0, HIF_TX_CTRL);

	/* mask out the HIF TX packet interrupt */
	hif_int = readl(HIF_INT_ENABLE);
	hif_int &= ~HIF_TXPKT_INT_EN;
	writel(hif_int, HIF_INT_ENABLE);
}

/*
 * Enable hif rx DMA and interrupt
 */
void hif_rx_enable(void)
{
	writel((HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB), HIF_RX_CTRL);
}

/*
 * Disable hif rx DMA and interrupt
 */
void hif_rx_disable(void)
{
	u32 hif_int;

	writel(0, HIF_RX_CTRL);

	/* mask out the HIF RX packet interrupt */
	hif_int = readl(HIF_INT_ENABLE);
	hif_int &= ~HIF_RXPKT_INT_EN;
	writel(hif_int, HIF_INT_ENABLE);
}

/*
 * Initializes HIF copy block.
 */
void hif_init(void)
{
	/* Initialize HIF registers */
	writel(HIF_RX_POLL_CTRL_CYCLE << 16 | HIF_TX_POLL_CTRL_CYCLE,
	       HIF_POLL_CTRL);
	/* Make HIF AXI transactions non-bufferable */
	writel(0x1, HIF_AXI_CTRL);
}
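
/*
 * hif_init() only programs the RX/TX poll cycle counters and makes HIF
 * AXI transactions non-bufferable; the DMA engines themselves are started
 * later through hif_rx_enable()/hif_tx_enable(), typically after the
 * caller has set up its HIF descriptor rings.
 */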