// SPDX-License-Identifier: GPL-2.0-only

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include "mtk_ppe.h"
#include "mtk_ppe_regs.h"
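
/* Basic MMIO accessors for the PPE register window; ppe_m32() performs a
 * masked read-modify-write of a single register.
 */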
static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	writel(val, ppe->base + reg);
}

static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg)
{
	return readl(ppe->base + reg);
}

static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set)
{
	u32 val;

	val = ppe_r32(ppe, reg);
	val &= ~mask;
	val |= set;
	ppe_w32(ppe, reg, val);

	return val;
}

static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, 0, val);
}

static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, val, 0);
}

static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
{
	int ret;
	u32 val;

	ret = readl_poll_timeout(ppe->base + MTK_PPE_GLO_CFG, val,
				 !(val & MTK_PPE_GLO_CFG_BUSY),
				 20, MTK_PPE_WAIT_TIMEOUT_US);

	if (ret)
		dev_err(ppe->dev, "PPE table busy");

	return ret;
}

static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
{
	ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
	ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
}

static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
{
	mtk_ppe_cache_clear(ppe);

	ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN,
		enable * MTK_PPE_CACHE_CTL_EN);
}
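
/* Compute the FOE table bucket index for an entry.  The three intermediate
 * hash words (hv1..hv3) are taken from different tuple fields depending on
 * the packet type; unsupported types fall back to MTK_PPE_HASH_MASK.
 */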
static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
{
	u32 hv1, hv2, hv3;
	u32 hash;

	switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) {
	case MTK_PPE_PKT_TYPE_BRIDGE:
		hv1 = e->bridge.src_mac_lo;
		hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16);
		hv2 = e->bridge.src_mac_hi >> 16;
		hv2 ^= e->bridge.dest_mac_lo;
		hv3 = e->bridge.dest_mac_hi;
		break;
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		hv1 = e->ipv4.orig.ports;
		hv2 = e->ipv4.orig.dest_ip;
		hv3 = e->ipv4.orig.src_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
		hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3];
		hv1 ^= e->ipv6.ports;

		hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2];
		hv2 ^= e->ipv6.dest_ip[0];

		hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1];
		hv3 ^= e->ipv6.src_ip[0];
		break;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
	default:
		WARN_ON_ONCE(1);
		return MTK_PPE_HASH_MASK;
	}

	hash = (hv1 & hv2) | ((~hv1) & hv3);
	hash = (hash >> 24) | ((hash & 0xffffff) << 8);
	hash ^= hv1 ^ hv2 ^ hv3;
	hash ^= hash >> 12;
	hash ^= hash >> 24;
	hash &= MTK_PPE_ENTRIES - 1;

	return hash;
}
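
/* The L2 (MAC) info and the ib2 word sit at different offsets depending on
 * whether the entry uses the IPv4 layout or the IPv6/DS-Lite layout, so
 * they are looked up through these helpers.
 */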
static inline struct mtk_foe_mac_info *
mtk_foe_entry_l2(struct mtk_foe_entry *entry)
{
	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.l2;

	return &entry->ipv4.l2;
}

static inline u32 *
mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
{
	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.ib2;

	return &entry->ipv4.ib2;
}
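
/* Initialize a FOE entry from scratch: bind state, packet type, L4 protocol,
 * destination PSE port and the L2 header (MACs and ethertype) used on egress.
 */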
int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
			  u8 pse_port, u8 *src_mac, u8 *dest_mac)
{
	struct mtk_foe_mac_info *l2;
	u32 ports_pad, val;

	memset(entry, 0, sizeof(*entry));

	val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
	      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
	      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
	      MTK_FOE_IB1_BIND_TTL |
	      MTK_FOE_IB1_BIND_CACHE;
	entry->ib1 = val;

	val = FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) |
	      FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f) |
	      FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port);

	if (is_multicast_ether_addr(dest_mac))
		val |= MTK_FOE_IB2_MULTICAST;

	ports_pad = 0xa5a5a500 | (l4proto & 0xff);
	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		entry->ipv4.orig.ports = ports_pad;
	if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		entry->ipv6.ports = ports_pad;

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
		entry->ipv6.ib2 = val;
		l2 = &entry->ipv6.l2;
	} else {
		entry->ipv4.ib2 = val;
		l2 = &entry->ipv4.l2;
	}

	l2->dest_mac_hi = get_unaligned_be32(dest_mac);
	l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4);
	l2->src_mac_hi = get_unaligned_be32(src_mac);
	l2->src_mac_lo = get_unaligned_be16(src_mac + 4);

	if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		l2->etype = ETH_P_IPV6;
	else
		l2->etype = ETH_P_IP;

	return 0;
}

int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port)
{
	u32 *ib2 = mtk_foe_entry_ib2(entry);
	u32 val;

	val = *ib2;
	val &= ~MTK_FOE_IB2_DEST_PORT;
	val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
	*ib2 = val;

	return 0;
}
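
/* Program the IPv4 tuple.  For HNAPT entries the egress flag selects the
 * translated (new) tuple instead of the original one; for 6RD the addresses
 * are stored as the IPv4 tunnel endpoints instead.
 */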
int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool egress,
				 __be32 src_addr, __be16 src_port,
				 __be32 dest_addr, __be16 dest_port)
{
	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
	struct mtk_ipv4_tuple *t;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		if (egress) {
			t = &entry->ipv4.new;
			break;
		}
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
		t = &entry->ipv4.orig;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr);
		entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr);
		return 0;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	t->src_ip = be32_to_cpu(src_addr);
	t->dest_ip = be32_to_cpu(dest_addr);

	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		return 0;

	t->src_port = be16_to_cpu(src_port);
	t->dest_port = be16_to_cpu(dest_port);

	return 0;
}
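
/* Program the IPv6 tuple.  DS-Lite entries store the addresses as the IPv6
 * tunnel endpoints; 5T and 6RD entries additionally carry the L4 ports.
 */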
int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
				 __be32 *src_addr, __be16 src_port,
				 __be32 *dest_addr, __be16 dest_port)
{
	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
	u32 *src, *dest;
	int i;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
		src = entry->dslite.tunnel_src_ip;
		dest = entry->dslite.tunnel_dest_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6.src_port = be16_to_cpu(src_port);
		entry->ipv6.dest_port = be16_to_cpu(dest_port);
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
		src = entry->ipv6.src_ip;
		dest = entry->ipv6.dest_ip;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	for (i = 0; i < 4; i++)
		src[i] = be32_to_cpu(src_addr[i]);
	for (i = 0; i < 4; i++)
		dest[i] = be32_to_cpu(dest_addr[i]);

	return 0;
}
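
/* Flag the flow as egressing through a DSA switch port.  The port tag is
 * carried in the entry's etype field and accounted as one VLAN layer.
 */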
int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);

	l2->etype = BIT(port);

	if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER))
		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
	else
		l2->etype |= BIT(8);

	entry->ib1 &= ~MTK_FOE_IB1_BIND_VLAN_TAG;

	return 0;
}
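
/* Add a VLAN tag to the entry.  Up to two tags are supported; the VLAN layer
 * count in ib1 tracks how many have been pushed so far.
 */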
int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);

	switch (FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, entry->ib1)) {
	case 0:
		entry->ib1 |= MTK_FOE_IB1_BIND_VLAN_TAG |
			      FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
		l2->vlan1 = vid;
		return 0;
	case 1:
		if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG)) {
			l2->vlan1 = vid;
			l2->etype |= BIT(8);
		} else {
			l2->vlan2 = vid;
			entry->ib1 += FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
		}
		return 0;
	default:
		return -ENOSPC;
	}
}
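
/* Attach a PPPoE session ID.  The ethertype is rewritten to ETH_P_PPP_SES
 * unless the VLAN layer count is set without an actual VLAN tag, in which
 * case the etype field already carries a DSA tag and is left alone.
 */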
int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);

	if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER) ||
	    (entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG))
		l2->etype = ETH_P_PPP_SES;

	entry->ib1 |= MTK_FOE_IB1_BIND_PPPOE;
	l2->pppoe_id = sid;

	return 0;
}

static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
{
	return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
	       FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
}
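
/* Write a prepared entry into the hardware FOE table.  The bucket comes from
 * the tuple hash; if it already holds a live entry, the next slot is tried
 * before giving up.  The entry data is written before ib1 so the hardware
 * never sees a half-written bound entry.
 */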
int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
			 u16 timestamp)
{
	struct mtk_foe_entry *hwe;
	u32 hash;

	timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
	entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
	entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);

	hash = mtk_ppe_hash_entry(entry);
	hwe = &ppe->foe_table[hash];
	if (!mtk_foe_entry_usable(hwe)) {
		hwe++;
		hash++;

		if (!mtk_foe_entry_usable(hwe))
			return -ENOSPC;
	}

	memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
	wmb();
	hwe->ib1 = entry->ib1;

	dma_wmb();

	mtk_ppe_cache_clear(ppe);

	return hash;
}

int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
		 int version)
{
	struct mtk_foe_entry *foe;

	/* need to allocate a separate device, since PPE DMA access is
	 * not coherent.
	 */
	ppe->base = base;
	ppe->dev = dev;
	ppe->version = version;

	foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe),
				  &ppe->foe_phys, GFP_KERNEL);
	if (!foe)
		return -ENOMEM;

	ppe->foe_table = foe;

	mtk_ppe_debugfs_init(ppe);

	return 0;
}
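
/* Reset the FOE table.  On MT7621, the 80-byte entries that would cross a
 * 1024-byte boundary are marked static so the hardware never binds them.
 */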
static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
{
	static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
	int i, k;

	memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));

	if (!IS_ENABLED(CONFIG_SOC_MT7621))
		return;

	/* skip all entries that cross the 1024 byte boundary */
	for (i = 0; i < MTK_PPE_ENTRIES; i += 128)
		for (k = 0; k < ARRAY_SIZE(skip); k++)
			ppe->foe_table[i + skip[k]].ib1 |= MTK_FOE_IB1_STATIC;
}
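
/* Bring up the PPE: point it at the FOE table, configure the table, flow,
 * aging and bind-rate parameters, then set the global enable bit.
 */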
int mtk_ppe_start(struct mtk_ppe *ppe)
{
	u32 val;

	mtk_ppe_init_foe_table(ppe);
	ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);

	val = MTK_PPE_TB_CFG_ENTRY_80B |
	      MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN |
	      FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS,
			 MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) |
	      FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE,
			 MTK_PPE_KEEPALIVE_DISABLE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
	      FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
			 MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
			 MTK_PPE_ENTRIES_SHIFT);
	ppe_w32(ppe, MTK_PPE_TB_CFG, val);

	ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
		MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6);

	mtk_ppe_cache_enable(ppe, true);

	val = MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
	      MTK_PPE_FLOW_CFG_IP4_UDP_FRAG |
	      MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_6RD |
	      MTK_PPE_FLOW_CFG_IP4_NAT |
	      MTK_PPE_FLOW_CFG_IP4_NAPT |
	      MTK_PPE_FLOW_CFG_IP4_DSLITE |
	      MTK_PPE_FLOW_CFG_L2_BRIDGE |
	      MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);

	val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
	      FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3);
	ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) |
	      FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_AGE0, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
	      FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7);
	ppe_w32(ppe, MTK_PPE_BIND_AGE1, val);

	val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF;
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val);

	val = MTK_PPE_BIND_LIMIT1_FULL |
	      FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val);

	val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) |
	      FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1);
	ppe_w32(ppe, MTK_PPE_BIND_RATE, val);

	/* enable PPE */
	val = MTK_PPE_GLO_CFG_EN |
	      MTK_PPE_GLO_CFG_IP4_L4_CS_DROP |
	      MTK_PPE_GLO_CFG_IP4_CS_DROP |
	      MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE;
	ppe_w32(ppe, MTK_PPE_GLO_CFG, val);

	ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);

	return 0;
}
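
/* Shut the PPE down: invalidate all FOE entries, disable the engine and
 * aging, then wait for any pending table access to finish.
 */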
int mtk_ppe_stop(struct mtk_ppe *ppe)
{
	u32 val;
	int i;

	for (i = 0; i < MTK_PPE_ENTRIES; i++)
		ppe->foe_table[i].ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
						   MTK_FOE_STATE_INVALID);

	mtk_ppe_cache_enable(ppe, false);

	/* disable offload engine */
	ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);

	/* disable aging */
	val = MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN;
	ppe_clear(ppe, MTK_PPE_TB_CFG, val);

	return mtk_ppe_wait_busy(ppe);
}