// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
#include "mtk_ppe.h"
#include "mtk_ppe_regs.h"

static DEFINE_SPINLOCK(ppe_lock);

static const struct rhashtable_params mtk_flow_l2_ht_params = {
	.head_offset = offsetof(struct mtk_flow_entry, l2_node),
	.key_offset = offsetof(struct mtk_flow_entry, data.bridge),
	.key_len = offsetof(struct mtk_foe_bridge, key_end),
	.automatic_shrinking = true,
};

static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	writel(val, ppe->base + reg);
}

static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg)
{
	return readl(ppe->base + reg);
}

static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set)
{
	u32 val;

	val = ppe_r32(ppe, reg);
	val &= ~mask;
	val |= set;
	ppe_w32(ppe, reg, val);

	return val;
}

static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, 0, val);
}

static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, val, 0);
}

static u32 mtk_eth_timestamp(struct mtk_eth *eth)
{
	return mtk_r32(eth, 0x0010) & mtk_get_ib1_ts_mask(eth);
}

static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
{
	int ret;
	u32 val;

	ret = readl_poll_timeout(ppe->base + MTK_PPE_GLO_CFG, val,
				 !(val & MTK_PPE_GLO_CFG_BUSY),
				 20, MTK_PPE_WAIT_TIMEOUT_US);

	if (ret)
		dev_err(ppe->dev, "PPE table busy");

	return ret;
}

static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
{
	ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
	ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
}

static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
{
	mtk_ppe_cache_clear(ppe);

	ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN,
		enable * MTK_PPE_CACHE_CTL_EN);
}

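/* Derive the FOE table index for an entry from its address/port tuple.
 * The tuple words are mixed into three hash values, folded into a single
 * index and scaled by the per-SoC hash_offset.
 */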
static u32 mtk_ppe_hash_entry(struct mtk_eth *eth, struct mtk_foe_entry *e)
{
	u32 hv1, hv2, hv3;
	u32 hash;

	switch (mtk_get_ib1_pkt_type(eth, e->ib1)) {
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		hv1 = e->ipv4.orig.ports;
		hv2 = e->ipv4.orig.dest_ip;
		hv3 = e->ipv4.orig.src_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
		hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3];
		hv1 ^= e->ipv6.ports;

		hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2];
		hv2 ^= e->ipv6.dest_ip[0];

		hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1];
		hv3 ^= e->ipv6.src_ip[0];
		break;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
	default:
		WARN_ON_ONCE(1);
		return MTK_PPE_HASH_MASK;
	}

	hash = (hv1 & hv2) | ((~hv1) & hv3);
	hash = (hash >> 24) | ((hash & 0xffffff) << 8);
	hash ^= hv1 ^ hv2 ^ hv3;
	hash ^= hash >> 16;
	hash <<= (ffs(eth->soc->hash_offset) - 1);
	hash &= MTK_PPE_ENTRIES - 1;

	return hash;
}

static inline struct mtk_foe_mac_info *
mtk_foe_entry_l2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return &entry->bridge.l2;

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.l2;

	return &entry->ipv4.l2;
}

static inline u32 *
mtk_foe_entry_ib2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return &entry->bridge.ib2;

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.ib2;

	return &entry->ipv4.ib2;
}

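/* Initialize a software FOE entry: build the IB1/IB2 control words for the
 * given packet type, L4 protocol and destination PSE port, and fill in the
 * L2 info (MAC addresses and ethertype).
 */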
int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			  int type, int l4proto, u8 pse_port, u8 *src_mac,
			  u8 *dest_mac)
{
	struct mtk_foe_mac_info *l2;
	u32 ports_pad, val;

	memset(entry, 0, sizeof(*entry));

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
		      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) |
		      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
		      MTK_FOE_IB1_BIND_CACHE_V2 | MTK_FOE_IB1_BIND_TTL_V2;
		entry->ib1 = val;

		val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, pse_port) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_AG_V2, 0xf);
	} else {
		int port_mg = eth->soc->offload_version > 1 ? 0 : 0x3f;

		val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
		      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
		      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
		      MTK_FOE_IB1_BIND_CACHE | MTK_FOE_IB1_BIND_TTL;
		entry->ib1 = val;

		val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_MG, port_mg) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f);
	}

	if (is_multicast_ether_addr(dest_mac))
		val |= mtk_get_ib2_multicast_mask(eth);

	ports_pad = 0xa5a5a500 | (l4proto & 0xff);
	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		entry->ipv4.orig.ports = ports_pad;
	if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		entry->ipv6.ports = ports_pad;

	if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
		ether_addr_copy(entry->bridge.src_mac, src_mac);
		ether_addr_copy(entry->bridge.dest_mac, dest_mac);
		entry->bridge.ib2 = val;
		l2 = &entry->bridge.l2;
	} else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
		entry->ipv6.ib2 = val;
		l2 = &entry->ipv6.l2;
	} else {
		entry->ipv4.ib2 = val;
		l2 = &entry->ipv4.l2;
	}

	l2->dest_mac_hi = get_unaligned_be32(dest_mac);
	l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4);
	l2->src_mac_hi = get_unaligned_be32(src_mac);
	l2->src_mac_lo = get_unaligned_be16(src_mac + 4);

	if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		l2->etype = ETH_P_IPV6;
	else
		l2->etype = ETH_P_IP;

	return 0;
}

int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
			       struct mtk_foe_entry *entry, u8 port)
{
	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
	u32 val = *ib2;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		val &= ~MTK_FOE_IB2_DEST_PORT_V2;
		val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port);
	} else {
		val &= ~MTK_FOE_IB2_DEST_PORT;
		val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
	}
	*ib2 = val;

	return 0;
}

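/* Fill in the IPv4 address/port tuple. For HNAPT entries the egress flag
 * selects between the original and the translated tuple; for 6RD only the
 * IPv4 tunnel endpoints are stored here.
 */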
int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth,
				 struct mtk_foe_entry *entry, bool egress,
				 __be32 src_addr, __be16 src_port,
				 __be32 dest_addr, __be16 dest_port)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
	struct mtk_ipv4_tuple *t;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		if (egress) {
			t = &entry->ipv4.new;
			break;
		}
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
		t = &entry->ipv4.orig;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr);
		entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr);
		return 0;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	t->src_ip = be32_to_cpu(src_addr);
	t->dest_ip = be32_to_cpu(dest_addr);

	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		return 0;

	t->src_port = be16_to_cpu(src_port);
	t->dest_port = be16_to_cpu(dest_port);

	return 0;
}

int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
				 struct mtk_foe_entry *entry,
				 __be32 *src_addr, __be16 src_port,
				 __be32 *dest_addr, __be16 dest_port)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
	u32 *src, *dest;
	int i;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
		src = entry->dslite.tunnel_src_ip;
		dest = entry->dslite.tunnel_dest_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6.src_port = be16_to_cpu(src_port);
		entry->ipv6.dest_port = be16_to_cpu(dest_port);
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
		src = entry->ipv6.src_ip;
		dest = entry->ipv6.dest_ip;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	for (i = 0; i < 4; i++)
		src[i] = be32_to_cpu(src_addr[i]);
	for (i = 0; i < 4; i++)
		dest[i] = be32_to_cpu(dest_addr[i]);

	return 0;
}

int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			  int port)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	l2->etype = BIT(port);

	if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)))
		entry->ib1 |= mtk_prep_ib1_vlan_layer(eth, 1);
	else
		l2->etype |= BIT(8);

	entry->ib1 &= ~mtk_get_ib1_vlan_tag_mask(eth);

	return 0;
}

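/* Add a VLAN tag to the entry. At most two tags are supported; the VLAN
 * layer count in IB1 tracks how many tags have been added so far.
 */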
int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			   int vid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	switch (mtk_get_ib1_vlan_layer(eth, entry->ib1)) {
	case 0:
		entry->ib1 |= mtk_get_ib1_vlan_tag_mask(eth) |
			      mtk_prep_ib1_vlan_layer(eth, 1);
		l2->vlan1 = vid;
		return 0;
	case 1:
		if (!(entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth))) {
			l2->vlan1 = vid;
			l2->etype |= BIT(8);
		} else {
			l2->vlan2 = vid;
			entry->ib1 += mtk_prep_ib1_vlan_layer(eth, 1);
		}
		return 0;
	default:
		return -ENOSPC;
	}
}

int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			    int sid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)) ||
	    (entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth)))
		l2->etype = ETH_P_PPP_SES;

	entry->ib1 |= mtk_get_ib1_ppoe_mask(eth);
	l2->pppoe_id = sid;

	return 0;
}

int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			   int wdma_idx, int txq, int bss, int wcid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		*ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
		*ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
			MTK_FOE_IB2_WDMA_WINFO_V2;
		l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) |
			    FIELD_PREP(MTK_FOE_WINFO_BSS, bss);
	} else {
		*ib2 &= ~MTK_FOE_IB2_PORT_MG;
		*ib2 |= MTK_FOE_IB2_WDMA_WINFO;
		if (wdma_idx)
			*ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
		l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
			    FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
			    FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
	}

	return 0;
}

int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			    unsigned int queue)
{
	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		*ib2 &= ~MTK_FOE_IB2_QID_V2;
		*ib2 |= FIELD_PREP(MTK_FOE_IB2_QID_V2, queue);
		*ib2 |= MTK_FOE_IB2_PSE_QOS_V2;
	} else {
		*ib2 &= ~MTK_FOE_IB2_QID;
		*ib2 |= FIELD_PREP(MTK_FOE_IB2_QID, queue);
		*ib2 |= MTK_FOE_IB2_PSE_QOS;
	}

	return 0;
}

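/* Check whether a software flow entry describes the same flow as a hardware
 * FOE entry: the UDP flag in IB1 and the address/port tuple must match.
 */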
static bool
mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
		     struct mtk_foe_entry *data)
{
	int type, len;

	if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
		return false;

	type = mtk_get_ib1_pkt_type(eth, entry->data.ib1);
	if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		len = offsetof(struct mtk_foe_entry, ipv6._rsv);
	else
		len = offsetof(struct mtk_foe_entry, ipv4.ib2);

	return !memcmp(&entry->data.data, &data->data, len - 4);
}

static void
__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	struct hlist_head *head;
	struct hlist_node *tmp;

	if (entry->type == MTK_FLOW_TYPE_L2) {
		rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
				       mtk_flow_l2_ht_params);

		head = &entry->l2_flows;
		hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
			__mtk_foe_entry_clear(ppe, entry);
		return;
	}

	hlist_del_init(&entry->list);
	if (entry->hash != 0xffff) {
		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);

		hwe->ib1 &= ~MTK_FOE_IB1_STATE;
		hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
		dma_wmb();
	}
	entry->hash = 0xffff;

	if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
		return;

	hlist_del_init(&entry->l2_data.list);
	kfree(entry);
}

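/* Return the idle time of an entry in hardware timestamp units, accounting
 * for wraparound of the timestamp counter.
 */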
static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
{
	u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
	u16 now = mtk_eth_timestamp(ppe->eth);
	u16 timestamp = ib1 & ib1_ts_mask;

	if (timestamp > now)
		return ib1_ts_mask + 1 - timestamp + now;
	else
		return now - timestamp;
}

static void
mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
	struct mtk_flow_entry *cur;
	struct mtk_foe_entry *hwe;
	struct hlist_node *tmp;
	int idle;

	idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
	hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
		int cur_idle;
		u32 ib1;

		hwe = mtk_foe_get_entry(ppe, cur->hash);
		ib1 = READ_ONCE(hwe->ib1);

		if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
			cur->hash = 0xffff;
			__mtk_foe_entry_clear(ppe, cur);
			continue;
		}

		cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
		if (cur_idle >= idle)
			continue;

		idle = cur_idle;
		entry->data.ib1 &= ~ib1_ts_mask;
		entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
	}
}

static void
mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	struct mtk_foe_entry foe = {};
	struct mtk_foe_entry *hwe;

	spin_lock_bh(&ppe_lock);

	if (entry->type == MTK_FLOW_TYPE_L2) {
		mtk_flow_entry_update_l2(ppe, entry);
		goto out;
	}

	if (entry->hash == 0xffff)
		goto out;

	hwe = mtk_foe_get_entry(ppe, entry->hash);
	memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size);
	if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) {
		entry->hash = 0xffff;
		goto out;
	}

	entry->data.ib1 = foe.ib1;

out:
	spin_unlock_bh(&ppe_lock);
}

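/* Write a software entry into the hardware FOE table at @hash. The entry
 * data is written before IB1 so the entry only becomes valid once it is
 * complete; the PPE cache is cleared afterwards to avoid stale lookups.
 */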
static void
__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
		       u16 hash)
{
	struct mtk_eth *eth = ppe->eth;
	u16 timestamp = mtk_eth_timestamp(eth);
	struct mtk_foe_entry *hwe;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2;
		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2,
					 timestamp);
	} else {
		entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP,
					 timestamp);
	}

	hwe = mtk_foe_get_entry(ppe, hash);
	memcpy(&hwe->data, &entry->data, eth->soc->foe_entry_size - sizeof(hwe->ib1));
	wmb();
	hwe->ib1 = entry->ib1;

	dma_wmb();

	mtk_ppe_cache_clear(ppe);
}

void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	spin_lock_bh(&ppe_lock);
	__mtk_foe_entry_clear(ppe, entry);
	spin_unlock_bh(&ppe_lock);
}

static int
mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	entry->type = MTK_FLOW_TYPE_L2;

	return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node,
				      mtk_flow_l2_ht_params);
}

int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	int type = mtk_get_ib1_pkt_type(ppe->eth, entry->data.ib1);
	u32 hash;

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return mtk_foe_entry_commit_l2(ppe, entry);

	hash = mtk_ppe_hash_entry(ppe->eth, &entry->data);
	entry->hash = 0xffff;
	spin_lock_bh(&ppe_lock);
	hlist_add_head(&entry->list, &ppe->foe_flow[hash / soc->hash_offset]);
	spin_unlock_bh(&ppe_lock);

	return 0;
}

static void
mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
			     u16 hash)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	struct mtk_flow_entry *flow_info;
	struct mtk_foe_entry foe = {}, *hwe;
	struct mtk_foe_mac_info *l2;
	u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
	int type;

	flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
			    GFP_ATOMIC);
	if (!flow_info)
		return;

	flow_info->l2_data.base_flow = entry;
	flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
	flow_info->hash = hash;
	hlist_add_head(&flow_info->list,
		       &ppe->foe_flow[hash / soc->hash_offset]);
	hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);

	hwe = mtk_foe_get_entry(ppe, hash);
	memcpy(&foe, hwe, soc->foe_entry_size);
	foe.ib1 &= ib1_mask;
	foe.ib1 |= entry->data.ib1 & ~ib1_mask;

	l2 = mtk_foe_entry_l2(ppe->eth, &foe);
	memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));

	type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1);
	if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
		memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
	else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
		l2->etype = ETH_P_IPV6;

	*mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2;

	__mtk_foe_entry_commit(ppe, &foe, hash);
}

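/* Called for packets that hit an unbound FOE entry: re-commit any matching
 * software flow for this hash bucket, or look up an L2 (bridge) flow by
 * MAC address and VLAN and create a subflow entry for it.
 */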
void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	struct hlist_head *head = &ppe->foe_flow[hash / soc->hash_offset];
	struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash);
	struct mtk_flow_entry *entry;
	struct mtk_foe_bridge key = {};
	struct hlist_node *n;
	struct ethhdr *eh;
	bool found = false;
	u8 *tag;

	spin_lock_bh(&ppe_lock);

	if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
		goto out;

	hlist_for_each_entry_safe(entry, n, head, list) {
		if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
			if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
				     MTK_FOE_STATE_BIND))
				continue;

			entry->hash = 0xffff;
			__mtk_foe_entry_clear(ppe, entry);
			continue;
		}

		if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) {
			if (entry->hash != 0xffff)
				entry->hash = 0xffff;
			continue;
		}

		entry->hash = hash;
		__mtk_foe_entry_commit(ppe, &entry->data, hash);
		found = true;
	}

	if (found)
		goto out;

	eh = eth_hdr(skb);
	ether_addr_copy(key.dest_mac, eh->h_dest);
	ether_addr_copy(key.src_mac, eh->h_source);
	tag = skb->data - 2;
	key.vlan = 0;
	switch (skb->protocol) {
#if IS_ENABLED(CONFIG_NET_DSA)
	case htons(ETH_P_XDSA):
		if (!netdev_uses_dsa(skb->dev) ||
		    skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
			goto out;

		tag += 4;
		if (get_unaligned_be16(tag) != ETH_P_8021Q)
			break;

		fallthrough;
#endif
	case htons(ETH_P_8021Q):
		key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
		break;
	default:
		break;
	}

	entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
	if (!entry)
		goto out;

	mtk_foe_entry_commit_subflow(ppe, entry, hash);

out:
	spin_unlock_bh(&ppe_lock);
}

int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	mtk_flow_entry_update(ppe, entry);

	return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
}

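/* Allocate and set up a PPE instance: the DMA-coherent FOE table, the
 * per-bucket software flow lists and the L2 flow hash table.
 */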
struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
			     int version, int index)
{
	const struct mtk_soc_data *soc = eth->soc;
	struct device *dev = eth->dev;
	struct mtk_ppe *ppe;
	u32 foe_flow_size;
	void *foe;

	ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
	if (!ppe)
		return NULL;

	rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);

	/* need to allocate a separate device, since the PPE DMA access is
	 * not coherent.
	 */
	ppe->base = base;
	ppe->eth = eth;
	ppe->dev = dev;
	ppe->version = version;

	foe = dmam_alloc_coherent(ppe->dev,
				  MTK_PPE_ENTRIES * soc->foe_entry_size,
				  &ppe->foe_phys, GFP_KERNEL);
	if (!foe)
		goto err_free_l2_flows;

	ppe->foe_table = foe;

	foe_flow_size = (MTK_PPE_ENTRIES / soc->hash_offset) *
			sizeof(*ppe->foe_flow);
	ppe->foe_flow = devm_kzalloc(dev, foe_flow_size, GFP_KERNEL);
	if (!ppe->foe_flow)
		goto err_free_l2_flows;

	mtk_ppe_debugfs_init(ppe, index);

	return ppe;

err_free_l2_flows:
	rhashtable_destroy(&ppe->l2_flows);
	return NULL;
}

void mtk_ppe_deinit(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) {
		if (!eth->ppe[i])
			return;
		rhashtable_destroy(&eth->ppe[i]->l2_flows);
	}
}

static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
{
	static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
	int i, k;

	memset(ppe->foe_table, 0,
	       MTK_PPE_ENTRIES * ppe->eth->soc->foe_entry_size);

	if (!IS_ENABLED(CONFIG_SOC_MT7621))
		return;

	/* skip all entries that cross the 1024 byte boundary */
	for (i = 0; i < MTK_PPE_ENTRIES; i += 128) {
		for (k = 0; k < ARRAY_SIZE(skip); k++) {
			struct mtk_foe_entry *hwe;

			hwe = mtk_foe_get_entry(ppe, i + skip[k]);
			hwe->ib1 |= MTK_FOE_IB1_STATIC;
		}
	}
}

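/* Program the PPE and enable offloading: table base and geometry, supported
 * flow types, aging and bind rate limits, and the default CPU port.
 */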
void mtk_ppe_start(struct mtk_ppe *ppe)
{
	u32 val;

	if (!ppe)
		return;

	mtk_ppe_init_foe_table(ppe);
	ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);

	val = MTK_PPE_TB_CFG_ENTRY_80B |
	      MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN |
	      FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS,
			 MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) |
	      FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE,
			 MTK_PPE_KEEPALIVE_DISABLE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
	      FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
			 MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
			 MTK_PPE_ENTRIES_SHIFT);
	if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
		val |= MTK_PPE_TB_CFG_INFO_SEL;
	ppe_w32(ppe, MTK_PPE_TB_CFG, val);

	ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
		MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6);

	mtk_ppe_cache_enable(ppe, true);

	val = MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_6RD |
	      MTK_PPE_FLOW_CFG_IP4_NAT |
	      MTK_PPE_FLOW_CFG_IP4_NAPT |
	      MTK_PPE_FLOW_CFG_IP4_DSLITE |
	      MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
	if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
		val |= MTK_PPE_MD_TOAP_BYP_CRSN0 |
		       MTK_PPE_MD_TOAP_BYP_CRSN1 |
		       MTK_PPE_MD_TOAP_BYP_CRSN2 |
		       MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY;
	else
		val |= MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
		       MTK_PPE_FLOW_CFG_IP4_UDP_FRAG;
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);

	val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
	      FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3);
	ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) |
	      FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_AGE0, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
	      FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7);
	ppe_w32(ppe, MTK_PPE_BIND_AGE1, val);

	val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF;
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val);

	val = MTK_PPE_BIND_LIMIT1_FULL |
	      FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val);

	val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) |
	      FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1);
	ppe_w32(ppe, MTK_PPE_BIND_RATE, val);

	/* enable PPE */
	val = MTK_PPE_GLO_CFG_EN |
	      MTK_PPE_GLO_CFG_IP4_L4_CS_DROP |
	      MTK_PPE_GLO_CFG_IP4_CS_DROP |
	      MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE;
	ppe_w32(ppe, MTK_PPE_GLO_CFG, val);

	ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);

	if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2)) {
		ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777);
		ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f);
	}
}

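/* Invalidate all FOE entries and disable the offload engine, then wait for
 * any pending table operation to complete.
 */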
int mtk_ppe_stop(struct mtk_ppe *ppe)
{
	u32 val;
	int i;

	if (!ppe)
		return 0;

	for (i = 0; i < MTK_PPE_ENTRIES; i++) {
		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, i);

		hwe->ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
				      MTK_FOE_STATE_INVALID);
	}

	mtk_ppe_cache_enable(ppe, false);

	/* disable offload engine */
	ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);

	/* disable aging */
	val = MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN;
	ppe_clear(ppe, MTK_PPE_TB_CFG, val);

	return mtk_ppe_wait_busy(ppe);
}