// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
#include "mtk_ppe.h"
#include "mtk_ppe_regs.h"

static DEFINE_SPINLOCK(ppe_lock);

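/* Software table of L2 (bridged) flows, keyed on the bridge tuple
 * (destination/source MAC and VLAN). Hardware entries for these flows are
 * created lazily, one per FOE hash bucket, as matching packets are seen.
 */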
static const struct rhashtable_params mtk_flow_l2_ht_params = {
	.head_offset = offsetof(struct mtk_flow_entry, l2_node),
	.key_offset = offsetof(struct mtk_flow_entry, data.bridge),
	.key_len = offsetof(struct mtk_foe_bridge, key_end),
	.automatic_shrinking = true,
};

static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	writel(val, ppe->base + reg);
}

static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg)
{
	return readl(ppe->base + reg);
}

static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set)
{
	u32 val;

	val = ppe_r32(ppe, reg);
	val &= ~mask;
	val |= set;
	ppe_w32(ppe, reg, val);

	return val;
}

static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, 0, val);
}

static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, val, 0);
}

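/* Current hardware timestamp, masked to the width of the IB1 timestamp
 * field; used as the reference for entry aging and idle-time accounting.
 */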
static u32 mtk_eth_timestamp(struct mtk_eth *eth)
{
	return mtk_r32(eth, 0x0010) & mtk_get_ib1_ts_mask(eth);
}

static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
{
	int ret;
	u32 val;

	ret = readl_poll_timeout(ppe->base + MTK_PPE_GLO_CFG, val,
				 !(val & MTK_PPE_GLO_CFG_BUSY),
				 20, MTK_PPE_WAIT_TIMEOUT_US);

	if (ret)
		dev_err(ppe->dev, "PPE table busy");

	return ret;
}

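/* Pulse the clear bit to invalidate the PPE's internal lookup cache, so that
 * table updates take effect immediately instead of being served from cache.
 */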
static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
{
	ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
	ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
}

static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
{
	mtk_ppe_cache_clear(ppe);

	ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN,
		enable * MTK_PPE_CACHE_CTL_EN);
}

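/* Software mirror of the PPE hash over the flow tuple. It is used to predict
 * which table bucket the hardware will bind a flow to, so the matching
 * software entry can be found again via foe_flow[hash / hash_offset].
 */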
static u32 mtk_ppe_hash_entry(struct mtk_eth *eth, struct mtk_foe_entry *e)
{
	u32 hv1, hv2, hv3;
	u32 hash;

	switch (mtk_get_ib1_pkt_type(eth, e->ib1)) {
		case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
		case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
			hv1 = e->ipv4.orig.ports;
			hv2 = e->ipv4.orig.dest_ip;
			hv3 = e->ipv4.orig.src_ip;
			break;
		case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
		case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
			hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3];
			hv1 ^= e->ipv6.ports;

			hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2];
			hv2 ^= e->ipv6.dest_ip[0];

			hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1];
			hv3 ^= e->ipv6.src_ip[0];
			break;
		case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
		case MTK_PPE_PKT_TYPE_IPV6_6RD:
		default:
			WARN_ON_ONCE(1);
			return MTK_PPE_HASH_MASK;
	}

	hash = (hv1 & hv2) | ((~hv1) & hv3);
	hash = (hash >> 24) | ((hash & 0xffffff) << 8);
	hash ^= hv1 ^ hv2 ^ hv3;
	hash ^= hash >> 16;
	hash <<= (ffs(eth->soc->hash_offset) - 1);
	hash &= MTK_PPE_ENTRIES - 1;

	return hash;
}

static inline struct mtk_foe_mac_info *
mtk_foe_entry_l2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return &entry->bridge.l2;

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.l2;

	return &entry->ipv4.l2;
}

static inline u32 *
mtk_foe_entry_ib2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return &entry->bridge.ib2;

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.ib2;

	return &entry->ipv4.ib2;
}

int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			  int type, int l4proto, u8 pse_port, u8 *src_mac,
			  u8 *dest_mac)
{
	struct mtk_foe_mac_info *l2;
	u32 ports_pad, val;

	memset(entry, 0, sizeof(*entry));

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
		      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) |
		      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
		      MTK_FOE_IB1_BIND_CACHE_V2 | MTK_FOE_IB1_BIND_TTL_V2;
		entry->ib1 = val;

		val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, pse_port) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_AG_V2, 0xf);
	} else {
		int port_mg = eth->soc->offload_version > 1 ? 0 : 0x3f;

		val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
		      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
		      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
		      MTK_FOE_IB1_BIND_CACHE | MTK_FOE_IB1_BIND_TTL;
		entry->ib1 = val;

		val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_MG, port_mg) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f);
	}

	if (is_multicast_ether_addr(dest_mac))
		val |= mtk_get_ib2_multicast_mask(eth);

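	/* the ports field also feeds the hash for 3-tuple entry types, so
	 * fill it with a fixed pad pattern carrying the L4 protocol number
	 * in the low byte
	 */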
	ports_pad = 0xa5a5a500 | (l4proto & 0xff);
	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		entry->ipv4.orig.ports = ports_pad;
	if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		entry->ipv6.ports = ports_pad;

	if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
		ether_addr_copy(entry->bridge.src_mac, src_mac);
		ether_addr_copy(entry->bridge.dest_mac, dest_mac);
		entry->bridge.ib2 = val;
		l2 = &entry->bridge.l2;
	} else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
		entry->ipv6.ib2 = val;
		l2 = &entry->ipv6.l2;
	} else {
		entry->ipv4.ib2 = val;
		l2 = &entry->ipv4.l2;
	}

	l2->dest_mac_hi = get_unaligned_be32(dest_mac);
	l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4);
	l2->src_mac_hi = get_unaligned_be32(src_mac);
	l2->src_mac_lo = get_unaligned_be16(src_mac + 4);

	if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		l2->etype = ETH_P_IPV6;
	else
		l2->etype = ETH_P_IP;

	return 0;
}

int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
			       struct mtk_foe_entry *entry, u8 port)
{
	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
	u32 val = *ib2;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		val &= ~MTK_FOE_IB2_DEST_PORT_V2;
		val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port);
	} else {
		val &= ~MTK_FOE_IB2_DEST_PORT;
		val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
	}
	*ib2 = val;

	return 0;
}

int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth,
				 struct mtk_foe_entry *entry, bool egress,
				 __be32 src_addr, __be16 src_port,
				 __be32 dest_addr, __be16 dest_port)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
	struct mtk_ipv4_tuple *t;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		if (egress) {
			t = &entry->ipv4.new;
			break;
		}
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
		t = &entry->ipv4.orig;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr);
		entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr);
		return 0;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	t->src_ip = be32_to_cpu(src_addr);
	t->dest_ip = be32_to_cpu(dest_addr);

	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		return 0;

	t->src_port = be16_to_cpu(src_port);
	t->dest_port = be16_to_cpu(dest_port);

	return 0;
}

int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
				 struct mtk_foe_entry *entry,
				 __be32 *src_addr, __be16 src_port,
				 __be32 *dest_addr, __be16 dest_port)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
	u32 *src, *dest;
	int i;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
		src = entry->dslite.tunnel_src_ip;
		dest = entry->dslite.tunnel_dest_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6.src_port = be16_to_cpu(src_port);
		entry->ipv6.dest_port = be16_to_cpu(dest_port);
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
		src = entry->ipv6.src_ip;
		dest = entry->ipv6.dest_ip;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	for (i = 0; i < 4; i++)
		src[i] = be32_to_cpu(src_addr[i]);
	for (i = 0; i < 4; i++)
		dest[i] = be32_to_cpu(dest_addr[i]);

	return 0;
}

int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			  int port)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	l2->etype = BIT(port);

	if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)))
		entry->ib1 |= mtk_prep_ib1_vlan_layer(eth, 1);
	else
		l2->etype |= BIT(8);

	entry->ib1 &= ~mtk_get_ib1_vlan_tag_mask(eth);

	return 0;
}

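/* Up to two VLAN tags can be offloaded per entry; the IB1 VLAN layer field
 * tracks how many are present. A third tag cannot be expressed and is
 * rejected with -ENOSPC.
 */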
int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			   int vid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	switch (mtk_get_ib1_vlan_layer(eth, entry->ib1)) {
	case 0:
		entry->ib1 |= mtk_get_ib1_vlan_tag_mask(eth) |
			      mtk_prep_ib1_vlan_layer(eth, 1);
		l2->vlan1 = vid;
		return 0;
	case 1:
		if (!(entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth))) {
			l2->vlan1 = vid;
			l2->etype |= BIT(8);
		} else {
			l2->vlan2 = vid;
			entry->ib1 += mtk_prep_ib1_vlan_layer(eth, 1);
		}
		return 0;
	default:
		return -ENOSPC;
	}
}

int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			    int sid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)) ||
	    (entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth)))
		l2->etype = ETH_P_PPP_SES;

	entry->ib1 |= mtk_get_ib1_ppoe_mask(eth);
	l2->pppoe_id = sid;

	return 0;
}

int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			   int wdma_idx, int txq, int bss, int wcid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		*ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
		*ib2 |=  FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
			 MTK_FOE_IB2_WDMA_WINFO_V2;
		l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) |
			    FIELD_PREP(MTK_FOE_WINFO_BSS, bss);
	} else {
		*ib2 &= ~MTK_FOE_IB2_PORT_MG;
		*ib2 |= MTK_FOE_IB2_WDMA_WINFO;
		if (wdma_idx)
			*ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
		l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
			    FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
			    FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
	}

	return 0;
}

int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			    unsigned int queue)
{
	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		*ib2 &= ~MTK_FOE_IB2_QID_V2;
		*ib2 |= FIELD_PREP(MTK_FOE_IB2_QID_V2, queue);
		*ib2 |= MTK_FOE_IB2_PSE_QOS_V2;
	} else {
		*ib2 &= ~MTK_FOE_IB2_QID;
		*ib2 |= FIELD_PREP(MTK_FOE_IB2_QID, queue);
		*ib2 |= MTK_FOE_IB2_PSE_QOS;
	}

	return 0;
}

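/* Check whether a hardware FOE entry carries the same flow as a software
 * entry: the UDP bit in IB1 must match, and so must the tuple data between
 * IB1 and IB2.
 */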
static bool
mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
		     struct mtk_foe_entry *data)
{
	int type, len;

	if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
		return false;

	type = mtk_get_ib1_pkt_type(eth, entry->data.ib1);
	if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		len = offsetof(struct mtk_foe_entry, ipv6._rsv);
	else
		len = offsetof(struct mtk_foe_entry, ipv4.ib2);

	return !memcmp(&entry->data.data, &data->data, len - 4);
}

static void
__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	struct hlist_head *head;
	struct hlist_node *tmp;

	if (entry->type == MTK_FLOW_TYPE_L2) {
		rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
				       mtk_flow_l2_ht_params);

		head = &entry->l2_flows;
		hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
			__mtk_foe_entry_clear(ppe, entry);
		return;
	}

	hlist_del_init(&entry->list);
	if (entry->hash != 0xffff) {
		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);

		hwe->ib1 &= ~MTK_FOE_IB1_STATE;
		hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
		dma_wmb();
	}
	entry->hash = 0xffff;

	if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
		return;

	hlist_del_init(&entry->l2_data.list);
	kfree(entry);
}

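/* Idle time of an entry in hardware timestamp ticks, accounting for
 * wraparound of the timestamp counter.
 */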
static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
{
	u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
	u16 now = mtk_eth_timestamp(ppe->eth);
	u16 timestamp = ib1 & ib1_ts_mask;

	if (timestamp > now)
		return ib1_ts_mask + 1 - timestamp + now;
	else
		return now - timestamp;
}

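/* Refresh the timestamp of an L2 flow from its most recently used hardware
 * subflow, and drop subflows whose hardware entries are no longer in BIND
 * state.
 */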
static void
mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
	struct mtk_flow_entry *cur;
	struct mtk_foe_entry *hwe;
	struct hlist_node *tmp;
	int idle;

	idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
	hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
		int cur_idle;
		u32 ib1;

		hwe = mtk_foe_get_entry(ppe, cur->hash);
		ib1 = READ_ONCE(hwe->ib1);

		if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
			cur->hash = 0xffff;
			__mtk_foe_entry_clear(ppe, cur);
			continue;
		}

		cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
		if (cur_idle >= idle)
			continue;

		idle = cur_idle;
		entry->data.ib1 &= ~ib1_ts_mask;
		entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
	}
}

static void
mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	struct mtk_foe_entry foe = {};
	struct mtk_foe_entry *hwe;

	spin_lock_bh(&ppe_lock);

	if (entry->type == MTK_FLOW_TYPE_L2) {
		mtk_flow_entry_update_l2(ppe, entry);
		goto out;
	}

	if (entry->hash == 0xffff)
		goto out;

	hwe = mtk_foe_get_entry(ppe, entry->hash);
	memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size);
	if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) {
		entry->hash = 0xffff;
		goto out;
	}

	entry->data.ib1 = foe.ib1;

out:
	spin_unlock_bh(&ppe_lock);
}

static void
__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
		       u16 hash)
{
	struct mtk_eth *eth = ppe->eth;
	u16 timestamp = mtk_eth_timestamp(eth);
	struct mtk_foe_entry *hwe;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2;
		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2,
					 timestamp);
	} else {
		entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP,
					 timestamp);
	}

	hwe = mtk_foe_get_entry(ppe, hash);
	memcpy(&hwe->data, &entry->data, eth->soc->foe_entry_size - sizeof(hwe->ib1));
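	/* write the entry body first; the ib1 write below publishes it to
	 * hardware by flipping the state to BIND
	 */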
	wmb();
	hwe->ib1 = entry->ib1;

	dma_wmb();

	mtk_ppe_cache_clear(ppe);
}

void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	spin_lock_bh(&ppe_lock);
	__mtk_foe_entry_clear(ppe, entry);
	spin_unlock_bh(&ppe_lock);
}

static int
mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	entry->type = MTK_FLOW_TYPE_L2;

	return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node,
				      mtk_flow_l2_ht_params);
}

int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	int type = mtk_get_ib1_pkt_type(ppe->eth, entry->data.ib1);
	u32 hash;

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return mtk_foe_entry_commit_l2(ppe, entry);

	hash = mtk_ppe_hash_entry(ppe->eth, &entry->data);
	entry->hash = 0xffff;
	spin_lock_bh(&ppe_lock);
	hlist_add_head(&entry->list, &ppe->foe_flow[hash / soc->hash_offset]);
	spin_unlock_bh(&ppe_lock);

	return 0;
}

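/* Materialize a per-hash hardware entry for an L2 flow: keep the tuple the
 * hardware has already filled in for this hash, and apply the L2 info and
 * IB2 from the base flow on top of it.
 */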
static void
mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
			     u16 hash)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	struct mtk_flow_entry *flow_info;
	struct mtk_foe_entry foe = {}, *hwe;
	struct mtk_foe_mac_info *l2;
	u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
	int type;

	flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
			    GFP_ATOMIC);
	if (!flow_info)
		return;

	flow_info->l2_data.base_flow = entry;
	flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
	flow_info->hash = hash;
	hlist_add_head(&flow_info->list,
		       &ppe->foe_flow[hash / soc->hash_offset]);
	hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);

	hwe = mtk_foe_get_entry(ppe, hash);
	memcpy(&foe, hwe, soc->foe_entry_size);
	foe.ib1 &= ib1_mask;
	foe.ib1 |= entry->data.ib1 & ~ib1_mask;

	l2 = mtk_foe_entry_l2(ppe->eth, &foe);
	memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));

	type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1);
	if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
		memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
	else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
		l2->etype = ETH_P_IPV6;

	*mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2;

	__mtk_foe_entry_commit(ppe, &foe, hash);
}

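/* Called for received packets whose FOE entry at @hash is not yet bound: try
 * to bind a matching software flow to this hash slot, or create an L2
 * subflow for bridged traffic keyed on the packet's MAC addresses and VLAN.
 */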
void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	struct hlist_head *head = &ppe->foe_flow[hash / soc->hash_offset];
	struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash);
	struct mtk_flow_entry *entry;
	struct mtk_foe_bridge key = {};
	struct hlist_node *n;
	struct ethhdr *eh;
	bool found = false;
	u8 *tag;

	spin_lock_bh(&ppe_lock);

	if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
		goto out;

	hlist_for_each_entry_safe(entry, n, head, list) {
		if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
			if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
				     MTK_FOE_STATE_BIND))
				continue;

			entry->hash = 0xffff;
			__mtk_foe_entry_clear(ppe, entry);
			continue;
		}

		if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) {
			if (entry->hash != 0xffff)
				entry->hash = 0xffff;
			continue;
		}

		entry->hash = hash;
		__mtk_foe_entry_commit(ppe, &entry->data, hash);
		found = true;
	}

	if (found)
		goto out;

	eh = eth_hdr(skb);
	ether_addr_copy(key.dest_mac, eh->h_dest);
	ether_addr_copy(key.src_mac, eh->h_source);
	tag = skb->data - 2;
	key.vlan = 0;
	switch (skb->protocol) {
#if IS_ENABLED(CONFIG_NET_DSA)
	case htons(ETH_P_XDSA):
		if (!netdev_uses_dsa(skb->dev) ||
		    skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
			goto out;

		tag += 4;
		if (get_unaligned_be16(tag) != ETH_P_8021Q)
			break;

		fallthrough;
#endif
	case htons(ETH_P_8021Q):
		key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
		break;
	default:
		break;
	}

	entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
	if (!entry)
		goto out;

	mtk_foe_entry_commit_subflow(ppe, entry, hash);

out:
	spin_unlock_bh(&ppe_lock);
}

int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	mtk_flow_entry_update(ppe, entry);

	return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
}

struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
			     int version, int index)
{
	const struct mtk_soc_data *soc = eth->soc;
	struct device *dev = eth->dev;
	struct mtk_ppe *ppe;
	u32 foe_flow_size;
	void *foe;

	ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
	if (!ppe)
		return NULL;

	rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);

	/* need to allocate a separate device, since the PPE DMA access is
	 * not coherent.
	 */
	ppe->base = base;
	ppe->eth = eth;
	ppe->dev = dev;
	ppe->version = version;

	foe = dmam_alloc_coherent(ppe->dev,
				  MTK_PPE_ENTRIES * soc->foe_entry_size,
				  &ppe->foe_phys, GFP_KERNEL);
	if (!foe)
		goto err_free_l2_flows;

	ppe->foe_table = foe;

	foe_flow_size = (MTK_PPE_ENTRIES / soc->hash_offset) *
			sizeof(*ppe->foe_flow);
	ppe->foe_flow = devm_kzalloc(dev, foe_flow_size, GFP_KERNEL);
	if (!ppe->foe_flow)
		goto err_free_l2_flows;

	mtk_ppe_debugfs_init(ppe, index);

	return ppe;

err_free_l2_flows:
	rhashtable_destroy(&ppe->l2_flows);
	return NULL;
}

void mtk_ppe_deinit(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) {
		if (!eth->ppe[i])
			return;
		rhashtable_destroy(&eth->ppe[i]->l2_flows);
	}
}

static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
{
	static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
	int i, k;

	memset(ppe->foe_table, 0,
	       MTK_PPE_ENTRIES * ppe->eth->soc->foe_entry_size);

	if (!IS_ENABLED(CONFIG_SOC_MT7621))
		return;

	/* skip all entries that cross the 1024 byte boundary */
	for (i = 0; i < MTK_PPE_ENTRIES; i += 128) {
		for (k = 0; k < ARRAY_SIZE(skip); k++) {
			struct mtk_foe_entry *hwe;

			hwe = mtk_foe_get_entry(ppe, i + skip[k]);
			hwe->ib1 |= MTK_FOE_IB1_STATIC;
		}
	}
}

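/* Program the table base and size, hash/aging/keepalive behaviour and the
 * binding rate limits, then enable the offload engine.
 */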
void mtk_ppe_start(struct mtk_ppe *ppe)
{
	u32 val;

	if (!ppe)
		return;

	mtk_ppe_init_foe_table(ppe);
	ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);

	val = MTK_PPE_TB_CFG_ENTRY_80B |
	      MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN |
	      FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS,
			 MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) |
	      FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE,
			 MTK_PPE_KEEPALIVE_DISABLE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
	      FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
			 MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
			 MTK_PPE_ENTRIES_SHIFT);
	if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
		val |= MTK_PPE_TB_CFG_INFO_SEL;
	ppe_w32(ppe, MTK_PPE_TB_CFG, val);

	ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
		MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6);

	mtk_ppe_cache_enable(ppe, true);

	val = MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_6RD |
	      MTK_PPE_FLOW_CFG_IP4_NAT |
	      MTK_PPE_FLOW_CFG_IP4_NAPT |
	      MTK_PPE_FLOW_CFG_IP4_DSLITE |
	      MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
	if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
		val |= MTK_PPE_MD_TOAP_BYP_CRSN0 |
		       MTK_PPE_MD_TOAP_BYP_CRSN1 |
		       MTK_PPE_MD_TOAP_BYP_CRSN2 |
		       MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY;
	else
		val |= MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
		       MTK_PPE_FLOW_CFG_IP4_UDP_FRAG;
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);

	val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
	      FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3);
	ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) |
	      FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_AGE0, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
	      FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7);
	ppe_w32(ppe, MTK_PPE_BIND_AGE1, val);

	val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF;
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val);

	val = MTK_PPE_BIND_LIMIT1_FULL |
	      FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val);

	val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) |
	      FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1);
	ppe_w32(ppe, MTK_PPE_BIND_RATE, val);

	/* enable PPE */
	val = MTK_PPE_GLO_CFG_EN |
	      MTK_PPE_GLO_CFG_IP4_L4_CS_DROP |
	      MTK_PPE_GLO_CFG_IP4_CS_DROP |
	      MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE;
	ppe_w32(ppe, MTK_PPE_GLO_CFG, val);

	ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);

	if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2)) {
		ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777);
		ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f);
	}
}

int mtk_ppe_stop(struct mtk_ppe *ppe)
{
	u32 val;
	int i;

	if (!ppe)
		return 0;

	for (i = 0; i < MTK_PPE_ENTRIES; i++) {
		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, i);

		hwe->ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
				      MTK_FOE_STATE_INVALID);
	}

	mtk_ppe_cache_enable(ppe, false);

	/* disable offload engine */
	ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);

	/* disable aging */
	val = MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN;
	ppe_clear(ppe, MTK_PPE_TB_CFG, val);

	return mtk_ppe_wait_busy(ppe);
}