// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/dst_metadata.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
#include "mtk_ppe.h"
#include "mtk_ppe_regs.h"

static DEFINE_SPINLOCK(ppe_lock);

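/* L2 (bridge) flows are tracked in a software rhashtable so that several
 * hardware subflows can hang off a single bridge entry. The lookup key is
 * the leading part of struct mtk_foe_bridge, i.e. everything before
 * key_end (destination MAC, source MAC and VLAN, cf. the key built in
 * __mtk_ppe_check_skb() below); fields after key_end never take part in
 * the hash lookup.
 */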
static const struct rhashtable_params mtk_flow_l2_ht_params = {
	.head_offset = offsetof(struct mtk_flow_entry, l2_node),
	.key_offset = offsetof(struct mtk_flow_entry, data.bridge),
	.key_len = offsetof(struct mtk_foe_bridge, key_end),
	.automatic_shrinking = true,
};

static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	writel(val, ppe->base + reg);
}

static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg)
{
	return readl(ppe->base + reg);
}

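/* Read-modify-write helper: the bits in @mask are cleared before @set is
 * ORed in. ppe_set() and ppe_clear() below are thin wrappers, so e.g.
 *
 *	ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
 *
 * sets that one bit without disturbing the rest of the register.
 */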
static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set)
{
	u32 val;

	val = ppe_r32(ppe, reg);
	val &= ~mask;
	val |= set;
	ppe_w32(ppe, reg, val);

	return val;
}

static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, 0, val);
}

static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, val, 0);
}

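/* The frame engine keeps a free-running timestamp counter at offset
 * 0x0010. Only the bits covered by the IB1 timestamp mask are kept, so
 * the value wraps and comparisons must account for wraparound (see
 * __mtk_foe_entry_idle_time()).
 */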
static u32 mtk_eth_timestamp(struct mtk_eth *eth)
{
	return mtk_r32(eth, 0x0010) & mtk_get_ib1_ts_mask(eth);
}

static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
{
	int ret;
	u32 val;

	ret = readl_poll_timeout(ppe->base + MTK_PPE_GLO_CFG, val,
				 !(val & MTK_PPE_GLO_CFG_BUSY),
				 20, MTK_PPE_WAIT_TIMEOUT_US);

	if (ret)
		dev_err(ppe->dev, "PPE table busy\n");

	return ret;
}

static int mtk_ppe_mib_wait_busy(struct mtk_ppe *ppe)
{
	int ret;
	u32 val;

	ret = readl_poll_timeout(ppe->base + MTK_PPE_MIB_SER_CR, val,
				 !(val & MTK_PPE_MIB_SER_CR_ST),
				 20, MTK_PPE_WAIT_TIMEOUT_US);

	if (ret)
		dev_err(ppe->dev, "MIB table busy\n");

	return ret;
}

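/* Read one entry of the hardware MIB counter table: write the entry index
 * together with the start bit to MTK_PPE_MIB_SER_CR, wait for the start
 * bit to clear, then latch the counters from the serial read registers.
 * NETSYS v3 and newer return two full 64-bit counters; older revisions
 * pack a 48-bit byte counter and a 40-bit packet counter into R0..R2.
 */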
static int mtk_mib_entry_read(struct mtk_ppe *ppe, u16 index, u64 *bytes, u64 *packets)
{
	u32 val, cnt_r0, cnt_r1, cnt_r2;
	int ret;

	val = FIELD_PREP(MTK_PPE_MIB_SER_CR_ADDR, index) | MTK_PPE_MIB_SER_CR_ST;
	ppe_w32(ppe, MTK_PPE_MIB_SER_CR, val);

	ret = mtk_ppe_mib_wait_busy(ppe);
	if (ret)
		return ret;

	cnt_r0 = readl(ppe->base + MTK_PPE_MIB_SER_R0);
	cnt_r1 = readl(ppe->base + MTK_PPE_MIB_SER_R1);
	cnt_r2 = readl(ppe->base + MTK_PPE_MIB_SER_R2);

	if (mtk_is_netsys_v3_or_greater(ppe->eth)) {
		/* 64 bit for each counter */
		u32 cnt_r3 = readl(ppe->base + MTK_PPE_MIB_SER_R3);
		*bytes = ((u64)cnt_r1 << 32) | cnt_r0;
		*packets = ((u64)cnt_r3 << 32) | cnt_r2;
	} else {
		/* 48 bit byte counter, 40 bit packet counter */
		u32 byte_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R0_BYTE_CNT_LOW, cnt_r0);
		u32 byte_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R1_BYTE_CNT_HIGH, cnt_r1);
		u32 pkt_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R1_PKT_CNT_LOW, cnt_r1);
		u32 pkt_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH, cnt_r2);
		*bytes = ((u64)byte_cnt_high << 32) | byte_cnt_low;
		*packets = ((u64)pkt_cnt_high << 16) | pkt_cnt_low;
	}

	return 0;
}

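/* The PPE keeps a small lookup cache in front of the FOE table; it is
 * flushed by pulsing the CLEAR bit. Table updates that must become
 * visible to the hardware immediately (entry commit, invalidation) are
 * followed by such a flush.
 */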
static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
{
	ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
	ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
}

static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
{
	mtk_ppe_cache_clear(ppe);

	ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN,
		enable * MTK_PPE_CACHE_CTL_EN);
}

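/* Software counterpart of the hash the PPE uses to place a binding in the
 * FOE table, so the driver can predict which bucket a flow will land in.
 * Three 32-bit values derived from the flow tuple are mixed with a
 * bit-select and rotate, scaled by the SoC's bucket width (hash_offset)
 * and masked to the table size. Bridge, DS-Lite and 6RD entries are never
 * hashed by software, hence the WARN_ON_ONCE().
 */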
static u32 mtk_ppe_hash_entry(struct mtk_eth *eth, struct mtk_foe_entry *e)
{
	u32 hv1, hv2, hv3;
	u32 hash;

	switch (mtk_get_ib1_pkt_type(eth, e->ib1)) {
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		hv1 = e->ipv4.orig.ports;
		hv2 = e->ipv4.orig.dest_ip;
		hv3 = e->ipv4.orig.src_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
		hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3];
		hv1 ^= e->ipv6.ports;

		hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2];
		hv2 ^= e->ipv6.dest_ip[0];

		hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1];
		hv3 ^= e->ipv6.src_ip[0];
		break;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
	default:
		WARN_ON_ONCE(1);
		return MTK_PPE_HASH_MASK;
	}

	hash = (hv1 & hv2) | ((~hv1) & hv3);
	hash = (hash >> 24) | ((hash & 0xffffff) << 8);
	hash ^= hv1 ^ hv2 ^ hv3;
	hash ^= hash >> 16;
	hash <<= (ffs(eth->soc->hash_offset) - 1);
	hash &= MTK_PPE_ENTRIES - 1;

	return hash;
}

static inline struct mtk_foe_mac_info *
mtk_foe_entry_l2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return &entry->bridge.l2;

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.l2;

	return &entry->ipv4.l2;
}

static inline u32 *
mtk_foe_entry_ib2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return &entry->bridge.ib2;

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.ib2;

	return &entry->ipv4.ib2;
}

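/* Initialize a FOE entry for the given packet type: IB1 carries the bind
 * state, packet type and TTL/cache flags, IB2 the destination PSE port,
 * and the L2 block the MAC addresses and ethertype. For route-only types
 * the unused ports word is seeded with a 0xa5a5a5 pad pattern plus the
 * L4 protocol number.
 */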
int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			  int type, int l4proto, u8 pse_port, u8 *src_mac,
			  u8 *dest_mac)
{
	struct mtk_foe_mac_info *l2;
	u32 ports_pad, val;

	memset(entry, 0, sizeof(*entry));

	if (mtk_is_netsys_v2_or_greater(eth)) {
		val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
		      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) |
		      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
		      MTK_FOE_IB1_BIND_CACHE_V2 | MTK_FOE_IB1_BIND_TTL_V2;
		entry->ib1 = val;

		val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, pse_port) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_AG_V2, 0xf);
	} else {
		int port_mg = eth->soc->offload_version > 1 ? 0 : 0x3f;

		val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
		      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
		      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
		      MTK_FOE_IB1_BIND_CACHE | MTK_FOE_IB1_BIND_TTL;
		entry->ib1 = val;

		val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_MG, port_mg) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f);
	}

	if (is_multicast_ether_addr(dest_mac))
		val |= mtk_get_ib2_multicast_mask(eth);

	ports_pad = 0xa5a5a500 | (l4proto & 0xff);
	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		entry->ipv4.orig.ports = ports_pad;
	if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		entry->ipv6.ports = ports_pad;

	if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
		ether_addr_copy(entry->bridge.src_mac, src_mac);
		ether_addr_copy(entry->bridge.dest_mac, dest_mac);
		entry->bridge.ib2 = val;
		l2 = &entry->bridge.l2;
	} else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
		entry->ipv6.ib2 = val;
		l2 = &entry->ipv6.l2;
	} else {
		entry->ipv4.ib2 = val;
		l2 = &entry->ipv4.l2;
	}

	l2->dest_mac_hi = get_unaligned_be32(dest_mac);
	l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4);
	l2->src_mac_hi = get_unaligned_be32(src_mac);
	l2->src_mac_lo = get_unaligned_be16(src_mac + 4);

	if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		l2->etype = ETH_P_IPV6;
	else
		l2->etype = ETH_P_IP;

	return 0;
}

int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
			       struct mtk_foe_entry *entry, u8 port)
{
	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
	u32 val = *ib2;

	if (mtk_is_netsys_v2_or_greater(eth)) {
		val &= ~MTK_FOE_IB2_DEST_PORT_V2;
		val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port);
	} else {
		val &= ~MTK_FOE_IB2_DEST_PORT;
		val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
	}
	*ib2 = val;

	return 0;
}

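/* Set the IPv4 tuple of an entry. HNAPT flows hold two tuples: @egress
 * selects between the translated addresses/ports (ipv4.new) and the
 * original ones (ipv4.orig). Pure route entries carry no L4 ports, and
 * for 6RD only the tunnel endpoints are taken from here.
 */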
int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth,
				 struct mtk_foe_entry *entry, bool egress,
				 __be32 src_addr, __be16 src_port,
				 __be32 dest_addr, __be16 dest_port)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
	struct mtk_ipv4_tuple *t;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		if (egress) {
			t = &entry->ipv4.new;
			break;
		}
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
		t = &entry->ipv4.orig;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr);
		entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr);
		return 0;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	t->src_ip = be32_to_cpu(src_addr);
	t->dest_ip = be32_to_cpu(dest_addr);

	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		return 0;

	t->src_port = be16_to_cpu(src_port);
	t->dest_port = be16_to_cpu(dest_port);

	return 0;
}

int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
				 struct mtk_foe_entry *entry,
				 __be32 *src_addr, __be16 src_port,
				 __be32 *dest_addr, __be16 dest_port)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
	u32 *src, *dest;
	int i;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
		src = entry->dslite.tunnel_src_ip;
		dest = entry->dslite.tunnel_dest_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6.src_port = be16_to_cpu(src_port);
		entry->ipv6.dest_port = be16_to_cpu(dest_port);
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
		src = entry->ipv6.src_ip;
		dest = entry->ipv6.dest_ip;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	for (i = 0; i < 4; i++)
		src[i] = be32_to_cpu(src_addr[i]);
	for (i = 0; i < 4; i++)
		dest[i] = be32_to_cpu(dest_addr[i]);

	return 0;
}

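/* For DSA egress the mtk_foe_mac_info etype field doubles as the switch
 * tag: BIT(port) selects the egress switch port, and BIT(8) marks a
 * following 802.1Q header when a VLAN layer is already present. This
 * corresponds to the MTK DSA special tag (DSA_TAG_PROTO_MTK).
 */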
int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			  int port)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	l2->etype = BIT(port);

	if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)))
		entry->ib1 |= mtk_prep_ib1_vlan_layer(eth, 1);
	else
		l2->etype |= BIT(8);

	entry->ib1 &= ~mtk_get_ib1_vlan_tag_mask(eth);

	return 0;
}

int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			   int vid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	switch (mtk_get_ib1_vlan_layer(eth, entry->ib1)) {
	case 0:
		entry->ib1 |= mtk_get_ib1_vlan_tag_mask(eth) |
			      mtk_prep_ib1_vlan_layer(eth, 1);
		l2->vlan1 = vid;
		return 0;
	case 1:
		if (!(entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth))) {
			l2->vlan1 = vid;
			l2->etype |= BIT(8);
		} else {
			l2->vlan2 = vid;
			entry->ib1 += mtk_prep_ib1_vlan_layer(eth, 1);
		}
		return 0;
	default:
		return -ENOSPC;
	}
}

int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			    int sid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)) ||
	    (entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth)))
		l2->etype = ETH_P_PPP_SES;

	entry->ib1 |= mtk_get_ib1_ppoe_mask(eth);
	l2->pppoe_id = sid;

	return 0;
}

int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			   int wdma_idx, int txq, int bss, int wcid,
			   bool amsdu_en)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);

	switch (eth->soc->version) {
	case 3:
		*ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
		*ib2 |=  FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
			 MTK_FOE_IB2_WDMA_WINFO_V2;
		l2->w3info = FIELD_PREP(MTK_FOE_WINFO_WCID_V3, wcid) |
			     FIELD_PREP(MTK_FOE_WINFO_BSS_V3, bss);
		l2->amsdu = FIELD_PREP(MTK_FOE_WINFO_AMSDU_EN, amsdu_en);
		break;
	case 2:
		*ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
		*ib2 |=  FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
			 MTK_FOE_IB2_WDMA_WINFO_V2;
		l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) |
			    FIELD_PREP(MTK_FOE_WINFO_BSS, bss);
		break;
	default:
		*ib2 &= ~MTK_FOE_IB2_PORT_MG;
		*ib2 |= MTK_FOE_IB2_WDMA_WINFO;
		if (wdma_idx)
			*ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
		l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
			    FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
			    FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
		break;
	}

	return 0;
}

int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			    unsigned int queue)
{
	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);

	if (mtk_is_netsys_v2_or_greater(eth)) {
		*ib2 &= ~MTK_FOE_IB2_QID_V2;
		*ib2 |= FIELD_PREP(MTK_FOE_IB2_QID_V2, queue);
		*ib2 |= MTK_FOE_IB2_PSE_QOS_V2;
	} else {
		*ib2 &= ~MTK_FOE_IB2_QID;
		*ib2 |= FIELD_PREP(MTK_FOE_IB2_QID, queue);
		*ib2 |= MTK_FOE_IB2_PSE_QOS;
	}

	return 0;
}

static bool
mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
		     struct mtk_foe_entry *data)
{
	int type, len;

	if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
		return false;

	type = mtk_get_ib1_pkt_type(eth, entry->data.ib1);
	if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		len = offsetof(struct mtk_foe_entry, ipv6._rsv);
	else
		len = offsetof(struct mtk_foe_entry, ipv4.ib2);

	return !memcmp(&entry->data.data, &data->data, len - 4);
}

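/* Detach a software flow from the hardware table. L2 flows first drop
 * their rhashtable entry and then recursively clear every subflow that
 * was materialized from them. A still-bound hardware entry is marked
 * INVALID; dma_wmb() orders that state change before the cache flush so
 * the PPE cannot keep using a stale binding. Must be called with
 * ppe_lock held.
 */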
static void
__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	struct hlist_head *head;
	struct hlist_node *tmp;

	if (entry->type == MTK_FLOW_TYPE_L2) {
		rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
				       mtk_flow_l2_ht_params);

		head = &entry->l2_flows;
		hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
			__mtk_foe_entry_clear(ppe, entry);
		return;
	}

	hlist_del_init(&entry->list);
	if (entry->hash != 0xffff) {
		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);

		hwe->ib1 &= ~MTK_FOE_IB1_STATE;
		hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
		dma_wmb();
		mtk_ppe_cache_clear(ppe);

		if (ppe->accounting) {
			struct mtk_foe_accounting *acct;

			acct = ppe->acct_table + entry->hash * sizeof(*acct);
			acct->packets = 0;
			acct->bytes = 0;
		}
	}
	entry->hash = 0xffff;

	if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
		return;

	hlist_del_init(&entry->l2_data.list);
	kfree(entry);
}

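/* Idle time in timestamp ticks, tolerating counter wraparound: a stored
 * timestamp "ahead" of the current counter means the counter has wrapped
 * once. E.g. assuming a 16-bit timestamp field, now = 5 and
 * timestamp = 0xfffe gives 0xffff + 1 - 0xfffe + 5 = 7 ticks.
 */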
static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
{
	u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
	u16 now = mtk_eth_timestamp(ppe->eth);
	u16 timestamp = ib1 & ib1_ts_mask;

	if (timestamp > now)
		return ib1_ts_mask + 1 - timestamp + now;
	else
		return now - timestamp;
}

static void
mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
	struct mtk_flow_entry *cur;
	struct mtk_foe_entry *hwe;
	struct hlist_node *tmp;
	int idle;

	idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
	hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
		int cur_idle;
		u32 ib1;

		hwe = mtk_foe_get_entry(ppe, cur->hash);
		ib1 = READ_ONCE(hwe->ib1);

		if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
			cur->hash = 0xffff;
			__mtk_foe_entry_clear(ppe, cur);
			continue;
		}

		cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
		if (cur_idle >= idle)
			continue;

		idle = cur_idle;
		entry->data.ib1 &= ~ib1_ts_mask;
		entry->data.ib1 |= ib1 & ib1_ts_mask;
	}
}

static void
mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	struct mtk_foe_entry foe = {};
	struct mtk_foe_entry *hwe;

	spin_lock_bh(&ppe_lock);

	if (entry->type == MTK_FLOW_TYPE_L2) {
		mtk_flow_entry_update_l2(ppe, entry);
		goto out;
	}

	if (entry->hash == 0xffff)
		goto out;

	hwe = mtk_foe_get_entry(ppe, entry->hash);
	memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size);
	if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) {
		entry->hash = 0xffff;
		goto out;
	}

	entry->data.ib1 = foe.ib1;

out:
	spin_unlock_bh(&ppe_lock);
}

static void
__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
		       u16 hash)
{
	struct mtk_eth *eth = ppe->eth;
	u16 timestamp = mtk_eth_timestamp(eth);
	struct mtk_foe_entry *hwe;
	u32 val;

	if (mtk_is_netsys_v2_or_greater(eth)) {
		entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2;
		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2,
					 timestamp);
	} else {
		entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP,
					 timestamp);
	}

	hwe = mtk_foe_get_entry(ppe, hash);
	memcpy(&hwe->data, &entry->data, eth->soc->foe_entry_size - sizeof(hwe->ib1));
	wmb();
	hwe->ib1 = entry->ib1;

	if (ppe->accounting) {
		if (mtk_is_netsys_v2_or_greater(eth))
			val = MTK_FOE_IB2_MIB_CNT_V2;
		else
			val = MTK_FOE_IB2_MIB_CNT;
		*mtk_foe_entry_ib2(eth, hwe) |= val;
	}

	dma_wmb();

	mtk_ppe_cache_clear(ppe);
}

void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	spin_lock_bh(&ppe_lock);
	__mtk_foe_entry_clear(ppe, entry);
	spin_unlock_bh(&ppe_lock);
}

static int
mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	struct mtk_flow_entry *prev;

	entry->type = MTK_FLOW_TYPE_L2;

	prev = rhashtable_lookup_get_insert_fast(&ppe->l2_flows, &entry->l2_node,
						 mtk_flow_l2_ht_params);
	if (likely(!prev))
		return 0;

	if (IS_ERR(prev))
		return PTR_ERR(prev);

	return rhashtable_replace_fast(&ppe->l2_flows, &prev->l2_node,
				       &entry->l2_node, mtk_flow_l2_ht_params);
}

int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	int type = mtk_get_ib1_pkt_type(ppe->eth, entry->data.ib1);
	u32 hash;

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return mtk_foe_entry_commit_l2(ppe, entry);

	hash = mtk_ppe_hash_entry(ppe->eth, &entry->data);
	entry->hash = 0xffff;
	spin_lock_bh(&ppe_lock);
	hlist_add_head(&entry->list, &ppe->foe_flow[hash / soc->hash_offset]);
	spin_unlock_bh(&ppe_lock);

	return 0;
}

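/* Materialize a hardware entry for one subflow of an L2 (bridge) flow:
 * clone the unbound hardware entry at @hash, keep only its packet type
 * and UDP bit, overlay the L2/IB2 data stored in the bridge template,
 * and commit the result back at the same hash. The tracking entry is
 * allocated GFP_ATOMIC since this runs from the RX path under ppe_lock.
 */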
static void
mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
			     u16 hash)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	struct mtk_flow_entry *flow_info;
	struct mtk_foe_entry foe = {}, *hwe;
	struct mtk_foe_mac_info *l2;
	u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
	int type;

	flow_info = kzalloc(sizeof(*flow_info), GFP_ATOMIC);
	if (!flow_info)
		return;

	flow_info->l2_data.base_flow = entry;
	flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
	flow_info->hash = hash;
	hlist_add_head(&flow_info->list,
		       &ppe->foe_flow[hash / soc->hash_offset]);
	hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);

	hwe = mtk_foe_get_entry(ppe, hash);
	memcpy(&foe, hwe, soc->foe_entry_size);
	foe.ib1 &= ib1_mask;
	foe.ib1 |= entry->data.ib1 & ~ib1_mask;

	l2 = mtk_foe_entry_l2(ppe->eth, &foe);
	memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));

	type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1);
	if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
		memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
	else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
		l2->etype = ETH_P_IPV6;

	*mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2;

	__mtk_foe_entry_commit(ppe, &foe, hash);
}

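/* Called from the RX path when the PPE reports a candidate hash for an
 * skb. If a software flow matching the unbound hardware entry exists it
 * is bound at that hash; otherwise the (possibly DSA- or VLAN-tagged)
 * source/destination MACs are looked up in the L2 flow table and a
 * subflow is created on the fly.
 */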
void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	struct hlist_head *head = &ppe->foe_flow[hash / soc->hash_offset];
	struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash);
	struct mtk_flow_entry *entry;
	struct mtk_foe_bridge key = {};
	struct hlist_node *n;
	struct ethhdr *eh;
	bool found = false;
	u8 *tag;

	spin_lock_bh(&ppe_lock);

	if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
		goto out;

	hlist_for_each_entry_safe(entry, n, head, list) {
		if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
			if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
				     MTK_FOE_STATE_BIND))
				continue;

			entry->hash = 0xffff;
			__mtk_foe_entry_clear(ppe, entry);
			continue;
		}

		if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) {
			if (entry->hash != 0xffff)
				entry->hash = 0xffff;
			continue;
		}

		entry->hash = hash;
		__mtk_foe_entry_commit(ppe, &entry->data, hash);
		found = true;
	}

	if (found)
		goto out;

	eh = eth_hdr(skb);
	ether_addr_copy(key.dest_mac, eh->h_dest);
	ether_addr_copy(key.src_mac, eh->h_source);
	tag = skb->data - 2;
	key.vlan = 0;
	switch (skb->protocol) {
#if IS_ENABLED(CONFIG_NET_DSA)
	case htons(ETH_P_XDSA):
		if (!netdev_uses_dsa(skb->dev) ||
		    skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
			goto out;

		if (!skb_metadata_dst(skb))
			tag += 4;

		if (get_unaligned_be16(tag) != ETH_P_8021Q)
			break;

		fallthrough;
#endif
	case htons(ETH_P_8021Q):
		key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
		break;
	default:
		break;
	}

	entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
	if (!entry)
		goto out;

	mtk_foe_entry_commit_subflow(ppe, entry, hash);

out:
	spin_unlock_bh(&ppe_lock);
}

int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	mtk_flow_entry_update(ppe, entry);

	return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
}

int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
{
	if (!ppe)
		return -EINVAL;

	/* disable KA */
	ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
	ppe_clear(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
	ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0);
	usleep_range(10000, 11000);

	/* set KA timer to maximum */
	ppe_set(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
	ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0xffffffff);

	/* set KA tick select */
	ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_TICK_SEL);
	ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
	usleep_range(10000, 11000);

	/* disable scan mode */
	ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_SCAN_MODE);
	usleep_range(10000, 11000);

	return mtk_ppe_wait_busy(ppe);
}

struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index,
						 struct mtk_foe_accounting *diff)
{
	struct mtk_foe_accounting *acct;
	int size = sizeof(struct mtk_foe_accounting);
	u64 bytes, packets;

	if (!ppe->accounting)
		return NULL;

	if (mtk_mib_entry_read(ppe, index, &bytes, &packets))
		return NULL;

	acct = ppe->acct_table + index * size;

	acct->bytes += bytes;
	acct->packets += packets;

	if (diff) {
		diff->bytes = bytes;
		diff->packets = packets;
	}

	return acct;
}

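/* Allocate and set up one PPE instance. The FOE table and, when
 * accounting is supported, the MIB counter table live in DMA-coherent
 * memory since the PPE accesses them directly; foe_flow is a purely
 * software array with one flow list per hash bucket
 * (MTK_PPE_ENTRIES / hash_offset buckets). Returns NULL on any
 * allocation failure.
 */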
struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int index)
{
	bool accounting = eth->soc->has_accounting;
	const struct mtk_soc_data *soc = eth->soc;
	struct mtk_foe_accounting *acct;
	struct device *dev = eth->dev;
	struct mtk_mib_entry *mib;
	struct mtk_ppe *ppe;
	u32 foe_flow_size;
	void *foe;

	ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
	if (!ppe)
		return NULL;

	rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);

	/* the FOE table needs separate, DMA-coherent memory since the PPE's
	 * DMA access to it is not cache-coherent.
	 */
	ppe->base = base;
	ppe->eth = eth;
	ppe->dev = dev;
	ppe->version = eth->soc->offload_version;
	ppe->accounting = accounting;

	foe = dmam_alloc_coherent(ppe->dev,
				  MTK_PPE_ENTRIES * soc->foe_entry_size,
				  &ppe->foe_phys, GFP_KERNEL);
	if (!foe)
		goto err_free_l2_flows;

	ppe->foe_table = foe;

	foe_flow_size = (MTK_PPE_ENTRIES / soc->hash_offset) *
			sizeof(*ppe->foe_flow);
	ppe->foe_flow = devm_kzalloc(dev, foe_flow_size, GFP_KERNEL);
	if (!ppe->foe_flow)
		goto err_free_l2_flows;

	if (accounting) {
		mib = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*mib),
					  &ppe->mib_phys, GFP_KERNEL);
		if (!mib)
			goto err_free_l2_flows;

		ppe->mib_table = mib;

		acct = devm_kzalloc(dev, MTK_PPE_ENTRIES * sizeof(*acct),
				    GFP_KERNEL);
		if (!acct)
			goto err_free_l2_flows;

		ppe->acct_table = acct;
	}

	mtk_ppe_debugfs_init(ppe, index);

	return ppe;

err_free_l2_flows:
	rhashtable_destroy(&ppe->l2_flows);
	return NULL;
}


void mtk_ppe_deinit(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) {
		if (!eth->ppe[i])
			return;
		rhashtable_destroy(&eth->ppe[i]->l2_flows);
	}
}

static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
{
	static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
	int i, k;

	memset(ppe->foe_table, 0,
	       MTK_PPE_ENTRIES * ppe->eth->soc->foe_entry_size);

	if (!IS_ENABLED(CONFIG_SOC_MT7621))
		return;

	/* skip all entries that cross the 1024 byte boundary */
	for (i = 0; i < MTK_PPE_ENTRIES; i += 128) {
		for (k = 0; k < ARRAY_SIZE(skip); k++) {
			struct mtk_foe_entry *hwe;

			hwe = mtk_foe_get_entry(ppe, i + skip[k]);
			hwe->ib1 |= MTK_FOE_IB1_STATIC;
		}
	}
}

void mtk_ppe_start(struct mtk_ppe *ppe)
{
	u32 val;

	if (!ppe)
		return;

	mtk_ppe_init_foe_table(ppe);
	ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);

	val = MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN |
	      FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS,
			 MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) |
	      FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE,
			 MTK_PPE_KEEPALIVE_DISABLE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
	      FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
			 MTK_PPE_SCAN_MODE_CHECK_AGE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
			 MTK_PPE_ENTRIES_SHIFT);
	if (mtk_is_netsys_v2_or_greater(ppe->eth))
		val |= MTK_PPE_TB_CFG_INFO_SEL;
	if (!mtk_is_netsys_v3_or_greater(ppe->eth))
		val |= MTK_PPE_TB_CFG_ENTRY_80B;
	ppe_w32(ppe, MTK_PPE_TB_CFG, val);

	ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
		MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6);

	mtk_ppe_cache_enable(ppe, true);

	val = MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_6RD |
	      MTK_PPE_FLOW_CFG_IP4_NAT |
	      MTK_PPE_FLOW_CFG_IP4_NAPT |
	      MTK_PPE_FLOW_CFG_IP4_DSLITE |
	      MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
	if (mtk_is_netsys_v2_or_greater(ppe->eth))
		val |= MTK_PPE_MD_TOAP_BYP_CRSN0 |
		       MTK_PPE_MD_TOAP_BYP_CRSN1 |
		       MTK_PPE_MD_TOAP_BYP_CRSN2 |
		       MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY;
	else
		val |= MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
		       MTK_PPE_FLOW_CFG_IP4_UDP_FRAG;
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);

	val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
	      FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3);
	ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) |
	      FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_AGE0, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
	      FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7);
	ppe_w32(ppe, MTK_PPE_BIND_AGE1, val);

	val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF;
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val);

	val = MTK_PPE_BIND_LIMIT1_FULL |
	      FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val);

	val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) |
	      FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1);
	ppe_w32(ppe, MTK_PPE_BIND_RATE, val);

	/* enable PPE */
	val = MTK_PPE_GLO_CFG_EN |
	      MTK_PPE_GLO_CFG_IP4_L4_CS_DROP |
	      MTK_PPE_GLO_CFG_IP4_CS_DROP |
	      MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE;
	ppe_w32(ppe, MTK_PPE_GLO_CFG, val);

	ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);

	if (mtk_is_netsys_v2_or_greater(ppe->eth)) {
		ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777);
		ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f);
	}

	if (ppe->accounting && ppe->mib_phys) {
		ppe_w32(ppe, MTK_PPE_MIB_TB_BASE, ppe->mib_phys);
		ppe_m32(ppe, MTK_PPE_MIB_CFG, MTK_PPE_MIB_CFG_EN,
			MTK_PPE_MIB_CFG_EN);
		ppe_m32(ppe, MTK_PPE_MIB_CFG, MTK_PPE_MIB_CFG_RD_CLR,
			MTK_PPE_MIB_CFG_RD_CLR);
		ppe_m32(ppe, MTK_PPE_MIB_CACHE_CTL, MTK_PPE_MIB_CACHE_CTL_EN,
			MTK_PPE_MIB_CACHE_CTL_EN);
	}
}

int mtk_ppe_stop(struct mtk_ppe *ppe)
{
	u32 val;
	int i;

	if (!ppe)
		return 0;

	for (i = 0; i < MTK_PPE_ENTRIES; i++) {
		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, i);

		hwe->ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
				      MTK_FOE_STATE_INVALID);
	}

	mtk_ppe_cache_enable(ppe, false);

	/* disable aging */
	val = MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN |
	      MTK_PPE_TB_CFG_SCAN_MODE;
	ppe_clear(ppe, MTK_PPE_TB_CFG, val);

	if (mtk_ppe_wait_busy(ppe))
		return -ETIMEDOUT;

	/* disable offload engine */
	ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);

	return 0;
}