// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
 * stmmac TC Handling (HW only)
 */

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include "common.h"
#include "dwmac4.h"
#include "dwmac5.h"
#include "stmmac.h"

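/* The last FRP (Flexible Receive Parser) entry is reserved as a catch-all
 * that accepts every frame, so traffic matching no user filter still
 * reaches the stack; tc_init() installs it in the final table slot.
 */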
static void tc_fill_all_pass_entry(struct stmmac_tc_entry *entry)
{
        memset(entry, 0, sizeof(*entry));
        entry->in_use = true;
        entry->is_last = true;
        entry->is_frag = false;
        entry->prio = ~0x0;
        entry->handle = 0;
        entry->val.match_data = 0x0;
        entry->val.match_en = 0x0;
        entry->val.af = 1;
        entry->val.dma_ch_no = 0x0;
}

static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv,
                                             struct tc_cls_u32_offload *cls,
                                             bool free)
{
        struct stmmac_tc_entry *entry, *first = NULL, *dup = NULL;
        u32 loc = cls->knode.handle;
        int i;

        for (i = 0; i < priv->tc_entries_max; i++) {
                entry = &priv->tc_entries[i];
                if (!entry->in_use && !first && free)
                        first = entry;
                if ((entry->handle == loc) && !free && !entry->is_frag)
                        dup = entry;
        }

        if (dup)
                return dup;
        if (first) {
                first->handle = loc;
                first->in_use = true;

                /* Reset HW values */
                memset(&first->val, 0, sizeof(first->val));
        }

        return first;
}

static int tc_fill_actions(struct stmmac_tc_entry *entry,
                           struct stmmac_tc_entry *frag,
                           struct tc_cls_u32_offload *cls)
{
        struct stmmac_tc_entry *action_entry = entry;
        const struct tc_action *act;
        struct tcf_exts *exts;
        int i;

        exts = cls->knode.exts;
        if (!tcf_exts_has_actions(exts))
                return -EINVAL;
        if (frag)
                action_entry = frag;

        tcf_exts_for_each_action(i, act, exts) {
                /* Accept */
                if (is_tcf_gact_ok(act)) {
                        action_entry->val.af = 1;
                        break;
                }
                /* Drop */
                if (is_tcf_gact_shot(act)) {
                        action_entry->val.rf = 1;
                        break;
                }

                /* Unsupported */
                return -EINVAL;
        }

        return 0;
}

static int tc_fill_entry(struct stmmac_priv *priv,
                         struct tc_cls_u32_offload *cls)
{
        struct stmmac_tc_entry *entry, *frag = NULL;
        struct tc_u32_sel *sel = cls->knode.sel;
        u32 off, data, mask, real_off, rem;
        u32 prio = cls->common.prio << 16;
        int ret;

        /* Only 1 match per entry */
        if (sel->nkeys <= 0 || sel->nkeys > 1)
                return -EINVAL;

        off = sel->keys[0].off << sel->offshift;
        data = sel->keys[0].val;
        mask = sel->keys[0].mask;

        switch (ntohs(cls->common.protocol)) {
        case ETH_P_ALL:
                break;
        case ETH_P_IP:
                off += ETH_HLEN;
                break;
        default:
                return -EINVAL;
        }

        if (off > priv->tc_off_max)
                return -EINVAL;

        real_off = off / 4;
        rem = off % 4;

        entry = tc_find_entry(priv, cls, true);
        if (!entry)
                return -EINVAL;

        if (rem) {
                frag = tc_find_entry(priv, cls, true);
                if (!frag) {
                        ret = -EINVAL;
                        goto err_unuse;
                }

                entry->frag_ptr = frag;
                entry->val.match_en = (mask << (rem * 8)) &
                        GENMASK(31, rem * 8);
                entry->val.match_data = (data << (rem * 8)) &
                        GENMASK(31, rem * 8);
                entry->val.frame_offset = real_off;
                entry->prio = prio;

                frag->val.match_en = (mask >> (rem * 8)) &
                        GENMASK(rem * 8 - 1, 0);
                frag->val.match_data = (data >> (rem * 8)) &
                        GENMASK(rem * 8 - 1, 0);
                frag->val.frame_offset = real_off + 1;
                frag->prio = prio;
                frag->is_frag = true;
        } else {
                entry->frag_ptr = NULL;
                entry->val.match_en = mask;
                entry->val.match_data = data;
                entry->val.frame_offset = real_off;
                entry->prio = prio;
        }

        ret = tc_fill_actions(entry, frag, cls);
        if (ret)
                goto err_unuse;

        return 0;

err_unuse:
        if (frag)
                frag->in_use = false;
        entry->in_use = false;
        return ret;
}
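
/* Worked example (illustrative): a 4-byte u32 key at byte offset 6 with
 * protocol ETH_P_ALL gives real_off = 1 and rem = 2. The low two key
 * bytes land in bits 31:16 of FRP word 1 via the main entry, and the
 * high two bytes land in bits 15:0 of word 2 via the frag entry, since
 * the parser only matches on 32-bit aligned words.
 */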

static void tc_unfill_entry(struct stmmac_priv *priv,
                            struct tc_cls_u32_offload *cls)
{
        struct stmmac_tc_entry *entry;

        entry = tc_find_entry(priv, cls, false);
        if (!entry)
                return;

        entry->in_use = false;
        if (entry->frag_ptr) {
                entry = entry->frag_ptr;
                entry->is_frag = false;
                entry->in_use = false;
        }
}

static int tc_config_knode(struct stmmac_priv *priv,
                           struct tc_cls_u32_offload *cls)
{
        int ret;

        ret = tc_fill_entry(priv, cls);
        if (ret)
                return ret;

        ret = stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
                        priv->tc_entries_max);
        if (ret)
                goto err_unfill;

        return 0;

err_unfill:
        tc_unfill_entry(priv, cls);
        return ret;
}

static int tc_delete_knode(struct stmmac_priv *priv,
                           struct tc_cls_u32_offload *cls)
{
        /* Set entry and fragments as not used */
        tc_unfill_entry(priv, cls);

        return stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
                                 priv->tc_entries_max);
}

static int tc_setup_cls_u32(struct stmmac_priv *priv,
                            struct tc_cls_u32_offload *cls)
{
        switch (cls->command) {
        case TC_CLSU32_REPLACE_KNODE:
                tc_unfill_entry(priv, cls);
                fallthrough;
        case TC_CLSU32_NEW_KNODE:
                return tc_config_knode(priv, cls);
        case TC_CLSU32_DELETE_KNODE:
                return tc_delete_knode(priv, cls);
        default:
                return -EOPNOTSUPP;
        }
}
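
/* Example (illustrative only; "eth0" and the match values are
 * placeholders): a u32 filter this code can offload to the FRP,
 * dropping frames whose 32-bit word at byte offset 4 equals 0xdeadbeef:
 *
 *   tc qdisc add dev eth0 ingress
 *   tc filter add dev eth0 parent ffff: protocol all \
 *       u32 match u32 0xdeadbeef 0xffffffff at 4 action drop
 */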

static int tc_rfs_init(struct stmmac_priv *priv)
{
        int i;

        priv->rfs_entries_max[STMMAC_RFS_T_VLAN] = 8;
        priv->rfs_entries_max[STMMAC_RFS_T_LLDP] = 1;
        priv->rfs_entries_max[STMMAC_RFS_T_1588] = 1;

        for (i = 0; i < STMMAC_RFS_T_MAX; i++)
                priv->rfs_entries_total += priv->rfs_entries_max[i];

        priv->rfs_entries = devm_kcalloc(priv->device,
                                         priv->rfs_entries_total,
                                         sizeof(*priv->rfs_entries),
                                         GFP_KERNEL);
        if (!priv->rfs_entries)
                return -ENOMEM;

        dev_info(priv->device, "Enabled RFS Flow TC (entries=%d)\n",
                 priv->rfs_entries_total);

        return 0;
}

static int tc_init(struct stmmac_priv *priv)
{
        struct dma_features *dma_cap = &priv->dma_cap;
        unsigned int count;
        int ret, i;

        if (dma_cap->l3l4fnum) {
                priv->flow_entries_max = dma_cap->l3l4fnum;
                priv->flow_entries = devm_kcalloc(priv->device,
                                                  dma_cap->l3l4fnum,
                                                  sizeof(*priv->flow_entries),
                                                  GFP_KERNEL);
                if (!priv->flow_entries)
                        return -ENOMEM;

                for (i = 0; i < priv->flow_entries_max; i++)
                        priv->flow_entries[i].idx = i;

                dev_info(priv->device, "Enabled L3L4 Flow TC (entries=%d)\n",
                         priv->flow_entries_max);
        }

        ret = tc_rfs_init(priv);
        if (ret)
                return ret;

        /* Fail silently as we can still use remaining features, e.g. CBS */
        if (!dma_cap->frpsel)
                return 0;

        switch (dma_cap->frpbs) {
        case 0x0:
                priv->tc_off_max = 64;
                break;
        case 0x1:
                priv->tc_off_max = 128;
                break;
        case 0x2:
                priv->tc_off_max = 256;
                break;
        default:
                return -EINVAL;
        }

        switch (dma_cap->frpes) {
        case 0x0:
                count = 64;
                break;
        case 0x1:
                count = 128;
                break;
        case 0x2:
                count = 256;
                break;
        default:
                return -EINVAL;
        }

        /* Reserve one last filter which lets all pass */
        priv->tc_entries_max = count;
        priv->tc_entries = devm_kcalloc(priv->device,
                        count, sizeof(*priv->tc_entries), GFP_KERNEL);
        if (!priv->tc_entries)
                return -ENOMEM;

        tc_fill_all_pass_entry(&priv->tc_entries[count - 1]);

        dev_info(priv->device, "Enabling HW TC (entries=%d, max_off=%d)\n",
                        priv->tc_entries_max, priv->tc_off_max);

        return 0;
}

static int tc_setup_cbs(struct stmmac_priv *priv,
                        struct tc_cbs_qopt_offload *qopt)
{
        u32 tx_queues_count = priv->plat->tx_queues_to_use;
        s64 port_transmit_rate_kbps;
        u32 queue = qopt->queue;
        u32 mode_to_use;
        u64 value;
        u32 ptr;
        int ret;

        /* Queue 0 is not AVB capable */
        if (queue <= 0 || queue >= tx_queues_count)
                return -EINVAL;
        if (!priv->dma_cap.av)
                return -EOPNOTSUPP;

        port_transmit_rate_kbps = qopt->idleslope - qopt->sendslope;

        if (qopt->enable) {
                /* Port Transmit Rate and Speed Divider */
                switch (div_s64(port_transmit_rate_kbps, 1000)) {
                case SPEED_10000:
                case SPEED_5000:
                        ptr = 32;
                        break;
                case SPEED_2500:
                case SPEED_1000:
                        ptr = 8;
                        break;
                case SPEED_100:
                        ptr = 4;
                        break;
                default:
                        netdev_err(priv->dev,
                                   "Invalid portTransmitRate %lld (idleSlope - sendSlope)\n",
                                   port_transmit_rate_kbps);
                        return -EINVAL;
                }
        } else {
                ptr = 0;
        }

        mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
        if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) {
                ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_AVB);
                if (ret)
                        return ret;

                priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
        } else if (!qopt->enable) {
                ret = stmmac_dma_qmode(priv, priv->ioaddr, queue,
                                       MTL_QUEUE_DCB);
                if (ret)
                        return ret;

                priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
                return 0;
        }

        /* Final adjustments for HW */
        value = div_s64(qopt->idleslope * 1024ll * ptr, port_transmit_rate_kbps);
        priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);

        value = div_s64(-qopt->sendslope * 1024ll * ptr, port_transmit_rate_kbps);
        priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0);

        value = qopt->hicredit * 1024ll * 8;
        priv->plat->tx_queues_cfg[queue].high_credit = value & GENMASK(31, 0);

        value = qopt->locredit * 1024ll * 8;
        priv->plat->tx_queues_cfg[queue].low_credit = value & GENMASK(31, 0);

        ret = stmmac_config_cbs(priv, priv->hw,
                                priv->plat->tx_queues_cfg[queue].send_slope,
                                priv->plat->tx_queues_cfg[queue].idle_slope,
                                priv->plat->tx_queues_cfg[queue].high_credit,
                                priv->plat->tx_queues_cfg[queue].low_credit,
                                queue);
        if (ret)
                return ret;

        dev_info(priv->device, "CBS queue %d: send %d, idle %d, hi %d, lo %d\n",
                        queue, qopt->sendslope, qopt->idleslope,
                        qopt->hicredit, qopt->locredit);
        return 0;
}
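
/* Example (illustrative; assumes a root mqprio qdisc with handle 100:,
 * interface name and numbers are placeholders): reserving ~20 Mbit/s on
 * AVB queue 1 of a 1 Gbit/s link. idleslope - sendslope = 1,000,000
 * kbit/s selects SPEED_1000 above, so ptr = 8 and the programmed
 * idle_slope becomes 20000 * 1024 * 8 / 1000000 = 163:
 *
 *   tc qdisc replace dev eth0 parent 100:2 cbs \
 *       idleslope 20000 sendslope -980000 \
 *       hicredit 30 locredit -1470 offload 1
 */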

static int tc_parse_flow_actions(struct stmmac_priv *priv,
                                 struct flow_action *action,
                                 struct stmmac_flow_entry *entry,
                                 struct netlink_ext_ack *extack)
{
        struct flow_action_entry *act;
        int i;

        if (!flow_action_has_entries(action))
                return -EINVAL;

        if (!flow_action_basic_hw_stats_check(action, extack))
                return -EOPNOTSUPP;

        flow_action_for_each(i, act, action) {
                switch (act->id) {
                case FLOW_ACTION_DROP:
                        entry->action |= STMMAC_FLOW_ACTION_DROP;
                        return 0;
                default:
                        break;
                }
        }
        /* Nothing to do, maybe an inverse filter? */
        return 0;
}

#define ETHER_TYPE_FULL_MASK    cpu_to_be16(~0)

static int tc_add_basic_flow(struct stmmac_priv *priv,
                             struct flow_cls_offload *cls,
                             struct stmmac_flow_entry *entry)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        struct flow_dissector *dissector = rule->match.dissector;
        struct flow_match_basic match;

        /* Nothing to do here */
        if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC))
                return -EINVAL;

        flow_rule_match_basic(rule, &match);

        entry->ip_proto = match.key->ip_proto;
        return 0;
}

static int tc_add_ip4_flow(struct stmmac_priv *priv,
                           struct flow_cls_offload *cls,
                           struct stmmac_flow_entry *entry)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        struct flow_dissector *dissector = rule->match.dissector;
        bool inv = entry->action & STMMAC_FLOW_ACTION_DROP;
        struct flow_match_ipv4_addrs match;
        u32 hw_match;
        int ret;

        /* Nothing to do here */
        if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS))
                return -EINVAL;

        flow_rule_match_ipv4_addrs(rule, &match);
        hw_match = ntohl(match.key->src) & ntohl(match.mask->src);
        if (hw_match) {
                ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true,
                                              false, true, inv, hw_match);
                if (ret)
                        return ret;
        }

        hw_match = ntohl(match.key->dst) & ntohl(match.mask->dst);
        if (hw_match) {
                ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true,
                                              false, false, inv, hw_match);
                if (ret)
                        return ret;
        }

        return 0;
}

static int tc_add_ports_flow(struct stmmac_priv *priv,
                             struct flow_cls_offload *cls,
                             struct stmmac_flow_entry *entry)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        struct flow_dissector *dissector = rule->match.dissector;
        bool inv = entry->action & STMMAC_FLOW_ACTION_DROP;
        struct flow_match_ports match;
        u32 hw_match;
        bool is_udp;
        int ret;

        /* Nothing to do here */
        if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS))
                return -EINVAL;

        switch (entry->ip_proto) {
        case IPPROTO_TCP:
                is_udp = false;
                break;
        case IPPROTO_UDP:
                is_udp = true;
                break;
        default:
                return -EINVAL;
        }

        flow_rule_match_ports(rule, &match);

        hw_match = ntohs(match.key->src) & ntohs(match.mask->src);
        if (hw_match) {
                ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true,
                                              is_udp, true, inv, hw_match);
                if (ret)
                        return ret;
        }

        hw_match = ntohs(match.key->dst) & ntohs(match.mask->dst);
        if (hw_match) {
                ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true,
                                              is_udp, false, inv, hw_match);
                if (ret)
                        return ret;
        }

        entry->is_l4 = true;
        return 0;
}

static struct stmmac_flow_entry *tc_find_flow(struct stmmac_priv *priv,
                                              struct flow_cls_offload *cls,
                                              bool get_free)
{
        int i;

        for (i = 0; i < priv->flow_entries_max; i++) {
                struct stmmac_flow_entry *entry = &priv->flow_entries[i];

                if (entry->cookie == cls->cookie)
                        return entry;
                if (get_free && (entry->in_use == false))
                        return entry;
        }

        return NULL;
}

static struct {
        int (*fn)(struct stmmac_priv *priv, struct flow_cls_offload *cls,
                  struct stmmac_flow_entry *entry);
} tc_flow_parsers[] = {
        { .fn = tc_add_basic_flow },
        { .fn = tc_add_ip4_flow },
        { .fn = tc_add_ports_flow },
};

static int tc_add_flow(struct stmmac_priv *priv,
                       struct flow_cls_offload *cls)
{
        struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false);
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        int i, ret;

        if (!entry) {
                entry = tc_find_flow(priv, cls, true);
                if (!entry)
                        return -ENOENT;
        }

        ret = tc_parse_flow_actions(priv, &rule->action, entry,
                                    cls->common.extack);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(tc_flow_parsers); i++) {
                ret = tc_flow_parsers[i].fn(priv, cls, entry);
                if (!ret)
                        entry->in_use = true;
        }

        if (!entry->in_use)
                return -EINVAL;

        entry->cookie = cls->cookie;
        return 0;
}
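
/* Example (illustrative; device and addresses are placeholders): a
 * flower rule the parsers above can program into the L3/L4 filters,
 * dropping UDP flows addressed to 192.168.1.10:5001:
 *
 *   tc qdisc add dev eth0 ingress
 *   tc filter add dev eth0 ingress protocol ip flower \
 *       ip_proto udp dst_ip 192.168.1.10 dst_port 5001 \
 *       skip_sw action drop
 */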

static int tc_del_flow(struct stmmac_priv *priv,
                       struct flow_cls_offload *cls)
{
        struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false);
        int ret;

        if (!entry || !entry->in_use)
                return -ENOENT;

        if (entry->is_l4) {
                ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, false,
                                              false, false, false, 0);
        } else {
                ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, false,
                                              false, false, false, 0);
        }

        entry->in_use = false;
        entry->cookie = 0;
        entry->is_l4 = false;
        return ret;
}

static struct stmmac_rfs_entry *tc_find_rfs(struct stmmac_priv *priv,
                                            struct flow_cls_offload *cls,
                                            bool get_free)
{
        int i;

        for (i = 0; i < priv->rfs_entries_total; i++) {
                struct stmmac_rfs_entry *entry = &priv->rfs_entries[i];

                if (entry->cookie == cls->cookie)
                        return entry;
                if (get_free && entry->in_use == false)
                        return entry;
        }

        return NULL;
}

#define VLAN_PRIO_FULL_MASK (0x07)

static int tc_add_vlan_flow(struct stmmac_priv *priv,
                            struct flow_cls_offload *cls)
{
        struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        struct flow_dissector *dissector = rule->match.dissector;
        int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
        struct flow_match_vlan match;

        if (!entry) {
                entry = tc_find_rfs(priv, cls, true);
                if (!entry)
                        return -ENOENT;
        }

        if (priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN] >=
            priv->rfs_entries_max[STMMAC_RFS_T_VLAN])
                return -ENOENT;

        /* Nothing to do here */
        if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN))
                return -EINVAL;

        if (tc < 0) {
                netdev_err(priv->dev, "Invalid traffic class\n");
                return -EINVAL;
        }

        flow_rule_match_vlan(rule, &match);

        if (match.mask->vlan_priority) {
                u32 prio;

                if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
                        netdev_err(priv->dev, "Only full mask is supported for VLAN priority");
                        return -EINVAL;
                }

                prio = BIT(match.key->vlan_priority);
                stmmac_rx_queue_prio(priv, priv->hw, prio, tc);

                entry->in_use = true;
                entry->cookie = cls->cookie;
                entry->tc = tc;
                entry->type = STMMAC_RFS_T_VLAN;
                priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN]++;
        }

        return 0;
}
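
/* Example (illustrative; names and numbers are placeholders): steer
 * VLAN priority 5 traffic to hardware traffic class 2. Only an exact
 * (full-mask) vlan_prio match can be offloaded here:
 *
 *   tc filter add dev eth0 ingress protocol 802.1q flower \
 *       vlan_prio 5 hw_tc 2
 */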

static int tc_del_vlan_flow(struct stmmac_priv *priv,
                            struct flow_cls_offload *cls)
{
        struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);

        if (!entry || !entry->in_use || entry->type != STMMAC_RFS_T_VLAN)
                return -ENOENT;

        stmmac_rx_queue_prio(priv, priv->hw, 0, entry->tc);

        entry->in_use = false;
        entry->cookie = 0;
        entry->tc = 0;
        entry->type = 0;

        priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN]--;

        return 0;
}

static int tc_add_ethtype_flow(struct stmmac_priv *priv,
                               struct flow_cls_offload *cls)
{
        struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        struct flow_dissector *dissector = rule->match.dissector;
        int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
        struct flow_match_basic match;

        if (!entry) {
                entry = tc_find_rfs(priv, cls, true);
                if (!entry)
                        return -ENOENT;
        }

        /* Nothing to do here */
        if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC))
                return -EINVAL;

        if (tc < 0) {
                netdev_err(priv->dev, "Invalid traffic class\n");
                return -EINVAL;
        }

        flow_rule_match_basic(rule, &match);

        if (match.mask->n_proto) {
                u16 etype = ntohs(match.key->n_proto);

                if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) {
                        netdev_err(priv->dev, "Only full mask is supported for EthType filter");
                        return -EINVAL;
                }
                switch (etype) {
                case ETH_P_LLDP:
                        if (priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP] >=
                            priv->rfs_entries_max[STMMAC_RFS_T_LLDP])
                                return -ENOENT;

                        entry->type = STMMAC_RFS_T_LLDP;
                        priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP]++;

                        stmmac_rx_queue_routing(priv, priv->hw,
                                                PACKET_DCBCPQ, tc);
                        break;
                case ETH_P_1588:
                        if (priv->rfs_entries_cnt[STMMAC_RFS_T_1588] >=
                            priv->rfs_entries_max[STMMAC_RFS_T_1588])
                                return -ENOENT;

                        entry->type = STMMAC_RFS_T_1588;
                        priv->rfs_entries_cnt[STMMAC_RFS_T_1588]++;

                        stmmac_rx_queue_routing(priv, priv->hw,
                                                PACKET_PTPQ, tc);
                        break;
                default:
                        netdev_err(priv->dev, "EthType(0x%x) is not supported", etype);
                        return -EINVAL;
                }

                entry->in_use = true;
                entry->cookie = cls->cookie;
                entry->tc = tc;
                entry->etype = etype;

                return 0;
        }

        return -EINVAL;
}
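
/* Example (illustrative; device and class are placeholders): route LLDP
 * frames (EthType 0x88cc) to traffic class 1 via the DCB control packet
 * queue mapping; 0x88f7 (IEEE 1588 / PTP) works the same way:
 *
 *   tc filter add dev eth0 ingress protocol 0x88cc flower hw_tc 1
 */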

static int tc_del_ethtype_flow(struct stmmac_priv *priv,
                               struct flow_cls_offload *cls)
{
        struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);

        if (!entry || !entry->in_use ||
            entry->type < STMMAC_RFS_T_LLDP ||
            entry->type > STMMAC_RFS_T_1588)
                return -ENOENT;

        switch (entry->etype) {
        case ETH_P_LLDP:
                stmmac_rx_queue_routing(priv, priv->hw,
                                        PACKET_DCBCPQ, 0);
                priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP]--;
                break;
        case ETH_P_1588:
                stmmac_rx_queue_routing(priv, priv->hw,
                                        PACKET_PTPQ, 0);
                priv->rfs_entries_cnt[STMMAC_RFS_T_1588]--;
                break;
        default:
                netdev_err(priv->dev, "EthType(0x%x) is not supported",
                           entry->etype);
                return -EINVAL;
        }

        entry->in_use = false;
        entry->cookie = 0;
        entry->tc = 0;
        entry->etype = 0;
        entry->type = 0;

        return 0;
}

static int tc_add_flow_cls(struct stmmac_priv *priv,
                           struct flow_cls_offload *cls)
{
        int ret;

        ret = tc_add_flow(priv, cls);
        if (!ret)
                return ret;

        ret = tc_add_ethtype_flow(priv, cls);
        if (!ret)
                return ret;

        return tc_add_vlan_flow(priv, cls);
}

static int tc_del_flow_cls(struct stmmac_priv *priv,
                           struct flow_cls_offload *cls)
{
        int ret;

        ret = tc_del_flow(priv, cls);
        if (!ret)
                return ret;

        ret = tc_del_ethtype_flow(priv, cls);
        if (!ret)
                return ret;

        return tc_del_vlan_flow(priv, cls);
}

static int tc_setup_cls(struct stmmac_priv *priv,
                        struct flow_cls_offload *cls)
{
        int ret = 0;

        /* When RSS is enabled, the filtering will be bypassed */
        if (priv->rss.enable)
                return -EBUSY;

        switch (cls->command) {
        case FLOW_CLS_REPLACE:
                ret = tc_add_flow_cls(priv, cls);
                break;
        case FLOW_CLS_DESTROY:
                ret = tc_del_flow_cls(priv, cls);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return ret;
}

struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
                                           ktime_t current_time,
                                           u64 cycle_time)
{
        struct timespec64 time;

        if (ktime_after(old_base_time, current_time)) {
                time = ktime_to_timespec64(old_base_time);
        } else {
                s64 n;
                ktime_t base_time;

                n = div64_s64(ktime_sub_ns(current_time, old_base_time),
                              cycle_time);
                base_time = ktime_add_ns(old_base_time,
                                         (n + 1) * cycle_time);

                time = ktime_to_timespec64(base_time);
        }

        return time;
}
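
/* Worked example (illustrative numbers): with old_base_time = 1s,
 * current_time = 10.4s and cycle_time = 1s, n = (10.4 - 1) / 1
 * truncates to 9, so the returned base time is 1 + (9 + 1) * 1 = 11s:
 * the first cycle boundary strictly after the current time.
 */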

static void tc_taprio_map_maxsdu_txq(struct stmmac_priv *priv,
                                     struct tc_taprio_qopt_offload *qopt)
{
        u32 num_tc = qopt->mqprio.qopt.num_tc;
        u32 offset, count, i, j;

        /* The queueMaxSDU values the driver receives are indexed by Linux
         * traffic class. Map queueMaxSDU per traffic class onto the DWMAC
         * Tx queues backing it.
         */
        for (i = 0; i < num_tc; i++) {
                if (!qopt->max_sdu[i])
                        continue;

                offset = qopt->mqprio.qopt.offset[i];
                count = qopt->mqprio.qopt.count[i];

                for (j = offset; j < offset + count; j++)
                        priv->est->max_sdu[j] = qopt->max_sdu[i] + ETH_HLEN - ETH_TLEN;
        }
}

static int tc_taprio_configure(struct stmmac_priv *priv,
                               struct tc_taprio_qopt_offload *qopt)
{
        u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
        struct netlink_ext_ack *extack = qopt->mqprio.extack;
        struct timespec64 time, current_time, qopt_time;
        ktime_t current_time_ns;
        int i, ret = 0;
        u64 ctr;

        if (qopt->base_time < 0)
                return -ERANGE;

        if (!priv->dma_cap.estsel)
                return -EOPNOTSUPP;

        switch (wid) {
        case 0x1:
                wid = 16;
                break;
        case 0x2:
                wid = 20;
                break;
        case 0x3:
                wid = 24;
                break;
        default:
                return -EOPNOTSUPP;
        }

        switch (dep) {
        case 0x1:
                dep = 64;
                break;
        case 0x2:
                dep = 128;
                break;
        case 0x3:
                dep = 256;
                break;
        case 0x4:
                dep = 512;
                break;
        case 0x5:
                dep = 1024;
                break;
        default:
                return -EOPNOTSUPP;
        }

        if (qopt->cmd == TAPRIO_CMD_DESTROY)
                goto disable;

        if (qopt->num_entries >= dep)
                return -EINVAL;
        if (!qopt->cycle_time)
                return -ERANGE;
        if (qopt->cycle_time_extension >= BIT(wid + 7))
                return -ERANGE;

        if (!priv->est) {
                priv->est = devm_kzalloc(priv->device, sizeof(*priv->est),
                                         GFP_KERNEL);
                if (!priv->est)
                        return -ENOMEM;

                mutex_init(&priv->est_lock);
        } else {
                mutex_lock(&priv->est_lock);
                memset(priv->est, 0, sizeof(*priv->est));
                mutex_unlock(&priv->est_lock);
        }

        size = qopt->num_entries;

        mutex_lock(&priv->est_lock);
        priv->est->gcl_size = size;
        priv->est->enable = qopt->cmd == TAPRIO_CMD_REPLACE;
        mutex_unlock(&priv->est_lock);

        for (i = 0; i < size; i++) {
                s64 delta_ns = qopt->entries[i].interval;
                u32 gates = qopt->entries[i].gate_mask;

                if (delta_ns > GENMASK(wid, 0))
                        return -ERANGE;
                if (gates > GENMASK(31 - wid, 0))
                        return -ERANGE;

                switch (qopt->entries[i].command) {
                case TC_TAPRIO_CMD_SET_GATES:
                        break;
                case TC_TAPRIO_CMD_SET_AND_HOLD:
                        gates |= BIT(0);
                        break;
                case TC_TAPRIO_CMD_SET_AND_RELEASE:
                        gates &= ~BIT(0);
                        break;
                default:
                        return -EOPNOTSUPP;
                }

                priv->est->gcl[i] = delta_ns | (gates << wid);
        }

        mutex_lock(&priv->est_lock);
        /* Adjust for real system time */
        priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
        current_time_ns = timespec64_to_ktime(current_time);
        time = stmmac_calc_tas_basetime(qopt->base_time, current_time_ns,
                                        qopt->cycle_time);

        priv->est->btr[0] = (u32)time.tv_nsec;
        priv->est->btr[1] = (u32)time.tv_sec;

        qopt_time = ktime_to_timespec64(qopt->base_time);
        priv->est->btr_reserve[0] = (u32)qopt_time.tv_nsec;
        priv->est->btr_reserve[1] = (u32)qopt_time.tv_sec;

        ctr = qopt->cycle_time;
        priv->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
        priv->est->ctr[1] = (u32)ctr;

        priv->est->ter = qopt->cycle_time_extension;

        tc_taprio_map_maxsdu_txq(priv, qopt);

        ret = stmmac_est_configure(priv, priv, priv->est,
                                   priv->plat->clk_ptp_rate);
        mutex_unlock(&priv->est_lock);
        if (ret) {
                netdev_err(priv->dev, "failed to configure EST\n");
                goto disable;
        }

        ret = stmmac_fpe_map_preemption_class(priv, priv->dev, extack,
                                              qopt->mqprio.preemptible_tcs);
        if (ret)
                goto disable;

        return 0;

disable:
        if (priv->est) {
                mutex_lock(&priv->est_lock);
                priv->est->enable = false;
                stmmac_est_configure(priv, priv, priv->est,
                                     priv->plat->clk_ptp_rate);
                /* Reset taprio status */
                for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
                        priv->xstats.max_sdu_txq_drop[i] = 0;
                        priv->xstats.mtl_est_txq_hlbf[i] = 0;
                }
                mutex_unlock(&priv->est_lock);
        }

        stmmac_fpe_map_preemption_class(priv, priv->dev, extack, 0);

        return ret;
}
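
/* Example (illustrative; device, times and queue layout are
 * placeholders): a fully offloaded (flags 0x2) schedule with a 1 ms
 * cycle split into three gate windows, one traffic class per Tx queue:
 *
 *   tc qdisc replace dev eth0 parent root handle 100 taprio \
 *       num_tc 3 map 0 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 \
 *       queues 1@0 1@1 1@2 \
 *       base-time 1000000000 cycle-time 1000000 \
 *       sched-entry S 0x1 333000 \
 *       sched-entry S 0x2 333000 \
 *       sched-entry S 0x4 334000 \
 *       flags 0x2
 */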

static void tc_taprio_stats(struct stmmac_priv *priv,
                            struct tc_taprio_qopt_offload *qopt)
{
        u64 window_drops = 0;
        int i = 0;

        for (i = 0; i < priv->plat->tx_queues_to_use; i++)
                window_drops += priv->xstats.max_sdu_txq_drop[i] +
                                priv->xstats.mtl_est_txq_hlbf[i];
        qopt->stats.window_drops = window_drops;

        /* Transmission overrun doesn't happen for stmmac, hence always 0 */
        qopt->stats.tx_overruns = 0;
}

static void tc_taprio_queue_stats(struct stmmac_priv *priv,
                                  struct tc_taprio_qopt_offload *qopt)
{
        struct tc_taprio_qopt_queue_stats *q_stats = &qopt->queue_stats;
        int queue = qopt->queue_stats.queue;

        q_stats->stats.window_drops = priv->xstats.max_sdu_txq_drop[queue] +
                                      priv->xstats.mtl_est_txq_hlbf[queue];

        /* Transmission overrun doesn't happen for stmmac, hence always 0 */
        q_stats->stats.tx_overruns = 0;
}

static int tc_setup_taprio(struct stmmac_priv *priv,
                           struct tc_taprio_qopt_offload *qopt)
{
        int err = 0;

        switch (qopt->cmd) {
        case TAPRIO_CMD_REPLACE:
        case TAPRIO_CMD_DESTROY:
                err = tc_taprio_configure(priv, qopt);
                break;
        case TAPRIO_CMD_STATS:
                tc_taprio_stats(priv, qopt);
                break;
        case TAPRIO_CMD_QUEUE_STATS:
                tc_taprio_queue_stats(priv, qopt);
                break;
        default:
                err = -EOPNOTSUPP;
        }

        return err;
}

static int tc_setup_taprio_without_fpe(struct stmmac_priv *priv,
                                       struct tc_taprio_qopt_offload *qopt)
{
        if (!qopt->mqprio.preemptible_tcs)
                return tc_setup_taprio(priv, qopt);

        NL_SET_ERR_MSG_MOD(qopt->mqprio.extack,
                           "taprio with FPE is not implemented for this MAC");

        return -EOPNOTSUPP;
}

static int tc_setup_etf(struct stmmac_priv *priv,
                        struct tc_etf_qopt_offload *qopt)
{
        if (!priv->dma_cap.tbssel)
                return -EOPNOTSUPP;
        if (qopt->queue >= priv->plat->tx_queues_to_use)
                return -EINVAL;
        if (!(priv->dma_conf.tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL))
                return -EINVAL;

        if (qopt->enable)
                priv->dma_conf.tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN;
        else
                priv->dma_conf.tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN;

        netdev_info(priv->dev, "%s ETF for Queue %d\n",
                    qopt->enable ? "enabled" : "disabled", qopt->queue);
        return 0;
}
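
/* Example (illustrative; device and delta are placeholders): enable
 * TBS-backed ETF on Tx queue 1, i.e. class 100:2 under a root
 * mqprio/taprio qdisc with handle 100:
 *
 *   tc qdisc replace dev eth0 parent 100:2 etf \
 *       clockid CLOCK_TAI delta 300000 offload
 */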

static int tc_query_caps(struct stmmac_priv *priv,
                         struct tc_query_caps_base *base)
{
        switch (base->type) {
        case TC_SETUP_QDISC_MQPRIO: {
                struct tc_mqprio_caps *caps = base->caps;

                caps->validate_queue_counts = true;

                return 0;
        }
        case TC_SETUP_QDISC_TAPRIO: {
                struct tc_taprio_caps *caps = base->caps;

                if (!priv->dma_cap.estsel)
                        return -EOPNOTSUPP;

                caps->gate_mask_per_txq = true;
                caps->supports_queue_max_sdu = true;

                return 0;
        }
        default:
                return -EOPNOTSUPP;
        }
}

static void stmmac_reset_tc_mqprio(struct net_device *ndev,
                                   struct netlink_ext_ack *extack)
{
        struct stmmac_priv *priv = netdev_priv(ndev);

        netdev_reset_tc(ndev);
        netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
        stmmac_fpe_map_preemption_class(priv, ndev, extack, 0);
}

static int tc_setup_dwmac510_mqprio(struct stmmac_priv *priv,
                                    struct tc_mqprio_qopt_offload *mqprio)
{
        struct netlink_ext_ack *extack = mqprio->extack;
        struct tc_mqprio_qopt *qopt = &mqprio->qopt;
        u32 offset, count, num_stack_tx_queues = 0;
        struct net_device *ndev = priv->dev;
        u32 num_tc = qopt->num_tc;
        int err;

        if (!num_tc) {
                stmmac_reset_tc_mqprio(ndev, extack);
                return 0;
        }

        err = netdev_set_num_tc(ndev, num_tc);
        if (err)
                return err;

        for (u32 tc = 0; tc < num_tc; tc++) {
                offset = qopt->offset[tc];
                count = qopt->count[tc];
                num_stack_tx_queues += count;

                err = netdev_set_tc_queue(ndev, tc, count, offset);
                if (err)
                        goto err_reset_tc;
        }

        err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
        if (err)
                goto err_reset_tc;

        err = stmmac_fpe_map_preemption_class(priv, ndev, extack,
                                              mqprio->preemptible_tcs);
        if (err)
                goto err_reset_tc;

        return 0;

err_reset_tc:
        stmmac_reset_tc_mqprio(ndev, extack);

        return err;
}
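
/* Example (illustrative; device and queue layout are placeholders):
 * offloaded mqprio with four traffic classes, one Tx queue each:
 *
 *   tc qdisc add dev eth0 root handle 100: mqprio \
 *       num_tc 4 map 0 1 2 3 0 0 0 0 0 0 0 0 0 0 0 0 \
 *       queues 1@0 1@1 1@2 1@3 hw 1
 */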

static int tc_setup_mqprio_unimplemented(struct stmmac_priv *priv,
                                         struct tc_mqprio_qopt_offload *mqprio)
{
        NL_SET_ERR_MSG_MOD(mqprio->extack,
                           "mqprio HW offload is not implemented for this MAC");
        return -EOPNOTSUPP;
}

const struct stmmac_tc_ops dwmac4_tc_ops = {
        .init = tc_init,
        .setup_cls_u32 = tc_setup_cls_u32,
        .setup_cbs = tc_setup_cbs,
        .setup_cls = tc_setup_cls,
        .setup_taprio = tc_setup_taprio_without_fpe,
        .setup_etf = tc_setup_etf,
        .query_caps = tc_query_caps,
        .setup_mqprio = tc_setup_mqprio_unimplemented,
};

const struct stmmac_tc_ops dwmac510_tc_ops = {
        .init = tc_init,
        .setup_cls_u32 = tc_setup_cls_u32,
        .setup_cbs = tc_setup_cbs,
        .setup_cls = tc_setup_cls,
        .setup_taprio = tc_setup_taprio,
        .setup_etf = tc_setup_etf,
        .query_caps = tc_query_caps,
        .setup_mqprio = tc_setup_dwmac510_mqprio,
};

const struct stmmac_tc_ops dwxgmac_tc_ops = {
        .init = tc_init,
        .setup_cls_u32 = tc_setup_cls_u32,
        .setup_cbs = tc_setup_cbs,
        .setup_cls = tc_setup_cls,
        .setup_taprio = tc_setup_taprio,
        .setup_etf = tc_setup_etf,
        .query_caps = tc_query_caps,
        .setup_mqprio = tc_setup_dwmac510_mqprio,
};