// SPDX-License-Identifier: GPL-2.0
/* Copyright 2020 NXP
 */
#include "sja1105.h"
#include "sja1105_vl.h"

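/* tc-flower offload for SJA1105 switches: broadcast and per-traffic-class
 * policers programmed through the L2 Policing Table, plus virtual-link (VL)
 * rules for trap, redirect, drop and gate actions.
 *
 * Illustrative usage from user space (port name, rate and burst are
 * hypothetical; see tc-flower(8) and tc-police(8)):
 *
 *   tc qdisc add dev swp2 clsact
 *   tc filter add dev swp2 ingress flower skip_sw \
 *           dst_mac ff:ff:ff:ff:ff:ff \
 *           action police rate 10mbit burst 64k conform-exceed drop/pipe
 */
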
struct sja1105_rule *sja1105_rule_find(struct sja1105_private *priv,
				       unsigned long cookie)
{
	struct sja1105_rule *rule;

	list_for_each_entry(rule, &priv->flow_block.rules, list)
		if (rule->cookie == cookie)
			return rule;

	return NULL;
}

static int sja1105_find_free_l2_policer(struct sja1105_private *priv)
{
	int i;

	for (i = 0; i < SJA1105_NUM_L2_POLICERS; i++)
		if (!priv->flow_block.l2_policer_used[i])
			return i;

	return -1;
}

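/* Install a policer for link-local broadcast traffic on @port, or attach
 * @port to an existing one. The broadcast policer of a port lives at index
 * (ds->num_ports * SJA1105_NUM_TC) + port in the L2 Policing Table; a
 * sharindx still equal to @port means that entry still points at the port's
 * default policer, i.e. no dedicated broadcast policer is installed yet.
 */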
static int sja1105_setup_bcast_policer(struct sja1105_private *priv,
				       struct netlink_ext_ack *extack,
				       unsigned long cookie, int port,
				       u64 rate_bytes_per_sec,
				       u32 burst)
{
	struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
	struct sja1105_l2_policing_entry *policing;
	struct dsa_switch *ds = priv->ds;
	bool new_rule = false;
	unsigned long p;
	int rc;

	if (!rule) {
		rule = kzalloc(sizeof(*rule), GFP_KERNEL);
		if (!rule)
			return -ENOMEM;

		rule->cookie = cookie;
		rule->type = SJA1105_RULE_BCAST_POLICER;
		rule->bcast_pol.sharindx = sja1105_find_free_l2_policer(priv);
		rule->key.type = SJA1105_KEY_BCAST;
		new_rule = true;
	}

	if (rule->bcast_pol.sharindx == -1) {
		NL_SET_ERR_MSG_MOD(extack, "No more L2 policers free");
		rc = -ENOSPC;
		goto out;
	}

	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;

	if (policing[(ds->num_ports * SJA1105_NUM_TC) + port].sharindx != port) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port already has a broadcast policer");
		rc = -EEXIST;
		goto out;
	}

	rule->port_mask |= BIT(port);

	/* Make the broadcast policers of all ports attached to this block
	 * point to the newly allocated policer
	 */
	for_each_set_bit(p, &rule->port_mask, SJA1105_MAX_NUM_PORTS) {
		int bcast = (ds->num_ports * SJA1105_NUM_TC) + p;

		policing[bcast].sharindx = rule->bcast_pol.sharindx;
	}

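	/* The rate field is programmed in steps of 15.625 kbps:
	 * bytes/s * 512 / 10^6 == Mbps * 64.
	 */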
	policing[rule->bcast_pol.sharindx].rate = div_u64(rate_bytes_per_sec *
							  512, 1000000);
	policing[rule->bcast_pol.sharindx].smax = burst;

	/* TODO: support per-flow MTU */
	policing[rule->bcast_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN +
						    ETH_FCS_LEN;

	rc = sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);

out:
	if (rc == 0 && new_rule) {
		priv->flow_block.l2_policer_used[rule->bcast_pol.sharindx] = true;
		list_add(&rule->list, &priv->flow_block.rules);
	} else if (new_rule) {
		kfree(rule);
	}

	return rc;
}

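/* Same scheme as the broadcast policer above, but per traffic class: the
 * policer of the {port, tc} pair sits at index (port * SJA1105_NUM_TC) + tc,
 * and a sharindx equal to @port means that traffic class still uses the
 * port's default policer.
 */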
static int sja1105_setup_tc_policer(struct sja1105_private *priv,
				    struct netlink_ext_ack *extack,
				    unsigned long cookie, int port, int tc,
				    u64 rate_bytes_per_sec,
				    u32 burst)
{
	struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
	struct sja1105_l2_policing_entry *policing;
	bool new_rule = false;
	unsigned long p;
	int rc;

	if (!rule) {
		rule = kzalloc(sizeof(*rule), GFP_KERNEL);
		if (!rule)
			return -ENOMEM;

		rule->cookie = cookie;
		rule->type = SJA1105_RULE_TC_POLICER;
		rule->tc_pol.sharindx = sja1105_find_free_l2_policer(priv);
		rule->key.type = SJA1105_KEY_TC;
		rule->key.tc.pcp = tc;
		new_rule = true;
	}

	if (rule->tc_pol.sharindx == -1) {
		NL_SET_ERR_MSG_MOD(extack, "No more L2 policers free");
		rc = -ENOSPC;
		goto out;
	}

	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;

	if (policing[(port * SJA1105_NUM_TC) + tc].sharindx != port) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port-TC pair already has an L2 policer");
		rc = -EEXIST;
		goto out;
	}

	rule->port_mask |= BIT(port);

	/* Make the policers for traffic class @tc of all ports attached to
	 * this block point to the newly allocated policer
	 */
	for_each_set_bit(p, &rule->port_mask, SJA1105_MAX_NUM_PORTS) {
		int index = (p * SJA1105_NUM_TC) + tc;

		policing[index].sharindx = rule->tc_pol.sharindx;
	}

	policing[rule->tc_pol.sharindx].rate = div_u64(rate_bytes_per_sec *
						       512, 1000000);
	policing[rule->tc_pol.sharindx].smax = burst;

	/* TODO: support per-flow MTU */
	policing[rule->tc_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN +
						 ETH_FCS_LEN;

	rc = sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);

out:
	if (rc == 0 && new_rule) {
		priv->flow_block.l2_policer_used[rule->tc_pol.sharindx] = true;
		list_add(&rule->list, &priv->flow_block.rules);
	} else if (new_rule) {
		kfree(rule);
	}

	return rc;
}

static int sja1105_flower_policer(struct sja1105_private *priv, int port,
				  struct netlink_ext_ack *extack,
				  unsigned long cookie,
				  struct sja1105_key *key,
				  u64 rate_bytes_per_sec,
				  u32 burst)
{
	switch (key->type) {
	case SJA1105_KEY_BCAST:
		return sja1105_setup_bcast_policer(priv, extack, cookie, port,
						   rate_bytes_per_sec, burst);
	case SJA1105_KEY_TC:
		return sja1105_setup_tc_policer(priv, extack, cookie, port,
						key->tc.pcp, rate_bytes_per_sec,
						burst);
	default:
		NL_SET_ERR_MSG_MOD(extack, "Unknown keys for policing");
		return -EOPNOTSUPP;
	}
}

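/* Translate the flower match into one of the key types acted upon below:
 * - broadcast DMAC, no VID/PCP -> SJA1105_KEY_BCAST (broadcast policer)
 * - PCP only                   -> SJA1105_KEY_TC (per-traffic-class policer)
 * - DMAC + VID + PCP           -> SJA1105_KEY_VLAN_AWARE_VL (virtual link)
 * - DMAC only                  -> SJA1105_KEY_VLAN_UNAWARE_VL (virtual link)
 * Anything else is rejected with -EOPNOTSUPP.
 */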
static int sja1105_flower_parse_key(struct sja1105_private *priv,
				    struct netlink_ext_ack *extack,
				    struct flow_cls_offload *cls,
				    struct sja1105_key *key)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	bool is_bcast_dmac = false;
	u64 dmac = U64_MAX;
	u16 vid = U16_MAX;
	u16 pcp = U16_MAX;

	if (dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported keys used");
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_has_control_flags(rule, extack))
		return -EOPNOTSUPP;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		if (match.key->n_proto) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on protocol not supported");
			return -EOPNOTSUPP;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		u8 bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
		u8 null[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);

		if (!ether_addr_equal_masked(match.key->src, null,
					     match.mask->src)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on source MAC not supported");
			return -EOPNOTSUPP;
		}

		if (!ether_addr_equal(match.mask->dst, bcast)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Masked matching on MAC not supported");
			return -EOPNOTSUPP;
		}

		dmac = ether_addr_to_u64(match.key->dst);
		is_bcast_dmac = ether_addr_equal(match.key->dst, bcast);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);

		if (match.mask->vlan_id &&
		    match.mask->vlan_id != VLAN_VID_MASK) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Masked matching on VID is not supported");
			return -EOPNOTSUPP;
		}

		if (match.mask->vlan_priority &&
		    match.mask->vlan_priority != 0x7) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Masked matching on PCP is not supported");
			return -EOPNOTSUPP;
		}

		if (match.mask->vlan_id)
			vid = match.key->vlan_id;
		if (match.mask->vlan_priority)
			pcp = match.key->vlan_priority;
	}

	if (is_bcast_dmac && vid == U16_MAX && pcp == U16_MAX) {
		key->type = SJA1105_KEY_BCAST;
		return 0;
	}
	if (dmac == U64_MAX && vid == U16_MAX && pcp != U16_MAX) {
		key->type = SJA1105_KEY_TC;
		key->tc.pcp = pcp;
		return 0;
	}
	if (dmac != U64_MAX && vid != U16_MAX && pcp != U16_MAX) {
		key->type = SJA1105_KEY_VLAN_AWARE_VL;
		key->vl.dmac = dmac;
		key->vl.vid = vid;
		key->vl.pcp = pcp;
		return 0;
	}
	if (dmac != U64_MAX) {
		key->type = SJA1105_KEY_VLAN_UNAWARE_VL;
		key->vl.dmac = dmac;
		return 0;
	}

	NL_SET_ERR_MSG_MOD(extack, "Not matching on any known key");
	return -EOPNOTSUPP;
}

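/* Reject police parameters that an L2 policing entry (a byte rate plus a
 * burst size) cannot express: the exceed action must be drop, the conform
 * action pipe or ok, and peakrate/avrate/overhead/packets-per-second modes
 * are not offloaded.
 */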
static int sja1105_policer_validate(const struct flow_action *action,
				    const struct flow_action_entry *act,
				    struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	if (act->police.rate_pkt_ps) {
		NL_SET_ERR_MSG_MOD(extack,
				   "QoS offload does not support packets per second");
		return -EOPNOTSUPP;
	}

	return 0;
}

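/* Parse the flower key, then walk the actions: police programs the L2
 * Policing Table, while trap, redirect, drop and gate build virtual-link
 * rules. The VL static config (and, for gating, the time-aware scheduler) is
 * only committed once all actions have populated DESTPORTS.
 */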
int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
			   struct flow_cls_offload *cls, bool ingress)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct sja1105_private *priv = ds->priv;
	const struct flow_action_entry *act;
	unsigned long cookie = cls->cookie;
	bool routing_rule = false;
	struct sja1105_key key;
	bool gate_rule = false;
	bool vl_rule = false;
	int rc, i;

	rc = sja1105_flower_parse_key(priv, extack, cls, &key);
	if (rc)
		return rc;

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			rc = sja1105_policer_validate(&rule->action, act, extack);
			if (rc)
				goto out;

			rc = sja1105_flower_policer(priv, port, extack, cookie,
						    &key,
						    act->police.rate_bytes_ps,
						    act->police.burst);
			if (rc)
				goto out;
			break;
		case FLOW_ACTION_TRAP: {
			int cpu = dsa_upstream_port(ds, port);

			routing_rule = true;
			vl_rule = true;

			rc = sja1105_vl_redirect(priv, port, extack, cookie,
						 &key, BIT(cpu), true);
			if (rc)
				goto out;
			break;
		}
		case FLOW_ACTION_REDIRECT: {
			struct dsa_port *to_dp;

			to_dp = dsa_port_from_netdev(act->dev);
			if (IS_ERR(to_dp)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Destination not a switch port");
				return -EOPNOTSUPP;
			}

			routing_rule = true;
			vl_rule = true;

			rc = sja1105_vl_redirect(priv, port, extack, cookie,
						 &key, BIT(to_dp->index), true);
			if (rc)
				goto out;
			break;
		}
		case FLOW_ACTION_DROP:
			vl_rule = true;

			rc = sja1105_vl_redirect(priv, port, extack, cookie,
						 &key, 0, false);
			if (rc)
				goto out;
			break;
		case FLOW_ACTION_GATE:
			gate_rule = true;
			vl_rule = true;

			rc = sja1105_vl_gate(priv, port, extack, cookie,
					     &key, act->hw_index,
					     act->gate.prio,
					     act->gate.basetime,
					     act->gate.cycletime,
					     act->gate.cycletimeext,
					     act->gate.num_entries,
					     act->gate.entries);
			if (rc)
				goto out;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Action not supported");
			rc = -EOPNOTSUPP;
			goto out;
		}
	}

	if (vl_rule && !rc) {
		/* Delay scheduling configuration until DESTPORTS has been
		 * populated by all other actions.
		 */
		if (gate_rule) {
			if (!routing_rule) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Can only offload gate action together with redirect or trap");
				return -EOPNOTSUPP;
			}
			rc = sja1105_init_scheduling(priv);
			if (rc)
				goto out;
		}

		rc = sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
	}

out:
	return rc;
}

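/* VL rules are handed off to sja1105_vl_delete(). For policer rules, point
 * the port's policing entry back at its per-port default and release the
 * dedicated policer once no port references it anymore.
 */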
int sja1105_cls_flower_del(struct dsa_switch *ds, int port,
			   struct flow_cls_offload *cls, bool ingress)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_rule *rule = sja1105_rule_find(priv, cls->cookie);
	struct sja1105_l2_policing_entry *policing;
	int old_sharindx;

	if (!rule)
		return 0;

	if (rule->type == SJA1105_RULE_VL)
		return sja1105_vl_delete(priv, port, rule, cls->common.extack);

	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;

	if (rule->type == SJA1105_RULE_BCAST_POLICER) {
		int bcast = (ds->num_ports * SJA1105_NUM_TC) + port;

		old_sharindx = policing[bcast].sharindx;
		policing[bcast].sharindx = port;
	} else if (rule->type == SJA1105_RULE_TC_POLICER) {
		int index = (port * SJA1105_NUM_TC) + rule->key.tc.pcp;

		old_sharindx = policing[index].sharindx;
		policing[index].sharindx = port;
	} else {
		return -EINVAL;
	}

	rule->port_mask &= ~BIT(port);
	if (!rule->port_mask) {
		priv->flow_block.l2_policer_used[old_sharindx] = false;
		list_del(&rule->list);
		kfree(rule);
	}

	return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
}

int sja1105_cls_flower_stats(struct dsa_switch *ds, int port,
			     struct flow_cls_offload *cls, bool ingress)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_rule *rule = sja1105_rule_find(priv, cls->cookie);
	int rc;

	if (!rule)
		return 0;

	if (rule->type != SJA1105_RULE_VL)
		return 0;

	rc = sja1105_vl_stats(priv, port, rule, &cls->stats,
			      cls->common.extack);
	if (rc)
		return rc;

	return 0;
}

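/* Entries 0..ds->num_ports-1 of the L2 Policing Table serve as the per-port
 * default policers that all traffic initially shares, so mark them as used
 * and never hand them out to tc-flower rules.
 */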
void sja1105_flower_setup(struct dsa_switch *ds)
{
	struct sja1105_private *priv = ds->priv;
	int port;

	INIT_LIST_HEAD(&priv->flow_block.rules);

	for (port = 0; port < ds->num_ports; port++)
		priv->flow_block.l2_policer_used[port] = true;
}

void sja1105_flower_teardown(struct dsa_switch *ds)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_rule *rule;
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &priv->flow_block.rules) {
		rule = list_entry(pos, struct sja1105_rule, list);
		list_del(&rule->list);
		kfree(rule);
	}
}