// SPDX-License-Identifier: GPL-2.0
/*
 * DPAA2 Ethernet Switch flower support
 *
 * Copyright 2021 NXP
 */

#include "dpaa2-switch.h"
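/* Translate the software flower match (key and mask) into the dpsw_acl_key
 * match/mask pair consumed by the MC firmware. Only the dissector keys listed
 * below can be expressed in the hardware ACL key format.
 */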
static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls,
					 struct dpsw_acl_key *acl_key)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpsw_acl_fields *acl_h, *acl_m;

	if (dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IP) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported keys used");
		return -EOPNOTSUPP;
	}

	acl_h = &acl_key->match;
	acl_m = &acl_key->mask;

	if (flow_rule_match_has_control_flags(rule, extack))
		return -EOPNOTSUPP;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		acl_h->l3_protocol = match.key->ip_proto;
		acl_h->l2_ether_type = be16_to_cpu(match.key->n_proto);
		acl_m->l3_protocol = match.mask->ip_proto;
		acl_m->l2_ether_type = be16_to_cpu(match.mask->n_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		ether_addr_copy(acl_h->l2_dest_mac, &match.key->dst[0]);
		ether_addr_copy(acl_h->l2_source_mac, &match.key->src[0]);
		ether_addr_copy(acl_m->l2_dest_mac, &match.mask->dst[0]);
		ether_addr_copy(acl_m->l2_source_mac, &match.mask->src[0]);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		acl_h->l2_vlan_id = match.key->vlan_id;
		acl_h->l2_tpid = be16_to_cpu(match.key->vlan_tpid);
		acl_h->l2_pcp_dei = match.key->vlan_priority << 1 |
				    match.key->vlan_dei;

		acl_m->l2_vlan_id = match.mask->vlan_id;
		acl_m->l2_tpid = be16_to_cpu(match.mask->vlan_tpid);
		acl_m->l2_pcp_dei = match.mask->vlan_priority << 1 |
				    match.mask->vlan_dei;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		acl_h->l3_source_ip = be32_to_cpu(match.key->src);
		acl_h->l3_dest_ip = be32_to_cpu(match.key->dst);
		acl_m->l3_source_ip = be32_to_cpu(match.mask->src);
		acl_m->l3_dest_ip = be32_to_cpu(match.mask->dst);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		acl_h->l4_source_port = be16_to_cpu(match.key->src);
		acl_h->l4_dest_port = be16_to_cpu(match.key->dst);
		acl_m->l4_source_port = be16_to_cpu(match.mask->src);
		acl_m->l4_dest_port = be16_to_cpu(match.mask->dst);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		if (match.mask->ttl != 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on TTL not supported");
			return -EOPNOTSUPP;
		}

		if ((match.mask->tos & 0x3) != 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on ECN not supported, only DSCP");
			return -EOPNOTSUPP;
		}

		acl_h->l3_dscp = match.key->tos >> 2;
		acl_m->l3_dscp = match.mask->tos >> 2;
	}

	return 0;
}

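/* Program an ACL entry into the switch hardware. The key/mask pair is
 * serialized into a DMA-able buffer which the MC firmware reads when the
 * dpsw_acl_add_entry() command is issued.
 */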
int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block,
			       struct dpaa2_switch_acl_entry *entry)
{
	struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
	struct ethsw_core *ethsw = filter_block->ethsw;
	struct dpsw_acl_key *acl_key = &entry->key;
	struct device *dev = ethsw->dev;
	u8 *cmd_buff;
	int err;

	cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL);
	if (!cmd_buff)
		return -ENOMEM;

	dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff);

	acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff,
						 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
						 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
		dev_err(dev, "DMA mapping failed\n");
		kfree(cmd_buff);
		return -EFAULT;
	}

	err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
				 filter_block->acl_id, acl_entry_cfg);

	dma_unmap_single(dev, acl_entry_cfg->key_iova,
			 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
			 DMA_TO_DEVICE);
	if (err) {
		dev_err(dev, "dpsw_acl_add_entry() failed %d\n", err);
		kfree(cmd_buff);
		return err;
	}

	kfree(cmd_buff);

	return 0;
}

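/* Remove an ACL entry from the switch hardware. The same serialized key is
 * passed to the firmware so that it can identify which entry to delete.
 */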
static int
dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block,
			      struct dpaa2_switch_acl_entry *entry)
{
	struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
	struct dpsw_acl_key *acl_key = &entry->key;
	struct ethsw_core *ethsw = block->ethsw;
	struct device *dev = ethsw->dev;
	u8 *cmd_buff;
	int err;

	cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL);
	if (!cmd_buff)
		return -ENOMEM;

	dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff);

	acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff,
						 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
						 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
		dev_err(dev, "DMA mapping failed\n");
		kfree(cmd_buff);
		return -EFAULT;
	}

	err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
				    block->acl_id, acl_entry_cfg);

	dma_unmap_single(dev, acl_entry_cfg->key_iova,
			 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE);
	if (err) {
		dev_err(dev, "dpsw_acl_remove_entry() failed %d\n", err);
		kfree(cmd_buff);
		return err;
	}

	kfree(cmd_buff);

	return 0;
}

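/* Insert the new entry into the block's list of ACL entries, kept sorted in
 * ascending order of tc priority, and return the position it was placed at.
 */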
static int
dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_filter_block *block,
				   struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp;
	struct list_head *pos, *n;
	int index = 0;

	if (list_empty(&block->acl_entries)) {
		list_add(&entry->list, &block->acl_entries);
		return index;
	}

	list_for_each_safe(pos, n, &block->acl_entries) {
		tmp = list_entry(pos, struct dpaa2_switch_acl_entry, list);
		if (entry->prio < tmp->prio)
			break;
		index++;
	}
	list_add(&entry->list, pos->prev);
	return index;
}

static struct dpaa2_switch_acl_entry*
dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_filter_block *block,
				    int index)
{
	struct dpaa2_switch_acl_entry *tmp;
	int i = 0;

	list_for_each_entry(tmp, &block->acl_entries, list) {
		if (i == index)
			return tmp;
		i++;
	}

	return NULL;
}

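/* The precedence of an installed entry cannot be updated in place: remove the
 * entry from hardware and add it back with the new precedence value.
 */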
static int
dpaa2_switch_acl_entry_set_precedence(struct dpaa2_switch_filter_block *block,
				      struct dpaa2_switch_acl_entry *entry,
				      int precedence)
{
	int err;

	err = dpaa2_switch_acl_entry_remove(block, entry);
	if (err)
		return err;

	entry->cfg.precedence = precedence;
	return dpaa2_switch_acl_entry_add(block, entry);
}

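/* Add a new ACL entry at the position given by its tc priority. Entries that
 * must match before the new one have their hardware precedence shifted so
 * that a slot opens up for the new filter.
 */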
static int
dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_filter_block *block,
			       struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp;
	int index, i, precedence, err;

	/* Add the new ACL entry to the linked list and get its index */
	index = dpaa2_switch_acl_entry_add_to_list(block, entry);

	/* Move up in priority the ACL entries to make space
	 * for the new filter.
	 */
	precedence = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - block->num_acl_rules - 1;
	for (i = 0; i < index; i++) {
		tmp = dpaa2_switch_acl_entry_get_by_index(block, i);

		err = dpaa2_switch_acl_entry_set_precedence(block, tmp,
							    precedence);
		if (err)
			return err;

		precedence++;
	}

	/* Add the new entry to hardware */
	entry->cfg.precedence = precedence;
	err = dpaa2_switch_acl_entry_add(block, entry);
	block->num_acl_rules++;

	return err;
}

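/* The tc layer identifies an offloaded filter only by its cookie, so lookups
 * in both the ACL and the mirror lists are done by cookie.
 */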
static struct dpaa2_switch_acl_entry *
dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_filter_block *block,
					  unsigned long cookie)
{
	struct dpaa2_switch_acl_entry *tmp, *n;

	list_for_each_entry_safe(tmp, n, &block->acl_entries, list) {
		if (tmp->cookie == cookie)
			return tmp;
	}
	return NULL;
}

static int
dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_filter_block *block,
				 struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp, *n;
	int index = 0;

	list_for_each_entry_safe(tmp, n, &block->acl_entries, list) {
		if (tmp->cookie == entry->cookie)
			return index;
		index++;
	}
	return -ENOENT;
}

static struct dpaa2_switch_mirror_entry *
dpaa2_switch_mirror_find_entry_by_cookie(struct dpaa2_switch_filter_block *block,
					 unsigned long cookie)
{
	struct dpaa2_switch_mirror_entry *tmp, *n;

	list_for_each_entry_safe(tmp, n, &block->mirror_entries, list) {
		if (tmp->cookie == cookie)
			return tmp;
	}
	return NULL;
}

static int
dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_filter_block *block,
				  struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp;
	int index, i, precedence, err;

	index = dpaa2_switch_acl_entry_get_index(block, entry);

	/* Remove from hardware the ACL entry */
	err = dpaa2_switch_acl_entry_remove(block, entry);
	if (err)
		return err;

	block->num_acl_rules--;

	/* Remove it from the list also */
	list_del(&entry->list);

	/* Move down in priority the entries over the deleted one */
	precedence = entry->cfg.precedence;
	for (i = index - 1; i >= 0; i--) {
		tmp = dpaa2_switch_acl_entry_get_by_index(block, i);
		err = dpaa2_switch_acl_entry_set_precedence(block, tmp,
							    precedence);
		if (err)
			return err;

		precedence--;
	}

	kfree(entry);

	return 0;
}

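/* Translate a tc flower/matchall action into the ACL result understood by
 * the DPSW object: trap to the control interface, redirect to another switch
 * port or drop.
 */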
static int dpaa2_switch_tc_parse_action_acl(struct ethsw_core *ethsw,
					    struct flow_action_entry *cls_act,
					    struct dpsw_acl_result *dpsw_act,
					    struct netlink_ext_ack *extack)
{
	int err = 0;

	switch (cls_act->id) {
	case FLOW_ACTION_TRAP:
		dpsw_act->action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF;
		break;
	case FLOW_ACTION_REDIRECT:
		if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Destination not a DPAA2 switch port");
			return -EOPNOTSUPP;
		}

		dpsw_act->if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);
		dpsw_act->action = DPSW_ACL_ACTION_REDIRECT;
		break;
	case FLOW_ACTION_DROP:
		dpsw_act->action = DPSW_ACL_ACTION_DROP;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Action not supported");
		err = -EOPNOTSUPP;
		goto out;
	}

out:
	return err;
}

static int
dpaa2_switch_block_add_mirror(struct dpaa2_switch_filter_block *block,
			      struct dpaa2_switch_mirror_entry *entry,
			      u16 to, struct netlink_ext_ack *extack)
{
	unsigned long block_ports = block->ports;
	struct ethsw_core *ethsw = block->ethsw;
	struct ethsw_port_priv *port_priv;
	unsigned long ports_added = 0;
	u16 vlan = entry->cfg.vlan_id;
	bool mirror_port_enabled;
	int err, port;

	/* Setup the mirroring port */
	mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
	if (!mirror_port_enabled) {
		err = dpsw_set_reflection_if(ethsw->mc_io, 0,
					     ethsw->dpsw_handle, to);
		if (err)
			return err;
		ethsw->mirror_port = to;
	}

	/* Setup the same egress mirroring configuration on all the switch
	 * ports that share the same filter block.
	 */
	for_each_set_bit(port, &block_ports, ethsw->sw_attr.num_ifs) {
		port_priv = ethsw->ports[port];

		/* We cannot add a per VLAN mirroring rule if the VLAN in
		 * question is not installed on the switch port.
		 */
		if (entry->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN &&
		    !(port_priv->vlans[vlan] & ETHSW_VLAN_MEMBER)) {
			NL_SET_ERR_MSG(extack,
				       "VLAN must be installed on the switch port");
			err = -EINVAL;
			goto err_remove_filters;
		}

		err = dpsw_if_add_reflection(ethsw->mc_io, 0,
					     ethsw->dpsw_handle,
					     port, &entry->cfg);
		if (err)
			goto err_remove_filters;

		ports_added |= BIT(port);
	}

	list_add(&entry->list, &block->mirror_entries);

	return 0;

err_remove_filters:
	for_each_set_bit(port, &ports_added, ethsw->sw_attr.num_ifs) {
		dpsw_if_remove_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  port, &entry->cfg);
	}

	if (!mirror_port_enabled)
		ethsw->mirror_port = ethsw->sw_attr.num_ifs;

	return err;
}

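/* Tear down a mirror entry on all ports of the filter block. When the last
 * mirror rule goes away, the mirror destination port is marked as unused.
 */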
static int
dpaa2_switch_block_remove_mirror(struct dpaa2_switch_filter_block *block,
				 struct dpaa2_switch_mirror_entry *entry)
{
	struct dpsw_reflection_cfg *cfg = &entry->cfg;
	unsigned long block_ports = block->ports;
	struct ethsw_core *ethsw = block->ethsw;
	int port;

	/* Remove this mirroring configuration from all the ports belonging to
	 * the filter block.
	 */
	for_each_set_bit(port, &block_ports, ethsw->sw_attr.num_ifs)
		dpsw_if_remove_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  port, cfg);

	/* Also remove it from the list of mirror filters */
	list_del(&entry->list);
	kfree(entry);

	/* If this was the last mirror filter, then unset the mirror port */
	if (list_empty(&block->mirror_entries))
		ethsw->mirror_port = ethsw->sw_attr.num_ifs;

	return 0;
}

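/* Offload a flower filter with a trap/redirect/drop action as an ACL entry */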
static int
dpaa2_switch_cls_flower_replace_acl(struct dpaa2_switch_filter_block *block,
				    struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpaa2_switch_acl_entry *acl_entry;
	struct ethsw_core *ethsw = block->ethsw;
	struct flow_action_entry *act;
	int err;

	if (dpaa2_switch_acl_tbl_is_full(block)) {
		NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
		return -ENOMEM;
	}

	acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL);
	if (!acl_entry)
		return -ENOMEM;

	err = dpaa2_switch_flower_parse_key(cls, &acl_entry->key);
	if (err)
		goto free_acl_entry;

	act = &rule->action.entries[0];
	err = dpaa2_switch_tc_parse_action_acl(ethsw, act,
					       &acl_entry->cfg.result, extack);
	if (err)
		goto free_acl_entry;

	acl_entry->prio = cls->common.prio;
	acl_entry->cookie = cls->cookie;

	err = dpaa2_switch_acl_tbl_add_entry(block, acl_entry);
	if (err)
		goto free_acl_entry;

	return 0;

free_acl_entry:
	kfree(acl_entry);

	return err;
}

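/* Mirroring can be offloaded either per port (matchall) or per VLAN (flower).
 * For the flower variant, only an exact match on the VLAN ID is supported.
 */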
static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
						u16 *vlan)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	struct netlink_ext_ack *extack = cls->common.extack;
	int ret = -EOPNOTSUPP;

	if (dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Mirroring is supported only per VLAN");
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_has_control_flags(rule, extack))
		return -EOPNOTSUPP;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);

		if (match.mask->vlan_priority != 0 ||
		    match.mask->vlan_dei != 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only matching on VLAN ID supported");
			return -EOPNOTSUPP;
		}

		if (match.mask->vlan_id != 0xFFF) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Masked matching not supported");
			return -EOPNOTSUPP;
		}

		*vlan = (u16)match.key->vlan_id;
		ret = 0;
	}

	return ret;
}

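/* Offload a flower filter with a mirred action as a per-VLAN mirror rule */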
static int
dpaa2_switch_cls_flower_replace_mirror(struct dpaa2_switch_filter_block *block,
				       struct flow_cls_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpaa2_switch_mirror_entry *mirror_entry;
	struct ethsw_core *ethsw = block->ethsw;
	struct dpaa2_switch_mirror_entry *tmp;
	struct flow_action_entry *cls_act;
	struct list_head *pos, *n;
	bool mirror_port_enabled;
	u16 if_id, vlan;
	int err;

	mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
	cls_act = &cls->rule->action.entries[0];

	/* Offload rules only when the destination is a DPAA2 switch port */
	if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Destination not a DPAA2 switch port");
		return -EOPNOTSUPP;
	}
	if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);

	/* We have a single mirror port but can configure egress mirroring on
	 * all the other switch ports. We need to allow mirroring rules only
	 * when the destination port is the same.
	 */
	if (mirror_port_enabled && ethsw->mirror_port != if_id) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Multiple mirror ports not supported");
		return -EBUSY;
	}

	/* Parse the key */
	err = dpaa2_switch_flower_parse_mirror_key(cls, &vlan);
	if (err)
		return err;

	/* Make sure that we don't already have a mirror rule with the same
	 * configuration.
	 */
	list_for_each_safe(pos, n, &block->mirror_entries) {
		tmp = list_entry(pos, struct dpaa2_switch_mirror_entry, list);

		if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN &&
		    tmp->cfg.vlan_id == vlan) {
			NL_SET_ERR_MSG_MOD(extack,
					   "VLAN mirror filter already installed");
			return -EBUSY;
		}
	}

	mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL);
	if (!mirror_entry)
		return -ENOMEM;

	mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_VLAN;
	mirror_entry->cfg.vlan_id = vlan;
	mirror_entry->cookie = cls->cookie;

	return dpaa2_switch_block_add_mirror(block, mirror_entry, if_id,
					     extack);
}

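/* Entry point for flower filter offload: dispatch to the ACL or the mirror
 * implementation based on the (single) action of the filter.
 */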
int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_filter_block *block,
				    struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action_entry *act;

	if (!flow_offload_has_one_action(&rule->action)) {
		NL_SET_ERR_MSG(extack, "Only singular actions are supported");
		return -EOPNOTSUPP;
	}

	act = &rule->action.entries[0];
	switch (act->id) {
	case FLOW_ACTION_REDIRECT:
	case FLOW_ACTION_TRAP:
	case FLOW_ACTION_DROP:
		return dpaa2_switch_cls_flower_replace_acl(block, cls);
	case FLOW_ACTION_MIRRED:
		return dpaa2_switch_cls_flower_replace_mirror(block, cls);
	default:
		NL_SET_ERR_MSG_MOD(extack, "Action not supported");
		return -EOPNOTSUPP;
	}
}

int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_filter_block *block,
				    struct flow_cls_offload *cls)
{
	struct dpaa2_switch_mirror_entry *mirror_entry;
	struct dpaa2_switch_acl_entry *acl_entry;

	/* If this filter is an ACL one, remove it */
	acl_entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(block,
							      cls->cookie);
	if (acl_entry)
		return dpaa2_switch_acl_tbl_remove_entry(block, acl_entry);

	/* If not, then it has to be a mirror */
	mirror_entry = dpaa2_switch_mirror_find_entry_by_cookie(block,
								cls->cookie);
	if (mirror_entry)
		return dpaa2_switch_block_remove_mirror(block,
							mirror_entry);

	return 0;
}

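/* Offload a matchall filter with a trap/redirect/drop action as an ACL entry
 * that matches any packet (the key and mask are left zeroed).
 */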
static int
dpaa2_switch_cls_matchall_replace_acl(struct dpaa2_switch_filter_block *block,
				      struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct ethsw_core *ethsw = block->ethsw;
	struct dpaa2_switch_acl_entry *acl_entry;
	struct flow_action_entry *act;
	int err;

	if (dpaa2_switch_acl_tbl_is_full(block)) {
		NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
		return -ENOMEM;
	}

	acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL);
	if (!acl_entry)
		return -ENOMEM;

	act = &cls->rule->action.entries[0];
	err = dpaa2_switch_tc_parse_action_acl(ethsw, act,
					       &acl_entry->cfg.result, extack);
	if (err)
		goto free_acl_entry;

	acl_entry->prio = cls->common.prio;
	acl_entry->cookie = cls->cookie;

	err = dpaa2_switch_acl_tbl_add_entry(block, acl_entry);
	if (err)
		goto free_acl_entry;

	return 0;

free_acl_entry:
	kfree(acl_entry);

	return err;
}

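/* Offload a matchall filter with a mirred action as a port based mirror rule,
 * mirroring all the traffic received on the ports of the filter block.
 */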
static int
dpaa2_switch_cls_matchall_replace_mirror(struct dpaa2_switch_filter_block *block,
					 struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpaa2_switch_mirror_entry *mirror_entry;
	struct ethsw_core *ethsw = block->ethsw;
	struct dpaa2_switch_mirror_entry *tmp;
	struct flow_action_entry *cls_act;
	struct list_head *pos, *n;
	bool mirror_port_enabled;
	u16 if_id;

	mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
	cls_act = &cls->rule->action.entries[0];

	/* Offload rules only when the destination is a DPAA2 switch port */
	if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Destination not a DPAA2 switch port");
		return -EOPNOTSUPP;
	}
	if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);

	/* We have a single mirror port but can configure egress mirroring on
	 * all the other switch ports. We need to allow mirroring rules only
	 * when the destination port is the same.
	 */
	if (mirror_port_enabled && ethsw->mirror_port != if_id) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Multiple mirror ports not supported");
		return -EBUSY;
	}

	/* Make sure that we don't already have a mirror rule with the same
	 * configuration. One matchall rule per block is the maximum.
	 */
	list_for_each_safe(pos, n, &block->mirror_entries) {
		tmp = list_entry(pos, struct dpaa2_switch_mirror_entry, list);

		if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_ALL) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matchall mirror filter already installed");
			return -EBUSY;
		}
	}

	mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL);
	if (!mirror_entry)
		return -ENOMEM;

	mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_ALL;
	mirror_entry->cookie = cls->cookie;

	return dpaa2_switch_block_add_mirror(block, mirror_entry, if_id,
					     extack);
}

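/* Entry point for matchall filter offload, dispatching on the filter's single
 * action just like the flower case above.
 */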
int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_filter_block *block,
				      struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action_entry *act;

	if (!flow_offload_has_one_action(&cls->rule->action)) {
		NL_SET_ERR_MSG(extack, "Only singular actions are supported");
		return -EOPNOTSUPP;
	}

	act = &cls->rule->action.entries[0];
	switch (act->id) {
	case FLOW_ACTION_REDIRECT:
	case FLOW_ACTION_TRAP:
	case FLOW_ACTION_DROP:
		return dpaa2_switch_cls_matchall_replace_acl(block, cls);
	case FLOW_ACTION_MIRRED:
		return dpaa2_switch_cls_matchall_replace_mirror(block, cls);
	default:
		NL_SET_ERR_MSG_MOD(extack, "Action not supported");
		return -EOPNOTSUPP;
	}
}

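/* Replay all the mirror entries of a filter block on a port that has just
 * joined the block; unwind the ones already added on any failure.
 */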
int dpaa2_switch_block_offload_mirror(struct dpaa2_switch_filter_block *block,
				      struct ethsw_port_priv *port_priv)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_mirror_entry *tmp;
	int err;

	list_for_each_entry(tmp, &block->mirror_entries, list) {
		err = dpsw_if_add_reflection(ethsw->mc_io, 0,
					     ethsw->dpsw_handle,
					     port_priv->idx, &tmp->cfg);
		if (err)
			goto unwind_add;
	}

	return 0;

unwind_add:
	list_for_each_entry(tmp, &block->mirror_entries, list)
		dpsw_if_remove_reflection(ethsw->mc_io, 0,
					  ethsw->dpsw_handle,
					  port_priv->idx, &tmp->cfg);

	return err;
}

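/* Remove all the mirror entries of a filter block from a port that is leaving
 * the block; re-add the ones already removed on any failure.
 */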
int dpaa2_switch_block_unoffload_mirror(struct dpaa2_switch_filter_block *block,
					struct ethsw_port_priv *port_priv)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_mirror_entry *tmp;
	int err;

	list_for_each_entry(tmp, &block->mirror_entries, list) {
		err = dpsw_if_remove_reflection(ethsw->mc_io, 0,
						ethsw->dpsw_handle,
						port_priv->idx, &tmp->cfg);
		if (err)
			goto unwind_remove;
	}

	return 0;

unwind_remove:
	list_for_each_entry(tmp, &block->mirror_entries, list)
		dpsw_if_add_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
				       port_priv->idx, &tmp->cfg);

	return err;
}

int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_filter_block *block,
				      struct tc_cls_matchall_offload *cls)
{
	struct dpaa2_switch_mirror_entry *mirror_entry;
	struct dpaa2_switch_acl_entry *acl_entry;

	/* If this filter is an ACL one, remove it */
	acl_entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(block,
							      cls->cookie);
	if (acl_entry)
		return dpaa2_switch_acl_tbl_remove_entry(block,
							 acl_entry);

	/* If not, then it has to be a mirror */
	mirror_entry = dpaa2_switch_mirror_find_entry_by_cookie(block,
								cls->cookie);
	if (mirror_entry)
		return dpaa2_switch_block_remove_mirror(block,
							mirror_entry);

	return 0;
}