linux.git / drivers/net/ethernet/freescale/enetc/enetc_qos.c (Linux 6.14-rc3)
1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /* Copyright 2019 NXP */
3
4 #include "enetc.h"
5
6 #include <net/pkt_sched.h>
7 #include <linux/math64.h>
8 #include <linux/refcount.h>
9 #include <net/pkt_cls.h>
10 #include <net/tc_act/tc_gate.h>
11
12 static u16 enetc_get_max_gcl_len(struct enetc_hw *hw)
13 {
14         return enetc_rd(hw, ENETC_PTGCAPR) & ENETC_PTGCAPR_MAX_GCL_LEN_MASK;
15 }
16
17 void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed)
18 {
19         struct enetc_hw *hw = &priv->si->hw;
20         u32 old_speed = priv->speed;
21         u32 pspeed, tmp;
22
23         if (speed == old_speed)
24                 return;
25
26         switch (speed) {
27         case SPEED_1000:
28                 pspeed = ENETC_PMR_PSPEED_1000M;
29                 break;
30         case SPEED_2500:
31                 pspeed = ENETC_PMR_PSPEED_2500M;
32                 break;
33         case SPEED_100:
34                 pspeed = ENETC_PMR_PSPEED_100M;
35                 break;
36         case SPEED_10:
37         default:
38                 pspeed = ENETC_PMR_PSPEED_10M;
39         }
40
41         priv->speed = speed;
42         tmp = enetc_port_rd(hw, ENETC_PMR);
43         enetc_port_wr(hw, ENETC_PMR, (tmp & ~ENETC_PMR_PSPEED_MASK) | pspeed);
44 }
45
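/* Program the 802.1Qbv (taprio) administrative schedule: each schedule entry
 * is translated into one gate control list entry (struct gce) holding the
 * per-TC gate bitmap and the interval, and the whole list is handed to
 * hardware through a control BD together with the base time, cycle time and
 * cycle time extension.
 */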
46 static int enetc_setup_taprio(struct enetc_ndev_priv *priv,
47                               struct tc_taprio_qopt_offload *admin_conf)
48 {
49         struct enetc_hw *hw = &priv->si->hw;
50         struct enetc_cbd cbd = {.cmd = 0};
51         struct tgs_gcl_conf *gcl_config;
52         struct tgs_gcl_data *gcl_data;
53         dma_addr_t dma;
54         struct gce *gce;
55         u16 data_size;
56         u16 gcl_len;
57         void *tmp;
58         u32 tge;
59         int err;
60         int i;
61
62         /* TSD and Qbv are mutually exclusive in hardware */
63         for (i = 0; i < priv->num_tx_rings; i++)
64                 if (priv->tx_ring[i]->tsd_enable)
65                         return -EBUSY;
66
67         if (admin_conf->num_entries > enetc_get_max_gcl_len(hw))
68                 return -EINVAL;
69
70         if (admin_conf->cycle_time > U32_MAX ||
71             admin_conf->cycle_time_extension > U32_MAX)
72                 return -EINVAL;
73
74         /* Configure the (administrative) gate control list using a
75          * control buffer descriptor (BD).
76          */
77         gcl_config = &cbd.gcl_conf;
78         gcl_len = admin_conf->num_entries;
79
80         data_size = struct_size(gcl_data, entry, gcl_len);
81         tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
82                                        &dma, (void *)&gcl_data);
83         if (!tmp)
84                 return -ENOMEM;
85
86         gce = (struct gce *)(gcl_data + 1);
87
88         /* Set all gates open by default */
89         gcl_config->atc = 0xff;
90         gcl_config->acl_len = cpu_to_le16(gcl_len);
91
92         gcl_data->btl = cpu_to_le32(lower_32_bits(admin_conf->base_time));
93         gcl_data->bth = cpu_to_le32(upper_32_bits(admin_conf->base_time));
94         gcl_data->ct = cpu_to_le32(admin_conf->cycle_time);
95         gcl_data->cte = cpu_to_le32(admin_conf->cycle_time_extension);
96
97         for (i = 0; i < gcl_len; i++) {
98                 struct tc_taprio_sched_entry *temp_entry;
99                 struct gce *temp_gce = gce + i;
100
101                 temp_entry = &admin_conf->entries[i];
102
103                 temp_gce->gate = (u8)temp_entry->gate_mask;
104                 temp_gce->period = cpu_to_le32(temp_entry->interval);
105         }
106
107         cbd.status_flags = 0;
108
109         cbd.cls = BDCR_CMD_PORT_GCL;
110         cbd.status_flags = 0;
111
112         tge = enetc_rd(hw, ENETC_PTGCR);
113         enetc_wr(hw, ENETC_PTGCR, tge | ENETC_PTGCR_TGE);
114
115         err = enetc_send_cmd(priv->si, &cbd);
116         if (err)
117                 enetc_wr(hw, ENETC_PTGCR, tge & ~ENETC_PTGCR_TGE);
118
119         enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
120
121         if (err)
122                 return err;
123
124         enetc_set_ptcmsdur(hw, admin_conf->max_sdu);
125         priv->active_offloads |= ENETC_F_QBV;
126
127         return 0;
128 }
129
130 static void enetc_reset_taprio_stats(struct enetc_ndev_priv *priv)
131 {
132         int i;
133
134         for (i = 0; i < priv->num_tx_rings; i++)
135                 priv->tx_ring[i]->stats.win_drop = 0;
136 }
137
138 static void enetc_reset_taprio(struct enetc_ndev_priv *priv)
139 {
140         struct enetc_hw *hw = &priv->si->hw;
141         u32 val;
142
143         val = enetc_rd(hw, ENETC_PTGCR);
144         enetc_wr(hw, ENETC_PTGCR, val & ~ENETC_PTGCR_TGE);
145         enetc_reset_ptcmsdur(hw);
146
147         priv->active_offloads &= ~ENETC_F_QBV;
148 }
149
150 static void enetc_taprio_destroy(struct net_device *ndev)
151 {
152         struct enetc_ndev_priv *priv = netdev_priv(ndev);
153
154         enetc_reset_taprio(priv);
155         enetc_reset_tc_mqprio(ndev);
156         enetc_reset_taprio_stats(priv);
157 }
158
159 static void enetc_taprio_stats(struct net_device *ndev,
160                                struct tc_taprio_qopt_stats *stats)
161 {
162         struct enetc_ndev_priv *priv = netdev_priv(ndev);
163         u64 window_drops = 0;
164         int i;
165
166         for (i = 0; i < priv->num_tx_rings; i++)
167                 window_drops += priv->tx_ring[i]->stats.win_drop;
168
169         stats->window_drops = window_drops;
170 }
171
172 static void enetc_taprio_queue_stats(struct net_device *ndev,
173                                      struct tc_taprio_qopt_queue_stats *queue_stats)
174 {
175         struct tc_taprio_qopt_stats *stats = &queue_stats->stats;
176         struct enetc_ndev_priv *priv = netdev_priv(ndev);
177         int queue = queue_stats->queue;
178
179         stats->window_drops = priv->tx_ring[queue]->stats.win_drop;
180 }
181
182 static int enetc_taprio_replace(struct net_device *ndev,
183                                 struct tc_taprio_qopt_offload *offload)
184 {
185         struct enetc_ndev_priv *priv = netdev_priv(ndev);
186         int err;
187
188         err = enetc_setup_tc_mqprio(ndev, &offload->mqprio);
189         if (err)
190                 return err;
191
192         err = enetc_setup_taprio(priv, offload);
193         if (err)
194                 enetc_reset_tc_mqprio(ndev);
195
196         return err;
197 }
198
199 int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
200 {
201         struct tc_taprio_qopt_offload *offload = type_data;
202         int err = 0;
203
204         switch (offload->cmd) {
205         case TAPRIO_CMD_REPLACE:
206                 err = enetc_taprio_replace(ndev, offload);
207                 break;
208         case TAPRIO_CMD_DESTROY:
209                 enetc_taprio_destroy(ndev);
210                 break;
211         case TAPRIO_CMD_STATS:
212                 enetc_taprio_stats(ndev, &offload->stats);
213                 break;
214         case TAPRIO_CMD_QUEUE_STATS:
215                 enetc_taprio_queue_stats(ndev, &offload->queue_stats);
216                 break;
217         default:
218                 err = -EOPNOTSUPP;
219         }
220
221         return err;
222 }
223
224 static u32 enetc_get_cbs_enable(struct enetc_hw *hw, u8 tc)
225 {
226         return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBSE;
227 }
228
229 static u8 enetc_get_cbs_bw(struct enetc_hw *hw, u8 tc)
230 {
231         return enetc_port_rd(hw, ENETC_PTCCBSR0(tc)) & ENETC_CBS_BW_MASK;
232 }
233
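/* Offload the 802.1Qav credit-based shaper for one traffic class. Only the
 * two highest priority TCs are accepted, and the qdisc parameters are
 * expected to satisfy idleSlope - sendSlope == port rate (in kbps); the
 * bandwidth percentage programmed into PTCCBSR0 is derived as
 * idleSlope / port rate.
 */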
234 int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
235 {
236         struct enetc_ndev_priv *priv = netdev_priv(ndev);
237         struct tc_cbs_qopt_offload *cbs = type_data;
238         u32 port_transmit_rate = priv->speed;
239         u8 tc_nums = netdev_get_num_tc(ndev);
240         struct enetc_hw *hw = &priv->si->hw;
241         u32 hi_credit_bit, hi_credit_reg;
242         u32 max_interference_size;
243         u32 port_frame_max_size;
244         u8 tc = cbs->queue;
245         u8 prio_top, prio_next;
246         int bw_sum = 0;
247         u8 bw;
248
249         prio_top = tc_nums - 1;
250         prio_next = tc_nums - 2;
251
252         /* Only the highest and second highest priority TCs are supported in CBS mode */
253         if (tc != prio_top && tc != prio_next)
254                 return -EOPNOTSUPP;
255
256         if (!cbs->enable) {
257                 /* Make sure the other TCs that are numerically
258                  * lower than this TC have been disabled.
259                  */
260                 if (tc == prio_top &&
261                     enetc_get_cbs_enable(hw, prio_next)) {
262                         dev_err(&ndev->dev,
263                                 "Disable TC%d before disabling TC%d\n",
264                                 prio_next, tc);
265                         return -EINVAL;
266                 }
267
268                 enetc_port_wr(hw, ENETC_PTCCBSR1(tc), 0);
269                 enetc_port_wr(hw, ENETC_PTCCBSR0(tc), 0);
270
271                 return 0;
272         }
273
274         if (cbs->idleslope - cbs->sendslope != port_transmit_rate * 1000L ||
275             cbs->idleslope < 0 || cbs->sendslope > 0)
276                 return -EOPNOTSUPP;
277
278         port_frame_max_size = ndev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
279
280         bw = cbs->idleslope / (port_transmit_rate * 10UL);
281
282         /* Make sure the other TCs that are numerically
283          * higher than this TC have been enabled.
284          */
285         if (tc == prio_next) {
286                 if (!enetc_get_cbs_enable(hw, prio_top)) {
287                         dev_err(&ndev->dev,
288                                 "Enable TC%d before enabling TC%d\n",
289                                 prio_top, prio_next);
290                         return -EINVAL;
291                 }
292                 bw_sum += enetc_get_cbs_bw(hw, prio_top);
293         }
294
295         if (bw_sum + bw >= 100) {
296                 dev_err(&ndev->dev,
297                         "The sum of all CBS bandwidths must be less than 100\n");
298                 return -EINVAL;
299         }
300
301         enetc_port_rd(hw, ENETC_PTCMSDUR(tc));
302
303         /* For the top prio TC, the max_interference_size is maxSizedFrame.
304          *
305          * For the next prio TC, the max_interference_size is calculated as:
306          *
307          *      max_interference_size = M0 + Ma + Ra * M0 / (R0 - Ra)
308          *
309          *      - Ra: idleSlope for AVB Class A
310          *      - R0: port transmit rate
311          *      - M0: maximum sized frame for the port
312          *      - Ma: maximum sized frame for AVB Class A
313          */
314
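        /* Illustrative example (values chosen for illustration only): on a
         * 1000 Mbit/s port with 1522 byte maximum frames (M0 = Ma = 12176
         * bits) and Class A using 50% bandwidth (Ra = 500 Mbit/s), the
         * Class B interference is 12176 + 12176 + 500 * 12176 / (1000 - 500)
         * = 36528 bit times.
         */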
315         if (tc == prio_top) {
316                 max_interference_size = port_frame_max_size * 8;
317         } else {
318                 u32 m0, ma, r0, ra;
319
320                 m0 = port_frame_max_size * 8;
321                 ma = enetc_port_rd(hw, ENETC_PTCMSDUR(prio_top)) * 8;
322                 ra = enetc_get_cbs_bw(hw, prio_top) *
323                         port_transmit_rate * 10000ULL;
324                 r0 = port_transmit_rate * 1000000ULL;
325                 max_interference_size = m0 + ma +
326                         (u32)div_u64((u64)ra * m0, r0 - ra);
327         }
328
329         /* hiCredit bits are calculated as:
330          *
331          * maxSizedFrame * (idleSlope / portTxRate)
332          */
333         hi_credit_bit = max_interference_size * bw / 100;
334
335         /* The hiCredit register value is the hiCredit bits scaled by:
336          *
337          * (enetClockFrequency / portTransmitRate) * 100
338          */
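        /* Illustrative example (continuing the numbers above, with a
         * hypothetical 333 MHz ENETC system clock): hiCredit = 36528 * 50 /
         * 100 = 18264 bits, so the register value becomes
         * 333000000 * 100 * 18264 / (1000 * 1000000) ~= 608191.
         */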
339         hi_credit_reg = (u32)div_u64((priv->sysclk_freq * 100ULL) * hi_credit_bit,
340                                      port_transmit_rate * 1000000ULL);
341
342         enetc_port_wr(hw, ENETC_PTCCBSR1(tc), hi_credit_reg);
343
344         /* Set bw register and enable this traffic class */
345         enetc_port_wr(hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE);
346
347         return 0;
348 }
349
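/* Offload ETF (time-specific departure, TSD) for one TX ring. TSD and the
 * Qbv time gating above are mutually exclusive, so the request is rejected
 * while the port time gate (PTGCR[TGE]) is enabled.
 */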
350 int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
351 {
352         struct enetc_ndev_priv *priv = netdev_priv(ndev);
353         struct tc_etf_qopt_offload *qopt = type_data;
354         u8 tc_nums = netdev_get_num_tc(ndev);
355         struct enetc_hw *hw = &priv->si->hw;
356         int tc;
357
358         if (!tc_nums)
359                 return -EOPNOTSUPP;
360
361         tc = qopt->queue;
362
363         if (tc < 0 || tc >= priv->num_tx_rings)
364                 return -EINVAL;
365
366         /* TSD and Qbv are mutually exclusive in hardware */
367         if (enetc_rd(hw, ENETC_PTGCR) & ENETC_PTGCR_TGE)
368                 return -EBUSY;
369
370         priv->tx_ring[tc]->tsd_enable = qopt->enable;
371         enetc_port_wr(hw, ENETC_PTCTSDR(tc), qopt->enable ? ENETC_TSDE : 0);
372
373         return 0;
374 }
375
376 enum streamid_type {
377         STREAMID_TYPE_RESERVED = 0,
378         STREAMID_TYPE_NULL,
379         STREAMID_TYPE_SMAC,
380 };
381
382 enum streamid_vlan_tagged {
383         STREAMID_VLAN_RESERVED = 0,
384         STREAMID_VLAN_TAGGED,
385         STREAMID_VLAN_UNTAGGED,
386         STREAMID_VLAN_ALL,
387 };
388
389 #define ENETC_PSFP_WILDCARD -1
390 #define HANDLE_OFFSET 100
391
392 enum forward_type {
393         FILTER_ACTION_TYPE_PSFP = BIT(0),
394         FILTER_ACTION_TYPE_ACL = BIT(1),
395         FILTER_ACTION_TYPE_BOTH = GENMASK(1, 0),
396 };
397
398 /* Limits the output (filter) type allowed for a given set of input actions */
399 struct actions_fwd {
400         u64 actions;
401         u64 keys;       /* includes the required keys */
402         enum forward_type output;
403 };
404
405 struct psfp_streamfilter_counters {
406         u64 matching_frames_count;
407         u64 passing_frames_count;
408         u64 not_passing_frames_count;
409         u64 passing_sdu_count;
410         u64 not_passing_sdu_count;
411         u64 red_frames_count;
412 };
413
414 struct enetc_streamid {
415         u32 index;
416         union {
417                 u8 src_mac[6];
418                 u8 dst_mac[6];
419         };
420         u8 filtertype;
421         u16 vid;
422         u8 tagged;
423         s32 handle;
424 };
425
426 struct enetc_psfp_filter {
427         u32 index;
428         s32 handle;
429         s8 prio;
430         u32 maxsdu;
431         u32 gate_id;
432         s32 meter_id;
433         refcount_t refcount;
434         struct hlist_node node;
435 };
436
437 struct enetc_psfp_gate {
438         u32 index;
439         s8 init_ipv;
440         u64 basetime;
441         u64 cycletime;
442         u64 cycletimext;
443         u32 num_entries;
444         refcount_t refcount;
445         struct hlist_node node;
446         struct action_gate_entry entries[] __counted_by(num_entries);
447 };
448
449 /* Only the green color frames are enabled for now.
450  * EIR and EBS, color blind mode, the couple flag etc. will be added
451  * when the policing action offloads more parameters.
452  */
453 struct enetc_psfp_meter {
454         u32 index;
455         u32 cir;
456         u32 cbs;
457         refcount_t refcount;
458         struct hlist_node node;
459 };
460
461 #define ENETC_PSFP_FLAGS_FMI BIT(0)
462
463 struct enetc_stream_filter {
464         struct enetc_streamid sid;
465         u32 sfi_index;
466         u32 sgi_index;
467         u32 flags;
468         u32 fmi_index;
469         struct flow_stats stats;
470         struct hlist_node node;
471 };
472
473 struct enetc_psfp {
474         unsigned long dev_bitmap;
475         unsigned long *psfp_sfi_bitmap;
476         struct hlist_head stream_list;
477         struct hlist_head psfp_filter_list;
478         struct hlist_head psfp_gate_list;
479         struct hlist_head psfp_meter_list;
480         spinlock_t psfp_lock; /* spinlock for the struct enetc_psfp r/w */
481 };
482
483 static struct actions_fwd enetc_act_fwd[] = {
484         {
485                 BIT(FLOW_ACTION_GATE),
486                 BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS),
487                 FILTER_ACTION_TYPE_PSFP
488         },
489         {
490                 BIT(FLOW_ACTION_POLICE) |
491                 BIT(FLOW_ACTION_GATE),
492                 BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS),
493                 FILTER_ACTION_TYPE_PSFP
494         },
495         /* example for ACL actions */
496         {
497                 BIT(FLOW_ACTION_DROP),
498                 0,
499                 FILTER_ACTION_TYPE_ACL
500         }
501 };
502
503 static struct enetc_psfp epsfp = {
504         .dev_bitmap = 0,
505         .psfp_sfi_bitmap = NULL,
506 };
507
508 static LIST_HEAD(enetc_block_cb_list);
509
510 /* Stream Identity Entry Set Descriptor */
511 static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
512                                  struct enetc_streamid *sid,
513                                  u8 enable)
514 {
515         struct enetc_cbd cbd = {.cmd = 0};
516         struct streamid_data *si_data;
517         struct streamid_conf *si_conf;
518         dma_addr_t dma;
519         u16 data_size;
520         void *tmp;
521         int port;
522         int err;
523
524         port = enetc_pf_to_port(priv->si->pdev);
525         if (port < 0)
526                 return -EINVAL;
527
528         if (sid->index >= priv->psfp_cap.max_streamid)
529                 return -EINVAL;
530
531         if (sid->filtertype != STREAMID_TYPE_NULL &&
532             sid->filtertype != STREAMID_TYPE_SMAC)
533                 return -EOPNOTSUPP;
534
535         /* Disable the entry before enabling it */
536         cbd.index = cpu_to_le16((u16)sid->index);
537         cbd.cls = BDCR_CMD_STREAM_IDENTIFY;
538         cbd.status_flags = 0;
539
540         data_size = sizeof(struct streamid_data);
541         tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
542                                        &dma, (void *)&si_data);
543         if (!tmp)
544                 return -ENOMEM;
545
546         eth_broadcast_addr(si_data->dmac);
547         si_data->vid_vidm_tg = (ENETC_CBDR_SID_VID_MASK
548                                + ((0x3 << 14) | ENETC_CBDR_SID_VIDM));
549
550         si_conf = &cbd.sid_set;
551         /* Only one port is supported per entry; set this port */
552         si_conf->iports = cpu_to_le32(1 << port);
553         si_conf->id_type = 1;
554         si_conf->oui[2] = 0x0;
555         si_conf->oui[1] = 0x80;
556         si_conf->oui[0] = 0xC2;
557
558         err = enetc_send_cmd(priv->si, &cbd);
559         if (err)
560                 goto out;
561
562         if (!enable)
563                 goto out;
564
565         /* Enable and overwrite the entry again, in case the hardware flushed it */
566         cbd.status_flags = 0;
567
568         si_conf->en = 0x80;
569         si_conf->stream_handle = cpu_to_le32(sid->handle);
570         si_conf->iports = cpu_to_le32(1 << port);
571         si_conf->id_type = sid->filtertype;
572         si_conf->oui[2] = 0x0;
573         si_conf->oui[1] = 0x80;
574         si_conf->oui[0] = 0xC2;
575
576         memset(si_data, 0, data_size);
577
578         /* VIDM defaults to 1.
579          * VID Match: if set (b1) then the VID must match, otherwise
580          * any VID is considered a match. The VIDM setting is only used
581          * when TG is set to b01.
582          */
583         if (si_conf->id_type == STREAMID_TYPE_NULL) {
584                 ether_addr_copy(si_data->dmac, sid->dst_mac);
585                 si_data->vid_vidm_tg = (sid->vid & ENETC_CBDR_SID_VID_MASK) +
586                                        ((((u16)(sid->tagged) & 0x3) << 14)
587                                        | ENETC_CBDR_SID_VIDM);
588         } else if (si_conf->id_type == STREAMID_TYPE_SMAC) {
589                 ether_addr_copy(si_data->smac, sid->src_mac);
590                 si_data->vid_vidm_tg = (sid->vid & ENETC_CBDR_SID_VID_MASK) +
591                                        ((((u16)(sid->tagged) & 0x3) << 14)
592                                        | ENETC_CBDR_SID_VIDM);
593         }
594
595         err = enetc_send_cmd(priv->si, &cbd);
596 out:
597         enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
598
599         return err;
600 }
601
602 /* Stream Filter Instance Set Descriptor */
603 static int enetc_streamfilter_hw_set(struct enetc_ndev_priv *priv,
604                                      struct enetc_psfp_filter *sfi,
605                                      u8 enable)
606 {
607         struct enetc_cbd cbd = {.cmd = 0};
608         struct sfi_conf *sfi_config;
609         int port;
610
611         port = enetc_pf_to_port(priv->si->pdev);
612         if (port < 0)
613                 return -EINVAL;
614
615         cbd.index = cpu_to_le16(sfi->index);
616         cbd.cls = BDCR_CMD_STREAM_FILTER;
617         cbd.status_flags = 0x80;
618         cbd.length = cpu_to_le16(1);
619
620         sfi_config = &cbd.sfi_conf;
621         if (!enable)
622                 goto exit;
623
624         sfi_config->en = 0x80;
625
626         if (sfi->handle >= 0) {
627                 sfi_config->stream_handle =
628                         cpu_to_le32(sfi->handle);
629                 sfi_config->sthm |= 0x80;
630         }
631
632         sfi_config->sg_inst_table_index = cpu_to_le16(sfi->gate_id);
633         sfi_config->input_ports = cpu_to_le32(1 << port);
634
635         /* The priority value which may be matched against the
636          * frame’s priority value to determine a match for this entry.
637          */
638         if (sfi->prio >= 0)
639                 sfi_config->multi |= (sfi->prio & 0x7) | 0x8;
640
641         /* Filter Type. Identifies the contents of the MSDU/FM_INST_INDEX
642          * field as being either an MSDU value or an index into the Flow
643          * Meter Instance table.
644          */
645         if (sfi->maxsdu) {
646                 sfi_config->msdu =
647                 cpu_to_le16(sfi->maxsdu);
648                 sfi_config->multi |= 0x40;
649         }
650
651         if (sfi->meter_id >= 0) {
652                 sfi_config->fm_inst_table_index = cpu_to_le16(sfi->meter_id);
653                 sfi_config->multi |= 0x80;
654         }
655
656 exit:
657         return enetc_send_cmd(priv->si, &cbd);
658 }
659
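/* Read the PSFP stream filter counters for one SFI entry. Hardware only
 * reports matched frames and the per-stage drop counts (MSDU, stream gate,
 * flow meter); the "passing" counters are derived from those in software.
 */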
660 static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
661                                       u32 index,
662                                       struct psfp_streamfilter_counters *cnt)
663 {
664         struct enetc_cbd cbd = { .cmd = 2 };
665         struct sfi_counter_data *data_buf;
666         dma_addr_t dma;
667         u16 data_size;
668         void *tmp;
669         int err;
670
671         cbd.index = cpu_to_le16((u16)index);
672         cbd.cmd = 2;
673         cbd.cls = BDCR_CMD_STREAM_FILTER;
674         cbd.status_flags = 0;
675
676         data_size = sizeof(struct sfi_counter_data);
677
678         tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
679                                        &dma, (void *)&data_buf);
680         if (!tmp)
681                 return -ENOMEM;
682
683         err = enetc_send_cmd(priv->si, &cbd);
684         if (err)
685                 goto exit;
686
687         cnt->matching_frames_count = ((u64)data_buf->matchh << 32) +
688                                      data_buf->matchl;
689
690         cnt->not_passing_sdu_count = ((u64)data_buf->msdu_droph << 32) +
691                                      data_buf->msdu_dropl;
692
693         cnt->passing_sdu_count = cnt->matching_frames_count
694                                 - cnt->not_passing_sdu_count;
695
696         cnt->not_passing_frames_count =
697                                 ((u64)data_buf->stream_gate_droph << 32) +
698                                 data_buf->stream_gate_dropl;
699
700         cnt->passing_frames_count = cnt->matching_frames_count -
701                                     cnt->not_passing_sdu_count -
702                                     cnt->not_passing_frames_count;
703
704         cnt->red_frames_count = ((u64)data_buf->flow_meter_droph << 32) +
705                                 data_buf->flow_meter_dropl;
706
707 exit:
708         enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
709
710         return err;
711 }
712
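/* Read the current PTP time from the SI current-time registers
 * (low word first, then high word).
 */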
713 static u64 get_ptp_now(struct enetc_hw *hw)
714 {
715         u64 now_lo, now_hi, now;
716
717         now_lo = enetc_rd(hw, ENETC_SICTR0);
718         now_hi = enetc_rd(hw, ENETC_SICTR1);
719         now = now_lo | now_hi << 32;
720
721         return now;
722 }
723
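/* Round "now" up to the next cycle boundary so that the base time
 * programmed into the stream gate control list is always in the future.
 */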
724 static int get_start_ns(u64 now, u64 cycle, u64 *start)
725 {
726         u64 n;
727
728         if (!cycle)
729                 return -EFAULT;
730
731         n = div64_u64(now, cycle);
732
733         *start = (n + 1) * cycle;
734
735         return 0;
736 }
737
738 /* Stream Gate Instance Set Descriptor */
739 static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
740                                    struct enetc_psfp_gate *sgi,
741                                    u8 enable)
742 {
743         struct enetc_cbd cbd = { .cmd = 0 };
744         struct sgi_table *sgi_config;
745         struct sgcl_conf *sgcl_config;
746         struct sgcl_data *sgcl_data;
747         struct sgce *sgce;
748         dma_addr_t dma;
749         u16 data_size;
750         int err, i;
751         void *tmp;
752         u64 now;
753
754         cbd.index = cpu_to_le16(sgi->index);
755         cbd.cmd = 0;
756         cbd.cls = BDCR_CMD_STREAM_GCL;
757         cbd.status_flags = 0x80;
758
759         /* disable */
760         if (!enable)
761                 return enetc_send_cmd(priv->si, &cbd);
762
763         if (!sgi->num_entries)
764                 return 0;
765
766         if (sgi->num_entries > priv->psfp_cap.max_psfp_gatelist ||
767             !sgi->cycletime)
768                 return -EINVAL;
769
770         /* enable */
771         sgi_config = &cbd.sgi_table;
772
773         /* Keep the gate open before the gate list starts */
774         sgi_config->ocgtst = 0x80;
775
776         sgi_config->oipv = (sgi->init_ipv < 0) ?
777                                 0x0 : ((sgi->init_ipv & 0x7) | 0x8);
778
779         sgi_config->en = 0x80;
780
781         /* Basic config */
782         err = enetc_send_cmd(priv->si, &cbd);
783         if (err)
784                 return -EINVAL;
785
786         memset(&cbd, 0, sizeof(cbd));
787
788         cbd.index = cpu_to_le16(sgi->index);
789         cbd.cmd = 1;
790         cbd.cls = BDCR_CMD_STREAM_GCL;
791         cbd.status_flags = 0;
792
793         sgcl_config = &cbd.sgcl_conf;
794
795         sgcl_config->acl_len = (sgi->num_entries - 1) & 0x3;
796
797         data_size = struct_size(sgcl_data, sgcl, sgi->num_entries);
798         tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
799                                        &dma, (void *)&sgcl_data);
800         if (!tmp)
801                 return -ENOMEM;
802
803         sgce = &sgcl_data->sgcl[0];
804
805         sgcl_config->agtst = 0x80;
806
807         sgcl_data->ct = sgi->cycletime;
808         sgcl_data->cte = sgi->cycletimext;
809
810         if (sgi->init_ipv >= 0)
811                 sgcl_config->aipv = (sgi->init_ipv & 0x7) | 0x8;
812
813         for (i = 0; i < sgi->num_entries; i++) {
814                 struct action_gate_entry *from = &sgi->entries[i];
815                 struct sgce *to = &sgce[i];
816
817                 if (from->gate_state)
818                         to->multi |= 0x10;
819
820                 if (from->ipv >= 0)
821                         to->multi |= ((from->ipv & 0x7) << 5) | 0x08;
822
823                 if (from->maxoctets >= 0) {
824                         to->multi |= 0x01;
825                         to->msdu[0] = from->maxoctets & 0xFF;
826                         to->msdu[1] = (from->maxoctets >> 8) & 0xFF;
827                         to->msdu[2] = (from->maxoctets >> 16) & 0xFF;
828                 }
829
830                 to->interval = from->interval;
831         }
832
833         /* If the base time is in the past, calculate a start time */
834         now = get_ptp_now(&priv->si->hw);
835
836         if (sgi->basetime < now) {
837                 u64 start;
838
839                 err = get_start_ns(now, sgi->cycletime, &start);
840                 if (err)
841                         goto exit;
842                 sgcl_data->btl = lower_32_bits(start);
843                 sgcl_data->bth = upper_32_bits(start);
844         } else {
845                 u32 hi, lo;
846
847                 hi = upper_32_bits(sgi->basetime);
848                 lo = lower_32_bits(sgi->basetime);
849                 sgcl_data->bth = hi;
850                 sgcl_data->btl = lo;
851         }
852
853         err = enetc_send_cmd(priv->si, &cbd);
854
855 exit:
856         enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
857         return err;
858 }
859
860 static int enetc_flowmeter_hw_set(struct enetc_ndev_priv *priv,
861                                   struct enetc_psfp_meter *fmi,
862                                   u8 enable)
863 {
864         struct enetc_cbd cbd = { .cmd = 0 };
865         struct fmi_conf *fmi_config;
866         u64 temp = 0;
867
868         cbd.index = cpu_to_le16((u16)fmi->index);
869         cbd.cls = BDCR_CMD_FLOW_METER;
870         cbd.status_flags = 0x80;
871
872         if (!enable)
873                 return enetc_send_cmd(priv->si, &cbd);
874
875         fmi_config = &cbd.fmi_conf;
876         fmi_config->en = 0x80;
877
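        /* Convert the committed rate from bytes per second to the hardware
         * CIR encoding: the rate in bit/s scaled by 1/3.725, as implied by
         * the constants below.
         */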
878         if (fmi->cir) {
879                 temp = (u64)8000 * fmi->cir;
880                 temp = div_u64(temp, 3725);
881         }
882
883         fmi_config->cir = cpu_to_le32((u32)temp);
884         fmi_config->cbs = cpu_to_le32(fmi->cbs);
885
886         /* EIR and EBS are disabled by default */
887         fmi_config->eir = 0;
888         fmi_config->ebs = 0;
889
890         /* Defaults:
891          * mark red disabled
892          * drop on yellow disabled
893          * color mode disabled
894          * couple flag disabled
895          */
896         fmi_config->conf = 0;
897
898         return enetc_send_cmd(priv->si, &cbd);
899 }
900
901 static struct enetc_stream_filter *enetc_get_stream_by_index(u32 index)
902 {
903         struct enetc_stream_filter *f;
904
905         hlist_for_each_entry(f, &epsfp.stream_list, node)
906                 if (f->sid.index == index)
907                         return f;
908
909         return NULL;
910 }
911
912 static struct enetc_psfp_gate *enetc_get_gate_by_index(u32 index)
913 {
914         struct enetc_psfp_gate *g;
915
916         hlist_for_each_entry(g, &epsfp.psfp_gate_list, node)
917                 if (g->index == index)
918                         return g;
919
920         return NULL;
921 }
922
923 static struct enetc_psfp_filter *enetc_get_filter_by_index(u32 index)
924 {
925         struct enetc_psfp_filter *s;
926
927         hlist_for_each_entry(s, &epsfp.psfp_filter_list, node)
928                 if (s->index == index)
929                         return s;
930
931         return NULL;
932 }
933
934 static struct enetc_psfp_meter *enetc_get_meter_by_index(u32 index)
935 {
936         struct enetc_psfp_meter *m;
937
938         hlist_for_each_entry(m, &epsfp.psfp_meter_list, node)
939                 if (m->index == index)
940                         return m;
941
942         return NULL;
943 }
944
945 static struct enetc_psfp_filter
946         *enetc_psfp_check_sfi(struct enetc_psfp_filter *sfi)
947 {
948         struct enetc_psfp_filter *s;
949
950         hlist_for_each_entry(s, &epsfp.psfp_filter_list, node)
951                 if (s->gate_id == sfi->gate_id &&
952                     s->prio == sfi->prio &&
953                     s->maxsdu == sfi->maxsdu &&
954                     s->meter_id == sfi->meter_id)
955                         return s;
956
957         return NULL;
958 }
959
960 static int enetc_get_free_index(struct enetc_ndev_priv *priv)
961 {
962         u32 max_size = priv->psfp_cap.max_psfp_filter;
963         unsigned long index;
964
965         index = find_first_zero_bit(epsfp.psfp_sfi_bitmap, max_size);
966         if (index == max_size)
967                 return -1;
968
969         return index;
970 }
971
972 static void stream_filter_unref(struct enetc_ndev_priv *priv, u32 index)
973 {
974         struct enetc_psfp_filter *sfi;
975         u8 z;
976
977         sfi = enetc_get_filter_by_index(index);
978         WARN_ON(!sfi);
979         z = refcount_dec_and_test(&sfi->refcount);
980
981         if (z) {
982                 enetc_streamfilter_hw_set(priv, sfi, false);
983                 hlist_del(&sfi->node);
984                 kfree(sfi);
985                 clear_bit(index, epsfp.psfp_sfi_bitmap);
986         }
987 }
988
989 static void stream_gate_unref(struct enetc_ndev_priv *priv, u32 index)
990 {
991         struct enetc_psfp_gate *sgi;
992         u8 z;
993
994         sgi = enetc_get_gate_by_index(index);
995         WARN_ON(!sgi);
996         z = refcount_dec_and_test(&sgi->refcount);
997         if (z) {
998                 enetc_streamgate_hw_set(priv, sgi, false);
999                 hlist_del(&sgi->node);
1000                 kfree(sgi);
1001         }
1002 }
1003
1004 static void flow_meter_unref(struct enetc_ndev_priv *priv, u32 index)
1005 {
1006         struct enetc_psfp_meter *fmi;
1007         u8 z;
1008
1009         fmi = enetc_get_meter_by_index(index);
1010         WARN_ON(!fmi);
1011         z = refcount_dec_and_test(&fmi->refcount);
1012         if (z) {
1013                 enetc_flowmeter_hw_set(priv, fmi, false);
1014                 hlist_del(&fmi->node);
1015                 kfree(fmi);
1016         }
1017 }
1018
1019 static void remove_one_chain(struct enetc_ndev_priv *priv,
1020                              struct enetc_stream_filter *filter)
1021 {
1022         if (filter->flags & ENETC_PSFP_FLAGS_FMI)
1023                 flow_meter_unref(priv, filter->fmi_index);
1024
1025         stream_gate_unref(priv, filter->sgi_index);
1026         stream_filter_unref(priv, filter->sfi_index);
1027
1028         hlist_del(&filter->node);
1029         kfree(filter);
1030 }
1031
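/* Program the PSFP tables in dependency order: stream identification first,
 * then the stream filter, stream gate and flow meter. On failure the
 * already-programmed entries are rolled back in reverse order.
 */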
1032 static int enetc_psfp_hw_set(struct enetc_ndev_priv *priv,
1033                              struct enetc_streamid *sid,
1034                              struct enetc_psfp_filter *sfi,
1035                              struct enetc_psfp_gate *sgi,
1036                              struct enetc_psfp_meter *fmi)
1037 {
1038         int err;
1039
1040         err = enetc_streamid_hw_set(priv, sid, true);
1041         if (err)
1042                 return err;
1043
1044         if (sfi) {
1045                 err = enetc_streamfilter_hw_set(priv, sfi, true);
1046                 if (err)
1047                         goto revert_sid;
1048         }
1049
1050         err = enetc_streamgate_hw_set(priv, sgi, true);
1051         if (err)
1052                 goto revert_sfi;
1053
1054         if (fmi) {
1055                 err = enetc_flowmeter_hw_set(priv, fmi, true);
1056                 if (err)
1057                         goto revert_sgi;
1058         }
1059
1060         return 0;
1061
1062 revert_sgi:
1063         enetc_streamgate_hw_set(priv, sgi, false);
1064 revert_sfi:
1065         if (sfi)
1066                 enetc_streamfilter_hw_set(priv, sfi, false);
1067 revert_sid:
1068         enetc_streamid_hw_set(priv, sid, false);
1069         return err;
1070 }
1071
1072 static struct actions_fwd *
1073 enetc_check_flow_actions(u64 acts, unsigned long long inputkeys)
1074 {
1075         int i;
1076
1077         for (i = 0; i < ARRAY_SIZE(enetc_act_fwd); i++)
1078                 if (acts == enetc_act_fwd[i].actions &&
1079                     inputkeys & enetc_act_fwd[i].keys)
1080                         return &enetc_act_fwd[i];
1081
1082         return NULL;
1083 }
1084
1085 static int enetc_psfp_policer_validate(const struct flow_action *action,
1086                                        const struct flow_action_entry *act,
1087                                        struct netlink_ext_ack *extack)
1088 {
1089         if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
1090                 NL_SET_ERR_MSG_MOD(extack,
1091                                    "Offload not supported when exceed action is not drop");
1092                 return -EOPNOTSUPP;
1093         }
1094
1095         if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
1096             act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
1097                 NL_SET_ERR_MSG_MOD(extack,
1098                                    "Offload not supported when conform action is not pipe or ok");
1099                 return -EOPNOTSUPP;
1100         }
1101
1102         if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
1103             !flow_action_is_last_entry(action, act)) {
1104                 NL_SET_ERR_MSG_MOD(extack,
1105                                    "Offload not supported when conform action is ok, but action is not last");
1106                 return -EOPNOTSUPP;
1107         }
1108
1109         if (act->police.peakrate_bytes_ps ||
1110             act->police.avrate || act->police.overhead) {
1111                 NL_SET_ERR_MSG_MOD(extack,
1112                                    "Offload not supported when peakrate/avrate/overhead is configured");
1113                 return -EOPNOTSUPP;
1114         }
1115
1116         if (act->police.rate_pkt_ps) {
1117                 NL_SET_ERR_MSG_MOD(extack,
1118                                    "QoS offload does not support packets per second");
1119                 return -EOPNOTSUPP;
1120         }
1121
1122         return 0;
1123 }
1124
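/* Translate one flower rule into PSFP entries: the chain index selects the
 * stream identification entry, the ETH_ADDRS match picks NULL (DMAC) or
 * SMAC stream identification, the gate action becomes a stream gate
 * instance, and an optional police action provides the flow meter and the
 * per-stream max SDU. Identical stream filters are shared via refcounting.
 */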
1125 static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
1126                                       struct flow_cls_offload *f)
1127 {
1128         struct flow_action_entry *entryg = NULL, *entryp = NULL;
1129         struct flow_rule *rule = flow_cls_offload_flow_rule(f);
1130         struct netlink_ext_ack *extack = f->common.extack;
1131         struct enetc_stream_filter *filter, *old_filter;
1132         struct enetc_psfp_meter *fmi = NULL, *old_fmi;
1133         struct enetc_psfp_filter *sfi, *old_sfi;
1134         struct enetc_psfp_gate *sgi, *old_sgi;
1135         struct flow_action_entry *entry;
1136         struct action_gate_entry *e;
1137         u8 sfi_overwrite = 0;
1138         int entries_size;
1139         int i, err;
1140
1141         if (f->common.chain_index >= priv->psfp_cap.max_streamid) {
1142                 NL_SET_ERR_MSG_MOD(extack, "No Stream Identify resource!");
1143                 return -ENOSPC;
1144         }
1145
1146         flow_action_for_each(i, entry, &rule->action)
1147                 if (entry->id == FLOW_ACTION_GATE)
1148                         entryg = entry;
1149                 else if (entry->id == FLOW_ACTION_POLICE)
1150                         entryp = entry;
1151
1152         /* Not supported without a gate action */
1153         if (!entryg)
1154                 return -EINVAL;
1155
1156         filter = kzalloc(sizeof(*filter), GFP_KERNEL);
1157         if (!filter)
1158                 return -ENOMEM;
1159
1160         filter->sid.index = f->common.chain_index;
1161
1162         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
1163                 struct flow_match_eth_addrs match;
1164
1165                 flow_rule_match_eth_addrs(rule, &match);
1166
1167                 if (!is_zero_ether_addr(match.mask->dst) &&
1168                     !is_zero_ether_addr(match.mask->src)) {
1169                         NL_SET_ERR_MSG_MOD(extack,
1170                                            "Cannot match on both source and destination MAC");
1171                         err = -EINVAL;
1172                         goto free_filter;
1173                 }
1174
1175                 if (!is_zero_ether_addr(match.mask->dst)) {
1176                         if (!is_broadcast_ether_addr(match.mask->dst)) {
1177                                 NL_SET_ERR_MSG_MOD(extack,
1178                                                    "Masked matching on destination MAC not supported");
1179                                 err = -EINVAL;
1180                                 goto free_filter;
1181                         }
1182                         ether_addr_copy(filter->sid.dst_mac, match.key->dst);
1183                         filter->sid.filtertype = STREAMID_TYPE_NULL;
1184                 }
1185
1186                 if (!is_zero_ether_addr(match.mask->src)) {
1187                         if (!is_broadcast_ether_addr(match.mask->src)) {
1188                                 NL_SET_ERR_MSG_MOD(extack,
1189                                                    "Masked matching on source MAC not supported");
1190                                 err = -EINVAL;
1191                                 goto free_filter;
1192                         }
1193                         ether_addr_copy(filter->sid.src_mac, match.key->src);
1194                         filter->sid.filtertype = STREAMID_TYPE_SMAC;
1195                 }
1196         } else {
1197                 NL_SET_ERR_MSG_MOD(extack, "Unsupported, must include ETH_ADDRS");
1198                 err = -EINVAL;
1199                 goto free_filter;
1200         }
1201
1202         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
1203                 struct flow_match_vlan match;
1204
1205                 flow_rule_match_vlan(rule, &match);
1206                 if (match.mask->vlan_priority) {
1207                         if (match.mask->vlan_priority !=
1208                             (VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)) {
1209                                 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
1210                                 err = -EINVAL;
1211                                 goto free_filter;
1212                         }
1213                 }
1214
1215                 if (match.mask->vlan_id) {
1216                         if (match.mask->vlan_id != VLAN_VID_MASK) {
1217                                 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN id");
1218                                 err = -EINVAL;
1219                                 goto free_filter;
1220                         }
1221
1222                         filter->sid.vid = match.key->vlan_id;
1223                         if (!filter->sid.vid)
1224                                 filter->sid.tagged = STREAMID_VLAN_UNTAGGED;
1225                         else
1226                                 filter->sid.tagged = STREAMID_VLAN_TAGGED;
1227                 }
1228         } else {
1229                 filter->sid.tagged = STREAMID_VLAN_ALL;
1230         }
1231
1232         /* parsing gate action */
1233         if (entryg->hw_index >= priv->psfp_cap.max_psfp_gate) {
1234                 NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!");
1235                 err = -ENOSPC;
1236                 goto free_filter;
1237         }
1238
1239         if (entryg->gate.num_entries >= priv->psfp_cap.max_psfp_gatelist) {
1240                 NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!");
1241                 err = -ENOSPC;
1242                 goto free_filter;
1243         }
1244
1245         entries_size = struct_size(sgi, entries, entryg->gate.num_entries);
1246         sgi = kzalloc(entries_size, GFP_KERNEL);
1247         if (!sgi) {
1248                 err = -ENOMEM;
1249                 goto free_filter;
1250         }
1251
1252         refcount_set(&sgi->refcount, 1);
1253         sgi->index = entryg->hw_index;
1254         sgi->init_ipv = entryg->gate.prio;
1255         sgi->basetime = entryg->gate.basetime;
1256         sgi->cycletime = entryg->gate.cycletime;
1257         sgi->num_entries = entryg->gate.num_entries;
1258
1259         e = sgi->entries;
1260         for (i = 0; i < entryg->gate.num_entries; i++) {
1261                 e[i].gate_state = entryg->gate.entries[i].gate_state;
1262                 e[i].interval = entryg->gate.entries[i].interval;
1263                 e[i].ipv = entryg->gate.entries[i].ipv;
1264                 e[i].maxoctets = entryg->gate.entries[i].maxoctets;
1265         }
1266
1267         filter->sgi_index = sgi->index;
1268
1269         sfi = kzalloc(sizeof(*sfi), GFP_KERNEL);
1270         if (!sfi) {
1271                 err = -ENOMEM;
1272                 goto free_gate;
1273         }
1274
1275         refcount_set(&sfi->refcount, 1);
1276         sfi->gate_id = sgi->index;
1277         sfi->meter_id = ENETC_PSFP_WILDCARD;
1278
1279         /* Flow meter and max frame size */
1280         if (entryp) {
1281                 err = enetc_psfp_policer_validate(&rule->action, entryp, extack);
1282                 if (err)
1283                         goto free_sfi;
1284
1285                 if (entryp->police.burst) {
1286                         fmi = kzalloc(sizeof(*fmi), GFP_KERNEL);
1287                         if (!fmi) {
1288                                 err = -ENOMEM;
1289                                 goto free_sfi;
1290                         }
1291                         refcount_set(&fmi->refcount, 1);
1292                         fmi->cir = entryp->police.rate_bytes_ps;
1293                         fmi->cbs = entryp->police.burst;
1294                         fmi->index = entryp->hw_index;
1295                         filter->flags |= ENETC_PSFP_FLAGS_FMI;
1296                         filter->fmi_index = fmi->index;
1297                         sfi->meter_id = fmi->index;
1298                 }
1299
1300                 if (entryp->police.mtu)
1301                         sfi->maxsdu = entryp->police.mtu;
1302         }
1303
1304         /* prio refers to the flower filter priority */
1305         if (f->common.prio && f->common.prio <= BIT(3))
1306                 sfi->prio = f->common.prio - 1;
1307         else
1308                 sfi->prio = ENETC_PSFP_WILDCARD;
1309
1310         old_sfi = enetc_psfp_check_sfi(sfi);
1311         if (!old_sfi) {
1312                 int index;
1313
1314                 index = enetc_get_free_index(priv);
1315                 if (index < 0) {
1316                         NL_SET_ERR_MSG_MOD(extack, "No Stream Filter resource!");
1317                         err = -ENOSPC;
1318                         goto free_fmi;
1319                 }
1320
1321                 sfi->index = index;
1322                 sfi->handle = index + HANDLE_OFFSET;
1323                 /* Update the stream filter handle also */
1324                 filter->sid.handle = sfi->handle;
1325                 filter->sfi_index = sfi->index;
1326                 sfi_overwrite = 0;
1327         } else {
1328                 filter->sfi_index = old_sfi->index;
1329                 filter->sid.handle = old_sfi->handle;
1330                 sfi_overwrite = 1;
1331         }
1332
1333         err = enetc_psfp_hw_set(priv, &filter->sid,
1334                                 sfi_overwrite ? NULL : sfi, sgi, fmi);
1335         if (err)
1336                 goto free_fmi;
1337
1338         spin_lock(&epsfp.psfp_lock);
1339         if (filter->flags & ENETC_PSFP_FLAGS_FMI) {
1340                 old_fmi = enetc_get_meter_by_index(filter->fmi_index);
1341                 if (old_fmi) {
1342                         fmi->refcount = old_fmi->refcount;
1343                         refcount_set(&fmi->refcount,
1344                                      refcount_read(&old_fmi->refcount) + 1);
1345                         hlist_del(&old_fmi->node);
1346                         kfree(old_fmi);
1347                 }
1348                 hlist_add_head(&fmi->node, &epsfp.psfp_meter_list);
1349         }
1350
1351         /* Remove the old node if it exists and replace it with the new node */
1352         old_sgi = enetc_get_gate_by_index(filter->sgi_index);
1353         if (old_sgi) {
1354                 refcount_set(&sgi->refcount,
1355                              refcount_read(&old_sgi->refcount) + 1);
1356                 hlist_del(&old_sgi->node);
1357                 kfree(old_sgi);
1358         }
1359
1360         hlist_add_head(&sgi->node, &epsfp.psfp_gate_list);
1361
1362         if (!old_sfi) {
1363                 hlist_add_head(&sfi->node, &epsfp.psfp_filter_list);
1364                 set_bit(sfi->index, epsfp.psfp_sfi_bitmap);
1365         } else {
1366                 kfree(sfi);
1367                 refcount_inc(&old_sfi->refcount);
1368         }
1369
1370         old_filter = enetc_get_stream_by_index(filter->sid.index);
1371         if (old_filter)
1372                 remove_one_chain(priv, old_filter);
1373
1374         filter->stats.lastused = jiffies;
1375         hlist_add_head(&filter->node, &epsfp.stream_list);
1376
1377         spin_unlock(&epsfp.psfp_lock);
1378
1379         return 0;
1380
1381 free_fmi:
1382         kfree(fmi);
1383 free_sfi:
1384         kfree(sfi);
1385 free_gate:
1386         kfree(sgi);
1387 free_filter:
1388         kfree(filter);
1389
1390         return err;
1391 }
1392
1393 static int enetc_config_clsflower(struct enetc_ndev_priv *priv,
1394                                   struct flow_cls_offload *cls_flower)
1395 {
1396         struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
1397         struct netlink_ext_ack *extack = cls_flower->common.extack;
1398         struct flow_dissector *dissector = rule->match.dissector;
1399         struct flow_action *action = &rule->action;
1400         struct flow_action_entry *entry;
1401         struct actions_fwd *fwd;
1402         u64 actions = 0;
1403         int i, err;
1404
1405         if (!flow_action_has_entries(action)) {
1406                 NL_SET_ERR_MSG_MOD(extack, "At least one action is needed");
1407                 return -EINVAL;
1408         }
1409
1410         flow_action_for_each(i, entry, action)
1411                 actions |= BIT(entry->id);
1412
1413         fwd = enetc_check_flow_actions(actions, dissector->used_keys);
1414         if (!fwd) {
1415                 NL_SET_ERR_MSG_MOD(extack, "Unsupported filter type!");
1416                 return -EOPNOTSUPP;
1417         }
1418
1419         if (fwd->output & FILTER_ACTION_TYPE_PSFP) {
1420                 err = enetc_psfp_parse_clsflower(priv, cls_flower);
1421                 if (err) {
1422                         NL_SET_ERR_MSG_MOD(extack, "Invalid PSFP inputs");
1423                         return err;
1424                 }
1425         } else {
1426                 NL_SET_ERR_MSG_MOD(extack, "Unsupported actions");
1427                 return -EOPNOTSUPP;
1428         }
1429
1430         return 0;
1431 }
1432
1433 static int enetc_psfp_destroy_clsflower(struct enetc_ndev_priv *priv,
1434                                         struct flow_cls_offload *f)
1435 {
1436         struct enetc_stream_filter *filter;
1437         struct netlink_ext_ack *extack = f->common.extack;
1438         int err;
1439
1440         if (f->common.chain_index >= priv->psfp_cap.max_streamid) {
1441                 NL_SET_ERR_MSG_MOD(extack, "No Stream Identify resource!");
1442                 return -ENOSPC;
1443         }
1444
1445         filter = enetc_get_stream_by_index(f->common.chain_index);
1446         if (!filter)
1447                 return -EINVAL;
1448
1449         err = enetc_streamid_hw_set(priv, &filter->sid, false);
1450         if (err)
1451                 return err;
1452
1453         remove_one_chain(priv, filter);
1454
1455         return 0;
1456 }
1457
1458 static int enetc_destroy_clsflower(struct enetc_ndev_priv *priv,
1459                                    struct flow_cls_offload *f)
1460 {
1461         return enetc_psfp_destroy_clsflower(priv, f);
1462 }
1463
1464 static int enetc_psfp_get_stats(struct enetc_ndev_priv *priv,
1465                                 struct flow_cls_offload *f)
1466 {
1467         struct psfp_streamfilter_counters counters = {};
1468         struct enetc_stream_filter *filter;
1469         struct flow_stats stats = {};
1470         int err;
1471
1472         filter = enetc_get_stream_by_index(f->common.chain_index);
1473         if (!filter)
1474                 return -EINVAL;
1475
1476         err = enetc_streamcounter_hw_get(priv, filter->sfi_index, &counters);
1477         if (err)
1478                 return -EINVAL;
1479
1480         spin_lock(&epsfp.psfp_lock);
1481         stats.pkts = counters.matching_frames_count +
1482                      counters.not_passing_sdu_count -
1483                      filter->stats.pkts;
1484         stats.drops = counters.not_passing_frames_count +
1485                       counters.not_passing_sdu_count +
1486                       counters.red_frames_count -
1487                       filter->stats.drops;
1488         stats.lastused = filter->stats.lastused;
1489         filter->stats.pkts += stats.pkts;
1490         filter->stats.drops += stats.drops;
1491         spin_unlock(&epsfp.psfp_lock);
1492
1493         flow_stats_update(&f->stats, 0x0, stats.pkts, stats.drops,
1494                           stats.lastused, FLOW_ACTION_HW_STATS_DELAYED);
1495
1496         return 0;
1497 }
1498
1499 static int enetc_setup_tc_cls_flower(struct enetc_ndev_priv *priv,
1500                                      struct flow_cls_offload *cls_flower)
1501 {
1502         switch (cls_flower->command) {
1503         case FLOW_CLS_REPLACE:
1504                 return enetc_config_clsflower(priv, cls_flower);
1505         case FLOW_CLS_DESTROY:
1506                 return enetc_destroy_clsflower(priv, cls_flower);
1507         case FLOW_CLS_STATS:
1508                 return enetc_psfp_get_stats(priv, cls_flower);
1509         default:
1510                 return -EOPNOTSUPP;
1511         }
1512 }
1513
1514 static inline void clean_psfp_sfi_bitmap(void)
1515 {
1516         bitmap_free(epsfp.psfp_sfi_bitmap);
1517         epsfp.psfp_sfi_bitmap = NULL;
1518 }
1519
1520 static void clean_stream_list(void)
1521 {
1522         struct enetc_stream_filter *s;
1523         struct hlist_node *tmp;
1524
1525         hlist_for_each_entry_safe(s, tmp, &epsfp.stream_list, node) {
1526                 hlist_del(&s->node);
1527                 kfree(s);
1528         }
1529 }
1530
1531 static void clean_sfi_list(void)
1532 {
1533         struct enetc_psfp_filter *sfi;
1534         struct hlist_node *tmp;
1535
1536         hlist_for_each_entry_safe(sfi, tmp, &epsfp.psfp_filter_list, node) {
1537                 hlist_del(&sfi->node);
1538                 kfree(sfi);
1539         }
1540 }
1541
1542 static void clean_sgi_list(void)
1543 {
1544         struct enetc_psfp_gate *sgi;
1545         struct hlist_node *tmp;
1546
1547         hlist_for_each_entry_safe(sgi, tmp, &epsfp.psfp_gate_list, node) {
1548                 hlist_del(&sgi->node);
1549                 kfree(sgi);
1550         }
1551 }
1552
1553 static void clean_psfp_all(void)
1554 {
1555         /* Disable all list nodes and free all memory */
1556         clean_sfi_list();
1557         clean_sgi_list();
1558         clean_stream_list();
1559         epsfp.dev_bitmap = 0;
1560         clean_psfp_sfi_bitmap();
1561 }
1562
1563 int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
1564                             void *cb_priv)
1565 {
1566         struct net_device *ndev = cb_priv;
1567
1568         if (!tc_can_offload(ndev))
1569                 return -EOPNOTSUPP;
1570
1571         switch (type) {
1572         case TC_SETUP_CLSFLOWER:
1573                 return enetc_setup_tc_cls_flower(netdev_priv(ndev), type_data);
1574         default:
1575                 return -EOPNOTSUPP;
1576         }
1577 }
1578
1579 int enetc_set_psfp(struct net_device *ndev, bool en)
1580 {
1581         struct enetc_ndev_priv *priv = netdev_priv(ndev);
1582         int err;
1583
1584         if (en) {
1585                 err = enetc_psfp_enable(priv);
1586                 if (err)
1587                         return err;
1588
1589                 priv->active_offloads |= ENETC_F_QCI;
1590                 return 0;
1591         }
1592
1593         err = enetc_psfp_disable(priv);
1594         if (err)
1595                 return err;
1596
1597         priv->active_offloads &= ~ENETC_F_QCI;
1598
1599         return 0;
1600 }
1601
1602 int enetc_psfp_init(struct enetc_ndev_priv *priv)
1603 {
1604         if (epsfp.psfp_sfi_bitmap)
1605                 return 0;
1606
1607         epsfp.psfp_sfi_bitmap = bitmap_zalloc(priv->psfp_cap.max_psfp_filter,
1608                                               GFP_KERNEL);
1609         if (!epsfp.psfp_sfi_bitmap)
1610                 return -ENOMEM;
1611
1612         spin_lock_init(&epsfp.psfp_lock);
1613
1614         if (list_empty(&enetc_block_cb_list))
1615                 epsfp.dev_bitmap = 0;
1616
1617         return 0;
1618 }
1619
1620 int enetc_psfp_clean(struct enetc_ndev_priv *priv)
1621 {
1622         if (!list_empty(&enetc_block_cb_list))
1623                 return -EBUSY;
1624
1625         clean_psfp_all();
1626
1627         return 0;
1628 }
1629
1630 int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data)
1631 {
1632         struct enetc_ndev_priv *priv = netdev_priv(ndev);
1633         struct flow_block_offload *f = type_data;
1634         int port, err;
1635
1636         err = flow_block_cb_setup_simple(f, &enetc_block_cb_list,
1637                                          enetc_setup_tc_block_cb,
1638                                          ndev, ndev, true);
1639         if (err)
1640                 return err;
1641
1642         switch (f->command) {
1643         case FLOW_BLOCK_BIND:
1644                 port = enetc_pf_to_port(priv->si->pdev);
1645                 if (port < 0)
1646                         return -EINVAL;
1647
1648                 set_bit(port, &epsfp.dev_bitmap);
1649                 break;
1650         case FLOW_BLOCK_UNBIND:
1651                 port = enetc_pf_to_port(priv->si->pdev);
1652                 if (port < 0)
1653                         return -EINVAL;
1654
1655                 clear_bit(port, &epsfp.dev_bitmap);
1656                 if (!epsfp.dev_bitmap)
1657                         clean_psfp_all();
1658                 break;
1659         }
1660
1661         return 0;
1662 }
1663
1664 int enetc_qos_query_caps(struct net_device *ndev, void *type_data)
1665 {
1666         struct enetc_ndev_priv *priv = netdev_priv(ndev);
1667         struct tc_query_caps_base *base = type_data;
1668         struct enetc_si *si = priv->si;
1669
1670         switch (base->type) {
1671         case TC_SETUP_QDISC_MQPRIO: {
1672                 struct tc_mqprio_caps *caps = base->caps;
1673
1674                 caps->validate_queue_counts = true;
1675
1676                 return 0;
1677         }
1678         case TC_SETUP_QDISC_TAPRIO: {
1679                 struct tc_taprio_caps *caps = base->caps;
1680
1681                 if (si->hw_features & ENETC_SI_F_QBV)
1682                         caps->supports_queue_max_sdu = true;
1683
1684                 return 0;
1685         }
1686         default:
1687                 return -EOPNOTSUPP;
1688         }
1689 }