drivers/net/ethernet/microchip/sparx5/sparx5_qos.c
// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
 */

#include <net/pkt_cls.h>

#include "sparx5_main.h"
#include "sparx5_qos.h"

/* Calculate new base_time based on cycle_time.
 *
 * The hardware requires a base_time that is always in the future.
 * We define threshold_time as current_time + (2 * cycle_time).
 * If base_time is below threshold_time this function recalculates it to be in
 * the interval:
 * threshold_time <= base_time < (threshold_time + cycle_time)
 *
 * A very simple algorithm could be like this:
 * new_base_time = org_base_time + N * cycle_time
 * using the lowest N so that (new_base_time >= threshold_time)
 */
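/* Worked example with illustrative numbers (not taken from real hardware):
 * cycle_time = 1000000 ns and current_time = 10000000 ns give
 * threshold_time = 12000000 ns. With org_base_time = 3500000 ns:
 * nr_of_cycles = 8, so nr_of_cycles_p2 starts at 8. The power-of-2 loop
 * below leaves new_time at 11500000 ns, the largest
 * org_base_time + N * cycle_time still below threshold_time, and the final
 * "+ cycle_time" yields 12500000 ns, which lies in
 * [threshold_time, threshold_time + cycle_time).
 */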
void sparx5_new_base_time(struct sparx5 *sparx5, const u32 cycle_time,
                          const ktime_t org_base_time, ktime_t *new_base_time)
{
        ktime_t current_time, threshold_time, new_time;
        struct timespec64 ts;
        u64 nr_of_cycles_p2;
        u64 nr_of_cycles;
        u64 diff_time;

        new_time = org_base_time;

        sparx5_ptp_gettime64(&sparx5->phc[SPARX5_PHC_PORT].info, &ts);
        current_time = timespec64_to_ktime(ts);
        threshold_time = current_time + (2 * cycle_time);

        if (new_time >= threshold_time) {
                *new_base_time = new_time;
                return;
        }

        diff_time = threshold_time - new_time;
        nr_of_cycles = div_u64(diff_time, cycle_time);
        nr_of_cycles_p2 = 1; /* Use 2^0 as start value */

        /* Calculate the smallest power of 2 (nr_of_cycles_p2)
         * that is larger than nr_of_cycles.
         */
        while (nr_of_cycles_p2 < nr_of_cycles)
                nr_of_cycles_p2 <<= 1; /* Next (higher) power of 2 */

        /* Add as big chunks (power of 2 * cycle_time)
         * as possible for each power of 2
         */
        while (nr_of_cycles_p2) {
                if (new_time < threshold_time) {
                        new_time += cycle_time * nr_of_cycles_p2;
                        while (new_time < threshold_time)
                                new_time += cycle_time * nr_of_cycles_p2;
                        new_time -= cycle_time * nr_of_cycles_p2;
                }
                nr_of_cycles_p2 >>= 1; /* Next (lower) power of 2 */
        }
        new_time += cycle_time;
        *new_base_time = new_time;
}

/* Max rates for leak groups, in kbps */
static const u32 spx5_hsch_max_group_rate[SPX5_HSCH_LEAK_GRP_CNT] = {
        1048568, /*  1.049 Gbps */
        2621420, /*  2.621 Gbps */
        10485680, /* 10.486 Gbps */
        26214200 /* 26.214 Gbps */
};

static struct sparx5_layer layers[SPX5_HSCH_LAYER_CNT];

static u32 sparx5_lg_get_leak_time(struct sparx5 *sparx5, u32 layer, u32 group)
{
        u32 value;

        value = spx5_rd(sparx5, HSCH_HSCH_TIMER_CFG(layer, group));
        return HSCH_HSCH_TIMER_CFG_LEAK_TIME_GET(value);
}

static void sparx5_lg_set_leak_time(struct sparx5 *sparx5, u32 layer, u32 group,
                                    u32 leak_time)
{
        spx5_wr(HSCH_HSCH_TIMER_CFG_LEAK_TIME_SET(leak_time), sparx5,
                HSCH_HSCH_TIMER_CFG(layer, group));
}

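/* Each leak group maintains a hardware linked list of scheduler elements
 * (SEs): LEAK_FIRST holds the index of the head, each SE's SE_LEAK_LINK
 * field points to the next SE, and the last SE points to itself. The
 * helpers below walk and edit these lists.
 */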
static u32 sparx5_lg_get_first(struct sparx5 *sparx5, u32 layer, u32 group)
{
        u32 value;

        value = spx5_rd(sparx5, HSCH_HSCH_LEAK_CFG(layer, group));
        return HSCH_HSCH_LEAK_CFG_LEAK_FIRST_GET(value);
}

static u32 sparx5_lg_get_next(struct sparx5 *sparx5, u32 layer, u32 group,
                              u32 idx)
{
        u32 value;

        value = spx5_rd(sparx5, HSCH_SE_CONNECT(idx));
        return HSCH_SE_CONNECT_SE_LEAK_LINK_GET(value);
}

static u32 sparx5_lg_get_last(struct sparx5 *sparx5, u32 layer, u32 group)
{
        u32 itr, next;

        itr = sparx5_lg_get_first(sparx5, layer, group);

        for (;;) {
                next = sparx5_lg_get_next(sparx5, layer, group, itr);
                if (itr == next)
                        return itr;

                itr = next;
        }
}

static bool sparx5_lg_is_last(struct sparx5 *sparx5, u32 layer, u32 group,
                              u32 idx)
{
        return idx == sparx5_lg_get_next(sparx5, layer, group, idx);
}

static bool sparx5_lg_is_first(struct sparx5 *sparx5, u32 layer, u32 group,
                               u32 idx)
{
        return idx == sparx5_lg_get_first(sparx5, layer, group);
}

static bool sparx5_lg_is_empty(struct sparx5 *sparx5, u32 layer, u32 group)
{
        return sparx5_lg_get_leak_time(sparx5, layer, group) == 0;
}

static bool sparx5_lg_is_singular(struct sparx5 *sparx5, u32 layer, u32 group)
{
        if (sparx5_lg_is_empty(sparx5, layer, group))
                return false;

        return sparx5_lg_get_first(sparx5, layer, group) ==
               sparx5_lg_get_last(sparx5, layer, group);
}

static void sparx5_lg_enable(struct sparx5 *sparx5, u32 layer, u32 group,
                             u32 leak_time)
{
        sparx5_lg_set_leak_time(sparx5, layer, group, leak_time);
}

static void sparx5_lg_disable(struct sparx5 *sparx5, u32 layer, u32 group)
{
        sparx5_lg_set_leak_time(sparx5, layer, group, 0);
}

static int sparx5_lg_get_group_by_index(struct sparx5 *sparx5, u32 layer,
                                        u32 idx, u32 *group)
{
        u32 itr, next;
        int i;

        for (i = 0; i < SPX5_HSCH_LEAK_GRP_CNT; i++) {
                if (sparx5_lg_is_empty(sparx5, layer, i))
                        continue;

                itr = sparx5_lg_get_first(sparx5, layer, i);

                for (;;) {
                        next = sparx5_lg_get_next(sparx5, layer, i, itr);

                        if (itr == idx) {
                                *group = i;
                                return 0; /* Found it */
                        }
                        if (itr == next)
                                break; /* Was not found */

                        itr = next;
                }
        }

        return -1;
}

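/* Leak groups are ordered by increasing max_rate, so this returns the
 * slowest group that can still serve the requested rate; a slower group
 * leaks less often and thus offers a finer rate resolution for the shaper.
 */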
static int sparx5_lg_get_group_by_rate(u32 layer, u32 rate, u32 *group)
{
        struct sparx5_layer *l = &layers[layer];
        struct sparx5_lg *lg;
        u32 i;

        for (i = 0; i < SPX5_HSCH_LEAK_GRP_CNT; i++) {
                lg = &l->leak_groups[i];
                if (rate <= lg->max_rate) {
                        *group = i;
                        return 0;
                }
        }

        return -1;
}

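/* Locate idx in the group's list and return its neighbours: *prev is the
 * element linking to idx (or the head, when idx itself is the head), *next
 * is the element idx links to, and *first is the head of the list.
 */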
static int sparx5_lg_get_adjacent(struct sparx5 *sparx5, u32 layer, u32 group,
                                  u32 idx, u32 *prev, u32 *next, u32 *first)
{
        u32 itr;

        *first = sparx5_lg_get_first(sparx5, layer, group);
        *prev = *first;
        *next = *first;
        itr = *first;

        for (;;) {
                *next = sparx5_lg_get_next(sparx5, layer, group, itr);

                if (itr == idx)
                        return 0; /* Found it */

                if (itr == *next)
                        return -1; /* Was not found */

                *prev = itr;
                itr = *next;
        }
}

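/* Rewrite a leak group list. A leak time of zero both stops the group and
 * marks it empty, so leaking is always stopped while the list is edited
 * and only restarted when the group still has members.
 */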
static int sparx5_lg_conf_set(struct sparx5 *sparx5, u32 layer, u32 group,
                              u32 se_first, u32 idx, u32 idx_next, bool empty)
{
        u32 leak_time = layers[layer].leak_groups[group].leak_time;

        /* Stop leaking */
        sparx5_lg_disable(sparx5, layer, group);

        if (empty)
                return 0;

        /* Select layer */
        spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer),
                 HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG);

        /* Link elements */
        spx5_wr(HSCH_SE_CONNECT_SE_LEAK_LINK_SET(idx_next), sparx5,
                HSCH_SE_CONNECT(idx));

        /* Set the first element. */
        spx5_rmw(HSCH_HSCH_LEAK_CFG_LEAK_FIRST_SET(se_first),
                 HSCH_HSCH_LEAK_CFG_LEAK_FIRST, sparx5,
                 HSCH_HSCH_LEAK_CFG(layer, group));

        /* Start leaking */
        sparx5_lg_enable(sparx5, layer, group, leak_time);

        return 0;
}

static int sparx5_lg_del(struct sparx5 *sparx5, u32 layer, u32 group, u32 idx)
{
        u32 first, next, prev;
        bool empty = false;

        /* idx *must* be present in the leak group */
        WARN_ON(sparx5_lg_get_adjacent(sparx5, layer, group, idx, &prev, &next,
                                       &first) < 0);

        if (sparx5_lg_is_singular(sparx5, layer, group)) {
                empty = true;
        } else if (sparx5_lg_is_last(sparx5, layer, group, idx)) {
                /* idx is removed, prev is now last */
                idx = prev;
                next = prev;
        } else if (sparx5_lg_is_first(sparx5, layer, group, idx)) {
                /* idx is removed and points to itself, first is next */
                first = next;
                next = idx;
        } else {
                /* Next is not touched */
                idx = prev;
        }

        return sparx5_lg_conf_set(sparx5, layer, group, first, idx, next,
                                  empty);
}

static int sparx5_lg_add(struct sparx5 *sparx5, u32 layer, u32 new_group,
                         u32 idx)
{
        u32 first, next, old_group;

        pr_debug("ADD: layer: %u, new_group: %u, idx: %u\n", layer, new_group,
                 idx);

        /* Is this SE already shaping? */
        if (sparx5_lg_get_group_by_index(sparx5, layer, idx, &old_group) >= 0) {
                if (old_group != new_group) {
                        /* Delete from old group */
                        sparx5_lg_del(sparx5, layer, old_group, idx);
                } else {
                        /* Nothing to do here */
                        return 0;
                }
        }

        /* We always add to head of the list */
        first = idx;

        if (sparx5_lg_is_empty(sparx5, layer, new_group))
                next = idx;
        else
                next = sparx5_lg_get_first(sparx5, layer, new_group);

        return sparx5_lg_conf_set(sparx5, layer, new_group, first, idx, next,
                                  false);
}

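/* A shaper with both rate and burst zero is treated as disabled: its SE is
 * removed from the leak group instead of being (re)attached to one.
 */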
static int sparx5_shaper_conf_set(struct sparx5_port *port,
                                  const struct sparx5_shaper *sh, u32 layer,
                                  u32 idx, u32 group)
{
        int (*sparx5_lg_action)(struct sparx5 *, u32, u32, u32);
        struct sparx5 *sparx5 = port->sparx5;

        if (!sh->rate && !sh->burst)
                sparx5_lg_action = &sparx5_lg_del;
        else
                sparx5_lg_action = &sparx5_lg_add;

        /* Select layer */
        spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer),
                 HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG);

        /* Set frame mode */
        spx5_rmw(HSCH_SE_CFG_SE_FRM_MODE_SET(sh->mode), HSCH_SE_CFG_SE_FRM_MODE,
                 sparx5, HSCH_SE_CFG(idx));

        /* Set committed rate and burst */
        spx5_wr(HSCH_CIR_CFG_CIR_RATE_SET(sh->rate) |
                        HSCH_CIR_CFG_CIR_BURST_SET(sh->burst),
                sparx5, HSCH_CIR_CFG(idx));

        /* This has to be done after the shaper configuration has been set */
        sparx5_lg_action(sparx5, layer, group, idx);

        return 0;
}

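/* Map a relative DWRR weight to the inverse cost the hardware expects:
 * the smallest weight yields the largest cost (SPX5_DWRR_COST_MAX - 1),
 * and larger weights yield proportionally smaller costs. The << 4, + 8,
 * >> 4 sequence rounds the quotient to nearest in 4-bit fixed point.
 */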
static u32 sparx5_weight_to_hw_cost(u32 weight_min, u32 weight)
{
        return ((((SPX5_DWRR_COST_MAX << 4) * weight_min / weight) + 8) >> 4) -
               1;
}

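/* Configure DWRR arbitration on the port's own scheduler element: layer 2
 * is selected with the SE index equal to the port number, then the number
 * of dwrr-arbitrated inputs and the per-input costs are programmed.
 */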
static int sparx5_dwrr_conf_set(struct sparx5_port *port,
                                struct sparx5_dwrr *dwrr)
{
        int i;

        spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(2) |
                 HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(port->portno),
                 HSCH_HSCH_CFG_CFG_HSCH_LAYER | HSCH_HSCH_CFG_CFG_CFG_SE_IDX,
                 port->sparx5, HSCH_HSCH_CFG_CFG);

        /* Number of *lower* indexes that are arbitrated dwrr */
        spx5_rmw(HSCH_SE_CFG_SE_DWRR_CNT_SET(dwrr->count),
                 HSCH_SE_CFG_SE_DWRR_CNT, port->sparx5,
                 HSCH_SE_CFG(port->portno));

        for (i = 0; i < dwrr->count; i++) {
                spx5_rmw(HSCH_DWRR_ENTRY_DWRR_COST_SET(dwrr->cost[i]),
                         HSCH_DWRR_ENTRY_DWRR_COST, port->sparx5,
                         HSCH_DWRR_ENTRY(i));
        }

        return 0;
}

static int sparx5_leak_groups_init(struct sparx5 *sparx5)
{
        struct sparx5_layer *layer;
        u32 sys_clk_per_100ps;
        struct sparx5_lg *lg;
        u32 leak_time_us;
        int i, ii;

        sys_clk_per_100ps = spx5_rd(sparx5, HSCH_SYS_CLK_PER);

        for (i = 0; i < SPX5_HSCH_LAYER_CNT; i++) {
                layer = &layers[i];
                for (ii = 0; ii < SPX5_HSCH_LEAK_GRP_CNT; ii++) {
                        lg = &layer->leak_groups[ii];
                        lg->max_rate = spx5_hsch_max_group_rate[ii];

                        /* Calculate the leak time in us, to serve a maximum
                         * rate of 'max_rate' for this group
                         */
                        leak_time_us = (SPX5_SE_RATE_MAX * 1000) / lg->max_rate;

                        /* Hardware wants leak time in ns */
                        lg->leak_time = 1000 * leak_time_us;

                        /* Calculate resolution */
                        lg->resolution = 1000 / leak_time_us;

                        /* Maximum number of shapers that can be served by
                         * this leak group
                         */
                        lg->max_ses = (1000 * leak_time_us) / sys_clk_per_100ps;

                        /* Example:
                         * Wanted bandwidth is 100 Mbit:
                         *
                         * 100 Mbps can be served by leak group zero.
                         *
                         * leak_time is 125000 ns.
                         * resolution is: 8
                         *
                         * cir          = 100000 / 8 = 12500
                         * leaks_pr_sec = 10^9 / 125000 = 8000
                         * bw           = 12500 * 8000 = 10^8 (100 Mbit)
                         */

                        /* Disable by default - this also indicates an empty
                         * leak group
                         */
                        sparx5_lg_disable(sparx5, i, ii);
                }
        }

        return 0;
}

int sparx5_qos_init(struct sparx5 *sparx5)
{
        int ret;

        ret = sparx5_leak_groups_init(sparx5);
        if (ret < 0)
                return ret;

        ret = sparx5_dcb_init(sparx5);
        if (ret < 0)
                return ret;

        sparx5_psfp_init(sparx5);

        return 0;
}

int sparx5_tc_mqprio_add(struct net_device *ndev, u8 num_tc)
{
        int i;

        if (num_tc != SPX5_PRIOS) {
                netdev_err(ndev, "Only %d traffic classes supported\n",
                           SPX5_PRIOS);
                return -EINVAL;
        }

        netdev_set_num_tc(ndev, num_tc);

        for (i = 0; i < num_tc; i++)
                netdev_set_tc_queue(ndev, i, 1, i);

        netdev_dbg(ndev, "dev->num_tc %u dev->real_num_tx_queues %u\n",
                   ndev->num_tc, ndev->real_num_tx_queues);

        return 0;
}

int sparx5_tc_mqprio_del(struct net_device *ndev)
{
        netdev_reset_tc(ndev);

        netdev_dbg(ndev, "dev->num_tc %u dev->real_num_tx_queues %u\n",
                   ndev->num_tc, ndev->real_num_tx_queues);

        return 0;
}

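/* Offload a tc-tbf shaper: the requested rate first selects a leak group,
 * then rate and burst are converted to hardware units (rate in steps of
 * the group's resolution, burst in units of SPX5_SE_BURST_UNIT) and range
 * checked before the scheduler element is programmed.
 */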
int sparx5_tc_tbf_add(struct sparx5_port *port,
                      struct tc_tbf_qopt_offload_replace_params *params,
                      u32 layer, u32 idx)
{
        struct sparx5_shaper sh = {
                .mode = SPX5_SE_MODE_DATARATE,
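                /* rate_bytes_ps is in bytes/s; dividing by 1000 and
                 * multiplying by 8 gives kbit/s, the unit used by the
                 * leak group rate limits above.
                 */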
                .rate = div_u64(params->rate.rate_bytes_ps, 1000) * 8,
                .burst = params->max_size,
        };
        struct sparx5_lg *lg;
        u32 group;

        /* Find suitable group for this SE */
        if (sparx5_lg_get_group_by_rate(layer, sh.rate, &group) < 0) {
                pr_debug("Could not find leak group for se with rate: %u\n",
                         sh.rate);
                return -EINVAL;
        }

        lg = &layers[layer].leak_groups[group];

        pr_debug("Found matching group (speed: %u)\n", lg->max_rate);

        if (sh.rate < SPX5_SE_RATE_MIN || sh.burst < SPX5_SE_BURST_MIN)
                return -EINVAL;

        /* Calculate committed rate and burst */
        sh.rate = DIV_ROUND_UP(sh.rate, lg->resolution);
        sh.burst = DIV_ROUND_UP(sh.burst, SPX5_SE_BURST_UNIT);

        if (sh.rate > SPX5_SE_RATE_MAX || sh.burst > SPX5_SE_BURST_MAX)
                return -EINVAL;

        return sparx5_shaper_conf_set(port, &sh, layer, idx, group);
}

int sparx5_tc_tbf_del(struct sparx5_port *port, u32 layer, u32 idx)
{
        struct sparx5_shaper sh = {0};
        u32 group;

        /* Nothing to remove if the SE is not in any leak group */
        if (sparx5_lg_get_group_by_index(port->sparx5, layer, idx, &group) < 0)
                return 0;

        return sparx5_shaper_conf_set(port, &sh, layer, idx, group);
}

int sparx5_tc_ets_add(struct sparx5_port *port,
                      struct tc_ets_qopt_offload_replace_params *params)
{
        struct sparx5_dwrr dwrr = {0};
        /* Running minimum over all dwrr band weights */
        unsigned int w_min = 100;
        int i;

        /* Find minimum weight for all dwrr bands */
        for (i = 0; i < SPX5_PRIOS; i++) {
                if (params->quanta[i] == 0)
                        continue;
                w_min = min(w_min, params->weights[i]);
        }

        for (i = 0; i < SPX5_PRIOS; i++) {
                /* Strict band; skip */
                if (params->quanta[i] == 0)
                        continue;

                dwrr.count++;

                /* On the sparx5, bands with higher indexes are preferred and
                 * arbitrated strict. Strict bands are put in the lower indexes
                 * by tc, so we reverse the bands here.
                 *
                 * Also convert the weight to something the hardware
                 * understands.
                 */
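                /* Illustration, assuming SPX5_PRIOS == 8: a dwrr band at
                 * tc index 7 gets its cost programmed into dwrr.cost[0],
                 * i.e. the hardware's lowest (least preferred) input.
                 */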
                dwrr.cost[SPX5_PRIOS - i - 1] =
                        sparx5_weight_to_hw_cost(w_min, params->weights[i]);
        }

        return sparx5_dwrr_conf_set(port, &dwrr);
}

int sparx5_tc_ets_del(struct sparx5_port *port)
{
        struct sparx5_dwrr dwrr = {0};

        return sparx5_dwrr_conf_set(port, &dwrr);
}