drivers/net/dsa/sja1105/sja1105_main.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
3  * Copyright (c) 2018-2019, Vladimir Oltean <[email protected]>
4  */
5
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8 #include <linux/delay.h>
9 #include <linux/module.h>
10 #include <linux/printk.h>
11 #include <linux/spi/spi.h>
12 #include <linux/errno.h>
13 #include <linux/gpio/consumer.h>
14 #include <linux/phylink.h>
15 #include <linux/of.h>
16 #include <linux/of_net.h>
17 #include <linux/of_mdio.h>
18 #include <linux/of_device.h>
19 #include <linux/netdev_features.h>
20 #include <linux/netdevice.h>
21 #include <linux/if_bridge.h>
22 #include <linux/if_ether.h>
23 #include <linux/dsa/8021q.h>
24 #include "sja1105.h"
25 #include "sja1105_sgmii.h"
26 #include "sja1105_tas.h"
27
28 static const struct dsa_switch_ops sja1105_switch_ops;
29
30 static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
31                              unsigned int startup_delay)
32 {
33         gpiod_set_value_cansleep(gpio, 1);
34         /* Wait for minimum reset pulse length */
35         msleep(pulse_len);
36         gpiod_set_value_cansleep(gpio, 0);
37         /* Wait until chip is ready after reset */
38         msleep(startup_delay);
39 }
40
41 static void
42 sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd,
43                            int from, int to, bool allow)
44 {
45         if (allow) {
46                 l2_fwd[from].bc_domain  |= BIT(to);
47                 l2_fwd[from].reach_port |= BIT(to);
48                 l2_fwd[from].fl_domain  |= BIT(to);
49         } else {
50                 l2_fwd[from].bc_domain  &= ~BIT(to);
51                 l2_fwd[from].reach_port &= ~BIT(to);
52                 l2_fwd[from].fl_domain  &= ~BIT(to);
53         }
54 }
55
56 /* Structure used to temporarily transport device tree
57  * settings into sja1105_setup
58  */
59 struct sja1105_dt_port {
60         phy_interface_t phy_mode;
61         sja1105_mii_role_t role;
62 };
63
64 static int sja1105_init_mac_settings(struct sja1105_private *priv)
65 {
66         struct sja1105_mac_config_entry default_mac = {
67                 /* Enable all 8 priority queues on egress.
68                  * Every queue i holds top[i] - base[i] frames.
69                  * Sum of top[i] - base[i] is 511 (max hardware limit).
70                  */
71                 .top  = {0x3F, 0x7F, 0xBF, 0xFF, 0x13F, 0x17F, 0x1BF, 0x1FF},
72                 .base = {0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0},
73                 .enabled = {true, true, true, true, true, true, true, true},
74                 /* Keep standard IFG of 12 bytes on egress. */
75                 .ifg = 0,
76                 /* Always put the MAC speed in automatic mode, where it can be
77                  * adjusted at runtime by PHYLINK.
78                  */
79                 .speed = SJA1105_SPEED_AUTO,
80                 /* No static correction for 1-step 1588 events */
81                 .tp_delin = 0,
82                 .tp_delout = 0,
83                 /* Disable aging for critical TTEthernet traffic */
84                 .maxage = 0xFF,
85                 /* Internal VLAN (pvid) to apply to untagged ingress */
86                 .vlanprio = 0,
87                 .vlanid = 1,
88                 .ing_mirr = false,
89                 .egr_mirr = false,
90                 /* Don't drop traffic with an EtherType other than ETH_P_IP */
91                 .drpnona664 = false,
92                 /* Don't drop double-tagged traffic */
93                 .drpdtag = false,
94                 /* Don't drop untagged traffic */
95                 .drpuntag = false,
96                 /* Don't retag 802.1p (VID 0) traffic with the pvid */
97                 .retag = false,
98                 /* Disable learning and I/O on user ports by default -
99                  * STP will enable it.
100                  */
101                 .dyn_learn = false,
102                 .egress = false,
103                 .ingress = false,
104         };
105         struct sja1105_mac_config_entry *mac;
106         struct sja1105_table *table;
107         int i;
108
109         table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG];
110
111         /* Discard previous MAC Configuration Table */
112         if (table->entry_count) {
113                 kfree(table->entries);
114                 table->entry_count = 0;
115         }
116
117         table->entries = kcalloc(SJA1105_NUM_PORTS,
118                                  table->ops->unpacked_entry_size, GFP_KERNEL);
119         if (!table->entries)
120                 return -ENOMEM;
121
122         table->entry_count = SJA1105_NUM_PORTS;
123
124         mac = table->entries;
125
126         for (i = 0; i < SJA1105_NUM_PORTS; i++) {
127                 mac[i] = default_mac;
128                 if (i == dsa_upstream_port(priv->ds, i)) {
129                         /* STP doesn't get called for CPU port, so we need to
130                          * set the I/O parameters statically.
131                          */
132                         mac[i].dyn_learn = true;
133                         mac[i].ingress = true;
134                         mac[i].egress = true;
135                 }
136         }
137
138         return 0;
139 }
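
/* For illustration (arithmetic derived from the values above, not from the
 * datasheet): with these top/base pairs, each of the 8 priority queues spans
 * 64 buffer addresses, e.g. queue 0 covers 0x000-0x03F and queue 7 covers
 * 0x1C0-0x1FF, so the queues tile the whole 0x000-0x1FF range referred to as
 * the hardware limit in the comment above.
 */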
140
141 static bool sja1105_supports_sgmii(struct sja1105_private *priv, int port)
142 {
143         if (priv->info->part_no != SJA1105R_PART_NO &&
144             priv->info->part_no != SJA1105S_PART_NO)
145                 return false;
146
147         if (port != SJA1105_SGMII_PORT)
148                 return false;
149
150         if (dsa_is_unused_port(priv->ds, port))
151                 return false;
152
153         return true;
154 }
155
156 static int sja1105_init_mii_settings(struct sja1105_private *priv,
157                                      struct sja1105_dt_port *ports)
158 {
159         struct device *dev = &priv->spidev->dev;
160         struct sja1105_xmii_params_entry *mii;
161         struct sja1105_table *table;
162         int i;
163
164         table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS];
165
166         /* Discard previous xMII Mode Parameters Table */
167         if (table->entry_count) {
168                 kfree(table->entries);
169                 table->entry_count = 0;
170         }
171
172         table->entries = kcalloc(SJA1105_MAX_XMII_PARAMS_COUNT,
173                                  table->ops->unpacked_entry_size, GFP_KERNEL);
174         if (!table->entries)
175                 return -ENOMEM;
176
177         /* Override table based on PHYLINK DT bindings */
178         table->entry_count = SJA1105_MAX_XMII_PARAMS_COUNT;
179
180         mii = table->entries;
181
182         for (i = 0; i < SJA1105_NUM_PORTS; i++) {
183                 if (dsa_is_unused_port(priv->ds, i))
184                         continue;
185
186                 switch (ports[i].phy_mode) {
187                 case PHY_INTERFACE_MODE_MII:
188                         mii->xmii_mode[i] = XMII_MODE_MII;
189                         break;
190                 case PHY_INTERFACE_MODE_RMII:
191                         mii->xmii_mode[i] = XMII_MODE_RMII;
192                         break;
193                 case PHY_INTERFACE_MODE_RGMII:
194                 case PHY_INTERFACE_MODE_RGMII_ID:
195                 case PHY_INTERFACE_MODE_RGMII_RXID:
196                 case PHY_INTERFACE_MODE_RGMII_TXID:
197                         mii->xmii_mode[i] = XMII_MODE_RGMII;
198                         break;
199                 case PHY_INTERFACE_MODE_SGMII:
200                         if (!sja1105_supports_sgmii(priv, i))
201                                 return -EINVAL;
202                         mii->xmii_mode[i] = XMII_MODE_SGMII;
203                         break;
204                 default:
205                         dev_err(dev, "Unsupported PHY mode %s!\n",
206                                 phy_modes(ports[i].phy_mode));
                                return -EINVAL;
207                 }
208
209                 /* Even though the SerDes port is able to drive SGMII autoneg
210                  * like a PHY would, from the perspective of the XMII tables,
211                  * the SGMII port should always be put in MAC mode.
212                  */
213                 if (ports[i].phy_mode == PHY_INTERFACE_MODE_SGMII)
214                         mii->phy_mac[i] = XMII_MAC;
215                 else
216                         mii->phy_mac[i] = ports[i].role;
217         }
218         return 0;
219 }
220
221 static int sja1105_init_static_fdb(struct sja1105_private *priv)
222 {
223         struct sja1105_table *table;
224
225         table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
226
227         /* We only populate the FDB table through dynamic
228          * L2 Address Lookup entries
229          */
230         if (table->entry_count) {
231                 kfree(table->entries);
232                 table->entry_count = 0;
233         }
234         return 0;
235 }
236
237 static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
238 {
239         struct sja1105_table *table;
240         u64 max_fdb_entries = SJA1105_MAX_L2_LOOKUP_COUNT / SJA1105_NUM_PORTS;
241         struct sja1105_l2_lookup_params_entry default_l2_lookup_params = {
242                 /* Learned FDB entries are forgotten after 300 seconds */
243                 .maxage = SJA1105_AGEING_TIME_MS(300000),
244                 /* All entries within a FDB bin are available for learning */
245                 .dyn_tbsz = SJA1105ET_FDB_BIN_SIZE,
246                 /* And the P/Q/R/S equivalent setting: */
247                 .start_dynspc = 0,
248                 .maxaddrp = {max_fdb_entries, max_fdb_entries, max_fdb_entries,
249                              max_fdb_entries, max_fdb_entries, },
250                 /* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
251                 .poly = 0x97,
252                 /* This selects between Independent VLAN Learning (IVL) and
253                  * Shared VLAN Learning (SVL)
254                  */
255                 .shared_learn = true,
256                 /* Don't discard management traffic based on ENFPORT -
257                  * we don't perform SMAC port enforcement anyway, so
258                  * what we are setting here doesn't matter.
259                  */
260                 .no_enf_hostprt = false,
261                 /* Don't learn SMAC for mac_fltres1 and mac_fltres0.
262                  * Maybe correlate with no_linklocal_learn from bridge driver?
263                  */
264                 .no_mgmt_learn = true,
265                 /* P/Q/R/S only */
266                 .use_static = true,
267                 /* Dynamically learned FDB entries can overwrite other (older)
268                  * dynamic FDB entries
269                  */
270                 .owr_dyn = true,
271                 .drpnolearn = true,
272         };
273
274         table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
275
276         if (table->entry_count) {
277                 kfree(table->entries);
278                 table->entry_count = 0;
279         }
280
281         table->entries = kcalloc(SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
282                                  table->ops->unpacked_entry_size, GFP_KERNEL);
283         if (!table->entries)
284                 return -ENOMEM;
285
286         table->entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT;
287
288         /* This table only has a single entry */
289         ((struct sja1105_l2_lookup_params_entry *)table->entries)[0] =
290                                 default_l2_lookup_params;
291
292         return 0;
293 }
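
/* Illustrative arithmetic for the values chosen above: the shared L2 lookup
 * table has 1024 entries (indices 0-1023, see the FDB helpers further down),
 * so maxaddrp caps each of the 5 ports at 1024 / 5 = 204 dynamically learned
 * addresses, while dyn_tbsz keeps the E/T hash bins at 4 ways each.
 */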
294
295 static int sja1105_init_static_vlan(struct sja1105_private *priv)
296 {
297         struct sja1105_table *table;
298         struct sja1105_vlan_lookup_entry pvid = {
299                 .ving_mirr = 0,
300                 .vegr_mirr = 0,
301                 .vmemb_port = 0,
302                 .vlan_bc = 0,
303                 .tag_port = 0,
304                 .vlanid = 1,
305         };
306         struct dsa_switch *ds = priv->ds;
307         int port;
308
309         table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
310
311         /* The static VLAN table will only contain the initial pvid of 1.
312          * All other VLANs are to be configured through dynamic entries,
313          * and kept in the static configuration table as backing memory.
314          */
315         if (table->entry_count) {
316                 kfree(table->entries);
317                 table->entry_count = 0;
318         }
319
320         table->entries = kcalloc(1, table->ops->unpacked_entry_size,
321                                  GFP_KERNEL);
322         if (!table->entries)
323                 return -ENOMEM;
324
325         table->entry_count = 1;
326
327         /* VLAN 1: all DT-defined ports are members; no restrictions on
328          * forwarding; always transmit as untagged.
329          */
330         for (port = 0; port < ds->num_ports; port++) {
331                 struct sja1105_bridge_vlan *v;
332
333                 if (dsa_is_unused_port(ds, port))
334                         continue;
335
336                 pvid.vmemb_port |= BIT(port);
337                 pvid.vlan_bc |= BIT(port);
338                 pvid.tag_port &= ~BIT(port);
339
340                 /* Let traffic that doesn't need dsa_8021q (e.g. STP, PTP) be
341                  * transmitted as untagged.
342                  */
343                 v = kzalloc(sizeof(*v), GFP_KERNEL);
344                 if (!v)
345                         return -ENOMEM;
346
347                 v->port = port;
348                 v->vid = 1;
349                 v->untagged = true;
350                 if (dsa_is_cpu_port(ds, port))
351                         v->pvid = true;
352                 list_add(&v->list, &priv->dsa_8021q_vlans);
353         }
354
355         ((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
356         return 0;
357 }
358
359 static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
360 {
361         struct sja1105_l2_forwarding_entry *l2fwd;
362         struct sja1105_table *table;
363         int i, j;
364
365         table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING];
366
367         if (table->entry_count) {
368                 kfree(table->entries);
369                 table->entry_count = 0;
370         }
371
372         table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_COUNT,
373                                  table->ops->unpacked_entry_size, GFP_KERNEL);
374         if (!table->entries)
375                 return -ENOMEM;
376
377         table->entry_count = SJA1105_MAX_L2_FORWARDING_COUNT;
378
379         l2fwd = table->entries;
380
381         /* First 5 entries define the forwarding rules */
382         for (i = 0; i < SJA1105_NUM_PORTS; i++) {
383                 unsigned int upstream = dsa_upstream_port(priv->ds, i);
384
385                 for (j = 0; j < SJA1105_NUM_TC; j++)
386                         l2fwd[i].vlan_pmap[j] = j;
387
388                 if (i == upstream)
389                         continue;
390
391                 sja1105_port_allow_traffic(l2fwd, i, upstream, true);
392                 sja1105_port_allow_traffic(l2fwd, upstream, i, true);
393         }
394         /* Next 8 entries define VLAN PCP mapping from ingress to egress.
395          * Create a one-to-one mapping.
396          */
397         for (i = 0; i < SJA1105_NUM_TC; i++)
398                 for (j = 0; j < SJA1105_NUM_PORTS; j++)
399                         l2fwd[SJA1105_NUM_PORTS + i].vlan_pmap[j] = i;
400
401         return 0;
402 }
403
404 static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
405 {
406         struct sja1105_l2_forwarding_params_entry default_l2fwd_params = {
407                 /* Disallow dynamic reconfiguration of vlan_pmap */
408                 .max_dynp = 0,
409                 /* Use a single memory partition for all ingress queues */
410                 .part_spc = { SJA1105_MAX_FRAME_MEMORY, 0, 0, 0, 0, 0, 0, 0 },
411         };
412         struct sja1105_table *table;
413
414         table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
415
416         if (table->entry_count) {
417                 kfree(table->entries);
418                 table->entry_count = 0;
419         }
420
421         table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
422                                  table->ops->unpacked_entry_size, GFP_KERNEL);
423         if (!table->entries)
424                 return -ENOMEM;
425
426         table->entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT;
427
428         /* This table only has a single entry */
429         ((struct sja1105_l2_forwarding_params_entry *)table->entries)[0] =
430                                 default_l2fwd_params;
431
432         return 0;
433 }
434
435 void sja1105_frame_memory_partitioning(struct sja1105_private *priv)
436 {
437         struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
438         struct sja1105_vl_forwarding_params_entry *vl_fwd_params;
439         struct sja1105_table *table;
440         int max_mem;
441
442         /* VLAN retagging is implemented using a loopback port that consumes
443          * frame buffers. That leaves less for us.
444          */
445         if (priv->vlan_state == SJA1105_VLAN_BEST_EFFORT)
446                 max_mem = SJA1105_MAX_FRAME_MEMORY_RETAGGING;
447         else
448                 max_mem = SJA1105_MAX_FRAME_MEMORY;
449
450         table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
451         l2_fwd_params = table->entries;
452         l2_fwd_params->part_spc[0] = max_mem;
453
454         /* If we have any critical-traffic virtual links, we need to reserve
455          * some frame buffer memory for them. At the moment, hardcode the value
456          * at 100 blocks of 128 bytes of memory each. This leaves 829 blocks
457          * remaining for best-effort traffic. TODO: figure out a more flexible
458          * way to perform the frame buffer partitioning.
459          */
460         if (!priv->static_config.tables[BLK_IDX_VL_FORWARDING].entry_count)
461                 return;
462
463         table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
464         vl_fwd_params = table->entries;
465
466         l2_fwd_params->part_spc[0] -= SJA1105_VL_FRAME_MEMORY;
467         vl_fwd_params->partspc[0] = SJA1105_VL_FRAME_MEMORY;
468 }
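
/* Worked example for the partitioning above, using only the numbers quoted
 * in this function's comments: without retagging, best-effort partition 0
 * starts out with the full frame memory and, as soon as any virtual links
 * exist, gives up SJA1105_VL_FRAME_MEMORY blocks to the VL partition, i.e.
 * 100 blocks of 128 bytes each, leaving the 829 best-effort blocks mentioned
 * above out of a total of 929.
 */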
469
470 static int sja1105_init_general_params(struct sja1105_private *priv)
471 {
472         struct sja1105_general_params_entry default_general_params = {
473                 /* Allow dynamic changing of the mirror port */
474                 .mirr_ptacu = true,
475                 .switchid = priv->ds->index,
476                 /* Priority queue for link-local management frames
477                  * (both ingress to and egress from CPU - PTP, STP etc)
478                  */
479                 .hostprio = 7,
480                 .mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A,
481                 .mac_flt1    = SJA1105_LINKLOCAL_FILTER_A_MASK,
482                 .incl_srcpt1 = false,
483                 .send_meta1  = false,
484                 .mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B,
485                 .mac_flt0    = SJA1105_LINKLOCAL_FILTER_B_MASK,
486                 .incl_srcpt0 = false,
487                 .send_meta0  = false,
488                 /* The destination for traffic matching mac_fltres1 and
489                  * mac_fltres0 on all ports except host_port. Such traffic
490                  * received on host_port itself would be dropped, except
491                  * by installing a temporary 'management route'
492                  */
493                 .host_port = dsa_upstream_port(priv->ds, 0),
494                 /* Default to an invalid value */
495                 .mirr_port = SJA1105_NUM_PORTS,
496                 /* Link-local traffic received on casc_port will be forwarded
497                  * to host_port without embedding the source port and device ID
498                  * info in the destination MAC address (presumably because it
499                  * is a cascaded port and a downstream SJA switch already did
500                  * that). Default to an invalid port (to disable the feature)
501                  * and overwrite this if we find any DSA (cascaded) ports.
502                  */
503                 .casc_port = SJA1105_NUM_PORTS,
504                 /* No TTEthernet */
505                 .vllupformat = SJA1105_VL_FORMAT_PSFP,
506                 .vlmarker = 0,
507                 .vlmask = 0,
508                 /* Only update correctionField for 1-step PTP (L2 transport) */
509                 .ignore2stf = 0,
510                 /* Forcefully disable VLAN filtering by telling
511                  * the switch that VLAN has a different EtherType.
512                  */
513                 .tpid = ETH_P_SJA1105,
514                 .tpid2 = ETH_P_SJA1105,
515         };
516         struct sja1105_table *table;
517
518         table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
519
520         if (table->entry_count) {
521                 kfree(table->entries);
522                 table->entry_count = 0;
523         }
524
525         table->entries = kcalloc(SJA1105_MAX_GENERAL_PARAMS_COUNT,
526                                  table->ops->unpacked_entry_size, GFP_KERNEL);
527         if (!table->entries)
528                 return -ENOMEM;
529
530         table->entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT;
531
532         /* This table only has a single entry */
533         ((struct sja1105_general_params_entry *)table->entries)[0] =
534                                 default_general_params;
535
536         return 0;
537 }
538
539 static int sja1105_init_avb_params(struct sja1105_private *priv)
540 {
541         struct sja1105_avb_params_entry *avb;
542         struct sja1105_table *table;
543
544         table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS];
545
546         /* Discard previous AVB Parameters Table */
547         if (table->entry_count) {
548                 kfree(table->entries);
549                 table->entry_count = 0;
550         }
551
552         table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT,
553                                  table->ops->unpacked_entry_size, GFP_KERNEL);
554         if (!table->entries)
555                 return -ENOMEM;
556
557         table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT;
558
559         avb = table->entries;
560
561         /* Configure the MAC addresses for meta frames */
562         avb->destmeta = SJA1105_META_DMAC;
563         avb->srcmeta  = SJA1105_META_SMAC;
564         /* On P/Q/R/S, configure the direction of the PTP_CLK pin as input by
565          * default. This is because there might be boards with a hardware
566          * layout where enabling the pin as output might cause an electrical
567          * clash. On E/T the pin is always an output, which the board designers
568          * probably already knew, so even if there are going to be electrical
569          * issues, there's nothing we can do.
570          */
571         avb->cas_master = false;
572
573         return 0;
574 }
575
576 /* The L2 policing table is 2-stage. The table is looked up for each frame
577  * according to the ingress port, whether it was broadcast or not, and the
578  * classified traffic class (given by VLAN PCP). This portion of the lookup is
579  * fixed, and gives access to the SHARINDX, an indirection register pointing
580  * within the policing table itself, which is used to resolve the policer that
581  * will be used for this frame.
582  *
583  *  Stage 1                              Stage 2
584  * +------------+--------+              +---------------------------------+
585  * |Port 0 TC 0 |SHARINDX|              | Policer 0: Rate, Burst, MTU     |
586  * +------------+--------+              +---------------------------------+
587  * |Port 0 TC 1 |SHARINDX|              | Policer 1: Rate, Burst, MTU     |
588  * +------------+--------+              +---------------------------------+
589  *    ...                               | Policer 2: Rate, Burst, MTU     |
590  * +------------+--------+              +---------------------------------+
591  * |Port 0 TC 7 |SHARINDX|              | Policer 3: Rate, Burst, MTU     |
592  * +------------+--------+              +---------------------------------+
593  * |Port 1 TC 0 |SHARINDX|              | Policer 4: Rate, Burst, MTU     |
594  * +------------+--------+              +---------------------------------+
595  *    ...                               | Policer 5: Rate, Burst, MTU     |
596  * +------------+--------+              +---------------------------------+
597  * |Port 1 TC 7 |SHARINDX|              | Policer 6: Rate, Burst, MTU     |
598  * +------------+--------+              +---------------------------------+
599  *    ...                               | Policer 7: Rate, Burst, MTU     |
600  * +------------+--------+              +---------------------------------+
601  * |Port 4 TC 7 |SHARINDX|                 ...
602  * +------------+--------+
603  * |Port 0 BCAST|SHARINDX|                 ...
604  * +------------+--------+
605  * |Port 1 BCAST|SHARINDX|                 ...
606  * +------------+--------+
607  *    ...                                  ...
608  * +------------+--------+              +---------------------------------+
609  * |Port 4 BCAST|SHARINDX|              | Policer 44: Rate, Burst, MTU    |
610  * +------------+--------+              +---------------------------------+
611  *
612  * In this driver, we shall use policers 0-4 as statically allocated port
613  * (matchall) policers. So we need to make the SHARINDX for all lookups
614  * corresponding to this ingress port (8 VLAN PCP lookups and 1 broadcast
615  * lookup) equal.
616  * The remaining policers (40) shall be dynamically allocated for flower
617  * policers, where the key is either vlan_prio or dst_mac ff:ff:ff:ff:ff:ff.
618  */
619 #define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000)
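
/* Two worked data points for the scheme above (derived from this file only):
 * the matchall policer of, say, port 2 is policer 2, reached via SHARINDX
 * from its eight per-TC stage-1 entries (indices 2 * 8 + 0 .. 2 * 8 + 7) and
 * from its broadcast entry (index 5 * 8 + 2 = 42); and SJA1105_RATE_MBPS(1000)
 * evaluates to 64000, i.e. the hardware rate field is expressed in steps of
 * 1/64 Mbps.
 */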
620
621 static int sja1105_init_l2_policing(struct sja1105_private *priv)
622 {
623         struct sja1105_l2_policing_entry *policing;
624         struct sja1105_table *table;
625         int port, tc;
626
627         table = &priv->static_config.tables[BLK_IDX_L2_POLICING];
628
629         /* Discard previous L2 Policing Table */
630         if (table->entry_count) {
631                 kfree(table->entries);
632                 table->entry_count = 0;
633         }
634
635         table->entries = kcalloc(SJA1105_MAX_L2_POLICING_COUNT,
636                                  table->ops->unpacked_entry_size, GFP_KERNEL);
637         if (!table->entries)
638                 return -ENOMEM;
639
640         table->entry_count = SJA1105_MAX_L2_POLICING_COUNT;
641
642         policing = table->entries;
643
644         /* Setup shared indices for the matchall policers */
645         for (port = 0; port < SJA1105_NUM_PORTS; port++) {
646                 int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port;
647
648                 for (tc = 0; tc < SJA1105_NUM_TC; tc++)
649                         policing[port * SJA1105_NUM_TC + tc].sharindx = port;
650
651                 policing[bcast].sharindx = port;
652         }
653
654         /* Setup the matchall policer parameters */
655         for (port = 0; port < SJA1105_NUM_PORTS; port++) {
656                 int mtu = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
657
658                 if (dsa_is_cpu_port(priv->ds, port))
659                         mtu += VLAN_HLEN;
660
661                 policing[port].smax = 65535; /* Burst size in bytes */
662                 policing[port].rate = SJA1105_RATE_MBPS(1000);
663                 policing[port].maxlen = mtu;
664                 policing[port].partition = 0;
665         }
666
667         return 0;
668 }
669
670 static int sja1105_static_config_load(struct sja1105_private *priv,
671                                       struct sja1105_dt_port *ports)
672 {
673         int rc;
674
675         sja1105_static_config_free(&priv->static_config);
676         rc = sja1105_static_config_init(&priv->static_config,
677                                         priv->info->static_ops,
678                                         priv->info->device_id);
679         if (rc)
680                 return rc;
681
682         /* Build static configuration */
683         rc = sja1105_init_mac_settings(priv);
684         if (rc < 0)
685                 return rc;
686         rc = sja1105_init_mii_settings(priv, ports);
687         if (rc < 0)
688                 return rc;
689         rc = sja1105_init_static_fdb(priv);
690         if (rc < 0)
691                 return rc;
692         rc = sja1105_init_static_vlan(priv);
693         if (rc < 0)
694                 return rc;
695         rc = sja1105_init_l2_lookup_params(priv);
696         if (rc < 0)
697                 return rc;
698         rc = sja1105_init_l2_forwarding(priv);
699         if (rc < 0)
700                 return rc;
701         rc = sja1105_init_l2_forwarding_params(priv);
702         if (rc < 0)
703                 return rc;
704         rc = sja1105_init_l2_policing(priv);
705         if (rc < 0)
706                 return rc;
707         rc = sja1105_init_general_params(priv);
708         if (rc < 0)
709                 return rc;
710         rc = sja1105_init_avb_params(priv);
711         if (rc < 0)
712                 return rc;
713
714         /* Send initial configuration to hardware via SPI */
715         return sja1105_static_config_upload(priv);
716 }
717
718 static int sja1105_parse_rgmii_delays(struct sja1105_private *priv,
719                                       const struct sja1105_dt_port *ports)
720 {
721         int i;
722
723         for (i = 0; i < SJA1105_NUM_PORTS; i++) {
724                 if (ports[i].role == XMII_MAC)
725                         continue;
726
727                 if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
728                     ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
729                         priv->rgmii_rx_delay[i] = true;
730
731                 if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
732                     ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
733                         priv->rgmii_tx_delay[i] = true;
734
735                 if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) &&
736                      !priv->info->setup_rgmii_delay)
737                         return -EINVAL;
738         }
739         return 0;
740 }
741
742 static int sja1105_parse_ports_node(struct sja1105_private *priv,
743                                     struct sja1105_dt_port *ports,
744                                     struct device_node *ports_node)
745 {
746         struct device *dev = &priv->spidev->dev;
747         struct device_node *child;
748
749         for_each_available_child_of_node(ports_node, child) {
750                 struct device_node *phy_node;
751                 phy_interface_t phy_mode;
752                 u32 index;
753                 int err;
754
755                 /* Get switch port number from DT */
756                 if (of_property_read_u32(child, "reg", &index) < 0) {
757                         dev_err(dev, "Port number not defined in device tree "
758                                 "(property \"reg\")\n");
759                         of_node_put(child);
760                         return -ENODEV;
761                 }
762
763                 /* Get PHY mode from DT */
764                 err = of_get_phy_mode(child, &phy_mode);
765                 if (err) {
766                         dev_err(dev, "Failed to read phy-mode or "
767                                 "phy-interface-type property for port %d\n",
768                                 index);
769                         of_node_put(child);
770                         return -ENODEV;
771                 }
772                 ports[index].phy_mode = phy_mode;
773
774                 phy_node = of_parse_phandle(child, "phy-handle", 0);
775                 if (!phy_node) {
776                         if (!of_phy_is_fixed_link(child)) {
777                                 dev_err(dev, "phy-handle or fixed-link "
778                                         "properties missing!\n");
779                                 of_node_put(child);
780                                 return -ENODEV;
781                         }
782                         /* phy-handle is missing, but fixed-link isn't.
783                          * So it's a fixed link. Default to PHY role.
784                          */
785                         ports[index].role = XMII_PHY;
786                 } else {
787                         /* phy-handle present => put port in MAC role */
788                         ports[index].role = XMII_MAC;
789                         of_node_put(phy_node);
790                 }
791
792                 /* The MAC/PHY role can be overridden with explicit bindings */
793                 if (of_property_read_bool(child, "sja1105,role-mac"))
794                         ports[index].role = XMII_MAC;
795                 else if (of_property_read_bool(child, "sja1105,role-phy"))
796                         ports[index].role = XMII_PHY;
797         }
798
799         return 0;
800 }
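
/* For reference, a minimal "ports" node accepted by the parser above could
 * look like the sketch below (an illustrative device tree fragment only;
 * labels and unit addresses are made up, the property names are the ones
 * read by the code above):
 *
 *	ports {
 *		port@0 {
 *			reg = <0>;
 *			phy-mode = "rgmii-id";
 *			phy-handle = <&phy0>;
 *		};
 *		port@4 {
 *			reg = <4>;
 *			phy-mode = "rgmii";
 *			sja1105,role-mac;
 *			fixed-link {
 *				speed = <1000>;
 *				full-duplex;
 *			};
 *		};
 *	};
 */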
801
802 static int sja1105_parse_dt(struct sja1105_private *priv,
803                             struct sja1105_dt_port *ports)
804 {
805         struct device *dev = &priv->spidev->dev;
806         struct device_node *switch_node = dev->of_node;
807         struct device_node *ports_node;
808         int rc;
809
810         ports_node = of_get_child_by_name(switch_node, "ports");
811         if (!ports_node) {
812                 dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
813                 return -ENODEV;
814         }
815
816         rc = sja1105_parse_ports_node(priv, ports, ports_node);
817         of_node_put(ports_node);
818
819         return rc;
820 }
821
822 static int sja1105_sgmii_read(struct sja1105_private *priv, int pcs_reg)
823 {
824         const struct sja1105_regs *regs = priv->info->regs;
825         u32 val;
826         int rc;
827
828         rc = sja1105_xfer_u32(priv, SPI_READ, regs->sgmii + pcs_reg, &val,
829                               NULL);
830         if (rc < 0)
831                 return rc;
832
833         return val;
834 }
835
836 static int sja1105_sgmii_write(struct sja1105_private *priv, int pcs_reg,
837                                u16 pcs_val)
838 {
839         const struct sja1105_regs *regs = priv->info->regs;
840         u32 val = pcs_val;
841         int rc;
842
843         rc = sja1105_xfer_u32(priv, SPI_WRITE, regs->sgmii + pcs_reg, &val,
844                               NULL);
845         if (rc < 0)
846                 return rc;
847
848         return val;
849 }
850
851 static void sja1105_sgmii_pcs_config(struct sja1105_private *priv,
852                                      bool an_enabled, bool an_master)
853 {
854         u16 ac = SJA1105_AC_AUTONEG_MODE_SGMII;
855
856         /* DIGITAL_CONTROL_1: Enable vendor-specific MMD1, allow the PHY to
857          * stop the clock during LPI mode, make the MAC reconfigure
858          * autonomously after PCS autoneg is done, flush the internal FIFOs.
859          */
860         sja1105_sgmii_write(priv, SJA1105_DC1, SJA1105_DC1_EN_VSMMD1 |
861                                                SJA1105_DC1_CLOCK_STOP_EN |
862                                                SJA1105_DC1_MAC_AUTO_SW |
863                                                SJA1105_DC1_INIT);
864         /* DIGITAL_CONTROL_2: No polarity inversion for TX and RX lanes */
865         sja1105_sgmii_write(priv, SJA1105_DC2, SJA1105_DC2_TX_POL_INV_DISABLE);
866         /* AUTONEG_CONTROL: Use SGMII autoneg */
867         if (an_master)
868                 ac |= SJA1105_AC_PHY_MODE | SJA1105_AC_SGMII_LINK;
869         sja1105_sgmii_write(priv, SJA1105_AC, ac);
870         /* BASIC_CONTROL: enable in-band AN now, if requested. Otherwise,
871          * sja1105_sgmii_pcs_force_speed must be called later for the link
872          * to become operational.
873          */
874         if (an_enabled)
875                 sja1105_sgmii_write(priv, MII_BMCR,
876                                     BMCR_ANENABLE | BMCR_ANRESTART);
877 }
878
879 static void sja1105_sgmii_pcs_force_speed(struct sja1105_private *priv,
880                                           int speed)
881 {
882         int pcs_speed;
883
884         switch (speed) {
885         case SPEED_1000:
886                 pcs_speed = BMCR_SPEED1000;
887                 break;
888         case SPEED_100:
889                 pcs_speed = BMCR_SPEED100;
890                 break;
891         case SPEED_10:
892                 pcs_speed = BMCR_SPEED10;
893                 break;
894         default:
895                 dev_err(priv->ds->dev, "Invalid speed %d\n", speed);
896                 return;
897         }
898         sja1105_sgmii_write(priv, MII_BMCR, pcs_speed | BMCR_FULLDPLX);
899 }
900
901 /* Convert link speed from SJA1105 to ethtool encoding */
902 static int sja1105_speed[] = {
903         [SJA1105_SPEED_AUTO]            = SPEED_UNKNOWN,
904         [SJA1105_SPEED_10MBPS]          = SPEED_10,
905         [SJA1105_SPEED_100MBPS]         = SPEED_100,
906         [SJA1105_SPEED_1000MBPS]        = SPEED_1000,
907 };
908
909 /* Set link speed in the MAC configuration for a specific port. */
910 static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
911                                       int speed_mbps)
912 {
913         struct sja1105_xmii_params_entry *mii;
914         struct sja1105_mac_config_entry *mac;
915         struct device *dev = priv->ds->dev;
916         sja1105_phy_interface_t phy_mode;
917         sja1105_speed_t speed;
918         int rc;
919
920         /* On P/Q/R/S, one can read from the device via the MAC reconfiguration
921          * tables. On E/T, MAC reconfig tables are not readable, only writable.
922          * We have to *know* what the MAC looks like.  For the sake of keeping
923          * the code common, we'll use the static configuration tables as a
924          * reasonable approximation for both E/T and P/Q/R/S.
925          */
926         mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
927         mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
928
929         switch (speed_mbps) {
930         case SPEED_UNKNOWN:
931                 /* PHYLINK called sja1105_mac_config() to inform us about
932                  * the state->interface, but AN has not completed and the
933                  * speed is not yet valid. UM10944.pdf says that setting
934                  * SJA1105_SPEED_AUTO at runtime disables the port, so that is
935                  * ok for power consumption in case AN will never complete -
936                  * otherwise PHYLINK should come back with a new update.
937                  */
938                 speed = SJA1105_SPEED_AUTO;
939                 break;
940         case SPEED_10:
941                 speed = SJA1105_SPEED_10MBPS;
942                 break;
943         case SPEED_100:
944                 speed = SJA1105_SPEED_100MBPS;
945                 break;
946         case SPEED_1000:
947                 speed = SJA1105_SPEED_1000MBPS;
948                 break;
949         default:
950                 dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
951                 return -EINVAL;
952         }
953
954         /* Overwrite SJA1105_SPEED_AUTO from the static MAC configuration
955          * table, since this will be used for the clocking setup, and we no
956          * longer need to store it in the static config (already told hardware
957          * we want auto during upload phase).
958          * Actually for the SGMII port, the MAC is fixed at 1 Gbps and
959          * we need to configure the PCS only (if even that).
960          */
961         if (sja1105_supports_sgmii(priv, port))
962                 mac[port].speed = SJA1105_SPEED_1000MBPS;
963         else
964                 mac[port].speed = speed;
965
966         /* Write to the dynamic reconfiguration tables */
967         rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
968                                           &mac[port], true);
969         if (rc < 0) {
970                 dev_err(dev, "Failed to write MAC config: %d\n", rc);
971                 return rc;
972         }
973
974         /* Reconfigure the PLLs for the RGMII interfaces (125 MHz is required at
975          * gigabit, 25 MHz at 100 Mbps and 2.5 MHz at 10 Mbps). For MII and
976          * RMII no change of the clock setup is required. Actually, changing
977          * the clock setup does interrupt the clock signal for a certain time
978          * which causes trouble for all PHYs relying on this signal.
979          */
980         phy_mode = mii->xmii_mode[port];
981         if (phy_mode != XMII_MODE_RGMII)
982                 return 0;
983
984         return sja1105_clocking_setup_port(priv, port);
985 }
986
987 /* The SJA1105 MAC programming model is through the static config (the xMII
988  * Mode table cannot be dynamically reconfigured), and we have to program
989  * that early (earlier than PHYLINK calls us, anyway).
990  * So just error out in case the connected PHY attempts to change the initial
991  * system interface MII protocol from what is defined in the DT, at least for
992  * now.
993  */
994 static bool sja1105_phy_mode_mismatch(struct sja1105_private *priv, int port,
995                                       phy_interface_t interface)
996 {
997         struct sja1105_xmii_params_entry *mii;
998         sja1105_phy_interface_t phy_mode;
999
1000         mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
1001         phy_mode = mii->xmii_mode[port];
1002
1003         switch (interface) {
1004         case PHY_INTERFACE_MODE_MII:
1005                 return (phy_mode != XMII_MODE_MII);
1006         case PHY_INTERFACE_MODE_RMII:
1007                 return (phy_mode != XMII_MODE_RMII);
1008         case PHY_INTERFACE_MODE_RGMII:
1009         case PHY_INTERFACE_MODE_RGMII_ID:
1010         case PHY_INTERFACE_MODE_RGMII_RXID:
1011         case PHY_INTERFACE_MODE_RGMII_TXID:
1012                 return (phy_mode != XMII_MODE_RGMII);
1013         case PHY_INTERFACE_MODE_SGMII:
1014                 return (phy_mode != XMII_MODE_SGMII);
1015         default:
1016                 return true;
1017         }
1018 }
1019
1020 static void sja1105_mac_config(struct dsa_switch *ds, int port,
1021                                unsigned int mode,
1022                                const struct phylink_link_state *state)
1023 {
1024         struct sja1105_private *priv = ds->priv;
1025         bool is_sgmii = sja1105_supports_sgmii(priv, port);
1026
1027         if (sja1105_phy_mode_mismatch(priv, port, state->interface)) {
1028                 dev_err(ds->dev, "Changing PHY mode to %s not supported!\n",
1029                         phy_modes(state->interface));
1030                 return;
1031         }
1032
1033         if (phylink_autoneg_inband(mode) && !is_sgmii) {
1034                 dev_err(ds->dev, "In-band AN not supported!\n");
1035                 return;
1036         }
1037
1038         if (is_sgmii)
1039                 sja1105_sgmii_pcs_config(priv, phylink_autoneg_inband(mode),
1040                                          false);
1041 }
1042
1043 static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
1044                                   unsigned int mode,
1045                                   phy_interface_t interface)
1046 {
1047         sja1105_inhibit_tx(ds->priv, BIT(port), true);
1048 }
1049
1050 static void sja1105_mac_link_up(struct dsa_switch *ds, int port,
1051                                 unsigned int mode,
1052                                 phy_interface_t interface,
1053                                 struct phy_device *phydev,
1054                                 int speed, int duplex,
1055                                 bool tx_pause, bool rx_pause)
1056 {
1057         struct sja1105_private *priv = ds->priv;
1058
1059         sja1105_adjust_port_config(priv, port, speed);
1060
1061         if (sja1105_supports_sgmii(priv, port) && !phylink_autoneg_inband(mode))
1062                 sja1105_sgmii_pcs_force_speed(priv, speed);
1063
1064         sja1105_inhibit_tx(priv, BIT(port), false);
1065 }
1066
1067 static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
1068                                      unsigned long *supported,
1069                                      struct phylink_link_state *state)
1070 {
1071         /* Construct a new mask which exhaustively contains all link features
1072          * supported by the MAC, and then apply that (logical AND) to what will
1073          * be sent to the PHY for "marketing".
1074          */
1075         __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1076         struct sja1105_private *priv = ds->priv;
1077         struct sja1105_xmii_params_entry *mii;
1078
1079         mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
1080
1081         /* include/linux/phylink.h says:
1082          *     When @state->interface is %PHY_INTERFACE_MODE_NA, phylink
1083          *     expects the MAC driver to return all supported link modes.
1084          */
1085         if (state->interface != PHY_INTERFACE_MODE_NA &&
1086             sja1105_phy_mode_mismatch(priv, port, state->interface)) {
1087                 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
1088                 return;
1089         }
1090
1091         /* The MAC does not support pause frames, and also doesn't
1092          * support half-duplex traffic modes.
1093          */
1094         phylink_set(mask, Autoneg);
1095         phylink_set(mask, MII);
1096         phylink_set(mask, 10baseT_Full);
1097         phylink_set(mask, 100baseT_Full);
1098         phylink_set(mask, 100baseT1_Full);
1099         if (mii->xmii_mode[port] == XMII_MODE_RGMII ||
1100             mii->xmii_mode[port] == XMII_MODE_SGMII)
1101                 phylink_set(mask, 1000baseT_Full);
1102
1103         bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
1104         bitmap_and(state->advertising, state->advertising, mask,
1105                    __ETHTOOL_LINK_MODE_MASK_NBITS);
1106 }
1107
1108 static int sja1105_mac_pcs_get_state(struct dsa_switch *ds, int port,
1109                                      struct phylink_link_state *state)
1110 {
1111         struct sja1105_private *priv = ds->priv;
1112         int ais;
1113
1114         /* Read the vendor-specific AUTONEG_INTR_STATUS register */
1115         ais = sja1105_sgmii_read(priv, SJA1105_AIS);
1116         if (ais < 0)
1117                 return ais;
1118
1119         switch (SJA1105_AIS_SPEED(ais)) {
1120         case 0:
1121                 state->speed = SPEED_10;
1122                 break;
1123         case 1:
1124                 state->speed = SPEED_100;
1125                 break;
1126         case 2:
1127                 state->speed = SPEED_1000;
1128                 break;
1129         default:
1130                 dev_err(ds->dev, "Invalid SGMII PCS speed %lu\n",
1131                         SJA1105_AIS_SPEED(ais));
1132         }
1133         state->duplex = SJA1105_AIS_DUPLEX_MODE(ais);
1134         state->an_complete = SJA1105_AIS_COMPLETE(ais);
1135         state->link = SJA1105_AIS_LINK_STATUS(ais);
1136
1137         return 0;
1138 }
1139
1140 static int
1141 sja1105_find_static_fdb_entry(struct sja1105_private *priv, int port,
1142                               const struct sja1105_l2_lookup_entry *requested)
1143 {
1144         struct sja1105_l2_lookup_entry *l2_lookup;
1145         struct sja1105_table *table;
1146         int i;
1147
1148         table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
1149         l2_lookup = table->entries;
1150
1151         for (i = 0; i < table->entry_count; i++)
1152                 if (l2_lookup[i].macaddr == requested->macaddr &&
1153                     l2_lookup[i].vlanid == requested->vlanid &&
1154                     l2_lookup[i].destports & BIT(port))
1155                         return i;
1156
1157         return -1;
1158 }
1159
1160 /* We want FDB entries added statically through the bridge command to persist
1161  * across switch resets, which are a common thing during normal SJA1105
1162  * operation. So we have to back them up in the static configuration tables
1163  * and hence apply them on next static config upload... yay!
1164  */
1165 static int
1166 sja1105_static_fdb_change(struct sja1105_private *priv, int port,
1167                           const struct sja1105_l2_lookup_entry *requested,
1168                           bool keep)
1169 {
1170         struct sja1105_l2_lookup_entry *l2_lookup;
1171         struct sja1105_table *table;
1172         int rc, match;
1173
1174         table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
1175
1176         match = sja1105_find_static_fdb_entry(priv, port, requested);
1177         if (match < 0) {
1178                 /* Can't delete a missing entry. */
1179                 if (!keep)
1180                         return 0;
1181
1182                 /* No match => new entry */
1183                 rc = sja1105_table_resize(table, table->entry_count + 1);
1184                 if (rc)
1185                         return rc;
1186
1187                 match = table->entry_count - 1;
1188         }
1189
1190         /* Assign pointer after the resize (it may be new memory) */
1191         l2_lookup = table->entries;
1192
1193         /* We have a match.
1194          * If the job was to add this FDB entry, it's already done (mostly
1195          * anyway, since the port forwarding mask may have changed, in which
1196          * case we update it).
1197          * Otherwise we have to delete it.
1198          */
1199         if (keep) {
1200                 l2_lookup[match] = *requested;
1201                 return 0;
1202         }
1203
1204         /* To remove, the strategy is to overwrite the element with
1205          * the last one, and then reduce the array size by 1
1206          */
1207         l2_lookup[match] = l2_lookup[table->entry_count - 1];
1208         return sja1105_table_resize(table, table->entry_count - 1);
1209 }
1210
1211 /* First-generation switches have a 4-way set associative TCAM that
1212  * holds the FDB entries. An FDB index spans from 0 to 1023 and is composed of
1213  * a "bin" (grouping of 4 entries) and a "way" (an entry within a bin).
1214  * For the placement of a newly learnt FDB entry, the switch selects the bin
1215  * based on a hash function, and the way within that bin incrementally.
1216  */
1217 static int sja1105et_fdb_index(int bin, int way)
1218 {
1219         return bin * SJA1105ET_FDB_BIN_SIZE + way;
1220 }
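
/* Worked example of the addressing above: with a bin size of 4, the 1024
 * FDB entries form 256 bins of 4 ways each, so e.g. bin 37, way 2 maps to
 * FDB index 37 * 4 + 2 = 150, and the last valid slot is bin 255, way 3,
 * i.e. index 1023.
 */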
1221
1222 static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
1223                                          const u8 *addr, u16 vid,
1224                                          struct sja1105_l2_lookup_entry *match,
1225                                          int *last_unused)
1226 {
1227         int way;
1228
1229         for (way = 0; way < SJA1105ET_FDB_BIN_SIZE; way++) {
1230                 struct sja1105_l2_lookup_entry l2_lookup = {0};
1231                 int index = sja1105et_fdb_index(bin, way);
1232
1233                 /* Skip unused entries, optionally recording the last
1234                  * unused way seen in *last_unused
1235                  */
1236                 if (sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1237                                                 index, &l2_lookup)) {
1238                         if (last_unused)
1239                                 *last_unused = way;
1240                         continue;
1241                 }
1242
1243                 if (l2_lookup.macaddr == ether_addr_to_u64(addr) &&
1244                     l2_lookup.vlanid == vid) {
1245                         if (match)
1246                                 *match = l2_lookup;
1247                         return way;
1248                 }
1249         }
1250         /* Return an invalid entry index if not found */
1251         return -1;
1252 }
1253
1254 int sja1105et_fdb_add(struct dsa_switch *ds, int port,
1255                       const unsigned char *addr, u16 vid)
1256 {
1257         struct sja1105_l2_lookup_entry l2_lookup = {0};
1258         struct sja1105_private *priv = ds->priv;
1259         struct device *dev = ds->dev;
1260         int last_unused = -1;
1261         int bin, way, rc;
1262
1263         bin = sja1105et_fdb_hash(priv, addr, vid);
1264
1265         way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
1266                                             &l2_lookup, &last_unused);
1267         if (way >= 0) {
1268                 /* We have an FDB entry. Is our port in the destination
1269                  * mask? If yes, we need to do nothing. If not, we need
1270                  * to rewrite the entry by adding this port to it.
1271                  */
1272                 if (l2_lookup.destports & BIT(port))
1273                         return 0;
1274                 l2_lookup.destports |= BIT(port);
1275         } else {
1276                 int index;
1277
1278                 /* We don't have an FDB entry. We construct a new one and
1279                  * try to find a place for it within the FDB table.
1280                  */
1281                 l2_lookup.macaddr = ether_addr_to_u64(addr);
1282                 l2_lookup.destports = BIT(port);
1283                 l2_lookup.vlanid = vid;
1284
1285                 if (last_unused >= 0) {
1286                         way = last_unused;
1287                 } else {
1288                         /* Bin is full, need to evict somebody.
1289                          * Choose victim at random. If you get these messages
1290                          * often, you may need to consider changing the
1291                          * distribution function:
1292                          * static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly
1293                          */
1294                         way = get_random_u32() % SJA1105ET_FDB_BIN_SIZE;
1295                         index = sja1105et_fdb_index(bin, way);
1296                         dev_warn(dev, "Warning, FDB bin %d full while adding entry for %pM. Evicting entry %u.\n",
1297                                  bin, addr, way);
1298                         /* Evict entry */
1299                         sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1300                                                      index, NULL, false);
1301                 }
1302         }
1303         l2_lookup.index = sja1105et_fdb_index(bin, way);
1304
1305         rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1306                                           l2_lookup.index, &l2_lookup,
1307                                           true);
1308         if (rc < 0)
1309                 return rc;
1310
1311         return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
1312 }
1313
1314 int sja1105et_fdb_del(struct dsa_switch *ds, int port,
1315                       const unsigned char *addr, u16 vid)
1316 {
1317         struct sja1105_l2_lookup_entry l2_lookup = {0};
1318         struct sja1105_private *priv = ds->priv;
1319         int index, bin, way, rc;
1320         bool keep;
1321
1322         bin = sja1105et_fdb_hash(priv, addr, vid);
1323         way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
1324                                             &l2_lookup, NULL);
1325         if (way < 0)
1326                 return 0;
1327         index = sja1105et_fdb_index(bin, way);
1328
1329         /* We have an FDB entry. Is our port in the destination mask? If yes,
1330          * we need to remove it. If the resulting port mask becomes empty, we
1331          * need to completely evict the FDB entry.
1332          * Otherwise we just write it back.
1333          */
1334         l2_lookup.destports &= ~BIT(port);
1335
1336         if (l2_lookup.destports)
1337                 keep = true;
1338         else
1339                 keep = false;
1340
1341         rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1342                                           index, &l2_lookup, keep);
1343         if (rc < 0)
1344                 return rc;
1345
1346         return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
1347 }
1348
1349 int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
1350                         const unsigned char *addr, u16 vid)
1351 {
1352         struct sja1105_l2_lookup_entry l2_lookup = {0};
1353         struct sja1105_private *priv = ds->priv;
1354         int rc, i;
1355
1356         /* Search for an existing entry in the FDB table */
1357         l2_lookup.macaddr = ether_addr_to_u64(addr);
1358         l2_lookup.vlanid = vid;
1359         l2_lookup.iotag = SJA1105_S_TAG;
1360         l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
1361         if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
1362                 l2_lookup.mask_vlanid = VLAN_VID_MASK;
1363                 l2_lookup.mask_iotag = BIT(0);
1364         } else {
1365                 l2_lookup.mask_vlanid = 0;
1366                 l2_lookup.mask_iotag = 0;
1367         }
1368         l2_lookup.destports = BIT(port);
1369
1370         rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1371                                          SJA1105_SEARCH, &l2_lookup);
1372         if (rc == 0) {
1373                 /* Found and this port is already in the entry's
1374                  * port mask => job done
1375                  */
1376                 if (l2_lookup.destports & BIT(port))
1377                         return 0;
1378                 /* l2_lookup.index is populated by the switch in case it
1379                  * found something.
1380                  */
1381                 l2_lookup.destports |= BIT(port);
1382                 goto skip_finding_an_index;
1383         }
1384
1385         /* Not found, so try to find an unused spot in the FDB.
1386          * This is slightly inefficient because the strategy is to probe
1387          * every possible position from 0 to 1023.
1388          */
1389         for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
1390                 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1391                                                  i, NULL);
1392                 if (rc < 0)
1393                         break;
1394         }
1395         if (i == SJA1105_MAX_L2_LOOKUP_COUNT) {
1396                 dev_err(ds->dev, "FDB is full, cannot add entry.\n");
1397                 return -EINVAL;
1398         }
1399         l2_lookup.lockeds = true;
1400         l2_lookup.index = i;
1401
1402 skip_finding_an_index:
1403         rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1404                                           l2_lookup.index, &l2_lookup,
1405                                           true);
1406         if (rc < 0)
1407                 return rc;
1408
1409         return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
1410 }
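
/* A sketch of the masked-search semantics relied upon above (P/Q/R/S only).
 * Roughly, the switch compares the key against each entry under the given
 * mask, so a match means (entry ^ key) & mask == 0. With the settings from
 * sja1105pqrs_fdb_add():
 *
 *   mask_macaddr = GENMASK_ULL(47, 0)  -> all 48 MAC address bits must match
 *   mask_vlanid  = VLAN_VID_MASK       -> VID must match (VLAN-aware modes)
 *   mask_vlanid  = 0                   -> VID is ignored (VLAN-unaware mode)
 *
 * On a hit, the switch also reports the position of the matching entry in
 * l2_lookup.index, which is then reused for the write-back.
 */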
1411
1412 int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
1413                         const unsigned char *addr, u16 vid)
1414 {
1415         struct sja1105_l2_lookup_entry l2_lookup = {0};
1416         struct sja1105_private *priv = ds->priv;
1417         bool keep;
1418         int rc;
1419
1420         l2_lookup.macaddr = ether_addr_to_u64(addr);
1421         l2_lookup.vlanid = vid;
1422         l2_lookup.iotag = SJA1105_S_TAG;
1423         l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
1424         if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
1425                 l2_lookup.mask_vlanid = VLAN_VID_MASK;
1426                 l2_lookup.mask_iotag = BIT(0);
1427         } else {
1428                 l2_lookup.mask_vlanid = 0;
1429                 l2_lookup.mask_iotag = 0;
1430         }
1431         l2_lookup.destports = BIT(port);
1432
1433         rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1434                                          SJA1105_SEARCH, &l2_lookup);
1435         if (rc < 0)
1436                 return 0;
1437
1438         l2_lookup.destports &= ~BIT(port);
1439
1440         /* Decide whether we remove just this port from the FDB entry,
1441          * or if we remove it completely.
1442          */
1443         if (l2_lookup.destports)
1444                 keep = true;
1445         else
1446                 keep = false;
1447
1448         rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1449                                           l2_lookup.index, &l2_lookup, keep);
1450         if (rc < 0)
1451                 return rc;
1452
1453         return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
1454 }
1455
1456 static int sja1105_fdb_add(struct dsa_switch *ds, int port,
1457                            const unsigned char *addr, u16 vid)
1458 {
1459         struct sja1105_private *priv = ds->priv;
1460
1461         /* dsa_8021q is in effect when the bridge's vlan_filtering isn't,
1462          * so the switch still does some VLAN processing internally.
1463          * But Shared VLAN Learning (SVL) is also active, and it will take
1464          * care of autonomous forwarding between the unique pvid's of each
1465          * port.  Here we just make sure that users can't add duplicate FDB
1466          * entries when in this mode - the actual VID doesn't matter except
1467          * for what gets printed in 'bridge fdb show'.  In the case of zero,
1468          * no VID gets printed at all.
1469          */
1470         if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL)
1471                 vid = 0;
1472
1473         return priv->info->fdb_add_cmd(ds, port, addr, vid);
1474 }
1475
1476 static int sja1105_fdb_del(struct dsa_switch *ds, int port,
1477                            const unsigned char *addr, u16 vid)
1478 {
1479         struct sja1105_private *priv = ds->priv;
1480
1481         if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL)
1482                 vid = 0;
1483
1484         return priv->info->fdb_del_cmd(ds, port, addr, vid);
1485 }
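
/* Example of the VID rewriting done by the two wrappers above. When the
 * switch is not in full VLAN filtering mode, a request such as (device and
 * address purely illustrative)
 *
 *   bridge fdb add de:ad:be:ef:00:01 dev swp2 vlan 100 master static
 *
 * is programmed with VID 0, so 'bridge fdb show' lists the entry without a
 * VLAN. Only in SJA1105_VLAN_FILTERING_FULL is the requested VID preserved.
 */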
1486
1487 static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
1488                             dsa_fdb_dump_cb_t *cb, void *data)
1489 {
1490         struct sja1105_private *priv = ds->priv;
1491         struct device *dev = ds->dev;
1492         int i;
1493
1494         for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
1495                 struct sja1105_l2_lookup_entry l2_lookup = {0};
1496                 u8 macaddr[ETH_ALEN];
1497                 int rc;
1498
1499                 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1500                                                  i, &l2_lookup);
1501                 /* No fdb entry at i, not an issue */
1502                 if (rc == -ENOENT)
1503                         continue;
1504                 if (rc) {
1505                         dev_err(dev, "Failed to dump FDB: %d\n", rc);
1506                         return rc;
1507                 }
1508
1509                 /* FDB dump callback is per port. This means we have to
1510                  * disregard a valid entry if it's not for this port, even if
1511                  * only to revisit it later. This is inefficient because the
1512                  * 1024-sized FDB table needs to be traversed 4 times through
1513                  * SPI during a 'bridge fdb show' command.
1514                  */
1515                 if (!(l2_lookup.destports & BIT(port)))
1516                         continue;
1517                 u64_to_ether_addr(l2_lookup.macaddr, macaddr);
1518
1519                 /* We need to hide the dsa_8021q VLANs from the user. */
1520                 if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
1521                         l2_lookup.vlanid = 0;
1522                 cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
1523         }
1524         return 0;
1525 }
1526
1527 /* This callback needs to be present; without it, the DSA core rejects MDB operations as unsupported */
1528 static int sja1105_mdb_prepare(struct dsa_switch *ds, int port,
1529                                const struct switchdev_obj_port_mdb *mdb)
1530 {
1531         return 0;
1532 }
1533
1534 static void sja1105_mdb_add(struct dsa_switch *ds, int port,
1535                             const struct switchdev_obj_port_mdb *mdb)
1536 {
1537         sja1105_fdb_add(ds, port, mdb->addr, mdb->vid);
1538 }
1539
1540 static int sja1105_mdb_del(struct dsa_switch *ds, int port,
1541                            const struct switchdev_obj_port_mdb *mdb)
1542 {
1543         return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid);
1544 }
1545
1546 static int sja1105_bridge_member(struct dsa_switch *ds, int port,
1547                                  struct net_device *br, bool member)
1548 {
1549         struct sja1105_l2_forwarding_entry *l2_fwd;
1550         struct sja1105_private *priv = ds->priv;
1551         int i, rc;
1552
1553         l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;
1554
1555         for (i = 0; i < SJA1105_NUM_PORTS; i++) {
1556                 /* Add this port to the forwarding matrix of the
1557                  * other ports in the same bridge, and vice versa.
1558                  */
1559                 if (!dsa_is_user_port(ds, i))
1560                         continue;
1561                 /* For the ports already under the bridge, only one thing needs
1562                  * to be done, and that is to add this port to their
1563                  * reachability domain. So we can perform the SPI write for
1564                  * them immediately. However, for this port itself (the one
1565                  * that is new to the bridge), we need to add all other ports
1566                  * to its reachability domain. So we do that incrementally in
1567                  * this loop, and perform the SPI write only at the end, once
1568                  * the domain contains all other bridge ports.
1569                  */
1570                 if (i == port)
1571                         continue;
1572                 if (dsa_to_port(ds, i)->bridge_dev != br)
1573                         continue;
1574                 sja1105_port_allow_traffic(l2_fwd, i, port, member);
1575                 sja1105_port_allow_traffic(l2_fwd, port, i, member);
1576
1577                 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
1578                                                   i, &l2_fwd[i], true);
1579                 if (rc < 0)
1580                         return rc;
1581         }
1582
1583         return sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
1584                                             port, &l2_fwd[port], true);
1585 }
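
/* Worked example for the loop above (port numbers illustrative): if port 2
 * joins a bridge that already contains user ports 0 and 1, the loop issues
 * one L2 Forwarding write for port 0 and one for port 1 (each gaining port 2
 * in its reachability domain), while the entry for port 2 itself accumulates
 * ports 0 and 1 and is written only once, by the final call after the loop.
 * Leaving the bridge runs the same sequence with member == false, clearing
 * the same bits.
 */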
1586
1587 static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
1588                                          u8 state)
1589 {
1590         struct sja1105_private *priv = ds->priv;
1591         struct sja1105_mac_config_entry *mac;
1592
1593         mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1594
1595         switch (state) {
1596         case BR_STATE_DISABLED:
1597         case BR_STATE_BLOCKING:
1598                 /* From UM10944 description of DRPDTAG (why put this there?):
1599                  * "Management traffic flows to the port regardless of the state
1600                  * of the INGRESS flag". So BPDUs are still allowed to pass.
1601                  * At the moment there is no difference between DISABLED and BLOCKING.
1602                  */
1603                 mac[port].ingress   = false;
1604                 mac[port].egress    = false;
1605                 mac[port].dyn_learn = false;
1606                 break;
1607         case BR_STATE_LISTENING:
1608                 mac[port].ingress   = true;
1609                 mac[port].egress    = false;
1610                 mac[port].dyn_learn = false;
1611                 break;
1612         case BR_STATE_LEARNING:
1613                 mac[port].ingress   = true;
1614                 mac[port].egress    = false;
1615                 mac[port].dyn_learn = true;
1616                 break;
1617         case BR_STATE_FORWARDING:
1618                 mac[port].ingress   = true;
1619                 mac[port].egress    = true;
1620                 mac[port].dyn_learn = true;
1621                 break;
1622         default:
1623                 dev_err(ds->dev, "invalid STP state: %d\n", state);
1624                 return;
1625         }
1626
1627         sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
1628                                      &mac[port], true);
1629 }
1630
1631 static int sja1105_bridge_join(struct dsa_switch *ds, int port,
1632                                struct net_device *br)
1633 {
1634         return sja1105_bridge_member(ds, port, br, true);
1635 }
1636
1637 static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
1638                                  struct net_device *br)
1639 {
1640         sja1105_bridge_member(ds, port, br, false);
1641 }
1642
1643 #define BYTES_PER_KBIT (1000LL / 8)
1644
1645 static int sja1105_find_unused_cbs_shaper(struct sja1105_private *priv)
1646 {
1647         int i;
1648
1649         for (i = 0; i < priv->info->num_cbs_shapers; i++)
1650                 if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope)
1651                         return i;
1652
1653         return -1;
1654 }
1655
1656 static int sja1105_delete_cbs_shaper(struct sja1105_private *priv, int port,
1657                                      int prio)
1658 {
1659         int i;
1660
1661         for (i = 0; i < priv->info->num_cbs_shapers; i++) {
1662                 struct sja1105_cbs_entry *cbs = &priv->cbs[i];
1663
1664                 if (cbs->port == port && cbs->prio == prio) {
1665                         memset(cbs, 0, sizeof(*cbs));
1666                         return sja1105_dynamic_config_write(priv, BLK_IDX_CBS,
1667                                                             i, cbs, true);
1668                 }
1669         }
1670
1671         return 0;
1672 }
1673
1674 static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port,
1675                                 struct tc_cbs_qopt_offload *offload)
1676 {
1677         struct sja1105_private *priv = ds->priv;
1678         struct sja1105_cbs_entry *cbs;
1679         int index;
1680
1681         if (!offload->enable)
1682                 return sja1105_delete_cbs_shaper(priv, port, offload->queue);
1683
1684         index = sja1105_find_unused_cbs_shaper(priv);
1685         if (index < 0)
1686                 return -ENOSPC;
1687
1688         cbs = &priv->cbs[index];
1689         cbs->port = port;
1690         cbs->prio = offload->queue;
1691         /* locredit and sendslope are negative by definition. In hardware,
1692          * positive values must be provided, and the negative sign is implicit.
1693          */
1694         cbs->credit_hi = offload->hicredit;
1695         cbs->credit_lo = abs(offload->locredit);
1696         /* User space is in kbits/sec, hardware in bytes/sec */
1697         cbs->idle_slope = offload->idleslope * BYTES_PER_KBIT;
1698         cbs->send_slope = abs(offload->sendslope * BYTES_PER_KBIT);
1699         /* Convert the negative values from 64-bit 2's complement
1700          * to 32-bit 2's complement (for the case of 0x80000000 whose
1701          * negative is still negative).
1702          */
1703         cbs->credit_lo &= GENMASK_ULL(31, 0);
1704         cbs->send_slope &= GENMASK_ULL(31, 0);
1705
1706         return sja1105_dynamic_config_write(priv, BLK_IDX_CBS, index, cbs,
1707                                             true);
1708 }
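
/* Worked example for the unit conversions above, with illustrative numbers
 * from a typical tc-cbs configuration (idleslope 20000, sendslope -980000,
 * both in kbit/s):
 *
 *   BYTES_PER_KBIT   = 1000 / 8 = 125
 *   cbs->idle_slope  = 20000 * 125           = 2500000 bytes/s
 *   cbs->send_slope  = |-980000 * 125|       = 122500000 bytes/s
 *
 * hicredit is passed through as-is and locredit is stored as its absolute
 * value, since the hardware treats the negative sign as implicit.
 */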
1709
1710 static int sja1105_reload_cbs(struct sja1105_private *priv)
1711 {
1712         int rc = 0, i;
1713
1714         for (i = 0; i < priv->info->num_cbs_shapers; i++) {
1715                 struct sja1105_cbs_entry *cbs = &priv->cbs[i];
1716
1717                 if (!cbs->idle_slope && !cbs->send_slope)
1718                         continue;
1719
1720                 rc = sja1105_dynamic_config_write(priv, BLK_IDX_CBS, i, cbs,
1721                                                   true);
1722                 if (rc)
1723                         break;
1724         }
1725
1726         return rc;
1727 }
1728
1729 static const char * const sja1105_reset_reasons[] = {
1730         [SJA1105_VLAN_FILTERING] = "VLAN filtering",
1731         [SJA1105_RX_HWTSTAMPING] = "RX timestamping",
1732         [SJA1105_AGEING_TIME] = "Ageing time",
1733         [SJA1105_SCHEDULING] = "Time-aware scheduling",
1734         [SJA1105_BEST_EFFORT_POLICING] = "Best-effort policing",
1735         [SJA1105_VIRTUAL_LINKS] = "Virtual links",
1736 };
1737
1738 /* For situations where we need to change a setting at runtime that is only
1739  * available through the static configuration, resetting the switch in order
1740  * to upload the new static config is unavoidable. Back up the settings we
1741  * modify at runtime (currently only MAC) and restore them after uploading,
1742  * such that this operation is relatively seamless.
1743  */
1744 int sja1105_static_config_reload(struct sja1105_private *priv,
1745                                  enum sja1105_reset_reason reason)
1746 {
1747         struct ptp_system_timestamp ptp_sts_before;
1748         struct ptp_system_timestamp ptp_sts_after;
1749         struct sja1105_mac_config_entry *mac;
1750         int speed_mbps[SJA1105_NUM_PORTS];
1751         struct dsa_switch *ds = priv->ds;
1752         s64 t1, t2, t3, t4;
1753         s64 t12, t34;
1754         u16 bmcr = 0;
1755         int rc, i;
1756         s64 now;
1757
1758         mutex_lock(&priv->mgmt_lock);
1759
1760         mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1761
1762         /* Back up the dynamic link speed changed by sja1105_adjust_port_config
1763          * and temporarily restore it to SJA1105_SPEED_AUTO, which is what the
1764          * switch wants to see in the static config so that we can still change
1765          * the speed through the dynamic interface later.
1766          */
1767         for (i = 0; i < SJA1105_NUM_PORTS; i++) {
1768                 speed_mbps[i] = sja1105_speed[mac[i].speed];
1769                 mac[i].speed = SJA1105_SPEED_AUTO;
1770         }
1771
1772         if (sja1105_supports_sgmii(priv, SJA1105_SGMII_PORT))
1773                 bmcr = sja1105_sgmii_read(priv, MII_BMCR);
1774
1775         /* No PTP operations can run right now */
1776         mutex_lock(&priv->ptp_data.lock);
1777
1778         rc = __sja1105_ptp_gettimex(ds, &now, &ptp_sts_before);
1779         if (rc < 0)
1780                 goto out_unlock_ptp;
1781
1782         /* Reset switch and send updated static configuration */
1783         rc = sja1105_static_config_upload(priv);
1784         if (rc < 0)
1785                 goto out_unlock_ptp;
1786
1787         rc = __sja1105_ptp_settime(ds, 0, &ptp_sts_after);
1788         if (rc < 0)
1789                 goto out_unlock_ptp;
1790
1791         t1 = timespec64_to_ns(&ptp_sts_before.pre_ts);
1792         t2 = timespec64_to_ns(&ptp_sts_before.post_ts);
1793         t3 = timespec64_to_ns(&ptp_sts_after.pre_ts);
1794         t4 = timespec64_to_ns(&ptp_sts_after.post_ts);
1795         /* Mid point, corresponds to pre-reset PTPCLKVAL */
1796         t12 = t1 + (t2 - t1) / 2;
1797         /* Mid point, corresponds to post-reset PTPCLKVAL, aka 0 */
1798         t34 = t3 + (t4 - t3) / 2;
1799         /* Advance PTPCLKVAL by the time it took since its readout */
1800         now += (t34 - t12);
1801
1802         __sja1105_ptp_adjtime(ds, now);
1803
1804 out_unlock_ptp:
1805         mutex_unlock(&priv->ptp_data.lock);
1806
1807         dev_info(priv->ds->dev,
1808                  "Reset switch and programmed static config. Reason: %s\n",
1809                  sja1105_reset_reasons[reason]);
1810
1811         /* Configure the CGU (PLLs) for MII and RMII PHYs.
1812          * For these interfaces there is no dynamic configuration
1813          * needed, since the PLLs have the same settings at all speeds.
1814          */
1815         rc = sja1105_clocking_setup(priv);
1816         if (rc < 0)
1817                 goto out;
1818
1819         for (i = 0; i < SJA1105_NUM_PORTS; i++) {
1820                 rc = sja1105_adjust_port_config(priv, i, speed_mbps[i]);
1821                 if (rc < 0)
1822                         goto out;
1823         }
1824
1825         if (sja1105_supports_sgmii(priv, SJA1105_SGMII_PORT)) {
1826                 bool an_enabled = !!(bmcr & BMCR_ANENABLE);
1827
1828                 sja1105_sgmii_pcs_config(priv, an_enabled, false);
1829
1830                 if (!an_enabled) {
1831                         int speed = SPEED_UNKNOWN;
1832
1833                         if (bmcr & BMCR_SPEED1000)
1834                                 speed = SPEED_1000;
1835                         else if (bmcr & BMCR_SPEED100)
1836                                 speed = SPEED_100;
1837                         else if (bmcr & BMCR_SPEED10)
1838                                 speed = SPEED_10;
1839
1840                         sja1105_sgmii_pcs_force_speed(priv, speed);
1841                 }
1842         }
1843
1844         rc = sja1105_reload_cbs(priv);
1845         if (rc < 0)
1846                 goto out;
1847 out:
1848         mutex_unlock(&priv->mgmt_lock);
1849
1850         return rc;
1851 }
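
/* Worked example of the PTP resynchronization arithmetic above, with made-up
 * system clock readings (all values in ns):
 *
 *   pre-reset snapshot:  t1 = 1000, t2 = 1010  ->  t12 = 1005
 *   post-reset snapshot: t3 = 5000, t4 = 5010  ->  t34 = 5005
 *
 * The switch clock was read as 'now' around t12 and reset to 0 around t34,
 * so the time lost across the reset is t34 - t12 = 4000 ns, which is added
 * back to 'now' before it is restored via __sja1105_ptp_adjtime().
 */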
1852
1853 static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
1854 {
1855         struct sja1105_mac_config_entry *mac;
1856
1857         mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1858
1859         mac[port].vlanid = pvid;
1860
1861         return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
1862                                            &mac[port], true);
1863 }
1864
1865 static int sja1105_crosschip_bridge_join(struct dsa_switch *ds,
1866                                          int tree_index, int sw_index,
1867                                          int other_port, struct net_device *br)
1868 {
1869         struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index);
1870         struct sja1105_private *other_priv = other_ds->priv;
1871         struct sja1105_private *priv = ds->priv;
1872         int port, rc;
1873
1874         if (other_ds->ops != &sja1105_switch_ops)
1875                 return 0;
1876
1877         for (port = 0; port < ds->num_ports; port++) {
1878                 if (!dsa_is_user_port(ds, port))
1879                         continue;
1880                 if (dsa_to_port(ds, port)->bridge_dev != br)
1881                         continue;
1882
1883                 other_priv->expect_dsa_8021q = true;
1884                 rc = dsa_8021q_crosschip_bridge_join(ds, port, other_ds,
1885                                                      other_port,
1886                                                      &priv->crosschip_links);
1887                 other_priv->expect_dsa_8021q = false;
1888                 if (rc)
1889                         return rc;
1890
1891                 priv->expect_dsa_8021q = true;
1892                 rc = dsa_8021q_crosschip_bridge_join(other_ds, other_port, ds,
1893                                                      port,
1894                                                      &other_priv->crosschip_links);
1895                 priv->expect_dsa_8021q = false;
1896                 if (rc)
1897                         return rc;
1898         }
1899
1900         return 0;
1901 }
1902
1903 static void sja1105_crosschip_bridge_leave(struct dsa_switch *ds,
1904                                            int tree_index, int sw_index,
1905                                            int other_port,
1906                                            struct net_device *br)
1907 {
1908         struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index);
1909         struct sja1105_private *other_priv = other_ds->priv;
1910         struct sja1105_private *priv = ds->priv;
1911         int port;
1912
1913         if (other_ds->ops != &sja1105_switch_ops)
1914                 return;
1915
1916         for (port = 0; port < ds->num_ports; port++) {
1917                 if (!dsa_is_user_port(ds, port))
1918                         continue;
1919                 if (dsa_to_port(ds, port)->bridge_dev != br)
1920                         continue;
1921
1922                 other_priv->expect_dsa_8021q = true;
1923                 dsa_8021q_crosschip_bridge_leave(ds, port, other_ds, other_port,
1924                                                  &priv->crosschip_links);
1925                 other_priv->expect_dsa_8021q = false;
1926
1927                 priv->expect_dsa_8021q = true;
1928                 dsa_8021q_crosschip_bridge_leave(other_ds, other_port, ds, port,
1929                                                  &other_priv->crosschip_links);
1930                 priv->expect_dsa_8021q = false;
1931         }
1932 }
1933
1934 static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
1935 {
1936         struct sja1105_private *priv = ds->priv;
1937         int rc, i;
1938
1939         for (i = 0; i < SJA1105_NUM_PORTS; i++) {
1940                 priv->expect_dsa_8021q = true;
1941                 rc = dsa_port_setup_8021q_tagging(ds, i, enabled);
1942                 priv->expect_dsa_8021q = false;
1943                 if (rc < 0) {
1944                         dev_err(ds->dev, "Failed to setup VLAN tagging for port %d: %d\n",
1945                                 i, rc);
1946                         return rc;
1947                 }
1948         }
1949
1950         dev_info(ds->dev, "%s switch tagging\n",
1951                  enabled ? "Enabled" : "Disabled");
1952         return 0;
1953 }
1954
1955 static enum dsa_tag_protocol
1956 sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
1957                          enum dsa_tag_protocol mp)
1958 {
1959         return DSA_TAG_PROTO_SJA1105;
1960 }
1961
1962 static int sja1105_find_free_subvlan(u16 *subvlan_map, bool pvid)
1963 {
1964         int subvlan;
1965
1966         if (pvid)
1967                 return 0;
1968
1969         for (subvlan = 1; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
1970                 if (subvlan_map[subvlan] == VLAN_N_VID)
1971                         return subvlan;
1972
1973         return -1;
1974 }
1975
1976 static int sja1105_find_subvlan(u16 *subvlan_map, u16 vid)
1977 {
1978         int subvlan;
1979
1980         for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
1981                 if (subvlan_map[subvlan] == vid)
1982                         return subvlan;
1983
1984         return -1;
1985 }
1986
1987 static int sja1105_find_committed_subvlan(struct sja1105_private *priv,
1988                                           int port, u16 vid)
1989 {
1990         struct sja1105_port *sp = &priv->ports[port];
1991
1992         return sja1105_find_subvlan(sp->subvlan_map, vid);
1993 }
1994
1995 static void sja1105_init_subvlan_map(u16 *subvlan_map)
1996 {
1997         int subvlan;
1998
1999         for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
2000                 subvlan_map[subvlan] = VLAN_N_VID;
2001 }
2002
2003 static void sja1105_commit_subvlan_map(struct sja1105_private *priv, int port,
2004                                        u16 *subvlan_map)
2005 {
2006         struct sja1105_port *sp = &priv->ports[port];
2007         int subvlan;
2008
2009         for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
2010                 sp->subvlan_map[subvlan] = subvlan_map[subvlan];
2011 }
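
/* Shape of the per-port subvlan_map handled by the helpers above (the number
 * of slots is DSA_8021Q_N_SUBVLAN, from include/linux/dsa/8021q.h):
 *
 *   subvlan_map[subvlan] = vid               allocated sub-VLAN slot
 *   subvlan_map[subvlan] = VLAN_N_VID (4096) free slot
 *
 * Slot 0 is reserved for the port's pvid (see sja1105_find_free_subvlan()),
 * so a port can carry at most DSA_8021Q_N_SUBVLAN - 1 additional bridge
 * VLANs while in best-effort VLAN filtering mode.
 */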
2012
2013 static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
2014 {
2015         struct sja1105_vlan_lookup_entry *vlan;
2016         int count, i;
2017
2018         vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
2019         count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count;
2020
2021         for (i = 0; i < count; i++)
2022                 if (vlan[i].vlanid == vid)
2023                         return i;
2024
2025         /* Return an invalid entry index if not found */
2026         return -1;
2027 }
2028
2029 static int
2030 sja1105_find_retagging_entry(struct sja1105_retagging_entry *retagging,
2031                              int count, int from_port, u16 from_vid,
2032                              u16 to_vid)
2033 {
2034         int i;
2035
2036         for (i = 0; i < count; i++)
2037                 if (retagging[i].ing_port == BIT(from_port) &&
2038                     retagging[i].vlan_ing == from_vid &&
2039                     retagging[i].vlan_egr == to_vid)
2040                         return i;
2041
2042         /* Return an invalid entry index if not found */
2043         return -1;
2044 }
2045
2046 static int sja1105_commit_vlans(struct sja1105_private *priv,
2047                                 struct sja1105_vlan_lookup_entry *new_vlan,
2048                                 struct sja1105_retagging_entry *new_retagging,
2049                                 int num_retagging)
2050 {
2051         struct sja1105_retagging_entry *retagging;
2052         struct sja1105_vlan_lookup_entry *vlan;
2053         struct sja1105_table *table;
2054         int num_vlans = 0;
2055         int rc, i, k = 0;
2056
2057         /* VLAN table */
2058         table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
2059         vlan = table->entries;
2060
2061         for (i = 0; i < VLAN_N_VID; i++) {
2062                 int match = sja1105_is_vlan_configured(priv, i);
2063
2064                 if (new_vlan[i].vlanid != VLAN_N_VID)
2065                         num_vlans++;
2066
2067                 if (new_vlan[i].vlanid == VLAN_N_VID && match >= 0) {
2068                         /* Was there before, no longer is. Delete */
2069                         dev_dbg(priv->ds->dev, "Deleting VLAN %d\n", i);
2070                         rc = sja1105_dynamic_config_write(priv,
2071                                                           BLK_IDX_VLAN_LOOKUP,
2072                                                           i, &vlan[match], false);
2073                         if (rc < 0)
2074                                 return rc;
2075                 } else if (new_vlan[i].vlanid != VLAN_N_VID) {
2076                         /* Nothing changed, don't do anything */
2077                         if (match >= 0 &&
2078                             vlan[match].vlanid == new_vlan[i].vlanid &&
2079                             vlan[match].tag_port == new_vlan[i].tag_port &&
2080                             vlan[match].vlan_bc == new_vlan[i].vlan_bc &&
2081                             vlan[match].vmemb_port == new_vlan[i].vmemb_port)
2082                                 continue;
2083                         /* Update entry */
2084                         dev_dbg(priv->ds->dev, "Updating VLAN %d\n", i);
2085                         rc = sja1105_dynamic_config_write(priv,
2086                                                           BLK_IDX_VLAN_LOOKUP,
2087                                                           i, &new_vlan[i],
2088                                                           true);
2089                         if (rc < 0)
2090                                 return rc;
2091                 }
2092         }
2093
2094         if (table->entry_count)
2095                 kfree(table->entries);
2096
2097         table->entries = kcalloc(num_vlans, table->ops->unpacked_entry_size,
2098                                  GFP_KERNEL);
2099         if (!table->entries)
2100                 return -ENOMEM;
2101
2102         table->entry_count = num_vlans;
2103         vlan = table->entries;
2104
2105         for (i = 0; i < VLAN_N_VID; i++) {
2106                 if (new_vlan[i].vlanid == VLAN_N_VID)
2107                         continue;
2108                 vlan[k++] = new_vlan[i];
2109         }
2110
2111         /* VLAN Retagging Table */
2112         table = &priv->static_config.tables[BLK_IDX_RETAGGING];
2113         retagging = table->entries;
2114
2115         for (i = 0; i < table->entry_count; i++) {
2116                 rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING,
2117                                                   i, &retagging[i], false);
2118                 if (rc)
2119                         return rc;
2120         }
2121
2122         if (table->entry_count)
2123                 kfree(table->entries);
2124
2125         table->entries = kcalloc(num_retagging, table->ops->unpacked_entry_size,
2126                                  GFP_KERNEL);
2127         if (!table->entries)
2128                 return -ENOMEM;
2129
2130         table->entry_count = num_retagging;
2131         retagging = table->entries;
2132
2133         for (i = 0; i < num_retagging; i++) {
2134                 retagging[i] = new_retagging[i];
2135
2136                 /* Update entry */
2137                 rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING,
2138                                                   i, &retagging[i], true);
2139                 if (rc < 0)
2140                         return rc;
2141         }
2142
2143         return 0;
2144 }
2145
2146 struct sja1105_crosschip_vlan {
2147         struct list_head list;
2148         u16 vid;
2149         bool untagged;
2150         int port;
2151         int other_port;
2152         struct dsa_switch *other_ds;
2153 };
2154
2155 struct sja1105_crosschip_switch {
2156         struct list_head list;
2157         struct dsa_switch *other_ds;
2158 };
2159
2160 static int sja1105_commit_pvid(struct sja1105_private *priv)
2161 {
2162         struct sja1105_bridge_vlan *v;
2163         struct list_head *vlan_list;
2164         int rc = 0;
2165
2166         if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
2167                 vlan_list = &priv->bridge_vlans;
2168         else
2169                 vlan_list = &priv->dsa_8021q_vlans;
2170
2171         list_for_each_entry(v, vlan_list, list) {
2172                 if (v->pvid) {
2173                         rc = sja1105_pvid_apply(priv, v->port, v->vid);
2174                         if (rc)
2175                                 break;
2176                 }
2177         }
2178
2179         return rc;
2180 }
2181
2182 static int
2183 sja1105_build_bridge_vlans(struct sja1105_private *priv,
2184                            struct sja1105_vlan_lookup_entry *new_vlan)
2185 {
2186         struct sja1105_bridge_vlan *v;
2187
2188         if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
2189                 return 0;
2190
2191         list_for_each_entry(v, &priv->bridge_vlans, list) {
2192                 int match = v->vid;
2193
2194                 new_vlan[match].vlanid = v->vid;
2195                 new_vlan[match].vmemb_port |= BIT(v->port);
2196                 new_vlan[match].vlan_bc |= BIT(v->port);
2197                 if (!v->untagged)
2198                         new_vlan[match].tag_port |= BIT(v->port);
2199         }
2200
2201         return 0;
2202 }
2203
2204 static int
2205 sja1105_build_dsa_8021q_vlans(struct sja1105_private *priv,
2206                               struct sja1105_vlan_lookup_entry *new_vlan)
2207 {
2208         struct sja1105_bridge_vlan *v;
2209
2210         if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
2211                 return 0;
2212
2213         list_for_each_entry(v, &priv->dsa_8021q_vlans, list) {
2214                 int match = v->vid;
2215
2216                 new_vlan[match].vlanid = v->vid;
2217                 new_vlan[match].vmemb_port |= BIT(v->port);
2218                 new_vlan[match].vlan_bc |= BIT(v->port);
2219                 if (!v->untagged)
2220                         new_vlan[match].tag_port |= BIT(v->port);
2221         }
2222
2223         return 0;
2224 }
2225
2226 static int sja1105_build_subvlans(struct sja1105_private *priv,
2227                                   u16 subvlan_map[][DSA_8021Q_N_SUBVLAN],
2228                                   struct sja1105_vlan_lookup_entry *new_vlan,
2229                                   struct sja1105_retagging_entry *new_retagging,
2230                                   int *num_retagging)
2231 {
2232         struct sja1105_bridge_vlan *v;
2233         int k = *num_retagging;
2234
2235         if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT)
2236                 return 0;
2237
2238         list_for_each_entry(v, &priv->bridge_vlans, list) {
2239                 int upstream = dsa_upstream_port(priv->ds, v->port);
2240                 int match, subvlan;
2241                 u16 rx_vid;
2242
2243                 /* Only sub-VLANs on user ports need to be applied.
2244                  * Bridge VLANs also include VLANs added automatically
2245                  * by DSA on the CPU port.
2246                  */
2247                 if (!dsa_is_user_port(priv->ds, v->port))
2248                         continue;
2249
2250                 subvlan = sja1105_find_subvlan(subvlan_map[v->port],
2251                                                v->vid);
2252                 if (subvlan < 0) {
2253                         subvlan = sja1105_find_free_subvlan(subvlan_map[v->port],
2254                                                             v->pvid);
2255                         if (subvlan < 0) {
2256                                 dev_err(priv->ds->dev, "No more free subvlans\n");
2257                                 return -ENOSPC;
2258                         }
2259                 }
2260
2261                 rx_vid = dsa_8021q_rx_vid_subvlan(priv->ds, v->port, subvlan);
2262
2263                 /* @v->vid on @v->port needs to be retagged to @rx_vid
2264                  * on @upstream. Assume @v->vid on @v->port and on
2265                  * @upstream was already configured by the previous
2266                  * iteration over bridge_vlans.
2267                  */
2268                 match = rx_vid;
2269                 new_vlan[match].vlanid = rx_vid;
2270                 new_vlan[match].vmemb_port |= BIT(v->port);
2271                 new_vlan[match].vmemb_port |= BIT(upstream);
2272                 new_vlan[match].vlan_bc |= BIT(v->port);
2273                 new_vlan[match].vlan_bc |= BIT(upstream);
2274                 /* The "untagged" flag is set the same as for the
2275                  * original VLAN
2276                  */
2277                 if (!v->untagged)
2278                         new_vlan[match].tag_port |= BIT(v->port);
2279                 /* But it's always tagged towards the CPU */
2280                 new_vlan[match].tag_port |= BIT(upstream);
2281
2282                 /* The Retagging Table generates packet *clones* with
2283                  * the new VLAN. This is a very odd hardware quirk
2284                  * which we need to suppress by dropping the original
2285                  * packet.
2286                  * Deny egress of the original VLAN towards the CPU
2287                  * port. This will force the switch to drop it, and
2288                  * we'll see only the retagged packets.
2289                  */
2290                 match = v->vid;
2291                 new_vlan[match].vlan_bc &= ~BIT(upstream);
2292
2293                 /* And the retagging itself */
2294                 if (k == SJA1105_MAX_RETAGGING_COUNT) {
2295                         dev_err(priv->ds->dev, "No more retagging rules\n");
2296                         return -ENOSPC;
2297                 }
2298                 new_retagging[k].vlan_ing = v->vid;
2299                 new_retagging[k].vlan_egr = rx_vid;
2300                 new_retagging[k].ing_port = BIT(v->port);
2301                 new_retagging[k++].egr_port = BIT(upstream);
2302
2303                 subvlan_map[v->port][subvlan] = v->vid;
2304         }
2305
2306         *num_retagging = k;
2307
2308         return 0;
2309 }
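
/* Worked example for sja1105_build_subvlans() (ports, VIDs and the sub-VLAN
 * slot are illustrative): assume bridge VLAN 100 (tagged, not a pvid) on user
 * port 1, upstream (CPU) port 4, and sub-VLAN slot 2 picked for it. Then:
 *
 *   - rx_vid = dsa_8021q_rx_vid_subvlan(ds, 1, 2), a VID from the reserved
 *     1024-3071 range, is installed on ports 1 and 4 (tagged towards port 4);
 *   - egress of the original VID 100 towards port 4 is blocked, so the CPU
 *     only ever sees the retagged clone;
 *   - a retagging rule vlan_ing=100/ing_port=BIT(1) ->
 *     vlan_egr=rx_vid/egr_port=BIT(4) is appended to new_retagging[].
 */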
2310
2311 /* Sadly, in crosschip scenarios where the CPU port is also the link to another
2312  * switch, we should retag backwards (the dsa_8021q vid to the original vid) on
2313  * the CPU port of neighbour switches.
2314  */
2315 static int
2316 sja1105_build_crosschip_subvlans(struct sja1105_private *priv,
2317                                  struct sja1105_vlan_lookup_entry *new_vlan,
2318                                  struct sja1105_retagging_entry *new_retagging,
2319                                  int *num_retagging)
2320 {
2321         struct sja1105_crosschip_vlan *tmp, *pos;
2322         struct dsa_8021q_crosschip_link *c;
2323         struct sja1105_bridge_vlan *v, *w;
2324         struct list_head crosschip_vlans;
2325         int k = *num_retagging;
2326         int rc = 0;
2327
2328         if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT)
2329                 return 0;
2330
2331         INIT_LIST_HEAD(&crosschip_vlans);
2332
2333         list_for_each_entry(c, &priv->crosschip_links, list) {
2334                 struct sja1105_private *other_priv = c->other_ds->priv;
2335
2336                 if (other_priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
2337                         continue;
2338
2339                 /* Crosschip links are also added to the CPU ports.
2340                  * Ignore those.
2341                  */
2342                 if (!dsa_is_user_port(priv->ds, c->port))
2343                         continue;
2344                 if (!dsa_is_user_port(c->other_ds, c->other_port))
2345                         continue;
2346
2347                 /* Search for VLANs on the remote port */
2348                 list_for_each_entry(v, &other_priv->bridge_vlans, list) {
2349                         bool already_added = false;
2350                         bool we_have_it = false;
2351
2352                         if (v->port != c->other_port)
2353                                 continue;
2354
2355                         /* If @v is a pvid on @other_ds, it does not need
2356                          * re-retagging, because its SVL field is 0 and we
2357                          * already allow that, via the dsa_8021q crosschip
2358                          * links.
2359                          */
2360                         if (v->pvid)
2361                                 continue;
2362
2363                         /* Search for the VLAN on our local port */
2364                         list_for_each_entry(w, &priv->bridge_vlans, list) {
2365                                 if (w->port == c->port && w->vid == v->vid) {
2366                                         we_have_it = true;
2367                                         break;
2368                                 }
2369                         }
2370
2371                         if (!we_have_it)
2372                                 continue;
2373
2374                         list_for_each_entry(tmp, &crosschip_vlans, list) {
2375                                 if (tmp->vid == v->vid &&
2376                                     tmp->untagged == v->untagged &&
2377                                     tmp->port == c->port &&
2378                                     tmp->other_port == v->port &&
2379                                     tmp->other_ds == c->other_ds) {
2380                                         already_added = true;
2381                                         break;
2382                                 }
2383                         }
2384
2385                         if (already_added)
2386                                 continue;
2387
2388                         tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
2389                         if (!tmp) {
2390                                 dev_err(priv->ds->dev, "Failed to allocate memory\n");
2391                                 rc = -ENOMEM;
2392                                 goto out;
2393                         }
2394                         tmp->vid = v->vid;
2395                         tmp->port = c->port;
2396                         tmp->other_port = v->port;
2397                         tmp->other_ds = c->other_ds;
2398                         tmp->untagged = v->untagged;
2399                         list_add(&tmp->list, &crosschip_vlans);
2400                 }
2401         }
2402
2403         list_for_each_entry(tmp, &crosschip_vlans, list) {
2404                 struct sja1105_private *other_priv = tmp->other_ds->priv;
2405                 int upstream = dsa_upstream_port(priv->ds, tmp->port);
2406                 int match, subvlan;
2407                 u16 rx_vid;
2408
2409                 subvlan = sja1105_find_committed_subvlan(other_priv,
2410                                                          tmp->other_port,
2411                                                          tmp->vid);
2412                 /* If this happens, it's a bug. The neighbour switch does not
2413                  * have a subvlan for tmp->vid on tmp->other_port, but it
2414                  * should, since we already checked for its vlan_state.
2415                  */
2416                 if (WARN_ON(subvlan < 0)) {
2417                         rc = -EINVAL;
2418                         goto out;
2419                 }
2420
2421                 rx_vid = dsa_8021q_rx_vid_subvlan(tmp->other_ds,
2422                                                   tmp->other_port,
2423                                                   subvlan);
2424
2425                 /* The @rx_vid retagged from @tmp->vid on
2426                  * {@tmp->other_ds, @tmp->other_port} needs to be
2427                  * re-retagged to @tmp->vid on the way back to us.
2428                  *
2429                  * Assume the original @tmp->vid is already configured
2430                  * on this local switch, otherwise we wouldn't be
2431                  * retagging its subvlan on the other switch in the
2432                  * first place. We just need to add a reverse retagging
2433                  * rule for @rx_vid and install @rx_vid on our ports.
2434                  */
2435                 match = rx_vid;
2436                 new_vlan[match].vlanid = rx_vid;
2437                 new_vlan[match].vmemb_port |= BIT(tmp->port);
2438                 new_vlan[match].vmemb_port |= BIT(upstream);
2439                 /* The "untagged" flag is set the same as for the
2440                  * original VLAN. And towards the CPU, it doesn't
2441                  * really matter, because @rx_vid will only receive
2442                  * traffic on that port. For consistency with other dsa_8021q
2443                  * VLANs, we'll keep the CPU port tagged.
2444                  */
2445                 if (!tmp->untagged)
2446                         new_vlan[match].tag_port |= BIT(tmp->port);
2447                 new_vlan[match].tag_port |= BIT(upstream);
2448                 /* Deny egress of @rx_vid towards our front-panel port.
2449                  * This will force the switch to drop it, and we'll see
2450                  * only the re-retagged packets (having the original,
2451                  * pre-initial-retagging, VLAN @tmp->vid).
2452                  */
2453                 new_vlan[match].vlan_bc &= ~BIT(tmp->port);
2454
2455                 /* On reverse retagging, the same ingress VLAN goes to multiple
2456                  * ports. So we have an opportunity to create composite rules
2457                  * to not waste the limited space in the retagging table.
2458                  */
2459                 k = sja1105_find_retagging_entry(new_retagging, *num_retagging,
2460                                                  upstream, rx_vid, tmp->vid);
2461                 if (k < 0) {
2462                         if (*num_retagging == SJA1105_MAX_RETAGGING_COUNT) {
2463                                 dev_err(priv->ds->dev, "No more retagging rules\n");
2464                                 rc = -ENOSPC;
2465                                 goto out;
2466                         }
2467                         k = (*num_retagging)++;
2468                 }
2469                 /* And the retagging itself */
2470                 new_retagging[k].vlan_ing = rx_vid;
2471                 new_retagging[k].vlan_egr = tmp->vid;
2472                 new_retagging[k].ing_port = BIT(upstream);
2473                 new_retagging[k].egr_port |= BIT(tmp->port);
2474         }
2475
2476 out:
2477         list_for_each_entry_safe(tmp, pos, &crosschip_vlans, list) {
2478                 list_del(&tmp->list);
2479                 kfree(tmp);
2480         }
2481
2482         return rc;
2483 }
2484
2485 static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify);
2486
2487 static int sja1105_notify_crosschip_switches(struct sja1105_private *priv)
2488 {
2489         struct sja1105_crosschip_switch *s, *pos;
2490         struct list_head crosschip_switches;
2491         struct dsa_8021q_crosschip_link *c;
2492         int rc = 0;
2493
2494         INIT_LIST_HEAD(&crosschip_switches);
2495
2496         list_for_each_entry(c, &priv->crosschip_links, list) {
2497                 bool already_added = false;
2498
2499                 list_for_each_entry(s, &crosschip_switches, list) {
2500                         if (s->other_ds == c->other_ds) {
2501                                 already_added = true;
2502                                 break;
2503                         }
2504                 }
2505
2506                 if (already_added)
2507                         continue;
2508
2509                 s = kzalloc(sizeof(*s), GFP_KERNEL);
2510                 if (!s) {
2511                         dev_err(priv->ds->dev, "Failed to allocate memory\n");
2512                         rc = -ENOMEM;
2513                         goto out;
2514                 }
2515                 s->other_ds = c->other_ds;
2516                 list_add(&s->list, &crosschip_switches);
2517         }
2518
2519         list_for_each_entry(s, &crosschip_switches, list) {
2520                 struct sja1105_private *other_priv = s->other_ds->priv;
2521
2522                 rc = sja1105_build_vlan_table(other_priv, false);
2523                 if (rc)
2524                         goto out;
2525         }
2526
2527 out:
2528         list_for_each_entry_safe(s, pos, &crosschip_switches, list) {
2529                 list_del(&s->list);
2530                 kfree(s);
2531         }
2532
2533         return rc;
2534 }
2535
2536 static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify)
2537 {
2538         u16 subvlan_map[SJA1105_NUM_PORTS][DSA_8021Q_N_SUBVLAN];
2539         struct sja1105_retagging_entry *new_retagging;
2540         struct sja1105_vlan_lookup_entry *new_vlan;
2541         struct sja1105_table *table;
2542         int i, num_retagging = 0;
2543         int rc;
2544
2545         table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
2546         new_vlan = kcalloc(VLAN_N_VID,
2547                            table->ops->unpacked_entry_size, GFP_KERNEL);
2548         if (!new_vlan)
2549                 return -ENOMEM;
2550
2551         table = &priv->static_config.tables[BLK_IDX_RETAGGING];
2552         new_retagging = kcalloc(SJA1105_MAX_RETAGGING_COUNT,
2553                                 table->ops->unpacked_entry_size, GFP_KERNEL);
2554         if (!new_retagging) {
2555                 kfree(new_vlan);
2556                 return -ENOMEM;
2557         }
2558
2559         for (i = 0; i < VLAN_N_VID; i++)
2560                 new_vlan[i].vlanid = VLAN_N_VID;
2561
2562         for (i = 0; i < SJA1105_MAX_RETAGGING_COUNT; i++)
2563                 new_retagging[i].vlan_ing = VLAN_N_VID;
2564
2565         for (i = 0; i < priv->ds->num_ports; i++)
2566                 sja1105_init_subvlan_map(subvlan_map[i]);
2567
2568         /* Bridge VLANs */
2569         rc = sja1105_build_bridge_vlans(priv, new_vlan);
2570         if (rc)
2571                 goto out;
2572
2573         /* VLANs necessary for dsa_8021q operation, given to us by tag_8021q.c:
2574          * - RX VLANs
2575          * - TX VLANs
2576          * - Crosschip links
2577          */
2578         rc = sja1105_build_dsa_8021q_vlans(priv, new_vlan);
2579         if (rc)
2580                 goto out;
2581
2582         /* Private VLANs necessary for dsa_8021q operation, which we need to
2583          * determine on our own:
2584          * - Sub-VLANs
2585          * - Sub-VLANs of crosschip switches
2586          */
2587         rc = sja1105_build_subvlans(priv, subvlan_map, new_vlan, new_retagging,
2588                                     &num_retagging);
2589         if (rc)
2590                 goto out;
2591
2592         rc = sja1105_build_crosschip_subvlans(priv, new_vlan, new_retagging,
2593                                               &num_retagging);
2594         if (rc)
2595                 goto out;
2596
2597         rc = sja1105_commit_vlans(priv, new_vlan, new_retagging, num_retagging);
2598         if (rc)
2599                 goto out;
2600
2601         rc = sja1105_commit_pvid(priv);
2602         if (rc)
2603                 goto out;
2604
2605         for (i = 0; i < priv->ds->num_ports; i++)
2606                 sja1105_commit_subvlan_map(priv, i, subvlan_map[i]);
2607
2608         if (notify) {
2609                 rc = sja1105_notify_crosschip_switches(priv);
2610                 if (rc)
2611                         goto out;
2612         }
2613
2614 out:
2615         kfree(new_vlan);
2616         kfree(new_retagging);
2617
2618         return rc;
2619 }
2620
2621 /* Select the list to which we should add this VLAN. */
2622 static struct list_head *sja1105_classify_vlan(struct sja1105_private *priv,
2623                                                u16 vid)
2624 {
2625         if (priv->expect_dsa_8021q)
2626                 return &priv->dsa_8021q_vlans;
2627
2628         return &priv->bridge_vlans;
2629 }
2630
2631 static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
2632                                 const struct switchdev_obj_port_vlan *vlan)
2633 {
2634         struct sja1105_private *priv = ds->priv;
2635         u16 vid;
2636
2637         if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
2638                 return 0;
2639
2640         /* If the user wants best-effort VLAN filtering (aka vlan_filtering
2641          * bridge plus tagging), be sure to at least deny alterations to the
2642          * configuration done by dsa_8021q.
2643          */
2644         for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
2645                 if (!priv->expect_dsa_8021q && vid_is_dsa_8021q(vid)) {
2646                         dev_err(ds->dev, "Range 1024-3071 reserved for dsa_8021q operation\n");
2647                         return -EBUSY;
2648                 }
2649         }
2650
2651         return 0;
2652 }
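
/* Example of the restriction enforced above: when not in full VLAN filtering
 * mode, a request such as (device name illustrative)
 *
 *   bridge vlan add vid 2000 dev swp2
 *
 * is rejected with -EBUSY, because VIDs 1024-3071 are reserved for the
 * dsa_8021q tagging scheme, while e.g. 'bridge vlan add vid 100 dev swp2'
 * is accepted and tracked through the bridge_vlans list.
 */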
2653
2654 /* The TPID setting belongs to the General Parameters table,
2655  * which can only be partially reconfigured at runtime (and not the TPID).
2656  * So a switch reset is required.
2657  */
2658 static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
2659 {
2660         struct sja1105_l2_lookup_params_entry *l2_lookup_params;
2661         struct sja1105_general_params_entry *general_params;
2662         struct sja1105_private *priv = ds->priv;
2663         enum sja1105_vlan_state state;
2664         struct sja1105_table *table;
2665         struct sja1105_rule *rule;
2666         bool want_tagging;
2667         u16 tpid, tpid2;
2668         int rc;
2669
2670         list_for_each_entry(rule, &priv->flow_block.rules, list) {
2671                 if (rule->type == SJA1105_RULE_VL) {
2672                         dev_err(ds->dev,
2673                                 "Cannot change VLAN filtering state while VL rules are active\n");
2674                         return -EBUSY;
2675                 }
2676         }
2677
2678         if (enabled) {
2679                 /* Enable VLAN filtering. */
2680                 tpid  = ETH_P_8021Q;
2681                 tpid2 = ETH_P_8021AD;
2682         } else {
2683                 /* Disable VLAN filtering. */
2684                 tpid  = ETH_P_SJA1105;
2685                 tpid2 = ETH_P_SJA1105;
2686         }
2687
2688         for (port = 0; port < ds->num_ports; port++) {
2689                 struct sja1105_port *sp = &priv->ports[port];
2690
2691                 if (enabled)
2692                         sp->xmit_tpid = priv->info->qinq_tpid;
2693                 else
2694                         sp->xmit_tpid = ETH_P_SJA1105;
2695         }
2696
2697         if (!enabled)
2698                 state = SJA1105_VLAN_UNAWARE;
2699         else if (priv->best_effort_vlan_filtering)
2700                 state = SJA1105_VLAN_BEST_EFFORT;
2701         else
2702                 state = SJA1105_VLAN_FILTERING_FULL;
2703
2704         if (priv->vlan_state == state)
2705                 return 0;
2706
2707         priv->vlan_state = state;
2708         want_tagging = (state == SJA1105_VLAN_UNAWARE ||
2709                         state == SJA1105_VLAN_BEST_EFFORT);
2710
2711         table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
2712         general_params = table->entries;
2713         /* EtherType used to identify inner tagged (C-tag) VLAN traffic */
2714         general_params->tpid = tpid;
2715         /* EtherType used to identify outer tagged (S-tag) VLAN traffic */
2716         general_params->tpid2 = tpid2;
2717         /* When VLAN filtering is on, we need to at least be able to
2718          * decode management traffic through the "backup plan".
2719          */
2720         general_params->incl_srcpt1 = enabled;
2721         general_params->incl_srcpt0 = enabled;
2722
2723         want_tagging = priv->best_effort_vlan_filtering || !enabled;
2724
2725         /* VLAN filtering => independent VLAN learning.
2726          * No VLAN filtering (or best effort) => shared VLAN learning.
2727          *
2728          * In shared VLAN learning mode, untagged traffic still gets
2729          * pvid-tagged, and the FDB table gets populated with entries
2730          * containing the "real" (pvid or from VLAN tag) VLAN ID.
2731          * However the switch performs a masked L2 lookup in the FDB,
2732          * effectively only looking up a frame's DMAC (and not VID) for the
2733          * forwarding decision.
2734          *
2735          * This is extremely convenient for us, because in modes with
2736          * vlan_filtering=0, dsa_8021q actually installs unique pvid's into
2737          * each front panel port. This is good for identification but breaks
2738          * learning badly - the VID of the learnt FDB entry is unique, aka
2739          * no frames coming from any other port are going to have it. So
2740          * for forwarding purposes, this is as though learning was broken
2741          * (all frames get flooded).
2742          */
2743         table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
2744         l2_lookup_params = table->entries;
2745         l2_lookup_params->shared_learn = want_tagging;
2746
2747         sja1105_frame_memory_partitioning(priv);
2748
2749         rc = sja1105_build_vlan_table(priv, false);
2750         if (rc)
2751                 return rc;
2752
2753         rc = sja1105_static_config_reload(priv, SJA1105_VLAN_FILTERING);
2754         if (rc)
2755                 dev_err(ds->dev, "Failed to change VLAN Ethertype\n");
2756
2757         /* Switch port identification based on 802.1Q is only passable
2758          * if we are not under a vlan_filtering bridge. So make sure
2759          * the two configurations are mutually exclusive (of course, the
2760          * user may know better, i.e. best_effort_vlan_filtering).
2761          */
2762         return sja1105_setup_8021q_tagging(ds, want_tagging);
2763 }
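
/* Summary of the states selected above (derived from this function):
 *
 *   vlan_filtering   best_effort   vlan_state                     shared_learn
 *   off              any           SJA1105_VLAN_UNAWARE           true
 *   on               true          SJA1105_VLAN_BEST_EFFORT       true
 *   on               false         SJA1105_VLAN_FILTERING_FULL    false
 *
 * shared_learn follows want_tagging, which also decides whether dsa_8021q
 * tagging remains enabled at the end of this function.
 */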
2764
2765 static void sja1105_vlan_add(struct dsa_switch *ds, int port,
2766                              const struct switchdev_obj_port_vlan *vlan)
2767 {
2768         struct sja1105_private *priv = ds->priv;
2769         bool vlan_table_changed = false;
2770         u16 vid;
2771         int rc;
2772
2773         for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
2774                 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
2775                 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
2776                 struct sja1105_bridge_vlan *v;
2777                 struct list_head *vlan_list;
2778                 bool already_added = false;
2779
2780                 vlan_list = sja1105_classify_vlan(priv, vid);
2781
2782                 list_for_each_entry(v, vlan_list, list) {
2783                         if (v->port == port && v->vid == vid &&
2784                             v->untagged == untagged && v->pvid == pvid) {
2785                                 already_added = true;
2786                                 break;
2787                         }
2788                 }
2789
2790                 if (already_added)
2791                         continue;
2792
2793                 v = kzalloc(sizeof(*v), GFP_KERNEL);
2794                 if (!v) {
2795                         dev_err(ds->dev, "Out of memory while storing VLAN\n");
2796                         return;
2797                 }
2798
2799                 v->port = port;
2800                 v->vid = vid;
2801                 v->untagged = untagged;
2802                 v->pvid = pvid;
2803                 list_add(&v->list, vlan_list);
2804
2805                 vlan_table_changed = true;
2806         }
2807
2808         if (!vlan_table_changed)
2809                 return;
2810
2811         rc = sja1105_build_vlan_table(priv, true);
2812         if (rc)
2813                 dev_err(ds->dev, "Failed to build VLAN table: %d\n", rc);
2814 }
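
/* Example of how a switchdev request maps to the list entries above
 * (interface name is hypothetical): "bridge vlan add dev swp1 vid 100
 * pvid untagged" arrives with BRIDGE_VLAN_INFO_PVID and
 * BRIDGE_VLAN_INFO_UNTAGGED set, so the VLAN is stored on the bridge_vlans
 * list with v->pvid = true and v->untagged = true before the VLAN table is
 * rebuilt.
 */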
2815
2816 static int sja1105_vlan_del(struct dsa_switch *ds, int port,
2817                             const struct switchdev_obj_port_vlan *vlan)
2818 {
2819         struct sja1105_private *priv = ds->priv;
2820         bool vlan_table_changed = false;
2821         u16 vid;
2822
2823         for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
2824                 struct sja1105_bridge_vlan *v, *n;
2825                 struct list_head *vlan_list;
2826
2827                 vlan_list = sja1105_classify_vlan(priv, vid);
2828
2829                 list_for_each_entry_safe(v, n, vlan_list, list) {
2830                         if (v->port == port && v->vid == vid) {
2831                                 list_del(&v->list);
2832                                 kfree(v);
2833                                 vlan_table_changed = true;
2834                                 break;
2835                         }
2836                 }
2837         }
2838
2839         if (!vlan_table_changed)
2840                 return 0;
2841
2842         return sja1105_build_vlan_table(priv, true);
2843 }
2844
2845 static int sja1105_best_effort_vlan_filtering_get(struct sja1105_private *priv,
2846                                                   bool *be_vlan)
2847 {
2848         *be_vlan = priv->best_effort_vlan_filtering;
2849
2850         return 0;
2851 }
2852
2853 static int sja1105_best_effort_vlan_filtering_set(struct sja1105_private *priv,
2854                                                   bool be_vlan)
2855 {
2856         struct dsa_switch *ds = priv->ds;
2857         bool vlan_filtering;
2858         int port;
2859         int rc = 0;
2860
2861         priv->best_effort_vlan_filtering = be_vlan;
2862
2863         rtnl_lock();
2864         for (port = 0; port < ds->num_ports; port++) {
2865                 struct dsa_port *dp;
2866
2867                 if (!dsa_is_user_port(ds, port))
2868                         continue;
2869
2870                 dp = dsa_to_port(ds, port);
2871                 vlan_filtering = dsa_port_is_vlan_filtering(dp);
2872
2873                 rc = sja1105_vlan_filtering(ds, port, vlan_filtering);
2874                 if (rc)
2875                         break;
2876         }
2877         rtnl_unlock();
2878
2879         return rc;
2880 }
2881
2882 enum sja1105_devlink_param_id {
2883         SJA1105_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
2884         SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING,
2885 };
2886
2887 static int sja1105_devlink_param_get(struct dsa_switch *ds, u32 id,
2888                                      struct devlink_param_gset_ctx *ctx)
2889 {
2890         struct sja1105_private *priv = ds->priv;
2891         int err;
2892
2893         switch (id) {
2894         case SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING:
2895                 err = sja1105_best_effort_vlan_filtering_get(priv,
2896                                                              &ctx->val.vbool);
2897                 break;
2898         default:
2899                 err = -EOPNOTSUPP;
2900                 break;
2901         }
2902
2903         return err;
2904 }
2905
2906 static int sja1105_devlink_param_set(struct dsa_switch *ds, u32 id,
2907                                      struct devlink_param_gset_ctx *ctx)
2908 {
2909         struct sja1105_private *priv = ds->priv;
2910         int err;
2911
2912         switch (id) {
2913         case SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING:
2914                 err = sja1105_best_effort_vlan_filtering_set(priv,
2915                                                              ctx->val.vbool);
2916                 break;
2917         default:
2918                 err = -EOPNOTSUPP;
2919                 break;
2920         }
2921
2922         return err;
2923 }
2924
2925 static const struct devlink_param sja1105_devlink_params[] = {
2926         DSA_DEVLINK_PARAM_DRIVER(SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING,
2927                                  "best_effort_vlan_filtering",
2928                                  DEVLINK_PARAM_TYPE_BOOL,
2929                                  BIT(DEVLINK_PARAM_CMODE_RUNTIME)),
2930 };
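
/* The parameter above is toggled from user space through devlink, e.g.
 * (the device name depends on the SPI bus and chip select, the one below
 * is only an example):
 *
 *   devlink dev param set spi/spi0.1 \
 *           name best_effort_vlan_filtering value true cmode runtime
 */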
2931
2932 static int sja1105_setup_devlink_params(struct dsa_switch *ds)
2933 {
2934         return dsa_devlink_params_register(ds, sja1105_devlink_params,
2935                                            ARRAY_SIZE(sja1105_devlink_params));
2936 }
2937
2938 static void sja1105_teardown_devlink_params(struct dsa_switch *ds)
2939 {
2940         dsa_devlink_params_unregister(ds, sja1105_devlink_params,
2941                                       ARRAY_SIZE(sja1105_devlink_params));
2942 }
2943
2944 /* The programming model for the SJA1105 switch is "all-at-once" via static
2945  * configuration tables. Some of these can be dynamically modified at runtime,
2946  * but not the xMII mode parameters table.
2947  * Furthermore, some PHYs may not have crystals for generating their clocks
2948  * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's
2949  * ref_clk pin. So port clocking needs to be initialized early, before
2950  * connecting to PHYs is attempted, otherwise they won't respond through MDIO.
2951  * Setting the correct PHY link speed does not matter at this point.
2952  * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY
2953  * bindings are not yet parsed by DSA core. We need to parse early so that we
2954  * can populate the xMII mode parameters table.
2955  */
2956 static int sja1105_setup(struct dsa_switch *ds)
2957 {
2958         struct sja1105_dt_port ports[SJA1105_NUM_PORTS];
2959         struct sja1105_private *priv = ds->priv;
2960         int rc;
2961
2962         rc = sja1105_parse_dt(priv, ports);
2963         if (rc < 0) {
2964                 dev_err(ds->dev, "Failed to parse DT: %d\n", rc);
2965                 return rc;
2966         }
2967
2968         /* Error out early if internal delays are required through DT
2969          * and we can't apply them.
2970          */
2971         rc = sja1105_parse_rgmii_delays(priv, ports);
2972         if (rc < 0) {
2973                 dev_err(ds->dev, "RGMII delay not supported\n");
2974                 return rc;
2975         }
2976
2977         rc = sja1105_ptp_clock_register(ds);
2978         if (rc < 0) {
2979                 dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc);
2980                 return rc;
2981         }
2982         /* Create and send configuration down to device */
2983         rc = sja1105_static_config_load(priv, ports);
2984         if (rc < 0) {
2985                 dev_err(ds->dev, "Failed to load static config: %d\n", rc);
2986                 return rc;
2987         }
2988         /* Configure the CGU (PHY link modes and speeds) */
2989         rc = sja1105_clocking_setup(priv);
2990         if (rc < 0) {
2991                 dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc);
2992                 return rc;
2993         }
2994         /* On SJA1105, VLAN filtering per se is always enabled in hardware.
2995          * The only thing we can do to disable it is lie about what the 802.1Q
2996          * EtherType is.
2997          * So it will still try to apply VLAN filtering, but all ingress
2998          * traffic (except frames received with EtherType of ETH_P_SJA1105)
2999          * will be internally tagged with a distorted VLAN header where the
3000          * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
3001          */
3002         ds->vlan_filtering_is_global = true;
3003
3004         /* Advertise the 8 egress queues */
3005         ds->num_tx_queues = SJA1105_NUM_TC;
3006
3007         ds->mtu_enforcement_ingress = true;
3008
3009         ds->configure_vlan_while_not_filtering = true;
3010
3011         rc = sja1105_setup_devlink_params(ds);
3012         if (rc < 0)
3013                 return rc;
3014
3015         /* The DSA/switchdev model brings up switch ports in standalone mode by
3016          * default, and that means vlan_filtering is 0 since they're not under
3017          * a bridge, so it's safe to set up switch tagging at this time.
3018          */
3019         return sja1105_setup_8021q_tagging(ds, true);
3020 }
3021
3022 static void sja1105_teardown(struct dsa_switch *ds)
3023 {
3024         struct sja1105_private *priv = ds->priv;
3025         struct sja1105_bridge_vlan *v, *n;
3026         int port;
3027
3028         for (port = 0; port < SJA1105_NUM_PORTS; port++) {
3029                 struct sja1105_port *sp = &priv->ports[port];
3030
3031                 if (!dsa_is_user_port(ds, port))
3032                         continue;
3033
3034                 if (sp->xmit_worker)
3035                         kthread_destroy_worker(sp->xmit_worker);
3036         }
3037
3038         sja1105_teardown_devlink_params(ds);
3039         sja1105_flower_teardown(ds);
3040         sja1105_tas_teardown(ds);
3041         sja1105_ptp_clock_unregister(ds);
3042         sja1105_static_config_free(&priv->static_config);
3043
3044         list_for_each_entry_safe(v, n, &priv->dsa_8021q_vlans, list) {
3045                 list_del(&v->list);
3046                 kfree(v);
3047         }
3048
3049         list_for_each_entry_safe(v, n, &priv->bridge_vlans, list) {
3050                 list_del(&v->list);
3051                 kfree(v);
3052         }
3053 }
3054
3055 static int sja1105_port_enable(struct dsa_switch *ds, int port,
3056                                struct phy_device *phy)
3057 {
3058         struct net_device *slave;
3059
3060         if (!dsa_is_user_port(ds, port))
3061                 return 0;
3062
3063         slave = dsa_to_port(ds, port)->slave;
3064
3065         slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
3066
3067         return 0;
3068 }
3069
3070 static void sja1105_port_disable(struct dsa_switch *ds, int port)
3071 {
3072         struct sja1105_private *priv = ds->priv;
3073         struct sja1105_port *sp = &priv->ports[port];
3074
3075         if (!dsa_is_user_port(ds, port))
3076                 return;
3077
3078         kthread_cancel_work_sync(&sp->xmit_work);
3079         skb_queue_purge(&sp->xmit_queue);
3080 }
3081
3082 static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
3083                              struct sk_buff *skb, bool takets)
3084 {
3085         struct sja1105_mgmt_entry mgmt_route = {0};
3086         struct sja1105_private *priv = ds->priv;
3087         struct ethhdr *hdr;
3088         int timeout = 10;
3089         int rc;
3090
3091         hdr = eth_hdr(skb);
3092
3093         mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest);
3094         mgmt_route.destports = BIT(port);
3095         mgmt_route.enfport = 1;
3096         mgmt_route.tsreg = 0;
3097         mgmt_route.takets = takets;
3098
3099         rc = sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
3100                                           slot, &mgmt_route, true);
3101         if (rc < 0) {
3102                 kfree_skb(skb);
3103                 return rc;
3104         }
3105
3106         /* Transfer skb to the host port. */
3107         dsa_enqueue_skb(skb, dsa_to_port(ds, port)->slave);
3108
3109         /* Wait until the switch has processed the frame */
3110         do {
3111                 rc = sja1105_dynamic_config_read(priv, BLK_IDX_MGMT_ROUTE,
3112                                                  slot, &mgmt_route);
3113                 if (rc < 0) {
3114                         dev_err_ratelimited(priv->ds->dev,
3115                                             "failed to poll for mgmt route\n");
3116                         continue;
3117                 }
3118
3119                 /* UM10944: The ENFPORT flag of the respective entry is
3120                  * cleared when a match is found. The host can use this
3121                  * flag as an acknowledgment.
3122                  */
3123                 cpu_relax();
3124         } while (mgmt_route.enfport && --timeout);
3125
3126         if (!timeout) {
3127                 /* Clean up the management route so that a follow-up
3128                  * frame may not match on it by mistake.
3129                  * This is only hardware supported on P/Q/R/S - on E/T it is
3130                  * a no-op and we are silently discarding the -EOPNOTSUPP.
3131                  */
3132                 sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
3133                                              slot, &mgmt_route, false);
3134                 dev_err_ratelimited(priv->ds->dev, "xmit timed out\n");
3135         }
3136
3137         return NETDEV_TX_OK;
3138 }
3139
3140 #define work_to_port(work) \
3141                 container_of((work), struct sja1105_port, xmit_work)
3142 #define tagger_to_sja1105(t) \
3143                 container_of((t), struct sja1105_private, tagger_data)
3144
3145 /* Deferred work is unfortunately necessary because setting up the management
3146  * route cannot be done from atomic context (SPI transfer takes a sleepable
3147  * lock on the bus)
3148  */
3149 static void sja1105_port_deferred_xmit(struct kthread_work *work)
3150 {
3151         struct sja1105_port *sp = work_to_port(work);
3152         struct sja1105_tagger_data *tagger_data = sp->data;
3153         struct sja1105_private *priv = tagger_to_sja1105(tagger_data);
3154         int port = sp - priv->ports;
3155         struct sk_buff *skb;
3156
3157         while ((skb = skb_dequeue(&sp->xmit_queue)) != NULL) {
3158                 struct sk_buff *clone = DSA_SKB_CB(skb)->clone;
3159
3160                 mutex_lock(&priv->mgmt_lock);
3161
3162                 sja1105_mgmt_xmit(priv->ds, port, 0, skb, !!clone);
3163
3164                 /* The clone, if there, was made by dsa_skb_tx_timestamp */
3165                 if (clone)
3166                         sja1105_ptp_txtstamp_skb(priv->ds, port, clone);
3167
3168                 mutex_unlock(&priv->mgmt_lock);
3169         }
3170 }
3171
3172 /* The MAXAGE setting belongs to the L2 Forwarding Parameters table,
3173  * which cannot be reconfigured at runtime. So a switch reset is required.
3174  */
3175 static int sja1105_set_ageing_time(struct dsa_switch *ds,
3176                                    unsigned int ageing_time)
3177 {
3178         struct sja1105_l2_lookup_params_entry *l2_lookup_params;
3179         struct sja1105_private *priv = ds->priv;
3180         struct sja1105_table *table;
3181         unsigned int maxage;
3182
3183         table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
3184         l2_lookup_params = table->entries;
3185
3186         maxage = SJA1105_AGEING_TIME_MS(ageing_time);
3187
3188         if (l2_lookup_params->maxage == maxage)
3189                 return 0;
3190
3191         l2_lookup_params->maxage = maxage;
3192
3193         return sja1105_static_config_reload(priv, SJA1105_AGEING_TIME);
3194 }
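
/* Worked example, assuming SJA1105_AGEING_TIME_MS() converts milliseconds
 * into the hardware's 10 ms MAXAGE units: the bridge default ageing time
 * of 300 seconds (300000 ms) programs maxage = 30000. Repeating the call
 * with an unchanged value returns early and avoids a switch reset.
 */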
3195
3196 static int sja1105_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
3197 {
3198         struct sja1105_l2_policing_entry *policing;
3199         struct sja1105_private *priv = ds->priv;
3200
3201         new_mtu += VLAN_ETH_HLEN + ETH_FCS_LEN;
3202
3203         if (dsa_is_cpu_port(ds, port))
3204                 new_mtu += VLAN_HLEN;
3205
3206         policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
3207
3208         if (policing[port].maxlen == new_mtu)
3209                 return 0;
3210
3211         policing[port].maxlen = new_mtu;
3212
3213         return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
3214 }
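
/* Worked example: for new_mtu = 1500 on a user port, the programmed frame
 * length is 1500 + VLAN_ETH_HLEN (18) + ETH_FCS_LEN (4) = 1522 bytes. On
 * the CPU port, VLAN_HLEN (4) is added on top for the dsa_8021q tag,
 * giving 1526 bytes.
 */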
3215
3216 static int sja1105_get_max_mtu(struct dsa_switch *ds, int port)
3217 {
3218         return 2043 - VLAN_ETH_HLEN - ETH_FCS_LEN;
3219 }
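
/* With a hardware frame length limit of 2043 bytes, the maximum MTU
 * reported to the stack is 2043 - 18 - 4 = 2021 bytes.
 */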
3220
3221 static int sja1105_port_setup_tc(struct dsa_switch *ds, int port,
3222                                  enum tc_setup_type type,
3223                                  void *type_data)
3224 {
3225         switch (type) {
3226         case TC_SETUP_QDISC_TAPRIO:
3227                 return sja1105_setup_tc_taprio(ds, port, type_data);
3228         case TC_SETUP_QDISC_CBS:
3229                 return sja1105_setup_tc_cbs(ds, port, type_data);
3230         default:
3231                 return -EOPNOTSUPP;
3232         }
3233 }
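
/* Both offloads above are configured from user space via tc. A minimal
 * time-aware shaping example (interface name and schedule are examples
 * only, flags 2 requests full offload):
 *
 *   tc qdisc replace dev swp5 parent root taprio num_tc 8 \
 *           map 0 1 2 3 4 5 6 7 \
 *           queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 \
 *           base-time 0 sched-entry S 01 100000 flags 2
 *
 * Credit-based shaping is requested similarly through the cbs qdisc.
 */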
3234
3235 /* We have a single mirror (@to) port, but can configure ingress and egress
3236  * mirroring on all other (@from) ports.
3237  * We need to allow mirroring rules only as long as the @to port is always the
3238  * same, and we need to unset the @to port from mirr_port only when there is no
3239  * mirroring rule that references it.
3240  */
3241 static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
3242                                 bool ingress, bool enabled)
3243 {
3244         struct sja1105_general_params_entry *general_params;
3245         struct sja1105_mac_config_entry *mac;
3246         struct sja1105_table *table;
3247         bool already_enabled;
3248         u64 new_mirr_port;
3249         int rc;
3250
3251         table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
3252         general_params = table->entries;
3253
3254         mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
3255
3256         already_enabled = (general_params->mirr_port != SJA1105_NUM_PORTS);
3257         if (already_enabled && enabled && general_params->mirr_port != to) {
3258                 dev_err(priv->ds->dev,
3259                         "Delete mirroring rules towards port %llu first\n",
3260                         general_params->mirr_port);
3261                 return -EBUSY;
3262         }
3263
3264         new_mirr_port = to;
3265         if (!enabled) {
3266                 bool keep = false;
3267                 int port;
3268
3269                 /* Anybody still referencing mirr_port? */
3270                 for (port = 0; port < SJA1105_NUM_PORTS; port++) {
3271                         if (mac[port].ing_mirr || mac[port].egr_mirr) {
3272                                 keep = true;
3273                                 break;
3274                         }
3275                 }
3276                 /* Unset already_enabled for next time */
3277                 if (!keep)
3278                         new_mirr_port = SJA1105_NUM_PORTS;
3279         }
3280         if (new_mirr_port != general_params->mirr_port) {
3281                 general_params->mirr_port = new_mirr_port;
3282
3283                 rc = sja1105_dynamic_config_write(priv, BLK_IDX_GENERAL_PARAMS,
3284                                                   0, general_params, true);
3285                 if (rc < 0)
3286                         return rc;
3287         }
3288
3289         if (ingress)
3290                 mac[from].ing_mirr = enabled;
3291         else
3292                 mac[from].egr_mirr = enabled;
3293
3294         return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, from,
3295                                             &mac[from], true);
3296 }
3297
3298 static int sja1105_mirror_add(struct dsa_switch *ds, int port,
3299                               struct dsa_mall_mirror_tc_entry *mirror,
3300                               bool ingress)
3301 {
3302         return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
3303                                     ingress, true);
3304 }
3305
3306 static void sja1105_mirror_del(struct dsa_switch *ds, int port,
3307                                struct dsa_mall_mirror_tc_entry *mirror)
3308 {
3309         sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
3310                              mirror->ingress, false);
3311 }
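
/* Mirroring is requested from user space with tc matchall rules, e.g.
 * (interface names are examples) to mirror all traffic received on swp2
 * towards swp3:
 *
 *   tc qdisc add dev swp2 clsact
 *   tc filter add dev swp2 ingress matchall skip_sw \
 *           action mirred egress mirror dev swp3
 */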
3312
3313 static int sja1105_port_policer_add(struct dsa_switch *ds, int port,
3314                                     struct dsa_mall_policer_tc_entry *policer)
3315 {
3316         struct sja1105_l2_policing_entry *policing;
3317         struct sja1105_private *priv = ds->priv;
3318
3319         policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
3320
3321         /* In hardware, every 8 microseconds the credit level is incremented by
3322          * the value of RATE bytes divided by 64, up to a maximum of SMAX
3323          * bytes.
3324          */
3325         policing[port].rate = div_u64(512 * policer->rate_bytes_per_sec,
3326                                       1000000);
3327         policing[port].smax = policer->burst;
3328
3329         return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
3330 }
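
/* Worked example: a 100 Mbit/s policer arrives as rate_bytes_per_sec =
 * 12500000, so RATE = 512 * 12500000 / 1000000 = 6400. The hardware then
 * adds 6400 / 64 = 100 bytes of credit every 8 us, i.e. 12500000 bytes/s,
 * matching the requested rate.
 */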
3331
3332 static void sja1105_port_policer_del(struct dsa_switch *ds, int port)
3333 {
3334         struct sja1105_l2_policing_entry *policing;
3335         struct sja1105_private *priv = ds->priv;
3336
3337         policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
3338
3339         policing[port].rate = SJA1105_RATE_MBPS(1000);
3340         policing[port].smax = 65535;
3341
3342         sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
3343 }
3344
3345 static const struct dsa_switch_ops sja1105_switch_ops = {
3346         .get_tag_protocol       = sja1105_get_tag_protocol,
3347         .setup                  = sja1105_setup,
3348         .teardown               = sja1105_teardown,
3349         .set_ageing_time        = sja1105_set_ageing_time,
3350         .port_change_mtu        = sja1105_change_mtu,
3351         .port_max_mtu           = sja1105_get_max_mtu,
3352         .phylink_validate       = sja1105_phylink_validate,
3353         .phylink_mac_link_state = sja1105_mac_pcs_get_state,
3354         .phylink_mac_config     = sja1105_mac_config,
3355         .phylink_mac_link_up    = sja1105_mac_link_up,
3356         .phylink_mac_link_down  = sja1105_mac_link_down,
3357         .get_strings            = sja1105_get_strings,
3358         .get_ethtool_stats      = sja1105_get_ethtool_stats,
3359         .get_sset_count         = sja1105_get_sset_count,
3360         .get_ts_info            = sja1105_get_ts_info,
3361         .port_enable            = sja1105_port_enable,
3362         .port_disable           = sja1105_port_disable,
3363         .port_fdb_dump          = sja1105_fdb_dump,
3364         .port_fdb_add           = sja1105_fdb_add,
3365         .port_fdb_del           = sja1105_fdb_del,
3366         .port_bridge_join       = sja1105_bridge_join,
3367         .port_bridge_leave      = sja1105_bridge_leave,
3368         .port_stp_state_set     = sja1105_bridge_stp_state_set,
3369         .port_vlan_prepare      = sja1105_vlan_prepare,
3370         .port_vlan_filtering    = sja1105_vlan_filtering,
3371         .port_vlan_add          = sja1105_vlan_add,
3372         .port_vlan_del          = sja1105_vlan_del,
3373         .port_mdb_prepare       = sja1105_mdb_prepare,
3374         .port_mdb_add           = sja1105_mdb_add,
3375         .port_mdb_del           = sja1105_mdb_del,
3376         .port_hwtstamp_get      = sja1105_hwtstamp_get,
3377         .port_hwtstamp_set      = sja1105_hwtstamp_set,
3378         .port_rxtstamp          = sja1105_port_rxtstamp,
3379         .port_txtstamp          = sja1105_port_txtstamp,
3380         .port_setup_tc          = sja1105_port_setup_tc,
3381         .port_mirror_add        = sja1105_mirror_add,
3382         .port_mirror_del        = sja1105_mirror_del,
3383         .port_policer_add       = sja1105_port_policer_add,
3384         .port_policer_del       = sja1105_port_policer_del,
3385         .cls_flower_add         = sja1105_cls_flower_add,
3386         .cls_flower_del         = sja1105_cls_flower_del,
3387         .cls_flower_stats       = sja1105_cls_flower_stats,
3388         .crosschip_bridge_join  = sja1105_crosschip_bridge_join,
3389         .crosschip_bridge_leave = sja1105_crosschip_bridge_leave,
3390         .devlink_param_get      = sja1105_devlink_param_get,
3391         .devlink_param_set      = sja1105_devlink_param_set,
3392 };
3393
3394 static const struct of_device_id sja1105_dt_ids[];
3395
3396 static int sja1105_check_device_id(struct sja1105_private *priv)
3397 {
3398         const struct sja1105_regs *regs = priv->info->regs;
3399         u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0};
3400         struct device *dev = &priv->spidev->dev;
3401         const struct of_device_id *match;
3402         u32 device_id;
3403         u64 part_no;
3404         int rc;
3405
3406         rc = sja1105_xfer_u32(priv, SPI_READ, regs->device_id, &device_id,
3407                               NULL);
3408         if (rc < 0)
3409                 return rc;
3410
3411         rc = sja1105_xfer_buf(priv, SPI_READ, regs->prod_id, prod_id,
3412                               SJA1105_SIZE_DEVICE_ID);
3413         if (rc < 0)
3414                 return rc;
3415
3416         sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID);
3417
3418         for (match = sja1105_dt_ids; match->compatible; match++) {
3419                 const struct sja1105_info *info = match->data;
3420
3421                 /* Is what's been probed in our match table at all? */
3422                 if (info->device_id != device_id || info->part_no != part_no)
3423                         continue;
3424
3425                 /* But is it what's in the device tree? */
3426                 if (priv->info->device_id != device_id ||
3427                     priv->info->part_no != part_no) {
3428                         dev_warn(dev, "Device tree specifies chip %s but found %s, please fix it!\n",
3429                                  priv->info->name, info->name);
3430                         /* It isn't. No problem, pick that up. */
3431                         priv->info = info;
3432                 }
3433
3434                 return 0;
3435         }
3436
3437         dev_err(dev, "Unexpected {device ID, part number}: 0x%x 0x%llx\n",
3438                 device_id, part_no);
3439
3440         return -ENODEV;
3441 }
3442
3443 static int sja1105_probe(struct spi_device *spi)
3444 {
3445         struct sja1105_tagger_data *tagger_data;
3446         struct device *dev = &spi->dev;
3447         struct sja1105_private *priv;
3448         struct dsa_switch *ds;
3449         int rc, port;
3450
3451         if (!dev->of_node) {
3452                 dev_err(dev, "No DTS bindings for SJA1105 driver\n");
3453                 return -EINVAL;
3454         }
3455
3456         priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL);
3457         if (!priv)
3458                 return -ENOMEM;
3459
3460         /* Configure the optional reset pin and bring up switch */
3461         priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
3462         if (IS_ERR(priv->reset_gpio))
3463                 dev_dbg(dev, "reset-gpios not defined, ignoring\n");
3464         else
3465                 sja1105_hw_reset(priv->reset_gpio, 1, 1);
3466
3467         /* Populate our driver private structure (priv) based on
3468          * the device tree node that was probed (spi)
3469          */
3470         priv->spidev = spi;
3471         spi_set_drvdata(spi, priv);
3472
3473         /* Configure the SPI bus */
3474         spi->bits_per_word = 8;
3475         rc = spi_setup(spi);
3476         if (rc < 0) {
3477                 dev_err(dev, "Could not init SPI\n");
3478                 return rc;
3479         }
3480
3481         priv->info = of_device_get_match_data(dev);
3482
3483         /* Detect hardware device */
3484         rc = sja1105_check_device_id(priv);
3485         if (rc < 0) {
3486                 dev_err(dev, "Device ID check failed: %d\n", rc);
3487                 return rc;
3488         }
3489
3490         dev_info(dev, "Probed switch chip: %s\n", priv->info->name);
3491
3492         ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
3493         if (!ds)
3494                 return -ENOMEM;
3495
3496         ds->dev = dev;
3497         ds->num_ports = SJA1105_NUM_PORTS;
3498         ds->ops = &sja1105_switch_ops;
3499         ds->priv = priv;
3500         priv->ds = ds;
3501
3502         tagger_data = &priv->tagger_data;
3503
3504         mutex_init(&priv->ptp_data.lock);
3505         mutex_init(&priv->mgmt_lock);
3506
3507         INIT_LIST_HEAD(&priv->crosschip_links);
3508         INIT_LIST_HEAD(&priv->bridge_vlans);
3509         INIT_LIST_HEAD(&priv->dsa_8021q_vlans);
3510
3511         sja1105_tas_setup(ds);
3512         sja1105_flower_setup(ds);
3513
3514         rc = dsa_register_switch(priv->ds);
3515         if (rc)
3516                 return rc;
3517
3518         if (IS_ENABLED(CONFIG_NET_SCH_CBS)) {
3519                 priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers,
3520                                          sizeof(struct sja1105_cbs_entry),
3521                                          GFP_KERNEL);
3522                 if (!priv->cbs)
3523                         return -ENOMEM;
3524         }
3525
3526         /* Connections between dsa_port and sja1105_port */
3527         for (port = 0; port < SJA1105_NUM_PORTS; port++) {
3528                 struct sja1105_port *sp = &priv->ports[port];
3529                 struct dsa_port *dp = dsa_to_port(ds, port);
3530                 struct net_device *slave;
3531                 int subvlan;
3532
3533                 if (!dsa_is_user_port(ds, port))
3534                         continue;
3535
3536                 dp->priv = sp;
3537                 sp->dp = dp;
3538                 sp->data = tagger_data;
3539                 slave = dp->slave;
3540                 kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit);
3541                 sp->xmit_worker = kthread_create_worker(0, "%s_xmit",
3542                                                         slave->name);
3543                 if (IS_ERR(sp->xmit_worker)) {
3544                         rc = PTR_ERR(sp->xmit_worker);
3545                         dev_err(ds->dev,
3546                                 "failed to create deferred xmit thread: %d\n",
3547                                 rc);
3548                         goto out;
3549                 }
3550                 skb_queue_head_init(&sp->xmit_queue);
3551                 sp->xmit_tpid = ETH_P_SJA1105;
3552
3553                 for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
3554                         sp->subvlan_map[subvlan] = VLAN_N_VID;
3555         }
3556
3557         return 0;
3558 out:
3559         while (port-- > 0) {
3560                 struct sja1105_port *sp = &priv->ports[port];
3561
3562                 if (!dsa_is_user_port(ds, port))
3563                         continue;
3564
3565                 kthread_destroy_worker(sp->xmit_worker);
3566         }
3567         return rc;
3568 }
3569
3570 static int sja1105_remove(struct spi_device *spi)
3571 {
3572         struct sja1105_private *priv = spi_get_drvdata(spi);
3573
3574         dsa_unregister_switch(priv->ds);
3575         return 0;
3576 }
3577
3578 static const struct of_device_id sja1105_dt_ids[] = {
3579         { .compatible = "nxp,sja1105e", .data = &sja1105e_info },
3580         { .compatible = "nxp,sja1105t", .data = &sja1105t_info },
3581         { .compatible = "nxp,sja1105p", .data = &sja1105p_info },
3582         { .compatible = "nxp,sja1105q", .data = &sja1105q_info },
3583         { .compatible = "nxp,sja1105r", .data = &sja1105r_info },
3584         { .compatible = "nxp,sja1105s", .data = &sja1105s_info },
3585         { /* sentinel */ },
3586 };
3587 MODULE_DEVICE_TABLE(of, sja1105_dt_ids);
3588
3589 static struct spi_driver sja1105_driver = {
3590         .driver = {
3591                 .name  = "sja1105",
3592                 .owner = THIS_MODULE,
3593                 .of_match_table = of_match_ptr(sja1105_dt_ids),
3594         },
3595         .probe  = sja1105_probe,
3596         .remove = sja1105_remove,
3597 };
3598
3599 module_spi_driver(sja1105_driver);
3600
3601 MODULE_AUTHOR("Vladimir Oltean <[email protected]>");
3602 MODULE_AUTHOR("Georg Waibel <[email protected]>");
3603 MODULE_DESCRIPTION("SJA1105 Driver");
3604 MODULE_LICENSE("GPL v2");