/* drivers/net/ethernet/mellanox/mlxsw/spectrum.c — Linux 6.14-rc3
 * (git web-viewer header removed; see kernel.org git for provenance)
 */
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/types.h>
7 #include <linux/pci.h>
8 #include <linux/netdevice.h>
9 #include <linux/etherdevice.h>
10 #include <linux/ethtool.h>
11 #include <linux/slab.h>
12 #include <linux/device.h>
13 #include <linux/skbuff.h>
14 #include <linux/if_vlan.h>
15 #include <linux/if_bridge.h>
16 #include <linux/workqueue.h>
17 #include <linux/jiffies.h>
18 #include <linux/bitops.h>
19 #include <linux/list.h>
20 #include <linux/notifier.h>
21 #include <linux/dcbnl.h>
22 #include <linux/inetdevice.h>
23 #include <linux/netlink.h>
24 #include <linux/jhash.h>
25 #include <linux/log2.h>
26 #include <linux/refcount.h>
27 #include <linux/rhashtable.h>
28 #include <net/switchdev.h>
29 #include <net/pkt_cls.h>
30 #include <net/netevent.h>
31 #include <net/addrconf.h>
32 #include <linux/ptp_classify.h>
33
34 #include "spectrum.h"
35 #include "pci.h"
36 #include "core.h"
37 #include "core_env.h"
38 #include "reg.h"
39 #include "port.h"
40 #include "trap.h"
41 #include "txheader.h"
42 #include "spectrum_cnt.h"
43 #include "spectrum_dpipe.h"
44 #include "spectrum_acl_flex_actions.h"
45 #include "spectrum_span.h"
46 #include "spectrum_ptp.h"
47 #include "spectrum_trap.h"
48
/* Firmware revision expected by the driver. The minor and subminor numbers
 * are shared across ASIC generations; the major number is per-generation.
 */
#define MLXSW_SP_FWREV_MINOR 2010
#define MLXSW_SP_FWREV_SUBMINOR 1006

#define MLXSW_SP1_FWREV_MAJOR 13
/* Per the macro name: earliest Spectrum-1 minor revision from which a
 * firmware reset is possible.
 */
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

/* Firmware file names are derived from the expected revision numbers. */
#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP2_FWREV_MAJOR 29

static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
	.major = MLXSW_SP2_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
};

#define MLXSW_SP2_FW_FILENAME \
	"mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP3_FWREV_MAJOR 30

static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
	.major = MLXSW_SP3_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
};

#define MLXSW_SP3_FW_FILENAME \
	"mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

/* Line card INI bundle file name, versioned like the firmware. */
#define MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME \
	"mellanox/lc_ini_bundle_" \
	__stringify(MLXSW_SP_FWREV_MINOR) "_" \
	__stringify(MLXSW_SP_FWREV_SUBMINOR) ".bin"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
static const char mlxsw_sp4_driver_name[] = "mlxsw_spectrum4";

/* MAC masks with the low bits cleared — NOTE(review): presumably the
 * cleared bits form the per-port portion of the base MAC; confirm against
 * the users of these masks (not visible in this chunk).
 */
static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};
109
110 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
111                               unsigned int counter_index, bool clear,
112                               u64 *packets, u64 *bytes)
113 {
114         enum mlxsw_reg_mgpc_opcode op = clear ? MLXSW_REG_MGPC_OPCODE_CLEAR :
115                                                 MLXSW_REG_MGPC_OPCODE_NOP;
116         char mgpc_pl[MLXSW_REG_MGPC_LEN];
117         int err;
118
119         mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, op,
120                             MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
121         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
122         if (err)
123                 return err;
124         if (packets)
125                 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
126         if (bytes)
127                 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
128         return 0;
129 }
130
131 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
132                                        unsigned int counter_index)
133 {
134         char mgpc_pl[MLXSW_REG_MGPC_LEN];
135
136         mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
137                             MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
138         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
139 }
140
141 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
142                                 unsigned int *p_counter_index)
143 {
144         int err;
145
146         err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
147                                      p_counter_index);
148         if (err)
149                 return err;
150         err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
151         if (err)
152                 goto err_counter_clear;
153         return 0;
154
155 err_counter_clear:
156         mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
157                               *p_counter_index);
158         return err;
159 }
160
161 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
162                                 unsigned int counter_index)
163 {
164          mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
165                                counter_index);
166 }
167
168 static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb)
169 {
170         unsigned int type;
171
172         if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
173                 return false;
174
175         type = ptp_classify_raw(skb);
176         return !!ptp_parse_header(skb, type);
177 }
178
179 static void mlxsw_sp_txhdr_info_data_init(struct mlxsw_core *mlxsw_core,
180                                           struct sk_buff *skb,
181                                           struct mlxsw_txhdr_info *txhdr_info)
182 {
183         /* Resource validation was done as part of PTP init. */
184         u16 max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID);
185
186         txhdr_info->data = true;
187         txhdr_info->max_fid = max_fid;
188 }
189
190 static struct sk_buff *
191 mlxsw_sp_vlan_tag_push(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb)
192 {
193         /* In some Spectrum ASICs, in order for PTP event packets to have their
194          * correction field correctly set on the egress port they must be
195          * transmitted as data packets. Such packets ingress the ASIC via the
196          * CPU port and must have a VLAN tag, as the CPU port is not configured
197          * with a PVID. Push the default VLAN (4095), which is configured as
198          * egress untagged on all the ports.
199          */
200         if (skb_vlan_tagged(skb))
201                 return skb;
202
203         return vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
204                                          MLXSW_SP_DEFAULT_VID);
205 }
206
207 static struct sk_buff *
208 mlxsw_sp_txhdr_preparations(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
209                             struct mlxsw_txhdr_info *txhdr_info)
210 {
211         if (likely(!mlxsw_sp_skb_requires_ts(skb)))
212                 return skb;
213
214         if (!mlxsw_sp->ptp_ops->tx_as_data)
215                 return skb;
216
217         /* Special handling for PTP events that require a time stamp and cannot
218          * be transmitted as regular control packets.
219          */
220         mlxsw_sp_txhdr_info_data_init(mlxsw_sp->core, skb, txhdr_info);
221         return mlxsw_sp_vlan_tag_push(mlxsw_sp, skb);
222 }
223
224 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
225 {
226         switch (state) {
227         case BR_STATE_FORWARDING:
228                 return MLXSW_REG_SPMS_STATE_FORWARDING;
229         case BR_STATE_LEARNING:
230                 return MLXSW_REG_SPMS_STATE_LEARNING;
231         case BR_STATE_LISTENING:
232         case BR_STATE_DISABLED:
233         case BR_STATE_BLOCKING:
234                 return MLXSW_REG_SPMS_STATE_DISCARDING;
235         default:
236                 BUG();
237         }
238 }
239
240 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
241                               u8 state)
242 {
243         enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
244         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
245         char *spms_pl;
246         int err;
247
248         spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
249         if (!spms_pl)
250                 return -ENOMEM;
251         mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
252         mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
253
254         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
255         kfree(spms_pl);
256         return err;
257 }
258
259 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
260 {
261         char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
262         int err;
263
264         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
265         if (err)
266                 return err;
267         mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
268         return 0;
269 }
270
271 int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
272                                    bool is_up)
273 {
274         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
275         char paos_pl[MLXSW_REG_PAOS_LEN];
276
277         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
278                             is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
279                             MLXSW_PORT_ADMIN_STATUS_DOWN);
280         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
281 }
282
283 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
284                                       const unsigned char *addr)
285 {
286         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
287         char ppad_pl[MLXSW_REG_PPAD_LEN];
288
289         mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
290         mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
291         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
292 }
293
294 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
295 {
296         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
297
298         eth_hw_addr_gen(mlxsw_sp_port->dev, mlxsw_sp->base_mac,
299                         mlxsw_sp_port->local_port);
300         return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port,
301                                           mlxsw_sp_port->dev->dev_addr);
302 }
303
304 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
305 {
306         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
307         char pmtu_pl[MLXSW_REG_PMTU_LEN];
308
309         mtu += MLXSW_PORT_ETH_FRAME_HDR;
310
311         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
312         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
313 }
314
315 static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp,
316                                   u16 local_port, u8 swid)
317 {
318         char pspa_pl[MLXSW_REG_PSPA_LEN];
319
320         mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
321         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
322 }
323
324 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
325 {
326         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
327         char svpe_pl[MLXSW_REG_SVPE_LEN];
328
329         mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
330         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
331 }
332
333 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
334                                    bool learn_enable)
335 {
336         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
337         char *spvmlr_pl;
338         int err;
339
340         spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
341         if (!spvmlr_pl)
342                 return -ENOMEM;
343         mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
344                               learn_enable);
345         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
346         kfree(spvmlr_pl);
347         return err;
348 }
349
350 int mlxsw_sp_port_security_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
351 {
352         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
353         char spfsr_pl[MLXSW_REG_SPFSR_LEN];
354         int err;
355
356         if (mlxsw_sp_port->security == enable)
357                 return 0;
358
359         mlxsw_reg_spfsr_pack(spfsr_pl, mlxsw_sp_port->local_port, enable);
360         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spfsr), spfsr_pl);
361         if (err)
362                 return err;
363
364         mlxsw_sp_port->security = enable;
365         return 0;
366 }
367
368 int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
369 {
370         switch (ethtype) {
371         case ETH_P_8021Q:
372                 *p_sver_type = 0;
373                 break;
374         case ETH_P_8021AD:
375                 *p_sver_type = 1;
376                 break;
377         default:
378                 return -EINVAL;
379         }
380
381         return 0;
382 }
383
384 int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port,
385                                      u16 ethtype)
386 {
387         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
388         char spevet_pl[MLXSW_REG_SPEVET_LEN];
389         u8 sver_type;
390         int err;
391
392         err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
393         if (err)
394                 return err;
395
396         mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type);
397         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl);
398 }
399
400 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
401                                     u16 vid, u16 ethtype)
402 {
403         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
404         char spvid_pl[MLXSW_REG_SPVID_LEN];
405         u8 sver_type;
406         int err;
407
408         err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
409         if (err)
410                 return err;
411
412         mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid,
413                              sver_type);
414
415         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
416 }
417
418 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
419                                             bool allow)
420 {
421         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
422         char spaft_pl[MLXSW_REG_SPAFT_LEN];
423
424         mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
425         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
426 }
427
/* Set the port's PVID. A PVID of zero disallows untagged traffic on the
 * port; a non-zero PVID is programmed to hardware and untagged traffic is
 * allowed. The cached PVID is updated only after hardware succeeded.
 */
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			   u16 ethtype)
{
	int err;

	if (!vid) {
		/* No PVID: disallow untagged traffic. */
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	/* Roll back to the previously cached PVID. */
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype);
	return err;
}
453
454 static int
455 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
456 {
457         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
458         char sspr_pl[MLXSW_REG_SSPR_LEN];
459
460         mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
461         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
462 }
463
/* Parse a PMLP register payload into @port_mapping, validating that the
 * module configuration is one the driver supports: a single module and a
 * single slot index per port, a power-of-2 width, symmetric RX/TX lanes
 * and sequential lane numbering.
 */
static int
mlxsw_sp_port_module_info_parse(struct mlxsw_sp *mlxsw_sp,
				u16 local_port, char *pmlp_pl,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	bool separate_rxtx;
	u8 first_lane;
	u8 slot_index;
	u8 module;
	u8 width;
	int i;

	/* Lane 0 is the reference; every other lane must agree with it. */
	module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	slot_index = mlxsw_reg_pmlp_slot_index_get(pmlp_pl, 0);
	width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);
	first_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);

	/* A width of zero (unmapped port) is accepted as-is. */
	if (width && !is_power_of_2(width)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
			local_port);
		return -EINVAL;
	}

	for (i = 0; i < width; i++) {
		if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_slot_index_get(pmlp_pl, i) != slot_index) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple slot indexes\n",
				local_port);
			return -EINVAL;
		}
		/* When RX and TX lanes are reported separately they must
		 * still match, since only the TX lane is used below.
		 */
		if (separate_rxtx &&
		    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
		    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i + first_lane) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
				local_port);
			return -EINVAL;
		}
	}

	port_mapping->module = module;
	port_mapping->slot_index = slot_index;
	port_mapping->width = width;
	/* NOTE(review): module_width starts equal to width here — presumably
	 * the unsplit module width; confirm against the port-split code.
	 */
	port_mapping->module_width = width;
	port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}
520
521 static int
522 mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
523                               struct mlxsw_sp_port_mapping *port_mapping)
524 {
525         char pmlp_pl[MLXSW_REG_PMLP_LEN];
526         int err;
527
528         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
529         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
530         if (err)
531                 return err;
532         return mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
533                                                pmlp_pl, port_mapping);
534 }
535
536 static int
537 mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
538                          const struct mlxsw_sp_port_mapping *port_mapping)
539 {
540         char pmlp_pl[MLXSW_REG_PMLP_LEN];
541         int i, err;
542
543         mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->slot_index,
544                                   port_mapping->module);
545
546         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
547         mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
548         for (i = 0; i < port_mapping->width; i++) {
549                 mlxsw_reg_pmlp_slot_index_set(pmlp_pl, i,
550                                               port_mapping->slot_index);
551                 mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
552                 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
553         }
554
555         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
556         if (err)
557                 goto err_pmlp_write;
558         return 0;
559
560 err_pmlp_write:
561         mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->slot_index,
562                                     port_mapping->module);
563         return err;
564 }
565
566 static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port,
567                                        u8 slot_index, u8 module)
568 {
569         char pmlp_pl[MLXSW_REG_PMLP_LEN];
570
571         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
572         mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
573         mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
574         mlxsw_env_module_port_unmap(mlxsw_sp->core, slot_index, module);
575 }
576
577 static int mlxsw_sp_port_open(struct net_device *dev)
578 {
579         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
580         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
581         int err;
582
583         err = mlxsw_env_module_port_up(mlxsw_sp->core,
584                                        mlxsw_sp_port->mapping.slot_index,
585                                        mlxsw_sp_port->mapping.module);
586         if (err)
587                 return err;
588         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
589         if (err)
590                 goto err_port_admin_status_set;
591         netif_start_queue(dev);
592         return 0;
593
594 err_port_admin_status_set:
595         mlxsw_env_module_port_down(mlxsw_sp->core,
596                                    mlxsw_sp_port->mapping.slot_index,
597                                    mlxsw_sp_port->mapping.module);
598         return err;
599 }
600
/* ndo_stop handler: stop the TX queue, set the port administratively down
 * and notify the environment layer, mirroring mlxsw_sp_port_open().
 */
static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	netif_stop_queue(dev);
	/* Best effort: the admin-down result is intentionally ignored. */
	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	mlxsw_env_module_port_down(mlxsw_sp->core,
				   mlxsw_sp_port->mapping.slot_index,
				   mlxsw_sp_port->mapping.module);
	return 0;
}
613
/* ndo_start_xmit handler: hand the skb to the mlxsw core for transmission
 * through this port and update the per-CPU software TX counters.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	struct mlxsw_txhdr_info txhdr_info = {
		.tx_info.local_port = mlxsw_sp_port->local_port,
		.tx_info.is_emad = false,
	};
	u64 len;
	int err;

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &txhdr_info.tx_info))
		return NETDEV_TX_BUSY;

	/* NOTE(review): eth_skb_pad() is expected to consume the skb on
	 * failure, hence only the drop counter is bumped here — confirm.
	 */
	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	/* May replace the skb (VLAN push for PTP events); NULL means the
	 * original skb was already freed.
	 */
	skb = mlxsw_sp_txhdr_preparations(mlxsw_sp, skb, &txhdr_info);
	if (!skb) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &txhdr_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
665
/* ndo_set_rx_mode handler: intentionally a no-op. */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
669
670 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
671 {
672         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
673         struct sockaddr *addr = p;
674         int err;
675
676         if (!is_valid_ether_addr(addr->sa_data))
677                 return -EADDRNOTAVAIL;
678
679         err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
680         if (err)
681                 return err;
682         eth_hw_addr_set(dev, addr->sa_data);
683         return 0;
684 }
685
686 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
687 {
688         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
689         struct mlxsw_sp_hdroom orig_hdroom;
690         struct mlxsw_sp_hdroom hdroom;
691         int err;
692
693         orig_hdroom = *mlxsw_sp_port->hdroom;
694
695         hdroom = orig_hdroom;
696         hdroom.mtu = mtu;
697         mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
698
699         err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
700         if (err) {
701                 netdev_err(dev, "Failed to configure port's headroom\n");
702                 return err;
703         }
704
705         err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
706         if (err)
707                 goto err_port_mtu_set;
708         WRITE_ONCE(dev->mtu, mtu);
709         return 0;
710
711 err_port_mtu_set:
712         mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
713         return err;
714 }
715
/* Aggregate the per-CPU software counters into @stats. Always returns 0. */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		/* Retry until a consistent snapshot of this CPU's 64-bit
		 * counters is read (a writer may update them concurrently).
		 */
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return 0;
}
747
748 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
749 {
750         switch (attr_id) {
751         case IFLA_OFFLOAD_XSTATS_CPU_HIT:
752                 return true;
753         }
754
755         return false;
756 }
757
758 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
759                                            void *sp)
760 {
761         switch (attr_id) {
762         case IFLA_OFFLOAD_XSTATS_CPU_HIT:
763                 return mlxsw_sp_port_get_sw_stats64(dev, sp);
764         }
765
766         return -EINVAL;
767 }
768
/* Query one PPCNT counter group (@grp) at priority/TC @prio for the port,
 * leaving the raw register payload in @ppcnt_pl for the caller to decode.
 */
int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}
778
779 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
780                                       struct rtnl_link_stats64 *stats)
781 {
782         char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
783         int err;
784
785         err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
786                                           0, ppcnt_pl);
787         if (err)
788                 goto out;
789
790         stats->tx_packets =
791                 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
792         stats->rx_packets =
793                 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
794         stats->tx_bytes =
795                 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
796         stats->rx_bytes =
797                 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
798         stats->multicast =
799                 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
800
801         stats->rx_crc_errors =
802                 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
803         stats->rx_frame_errors =
804                 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
805
806         stats->rx_length_errors = (
807                 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
808                 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
809                 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
810
811         stats->rx_errors = (stats->rx_crc_errors +
812                 stats->rx_frame_errors + stats->rx_length_errors);
813
814 out:
815         return err;
816 }
817
/* Collect extended hardware counters (ECN marks, per-TC congestion/queue
 * counters and per-priority TX counters) into @xstats. A failed register
 * read leaves the corresponding fields untouched; errors are not reported
 * to the caller.
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_CNT,
						  i, ppcnt_pl);
		if (err)
			goto tc_cnt;

		xstats->wred_drop[i] =
			mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
		xstats->tc_ecn[i] = mlxsw_reg_ppcnt_ecn_marked_tc_get(ppcnt_pl);

		/* The TC counters below are read even when the congestion
		 * counters above could not be (goto target).
		 */
tc_cnt:
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}
863
864 static void update_stats_cache(struct work_struct *work)
865 {
866         struct mlxsw_sp_port *mlxsw_sp_port =
867                 container_of(work, struct mlxsw_sp_port,
868                              periodic_hw_stats.update_dw.work);
869
870         if (!netif_carrier_ok(mlxsw_sp_port->dev))
871                 /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
872                  * necessary when port goes down.
873                  */
874                 goto out;
875
876         mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
877                                    &mlxsw_sp_port->periodic_hw_stats.stats);
878         mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
879                                     &mlxsw_sp_port->periodic_hw_stats.xstats);
880
881 out:
882         mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
883                                MLXSW_HW_STATS_UPDATE_TIME);
884 }
885
886 /* Return the stats from a cache that is updated periodically,
887  * as this function might get called in an atomic context.
888  */
889 static void
890 mlxsw_sp_port_get_stats64(struct net_device *dev,
891                           struct rtnl_link_stats64 *stats)
892 {
893         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
894
895         memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
896 }
897
898 static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
899                                     u16 vid_begin, u16 vid_end,
900                                     bool is_member, bool untagged)
901 {
902         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
903         char *spvm_pl;
904         int err;
905
906         spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
907         if (!spvm_pl)
908                 return -ENOMEM;
909
910         mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
911                             vid_end, is_member, untagged);
912         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
913         kfree(spvm_pl);
914         return err;
915 }
916
917 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
918                            u16 vid_end, bool is_member, bool untagged)
919 {
920         u16 vid, vid_e;
921         int err;
922
923         for (vid = vid_begin; vid <= vid_end;
924              vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
925                 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
926                             vid_end);
927
928                 err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
929                                                is_member, untagged);
930                 if (err)
931                         return err;
932         }
933
934         return 0;
935 }
936
937 static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
938                                      bool flush_default)
939 {
940         struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
941
942         list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
943                                  &mlxsw_sp_port->vlans_list, list) {
944                 if (!flush_default &&
945                     mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
946                         continue;
947                 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
948         }
949 }
950
951 static void
952 mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
953 {
954         if (mlxsw_sp_port_vlan->bridge_port)
955                 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
956         else if (mlxsw_sp_port_vlan->fid)
957                 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
958 }
959
960 struct mlxsw_sp_port_vlan *
961 mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
962 {
963         struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
964         bool untagged = vid == MLXSW_SP_DEFAULT_VID;
965         int err;
966
967         mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
968         if (mlxsw_sp_port_vlan)
969                 return ERR_PTR(-EEXIST);
970
971         err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
972         if (err)
973                 return ERR_PTR(err);
974
975         mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
976         if (!mlxsw_sp_port_vlan) {
977                 err = -ENOMEM;
978                 goto err_port_vlan_alloc;
979         }
980
981         mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
982         mlxsw_sp_port_vlan->vid = vid;
983         list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
984
985         return mlxsw_sp_port_vlan;
986
987 err_port_vlan_alloc:
988         mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
989         return ERR_PTR(err);
990 }
991
992 void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
993 {
994         struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
995         u16 vid = mlxsw_sp_port_vlan->vid;
996
997         mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
998         list_del(&mlxsw_sp_port_vlan->list);
999         kfree(mlxsw_sp_port_vlan);
1000         mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1001 }
1002
1003 static int mlxsw_sp_port_add_vid(struct net_device *dev,
1004                                  __be16 __always_unused proto, u16 vid)
1005 {
1006         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1007
1008         /* VLAN 0 is added to HW filter when device goes up, but it is
1009          * reserved in our case, so simply return.
1010          */
1011         if (!vid)
1012                 return 0;
1013
1014         return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
1015 }
1016
1017 int mlxsw_sp_port_kill_vid(struct net_device *dev,
1018                            __be16 __always_unused proto, u16 vid)
1019 {
1020         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1021         struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1022
1023         /* VLAN 0 is removed from HW filter when device goes down, but
1024          * it is reserved in our case, so simply return.
1025          */
1026         if (!vid)
1027                 return 0;
1028
1029         mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1030         if (!mlxsw_sp_port_vlan)
1031                 return 0;
1032         mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1033
1034         return 0;
1035 }
1036
1037 static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
1038                                    struct flow_block_offload *f)
1039 {
1040         switch (f->binder_type) {
1041         case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
1042                 return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
1043         case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
1044                 return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
1045         case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
1046                 return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
1047         case FLOW_BLOCK_BINDER_TYPE_RED_MARK:
1048                 return mlxsw_sp_setup_tc_block_qevent_mark(mlxsw_sp_port, f);
1049         default:
1050                 return -EOPNOTSUPP;
1051         }
1052 }
1053
1054 static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
1055                              void *type_data)
1056 {
1057         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1058
1059         switch (type) {
1060         case TC_SETUP_BLOCK:
1061                 return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
1062         case TC_SETUP_QDISC_RED:
1063                 return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
1064         case TC_SETUP_QDISC_PRIO:
1065                 return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
1066         case TC_SETUP_QDISC_ETS:
1067                 return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
1068         case TC_SETUP_QDISC_TBF:
1069                 return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
1070         case TC_SETUP_QDISC_FIFO:
1071                 return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
1072         default:
1073                 return -EOPNOTSUPP;
1074         }
1075 }
1076
1077 static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
1078 {
1079         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1080
1081         if (!enable) {
1082                 if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
1083                     mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
1084                         netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
1085                         return -EINVAL;
1086                 }
1087                 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
1088                 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
1089         } else {
1090                 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
1091                 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
1092         }
1093         return 0;
1094 }
1095
1096 static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
1097 {
1098         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1099         char pplr_pl[MLXSW_REG_PPLR_LEN];
1100         int err;
1101
1102         if (netif_running(dev))
1103                 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1104
1105         mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
1106         err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
1107                               pplr_pl);
1108
1109         if (netif_running(dev))
1110                 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
1111
1112         return err;
1113 }
1114
1115 typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
1116
1117 static int mlxsw_sp_handle_feature(struct net_device *dev,
1118                                    netdev_features_t wanted_features,
1119                                    netdev_features_t feature,
1120                                    mlxsw_sp_feature_handler feature_handler)
1121 {
1122         netdev_features_t changes = wanted_features ^ dev->features;
1123         bool enable = !!(wanted_features & feature);
1124         int err;
1125
1126         if (!(changes & feature))
1127                 return 0;
1128
1129         err = feature_handler(dev, enable);
1130         if (err) {
1131                 netdev_err(dev, "%s feature %pNF failed, err %d\n",
1132                            enable ? "Enable" : "Disable", &feature, err);
1133                 return err;
1134         }
1135
1136         if (enable)
1137                 dev->features |= feature;
1138         else
1139                 dev->features &= ~feature;
1140
1141         return 0;
1142 }
1143 static int mlxsw_sp_set_features(struct net_device *dev,
1144                                  netdev_features_t features)
1145 {
1146         netdev_features_t oper_features = dev->features;
1147         int err = 0;
1148
1149         err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
1150                                        mlxsw_sp_feature_hw_tc);
1151         err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
1152                                        mlxsw_sp_feature_loopback);
1153
1154         if (err) {
1155                 dev->features = oper_features;
1156                 return -EINVAL;
1157         }
1158
1159         return 0;
1160 }
1161
1162 static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
1163                                       struct ifreq *ifr)
1164 {
1165         struct hwtstamp_config config;
1166         int err;
1167
1168         if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
1169                 return -EFAULT;
1170
1171         err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
1172                                                              &config);
1173         if (err)
1174                 return err;
1175
1176         if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
1177                 return -EFAULT;
1178
1179         return 0;
1180 }
1181
1182 static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
1183                                       struct ifreq *ifr)
1184 {
1185         struct hwtstamp_config config;
1186         int err;
1187
1188         err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
1189                                                              &config);
1190         if (err)
1191                 return err;
1192
1193         if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
1194                 return -EFAULT;
1195
1196         return 0;
1197 }
1198
/* Reset the port's HW timestamping to an all-zero hwtstamp_config
 * (HWTSTAMP_TX_OFF / HWTSTAMP_FILTER_NONE, both value 0). The return value
 * of the hwtstamp_set callback is deliberately ignored.
 */
static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}
1205
1206 static int
1207 mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1208 {
1209         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1210
1211         switch (cmd) {
1212         case SIOCSHWTSTAMP:
1213                 return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
1214         case SIOCGHWTSTAMP:
1215                 return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
1216         default:
1217                 return -EOPNOTSUPP;
1218         }
1219 }
1220
/* Netdev entry points for Spectrum port net devices. */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open               = mlxsw_sp_port_open,
	.ndo_stop               = mlxsw_sp_port_stop,
	.ndo_start_xmit         = mlxsw_sp_port_xmit,
	.ndo_setup_tc           = mlxsw_sp_setup_tc,
	.ndo_set_rx_mode        = mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address    = mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu         = mlxsw_sp_port_change_mtu,
	.ndo_get_stats64        = mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats  = mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats  = mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid    = mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid   = mlxsw_sp_port_kill_vid,
	.ndo_set_features       = mlxsw_sp_set_features,
	.ndo_eth_ioctl          = mlxsw_sp_port_ioctl,
};
1237
/* Restrict the port's advertised link speeds to the intersection of what
 * the device reports as capability (PTYS query) and what the driver
 * supports, then write the result back honoring the autoneg setting.
 *
 * Returns 0 on success or a negative errno from the PTYS query/write.
 */
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap_masked;
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	/* Set advertised speeds to speeds supported by both the driver
	 * and the device.
	 */
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
				 &eth_proto_admin, &eth_proto_oper);
	/* Mask the device capability bits down to driver-supported speeds. */
	eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_cap_masked,
			       mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
1267
1268 int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
1269 {
1270         const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
1271         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1272         char ptys_pl[MLXSW_REG_PTYS_LEN];
1273         u32 eth_proto_oper;
1274         int err;
1275
1276         port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
1277         port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
1278                                                mlxsw_sp_port->local_port, 0,
1279                                                false);
1280         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1281         if (err)
1282                 return err;
1283         port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
1284                                                  &eth_proto_oper);
1285         *speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
1286         return 0;
1287 }
1288
1289 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
1290                           enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
1291                           bool dwrr, u8 dwrr_weight)
1292 {
1293         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1294         char qeec_pl[MLXSW_REG_QEEC_LEN];
1295
1296         mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1297                             next_index);
1298         mlxsw_reg_qeec_de_set(qeec_pl, true);
1299         mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
1300         mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
1301         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1302 }
1303
1304 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
1305                                   enum mlxsw_reg_qeec_hr hr, u8 index,
1306                                   u8 next_index, u32 maxrate, u8 burst_size)
1307 {
1308         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1309         char qeec_pl[MLXSW_REG_QEEC_LEN];
1310
1311         mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1312                             next_index);
1313         mlxsw_reg_qeec_mase_set(qeec_pl, true);
1314         mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
1315         mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
1316         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1317 }
1318
1319 static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
1320                                     enum mlxsw_reg_qeec_hr hr, u8 index,
1321                                     u8 next_index, u32 minrate)
1322 {
1323         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1324         char qeec_pl[MLXSW_REG_QEEC_LEN];
1325
1326         mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1327                             next_index);
1328         mlxsw_reg_qeec_mise_set(qeec_pl, true);
1329         mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);
1330
1331         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1332 }
1333
1334 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
1335                               u8 switch_prio, u8 tclass)
1336 {
1337         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1338         char qtct_pl[MLXSW_REG_QTCT_LEN];
1339
1340         mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
1341                             tclass);
1342         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
1343 }
1344
/* Bring the port's QoS scheduling state to a known default:
 * - build the ETS element hierarchy (TC -> subgroup -> group),
 * - disable the max shaper in every hierarchy that supports it,
 * - configure the min shaper for the multicast TCs (i + 8),
 * - map all switch priorities to traffic class 0.
 * Returns 0 on success or the first failing register write's errno.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all member in the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		/* TC i feeds subgroup i with DWRR disabled. */
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		/* TC i + 8 (multicast, see min shaper setup below) shares
		 * subgroup i, using DWRR with weight 100.
		 */
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that support
	 * it. Note that this disables ptps (PTP shaper), but that is intended
	 * for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
1430
1431 static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
1432                                         bool enable)
1433 {
1434         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1435         char qtctm_pl[MLXSW_REG_QTCTM_LEN];
1436
1437         mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
1438         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
1439 }
1440
1441 static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
1442 {
1443         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1444         u8 slot_index = mlxsw_sp_port->mapping.slot_index;
1445         u8 module = mlxsw_sp_port->mapping.module;
1446         u64 overheat_counter;
1447         int err;
1448
1449         err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, slot_index,
1450                                                     module, &overheat_counter);
1451         if (err)
1452                 return err;
1453
1454         mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
1455         return 0;
1456 }
1457
1458 int
1459 mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
1460                                       bool is_8021ad_tagged,
1461                                       bool is_8021q_tagged)
1462 {
1463         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1464         char spvc_pl[MLXSW_REG_SPVC_LEN];
1465
1466         mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port,
1467                             is_8021ad_tagged, is_8021q_tagged);
1468         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
1469 }
1470
1471 static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp,
1472                                         u16 local_port, u8 *port_number,
1473                                         u8 *split_port_subnumber,
1474                                         u8 *slot_index)
1475 {
1476         char pllp_pl[MLXSW_REG_PLLP_LEN];
1477         int err;
1478
1479         mlxsw_reg_pllp_pack(pllp_pl, local_port);
1480         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pllp), pllp_pl);
1481         if (err)
1482                 return err;
1483         mlxsw_reg_pllp_unpack(pllp_pl, port_number,
1484                               split_port_subnumber, slot_index);
1485         return 0;
1486 }
1487
1488 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
1489                                 bool split,
1490                                 struct mlxsw_sp_port_mapping *port_mapping)
1491 {
1492         struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1493         struct mlxsw_sp_port *mlxsw_sp_port;
1494         u32 lanes = port_mapping->width;
1495         u8 split_port_subnumber;
1496         struct net_device *dev;
1497         u8 port_number;
1498         u8 slot_index;
1499         bool splittable;
1500         int err;
1501
1502         err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, port_mapping);
1503         if (err) {
1504                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
1505                         local_port);
1506                 return err;
1507         }
1508
1509         err = mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 0);
1510         if (err) {
1511                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
1512                         local_port);
1513                 goto err_port_swid_set;
1514         }
1515
1516         err = mlxsw_sp_port_label_info_get(mlxsw_sp, local_port, &port_number,
1517                                            &split_port_subnumber, &slot_index);
1518         if (err) {
1519                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get port label information\n",
1520                         local_port);
1521                 goto err_port_label_info_get;
1522         }
1523
1524         splittable = lanes > 1 && !split;
1525         err = mlxsw_core_port_init(mlxsw_sp->core, local_port, slot_index,
1526                                    port_number, split, split_port_subnumber,
1527                                    splittable, lanes, mlxsw_sp->base_mac,
1528                                    sizeof(mlxsw_sp->base_mac));
1529         if (err) {
1530                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
1531                         local_port);
1532                 goto err_core_port_init;
1533         }
1534
1535         dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
1536         if (!dev) {
1537                 err = -ENOMEM;
1538                 goto err_alloc_etherdev;
1539         }
1540         SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
1541         dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
1542         mlxsw_sp_port = netdev_priv(dev);
1543         mlxsw_core_port_netdev_link(mlxsw_sp->core, local_port,
1544                                     mlxsw_sp_port, dev);
1545         mlxsw_sp_port->dev = dev;
1546         mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1547         mlxsw_sp_port->local_port = local_port;
1548         mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
1549         mlxsw_sp_port->split = split;
1550         mlxsw_sp_port->mapping = *port_mapping;
1551         mlxsw_sp_port->link.autoneg = 1;
1552         INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
1553
1554         mlxsw_sp_port->pcpu_stats =
1555                 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
1556         if (!mlxsw_sp_port->pcpu_stats) {
1557                 err = -ENOMEM;
1558                 goto err_alloc_stats;
1559         }
1560
1561         INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
1562                           &update_stats_cache);
1563
1564         dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
1565         dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
1566
1567         err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
1568         if (err) {
1569                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
1570                         mlxsw_sp_port->local_port);
1571                 goto err_dev_addr_init;
1572         }
1573
1574         netif_carrier_off(dev);
1575
1576         dev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_FILTER |
1577                          NETIF_F_HW_TC;
1578         dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;
1579         dev->lltx = true;
1580         dev->netns_local = true;
1581
1582         dev->min_mtu = ETH_MIN_MTU;
1583         dev->max_mtu = MLXSW_PORT_MAX_MTU - MLXSW_PORT_ETH_FRAME_HDR;
1584
1585         /* Each packet needs to have a Tx header (metadata) on top all other
1586          * headers.
1587          */
1588         dev->needed_headroom = MLXSW_TXHDR_LEN;
1589
1590         err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
1591         if (err) {
1592                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
1593                         mlxsw_sp_port->local_port);
1594                 goto err_port_system_port_mapping_set;
1595         }
1596
1597         err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
1598         if (err) {
1599                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
1600                         mlxsw_sp_port->local_port);
1601                 goto err_port_speed_by_width_set;
1602         }
1603
1604         err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
1605                                                             &mlxsw_sp_port->max_speed);
1606         if (err) {
1607                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
1608                         mlxsw_sp_port->local_port);
1609                 goto err_max_speed_get;
1610         }
1611
1612         err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
1613         if (err) {
1614                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
1615                         mlxsw_sp_port->local_port);
1616                 goto err_port_mtu_set;
1617         }
1618
1619         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1620         if (err)
1621                 goto err_port_admin_status_set;
1622
1623         err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
1624         if (err) {
1625                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
1626                         mlxsw_sp_port->local_port);
1627                 goto err_port_buffers_init;
1628         }
1629
1630         err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
1631         if (err) {
1632                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
1633                         mlxsw_sp_port->local_port);
1634                 goto err_port_ets_init;
1635         }
1636
1637         err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
1638         if (err) {
1639                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
1640                         mlxsw_sp_port->local_port);
1641                 goto err_port_tc_mc_mode;
1642         }
1643
1644         /* ETS and buffers must be initialized before DCB. */
1645         err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
1646         if (err) {
1647                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
1648                         mlxsw_sp_port->local_port);
1649                 goto err_port_dcb_init;
1650         }
1651
1652         err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
1653         if (err) {
1654                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
1655                         mlxsw_sp_port->local_port);
1656                 goto err_port_fids_init;
1657         }
1658
1659         err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
1660         if (err) {
1661                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
1662                         mlxsw_sp_port->local_port);
1663                 goto err_port_qdiscs_init;
1664         }
1665
1666         err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
1667                                      false);
1668         if (err) {
1669                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
1670                         mlxsw_sp_port->local_port);
1671                 goto err_port_vlan_clear;
1672         }
1673
1674         err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
1675         if (err) {
1676                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
1677                         mlxsw_sp_port->local_port);
1678                 goto err_port_nve_init;
1679         }
1680
1681         err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
1682                                      ETH_P_8021Q);
1683         if (err) {
1684                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
1685                         mlxsw_sp_port->local_port);
1686                 goto err_port_pvid_set;
1687         }
1688
1689         mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
1690                                                        MLXSW_SP_DEFAULT_VID);
1691         if (IS_ERR(mlxsw_sp_port_vlan)) {
1692                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
1693                         mlxsw_sp_port->local_port);
1694                 err = PTR_ERR(mlxsw_sp_port_vlan);
1695                 goto err_port_vlan_create;
1696         }
1697         mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;
1698
1699         /* Set SPVC.et0=true and SPVC.et1=false to make the local port to treat
1700          * only packets with 802.1q header as tagged packets.
1701          */
1702         err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
1703         if (err) {
1704                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n",
1705                         local_port);
1706                 goto err_port_vlan_classification_set;
1707         }
1708
1709         INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
1710                           mlxsw_sp->ptp_ops->shaper_work);
1711
1712         mlxsw_sp->ports[local_port] = mlxsw_sp_port;
1713
1714         err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port);
1715         if (err) {
1716                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n",
1717                         mlxsw_sp_port->local_port);
1718                 goto err_port_overheat_init_val_set;
1719         }
1720
1721         err = register_netdev(dev);
1722         if (err) {
1723                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
1724                         mlxsw_sp_port->local_port);
1725                 goto err_register_netdev;
1726         }
1727
1728         mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
1729         return 0;
1730
1731 err_register_netdev:
1732 err_port_overheat_init_val_set:
1733         mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
1734 err_port_vlan_classification_set:
1735         mlxsw_sp->ports[local_port] = NULL;
1736         mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1737 err_port_vlan_create:
1738 err_port_pvid_set:
1739         mlxsw_sp_port_nve_fini(mlxsw_sp_port);
1740 err_port_nve_init:
1741 err_port_vlan_clear:
1742         mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
1743 err_port_qdiscs_init:
1744         mlxsw_sp_port_fids_fini(mlxsw_sp_port);
1745 err_port_fids_init:
1746         mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
1747 err_port_dcb_init:
1748         mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
1749 err_port_tc_mc_mode:
1750 err_port_ets_init:
1751         mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
1752 err_port_buffers_init:
1753 err_port_admin_status_set:
1754 err_port_mtu_set:
1755 err_max_speed_get:
1756 err_port_speed_by_width_set:
1757 err_port_system_port_mapping_set:
1758 err_dev_addr_init:
1759         free_percpu(mlxsw_sp_port->pcpu_stats);
1760 err_alloc_stats:
1761         free_netdev(dev);
1762 err_alloc_etherdev:
1763         mlxsw_core_port_fini(mlxsw_sp->core, local_port);
1764 err_core_port_init:
1765 err_port_label_info_get:
1766         mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
1767                                MLXSW_PORT_SWID_DISABLED_PORT);
1768 err_port_swid_set:
1769         mlxsw_sp_port_module_unmap(mlxsw_sp, local_port,
1770                                    port_mapping->slot_index,
1771                                    port_mapping->module);
1772         return err;
1773 }
1774
/* Tear down a front-panel port previously set up by mlxsw_sp_port_create().
 * The sequence mirrors port creation in reverse; statement order matters
 * (e.g. delayed works are cancelled before the netdev is unregistered and
 * the ports[] slot is cleared before the per-port state is dismantled).
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	/* Save the module mapping now; the port structure holding it is
	 * freed before the unmap call at the bottom.
	 */
	u8 slot_index = mlxsw_sp_port->mapping.slot_index;
	u8 module = mlxsw_sp_port->mapping.module;

	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	/* Undo the VLAN classification setting applied at port creation. */
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	/* The VLAN flush above should have emptied the list. */
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
			       MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, slot_index, module);
}
1802
1803 static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
1804 {
1805         struct mlxsw_sp_port *mlxsw_sp_port;
1806         int err;
1807
1808         mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
1809         if (!mlxsw_sp_port)
1810                 return -ENOMEM;
1811
1812         mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1813         mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;
1814
1815         err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
1816                                        mlxsw_sp_port,
1817                                        mlxsw_sp->base_mac,
1818                                        sizeof(mlxsw_sp->base_mac));
1819         if (err) {
1820                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
1821                 goto err_core_cpu_port_init;
1822         }
1823
1824         mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
1825         return 0;
1826
1827 err_core_cpu_port_init:
1828         kfree(mlxsw_sp_port);
1829         return err;
1830 }
1831
1832 static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
1833 {
1834         struct mlxsw_sp_port *mlxsw_sp_port =
1835                                 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
1836
1837         mlxsw_core_cpu_port_fini(mlxsw_sp->core);
1838         mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
1839         kfree(mlxsw_sp_port);
1840 }
1841
1842 static bool mlxsw_sp_local_port_valid(u16 local_port)
1843 {
1844         return local_port != MLXSW_PORT_CPU_PORT;
1845 }
1846
1847 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port)
1848 {
1849         if (!mlxsw_sp_local_port_valid(local_port))
1850                 return false;
1851         return mlxsw_sp->ports[local_port] != NULL;
1852 }
1853
1854 static int mlxsw_sp_port_mapping_event_set(struct mlxsw_sp *mlxsw_sp,
1855                                            u16 local_port, bool enable)
1856 {
1857         char pmecr_pl[MLXSW_REG_PMECR_LEN];
1858
1859         mlxsw_reg_pmecr_pack(pmecr_pl, local_port,
1860                              enable ? MLXSW_REG_PMECR_E_GENERATE_EVENT :
1861                                       MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT);
1862         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmecr), pmecr_pl);
1863 }
1864
/* One deferred PMLP (port mapping) event. Queued from trap context by
 * mlxsw_sp_port_mapping_listener_func() and consumed in process context
 * by mlxsw_sp_port_mapping_events_work().
 */
struct mlxsw_sp_port_mapping_event {
	struct list_head list;
	/* Raw PMLP register payload copied from the trap. */
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
};
1869
/* Worker consuming queued PMLP (port mapping) events.
 *
 * Events are queued from trap (atomic) context by
 * mlxsw_sp_port_mapping_listener_func(); this worker splices the queue
 * onto a private list and processes each event in process context,
 * where the devlink lock may be taken.
 */
static void mlxsw_sp_port_mapping_events_work(struct work_struct *work)
{
	struct mlxsw_sp_port_mapping_event *event, *next_event;
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp *mlxsw_sp;
	struct devlink *devlink;
	LIST_HEAD(event_queue);
	u16 local_port;
	int err;

	events = container_of(work, struct mlxsw_sp_port_mapping_events, work);
	mlxsw_sp = container_of(events, struct mlxsw_sp, port_mapping_events);
	devlink = priv_to_devlink(mlxsw_sp->core);

	/* Detach the whole pending queue in one splice so the lock shared
	 * with the atomic-context producer is held only briefly.
	 */
	spin_lock_bh(&events->queue_lock);
	list_splice_init(&events->queue, &event_queue);
	spin_unlock_bh(&events->queue_lock);

	list_for_each_entry_safe(event, next_event, &event_queue, list) {
		local_port = mlxsw_reg_pmlp_local_port_get(event->pmlp_pl);
		err = mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
						      event->pmlp_pl, &port_mapping);
		if (err)
			goto out;

		/* NOTE(review): zero width is treated as unexpected —
		 * presumably mapping events fire only for mapped ports;
		 * confirm against PMLP semantics.
		 */
		if (WARN_ON_ONCE(!port_mapping.width))
			goto out;

		devl_lock(devlink);

		/* The port is expected not to exist yet; creation errors
		 * are deliberately ignored (best effort).
		 */
		if (!mlxsw_sp_port_created(mlxsw_sp, local_port))
			mlxsw_sp_port_create(mlxsw_sp, local_port,
					     false, &port_mapping);
		else
			WARN_ON_ONCE(1);

		devl_unlock(devlink);

		/* Cache the new mapping; it is read by the port creation
		 * and split/unsplit paths.
		 */
		mlxsw_sp->port_mapping[local_port] = port_mapping;

out:
		kfree(event);
	}
}
1915
1916 static void
1917 mlxsw_sp_port_mapping_listener_func(const struct mlxsw_reg_info *reg,
1918                                     char *pmlp_pl, void *priv)
1919 {
1920         struct mlxsw_sp_port_mapping_events *events;
1921         struct mlxsw_sp_port_mapping_event *event;
1922         struct mlxsw_sp *mlxsw_sp = priv;
1923         u16 local_port;
1924
1925         local_port = mlxsw_reg_pmlp_local_port_get(pmlp_pl);
1926         if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
1927                 return;
1928
1929         events = &mlxsw_sp->port_mapping_events;
1930         event = kmalloc(sizeof(*event), GFP_ATOMIC);
1931         if (!event)
1932                 return;
1933         memcpy(event->pmlp_pl, pmlp_pl, sizeof(event->pmlp_pl));
1934         spin_lock(&events->queue_lock);
1935         list_add_tail(&event->list, &events->queue);
1936         spin_unlock(&events->queue_lock);
1937         mlxsw_core_schedule_work(&events->work);
1938 }
1939
1940 static void
1941 __mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp *mlxsw_sp)
1942 {
1943         struct mlxsw_sp_port_mapping_event *event, *next_event;
1944         struct mlxsw_sp_port_mapping_events *events;
1945
1946         events = &mlxsw_sp->port_mapping_events;
1947
1948         /* Caller needs to make sure that no new event is going to appear. */
1949         cancel_work_sync(&events->work);
1950         list_for_each_entry_safe(event, next_event, &events->queue, list) {
1951                 list_del(&event->list);
1952                 kfree(event);
1953         }
1954 }
1955
1956 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
1957 {
1958         unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
1959         int i;
1960
1961         for (i = 1; i < max_ports; i++)
1962                 mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
1963         /* Make sure all scheduled events are processed */
1964         __mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
1965
1966         for (i = 1; i < max_ports; i++)
1967                 if (mlxsw_sp_port_created(mlxsw_sp, i))
1968                         mlxsw_sp_port_remove(mlxsw_sp, i);
1969         mlxsw_sp_cpu_port_remove(mlxsw_sp);
1970         kfree(mlxsw_sp->ports);
1971         mlxsw_sp->ports = NULL;
1972 }
1973
1974 static void
1975 mlxsw_sp_ports_remove_selected(struct mlxsw_core *mlxsw_core,
1976                                bool (*selector)(void *priv, u16 local_port),
1977                                void *priv)
1978 {
1979         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1980         unsigned int max_ports = mlxsw_core_max_ports(mlxsw_core);
1981         int i;
1982
1983         for (i = 1; i < max_ports; i++)
1984                 if (mlxsw_sp_port_created(mlxsw_sp, i) && selector(priv, i))
1985                         mlxsw_sp_port_remove(mlxsw_sp, i);
1986 }
1987
/* Allocate the ports[] array, enable mapping events, create the CPU port
 * and then every front-panel port that already has a module mapping.
 * Returns 0 on success or a negative errno; on failure everything set up
 * so far is unwound.
 */
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping *port_mapping;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	events = &mlxsw_sp->port_mapping_events;
	INIT_LIST_HEAD(&events->queue);
	spin_lock_init(&events->queue_lock);
	INIT_WORK(&events->work, mlxsw_sp_port_mapping_events_work);

	for (i = 1; i < max_ports; i++) {
		err = mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, true);
		if (err)
			goto err_event_enable;
	}

	err = mlxsw_sp_cpu_port_create(mlxsw_sp);
	if (err)
		goto err_cpu_port_create;

	for (i = 1; i < max_ports; i++) {
		port_mapping = &mlxsw_sp->port_mapping[i];
		/* Ports without a module mapping are not instantiated. */
		if (!port_mapping->width)
			continue;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	/* Events were enabled for all ports before creation started, so
	 * reset i to make the loop below disable every one of them.
	 */
	i = max_ports;
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
err_event_enable:
	for (i--; i >= 1; i--)
		mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
	/* Make sure all scheduled events are processed */
	__mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
	return err;
}
2043
2044 static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
2045 {
2046         unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
2047         struct mlxsw_sp_port_mapping *port_mapping;
2048         int i;
2049         int err;
2050
2051         mlxsw_sp->port_mapping = kcalloc(max_ports,
2052                                          sizeof(struct mlxsw_sp_port_mapping),
2053                                          GFP_KERNEL);
2054         if (!mlxsw_sp->port_mapping)
2055                 return -ENOMEM;
2056
2057         for (i = 1; i < max_ports; i++) {
2058                 port_mapping = &mlxsw_sp->port_mapping[i];
2059                 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, port_mapping);
2060                 if (err)
2061                         goto err_port_module_info_get;
2062         }
2063         return 0;
2064
2065 err_port_module_info_get:
2066         kfree(mlxsw_sp->port_mapping);
2067         return err;
2068 }
2069
/* Free the per-port module mapping array allocated by
 * mlxsw_sp_port_module_info_init().
 */
static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->port_mapping);
}
2074
/* Create `count` split ports sharing the original port's module.
 *
 * Each split port gets 1/count of the original width, and the starting
 * lane advances by that width for every consecutive split port. On
 * failure, the split ports created so far are removed (recreating the
 * original ports is the caller's responsibility).
 */
static int
mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_port_mapping *port_mapping,
			   unsigned int count, const char *pmtdb_pl)
{
	struct mlxsw_sp_port_mapping split_port_mapping;
	int err, i;

	split_port_mapping = *port_mapping;
	split_port_mapping.width /= count;
	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (!mlxsw_sp_local_port_valid(s_local_port))
			continue;

		err = mlxsw_sp_port_create(mlxsw_sp, s_local_port,
					   true, &split_port_mapping);
		if (err)
			goto err_port_create;
		split_port_mapping.lane += split_port_mapping.width;
	}

	return 0;

err_port_create:
	/* Unwind only the split ports created in this call. */
	for (i--; i >= 0; i--) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}
	return err;
}
2109
2110 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
2111                                          unsigned int count,
2112                                          const char *pmtdb_pl)
2113 {
2114         struct mlxsw_sp_port_mapping *port_mapping;
2115         int i;
2116
2117         /* Go over original unsplit ports in the gap and recreate them. */
2118         for (i = 0; i < count; i++) {
2119                 u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
2120
2121                 port_mapping = &mlxsw_sp->port_mapping[local_port];
2122                 if (!port_mapping->width || !mlxsw_sp_local_port_valid(local_port))
2123                         continue;
2124                 mlxsw_sp_port_create(mlxsw_sp, local_port,
2125                                      false, port_mapping);
2126         }
2127 }
2128
2129 static struct mlxsw_sp_port *
2130 mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u16 local_port)
2131 {
2132         if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
2133                 return mlxsw_sp->ports[local_port];
2134         return NULL;
2135 }
2136
/* devlink ->port_split() handler.
 *
 * Validates the request, queries the PMTDB register for the set of local
 * ports involved, removes the existing ports in that range and creates
 * the split ports instead. On failure the original unsplit ports are
 * recreated.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pmtdb_status status;
	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	/* Splitting a port that is itself a split port is rejected. */
	if (mlxsw_sp_port->split) {
		NL_SET_ERR_MSG_MOD(extack, "Port is already split");
		return -EINVAL;
	}

	/* Query which local ports participate in this split and whether
	 * the requested configuration is supported.
	 */
	mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
			     mlxsw_sp_port->mapping.module,
			     mlxsw_sp_port->mapping.module_width / count,
			     count);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
		return err;
	}

	status = mlxsw_reg_pmtdb_status_get(pmtdb_pl);
	if (status != MLXSW_REG_PMTDB_STATUS_SUCCESS) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported split configuration");
		return -EINVAL;
	}

	/* Copy the mapping before the port is removed below. */
	port_mapping = mlxsw_sp_port->mapping;

	/* Remove the ports currently occupying the lanes of the new
	 * split ports.
	 */
	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}

	err = mlxsw_sp_port_split_create(mlxsw_sp, &port_mapping,
					 count, pmtdb_pl);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Restore the original unsplit ports. */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);

	return err;
}
2201
/* devlink ->port_unsplit() handler.
 *
 * Derives the split count from the port's mapping, removes all split
 * ports belonging to the module and recreates the original unsplit
 * ports from the cached mappings.
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
	unsigned int count;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	/* Only ports created by a previous split can be unsplit. */
	if (!mlxsw_sp_port->split) {
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	/* Number of split ports the module was divided into. */
	count = mlxsw_sp_port->mapping.module_width /
		mlxsw_sp_port->mapping.width;

	mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
			     mlxsw_sp_port->mapping.module,
			     mlxsw_sp_port->mapping.module_width / count,
			     count);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
		return err;
	}

	/* Remove all split ports of the module. */
	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}

	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);

	return 0;
}
2249
2250 static void
2251 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
2252 {
2253         int i;
2254
2255         for (i = 0; i < TC_MAX_QUEUE; i++)
2256                 mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
2257 }
2258
2259 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2260                                      char *pude_pl, void *priv)
2261 {
2262         struct mlxsw_sp *mlxsw_sp = priv;
2263         struct mlxsw_sp_port *mlxsw_sp_port;
2264         enum mlxsw_reg_pude_oper_status status;
2265         u16 local_port;
2266
2267         local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2268
2269         if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
2270                 return;
2271         mlxsw_sp_port = mlxsw_sp->ports[local_port];
2272         if (!mlxsw_sp_port)
2273                 return;
2274
2275         status = mlxsw_reg_pude_oper_status_get(pude_pl);
2276         if (status == MLXSW_PORT_OPER_STATUS_UP) {
2277                 netdev_info(mlxsw_sp_port->dev, "link up\n");
2278                 netif_carrier_on(mlxsw_sp_port->dev);
2279                 mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
2280         } else {
2281                 netdev_info(mlxsw_sp_port->dev, "link down\n");
2282                 netif_carrier_off(mlxsw_sp_port->dev);
2283                 mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
2284         }
2285 }
2286
2287 static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
2288                                           char *mtpptr_pl, bool ingress)
2289 {
2290         u16 local_port;
2291         u8 num_rec;
2292         int i;
2293
2294         local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
2295         num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
2296         for (i = 0; i < num_rec; i++) {
2297                 u8 domain_number;
2298                 u8 message_type;
2299                 u16 sequence_id;
2300                 u64 timestamp;
2301
2302                 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
2303                                         &domain_number, &sequence_id,
2304                                         &timestamp);
2305                 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
2306                                             message_type, domain_number,
2307                                             sequence_id, timestamp);
2308         }
2309 }
2310
2311 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
2312                                               char *mtpptr_pl, void *priv)
2313 {
2314         struct mlxsw_sp *mlxsw_sp = priv;
2315
2316         mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
2317 }
2318
2319 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
2320                                               char *mtpptr_pl, void *priv)
2321 {
2322         struct mlxsw_sp *mlxsw_sp = priv;
2323
2324         mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
2325 }
2326
/* Common receive handler for trapped packets: attach the skb to the
 * ingress port's netdev, bump per-CPU RX counters and hand the packet
 * to the GRO receive path.
 */
void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
				       u16 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	/* Packets may still arrive while a port is being torn down. */
	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	/* u64_stats section keeps the packet/byte pair consistent for
	 * 64-bit reads on 32-bit architectures.
	 */
	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	/* NOTE(review): assumes rx_md_info.napi was populated by the bus
	 * receive path before dispatch — confirm against the PCI driver.
	 */
	napi_gro_receive(mlxsw_skb_cb(skb)->rx_md_info.napi, skb);
}
2351
2352 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port,
2353                                            void *priv)
2354 {
2355         skb->offload_fwd_mark = 1;
2356         return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2357 }
2358
2359 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
2360                                               u16 local_port, void *priv)
2361 {
2362         skb->offload_l3_fwd_mark = 1;
2363         skb->offload_fwd_mark = 1;
2364         return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2365 }
2366
/* Dispatch a received PTP packet to the ASIC-generation-specific
 * receive handler (SP1 vs. SP2+ handle PTP timestamping differently).
 */
void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			  u16 local_port)
{
	mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}
2372
/* Trap to CPU without setting skb->offload_fwd_mark. The trailing
 * DISCARD is the action used when the listener is disabled.
 */
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)  \
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
		  _is_ctrl, SP_##_trap_group, DISCARD)

/* As above, but mark the skb as already L2-forwarded in hardware. */
#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)     \
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,    \
		_is_ctrl, SP_##_trap_group, DISCARD)

/* As above, but additionally mark the skb as L3-forwarded in hardware. */
#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)  \
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \
		_is_ctrl, SP_##_trap_group, DISCARD)

/* Event listener bound to the common Spectrum event trap group. */
#define MLXSW_SP_EVENTL(_func, _trap_id)                \
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
2387
/* Traps and events common to all Spectrum generations, registered once
 * at driver init via mlxsw_sp_traps_init().
 */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
			     ROUTER_EXP, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
};
2413
/* Spectrum-1-only listeners: PTP timestamp FIFO events, as SP1 reports
 * hardware timestamps through MTPPTR FIFO events rather than the CQE.
 */
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};
2419
/* Spectrum-2-and-later-only listeners: port mapping change events. */
static const struct mlxsw_listener mlxsw_sp2_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_port_mapping_listener_func, PMLPE),
};
2424
2425 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
2426 {
2427         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2428         char qpcr_pl[MLXSW_REG_QPCR_LEN];
2429         enum mlxsw_reg_qpcr_ir_units ir_units;
2430         int max_cpu_policers;
2431         bool is_bytes;
2432         u8 burst_size;
2433         u32 rate;
2434         int i, err;
2435
2436         if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
2437                 return -EIO;
2438
2439         max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
2440
2441         ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
2442         for (i = 0; i < max_cpu_policers; i++) {
2443                 is_bytes = false;
2444                 switch (i) {
2445                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
2446                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
2447                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
2448                         rate = 1024;
2449                         burst_size = 7;
2450                         break;
2451                 default:
2452                         continue;
2453                 }
2454
2455                 __set_bit(i, mlxsw_sp->trap->policers_usage);
2456                 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
2457                                     burst_size);
2458                 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
2459                 if (err)
2460                         return err;
2461         }
2462
2463         return 0;
2464 }
2465
/* Bind trap groups to their CPU policer, priority and traffic class via
 * the HTGT register. Groups not listed in the switch are left with their
 * defaults. Must run after mlxsw_sp_cpu_policers_set() so the policers
 * referenced here are already configured.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		/* By convention, policer ID equals the trap group ID,
		 * unless overridden below.
		 */
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			/* Events are not rate limited. */
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		/* A real policer ID must fit within the device's policer
		 * range; the invalid sentinel is exempt.
		 */
		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}
2512
/* Allocate the trap context (including the policer usage bitmap sized by
 * the MAX_CPU_POLICERS resource), configure CPU policers and trap groups,
 * then register the common listeners followed by the per-ASIC ones.
 * On failure, unwinds in reverse order via the goto chain.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_trap *trap;
	u64 max_policers;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
		return -EIO;
	max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
	trap = kzalloc(struct_size(trap, policers_usage,
				   BITS_TO_LONGS(max_policers)), GFP_KERNEL);
	if (!trap)
		return -ENOMEM;
	trap->max_policers = max_policers;
	mlxsw_sp->trap = trap;

	/* Policers must be set before the trap groups that reference them. */
	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		goto err_cpu_policers_set;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		goto err_trap_groups_set;

	err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp_listener,
					ARRAY_SIZE(mlxsw_sp_listener),
					mlxsw_sp);
	if (err)
		goto err_traps_register;

	/* Per-ASIC listeners (mlxsw_sp->listeners is set by the variant's
	 * driver init, e.g. SP1 PTP FIFO events or SP2+ PMLPE).
	 */
	err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp->listeners,
					mlxsw_sp->listeners_count, mlxsw_sp);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
				    ARRAY_SIZE(mlxsw_sp_listener),
				    mlxsw_sp);
err_traps_register:
err_trap_groups_set:
err_cpu_policers_set:
	kfree(trap);
	return err;
}
2560
/* Tear down traps in reverse order of mlxsw_sp_traps_init():
 * per-ASIC listeners first, then the common ones, then the context.
 */
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp->listeners,
				    mlxsw_sp->listeners_count,
				    mlxsw_sp);
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
				    ARRAY_SIZE(mlxsw_sp_listener), mlxsw_sp);
	kfree(mlxsw_sp->trap);
}
2570
/* Reserve a PGT range for the LAG table and tell the device its base via
 * SGCR. Only needed in software LAG mode; in firmware LAG mode the
 * firmware manages the table itself.
 */
static int mlxsw_sp_lag_pgt_init(struct mlxsw_sp *mlxsw_sp)
{
	char sgcr_pl[MLXSW_REG_SGCR_LEN];
	int err;

	if (mlxsw_core_lag_mode(mlxsw_sp->core) !=
	    MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW)
		return 0;

	/* In DDD mode, which we by default use, each LAG entry is 8 PGT
	 * entries. The LAG table address needs to be 8-aligned, but that ought
	 * to be the case, since the LAG table is allocated first.
	 */
	err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, &mlxsw_sp->lag_pgt_base,
					   mlxsw_sp->max_lag * 8);
	if (err)
		return err;
	/* Defensive check of the alignment assumption stated above. */
	if (WARN_ON_ONCE(mlxsw_sp->lag_pgt_base % 8)) {
		err = -EINVAL;
		goto err_mid_alloc_range;
	}

	mlxsw_reg_sgcr_pack(sgcr_pl, mlxsw_sp->lag_pgt_base);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sgcr), sgcr_pl);
	if (err)
		goto err_mid_alloc_range;

	return 0;

err_mid_alloc_range:
	mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base,
				    mlxsw_sp->max_lag * 8);
	return err;
}
2605
2606 static void mlxsw_sp_lag_pgt_fini(struct mlxsw_sp *mlxsw_sp)
2607 {
2608         if (mlxsw_core_lag_mode(mlxsw_sp->core) !=
2609             MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW)
2610                 return;
2611
2612         mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base,
2613                                     mlxsw_sp->max_lag * 8);
2614 }
2615
/* Initial seed mixed into the LAG hash via jhash() in mlxsw_sp_lag_init(). */
#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe

/* One entry of the mlxsw_sp->lags array, indexed by LAG ID. */
struct mlxsw_sp_lag {
	struct net_device *dev;		/* upper LAG netdevice, NULL if free */
	refcount_t ref_count;		/* number of member ports using it */
	u16 lag_id;
};
2623
/* Configure LAG hashing (SLCR), query the maximal number of LAGs, set up
 * the LAG PGT range and allocate the software LAG table.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u32 seed;
	int err;

	/* Derive a per-device hash seed from the base MAC so different
	 * switches do not hash identically.
	 */
	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
		     MLXSW_SP_LAG_SEED_INIT);
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	err = mlxsw_core_max_lag(mlxsw_sp->core, &mlxsw_sp->max_lag);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	/* PGT range must exist before ports can join LAGs. */
	err = mlxsw_sp_lag_pgt_init(mlxsw_sp);
	if (err)
		return err;

	mlxsw_sp->lags = kcalloc(mlxsw_sp->max_lag, sizeof(struct mlxsw_sp_lag),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags) {
		err = -ENOMEM;
		goto err_kcalloc;
	}

	return 0;

err_kcalloc:
	mlxsw_sp_lag_pgt_fini(mlxsw_sp);
	return err;
}
2669
/* Undo mlxsw_sp_lag_init(): free the PGT range and the LAG table. */
static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_lag_pgt_fini(mlxsw_sp);
	kfree(mlxsw_sp->lags);
}
2675
/* PTP callbacks for Spectrum-1 (FIFO-event based timestamping). */
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init	= mlxsw_sp1_ptp_clock_init,
	.clock_fini	= mlxsw_sp1_ptp_clock_fini,
	.init		= mlxsw_sp1_ptp_init,
	.fini		= mlxsw_sp1_ptp_fini,
	.receive	= mlxsw_sp1_ptp_receive,
	.transmitted	= mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp1_ptp_shaper_work,
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
	.get_ts_info	= mlxsw_sp1_ptp_get_ts_info,
#endif
	.get_stats_count = mlxsw_sp1_get_stats_count,
	.get_stats_strings = mlxsw_sp1_get_stats_strings,
	.get_stats	= mlxsw_sp1_get_stats,
};
2693
/* PTP callbacks for Spectrum-2/3. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
#endif
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats	= mlxsw_sp2_get_stats,
	/* NOTE(review): presumably makes PTP packets go out via the data
	 * path on SP2/3 only (SP4 table omits it) — confirm in ptp code.
	 */
	.tx_as_data	= true,
};
2712
/* PTP callbacks for Spectrum-4; reuses the SP2 implementations but,
 * unlike mlxsw_sp2_ptp_ops, does not set tx_as_data.
 */
static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
#endif
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats	= mlxsw_sp2_get_stats,
};
2730
/* Hash table node binding a packet-sampling trigger (the key) to its
 * psample parameters. Refcounted so identical triggers can be shared.
 */
struct mlxsw_sp_sample_trigger_node {
	struct mlxsw_sp_sample_trigger trigger;	/* hash key */
	struct mlxsw_sp_sample_params params;
	struct rhash_head ht_node;
	struct rcu_head rcu;	/* nodes are freed via kfree_rcu() */
	refcount_t refcount;
};
2738
/* The key covers the whole mlxsw_sp_sample_trigger struct (key_len is its
 * full size), so keys must be built with mlxsw_sp_sample_trigger_key_init()
 * which zeroes padding and unused fields.
 */
static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger),
	.head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_sample_trigger),
	.automatic_shrinking = true,
};
2745
/* Build a canonical hash key from a trigger. The memset() is required:
 * the rhashtable hashes key_len == sizeof(*key) bytes, so padding and any
 * fields not copied here must be deterministically zero.
 */
static void
mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key,
				 const struct mlxsw_sp_sample_trigger *trigger)
{
	memset(key, 0, sizeof(*key));
	key->type = trigger->type;
	key->local_port = trigger->local_port;
}
2754
2755 /* RCU read lock must be held */
2756 struct mlxsw_sp_sample_params *
2757 mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp,
2758                                       const struct mlxsw_sp_sample_trigger *trigger)
2759 {
2760         struct mlxsw_sp_sample_trigger_node *trigger_node;
2761         struct mlxsw_sp_sample_trigger key;
2762
2763         mlxsw_sp_sample_trigger_key_init(&key, trigger);
2764         trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key,
2765                                          mlxsw_sp_sample_trigger_ht_params);
2766         if (!trigger_node)
2767                 return NULL;
2768
2769         return &trigger_node->params;
2770 }
2771
2772 static int
2773 mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp,
2774                                   const struct mlxsw_sp_sample_trigger *trigger,
2775                                   const struct mlxsw_sp_sample_params *params)
2776 {
2777         struct mlxsw_sp_sample_trigger_node *trigger_node;
2778         int err;
2779
2780         trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL);
2781         if (!trigger_node)
2782                 return -ENOMEM;
2783
2784         trigger_node->trigger = *trigger;
2785         trigger_node->params = *params;
2786         refcount_set(&trigger_node->refcount, 1);
2787
2788         err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht,
2789                                      &trigger_node->ht_node,
2790                                      mlxsw_sp_sample_trigger_ht_params);
2791         if (err)
2792                 goto err_rhashtable_insert;
2793
2794         return 0;
2795
2796 err_rhashtable_insert:
2797         kfree(trigger_node);
2798         return err;
2799 }
2800
/* Remove a trigger node from the hash table and free it. kfree_rcu() is
 * used because readers look the node up under RCU
 * (mlxsw_sp_sample_trigger_params_lookup()).
 */
static void
mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_sample_trigger_node *trigger_node)
{
	rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht,
			       &trigger_node->ht_node,
			       mlxsw_sp_sample_trigger_ht_params);
	kfree_rcu(trigger_node, rcu);
}
2810
/* Install sampling parameters for @trigger, or take a reference on an
 * existing identical trigger. Called under RTNL.
 *
 * Sharing rules: a per-port trigger (non-zero local_port) can only be
 * installed once; other trigger types may be shared, but only when the
 * requested parameters exactly match the installed ones.
 */
int
mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_sp_sample_trigger *trigger,
				   const struct mlxsw_sp_sample_params *params,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	struct mlxsw_sp_sample_trigger key;

	ASSERT_RTNL();

	mlxsw_sp_sample_trigger_key_init(&key, trigger);

	trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
					      &key,
					      mlxsw_sp_sample_trigger_ht_params);
	if (!trigger_node)
		return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key,
							 params);

	/* Per-port triggers cannot be shared. */
	if (trigger_node->trigger.local_port) {
		NL_SET_ERR_MSG_MOD(extack, "Sampling already enabled on port");
		return -EINVAL;
	}

	if (trigger_node->params.psample_group != params->psample_group ||
	    trigger_node->params.truncate != params->truncate ||
	    trigger_node->params.rate != params->rate ||
	    trigger_node->params.trunc_size != params->trunc_size) {
		NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger");
		return -EINVAL;
	}

	refcount_inc(&trigger_node->refcount);

	return 0;
}
2848
2849 void
2850 mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp,
2851                                      const struct mlxsw_sp_sample_trigger *trigger)
2852 {
2853         struct mlxsw_sp_sample_trigger_node *trigger_node;
2854         struct mlxsw_sp_sample_trigger key;
2855
2856         ASSERT_RTNL();
2857
2858         mlxsw_sp_sample_trigger_key_init(&key, trigger);
2859
2860         trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
2861                                               &key,
2862                                               mlxsw_sp_sample_trigger_ht_params);
2863         if (!trigger_node)
2864                 return;
2865
2866         if (!refcount_dec_and_test(&trigger_node->refcount))
2867                 return;
2868
2869         mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node);
2870 }
2871
2872 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
2873                                     unsigned long event, void *ptr);
2874
/* Packet parsing depth (bytes) and default VXLAN UDP destination port. */
#define MLXSW_SP_DEFAULT_PARSING_DEPTH 96
#define MLXSW_SP_INCREASED_PARSING_DEPTH 128
#define MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT 4789

/* Initialize the parsing-configuration state to its defaults. The
 * refcount tracks how many users requested the increased parsing depth.
 */
static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp)
{
	refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 0);
	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;
	mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT;
	mutex_init(&mlxsw_sp->parsing.lock);
}
2886
/* Destroy the parsing lock; warn if someone still holds a parsing-depth
 * reference, which would indicate an unbalanced get/put.
 */
static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp)
{
	mutex_destroy(&mlxsw_sp->parsing.lock);
	WARN_ON_ONCE(refcount_read(&mlxsw_sp->parsing.parsing_depth_ref));
}
2892
/* Refcounted mapping from an IPv6 address to the KVDL entry holding it,
 * so multiple users of the same address share one KVDL entry.
 */
struct mlxsw_sp_ipv6_addr_node {
	struct in6_addr key;
	struct rhash_head ht_node;
	u32 kvdl_index;
	refcount_t refcount;
};
2899
/* Hash table of mlxsw_sp_ipv6_addr_node, keyed by the raw in6_addr. */
static const struct rhashtable_params mlxsw_sp_ipv6_addr_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, key),
	.head_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, ht_node),
	.key_len = sizeof(struct in6_addr),
	.automatic_shrinking = true,
};
2906
/* Allocate a KVDL entry for @addr6, program the address into it via the
 * RIPS register, and track it in the hash table with refcount 1.
 * Returns the KVDL index through @p_kvdl_index.
 * Caller must hold ipv6_addr_ht_lock.
 */
static int
mlxsw_sp_ipv6_addr_init(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6,
			u32 *p_kvdl_index)
{
	struct mlxsw_sp_ipv6_addr_node *node;
	char rips_pl[MLXSW_REG_RIPS_LEN];
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp,
				  MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
				  p_kvdl_index);
	if (err)
		return err;

	mlxsw_reg_rips_pack(rips_pl, *p_kvdl_index, addr6);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl);
	if (err)
		goto err_rips_write;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node) {
		err = -ENOMEM;
		goto err_node_alloc;
	}

	node->key = *addr6;
	node->kvdl_index = *p_kvdl_index;
	refcount_set(&node->refcount, 1);

	err = rhashtable_insert_fast(&mlxsw_sp->ipv6_addr_ht,
				     &node->ht_node,
				     mlxsw_sp_ipv6_addr_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return 0;

err_rhashtable_insert:
	kfree(node);
err_node_alloc:
err_rips_write:
	/* Freeing the KVDL entry suffices; a stale RIPS write to a freed
	 * entry is not referenced by anything.
	 */
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
			   *p_kvdl_index);
	return err;
}
2952
/* Remove an address node from the hash table, free it and release its
 * KVDL entry. Caller must hold ipv6_addr_ht_lock.
 */
static void mlxsw_sp_ipv6_addr_fini(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_ipv6_addr_node *node)
{
	/* Snapshot the index before the node is freed. */
	u32 kvdl_index = node->kvdl_index;

	rhashtable_remove_fast(&mlxsw_sp->ipv6_addr_ht, &node->ht_node,
			       mlxsw_sp_ipv6_addr_ht_params);
	kfree(node);
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
			   kvdl_index);
}
2964
2965 int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp,
2966                                       const struct in6_addr *addr6,
2967                                       u32 *p_kvdl_index)
2968 {
2969         struct mlxsw_sp_ipv6_addr_node *node;
2970         int err = 0;
2971
2972         mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
2973         node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
2974                                       mlxsw_sp_ipv6_addr_ht_params);
2975         if (node) {
2976                 refcount_inc(&node->refcount);
2977                 *p_kvdl_index = node->kvdl_index;
2978                 goto out_unlock;
2979         }
2980
2981         err = mlxsw_sp_ipv6_addr_init(mlxsw_sp, addr6, p_kvdl_index);
2982
2983 out_unlock:
2984         mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
2985         return err;
2986 }
2987
2988 void
2989 mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6)
2990 {
2991         struct mlxsw_sp_ipv6_addr_node *node;
2992
2993         mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
2994         node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
2995                                       mlxsw_sp_ipv6_addr_ht_params);
2996         if (WARN_ON(!node))
2997                 goto out_unlock;
2998
2999         if (!refcount_dec_and_test(&node->refcount))
3000                 goto out_unlock;
3001
3002         mlxsw_sp_ipv6_addr_fini(mlxsw_sp, node);
3003
3004 out_unlock:
3005         mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
3006 }
3007
3008 static int mlxsw_sp_ipv6_addr_ht_init(struct mlxsw_sp *mlxsw_sp)
3009 {
3010         int err;
3011
3012         err = rhashtable_init(&mlxsw_sp->ipv6_addr_ht,
3013                               &mlxsw_sp_ipv6_addr_ht_params);
3014         if (err)
3015                 return err;
3016
3017         mutex_init(&mlxsw_sp->ipv6_addr_ht_lock);
3018         return 0;
3019 }
3020
/* Tear down the IPv6 address hash table and its mutex. */
static void mlxsw_sp_ipv6_addr_ht_fini(struct mlxsw_sp *mlxsw_sp)
{
	mutex_destroy(&mlxsw_sp->ipv6_addr_ht_lock);
	rhashtable_destroy(&mlxsw_sp->ipv6_addr_ht);
}
3026
3027 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
3028                          const struct mlxsw_bus_info *mlxsw_bus_info,
3029                          struct netlink_ext_ack *extack)
3030 {
3031         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3032         int err;
3033
3034         mlxsw_sp->core = mlxsw_core;
3035         mlxsw_sp->bus_info = mlxsw_bus_info;
3036
3037         mlxsw_sp_parsing_init(mlxsw_sp);
3038
3039         err = mlxsw_sp_base_mac_get(mlxsw_sp);
3040         if (err) {
3041                 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
3042                 return err;
3043         }
3044
3045         err = mlxsw_sp_kvdl_init(mlxsw_sp);
3046         if (err) {
3047                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
3048                 return err;
3049         }
3050
3051         err = mlxsw_sp_pgt_init(mlxsw_sp);
3052         if (err) {
3053                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PGT\n");
3054                 goto err_pgt_init;
3055         }
3056
3057         /* Initialize before FIDs so that the LAG table is at the start of PGT
3058          * and 8-aligned without overallocation.
3059          */
3060         err = mlxsw_sp_lag_init(mlxsw_sp);
3061         if (err) {
3062                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
3063                 goto err_lag_init;
3064         }
3065
3066         err = mlxsw_sp->fid_core_ops->init(mlxsw_sp);
3067         if (err) {
3068                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
3069                 goto err_fid_core_init;
3070         }
3071
3072         err = mlxsw_sp_policers_init(mlxsw_sp);
3073         if (err) {
3074                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n");
3075                 goto err_policers_init;
3076         }
3077
3078         err = mlxsw_sp_traps_init(mlxsw_sp);
3079         if (err) {
3080                 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
3081                 goto err_traps_init;
3082         }
3083
3084         err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
3085         if (err) {
3086                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
3087                 goto err_devlink_traps_init;
3088         }
3089
3090         err = mlxsw_sp_buffers_init(mlxsw_sp);
3091         if (err) {
3092                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
3093                 goto err_buffers_init;
3094         }
3095
3096         /* Initialize SPAN before router and switchdev, so that those components
3097          * can call mlxsw_sp_span_respin().
3098          */
3099         err = mlxsw_sp_span_init(mlxsw_sp);
3100         if (err) {
3101                 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
3102                 goto err_span_init;
3103         }
3104
3105         err = mlxsw_sp_switchdev_init(mlxsw_sp);
3106         if (err) {
3107                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
3108                 goto err_switchdev_init;
3109         }
3110
3111         err = mlxsw_sp_counter_pool_init(mlxsw_sp);
3112         if (err) {
3113                 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
3114                 goto err_counter_pool_init;
3115         }
3116
3117         err = mlxsw_sp_afa_init(mlxsw_sp);
3118         if (err) {
3119                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
3120                 goto err_afa_init;
3121         }
3122
3123         err = mlxsw_sp_ipv6_addr_ht_init(mlxsw_sp);
3124         if (err) {
3125                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize hash table for IPv6 addresses\n");
3126                 goto err_ipv6_addr_ht_init;
3127         }
3128
3129         err = mlxsw_sp_nve_init(mlxsw_sp);
3130         if (err) {
3131                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
3132                 goto err_nve_init;
3133         }
3134
3135         err = mlxsw_sp_port_range_init(mlxsw_sp);
3136         if (err) {
3137                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize port ranges\n");
3138                 goto err_port_range_init;
3139         }
3140
3141         err = mlxsw_sp_acl_init(mlxsw_sp);
3142         if (err) {
3143                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
3144                 goto err_acl_init;
3145         }
3146
3147         err = mlxsw_sp_router_init(mlxsw_sp, extack);
3148         if (err) {
3149                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
3150                 goto err_router_init;
3151         }
3152
3153         if (mlxsw_sp->bus_info->read_clock_capable) {
3154                 /* NULL is a valid return value from clock_init */
3155                 mlxsw_sp->clock =
3156                         mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
3157                                                       mlxsw_sp->bus_info->dev);
3158                 if (IS_ERR(mlxsw_sp->clock)) {
3159                         err = PTR_ERR(mlxsw_sp->clock);
3160                         dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
3161                         goto err_ptp_clock_init;
3162                 }
3163         }
3164
3165         if (mlxsw_sp->clock) {
3166                 /* NULL is a valid return value from ptp_ops->init */
3167                 mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
3168                 if (IS_ERR(mlxsw_sp->ptp_state)) {
3169                         err = PTR_ERR(mlxsw_sp->ptp_state);
3170                         dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
3171                         goto err_ptp_init;
3172                 }
3173         }
3174
3175         /* Initialize netdevice notifier after SPAN is initialized, so that the
3176          * event handler can call SPAN respin.
3177          */
3178         mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
3179         err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
3180                                               &mlxsw_sp->netdevice_nb);
3181         if (err) {
3182                 dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
3183                 goto err_netdev_notifier;
3184         }
3185
3186         err = mlxsw_sp_dpipe_init(mlxsw_sp);
3187         if (err) {
3188                 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
3189                 goto err_dpipe_init;
3190         }
3191
3192         err = mlxsw_sp_port_module_info_init(mlxsw_sp);
3193         if (err) {
3194                 dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
3195                 goto err_port_module_info_init;
3196         }
3197
3198         err = rhashtable_init(&mlxsw_sp->sample_trigger_ht,
3199                               &mlxsw_sp_sample_trigger_ht_params);
3200         if (err) {
3201                 dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n");
3202                 goto err_sample_trigger_init;
3203         }
3204
3205         err = mlxsw_sp_ports_create(mlxsw_sp);
3206         if (err) {
3207                 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
3208                 goto err_ports_create;
3209         }
3210
3211         return 0;
3212
3213 err_ports_create:
3214         rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
3215 err_sample_trigger_init:
3216         mlxsw_sp_port_module_info_fini(mlxsw_sp);
3217 err_port_module_info_init:
3218         mlxsw_sp_dpipe_fini(mlxsw_sp);
3219 err_dpipe_init:
3220         unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
3221                                           &mlxsw_sp->netdevice_nb);
3222 err_netdev_notifier:
3223         if (mlxsw_sp->clock)
3224                 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
3225 err_ptp_init:
3226         if (mlxsw_sp->clock)
3227                 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
3228 err_ptp_clock_init:
3229         mlxsw_sp_router_fini(mlxsw_sp);
3230 err_router_init:
3231         mlxsw_sp_acl_fini(mlxsw_sp);
3232 err_acl_init:
3233         mlxsw_sp_port_range_fini(mlxsw_sp);
3234 err_port_range_init:
3235         mlxsw_sp_nve_fini(mlxsw_sp);
3236 err_nve_init:
3237         mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
3238 err_ipv6_addr_ht_init:
3239         mlxsw_sp_afa_fini(mlxsw_sp);
3240 err_afa_init:
3241         mlxsw_sp_counter_pool_fini(mlxsw_sp);
3242 err_counter_pool_init:
3243         mlxsw_sp_switchdev_fini(mlxsw_sp);
3244 err_switchdev_init:
3245         mlxsw_sp_span_fini(mlxsw_sp);
3246 err_span_init:
3247         mlxsw_sp_buffers_fini(mlxsw_sp);
3248 err_buffers_init:
3249         mlxsw_sp_devlink_traps_fini(mlxsw_sp);
3250 err_devlink_traps_init:
3251         mlxsw_sp_traps_fini(mlxsw_sp);
3252 err_traps_init:
3253         mlxsw_sp_policers_fini(mlxsw_sp);
3254 err_policers_init:
3255         mlxsw_sp->fid_core_ops->fini(mlxsw_sp);
3256 err_fid_core_init:
3257         mlxsw_sp_lag_fini(mlxsw_sp);
3258 err_lag_init:
3259         mlxsw_sp_pgt_fini(mlxsw_sp);
3260 err_pgt_init:
3261         mlxsw_sp_kvdl_fini(mlxsw_sp);
3262         mlxsw_sp_parsing_fini(mlxsw_sp);
3263         return err;
3264 }
3265
3266 static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
3267                           const struct mlxsw_bus_info *mlxsw_bus_info,
3268                           struct netlink_ext_ack *extack)
3269 {
3270         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3271
3272         mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops;
3273         mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
3274         mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
3275         mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
3276         mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
3277         mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
3278         mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
3279         mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
3280         mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
3281         mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
3282         mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
3283         mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
3284         mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
3285         mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
3286         mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
3287         mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
3288         mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops;
3289         mlxsw_sp->router_ops = &mlxsw_sp1_router_ops;
3290         mlxsw_sp->listeners = mlxsw_sp1_listener;
3291         mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
3292         mlxsw_sp->fid_core_ops = &mlxsw_sp1_fid_core_ops;
3293         mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
3294         mlxsw_sp->pgt_smpe_index_valid = true;
3295
3296         return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
3297 }
3298
3299 static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
3300                           const struct mlxsw_bus_info *mlxsw_bus_info,
3301                           struct netlink_ext_ack *extack)
3302 {
3303         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3304
3305         mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
3306         mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
3307         mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
3308         mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
3309         mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
3310         mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
3311         mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
3312         mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
3313         mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
3314         mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
3315         mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
3316         mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
3317         mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
3318         mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
3319         mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
3320         mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
3321         mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
3322         mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
3323         mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
3324         mlxsw_sp->listeners = mlxsw_sp2_listener;
3325         mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
3326         mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops;
3327         mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
3328         mlxsw_sp->pgt_smpe_index_valid = false;
3329
3330         return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
3331 }
3332
3333 static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
3334                           const struct mlxsw_bus_info *mlxsw_bus_info,
3335                           struct netlink_ext_ack *extack)
3336 {
3337         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3338
3339         mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
3340         mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
3341         mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
3342         mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
3343         mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
3344         mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
3345         mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
3346         mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
3347         mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
3348         mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
3349         mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
3350         mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
3351         mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
3352         mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
3353         mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
3354         mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
3355         mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
3356         mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
3357         mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
3358         mlxsw_sp->listeners = mlxsw_sp2_listener;
3359         mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
3360         mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops;
3361         mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
3362         mlxsw_sp->pgt_smpe_index_valid = false;
3363
3364         return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
3365 }
3366
3367 static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
3368                           const struct mlxsw_bus_info *mlxsw_bus_info,
3369                           struct netlink_ext_ack *extack)
3370 {
3371         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3372
3373         mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
3374         mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
3375         mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
3376         mlxsw_sp->afk_ops = &mlxsw_sp4_afk_ops;
3377         mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
3378         mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
3379         mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
3380         mlxsw_sp->acl_bf_ops = &mlxsw_sp4_acl_bf_ops;
3381         mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
3382         mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
3383         mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
3384         mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
3385         mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
3386         mlxsw_sp->ptp_ops = &mlxsw_sp4_ptp_ops;
3387         mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
3388         mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
3389         mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
3390         mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
3391         mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
3392         mlxsw_sp->listeners = mlxsw_sp2_listener;
3393         mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
3394         mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops;
3395         mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4;
3396         mlxsw_sp->pgt_smpe_index_valid = false;
3397
3398         return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
3399 }
3400
/* Driver removal path, common to all Spectrum generations. Tears down
 * everything the init path set up, in strict reverse order of
 * initialization — do not reorder these calls. In particular, SPAN is
 * finalized only after switchdev and the router, which may call SPAN
 * respin (see the comment in the init path above mlxsw_sp_span_init()).
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
	/* PTP state and clock exist only if a clock was created during
	 * init; clock_init() may also legitimately return NULL.
	 */
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_port_range_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_policers_fini(mlxsw_sp);
	mlxsw_sp->fid_core_ops->fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_pgt_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	mlxsw_sp_parsing_fini(mlxsw_sp);
}
3434
/* Device configuration profile for Spectrum-1. InfiniBand multicast
 * groups and partition keys are capped at zero (Ethernet-only use),
 * and the single switch partition (SWID) is Ethernet-typed. The KVD is
 * host-partitioned: a fixed-size linear part plus hash single/double
 * parts split in a 59:41 ratio (see mlxsw_sp1_resources_kvd_register()
 * and mlxsw_sp_kvd_sizes_get()).
 */
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_flood_mode		= 1,
	.flood_mode			= MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_ubridge			= 1,
	.ubridge			= 1,
	.used_kvd_sizes			= 1,
	.kvd_hash_single_parts		= 59,
	.kvd_hash_double_parts		= 41,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
3455
/* Device configuration profile for Spectrum-2 and later (also used by
 * Spectrum-3). Unlike Spectrum-1, no KVD partitioning is configured by
 * the host. CQE time stamps are requested in UTC format, and software
 * LAG mode and CFF flood mode are marked as preferred.
 */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_flood_mode		= 1,
	.flood_mode			= MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_ubridge			= 1,
	.ubridge			= 1,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.used_cqe_time_stamp_type	= 1,
	.cqe_time_stamp_type		= MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
	.lag_mode_prefer_sw		= true,
	.flood_mode_prefer_cff		= true,
};
3476
/* Reduce number of LAGs from full capacity (256) to the maximum supported LAGs
 * in Spectrum-2/3, to avoid regression in number of free entries in the PGT
 * table.
 */
#define MLXSW_SP4_CONFIG_PROFILE_MAX_LAG 128

/* Device configuration profile for Spectrum-4. Identical to the
 * Spectrum-2/3 profile except for the reduced LAG maximum above.
 */
static const struct mlxsw_config_profile mlxsw_sp4_config_profile = {
	.used_max_lag			= 1,
	.max_lag			= MLXSW_SP4_CONFIG_PROFILE_MAX_LAG,
	.used_flood_mode		= 1,
	.flood_mode			= MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_ubridge			= 1,
	.ubridge			= 1,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.used_cqe_time_stamp_type	= 1,
	.cqe_time_stamp_type		= MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
	.lag_mode_prefer_sw		= true,
	.flood_mode_prefer_cff		= true,
};
3505
3506 static void
3507 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
3508                                       struct devlink_resource_size_params *kvd_size_params,
3509                                       struct devlink_resource_size_params *linear_size_params,
3510                                       struct devlink_resource_size_params *hash_double_size_params,
3511                                       struct devlink_resource_size_params *hash_single_size_params)
3512 {
3513         u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
3514                                                  KVD_SINGLE_MIN_SIZE);
3515         u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
3516                                                  KVD_DOUBLE_MIN_SIZE);
3517         u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3518         u32 linear_size_min = 0;
3519
3520         devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
3521                                           MLXSW_SP_KVD_GRANULARITY,
3522                                           DEVLINK_RESOURCE_UNIT_ENTRY);
3523         devlink_resource_size_params_init(linear_size_params, linear_size_min,
3524                                           kvd_size - single_size_min -
3525                                           double_size_min,
3526                                           MLXSW_SP_KVD_GRANULARITY,
3527                                           DEVLINK_RESOURCE_UNIT_ENTRY);
3528         devlink_resource_size_params_init(hash_double_size_params,
3529                                           double_size_min,
3530                                           kvd_size - single_size_min -
3531                                           linear_size_min,
3532                                           MLXSW_SP_KVD_GRANULARITY,
3533                                           DEVLINK_RESOURCE_UNIT_ENTRY);
3534         devlink_resource_size_params_init(hash_single_size_params,
3535                                           single_size_min,
3536                                           kvd_size - double_size_min -
3537                                           linear_size_min,
3538                                           MLXSW_SP_KVD_GRANULARITY,
3539                                           DEVLINK_RESOURCE_UNIT_ENTRY);
3540 }
3541
3542 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
3543 {
3544         struct devlink *devlink = priv_to_devlink(mlxsw_core);
3545         struct devlink_resource_size_params hash_single_size_params;
3546         struct devlink_resource_size_params hash_double_size_params;
3547         struct devlink_resource_size_params linear_size_params;
3548         struct devlink_resource_size_params kvd_size_params;
3549         u32 kvd_size, single_size, double_size, linear_size;
3550         const struct mlxsw_config_profile *profile;
3551         int err;
3552
3553         profile = &mlxsw_sp1_config_profile;
3554         if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
3555                 return -EIO;
3556
3557         mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
3558                                               &linear_size_params,
3559                                               &hash_double_size_params,
3560                                               &hash_single_size_params);
3561
3562         kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3563         err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
3564                                      kvd_size, MLXSW_SP_RESOURCE_KVD,
3565                                      DEVLINK_RESOURCE_ID_PARENT_TOP,
3566                                      &kvd_size_params);
3567         if (err)
3568                 return err;
3569
3570         linear_size = profile->kvd_linear_size;
3571         err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
3572                                      linear_size,
3573                                      MLXSW_SP_RESOURCE_KVD_LINEAR,
3574                                      MLXSW_SP_RESOURCE_KVD,
3575                                      &linear_size_params);
3576         if (err)
3577                 return err;
3578
3579         err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
3580         if  (err)
3581                 return err;
3582
3583         double_size = kvd_size - linear_size;
3584         double_size *= profile->kvd_hash_double_parts;
3585         double_size /= profile->kvd_hash_double_parts +
3586                        profile->kvd_hash_single_parts;
3587         double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
3588         err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
3589                                      double_size,
3590                                      MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
3591                                      MLXSW_SP_RESOURCE_KVD,
3592                                      &hash_double_size_params);
3593         if (err)
3594                 return err;
3595
3596         single_size = kvd_size - double_size - linear_size;
3597         err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
3598                                      single_size,
3599                                      MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
3600                                      MLXSW_SP_RESOURCE_KVD,
3601                                      &hash_single_size_params);
3602         if (err)
3603                 return err;
3604
3605         return 0;
3606 }
3607
3608 static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
3609 {
3610         struct devlink *devlink = priv_to_devlink(mlxsw_core);
3611         struct devlink_resource_size_params kvd_size_params;
3612         u32 kvd_size;
3613
3614         if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
3615                 return -EIO;
3616
3617         kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3618         devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
3619                                           MLXSW_SP_KVD_GRANULARITY,
3620                                           DEVLINK_RESOURCE_UNIT_ENTRY);
3621
3622         return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
3623                                       kvd_size, MLXSW_SP_RESOURCE_KVD,
3624                                       DEVLINK_RESOURCE_ID_PARENT_TOP,
3625                                       &kvd_size_params);
3626 }
3627
3628 static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
3629 {
3630         struct devlink *devlink = priv_to_devlink(mlxsw_core);
3631         struct devlink_resource_size_params span_size_params;
3632         u32 max_span;
3633
3634         if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
3635                 return -EIO;
3636
3637         max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
3638         devlink_resource_size_params_init(&span_size_params, max_span, max_span,
3639                                           1, DEVLINK_RESOURCE_UNIT_ENTRY);
3640
3641         return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
3642                                       max_span, MLXSW_SP_RESOURCE_SPAN,
3643                                       DEVLINK_RESOURCE_ID_PARENT_TOP,
3644                                       &span_size_params);
3645 }
3646
3647 static int
3648 mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core)
3649 {
3650         struct devlink *devlink = priv_to_devlink(mlxsw_core);
3651         struct devlink_resource_size_params size_params;
3652         u8 max_rif_mac_profiles;
3653
3654         if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES))
3655                 max_rif_mac_profiles = 1;
3656         else
3657                 max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core,
3658                                                           MAX_RIF_MAC_PROFILES);
3659         devlink_resource_size_params_init(&size_params, max_rif_mac_profiles,
3660                                           max_rif_mac_profiles, 1,
3661                                           DEVLINK_RESOURCE_UNIT_ENTRY);
3662
3663         return devl_resource_register(devlink,
3664                                       "rif_mac_profiles",
3665                                       max_rif_mac_profiles,
3666                                       MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
3667                                       DEVLINK_RESOURCE_ID_PARENT_TOP,
3668                                       &size_params);
3669 }
3670
3671 static int mlxsw_sp_resources_rifs_register(struct mlxsw_core *mlxsw_core)
3672 {
3673         struct devlink *devlink = priv_to_devlink(mlxsw_core);
3674         struct devlink_resource_size_params size_params;
3675         u64 max_rifs;
3676
3677         if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIFS))
3678                 return -EIO;
3679
3680         max_rifs = MLXSW_CORE_RES_GET(mlxsw_core, MAX_RIFS);
3681         devlink_resource_size_params_init(&size_params, max_rifs, max_rifs,
3682                                           1, DEVLINK_RESOURCE_UNIT_ENTRY);
3683
3684         return devl_resource_register(devlink, "rifs", max_rifs,
3685                                       MLXSW_SP_RESOURCE_RIFS,
3686                                       DEVLINK_RESOURCE_ID_PARENT_TOP,
3687                                       &size_params);
3688 }
3689
3690 static int
3691 mlxsw_sp_resources_port_range_register(struct mlxsw_core *mlxsw_core)
3692 {
3693         struct devlink *devlink = priv_to_devlink(mlxsw_core);
3694         struct devlink_resource_size_params size_params;
3695         u64 max;
3696
3697         if (!MLXSW_CORE_RES_VALID(mlxsw_core, ACL_MAX_L4_PORT_RANGE))
3698                 return -EIO;
3699
3700         max = MLXSW_CORE_RES_GET(mlxsw_core, ACL_MAX_L4_PORT_RANGE);
3701         devlink_resource_size_params_init(&size_params, max, max, 1,
3702                                           DEVLINK_RESOURCE_UNIT_ENTRY);
3703
3704         return devl_resource_register(devlink, "port_range_registers", max,
3705                                       MLXSW_SP_RESOURCE_PORT_RANGE_REGISTERS,
3706                                       DEVLINK_RESOURCE_ID_PARENT_TOP,
3707                                       &size_params);
3708 }
3709
/* Register all devlink resources for Spectrum-1. A failure of the
 * first registration returns directly; for any later failure,
 * devl_resources_unregister() drops everything registered so far, so a
 * single unwind label suffices.
 */
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_policer_resources_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_resources_rifs_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_resources_port_range_register(mlxsw_core);
	if (err)
		goto err_unregister;

	return 0;

err_unregister:
	devl_resources_unregister(priv_to_devlink(mlxsw_core));
	return err;
}
3753
/* Register all devlink resources for Spectrum-2 and later. Same
 * structure as the Spectrum-1 variant except for the KVD registration:
 * first failure returns directly, later failures unwind everything via
 * devl_resources_unregister().
 */
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_policer_resources_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_resources_rifs_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_resources_port_range_register(mlxsw_core);
	if (err)
		goto err_unregister;

	return 0;

err_unregister:
	devl_resources_unregister(priv_to_devlink(mlxsw_core));
	return err;
}
3797
/* mlxsw_sp_kvd_sizes_get() - resolve the sizes of the three KVD parts.
 * @mlxsw_core: device handle.
 * @profile: config profile supplying the default linear size and the
 *	single/double hash parts ratio.
 * @p_single_size: out, size of the hash-single part, in entries.
 * @p_double_size: out, size of the hash-double part, in entries.
 * @p_linear_size: out, size of the linear part, in entries.
 *
 * Each size is read from devlink when the user configured the
 * corresponding resource; otherwise it is derived from the profile
 * defaults. Returns 0 on success, -EIO when the required device
 * resources are absent or the resulting sizes are out of range.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
                                  const struct mlxsw_config_profile *profile,
                                  u64 *p_single_size, u64 *p_double_size,
                                  u64 *p_linear_size)
{
        struct devlink *devlink = priv_to_devlink(mlxsw_core);
        u32 double_size;
        int err;

        if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
            !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
                return -EIO;

        /* The hash part is what left of the kvd without the
         * linear part. It is split to the single size and
         * double size by the parts ratio from the profile.
         * Both sizes must be a multiplications of the
         * granularity from the profile. In case the user
         * provided the sizes they are obtained via devlink.
         */
        err = devl_resource_size_get(devlink,
                                     MLXSW_SP_RESOURCE_KVD_LINEAR,
                                     p_linear_size);
        if (err)
                /* Not user-configured; fall back to the profile default. */
                *p_linear_size = profile->kvd_linear_size;

        err = devl_resource_size_get(devlink,
                                     MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
                                     p_double_size);
        if (err) {
                /* Derive the double part from the KVD memory remaining
                 * after the linear part, using the profile ratio and
                 * rounded down to the allocation granularity.
                 */
                double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
                              *p_linear_size;
                double_size *= profile->kvd_hash_double_parts;
                double_size /= profile->kvd_hash_double_parts +
                               profile->kvd_hash_single_parts;
                *p_double_size = rounddown(double_size,
                                           MLXSW_SP_KVD_GRANULARITY);
        }

        err = devl_resource_size_get(devlink,
                                     MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
                                     p_single_size);
        if (err)
                /* Single part takes whatever is left. */
                *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
                                 *p_double_size - *p_linear_size;

        /* Check results are legal. */
        if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
            *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
            MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
                return -EIO;

        return 0;
}
3852
3853 static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
3854                                      struct sk_buff *skb, u16 local_port)
3855 {
3856         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3857
3858         skb_pull(skb, MLXSW_TXHDR_LEN);
3859         mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
3860 }
3861
/* Driver description for Spectrum-1 ASICs. Unlike the later generations
 * below, SP1 provides kvd_sizes_get (its KVD is partitioned) and does not
 * support CQE version 2 on SDQs.
 */
static struct mlxsw_driver mlxsw_sp1_driver = {
	.kind				= mlxsw_sp1_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.fw_req_rev			= &mlxsw_sp1_fw_rev,
	.fw_filename			= MLXSW_SP1_FW_FILENAME,
	.init				= mlxsw_sp1_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.resources_register		= mlxsw_sp1_resources_register,
	.kvd_sizes_get			= mlxsw_sp_kvd_sizes_get,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.profile			= &mlxsw_sp1_config_profile,
	.sdq_supports_cqe_v2		= false,
};
3896
/* Driver description for Spectrum-2 ASICs. Relative to SP1 it additionally
 * supports selective port removal (ports_remove_selected) and CQE version 2
 * on SDQs, and needs no kvd_sizes_get.
 */
static struct mlxsw_driver mlxsw_sp2_driver = {
	.kind				= mlxsw_sp2_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.fw_req_rev			= &mlxsw_sp2_fw_rev,
	.fw_filename			= MLXSW_SP2_FW_FILENAME,
	.init				= mlxsw_sp2_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.ports_remove_selected		= mlxsw_sp_ports_remove_selected,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.resources_register		= mlxsw_sp2_resources_register,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.profile			= &mlxsw_sp2_config_profile,
	.sdq_supports_cqe_v2		= true,
};
3931
/* Driver description for Spectrum-3 ASICs. SP3 has its own init and
 * firmware requirements, but reuses the SP2 resource registration and
 * config profile.
 */
static struct mlxsw_driver mlxsw_sp3_driver = {
	.kind				= mlxsw_sp3_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.fw_req_rev			= &mlxsw_sp3_fw_rev,
	.fw_filename			= MLXSW_SP3_FW_FILENAME,
	.init				= mlxsw_sp3_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.ports_remove_selected		= mlxsw_sp_ports_remove_selected,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.resources_register		= mlxsw_sp2_resources_register,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.profile			= &mlxsw_sp2_config_profile,
	.sdq_supports_cqe_v2		= true,
};
3966
/* Driver description for Spectrum-4 ASICs. Note that unlike SP1-SP3, no
 * required firmware revision/filename is specified here, so no firmware
 * version enforcement happens at this level.
 */
static struct mlxsw_driver mlxsw_sp4_driver = {
	.kind				= mlxsw_sp4_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp4_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.ports_remove_selected		= mlxsw_sp_ports_remove_selected,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.resources_register		= mlxsw_sp2_resources_register,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.profile			= &mlxsw_sp4_config_profile,
	.sdq_supports_cqe_v2		= true,
};
3999
4000 bool mlxsw_sp_port_dev_check(const struct net_device *dev)
4001 {
4002         return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
4003 }
4004
4005 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev,
4006                                    struct netdev_nested_priv *priv)
4007 {
4008         int ret = 0;
4009
4010         if (mlxsw_sp_port_dev_check(lower_dev)) {
4011                 priv->data = (void *)netdev_priv(lower_dev);
4012                 ret = 1;
4013         }
4014
4015         return ret;
4016 }
4017
4018 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
4019 {
4020         struct netdev_nested_priv priv = {
4021                 .data = NULL,
4022         };
4023
4024         if (mlxsw_sp_port_dev_check(dev))
4025                 return netdev_priv(dev);
4026
4027         netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv);
4028
4029         return (struct mlxsw_sp_port *)priv.data;
4030 }
4031
4032 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
4033 {
4034         struct mlxsw_sp_port *mlxsw_sp_port;
4035
4036         mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
4037         return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
4038 }
4039
4040 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
4041 {
4042         struct netdev_nested_priv priv = {
4043                 .data = NULL,
4044         };
4045
4046         if (mlxsw_sp_port_dev_check(dev))
4047                 return netdev_priv(dev);
4048
4049         netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
4050                                       &priv);
4051
4052         return (struct mlxsw_sp_port *)priv.data;
4053 }
4054
/* Take a reference on the increased parser depth. The first user writes
 * the MPRS register; subsequent users only bump the refcount. Returns 0 on
 * success or the register-write errno.
 */
int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];
	int err = 0;

	mutex_lock(&mlxsw_sp->parsing.lock);

	/* Depth already increased by another user - just take a reference. */
	if (refcount_inc_not_zero(&mlxsw_sp->parsing.parsing_depth_ref))
		goto out_unlock;

	mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_INCREASED_PARSING_DEPTH,
			    mlxsw_sp->parsing.vxlan_udp_dport);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	if (err)
		goto out_unlock;

	/* Cache the new depth and publish the first reference only after
	 * the register write succeeded.
	 */
	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_INCREASED_PARSING_DEPTH;
	refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 1);

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
	return err;
}
4078
/* Drop one reference on the increased parser depth; the last user restores
 * the default depth. The MPRS write result is deliberately ignored - this
 * is best-effort teardown in a void function.
 */
void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];

	mutex_lock(&mlxsw_sp->parsing.lock);

	/* Other users still need the increased depth. */
	if (!refcount_dec_and_test(&mlxsw_sp->parsing.parsing_depth_ref))
		goto out_unlock;

	mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_DEFAULT_PARSING_DEPTH,
			    mlxsw_sp->parsing.vxlan_udp_dport);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
}
4096
/* Update the VXLAN UDP destination port the parser matches on, keeping the
 * currently configured parsing depth. The cached dport is only updated
 * after the register write succeeds. Returns 0 or the write errno.
 */
int mlxsw_sp_parsing_vxlan_udp_dport_set(struct mlxsw_sp *mlxsw_sp,
					 __be16 udp_dport)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];
	int err;

	mutex_lock(&mlxsw_sp->parsing.lock);

	/* The pack helper is given the port in CPU byte order. */
	mlxsw_reg_mprs_pack(mprs_pl, mlxsw_sp->parsing.parsing_depth,
			    be16_to_cpu(udp_dport));
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	if (err)
		goto out_unlock;

	mlxsw_sp->parsing.vxlan_udp_dport = be16_to_cpu(udp_dport);

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
	return err;
}
4117
/* Detach @mlxsw_sp_port from the bridge @lag_dev is enslaved to (if any)
 * and from the bridges of @lag_dev's upper devices that are bridge ports.
 * Used when the port leaves the LAG, at which point bridge memberships
 * obtained through the LAG no longer apply to it.
 */
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	/* Uppers of the LAG may themselves be bridge ports; leave their
	 * bridges as well.
	 */
	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}
4136
4137 static struct mlxsw_sp_lag *
4138 mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev,
4139                     struct netlink_ext_ack *extack)
4140 {
4141         char sldr_pl[MLXSW_REG_SLDR_LEN];
4142         struct mlxsw_sp_lag *lag;
4143         u16 lag_id;
4144         int i, err;
4145
4146         for (i = 0; i < mlxsw_sp->max_lag; i++) {
4147                 if (!mlxsw_sp->lags[i].dev)
4148                         break;
4149         }
4150
4151         if (i == mlxsw_sp->max_lag) {
4152                 NL_SET_ERR_MSG_MOD(extack,
4153                                    "Exceeded number of supported LAG devices");
4154                 return ERR_PTR(-EBUSY);
4155         }
4156
4157         lag_id = i;
4158         mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
4159         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4160         if (err)
4161                 return ERR_PTR(err);
4162
4163         lag = &mlxsw_sp->lags[lag_id];
4164         lag->lag_id = lag_id;
4165         lag->dev = lag_dev;
4166         refcount_set(&lag->ref_count, 1);
4167
4168         return lag;
4169 }
4170
4171 static int
4172 mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_lag *lag)
4173 {
4174         char sldr_pl[MLXSW_REG_SLDR_LEN];
4175
4176         lag->dev = NULL;
4177
4178         mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag->lag_id);
4179         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4180 }
4181
4182 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
4183                                      u16 lag_id, u8 port_index)
4184 {
4185         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4186         char slcor_pl[MLXSW_REG_SLCOR_LEN];
4187
4188         mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
4189                                       lag_id, port_index);
4190         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4191 }
4192
4193 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
4194                                         u16 lag_id)
4195 {
4196         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4197         char slcor_pl[MLXSW_REG_SLCOR_LEN];
4198
4199         mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
4200                                          lag_id);
4201         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4202 }
4203
4204 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
4205                                         u16 lag_id)
4206 {
4207         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4208         char slcor_pl[MLXSW_REG_SLCOR_LEN];
4209
4210         mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
4211                                         lag_id);
4212         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4213 }
4214
4215 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
4216                                          u16 lag_id)
4217 {
4218         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4219         char slcor_pl[MLXSW_REG_SLCOR_LEN];
4220
4221         mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
4222                                          lag_id);
4223         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4224 }
4225
4226 static struct mlxsw_sp_lag *
4227 mlxsw_sp_lag_find(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev)
4228 {
4229         int i;
4230
4231         for (i = 0; i < mlxsw_sp->max_lag; i++) {
4232                 if (!mlxsw_sp->lags[i].dev)
4233                         continue;
4234
4235                 if (mlxsw_sp->lags[i].dev == lag_dev)
4236                         return &mlxsw_sp->lags[i];
4237         }
4238
4239         return NULL;
4240 }
4241
4242 static struct mlxsw_sp_lag *
4243 mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev,
4244                  struct netlink_ext_ack *extack)
4245 {
4246         struct mlxsw_sp_lag *lag;
4247
4248         lag = mlxsw_sp_lag_find(mlxsw_sp, lag_dev);
4249         if (lag) {
4250                 refcount_inc(&lag->ref_count);
4251                 return lag;
4252         }
4253
4254         return mlxsw_sp_lag_create(mlxsw_sp, lag_dev, extack);
4255 }
4256
4257 static void
4258 mlxsw_sp_lag_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_lag *lag)
4259 {
4260         if (!refcount_dec_and_test(&lag->ref_count))
4261                 return;
4262
4263         mlxsw_sp_lag_destroy(mlxsw_sp, lag);
4264 }
4265
4266 static bool
4267 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
4268                           struct net_device *lag_dev,
4269                           struct netdev_lag_upper_info *lag_upper_info,
4270                           struct netlink_ext_ack *extack)
4271 {
4272         if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
4273                 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
4274                 return false;
4275         }
4276         return true;
4277 }
4278
4279 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
4280                                        u16 lag_id, u8 *p_port_index)
4281 {
4282         u64 max_lag_members;
4283         int i;
4284
4285         max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
4286                                              MAX_LAG_MEMBERS);
4287         for (i = 0; i < max_lag_members; i++) {
4288                 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
4289                         *p_port_index = i;
4290                         return 0;
4291                 }
4292         }
4293         return -EBUSY;
4294 }
4295
4296 static int mlxsw_sp_lag_uppers_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
4297                                            struct net_device *lag_dev,
4298                                            struct netlink_ext_ack *extack)
4299 {
4300         struct net_device *upper_dev;
4301         struct net_device *master;
4302         struct list_head *iter;
4303         int done = 0;
4304         int err;
4305
4306         master = netdev_master_upper_dev_get(lag_dev);
4307         if (master && netif_is_bridge_master(master)) {
4308                 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, lag_dev, master,
4309                                                 extack);
4310                 if (err)
4311                         return err;
4312         }
4313
4314         netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
4315                 if (!is_vlan_dev(upper_dev))
4316                         continue;
4317
4318                 master = netdev_master_upper_dev_get(upper_dev);
4319                 if (master && netif_is_bridge_master(master)) {
4320                         err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
4321                                                         upper_dev, master,
4322                                                         extack);
4323                         if (err)
4324                                 goto err_port_bridge_join;
4325                 }
4326
4327                 ++done;
4328         }
4329
4330         return 0;
4331
4332 err_port_bridge_join:
4333         netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
4334                 if (!is_vlan_dev(upper_dev))
4335                         continue;
4336
4337                 master = netdev_master_upper_dev_get(upper_dev);
4338                 if (!master || !netif_is_bridge_master(master))
4339                         continue;
4340
4341                 if (!done--)
4342                         break;
4343
4344                 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, master);
4345         }
4346
4347         master = netdev_master_upper_dev_get(lag_dev);
4348         if (master && netif_is_bridge_master(master))
4349                 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, master);
4350
4351         return err;
4352 }
4353
4354 static void
4355 mlxsw_sp_lag_uppers_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
4356                                  struct net_device *lag_dev)
4357 {
4358         struct net_device *upper_dev;
4359         struct net_device *master;
4360         struct list_head *iter;
4361
4362         netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
4363                 if (!is_vlan_dev(upper_dev))
4364                         continue;
4365
4366                 master = netdev_master_upper_dev_get(upper_dev);
4367                 if (!master)
4368                         continue;
4369
4370                 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, master);
4371         }
4372
4373         master = netdev_master_upper_dev_get(lag_dev);
4374         if (master)
4375                 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, master);
4376 }
4377
4378 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
4379                                   struct net_device *lag_dev,
4380                                   struct netlink_ext_ack *extack)
4381 {
4382         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4383         struct mlxsw_sp_lag *lag;
4384         u16 lag_id;
4385         u8 port_index;
4386         int err;
4387
4388         lag = mlxsw_sp_lag_get(mlxsw_sp, lag_dev, extack);
4389         if (IS_ERR(lag))
4390                 return PTR_ERR(lag);
4391
4392         lag_id = lag->lag_id;
4393         err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
4394         if (err)
4395                 return err;
4396
4397         err = mlxsw_sp_lag_uppers_bridge_join(mlxsw_sp_port, lag_dev,
4398                                               extack);
4399         if (err)
4400                 goto err_lag_uppers_bridge_join;
4401
4402         err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
4403         if (err)
4404                 goto err_col_port_add;
4405
4406         mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
4407                                    mlxsw_sp_port->local_port);
4408         mlxsw_sp_port->lag_id = lag_id;
4409         mlxsw_sp_port->lagged = 1;
4410
4411         err = mlxsw_sp_fid_port_join_lag(mlxsw_sp_port);
4412         if (err)
4413                 goto err_fid_port_join_lag;
4414
4415         /* Port is no longer usable as a router interface */
4416         if (mlxsw_sp_port->default_vlan->fid)
4417                 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
4418
4419         /* Join a router interface configured on the LAG, if exists */
4420         err = mlxsw_sp_router_port_join_lag(mlxsw_sp_port, lag_dev,
4421                                             extack);
4422         if (err)
4423                 goto err_router_join;
4424
4425         err = mlxsw_sp_netdevice_enslavement_replay(mlxsw_sp, lag_dev, extack);
4426         if (err)
4427                 goto err_replay;
4428
4429         return 0;
4430
4431 err_replay:
4432         mlxsw_sp_router_port_leave_lag(mlxsw_sp_port, lag_dev);
4433 err_router_join:
4434         mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port);
4435 err_fid_port_join_lag:
4436         mlxsw_sp_port->lagged = 0;
4437         mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
4438                                      mlxsw_sp_port->local_port);
4439         mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
4440 err_col_port_add:
4441         mlxsw_sp_lag_uppers_bridge_leave(mlxsw_sp_port, lag_dev);
4442 err_lag_uppers_bridge_join:
4443         mlxsw_sp_lag_put(mlxsw_sp, lag);
4444         return err;
4445 }
4446
/* Undo mlxsw_sp_port_lag_join(): remove the port from the LAG's collector,
 * flush its VLANs, detach it from bridges joined via the LAG, leave the
 * FID LAG state and drop the LAG reference. No-op when the port is not
 * LAG-enslaved.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_lag *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = &mlxsw_sp->lags[lag_id];

	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
	/* Make the LAG and its directly linked uppers leave bridges they
	 * are member in
	 */
	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

	mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port);

	/* May destroy the LAG if this was the last reference. */
	mlxsw_sp_lag_put(mlxsw_sp, lag);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;

	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
			       ETH_P_8021Q);
}
4480
4481 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
4482                                       u16 lag_id)
4483 {
4484         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4485         char sldr_pl[MLXSW_REG_SLDR_LEN];
4486
4487         mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
4488                                          mlxsw_sp_port->local_port);
4489         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4490 }
4491
4492 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
4493                                          u16 lag_id)
4494 {
4495         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4496         char sldr_pl[MLXSW_REG_SLDR_LEN];
4497
4498         mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
4499                                             mlxsw_sp_port->local_port);
4500         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4501 }
4502
4503 static int
4504 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
4505 {
4506         int err;
4507
4508         err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
4509                                            mlxsw_sp_port->lag_id);
4510         if (err)
4511                 return err;
4512
4513         err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4514         if (err)
4515                 goto err_dist_port_add;
4516
4517         return 0;
4518
4519 err_dist_port_add:
4520         mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4521         return err;
4522 }
4523
4524 static int
4525 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
4526 {
4527         int err;
4528
4529         err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
4530                                             mlxsw_sp_port->lag_id);
4531         if (err)
4532                 return err;
4533
4534         err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
4535                                             mlxsw_sp_port->lag_id);
4536         if (err)
4537                 goto err_col_port_disable;
4538
4539         return 0;
4540
4541 err_col_port_disable:
4542         mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4543         return err;
4544 }
4545
4546 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
4547                                      struct netdev_lag_lower_state_info *info)
4548 {
4549         if (info->tx_enabled)
4550                 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
4551         else
4552                 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
4553 }
4554
4555 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
4556                                  bool enable)
4557 {
4558         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4559         enum mlxsw_reg_spms_state spms_state;
4560         char *spms_pl;
4561         u16 vid;
4562         int err;
4563
4564         spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
4565                               MLXSW_REG_SPMS_STATE_DISCARDING;
4566
4567         spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
4568         if (!spms_pl)
4569                 return -ENOMEM;
4570         mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
4571
4572         for (vid = 0; vid < VLAN_N_VID; vid++)
4573                 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
4574
4575         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
4576         kfree(spms_pl);
4577         return err;
4578 }
4579
/* Prepare the port for enslavement to an Open vSwitch master: switch the
 * port to virtual-port mode, force STP to forwarding, add the VLAN range
 * and disable learning on each VID (OVS makes its own forwarding
 * decisions). On any failure, unwind the steps already taken in reverse
 * order.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	/* VID range 1..VLAN_N_VID-2 — presumably (begin, end) with the two
	 * bools being (is_member, untagged); confirm against
	 * mlxsw_sp_port_vlan_set().
	 */
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	/* Re-enable learning on the VIDs that were already disabled. */
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}
4614
4615 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
4616 {
4617         u16 vid;
4618
4619         for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
4620                 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
4621                                                vid, true);
4622
4623         mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
4624                                false, false);
4625         mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
4626         mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
4627 }
4628
4629 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
4630 {
4631         unsigned int num_vxlans = 0;
4632         struct net_device *dev;
4633         struct list_head *iter;
4634
4635         netdev_for_each_lower_dev(br_dev, dev, iter) {
4636                 if (netif_is_vxlan(dev))
4637                         num_vxlans++;
4638         }
4639
4640         return num_vxlans > 1;
4641 }
4642
4643 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
4644 {
4645         DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
4646         struct net_device *dev;
4647         struct list_head *iter;
4648
4649         netdev_for_each_lower_dev(br_dev, dev, iter) {
4650                 u16 pvid;
4651                 int err;
4652
4653                 if (!netif_is_vxlan(dev))
4654                         continue;
4655
4656                 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
4657                 if (err || !pvid)
4658                         continue;
4659
4660                 if (test_and_set_bit(pvid, vlans))
4661                         return false;
4662         }
4663
4664         return true;
4665 }
4666
4667 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
4668                                            struct netlink_ext_ack *extack)
4669 {
4670         if (br_multicast_enabled(br_dev)) {
4671                 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
4672                 return false;
4673         }
4674
4675         if (!br_vlan_enabled(br_dev) &&
4676             mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
4677                 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
4678                 return false;
4679         }
4680
4681         if (br_vlan_enabled(br_dev) &&
4682             !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
4683                 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
4684                 return false;
4685         }
4686
4687         return true;
4688 }
4689
4690 static bool mlxsw_sp_netdev_is_master(struct net_device *upper_dev,
4691                                       struct net_device *dev)
4692 {
4693         return upper_dev == netdev_master_upper_dev_get(dev);
4694 }
4695
4696 static int __mlxsw_sp_netdevice_event(struct mlxsw_sp *mlxsw_sp,
4697                                       unsigned long event, void *ptr,
4698                                       bool process_foreign);
4699
/* Recursively validate that all uppers of @dev are supportable, by
 * simulating a NETDEV_PRECHANGEUPPER notification for each (dev, upper)
 * edge as if the link were being created now. Returns 0 when the whole
 * upper hierarchy is acceptable, or the first veto error otherwise.
 */
static int mlxsw_sp_netdevice_validate_uppers(struct mlxsw_sp *mlxsw_sp,
					      struct net_device *dev,
					      struct netlink_ext_ack *extack)
{
	struct net_device *upper_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_upper_dev_rcu(dev, upper_dev, iter) {
		/* Synthesize the same notification payload the core would
		 * send for this link.
		 */
		struct netdev_notifier_changeupper_info info = {
			.info = {
				.dev = dev,
				.extack = extack,
			},
			.master = mlxsw_sp_netdev_is_master(upper_dev, dev),
			.upper_dev = upper_dev,
			.linking = true,

			/* upper_info is relevant for LAG devices. But we would
			 * only need this if LAG were a valid upper above
			 * another upper (e.g. a bridge that is a member of a
			 * LAG), and that is never a valid configuration. So we
			 * can keep this as NULL.
			 */
			.upper_info = NULL,
		};

		err = __mlxsw_sp_netdevice_event(mlxsw_sp,
						 NETDEV_PRECHANGEUPPER,
						 &info, true);
		if (err)
			return err;

		/* Validate the uppers of this upper as well. */
		err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp, upper_dev,
							 extack);
		if (err)
			return err;
	}

	return 0;
}
4741
/* Handle PRECHANGEUPPER/CHANGEUPPER notifications for a mlxsw port.
 * PRECHANGEUPPER vetoes unsupported topologies (with an extack message);
 * CHANGEUPPER reacts to links being created or torn down (bridge, LAG,
 * OVS, macvlan, VLAN uppers). @lower_dev is the device the notification
 * is about (the port itself or, when the port is a LAG member, the LAG);
 * @dev is always the mlxsw port. @replay_deslavement controls whether a
 * bridge unlinking triggers a deslavement replay here or is deferred to
 * the caller (per-LAG handling).
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr,
					       bool replay_deslavement)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;
	u16 proto;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only these upper types are ever offloadable above a port. */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		/* Unlinking is never vetoed. */
		if (!info->linking)
			break;
		/* Vet VxLAN constraints before enslaving to a new bridge. */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		/* If the upper already has uppers of its own (and is not an
		 * already-offloaded bridge), validate that whole hierarchy.
		 */
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp,
								 upper_dev,
								 extack);
			if (err)
				return err;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		/* VLAN-aware bridges are supported with 802.1Q or 802.1AD
		 * protocol only; 802.1AD additionally excludes ports that
		 * already carry a VLAN upper.
		 */
		if (netif_is_bridge_master(upper_dev)) {
			br_vlan_get_proto(upper_dev, &proto);
			if (br_vlan_enabled(upper_dev) &&
			    proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported");
				return -EOPNOTSUPP;
			}
			if (vlan_uses_dev(lower_dev) &&
			    br_vlan_enabled(upper_dev) &&
			    proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported");
				return -EOPNOTSUPP;
			}
		}
		/* The converse direction: no VLAN uppers on a port that is
		 * already enslaved to an 802.1ad bridge.
		 */
		if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) {
			struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev);

			if (br_vlan_enabled(br_dev)) {
				br_vlan_get_proto(br_dev, &proto);
				if (proto == ETH_P_8021AD) {
					NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge");
					return -EOPNOTSUPP;
				}
			}
		}
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		if (is_vlan_dev(upper_dev) && mlxsw_sp_port->security) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a locked port");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			} else {
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
				/* For LAG lowers the caller replays
				 * deslavement once for the whole LAG.
				 */
				if (!replay_deslavement)
					break;
				mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp,
								      lower_dev);
			}
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev, extack);
			} else {
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
				mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp,
								      dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			/* Only react when a bridged VLAN upper is unlinked
			 * from the port: leave the bridge on its behalf.
			 */
			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}
4897
4898 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
4899                                                unsigned long event, void *ptr)
4900 {
4901         struct netdev_notifier_changelowerstate_info *info;
4902         struct mlxsw_sp_port *mlxsw_sp_port;
4903         int err;
4904
4905         mlxsw_sp_port = netdev_priv(dev);
4906         info = ptr;
4907
4908         switch (event) {
4909         case NETDEV_CHANGELOWERSTATE:
4910                 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
4911                         err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
4912                                                         info->lower_state_info);
4913                         if (err)
4914                                 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
4915                 }
4916                 break;
4917         }
4918
4919         return 0;
4920 }
4921
4922 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
4923                                          struct net_device *port_dev,
4924                                          unsigned long event, void *ptr,
4925                                          bool replay_deslavement)
4926 {
4927         switch (event) {
4928         case NETDEV_PRECHANGEUPPER:
4929         case NETDEV_CHANGEUPPER:
4930                 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
4931                                                            event, ptr,
4932                                                            replay_deslavement);
4933         case NETDEV_CHANGELOWERSTATE:
4934                 return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
4935                                                            ptr);
4936         }
4937
4938         return 0;
4939 }
4940
4941 /* Called for LAG or its upper VLAN after the per-LAG-lower processing was done,
4942  * to do any per-LAG / per-LAG-upper processing.
4943  */
4944 static int mlxsw_sp_netdevice_post_lag_event(struct net_device *dev,
4945                                              unsigned long event,
4946                                              void *ptr)
4947 {
4948         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(dev);
4949         struct netdev_notifier_changeupper_info *info = ptr;
4950
4951         if (!mlxsw_sp)
4952                 return 0;
4953
4954         switch (event) {
4955         case NETDEV_CHANGEUPPER:
4956                 if (info->linking)
4957                         break;
4958                 if (netif_is_bridge_master(info->upper_dev))
4959                         mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp, dev);
4960                 break;
4961         }
4962         return 0;
4963 }
4964
4965 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
4966                                         unsigned long event, void *ptr)
4967 {
4968         struct net_device *dev;
4969         struct list_head *iter;
4970         int ret;
4971
4972         netdev_for_each_lower_dev(lag_dev, dev, iter) {
4973                 if (mlxsw_sp_port_dev_check(dev)) {
4974                         ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
4975                                                             ptr, false);
4976                         if (ret)
4977                                 return ret;
4978                 }
4979         }
4980
4981         return mlxsw_sp_netdevice_post_lag_event(lag_dev, event, ptr);
4982 }
4983
/* Handle PRECHANGEUPPER/CHANGEUPPER for a VLAN device on top of a mlxsw
 * port (or LAG member @dev). PRECHANGEUPPER vetoes unsupported uppers of
 * the VLAN device; CHANGEUPPER joins/leaves a bridge on its behalf.
 * @replay_deslavement defers the deslavement replay to the caller when
 * the VLAN sits on a LAG (replayed once per LAG, not per member).
 */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid, bool replay_deslavement)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only bridge, macvlan and VRF uppers are supported above a
		 * port VLAN device.
		 */
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		/* Unlinking is never vetoed. */
		if (!info->linking)
			break;
		/* Vet VxLAN constraints before enslaving to a new bridge. */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		/* If the upper already has uppers of its own (and is not an
		 * already-offloaded bridge), validate that whole hierarchy.
		 */
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp,
								 upper_dev,
								 extack);
			if (err)
				return err;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			} else {
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
				if (!replay_deslavement)
					break;
				mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp,
								      vlan_dev);
			}
		} else if (netif_is_macvlan(upper_dev)) {
			/* Tear down the RIF created for the macvlan. */
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		}
		break;
	}

	return err;
}
5051
5052 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
5053                                                   struct net_device *lag_dev,
5054                                                   unsigned long event,
5055                                                   void *ptr, u16 vid)
5056 {
5057         struct net_device *dev;
5058         struct list_head *iter;
5059         int ret;
5060
5061         netdev_for_each_lower_dev(lag_dev, dev, iter) {
5062                 if (mlxsw_sp_port_dev_check(dev)) {
5063                         ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
5064                                                                  event, ptr,
5065                                                                  vid, false);
5066                         if (ret)
5067                                 return ret;
5068                 }
5069         }
5070
5071         return mlxsw_sp_netdevice_post_lag_event(vlan_dev, event, ptr);
5072 }
5073
5074 static int mlxsw_sp_netdevice_bridge_vlan_event(struct mlxsw_sp *mlxsw_sp,
5075                                                 struct net_device *vlan_dev,
5076                                                 struct net_device *br_dev,
5077                                                 unsigned long event, void *ptr,
5078                                                 u16 vid, bool process_foreign)
5079 {
5080         struct netdev_notifier_changeupper_info *info = ptr;
5081         struct netlink_ext_ack *extack;
5082         struct net_device *upper_dev;
5083
5084         if (!process_foreign && !mlxsw_sp_lower_get(vlan_dev))
5085                 return 0;
5086
5087         extack = netdev_notifier_info_to_extack(&info->info);
5088
5089         switch (event) {
5090         case NETDEV_PRECHANGEUPPER:
5091                 upper_dev = info->upper_dev;
5092                 if (!netif_is_macvlan(upper_dev) &&
5093                     !netif_is_l3_master(upper_dev)) {
5094                         NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5095                         return -EOPNOTSUPP;
5096                 }
5097                 break;
5098         case NETDEV_CHANGEUPPER:
5099                 upper_dev = info->upper_dev;
5100                 if (info->linking)
5101                         break;
5102                 if (netif_is_macvlan(upper_dev))
5103                         mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
5104                 break;
5105         }
5106
5107         return 0;
5108 }
5109
5110 static int mlxsw_sp_netdevice_vlan_event(struct mlxsw_sp *mlxsw_sp,
5111                                          struct net_device *vlan_dev,
5112                                          unsigned long event, void *ptr,
5113                                          bool process_foreign)
5114 {
5115         struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
5116         u16 vid = vlan_dev_vlan_id(vlan_dev);
5117
5118         if (mlxsw_sp_port_dev_check(real_dev))
5119                 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
5120                                                           event, ptr, vid,
5121                                                           true);
5122         else if (netif_is_lag_master(real_dev))
5123                 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
5124                                                               real_dev, event,
5125                                                               ptr, vid);
5126         else if (netif_is_bridge_master(real_dev))
5127                 return mlxsw_sp_netdevice_bridge_vlan_event(mlxsw_sp, vlan_dev,
5128                                                             real_dev, event,
5129                                                             ptr, vid,
5130                                                             process_foreign);
5131
5132         return 0;
5133 }
5134
/* Handle PRECHANGEUPPER/CHANGEUPPER for a bridge device. PRECHANGEUPPER
 * vetoes unsupported uppers of the bridge (only VLAN, macvlan and VRF
 * uppers are allowed, none on an 802.1ad bridge); CHANGEUPPER tears down
 * state on unlinking. Foreign (non-mlxsw-lowered) bridges are ignored
 * unless @process_foreign is set.
 */
static int mlxsw_sp_netdevice_bridge_event(struct mlxsw_sp *mlxsw_sp,
					   struct net_device *br_dev,
					   unsigned long event, void *ptr,
					   bool process_foreign)
{
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	u16 proto;

	if (!process_foreign && !mlxsw_sp_lower_get(br_dev))
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		/* Unlinking is never vetoed. */
		if (!info->linking)
			break;
		if (br_vlan_enabled(br_dev)) {
			br_vlan_get_proto(br_dev, &proto);
			if (proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge");
				return -EOPNOTSUPP;
			}
		}
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		/* Clean up router state tied to the removed upper. */
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}
5187
5188 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
5189                                             unsigned long event, void *ptr)
5190 {
5191         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
5192         struct netdev_notifier_changeupper_info *info = ptr;
5193         struct netlink_ext_ack *extack;
5194         struct net_device *upper_dev;
5195
5196         if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
5197                 return 0;
5198
5199         extack = netdev_notifier_info_to_extack(&info->info);
5200         upper_dev = info->upper_dev;
5201
5202         if (!netif_is_l3_master(upper_dev)) {
5203                 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5204                 return -EOPNOTSUPP;
5205         }
5206
5207         return 0;
5208 }
5209
5210 static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
5211                                           struct net_device *dev,
5212                                           unsigned long event, void *ptr)
5213 {
5214         struct netdev_notifier_changeupper_info *cu_info;
5215         struct netdev_notifier_info *info = ptr;
5216         struct netlink_ext_ack *extack;
5217         struct net_device *upper_dev;
5218
5219         extack = netdev_notifier_info_to_extack(info);
5220
5221         switch (event) {
5222         case NETDEV_CHANGEUPPER:
5223                 cu_info = container_of(info,
5224                                        struct netdev_notifier_changeupper_info,
5225                                        info);
5226                 upper_dev = cu_info->upper_dev;
5227                 if (!netif_is_bridge_master(upper_dev))
5228                         return 0;
5229                 if (!mlxsw_sp_lower_get(upper_dev))
5230                         return 0;
5231                 if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
5232                         return -EOPNOTSUPP;
5233                 if (cu_info->linking) {
5234                         if (!netif_running(dev))
5235                                 return 0;
5236                         /* When the bridge is VLAN-aware, the VNI of the VxLAN
5237                          * device needs to be mapped to a VLAN, but at this
5238                          * point no VLANs are configured on the VxLAN device
5239                          */
5240                         if (br_vlan_enabled(upper_dev))
5241                                 return 0;
5242                         return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
5243                                                           dev, 0, extack);
5244                 } else {
5245                         /* VLANs were already flushed, which triggered the
5246                          * necessary cleanup
5247                          */
5248                         if (br_vlan_enabled(upper_dev))
5249                                 return 0;
5250                         mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
5251                 }
5252                 break;
5253         case NETDEV_PRE_UP:
5254                 upper_dev = netdev_master_upper_dev_get(dev);
5255                 if (!upper_dev)
5256                         return 0;
5257                 if (!netif_is_bridge_master(upper_dev))
5258                         return 0;
5259                 if (!mlxsw_sp_lower_get(upper_dev))
5260                         return 0;
5261                 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
5262                                                   extack);
5263         case NETDEV_DOWN:
5264                 upper_dev = netdev_master_upper_dev_get(dev);
5265                 if (!upper_dev)
5266                         return 0;
5267                 if (!netif_is_bridge_master(upper_dev))
5268                         return 0;
5269                 if (!mlxsw_sp_lower_get(upper_dev))
5270                         return 0;
5271                 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
5272                 break;
5273         }
5274
5275         return 0;
5276 }
5277
5278 static int __mlxsw_sp_netdevice_event(struct mlxsw_sp *mlxsw_sp,
5279                                       unsigned long event, void *ptr,
5280                                       bool process_foreign)
5281 {
5282         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5283         struct mlxsw_sp_span_entry *span_entry;
5284         int err = 0;
5285
5286         if (event == NETDEV_UNREGISTER) {
5287                 span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
5288                 if (span_entry)
5289                         mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
5290         }
5291
5292         if (netif_is_vxlan(dev))
5293                 err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
5294         else if (mlxsw_sp_port_dev_check(dev))
5295                 err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr, true);
5296         else if (netif_is_lag_master(dev))
5297                 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
5298         else if (is_vlan_dev(dev))
5299                 err = mlxsw_sp_netdevice_vlan_event(mlxsw_sp, dev, event, ptr,
5300                                                     process_foreign);
5301         else if (netif_is_bridge_master(dev))
5302                 err = mlxsw_sp_netdevice_bridge_event(mlxsw_sp, dev, event, ptr,
5303                                                       process_foreign);
5304         else if (netif_is_macvlan(dev))
5305                 err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);
5306
5307         return err;
5308 }
5309
5310 static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
5311                                     unsigned long event, void *ptr)
5312 {
5313         struct mlxsw_sp *mlxsw_sp;
5314         int err;
5315
5316         mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
5317         mlxsw_sp_span_respin(mlxsw_sp);
5318         err = __mlxsw_sp_netdevice_event(mlxsw_sp, event, ptr, false);
5319
5320         return notifier_from_errno(err);
5321 }
5322
/* PCI device ID tables and PCI driver descriptors for the four Spectrum
 * generations. Each pci_driver only names itself and its ID table here;
 * probe/remove hooks are filled in by mlxsw_pci_driver_register().
 */

/* Spectrum-1 */
static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },	/* sentinel */
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};

/* Spectrum-2 */
static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },	/* sentinel */
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};

/* Spectrum-3 */
static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },	/* sentinel */
};

static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};

/* Spectrum-4 */
static const struct pci_device_id mlxsw_sp4_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM4), 0},
	{0, },	/* sentinel */
};

static struct pci_driver mlxsw_sp4_pci_driver = {
	.name = mlxsw_sp4_driver_name,
	.id_table = mlxsw_sp4_pci_id_table,
};
5362
5363 static int __init mlxsw_sp_module_init(void)
5364 {
5365         int err;
5366
5367         err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
5368         if (err)
5369                 return err;
5370
5371         err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
5372         if (err)
5373                 goto err_sp2_core_driver_register;
5374
5375         err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
5376         if (err)
5377                 goto err_sp3_core_driver_register;
5378
5379         err = mlxsw_core_driver_register(&mlxsw_sp4_driver);
5380         if (err)
5381                 goto err_sp4_core_driver_register;
5382
5383         err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
5384         if (err)
5385                 goto err_sp1_pci_driver_register;
5386
5387         err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
5388         if (err)
5389                 goto err_sp2_pci_driver_register;
5390
5391         err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
5392         if (err)
5393                 goto err_sp3_pci_driver_register;
5394
5395         err = mlxsw_pci_driver_register(&mlxsw_sp4_pci_driver);
5396         if (err)
5397                 goto err_sp4_pci_driver_register;
5398
5399         return 0;
5400
5401 err_sp4_pci_driver_register:
5402         mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
5403 err_sp3_pci_driver_register:
5404         mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
5405 err_sp2_pci_driver_register:
5406         mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
5407 err_sp1_pci_driver_register:
5408         mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
5409 err_sp4_core_driver_register:
5410         mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
5411 err_sp3_core_driver_register:
5412         mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
5413 err_sp2_core_driver_register:
5414         mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
5415         return err;
5416 }
5417
5418 static void __exit mlxsw_sp_module_exit(void)
5419 {
5420         mlxsw_pci_driver_unregister(&mlxsw_sp4_pci_driver);
5421         mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
5422         mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
5423         mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
5424         mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
5425         mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
5426         mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
5427         mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
5428 }
5429
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <[email protected]>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
/* Expose all four generations' PCI ID tables for module autoloading. */
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table);
/* Firmware images requested at probe time; listed so userspace tooling can
 * bundle them with the module.
 */
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME);
This page took 0.352751 seconds and 4 git commands to generate.