1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2009 Felix Fietkau <[email protected]>
4  * Copyright (C) 2011-2012 Gabor Juhos <[email protected]>
5  * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
6  * Copyright (c) 2016 John Crispin <[email protected]>
7  */
8
9 #include <linux/module.h>
10 #include <linux/phy.h>
11 #include <linux/netdevice.h>
12 #include <linux/bitfield.h>
13 #include <linux/regmap.h>
14 #include <net/dsa.h>
15 #include <linux/of_net.h>
16 #include <linux/of_mdio.h>
17 #include <linux/of_platform.h>
18 #include <linux/if_bridge.h>
19 #include <linux/mdio.h>
20 #include <linux/phylink.h>
21 #include <linux/gpio/consumer.h>
22 #include <linux/etherdevice.h>
23 #include <linux/dsa/tag_qca.h>
24
25 #include "qca8k.h"
26
27 #define MIB_DESC(_s, _o, _n)    \
28         {                       \
29                 .size = (_s),   \
30                 .offset = (_o), \
31                 .name = (_n),   \
32         }
33
34 static const struct qca8k_mib_desc ar8327_mib[] = {
35         MIB_DESC(1, 0x00, "RxBroad"),
36         MIB_DESC(1, 0x04, "RxPause"),
37         MIB_DESC(1, 0x08, "RxMulti"),
38         MIB_DESC(1, 0x0c, "RxFcsErr"),
39         MIB_DESC(1, 0x10, "RxAlignErr"),
40         MIB_DESC(1, 0x14, "RxRunt"),
41         MIB_DESC(1, 0x18, "RxFragment"),
42         MIB_DESC(1, 0x1c, "Rx64Byte"),
43         MIB_DESC(1, 0x20, "Rx128Byte"),
44         MIB_DESC(1, 0x24, "Rx256Byte"),
45         MIB_DESC(1, 0x28, "Rx512Byte"),
46         MIB_DESC(1, 0x2c, "Rx1024Byte"),
47         MIB_DESC(1, 0x30, "Rx1518Byte"),
48         MIB_DESC(1, 0x34, "RxMaxByte"),
49         MIB_DESC(1, 0x38, "RxTooLong"),
50         MIB_DESC(2, 0x3c, "RxGoodByte"),
51         MIB_DESC(2, 0x44, "RxBadByte"),
52         MIB_DESC(1, 0x4c, "RxOverFlow"),
53         MIB_DESC(1, 0x50, "Filtered"),
54         MIB_DESC(1, 0x54, "TxBroad"),
55         MIB_DESC(1, 0x58, "TxPause"),
56         MIB_DESC(1, 0x5c, "TxMulti"),
57         MIB_DESC(1, 0x60, "TxUnderRun"),
58         MIB_DESC(1, 0x64, "Tx64Byte"),
59         MIB_DESC(1, 0x68, "Tx128Byte"),
60         MIB_DESC(1, 0x6c, "Tx256Byte"),
61         MIB_DESC(1, 0x70, "Tx512Byte"),
62         MIB_DESC(1, 0x74, "Tx1024Byte"),
63         MIB_DESC(1, 0x78, "Tx1518Byte"),
64         MIB_DESC(1, 0x7c, "TxMaxByte"),
65         MIB_DESC(1, 0x80, "TxOverSize"),
66         MIB_DESC(2, 0x84, "TxByte"),
67         MIB_DESC(1, 0x8c, "TxCollision"),
68         MIB_DESC(1, 0x90, "TxAbortCol"),
69         MIB_DESC(1, 0x94, "TxMultiCol"),
70         MIB_DESC(1, 0x98, "TxSingleCol"),
71         MIB_DESC(1, 0x9c, "TxExcDefer"),
72         MIB_DESC(1, 0xa0, "TxDefer"),
73         MIB_DESC(1, 0xa4, "TxLateCol"),
74         MIB_DESC(1, 0xa8, "RXUnicast"),
75         MIB_DESC(1, 0xac, "TXUnicast"),
76 };
77
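/* The 32 bit switch registers are reached through a 16 bit MDIO bus: the
 * register address is split into a page (written to the pseudo PHY at
 * address 0x18), an r2 part OR'ed into the PHY address (0x10 | r2) and an
 * r1 offset used for the lo/hi 16 bit word accesses.
 *
 * Illustrative example, derived from the shifts below: reg 0x0654 splits
 * into page 0x3, r2 0x1, r1 0x0a.
 */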
78 static void
79 qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
80 {
81         regaddr >>= 1;
82         *r1 = regaddr & 0x1e;
83
84         regaddr >>= 5;
85         *r2 = regaddr & 0x7;
86
87         regaddr >>= 3;
88         *page = regaddr & 0x3ff;
89 }
90
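/* The last written lo/hi halves are cached so that a 32 bit write which
 * only changes one half costs a single MDIO transaction instead of two.
 */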
91 static int
92 qca8k_set_lo(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 lo)
93 {
94         u16 *cached_lo = &priv->mdio_cache.lo;
95         struct mii_bus *bus = priv->bus;
96         int ret;
97
98         if (lo == *cached_lo)
99                 return 0;
100
101         ret = bus->write(bus, phy_id, regnum, lo);
102         if (ret < 0)
103                 dev_err_ratelimited(&bus->dev,
104                                     "failed to write qca8k 32bit lo register\n");
105
106         *cached_lo = lo;
107         return 0;
108 }
109
110 static int
111 qca8k_set_hi(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 hi)
112 {
113         u16 *cached_hi = &priv->mdio_cache.hi;
114         struct mii_bus *bus = priv->bus;
115         int ret;
116
117         if (hi == *cached_hi)
118                 return 0;
119
120         ret = bus->write(bus, phy_id, regnum, hi);
121         if (ret < 0)
122                 dev_err_ratelimited(&bus->dev,
123                                     "failed to write qca8k 32bit hi register\n");
124
125         *cached_hi = hi;
126         return 0;
127 }
128
129 static int
130 qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
131 {
132         int ret;
133
134         ret = bus->read(bus, phy_id, regnum);
135         if (ret >= 0) {
136                 *val = ret;
137                 ret = bus->read(bus, phy_id, regnum + 1);
138                 *val |= ret << 16;
139         }
140
141         if (ret < 0) {
142                 dev_err_ratelimited(&bus->dev,
143                                     "failed to read qca8k 32bit register\n");
144                 *val = 0;
145                 return ret;
146         }
147
148         return 0;
149 }
150
151 static void
152 qca8k_mii_write32(struct qca8k_priv *priv, int phy_id, u32 regnum, u32 val)
153 {
154         u16 lo, hi;
155         int ret;
156
157         lo = val & 0xffff;
158         hi = (u16)(val >> 16);
159
160         ret = qca8k_set_lo(priv, phy_id, regnum, lo);
161         if (ret >= 0)
162                 ret = qca8k_set_hi(priv, phy_id, regnum + 1, hi);
163 }
164
165 static int
166 qca8k_set_page(struct qca8k_priv *priv, u16 page)
167 {
168         u16 *cached_page = &priv->mdio_cache.page;
169         struct mii_bus *bus = priv->bus;
170         int ret;
171
172         if (page == *cached_page)
173                 return 0;
174
175         ret = bus->write(bus, 0x18, 0, page);
176         if (ret < 0) {
177                 dev_err_ratelimited(&bus->dev,
178                                     "failed to set qca8k page\n");
179                 return ret;
180         }
181
182         *cached_page = page;
183         usleep_range(1000, 2000);
184         return 0;
185 }
186
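/* Thin wrappers around the custom regmap below. Illustrative use only,
 * not taken from this driver:
 *
 *	u32 val;
 *	int ret;
 *
 *	ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &val);
 *	if (!ret)
 *		ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC,
 *				  val | QCA8K_ATU_FUNC_BUSY);
 */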
187 static int
188 qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
189 {
190         return regmap_read(priv->regmap, reg, val);
191 }
192
193 static int
194 qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
195 {
196         return regmap_write(priv->regmap, reg, val);
197 }
198
199 static int
200 qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
201 {
202         return regmap_update_bits(priv->regmap, reg, mask, write_val);
203 }
204
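/* Completion handler for management Ethernet frames coming back from the
 * switch (qca tagger): match the sequence number against the pending
 * request, copy the returned data for reads and complete rw_done so the
 * waiting reader/writer can continue.
 */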
205 static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
206 {
207         struct qca8k_mgmt_eth_data *mgmt_eth_data;
208         struct qca8k_priv *priv = ds->priv;
209         struct qca_mgmt_ethhdr *mgmt_ethhdr;
210         u8 len, cmd;
211
212         mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
213         mgmt_eth_data = &priv->mgmt_eth_data;
214
215         cmd = FIELD_GET(QCA_HDR_MGMT_CMD, mgmt_ethhdr->command);
216         len = FIELD_GET(QCA_HDR_MGMT_LENGTH, mgmt_ethhdr->command);
217
218         /* Make sure the seq matches the requested packet */
219         if (mgmt_ethhdr->seq == mgmt_eth_data->seq)
220                 mgmt_eth_data->ack = true;
221
222         if (cmd == MDIO_READ) {
223                 mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data;
224
225                 /* Get the rest of the 12 bytes of data.
226                  * The read/write function will extract the requested data.
227                  */
228                 if (len > QCA_HDR_MGMT_DATA1_LEN)
229                         memcpy(mgmt_eth_data->data + 1, skb->data,
230                                QCA_HDR_MGMT_DATA2_LEN);
231         }
232
233         complete(&mgmt_eth_data->rw_done);
234 }
235
236 static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
237                                                int priority, unsigned int len)
238 {
239         struct qca_mgmt_ethhdr *mgmt_ethhdr;
240         unsigned int real_len;
241         struct sk_buff *skb;
242         u32 *data2;
243         u16 hdr;
244
245         skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN);
246         if (!skb)
247                 return NULL;
248
249         /* Max value for the len reg is 15 (0xf), but the switch actually returns 16 bytes.
250          * For some reason the steps are:
251          * 0: nothing
252          * 1-4: first 4 bytes
253          * 5-6: first 12 bytes
254          * 7-15: all 16 bytes
255          */
256         if (len == 16)
257                 real_len = 15;
258         else
259                 real_len = len;
260
261         skb_reset_mac_header(skb);
262         skb_set_network_header(skb, skb->len);
263
264         mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN);
265
266         hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
267         hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority);
268         hdr |= QCA_HDR_XMIT_FROM_CPU;
269         hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0));
270         hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG);
271
272         mgmt_ethhdr->command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
273         mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
274         mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
275         mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
276                                            QCA_HDR_MGMT_CHECK_CODE_VAL);
277
278         if (cmd == MDIO_WRITE)
279                 mgmt_ethhdr->mdio_data = *val;
280
281         mgmt_ethhdr->hdr = htons(hdr);
282
283         data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN);
284         if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN)
285                 memcpy(data2, val + 1, len - QCA_HDR_MGMT_DATA1_LEN);
286
287         return skb;
288 }
289
290 static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
291 {
292         struct qca_mgmt_ethhdr *mgmt_ethhdr;
293
294         mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data;
295         mgmt_ethhdr->seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
296 }
297
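/* Read a switch register through the management Ethernet path. Up to 16
 * bytes can be returned: the first word travels in the management header
 * itself, the rest in the 12 byte data2 area of the frame. Returns
 * -EINVAL when no management master is available so callers can fall
 * back to plain MDIO access.
 */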
298 static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
299 {
300         struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
301         struct sk_buff *skb;
302         bool ack;
303         int ret;
304
305         skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL,
306                                       QCA8K_ETHERNET_MDIO_PRIORITY, len);
307         if (!skb)
308                 return -ENOMEM;
309
310         mutex_lock(&mgmt_eth_data->mutex);
311
312         /* Check if mgmt_master is operational */
313         if (!priv->mgmt_master) {
314                 kfree_skb(skb);
315                 mutex_unlock(&mgmt_eth_data->mutex);
316                 return -EINVAL;
317         }
318
319         skb->dev = priv->mgmt_master;
320
321         reinit_completion(&mgmt_eth_data->rw_done);
322
323         /* Increment seq_num and set it in the mdio pkt */
324         mgmt_eth_data->seq++;
325         qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
326         mgmt_eth_data->ack = false;
327
328         dev_queue_xmit(skb);
329
330         ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
331                                           msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
332
333         *val = mgmt_eth_data->data[0];
334         if (len > QCA_HDR_MGMT_DATA1_LEN)
335                 memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN);
336
337         ack = mgmt_eth_data->ack;
338
339         mutex_unlock(&mgmt_eth_data->mutex);
340
341         if (ret <= 0)
342                 return -ETIMEDOUT;
343
344         if (!ack)
345                 return -EINVAL;
346
347         return 0;
348 }
349
350 static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
351 {
352         struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
353         struct sk_buff *skb;
354         bool ack;
355         int ret;
356
357         skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val,
358                                       QCA8K_ETHERNET_MDIO_PRIORITY, len);
359         if (!skb)
360                 return -ENOMEM;
361
362         mutex_lock(&mgmt_eth_data->mutex);
363
364         /* Check if mgmt_master is operational */
365         if (!priv->mgmt_master) {
366                 kfree_skb(skb);
367                 mutex_unlock(&mgmt_eth_data->mutex);
368                 return -EINVAL;
369         }
370
371         skb->dev = priv->mgmt_master;
372
373         reinit_completion(&mgmt_eth_data->rw_done);
374
375         /* Increment seq_num and set it in the mdio pkt */
376         mgmt_eth_data->seq++;
377         qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
378         mgmt_eth_data->ack = false;
379
380         dev_queue_xmit(skb);
381
382         ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
383                                           msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
384
385         ack = mgmt_eth_data->ack;
386
387         mutex_unlock(&mgmt_eth_data->mutex);
388
389         if (ret <= 0)
390                 return -ETIMEDOUT;
391
392         if (!ack)
393                 return -EINVAL;
394
395         return 0;
396 }
397
398 static int
399 qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
400 {
401         u32 val = 0;
402         int ret;
403
404         ret = qca8k_read_eth(priv, reg, &val, sizeof(val));
405         if (ret)
406                 return ret;
407
408         val &= ~mask;
409         val |= write_val;
410
411         return qca8k_write_eth(priv, reg, &val, sizeof(val));
412 }
413
414 static int
415 qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
416 {
417         int i, count = len / sizeof(u32), ret;
418
419         if (priv->mgmt_master && !qca8k_read_eth(priv, reg, val, len))
420                 return 0;
421
422         for (i = 0; i < count; i++) {
423                 ret = regmap_read(priv->regmap, reg + (i * 4), val + i);
424                 if (ret < 0)
425                         return ret;
426         }
427
428         return 0;
429 }
430
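/* Bulk accessors for multi word tables (e.g. the 3 word ATU entry): they
 * prefer the Ethernet management path, which can move up to 16 bytes in
 * one frame, and fall back to one regmap access per 32 bit word.
 */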
431 static int
432 qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
433 {
434         int i, count = len / sizeof(u32), ret;
435         u32 tmp;
436
437         if (priv->mgmt_master && !qca8k_write_eth(priv, reg, val, len))
438                 return 0;
439
440         for (i = 0; i < count; i++) {
441                 tmp = val[i];
442
443                 ret = regmap_write(priv->regmap, reg + (i * 4), tmp);
444                 if (ret < 0)
445                         return ret;
446         }
447
448         return 0;
449 }
450
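/* regmap backends: try the Ethernet management path first and, when that
 * fails or is unavailable, fall back to paged MDIO access under the MDIO
 * bus lock (nested, since the switch itself sits on that bus).
 */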
451 static int
452 qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
453 {
454         struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
455         struct mii_bus *bus = priv->bus;
456         u16 r1, r2, page;
457         int ret;
458
459         if (!qca8k_read_eth(priv, reg, val, sizeof(*val)))
460                 return 0;
461
462         qca8k_split_addr(reg, &r1, &r2, &page);
463
464         mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
465
466         ret = qca8k_set_page(priv, page);
467         if (ret < 0)
468                 goto exit;
469
470         ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val);
471
472 exit:
473         mutex_unlock(&bus->mdio_lock);
474         return ret;
475 }
476
477 static int
478 qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
479 {
480         struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
481         struct mii_bus *bus = priv->bus;
482         u16 r1, r2, page;
483         int ret;
484
485         if (!qca8k_write_eth(priv, reg, &val, sizeof(val)))
486                 return 0;
487
488         qca8k_split_addr(reg, &r1, &r2, &page);
489
490         mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
491
492         ret = qca8k_set_page(priv, page);
493         if (ret < 0)
494                 goto exit;
495
496         qca8k_mii_write32(priv, 0x10 | r2, r1, val);
497
498 exit:
499         mutex_unlock(&bus->mdio_lock);
500         return ret;
501 }
502
503 static int
504 qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
505 {
506         struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
507         struct mii_bus *bus = priv->bus;
508         u16 r1, r2, page;
509         u32 val;
510         int ret;
511
512         if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
513                 return 0;
514
515         qca8k_split_addr(reg, &r1, &r2, &page);
516
517         mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
518
519         ret = qca8k_set_page(priv, page);
520         if (ret < 0)
521                 goto exit;
522
523         ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
524         if (ret < 0)
525                 goto exit;
526
527         val &= ~mask;
528         val |= write_val;
529         qca8k_mii_write32(priv, 0x10 | r2, r1, val);
530
531 exit:
532         mutex_unlock(&bus->mdio_lock);
533
534         return ret;
535 }
536
537 static const struct regmap_range qca8k_readable_ranges[] = {
538         regmap_reg_range(0x0000, 0x00e4), /* Global control */
539         regmap_reg_range(0x0100, 0x0168), /* EEE control */
540         regmap_reg_range(0x0200, 0x0270), /* Parser control */
541         regmap_reg_range(0x0400, 0x0454), /* ACL */
542         regmap_reg_range(0x0600, 0x0718), /* Lookup */
543         regmap_reg_range(0x0800, 0x0b70), /* QM */
544         regmap_reg_range(0x0c00, 0x0c80), /* PKT */
545         regmap_reg_range(0x0e00, 0x0e98), /* L3 */
546         regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
547         regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
548         regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
549         regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
550         regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
551         regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
552         regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
553
554 };
555
556 static const struct regmap_access_table qca8k_readable_table = {
557         .yes_ranges = qca8k_readable_ranges,
558         .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
559 };
560
561 static struct regmap_config qca8k_regmap_config = {
562         .reg_bits = 16,
563         .val_bits = 32,
564         .reg_stride = 4,
565         .max_register = 0x16ac, /* end MIB - Port6 range */
566         .reg_read = qca8k_regmap_read,
567         .reg_write = qca8k_regmap_write,
568         .reg_update_bits = qca8k_regmap_update_bits,
569         .rd_table = &qca8k_readable_table,
570         .disable_locking = true, /* Locking is handled by qca8k read/write */
571         .cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
572 };
573
574 static int
575 qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
576 {
577         u32 val;
578
579         return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
580                                        QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
581 }
582
583 static int
584 qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
585 {
586         u32 reg[3];
587         int ret;
588
589         /* load the ARL table into an array */
590         ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
591         if (ret)
592                 return ret;
593
594         /* vid - 83:72 */
595         fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
596         /* aging - 67:64 */
597         fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
598         /* portmask - 54:48 */
599         fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
600         /* mac - 47:0 */
601         fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
602         fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
603         fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
604         fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
605         fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
606         fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);
607
608         return 0;
609 }
610
611 static void
612 qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
613                 u8 aging)
614 {
615         u32 reg[3] = { 0 };
616
617         /* vid - 83:72 */
618         reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
619         /* aging - 67:64 */
620         reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
621         /* portmask - 54:48 */
622         reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
623         /* mac - 47:0 */
624         reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
625         reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
626         reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
627         reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
628         reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
629         reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
630
631         /* load the array into the ARL table */
632         qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
633 }
634
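/* Trigger an ATU (FDB) operation: program ATU_FUNC with the command and
 * the BUSY bit, then poll until BUSY clears. For QCA8K_FDB_LOAD the FULL
 * flag is checked afterwards to detect a full table.
 */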
635 static int
636 qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port)
637 {
638         u32 reg;
639         int ret;
640
641         /* Set the command and FDB index */
642         reg = QCA8K_ATU_FUNC_BUSY;
643         reg |= cmd;
644         if (port >= 0) {
645                 reg |= QCA8K_ATU_FUNC_PORT_EN;
646                 reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
647         }
648
649         /* Write the function register triggering the table access */
650         ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
651         if (ret)
652                 return ret;
653
654         /* wait for completion */
655         ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
656         if (ret)
657                 return ret;
658
659         /* Check for table full violation when adding an entry */
660         if (cmd == QCA8K_FDB_LOAD) {
661                 ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &reg);
662                 if (ret < 0)
663                         return ret;
664                 if (reg & QCA8K_ATU_FUNC_FULL)
665                         return -1;
666         }
667
668         return 0;
669 }
670
671 static int
672 qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb, int port)
673 {
674         int ret;
675
676         qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
677         ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
678         if (ret < 0)
679                 return ret;
680
681         return qca8k_fdb_read(priv, fdb);
682 }
683
684 static int
685 qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac, u16 port_mask,
686               u16 vid, u8 aging)
687 {
688         int ret;
689
690         mutex_lock(&priv->reg_mutex);
691         qca8k_fdb_write(priv, vid, port_mask, mac, aging);
692         ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
693         mutex_unlock(&priv->reg_mutex);
694
695         return ret;
696 }
697
698 static int
699 qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, u16 vid)
700 {
701         int ret;
702
703         mutex_lock(&priv->reg_mutex);
704         qca8k_fdb_write(priv, vid, port_mask, mac, 0);
705         ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
706         mutex_unlock(&priv->reg_mutex);
707
708         return ret;
709 }
710
711 static void
712 qca8k_fdb_flush(struct qca8k_priv *priv)
713 {
714         mutex_lock(&priv->reg_mutex);
715         qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
716         mutex_unlock(&priv->reg_mutex);
717 }
718
719 static int
720 qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
721                             const u8 *mac, u16 vid)
722 {
723         struct qca8k_fdb fdb = { 0 };
724         int ret;
725
726         mutex_lock(&priv->reg_mutex);
727
728         qca8k_fdb_write(priv, vid, 0, mac, 0);
729         ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
730         if (ret < 0)
731                 goto exit;
732
733         ret = qca8k_fdb_read(priv, &fdb);
734         if (ret < 0)
735                 goto exit;
736
737         /* Rule exists. Delete it first */
738         if (!fdb.aging) {
739                 ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
740                 if (ret)
741                         goto exit;
742         }
743
744         /* Add port to fdb portmask */
745         fdb.port_mask |= port_mask;
746
747         qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
748         ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
749
750 exit:
751         mutex_unlock(&priv->reg_mutex);
752         return ret;
753 }
754
755 static int
756 qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
757                          const u8 *mac, u16 vid)
758 {
759         struct qca8k_fdb fdb = { 0 };
760         int ret;
761
762         mutex_lock(&priv->reg_mutex);
763
764         qca8k_fdb_write(priv, vid, 0, mac, 0);
765         ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
766         if (ret < 0)
767                 goto exit;
768
        ret = qca8k_fdb_read(priv, &fdb);
        if (ret < 0)
                goto exit;

769         /* Rule doesn't exist. Why delete? */
770         if (!fdb.aging) {
771                 ret = -EINVAL;
772                 goto exit;
773         }
774
775         ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
776         if (ret)
777                 goto exit;
778
779         /* The only port in the rule is this port. Don't re-insert */
780         if (fdb.port_mask == port_mask)
781                 goto exit;
782
783         /* Remove port from port mask */
784         fdb.port_mask &= ~port_mask;
785
786         qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
787         ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
788
789 exit:
790         mutex_unlock(&priv->reg_mutex);
791         return ret;
792 }
793
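/* VTU (VLAN table) access follows the same pattern as the ATU one:
 * program VTU_FUNC1 with the command, VID and BUSY bit, poll for
 * completion and check the FULL flag when loading an entry.
 */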
794 static int
795 qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid)
796 {
797         u32 reg;
798         int ret;
799
800         /* Set the command and VLAN index */
801         reg = QCA8K_VTU_FUNC1_BUSY;
802         reg |= cmd;
803         reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);
804
805         /* Write the function register triggering the table access */
806         ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
807         if (ret)
808                 return ret;
809
810         /* wait for completion */
811         ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
812         if (ret)
813                 return ret;
814
815         /* Check for table full violation when adding an entry */
816         if (cmd == QCA8K_VLAN_LOAD) {
817                 ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, &reg);
818                 if (ret < 0)
819                         return ret;
820                 if (reg & QCA8K_VTU_FUNC1_FULL)
821                         return -ENOMEM;
822         }
823
824         return 0;
825 }
826
827 static int
828 qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged)
829 {
830         u32 reg;
831         int ret;
832
833         /* We do the right thing with VLAN 0 and treat it as untagged while
834          * preserving the tag on egress.
835          */
837         if (vid == 0)
838                 return 0;
839
840         mutex_lock(&priv->reg_mutex);
841         ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
842         if (ret < 0)
843                 goto out;
844
845         ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
846         if (ret < 0)
847                 goto out;
848         reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
849         reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
850         if (untagged)
851                 reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
852         else
853                 reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);
854
855         ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
856         if (ret)
857                 goto out;
858         ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
859
860 out:
861         mutex_unlock(&priv->reg_mutex);
862
863         return ret;
864 }
865
866 static int
867 qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
868 {
869         u32 reg, mask;
870         int ret, i;
871         bool del;
872
873         mutex_lock(&priv->reg_mutex);
874         ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
875         if (ret < 0)
876                 goto out;
877
878         ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
879         if (ret < 0)
880                 goto out;
881         reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
882         reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);
883
884         /* Check if we're the last member to be removed */
885         del = true;
886         for (i = 0; i < QCA8K_NUM_PORTS; i++) {
887                 mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
888
889                 if ((reg & mask) != mask) {
890                         del = false;
891                         break;
892                 }
893         }
894
895         if (del) {
896                 ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
897         } else {
898                 ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
899                 if (ret)
900                         goto out;
901                 ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
902         }
903
904 out:
905         mutex_unlock(&priv->reg_mutex);
906
907         return ret;
908 }
909
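/* Flush the MIB counters, set the MIB_CPU_KEEP bit and enable the MIB
 * module.
 */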
910 static int
911 qca8k_mib_init(struct qca8k_priv *priv)
912 {
913         int ret;
914
915         mutex_lock(&priv->reg_mutex);
916         ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
917                                  QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
918                                  FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) |
919                                  QCA8K_MIB_BUSY);
920         if (ret)
921                 goto exit;
922
923         ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
924         if (ret)
925                 goto exit;
926
927         ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
928         if (ret)
929                 goto exit;
930
931         ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
932
933 exit:
934         mutex_unlock(&priv->reg_mutex);
935         return ret;
936 }
937
938 static void
939 qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
940 {
941         u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
942
943         /* Ports 0 and 6 have no internal PHY */
944         if (port > 0 && port < 6)
945                 mask |= QCA8K_PORT_STATUS_LINK_AUTO;
946
947         if (enable)
948                 regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
949         else
950                 regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
951 }
952
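/* Poll helper for the Ethernet based PHY access: re-send a copy of the
 * prepared read frame with a fresh sequence number and return the first
 * data word, so read_poll_timeout() can wait for MDIO_MASTER_BUSY to
 * clear without touching the MDIO bus.
 */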
953 static int
954 qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
955                         struct sk_buff *read_skb, u32 *val)
956 {
957         struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
958         bool ack;
959         int ret;
960
961         reinit_completion(&mgmt_eth_data->rw_done);
962
963         /* Increment seq_num and set it in the copy pkt */
964         mgmt_eth_data->seq++;
965         qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
966         mgmt_eth_data->ack = false;
967
968         dev_queue_xmit(skb);
969
970         ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
971                                           QCA8K_ETHERNET_TIMEOUT);
972
973         ack = mgmt_eth_data->ack;
974
975         if (ret <= 0)
976                 return -ETIMEDOUT;
977
978         if (!ack)
979                 return -EINVAL;
980
981         *val = mgmt_eth_data->data[0];
982
983         return 0;
984 }
985
986 static int
987 qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
988                       int regnum, u16 data)
989 {
990         struct sk_buff *write_skb, *clear_skb, *read_skb;
991         struct qca8k_mgmt_eth_data *mgmt_eth_data;
992         u32 write_val, clear_val = 0, val;
993         struct net_device *mgmt_master;
994         int ret, ret1;
995         bool ack;
996
997         if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
998                 return -EINVAL;
999
1000         mgmt_eth_data = &priv->mgmt_eth_data;
1001
1002         write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
1003                     QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
1004                     QCA8K_MDIO_MASTER_REG_ADDR(regnum);
1005
1006         if (read) {
1007                 write_val |= QCA8K_MDIO_MASTER_READ;
1008         } else {
1009                 write_val |= QCA8K_MDIO_MASTER_WRITE;
1010                 write_val |= QCA8K_MDIO_MASTER_DATA(data);
1011         }
1012
1013         /* Prealloc all the needed skb before the lock */
1014         write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val,
1015                                             QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val));
1016         if (!write_skb)
1017                 return -ENOMEM;
1018
1019         clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val,
1020                                             QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
1021         if (!clear_skb) {
1022                 ret = -ENOMEM;
1023                 goto err_clear_skb;
1024         }
1025
1026         read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val,
1027                                            QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
1028         if (!read_skb) {
1029                 ret = -ENOMEM;
1030                 goto err_read_skb;
1031         }
1032
1033         /* Actually start the request:
1034          * 1. Send mdio master packet
1035          * 2. Busy Wait for mdio master command
1036          * 3. Get the data if we are reading
1037          * 4. Reset the mdio master (even with error)
1038          */
1039         mutex_lock(&mgmt_eth_data->mutex);
1040
1041         /* Check if mgmt_master is operational */
1042         mgmt_master = priv->mgmt_master;
1043         if (!mgmt_master) {
1044                 mutex_unlock(&mgmt_eth_data->mutex);
1045                 ret = -EINVAL;
1046                 goto err_mgmt_master;
1047         }
1048
1049         read_skb->dev = mgmt_master;
1050         clear_skb->dev = mgmt_master;
1051         write_skb->dev = mgmt_master;
1052
1053         reinit_completion(&mgmt_eth_data->rw_done);
1054
1055         /* Increment seq_num and set it in the write pkt */
1056         mgmt_eth_data->seq++;
1057         qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
1058         mgmt_eth_data->ack = false;
1059
1060         dev_queue_xmit(write_skb);
1061
1062         ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
1063                                           QCA8K_ETHERNET_TIMEOUT);
1064
1065         ack = mgmt_eth_data->ack;
1066
1067         if (ret <= 0) {
1068                 ret = -ETIMEDOUT;
1069                 kfree_skb(read_skb);
1070                 goto exit;
1071         }
1072
1073         if (!ack) {
1074                 ret = -EINVAL;
1075                 kfree_skb(read_skb);
1076                 goto exit;
1077         }
1078
1079         ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
1080                                 !(val & QCA8K_MDIO_MASTER_BUSY), 0,
1081                                 QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
1082                                 mgmt_eth_data, read_skb, &val);
1083
1084         if (ret < 0 && ret1 < 0) {
1085                 ret = ret1;
1086                 goto exit;
1087         }
1088
1089         if (read) {
1090                 reinit_completion(&mgmt_eth_data->rw_done);
1091
1092                 /* Increment seq_num and set it in the read pkt */
1093                 mgmt_eth_data->seq++;
1094                 qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
1095                 mgmt_eth_data->ack = false;
1096
1097                 dev_queue_xmit(read_skb);
1098
1099                 ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
1100                                                   QCA8K_ETHERNET_TIMEOUT);
1101
1102                 ack = mgmt_eth_data->ack;
1103
1104                 if (ret <= 0) {
1105                         ret = -ETIMEDOUT;
1106                         goto exit;
1107                 }
1108
1109                 if (!ack) {
1110                         ret = -EINVAL;
1111                         goto exit;
1112                 }
1113
1114                 ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
1115         } else {
1116                 kfree_skb(read_skb);
1117         }
1118 exit:
1119         reinit_completion(&mgmt_eth_data->rw_done);
1120
1121         /* Increment seq_num and set it in the clear pkt */
1122         mgmt_eth_data->seq++;
1123         qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
1124         mgmt_eth_data->ack = false;
1125
1126         dev_queue_xmit(clear_skb);
1127
1128         wait_for_completion_timeout(&mgmt_eth_data->rw_done,
1129                                     QCA8K_ETHERNET_TIMEOUT);
1130
1131         mutex_unlock(&mgmt_eth_data->mutex);
1132
1133         return ret;
1134
1135         /* Error handling before lock */
1136 err_mgmt_master:
1137         kfree_skb(read_skb);
1138 err_read_skb:
1139         kfree_skb(clear_skb);
1140 err_clear_skb:
1141         kfree_skb(write_skb);
1142
1143         return ret;
1144 }
1145
1146 static u32
1147 qca8k_port_to_phy(int port)
1148 {
1149         /* From Andrew Lunn:
1150          * Port 0 has no internal phy.
1151          * Port 1 has an internal PHY at MDIO address 0.
1152          * Port 2 has an internal PHY at MDIO address 1.
1153          * ...
1154          * Port 5 has an internal PHY at MDIO address 4.
1155          * Port 6 has no internal PHY.
1156          */
1157
1158         return port - 1;
1159 }
1160
1161 static int
1162 qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
1163 {
1164         u16 r1, r2, page;
1165         u32 val;
1166         int ret, ret1;
1167
1168         qca8k_split_addr(reg, &r1, &r2, &page);
1169
1170         ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0,
1171                                 QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
1172                                 bus, 0x10 | r2, r1, &val);
1173
1174         /* Check if qca8k_read has failed for a different reason
1175          * before returning -ETIMEDOUT
1176          */
1177         if (ret < 0 && ret1 < 0)
1178                 return ret1;
1179
1180         return ret;
1181 }
1182
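/* Legacy PHY access through the switch MDIO master: program
 * MDIO_MASTER_CTRL via paged MDIO, wait for the BUSY bit to clear and,
 * for reads, fetch the data. MASTER_EN is cleared again on exit even if
 * the busy wait times out.
 */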
1183 static int
1184 qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
1185 {
1186         struct mii_bus *bus = priv->bus;
1187         u16 r1, r2, page;
1188         u32 val;
1189         int ret;
1190
1191         if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
1192                 return -EINVAL;
1193
1194         val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
1195               QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
1196               QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
1197               QCA8K_MDIO_MASTER_DATA(data);
1198
1199         qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
1200
1201         mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
1202
1203         ret = qca8k_set_page(priv, page);
1204         if (ret)
1205                 goto exit;
1206
1207         qca8k_mii_write32(priv, 0x10 | r2, r1, val);
1208
1209         ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
1210                                    QCA8K_MDIO_MASTER_BUSY);
1211
1212 exit:
1213         /* even if the busy_wait times out, try to clear the MASTER_EN */
1214         qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
1215
1216         mutex_unlock(&bus->mdio_lock);
1217
1218         return ret;
1219 }
1220
1221 static int
1222 qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
1223 {
1224         struct mii_bus *bus = priv->bus;
1225         u16 r1, r2, page;
1226         u32 val;
1227         int ret;
1228
1229         if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
1230                 return -EINVAL;
1231
1232         val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
1233               QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
1234               QCA8K_MDIO_MASTER_REG_ADDR(regnum);
1235
1236         qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
1237
1238         mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
1239
1240         ret = qca8k_set_page(priv, page);
1241         if (ret)
1242                 goto exit;
1243
1244         qca8k_mii_write32(priv, 0x10 | r2, r1, val);
1245
1246         ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
1247                                    QCA8K_MDIO_MASTER_BUSY);
1248         if (ret)
1249                 goto exit;
1250
1251         ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
1252
1253 exit:
1254         /* even if the busy_wait times out, try to clear the MASTER_EN */
1255         qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
1256
1257         mutex_unlock(&bus->mdio_lock);
1258
1259         if (ret >= 0)
1260                 ret = val & QCA8K_MDIO_MASTER_DATA_MASK;
1261
1262         return ret;
1263 }
1264
1265 static int
1266 qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
1267 {
1268         struct qca8k_priv *priv = slave_bus->priv;
1269         int ret;
1270
1271         /* Use mdio Ethernet when available, fallback to legacy one on error */
1272         ret = qca8k_phy_eth_command(priv, false, phy, regnum, data);
1273         if (!ret)
1274                 return 0;
1275
1276         return qca8k_mdio_write(priv, phy, regnum, data);
1277 }
1278
1279 static int
1280 qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
1281 {
1282         struct qca8k_priv *priv = slave_bus->priv;
1283         int ret;
1284
1285         /* Use mdio Ethernet when available, fallback to legacy one on error */
1286         ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);
1287         if (ret >= 0)
1288                 return ret;
1289
1290         return qca8k_mdio_read(priv, phy, regnum);
1291 }
1292
1293 static int
1294 qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
1295 {
1296         struct qca8k_priv *priv = ds->priv;
1297         int ret;
1298
1299         /* Check if the legacy mapping should be used and the
1300          * port is not correctly mapped to the right PHY in the
1301          * devicetree
1302          */
1303         if (priv->legacy_phy_port_mapping)
1304                 port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
1305
1306         /* Use mdio Ethernet when available, fallback to legacy one on error */
1307         ret = qca8k_phy_eth_command(priv, false, port, regnum, data);
1308         if (!ret)
1309                 return ret;
1310
1311         return qca8k_mdio_write(priv, port, regnum, data);
1312 }
1313
1314 static int
1315 qca8k_phy_read(struct dsa_switch *ds, int port, int regnum)
1316 {
1317         struct qca8k_priv *priv = ds->priv;
1318         int ret;
1319
1320         /* Check if the legacy mapping should be used and the
1321          * port is not correctly mapped to the right PHY in the
1322          * devicetree
1323          */
1324         if (priv->legacy_phy_port_mapping)
1325                 port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
1326
1327         /* Use mdio Ethernet when available, fallback to legacy one on error */
1328         ret = qca8k_phy_eth_command(priv, true, port, regnum, 0);
1329         if (ret >= 0)
1330                 return ret;
1331
1332         ret = qca8k_mdio_read(priv, port, regnum);
1333
1334         if (ret < 0)
1335                 return 0xffff;
1336
1337         return ret;
1338 }
1339
1340 static int
1341 qca8k_mdio_register(struct qca8k_priv *priv, struct device_node *mdio)
1342 {
1343         struct dsa_switch *ds = priv->ds;
1344         struct mii_bus *bus;
1345
1346         bus = devm_mdiobus_alloc(ds->dev);
1347
1348         if (!bus)
1349                 return -ENOMEM;
1350
1351         bus->priv = (void *)priv;
1352         bus->name = "qca8k slave mii";
1353         bus->read = qca8k_internal_mdio_read;
1354         bus->write = qca8k_internal_mdio_write;
1355         snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d",
1356                  ds->index);
1357
1358         bus->parent = ds->dev;
1359         bus->phy_mask = ~ds->phys_mii_mask;
1360
1361         ds->slave_mii_bus = bus;
1362
1363         return devm_of_mdiobus_register(priv->dev, bus, mdio);
1364 }
1365
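/* Decide how the PHYs are wired up from the devicetree: ports with a
 * phy-handle and a non-internal phy-mode use an external MDIO bus,
 * everything else goes through the internal MDIO master. Mixing the two
 * is not supported because MDIO_MASTER_EN disconnects the external MDC
 * passthrough to the internal PHYs.
 */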
1366 static int
1367 qca8k_setup_mdio_bus(struct qca8k_priv *priv)
1368 {
1369         u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
1370         struct device_node *ports, *port, *mdio;
1371         phy_interface_t mode;
1372         int err;
1373
1374         ports = of_get_child_by_name(priv->dev->of_node, "ports");
1375         if (!ports)
1376                 ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports");
1377
1378         if (!ports)
1379                 return -EINVAL;
1380
1381         for_each_available_child_of_node(ports, port) {
1382                 err = of_property_read_u32(port, "reg", &reg);
1383                 if (err) {
1384                         of_node_put(port);
1385                         of_node_put(ports);
1386                         return err;
1387                 }
1388
1389                 if (!dsa_is_user_port(priv->ds, reg))
1390                         continue;
1391
1392                 of_get_phy_mode(port, &mode);
1393
1394                 if (of_property_read_bool(port, "phy-handle") &&
1395                     mode != PHY_INTERFACE_MODE_INTERNAL)
1396                         external_mdio_mask |= BIT(reg);
1397                 else
1398                         internal_mdio_mask |= BIT(reg);
1399         }
1400
1401         of_node_put(ports);
1402         if (!external_mdio_mask && !internal_mdio_mask) {
1403                 dev_err(priv->dev, "no PHYs are defined.\n");
1404                 return -EINVAL;
1405         }
1406
1407         /* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through
1408          * the MDIO_MASTER register also _disconnects_ the external MDC
1409          * passthrough to the internal PHYs. It's not possible to use both
1410          * configurations at the same time!
1411          *
1412          * Because this came up during the review process:
1413          * If the external mdio-bus driver is capable of magically disabling
1414          * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's
1415          * accessors for the time being, it would be possible to pull this
1416          * off.
1417          */
1418         if (!!external_mdio_mask && !!internal_mdio_mask) {
1419                 dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
1420                 return -EINVAL;
1421         }
1422
1423         if (external_mdio_mask) {
1424                 /* Make sure to disable the internal mdio bus in case
1425                  * a dt-overlay and driver reload changed the configuration
1426                  */
1427
1428                 return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
1429                                          QCA8K_MDIO_MASTER_EN);
1430         }
1431
1432         /* Check if the devicetree declares the port:phy mapping */
1433         mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
1434         if (of_device_is_available(mdio)) {
1435                 err = qca8k_mdio_register(priv, mdio);
1436                 if (err)
1437                         of_node_put(mdio);
1438
1439                 return err;
1440         }
1441
1442         /* If a mapping can't be found, the legacy mapping is used
1443          * via the qca8k_port_to_phy function
1444          */
1445         priv->legacy_phy_port_mapping = true;
1446         priv->ops.phy_read = qca8k_phy_read;
1447         priv->ops.phy_write = qca8k_phy_write;
1448
1449         return 0;
1450 }
1451
1452 static int
1453 qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
1454 {
1455         u32 mask = 0;
1456         int ret = 0;
1457
1458         /* SoC specific settings for ipq8064.
1459          * If more devices require this, consider adding
1460          * a dedicated binding.
1461          */
1462         if (of_machine_is_compatible("qcom,ipq8064"))
1463                 mask |= QCA8K_MAC_PWR_RGMII0_1_8V;
1464
1465         /* SoC specific settings for ipq8065 */
1466         if (of_machine_is_compatible("qcom,ipq8065"))
1467                 mask |= QCA8K_MAC_PWR_RGMII1_1_8V;
1468
1469         if (mask) {
1470                 ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
1471                                 QCA8K_MAC_PWR_RGMII0_1_8V |
1472                                 QCA8K_MAC_PWR_RGMII1_1_8V,
1473                                 mask);
1474         }
1475
1476         return ret;
1477 }
1478
1479 static int qca8k_find_cpu_port(struct dsa_switch *ds)
1480 {
1481         struct qca8k_priv *priv = ds->priv;
1482
1483         /* Find the connected cpu port. Valid ports are 0 or 6 */
1484         if (dsa_is_cpu_port(ds, 0))
1485                 return 0;
1486
1487         dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");
1488
1489         if (dsa_is_cpu_port(ds, 6))
1490                 return 6;
1491
1492         return -EINVAL;
1493 }
1494
1495 static int
1496 qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
1497 {
1498         struct device_node *node = priv->dev->of_node;
1499         const struct qca8k_match_data *data;
1500         u32 val = 0;
1501         int ret;
1502
1503         /* The QCA8327 requires the correct package mode to be set.
1504          * Its bigger brother, the QCA8328, has the 172 pin layout.
1505          * This should be applied by default, but we set it just to make sure.
1506          */
1507         if (priv->switch_id == QCA8K_ID_QCA8327) {
1508                 data = of_device_get_match_data(priv->dev);
1509
1510                 /* Set the correct package of 148 pin for QCA8327 */
1511                 if (data->reduced_package)
1512                         val |= QCA8327_PWS_PACKAGE148_EN;
1513
1514                 ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
1515                                 val);
1516                 if (ret)
1517                         return ret;
1518         }
1519
1520         if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
1521                 val |= QCA8K_PWS_POWER_ON_SEL;
1522
1523         if (of_property_read_bool(node, "qca,led-open-drain")) {
1524                 if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
1525                         dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set.");
1526                         return -EINVAL;
1527                 }
1528
1529                 val |= QCA8K_PWS_LED_OPEN_EN_CSR;
1530         }
1531
1532         return qca8k_rmw(priv, QCA8K_REG_PWS,
1533                         QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
1534                         val);
1535 }
1536
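/* Parse the devicetree nodes of the two possible CPU ports (0 and 6):
 * RGMII tx/rx internal delays (given in ps, stored in ns, capped at 3 ns)
 * and the SGMII clock edge / PLL tweaks.
 */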
1537 static int
1538 qca8k_parse_port_config(struct qca8k_priv *priv)
1539 {
1540         int port, cpu_port_index = -1, ret;
1541         struct device_node *port_dn;
1542         phy_interface_t mode;
1543         struct dsa_port *dp;
1544         u32 delay;
1545
1546         /* We have 2 CPU ports. Check them */
1547         for (port = 0; port < QCA8K_NUM_PORTS; port++) {
1548                 /* Skip all other ports */
1549                 if (port != 0 && port != 6)
1550                         continue;
1551
1552                 dp = dsa_to_port(priv->ds, port);
1553                 port_dn = dp->dn;
1554                 cpu_port_index++;
1555
1556                 if (!of_device_is_available(port_dn))
1557                         continue;
1558
1559                 ret = of_get_phy_mode(port_dn, &mode);
1560                 if (ret)
1561                         continue;
1562
1563                 switch (mode) {
1564                 case PHY_INTERFACE_MODE_RGMII:
1565                 case PHY_INTERFACE_MODE_RGMII_ID:
1566                 case PHY_INTERFACE_MODE_RGMII_TXID:
1567                 case PHY_INTERFACE_MODE_RGMII_RXID:
1568                 case PHY_INTERFACE_MODE_SGMII:
1569                         delay = 0;
1570
1571                         if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
1572                                 /* Switch regs accept value in ns, convert ps to ns */
1573                                 delay = delay / 1000;
1574                         else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
1575                                  mode == PHY_INTERFACE_MODE_RGMII_TXID)
1576                                 delay = 1;
1577
1578                         if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
1579                                 dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
1580                                 delay = 3;
1581                         }
1582
1583                         priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;
1584
1585                         delay = 0;
1586
1587                         if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
1588                                 /* Switch regs accept value in ns, convert ps to ns */
1589                                 delay = delay / 1000;
1590                         else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
1591                                  mode == PHY_INTERFACE_MODE_RGMII_RXID)
1592                                 delay = 2;
1593
1594                         if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
1595                                 dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
1596                                 delay = 3;
1597                         }
1598
1599                         priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;
1600
1601                         /* Skip sgmii parsing for rgmii* mode */
1602                         if (mode == PHY_INTERFACE_MODE_RGMII ||
1603                             mode == PHY_INTERFACE_MODE_RGMII_ID ||
1604                             mode == PHY_INTERFACE_MODE_RGMII_TXID ||
1605                             mode == PHY_INTERFACE_MODE_RGMII_RXID)
1606                                 break;
1607
1608                         if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
1609                                 priv->ports_config.sgmii_tx_clk_falling_edge = true;
1610
1611                         if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
1612                                 priv->ports_config.sgmii_rx_clk_falling_edge = true;
1613
1614                         if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
1615                                 priv->ports_config.sgmii_enable_pll = true;
1616
1617                                 if (priv->switch_id == QCA8K_ID_QCA8327) {
1618                                         dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
1619                                         priv->ports_config.sgmii_enable_pll = false;
1620                                 }
1621
1622                                 if (priv->switch_revision < 2)
1623                                         dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
1624                         }
1625
1626                         break;
1627                 default:
1628                         continue;
1629                 }
1630         }
1631
1632         return 0;
1633 }
1634
1635 static void
1636 qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
1637                                       u32 reg)
1638 {
1639         u32 delay, val = 0;
1640         int ret;
1641
1642         /* The delay can be declared in 3 different ways:
1643          * phy-mode set to rgmii with the standard internal-delay binding,
1644          * the rgmii-id phy mode, or the rgmii-txid/rgmii-rxid phy modes.
1645          * The parse logic sets a delay different from 0 only when one of
1646          * these 3 ways is used; in every other case the delay is left
1647          * disabled. With ID or TX/RXID the delay is enabled and set to
1648          * the default and recommended value.
1649          */
1650         if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
1651                 delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];
1652
1653                 val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
1654                         QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
1655         }
1656
1657         if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
1658                 delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];
1659
1660                 val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
1661                         QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
1662         }
1663
1664         /* Set RGMII delay based on the selected values */
1665         ret = qca8k_rmw(priv, reg,
1666                         QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
1667                         QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
1668                         QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
1669                         QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
1670                         val);
1671         if (ret)
1672                 dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
1673                         cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
1674 }
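
/* A worked example of the helper above, assuming a parsed tx and rx delay of
 * 2 ns each: val is built as
 *
 *	QCA8K_PORT_PAD_RGMII_TX_DELAY(2) | QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
 *	QCA8K_PORT_PAD_RGMII_RX_DELAY(2) | QCA8K_PORT_PAD_RGMII_RX_DELAY_EN
 *
 * and the read-modify-write clears both delay fields and both enable bits
 * before applying it, so a previously programmed delay is cleanly removed
 * when the new configuration is 0.
 */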
1675
1676 static struct phylink_pcs *
1677 qca8k_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
1678                              phy_interface_t interface)
1679 {
1680         struct qca8k_priv *priv = ds->priv;
1681         struct phylink_pcs *pcs = NULL;
1682
1683         switch (interface) {
1684         case PHY_INTERFACE_MODE_SGMII:
1685         case PHY_INTERFACE_MODE_1000BASEX:
1686                 switch (port) {
1687                 case 0:
1688                         pcs = &priv->pcs_port_0.pcs;
1689                         break;
1690
1691                 case 6:
1692                         pcs = &priv->pcs_port_6.pcs;
1693                         break;
1694                 }
1695                 break;
1696
1697         default:
1698                 break;
1699         }
1700
1701         return pcs;
1702 }
1703
1704 static void
1705 qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
1706                          const struct phylink_link_state *state)
1707 {
1708         struct qca8k_priv *priv = ds->priv;
1709         int cpu_port_index;
1710         u32 reg;
1711
1712         switch (port) {
1713         case 0: /* 1st CPU port */
1714                 if (state->interface != PHY_INTERFACE_MODE_RGMII &&
1715                     state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
1716                     state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
1717                     state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
1718                     state->interface != PHY_INTERFACE_MODE_SGMII)
1719                         return;
1720
1721                 reg = QCA8K_REG_PORT0_PAD_CTRL;
1722                 cpu_port_index = QCA8K_CPU_PORT0;
1723                 break;
1724         case 1:
1725         case 2:
1726         case 3:
1727         case 4:
1728         case 5:
1729                 /* Internal PHY, nothing to do */
1730                 return;
1731         case 6: /* 2nd CPU port / external PHY */
1732                 if (state->interface != PHY_INTERFACE_MODE_RGMII &&
1733                     state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
1734                     state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
1735                     state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
1736                     state->interface != PHY_INTERFACE_MODE_SGMII &&
1737                     state->interface != PHY_INTERFACE_MODE_1000BASEX)
1738                         return;
1739
1740                 reg = QCA8K_REG_PORT6_PAD_CTRL;
1741                 cpu_port_index = QCA8K_CPU_PORT6;
1742                 break;
1743         default:
1744                 dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
1745                 return;
1746         }
1747
1748         if (port != 6 && phylink_autoneg_inband(mode)) {
1749                 dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
1750                         __func__);
1751                 return;
1752         }
1753
1754         switch (state->interface) {
1755         case PHY_INTERFACE_MODE_RGMII:
1756         case PHY_INTERFACE_MODE_RGMII_ID:
1757         case PHY_INTERFACE_MODE_RGMII_TXID:
1758         case PHY_INTERFACE_MODE_RGMII_RXID:
1759                 qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
1760
1761                 /* Configure rgmii delay */
1762                 qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
1763
1764                 /* The QCA8337 requires the rgmii rx delay to be set for all
1765                  * ports. This is enabled through PORT5_PAD_CTRL for all ports,
1766                  * rather than through the individual port registers.
1767                  */
1768                 if (priv->switch_id == QCA8K_ID_QCA8337)
1769                         qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
1770                                     QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
1771                 break;
1772         case PHY_INTERFACE_MODE_SGMII:
1773         case PHY_INTERFACE_MODE_1000BASEX:
1774                 /* Enable SGMII on the port */
1775                 qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
1776                 break;
1777         default:
1778                 dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
1779                         phy_modes(state->interface), port);
1780                 return;
1781         }
1782 }
1783
1784 static void qca8k_phylink_get_caps(struct dsa_switch *ds, int port,
1785                                    struct phylink_config *config)
1786 {
1787         switch (port) {
1788         case 0: /* 1st CPU port */
1789                 phy_interface_set_rgmii(config->supported_interfaces);
1790                 __set_bit(PHY_INTERFACE_MODE_SGMII,
1791                           config->supported_interfaces);
1792                 break;
1793
1794         case 1:
1795         case 2:
1796         case 3:
1797         case 4:
1798         case 5:
1799                 /* Internal PHY */
1800                 __set_bit(PHY_INTERFACE_MODE_GMII,
1801                           config->supported_interfaces);
1802                 __set_bit(PHY_INTERFACE_MODE_INTERNAL,
1803                           config->supported_interfaces);
1804                 break;
1805
1806         case 6: /* 2nd CPU port / external PHY */
1807                 phy_interface_set_rgmii(config->supported_interfaces);
1808                 __set_bit(PHY_INTERFACE_MODE_SGMII,
1809                           config->supported_interfaces);
1810                 __set_bit(PHY_INTERFACE_MODE_1000BASEX,
1811                           config->supported_interfaces);
1812                 break;
1813         }
1814
1815         config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1816                 MAC_10 | MAC_100 | MAC_1000FD;
1817
1818         config->legacy_pre_march2020 = false;
1819 }
1820
1821 static void
1822 qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
1823                             phy_interface_t interface)
1824 {
1825         struct qca8k_priv *priv = ds->priv;
1826
1827         qca8k_port_set_status(priv, port, 0);
1828 }
1829
1830 static void
1831 qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
1832                           phy_interface_t interface, struct phy_device *phydev,
1833                           int speed, int duplex, bool tx_pause, bool rx_pause)
1834 {
1835         struct qca8k_priv *priv = ds->priv;
1836         u32 reg;
1837
1838         if (phylink_autoneg_inband(mode)) {
1839                 reg = QCA8K_PORT_STATUS_LINK_AUTO;
1840         } else {
1841                 switch (speed) {
1842                 case SPEED_10:
1843                         reg = QCA8K_PORT_STATUS_SPEED_10;
1844                         break;
1845                 case SPEED_100:
1846                         reg = QCA8K_PORT_STATUS_SPEED_100;
1847                         break;
1848                 case SPEED_1000:
1849                         reg = QCA8K_PORT_STATUS_SPEED_1000;
1850                         break;
1851                 default:
1852                         reg = QCA8K_PORT_STATUS_LINK_AUTO;
1853                         break;
1854                 }
1855
1856                 if (duplex == DUPLEX_FULL)
1857                         reg |= QCA8K_PORT_STATUS_DUPLEX;
1858
1859                 if (rx_pause || dsa_is_cpu_port(ds, port))
1860                         reg |= QCA8K_PORT_STATUS_RXFLOW;
1861
1862                 if (tx_pause || dsa_is_cpu_port(ds, port))
1863                         reg |= QCA8K_PORT_STATUS_TXFLOW;
1864         }
1865
1866         reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
1867
1868         qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
1869 }
1870
1871 static struct qca8k_pcs *pcs_to_qca8k_pcs(struct phylink_pcs *pcs)
1872 {
1873         return container_of(pcs, struct qca8k_pcs, pcs);
1874 }
1875
1876 static void qca8k_pcs_get_state(struct phylink_pcs *pcs,
1877                                 struct phylink_link_state *state)
1878 {
1879         struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
1880         int port = pcs_to_qca8k_pcs(pcs)->port;
1881         u32 reg;
1882         int ret;
1883
1884         ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
1885         if (ret < 0) {
1886                 state->link = false;
1887                 return;
1888         }
1889
1890         state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
1891         state->an_complete = state->link;
1892         state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
1893         state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
1894                                                            DUPLEX_HALF;
1895
1896         switch (reg & QCA8K_PORT_STATUS_SPEED) {
1897         case QCA8K_PORT_STATUS_SPEED_10:
1898                 state->speed = SPEED_10;
1899                 break;
1900         case QCA8K_PORT_STATUS_SPEED_100:
1901                 state->speed = SPEED_100;
1902                 break;
1903         case QCA8K_PORT_STATUS_SPEED_1000:
1904                 state->speed = SPEED_1000;
1905                 break;
1906         default:
1907                 state->speed = SPEED_UNKNOWN;
1908                 break;
1909         }
1910
1911         if (reg & QCA8K_PORT_STATUS_RXFLOW)
1912                 state->pause |= MLO_PAUSE_RX;
1913         if (reg & QCA8K_PORT_STATUS_TXFLOW)
1914                 state->pause |= MLO_PAUSE_TX;
1915 }
1916
1917 static int qca8k_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
1918                             phy_interface_t interface,
1919                             const unsigned long *advertising,
1920                             bool permit_pause_to_mac)
1921 {
1922         struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
1923         int cpu_port_index, ret, port;
1924         u32 reg, val;
1925
1926         port = pcs_to_qca8k_pcs(pcs)->port;
1927         switch (port) {
1928         case 0:
1929                 reg = QCA8K_REG_PORT0_PAD_CTRL;
1930                 cpu_port_index = QCA8K_CPU_PORT0;
1931                 break;
1932
1933         case 6:
1934                 reg = QCA8K_REG_PORT6_PAD_CTRL;
1935                 cpu_port_index = QCA8K_CPU_PORT6;
1936                 break;
1937
1938         default:
1939                 WARN_ON(1);
1940                 /* Bail out to avoid using reg/cpu_port_index uninitialized */
1941                 return -EINVAL;
1942         }
1941
1942         /* Enable/disable SerDes auto-negotiation as necessary */
1943         ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
1944         if (ret)
1945                 return ret;
1946         if (phylink_autoneg_inband(mode))
1947                 val &= ~QCA8K_PWS_SERDES_AEN_DIS;
1948         else
1949                 val |= QCA8K_PWS_SERDES_AEN_DIS;
1950         qca8k_write(priv, QCA8K_REG_PWS, val);
1951
1952         /* Configure the SGMII parameters */
1953         ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
1954         if (ret)
1955                 return ret;
1956
1957         val |= QCA8K_SGMII_EN_SD;
1958
1959         if (priv->ports_config.sgmii_enable_pll)
1960                 val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
1961                        QCA8K_SGMII_EN_TX;
1962
1963         if (dsa_is_cpu_port(priv->ds, port)) {
1964                 /* CPU port, we're talking to the CPU MAC, be a PHY */
1965                 val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1966                 val |= QCA8K_SGMII_MODE_CTRL_PHY;
1967         } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1968                 val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1969                 val |= QCA8K_SGMII_MODE_CTRL_MAC;
1970         } else if (interface == PHY_INTERFACE_MODE_1000BASEX) {
1971                 val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1972                 val |= QCA8K_SGMII_MODE_CTRL_BASEX;
1973         }
1974
1975         qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
1976
1977         /* The original code reports port instability as SGMII also needs
1978          * a delay set. Apply the advised values here or take them from DT.
1979          */
1980         if (interface == PHY_INTERFACE_MODE_SGMII)
1981                 qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
1982         /* For qca8327/qca8328/qca8334/qca8338 the SGMII port is unique
1983          * and the falling edge is configured by writing the PORT0 PAD reg.
1984          */
1985         if (priv->switch_id == QCA8K_ID_QCA8327 ||
1986             priv->switch_id == QCA8K_ID_QCA8337)
1987                 reg = QCA8K_REG_PORT0_PAD_CTRL;
1988
1989         val = 0;
1990
1991         /* SGMII Clock phase configuration */
1992         if (priv->ports_config.sgmii_rx_clk_falling_edge)
1993                 val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
1994
1995         if (priv->ports_config.sgmii_tx_clk_falling_edge)
1996                 val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
1997
1998         if (val)
1999                 ret = qca8k_rmw(priv, reg,
2000                                 QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
2001                                 QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
2002                                 val);
2003
2004         return 0;
2005 }
2006
2007 static void qca8k_pcs_an_restart(struct phylink_pcs *pcs)
2008 {
2009 }
2010
2011 static const struct phylink_pcs_ops qca8k_pcs_ops = {
2012         .pcs_get_state = qca8k_pcs_get_state,
2013         .pcs_config = qca8k_pcs_config,
2014         .pcs_an_restart = qca8k_pcs_an_restart,
2015 };
2016
2017 static void qca8k_setup_pcs(struct qca8k_priv *priv, struct qca8k_pcs *qpcs,
2018                             int port)
2019 {
2020         qpcs->pcs.ops = &qca8k_pcs_ops;
2021
2022         /* We don't have interrupts for link changes, so we need to poll */
2023         qpcs->pcs.poll = true;
2024         qpcs->priv = priv;
2025         qpcs->port = port;
2026 }
2027
2028 static void
2029 qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
2030 {
2031         const struct qca8k_match_data *match_data;
2032         struct qca8k_priv *priv = ds->priv;
2033         int i;
2034
2035         if (stringset != ETH_SS_STATS)
2036                 return;
2037
2038         match_data = of_device_get_match_data(priv->dev);
2039
2040         for (i = 0; i < match_data->mib_count; i++)
2041                 strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
2042                         ETH_GSTRING_LEN);
2043 }
2044
2045 static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb)
2046 {
2047         const struct qca8k_match_data *match_data;
2048         struct qca8k_mib_eth_data *mib_eth_data;
2049         struct qca8k_priv *priv = ds->priv;
2050         const struct qca8k_mib_desc *mib;
2051         struct mib_ethhdr *mib_ethhdr;
2052         int i, mib_len, offset = 0;
2053         u64 *data;
2054         u8 port;
2055
2056         mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
2057         mib_eth_data = &priv->mib_eth_data;
2058
2059         /* The switch autocasts MIB data for every port. Ignore packets
2060          * for other ports and parse only the requested one.
2061          */
2062         port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr));
2063         if (port != mib_eth_data->req_port)
2064                 goto exit;
2065
2066         match_data = device_get_match_data(priv->dev);
2067         data = mib_eth_data->data;
2068
2069         for (i = 0; i < match_data->mib_count; i++) {
2070                 mib = &ar8327_mib[i];
2071
2072                 /* The first 3 MIB counters are carried in the skb header */
2073                 if (i < 3) {
2074                         data[i] = mib_ethhdr->data[i];
2075                         continue;
2076                 }
2077
2078                 mib_len = sizeof(uint32_t);
2079
2080                 /* Some mib are 64 bit wide */
2081                 if (mib->size == 2)
2082                         mib_len = sizeof(uint64_t);
2083
2084                 /* Copy the MIB value from the packet into the data buffer */
2085                 memcpy(data + i, skb->data + offset, mib_len);
2086
2087                 /* Set the offset for the next mib */
2088                 offset += mib_len;
2089         }
2090
2091 exit:
2092         /* Complete once all the MIB packets have been received */
2093         if (refcount_dec_and_test(&mib_eth_data->port_parsed))
2094                 complete(&mib_eth_data->rw_done);
2095 }
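
/* A sketch of the autocast layout assumed by the handler above: the first 3
 * counters of ar8327_mib (RxBroad, RxPause, RxMulti) arrive in the tag
 * header's data[] array, while the remaining counters are packed
 * back-to-back in the skb payload, 4 bytes each except the 2-word counters
 * (RxGoodByte, RxBadByte, TxByte) which take 8. For example, counter
 * index 3 (RxFcsErr) is read from payload offset 0.
 */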
2096
2097 static int
2098 qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
2099 {
2100         struct dsa_port *dp = dsa_to_port(ds, port);
2101         struct qca8k_mib_eth_data *mib_eth_data;
2102         struct qca8k_priv *priv = ds->priv;
2103         int ret;
2104
2105         mib_eth_data = &priv->mib_eth_data;
2106
2107         mutex_lock(&mib_eth_data->mutex);
2108
2109         reinit_completion(&mib_eth_data->rw_done);
2110
2111         mib_eth_data->req_port = dp->index;
2112         mib_eth_data->data = data;
2113         refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);
2114
2115         mutex_lock(&priv->reg_mutex);
2116
2117         /* Send mib autocast request */
2118         ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
2119                                  QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
2120                                  FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) |
2121                                  QCA8K_MIB_BUSY);
2122
2123         mutex_unlock(&priv->reg_mutex);
2124
2125         if (ret)
2126                 goto exit;
2127
2128         ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
2129
2130 exit:
2131         mutex_unlock(&mib_eth_data->mutex);
2132
2133         return ret;
2134 }
2135
2136 static void
2137 qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
2138                         uint64_t *data)
2139 {
2140         struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2141         const struct qca8k_match_data *match_data;
2142         const struct qca8k_mib_desc *mib;
2143         u32 reg, i, val;
2144         u32 hi = 0;
2145         int ret;
2146
2147         if (priv->mgmt_master &&
2148             qca8k_get_ethtool_stats_eth(ds, port, data) > 0)
2149                 return;
2150
2151         match_data = of_device_get_match_data(priv->dev);
2152
2153         for (i = 0; i < match_data->mib_count; i++) {
2154                 mib = &ar8327_mib[i];
2155                 reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
2156
2157                 ret = qca8k_read(priv, reg, &val);
2158                 if (ret < 0)
2159                         continue;
2160
2161                 if (mib->size == 2) {
2162                         ret = qca8k_read(priv, reg + 4, &hi);
2163                         if (ret < 0)
2164                                 continue;
2165                 }
2166
2167                 data[i] = val;
2168                 if (mib->size == 2)
2169                         data[i] |= (u64)hi << 32;
2170         }
2171 }
2172
2173 static int
2174 qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
2175 {
2176         const struct qca8k_match_data *match_data;
2177         struct qca8k_priv *priv = ds->priv;
2178
2179         if (sset != ETH_SS_STATS)
2180                 return 0;
2181
2182         match_data = of_device_get_match_data(priv->dev);
2183
2184         return match_data->mib_count;
2185 }
2186
2187 static int
2188 qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee)
2189 {
2190         struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2191         u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
2192         u32 reg;
2193         int ret;
2194
2195         mutex_lock(&priv->reg_mutex);
2196         ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, &reg);
2197         if (ret < 0)
2198                 goto exit;
2199
2200         if (eee->eee_enabled)
2201                 reg |= lpi_en;
2202         else
2203                 reg &= ~lpi_en;
2204         ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
2205
2206 exit:
2207         mutex_unlock(&priv->reg_mutex);
2208         return ret;
2209 }
2210
2211 static int
2212 qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
2213 {
2214         /* Nothing to do on the port's MAC */
2215         return 0;
2216 }
2217
2218 static void
2219 qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
2220 {
2221         struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2222         u32 stp_state;
2223
2224         switch (state) {
2225         case BR_STATE_DISABLED:
2226                 stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
2227                 break;
2228         case BR_STATE_BLOCKING:
2229                 stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
2230                 break;
2231         case BR_STATE_LISTENING:
2232                 stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
2233                 break;
2234         case BR_STATE_LEARNING:
2235                 stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
2236                 break;
2237         case BR_STATE_FORWARDING:
2238         default:
2239                 stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
2240                 break;
2241         }
2242
2243         qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2244                   QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
2245 }
2246
2247 static int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
2248                                   struct dsa_bridge bridge,
2249                                   bool *tx_fwd_offload)
2250 {
2251         struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2252         int port_mask, cpu_port;
2253         int i, ret;
2254
2255         cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
2256         port_mask = BIT(cpu_port);
2257
2258         for (i = 0; i < QCA8K_NUM_PORTS; i++) {
2259                 if (dsa_is_cpu_port(ds, i))
2260                         continue;
2261                 if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
2262                         continue;
2263                 /* Add this port to the portvlan mask of the other ports
2264                  * in the bridge
2265                  */
2266                 ret = regmap_set_bits(priv->regmap,
2267                                       QCA8K_PORT_LOOKUP_CTRL(i),
2268                                       BIT(port));
2269                 if (ret)
2270                         return ret;
2271                 if (i != port)
2272                         port_mask |= BIT(i);
2273         }
2274
2275         /* Add all other ports to this port's portvlan mask */
2276         ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2277                         QCA8K_PORT_LOOKUP_MEMBER, port_mask);
2278
2279         return ret;
2280 }
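
/* A worked example of the bookkeeping above, assuming port 0 is the CPU port
 * and user ports 1 and 2 are bridged: when port 2 joins, BIT(2) is added to
 * port 1's lookup member mask and port 2's own mask is rewritten to
 * BIT(0) | BIT(1), i.e. the CPU port plus every other port already in the
 * bridge.
 */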
2281
2282 static void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
2283                                     struct dsa_bridge bridge)
2284 {
2285         struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2286         int cpu_port, i;
2287
2288         cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
2289
2290         for (i = 0; i < QCA8K_NUM_PORTS; i++) {
2291                 if (dsa_is_cpu_port(ds, i))
2292                         continue;
2293                 if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
2294                         continue;
2295                 /* Remove this port from the portvlan mask of the other
2296                  * ports in the bridge
2297                  */
2298                 regmap_clear_bits(priv->regmap,
2299                                   QCA8K_PORT_LOOKUP_CTRL(i),
2300                                   BIT(port));
2301         }
2302
2303         /* Set the cpu port to be the only one in the portvlan mask of
2304          * this port
2305          */
2306         qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2307                   QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
2308 }
2309
2310 static void
2311 qca8k_port_fast_age(struct dsa_switch *ds, int port)
2312 {
2313         struct qca8k_priv *priv = ds->priv;
2314
2315         mutex_lock(&priv->reg_mutex);
2316         qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
2317         mutex_unlock(&priv->reg_mutex);
2318 }
2319
2320 static int
2321 qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
2322 {
2323         struct qca8k_priv *priv = ds->priv;
2324         unsigned int secs = msecs / 1000;
2325         u32 val;
2326
2327         /* The AGE_TIME reg is set in steps of 7s */
2328         val = secs / 7;
2329
2330         /* Handle case with 0 as val to NOT disable
2331          * learning
2332          */
2333         if (!val)
2334                 val = 1;
2335
2336         return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL, QCA8K_ATU_AGE_TIME_MASK,
2337                                   QCA8K_ATU_AGE_TIME(val));
2338 }
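
/* A quick worked example of the conversion above: the bridge default of
 * 300000 ms gives secs = 300 and val = 300 / 7 = 42, so entries age out
 * after roughly 42 * 7 = 294 s. Requests below 7000 ms would give val = 0
 * and are bumped to 1 (7 s), matching the ageing_time_min advertised in
 * qca8k_setup().
 */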
2339
2340 static int
2341 qca8k_port_enable(struct dsa_switch *ds, int port,
2342                   struct phy_device *phy)
2343 {
2344         struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2345
2346         qca8k_port_set_status(priv, port, 1);
2347         priv->port_sts[port].enabled = 1;
2348
2349         if (dsa_is_user_port(ds, port))
2350                 phy_support_asym_pause(phy);
2351
2352         return 0;
2353 }
2354
2355 static void
2356 qca8k_port_disable(struct dsa_switch *ds, int port)
2357 {
2358         struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2359
2360         qca8k_port_set_status(priv, port, 0);
2361         priv->port_sts[port].enabled = 0;
2362 }
2363
2364 static int
2365 qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
2366 {
2367         struct qca8k_priv *priv = ds->priv;
2368         int i, mtu = 0;
2369
2370         priv->port_mtu[port] = new_mtu;
2371
2372         for (i = 0; i < QCA8K_NUM_PORTS; i++)
2373                 if (priv->port_mtu[i] > mtu)
2374                         mtu = priv->port_mtu[i];
2375
2376         /* Include L2 header / FCS length */
2377         return qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, mtu + ETH_HLEN + ETH_FCS_LEN);
2378 }
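
/* The switch only has a single global frame-size register, hence the
 * max-across-ports logic above. For example, with every port left at the
 * default MTU of 1500 and one port raised to 9000, the register is written
 * with 9000 + ETH_HLEN + ETH_FCS_LEN = 9000 + 14 + 4 = 9018 bytes.
 */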
2379
2380 static int
2381 qca8k_port_max_mtu(struct dsa_switch *ds, int port)
2382 {
2383         return QCA8K_MAX_MTU;
2384 }
2385
2386 static int
2387 qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
2388                       u16 port_mask, u16 vid)
2389 {
2390         /* Set the vid to the port vlan id if no vid is set */
2391         if (!vid)
2392                 vid = QCA8K_PORT_VID_DEF;
2393
2394         return qca8k_fdb_add(priv, addr, port_mask, vid,
2395                              QCA8K_ATU_STATUS_STATIC);
2396 }
2397
2398 static int
2399 qca8k_port_fdb_add(struct dsa_switch *ds, int port,
2400                    const unsigned char *addr, u16 vid)
2401 {
2402         struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2403         u16 port_mask = BIT(port);
2404
2405         return qca8k_port_fdb_insert(priv, addr, port_mask, vid);
2406 }
2407
2408 static int
2409 qca8k_port_fdb_del(struct dsa_switch *ds, int port,
2410                    const unsigned char *addr, u16 vid)
2411 {
2412         struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2413         u16 port_mask = BIT(port);
2414
2415         if (!vid)
2416                 vid = QCA8K_PORT_VID_DEF;
2417
2418         return qca8k_fdb_del(priv, addr, port_mask, vid);
2419 }
2420
2421 static int
2422 qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
2423                     dsa_fdb_dump_cb_t *cb, void *data)
2424 {
2425         struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2426         struct qca8k_fdb _fdb = { 0 };
2427         int cnt = QCA8K_NUM_FDB_RECORDS;
2428         bool is_static;
2429         int ret = 0;
2430
2431         mutex_lock(&priv->reg_mutex);
2432         while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
2433                 if (!_fdb.aging)
2434                         break;
2435                 is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
2436                 ret = cb(_fdb.mac, _fdb.vid, is_static, data);
2437                 if (ret)
2438                         break;
2439         }
2440         mutex_unlock(&priv->reg_mutex);
2441
2442         return 0;
2443 }
2444
2445 static int
2446 qca8k_port_mdb_add(struct dsa_switch *ds, int port,
2447                    const struct switchdev_obj_port_mdb *mdb)
2448 {
2449         struct qca8k_priv *priv = ds->priv;
2450         const u8 *addr = mdb->addr;
2451         u16 vid = mdb->vid;
2452
2453         return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid);
2454 }
2455
2456 static int
2457 qca8k_port_mdb_del(struct dsa_switch *ds, int port,
2458                    const struct switchdev_obj_port_mdb *mdb)
2459 {
2460         struct qca8k_priv *priv = ds->priv;
2461         const u8 *addr = mdb->addr;
2462         u16 vid = mdb->vid;
2463
2464         return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
2465 }
2466
2467 static int
2468 qca8k_port_mirror_add(struct dsa_switch *ds, int port,
2469                       struct dsa_mall_mirror_tc_entry *mirror,
2470                       bool ingress)
2471 {
2472         struct qca8k_priv *priv = ds->priv;
2473         int monitor_port, ret;
2474         u32 reg, val;
2475
2476         /* Check for an existing entry */
2477         if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
2478                 return -EEXIST;
2479
2480         ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
2481         if (ret)
2482                 return ret;
2483
2484         /* The QCA83xx can have only one port set to mirror mode.
2485          * Check that the correct port is requested and return an error
2486          * otherwise. When no mirror port is set, the value is 0xF.
2487          */
2488         monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
2489         if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
2490                 return -EEXIST;
2491
2492         /* Set the monitor port */
2493         val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
2494                          mirror->to_local_port);
2495         ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
2496                                  QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
2497         if (ret)
2498                 return ret;
2499
2500         if (ingress) {
2501                 reg = QCA8K_PORT_LOOKUP_CTRL(port);
2502                 val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
2503         } else {
2504                 reg = QCA8K_REG_PORT_HOL_CTRL1(port);
2505                 val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
2506         }
2507
2508         ret = regmap_update_bits(priv->regmap, reg, val, val);
2509         if (ret)
2510                 return ret;
2511
2512         /* Track mirror port for tx and rx to decide when the
2513          * mirror port has to be disabled.
2514          */
2515         if (ingress)
2516                 priv->mirror_rx |= BIT(port);
2517         else
2518                 priv->mirror_tx |= BIT(port);
2519
2520         return 0;
2521 }
2522
2523 static void
2524 qca8k_port_mirror_del(struct dsa_switch *ds, int port,
2525                       struct dsa_mall_mirror_tc_entry *mirror)
2526 {
2527         struct qca8k_priv *priv = ds->priv;
2528         u32 reg, val;
2529         int ret;
2530
2531         if (mirror->ingress) {
2532                 reg = QCA8K_PORT_LOOKUP_CTRL(port);
2533                 val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
2534         } else {
2535                 reg = QCA8K_REG_PORT_HOL_CTRL1(port);
2536                 val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
2537         }
2538
2539         ret = regmap_clear_bits(priv->regmap, reg, val);
2540         if (ret)
2541                 goto err;
2542
2543         if (mirror->ingress)
2544                 priv->mirror_rx &= ~BIT(port);
2545         else
2546                 priv->mirror_tx &= ~BIT(port);
2547
2548         /* No port is set to send packets to the mirror port. Disable the mirror port */
2549         if (!priv->mirror_rx && !priv->mirror_tx) {
2550                 val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
2551                 ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
2552                                          QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
2553                 if (ret)
2554                         goto err;
2555         }
2556
2557         return;
2558 err:
2559         dev_err(priv->dev, "Failed to del mirror port from %d", port);
2560 }
2559
2560 static int
2561 qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
2562                           struct netlink_ext_ack *extack)
2563 {
2564         struct qca8k_priv *priv = ds->priv;
2565         int ret;
2566
2567         if (vlan_filtering) {
2568                 ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2569                                 QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
2570                                 QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
2571         } else {
2572                 ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2573                                 QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
2574                                 QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
2575         }
2576
2577         return ret;
2578 }
2579
2580 static int
2581 qca8k_port_vlan_add(struct dsa_switch *ds, int port,
2582                     const struct switchdev_obj_port_vlan *vlan,
2583                     struct netlink_ext_ack *extack)
2584 {
2585         bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
2586         bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
2587         struct qca8k_priv *priv = ds->priv;
2588         int ret;
2589
2590         ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
2591         if (ret) {
2592                 dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
2593                 return ret;
2594         }
2595
2596         if (pvid) {
2597                 ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
2598                                 QCA8K_EGREES_VLAN_PORT_MASK(port),
2599                                 QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
2600                 if (ret)
2601                         return ret;
2602
2603                 ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
2604                                   QCA8K_PORT_VLAN_CVID(vlan->vid) |
2605                                   QCA8K_PORT_VLAN_SVID(vlan->vid));
2606         }
2607
2608         return ret;
2609 }
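
/* A short example of the PVID handling above, assuming VLAN 100 is added to
 * port 3 with the PVID flag set: besides the VLAN table entry, the default
 * egress VID field for port 3 is rewritten to 100 and the port's VLAN_CTRL0
 * register gets both CVID and SVID set to 100, which is what classifies
 * untagged ingress traffic on that port into VLAN 100.
 */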
2610
2611 static int
2612 qca8k_port_vlan_del(struct dsa_switch *ds, int port,
2613                     const struct switchdev_obj_port_vlan *vlan)
2614 {
2615         struct qca8k_priv *priv = ds->priv;
2616         int ret;
2617
2618         ret = qca8k_vlan_del(priv, port, vlan->vid);
2619         if (ret)
2620                 dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
2621
2622         return ret;
2623 }
2624
2625 static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
2626 {
2627         struct qca8k_priv *priv = ds->priv;
2628
2629         /* Communicate the switch revision to the internal PHY driver.
2630          * Based on the switch revision, different values need to be
2631          * written to the dbg and mmd regs of the PHY.
2632          * The first 2 bits are used to carry the switch revision to
2633          * the PHY driver.
2634          */
2635         if (port > 0 && port < 6)
2636                 return priv->switch_revision;
2637
2638         return 0;
2639 }
2640
2641 static enum dsa_tag_protocol
2642 qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
2643                        enum dsa_tag_protocol mp)
2644 {
2645         return DSA_TAG_PROTO_QCA;
2646 }
2647
2648 static bool
2649 qca8k_lag_can_offload(struct dsa_switch *ds, struct dsa_lag lag,
2650                       struct netdev_lag_upper_info *info)
2651 {
2652         struct dsa_port *dp;
2653         int members = 0;
2654
2655         if (!lag.id)
2656                 return false;
2657
2658         dsa_lag_foreach_port(dp, ds->dst, &lag)
2659                 /* Includes the port joining the LAG */
2660                 members++;
2661
2662         if (members > QCA8K_NUM_PORTS_FOR_LAG)
2663                 return false;
2664
2665         if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
2666                 return false;
2667
2668         if (info->hash_type != NETDEV_LAG_HASH_L2 &&
2669             info->hash_type != NETDEV_LAG_HASH_L23)
2670                 return false;
2671
2672         return true;
2673 }
2674
2675 static int
2676 qca8k_lag_setup_hash(struct dsa_switch *ds, struct dsa_lag lag,
2677                      struct netdev_lag_upper_info *info)
2678 {
2679         struct net_device *lag_dev = lag.dev;
2680         struct qca8k_priv *priv = ds->priv;
2681         bool unique_lag = true;
2682         unsigned int i;
2683         u32 hash = 0;
2684
2685         switch (info->hash_type) {
2686         case NETDEV_LAG_HASH_L23:
2687                 hash |= QCA8K_TRUNK_HASH_SIP_EN;
2688                 hash |= QCA8K_TRUNK_HASH_DIP_EN;
2689                 fallthrough;
2690         case NETDEV_LAG_HASH_L2:
2691                 hash |= QCA8K_TRUNK_HASH_SA_EN;
2692                 hash |= QCA8K_TRUNK_HASH_DA_EN;
2693                 break;
2694         default: /* We should NEVER reach this */
2695                 return -EOPNOTSUPP;
2696         }
2697
2698         /* Check if we are the only configured LAG */
2699         dsa_lags_foreach_id(i, ds->dst)
2700                 if (i != lag.id && dsa_lag_by_id(ds->dst, i)) {
2701                         unique_lag = false;
2702                         break;
2703                 }
2704
2705         /* The hash mode is global. Make sure the same hash mode
2706          * is set for all 4 possible LAGs.
2707          * If we are the only configured LAG we can set whatever
2708          * hash mode we want.
2709          * To change the hash mode, every LAG must be removed and
2710          * the new mode applied when the last one is set up again.
2711          */
2712         if (unique_lag) {
2713                 priv->lag_hash_mode = hash;
2714         } else if (priv->lag_hash_mode != hash) {
2715                 netdev_err(lag_dev, "Error: Mismatched Hash Mode across different lag is not supported\n");
2716                 return -EOPNOTSUPP;
2717         }
2718
2719         return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
2720                                   QCA8K_TRUNK_HASH_MASK, hash);
2721 }
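
/* For illustration: with an L23 balancing policy the hash enables source and
 * destination IP hashing on top of the L2 source/destination MAC hashing
 * used for plain L2 balancing, i.e.
 *
 *	QCA8K_TRUNK_HASH_SIP_EN | QCA8K_TRUNK_HASH_DIP_EN |
 *	QCA8K_TRUNK_HASH_SA_EN | QCA8K_TRUNK_HASH_DA_EN
 *
 * Any other policy (e.g. L34) has already been rejected by
 * qca8k_lag_can_offload().
 */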
2722
2723 static int
2724 qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
2725                           struct dsa_lag lag, bool delete)
2726 {
2727         struct qca8k_priv *priv = ds->priv;
2728         int ret, id, i;
2729         u32 val;
2730
2731         /* DSA LAG IDs are one-based, hardware is zero-based */
2732         id = lag.id - 1;
2733
2734         /* Read current port member */
2735         ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
2736         if (ret)
2737                 return ret;
2738
2739         /* Shift val to the correct trunk */
2740         val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
2741         val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
2742         if (delete)
2743                 val &= ~BIT(port);
2744         else
2745                 val |= BIT(port);
2746
2747         /* Update port member. With empty portmap disable trunk */
2748         ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
2749                                  QCA8K_REG_GOL_TRUNK_MEMBER(id) |
2750                                  QCA8K_REG_GOL_TRUNK_EN(id),
2751                                  !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
2752                                  val << QCA8K_REG_GOL_TRUNK_SHIFT(id));
2753
2754         /* Search for an empty member when adding, or for the port when deleting */
2755         for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
2756                 ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
2757                 if (ret)
2758                         return ret;
2759
2760                 val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
2761                 val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;
2762
2763                 if (delete) {
2764                         /* If port flagged to be disabled assume this member is
2765                          * empty
2766                          */
2767                         if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
2768                                 continue;
2769
2770                         val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
2771                         if (val != port)
2772                                 continue;
2773                 } else {
2774                         /* If port flagged to be enabled assume this member is
2775                          * already set
2776                          */
2777                         if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
2778                                 continue;
2779                 }
2780
2781                 /* We have found the member to add/remove */
2782                 break;
2783         }
2784
2785         /* Set port in the correct port mask or disable port if in delete mode */
2786         return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
2787                                   QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
2788                                   QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
2789                                   !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
2790                                   port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
2791 }
2792
2793 static int
2794 qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
2795                     struct netdev_lag_upper_info *info)
2796 {
2797         int ret;
2798
2799         if (!qca8k_lag_can_offload(ds, lag, info))
2800                 return -EOPNOTSUPP;
2801
2802         ret = qca8k_lag_setup_hash(ds, lag, info);
2803         if (ret)
2804                 return ret;
2805
2806         return qca8k_lag_refresh_portmap(ds, port, lag, false);
2807 }
2808
2809 static int
2810 qca8k_port_lag_leave(struct dsa_switch *ds, int port,
2811                      struct dsa_lag lag)
2812 {
2813         return qca8k_lag_refresh_portmap(ds, port, lag, true);
2814 }
2815
2816 static void
2817 qca8k_master_change(struct dsa_switch *ds, const struct net_device *master,
2818                     bool operational)
2819 {
2820         struct dsa_port *dp = master->dsa_ptr;
2821         struct qca8k_priv *priv = ds->priv;
2822
2823         /* Ethernet MIB/MDIO is only supported for CPU port 0 */
2824         if (dp->index != 0)
2825                 return;
2826
2827         mutex_lock(&priv->mgmt_eth_data.mutex);
2828         mutex_lock(&priv->mib_eth_data.mutex);
2829
2830         priv->mgmt_master = operational ? (struct net_device *)master : NULL;
2831
2832         mutex_unlock(&priv->mib_eth_data.mutex);
2833         mutex_unlock(&priv->mgmt_eth_data.mutex);
2834 }
2835
2836 static int qca8k_connect_tag_protocol(struct dsa_switch *ds,
2837                                       enum dsa_tag_protocol proto)
2838 {
2839         struct qca_tagger_data *tagger_data;
2840
2841         switch (proto) {
2842         case DSA_TAG_PROTO_QCA:
2843                 tagger_data = ds->tagger_data;
2844
2845                 tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler;
2846                 tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler;
2847
2848                 break;
2849         default:
2850                 return -EOPNOTSUPP;
2851         }
2852
2853         return 0;
2854 }
2855
2856 static int
2857 qca8k_setup(struct dsa_switch *ds)
2858 {
2859         struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2860         int cpu_port, ret, i;
2861         u32 mask;
2862
2863         cpu_port = qca8k_find_cpu_port(ds);
2864         if (cpu_port < 0) {
2865                 dev_err(priv->dev, "No cpu port configured in either cpu port0 or port6");
2866                 return cpu_port;
2867         }
2868
2869         /* Parse the CPU port config to be used later in phylink mac_config */
2870         ret = qca8k_parse_port_config(priv);
2871         if (ret)
2872                 return ret;
2873
2874         ret = qca8k_setup_mdio_bus(priv);
2875         if (ret)
2876                 return ret;
2877
2878         ret = qca8k_setup_of_pws_reg(priv);
2879         if (ret)
2880                 return ret;
2881
2882         ret = qca8k_setup_mac_pwr_sel(priv);
2883         if (ret)
2884                 return ret;
2885
2886         qca8k_setup_pcs(priv, &priv->pcs_port_0, 0);
2887         qca8k_setup_pcs(priv, &priv->pcs_port_6, 6);
2888
2889         /* Make sure MAC06 exchange is disabled */
2890         ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
2891                                 QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
2892         if (ret) {
2893                 dev_err(priv->dev, "failed disabling MAC06 exchange");
2894                 return ret;
2895         }
2896
2897         /* Enable CPU Port */
2898         ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
2899                               QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
2900         if (ret) {
2901                 dev_err(priv->dev, "failed enabling CPU port");
2902                 return ret;
2903         }
2904
2905         /* Enable MIB counters */
2906         ret = qca8k_mib_init(priv);
2907         if (ret)
2908                 dev_warn(priv->dev, "mib init failed");
2909
2910         /* Initial setup of all ports */
2911         for (i = 0; i < QCA8K_NUM_PORTS; i++) {
2912                 /* Disable forwarding by default on all ports */
2913                 ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
2914                                 QCA8K_PORT_LOOKUP_MEMBER, 0);
2915                 if (ret)
2916                         return ret;
2917
2918                 /* Enable QCA header mode on all cpu ports */
2919                 if (dsa_is_cpu_port(ds, i)) {
2920                         ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
2921                                           FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
2922                                           FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
2923                         if (ret) {
2924                                 dev_err(priv->dev, "failed enabling QCA header mode");
2925                                 return ret;
2926                         }
2927                 }
2928
2929                 /* Disable MAC by default on all user ports */
2930                 if (dsa_is_user_port(ds, i))
2931                         qca8k_port_set_status(priv, i, 0);
2932         }
2933
2934         /* Forward all unknown frames to the CPU port for Linux processing.
2935          * Notice that in a multi-cpu config only one port should be set
2936          * for igmp, unknown, multicast and broadcast packets.
2937          */
2938         ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
2939                           FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
2940                           FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
2941                           FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
2942                           FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
2943         if (ret)
2944                 return ret;
2945
2946         /* Set up the connection between the CPU port and the user ports
2947          * and apply the port-specific switch configuration.
2948          */
2949         for (i = 0; i < QCA8K_NUM_PORTS; i++) {
2950                 /* CPU port gets connected to all user ports of the switch */
2951                 if (dsa_is_cpu_port(ds, i)) {
2952                         ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
2953                                         QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
2954                         if (ret)
2955                                 return ret;
2956                 }
2957
2958                 /* Individual user ports get connected to CPU port only */
2959                 if (dsa_is_user_port(ds, i)) {
2960                         ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
2961                                         QCA8K_PORT_LOOKUP_MEMBER,
2962                                         BIT(cpu_port));
2963                         if (ret)
2964                                 return ret;
2965
2966                         /* Enable ARP Auto-learning by default */
2967                         ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
2968                                               QCA8K_PORT_LOOKUP_LEARN);
2969                         if (ret)
2970                                 return ret;
2971
2972                         /* For port based vlans to work we need to set the
2973                          * default egress vid
2974                          */
2975                         ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
2976                                         QCA8K_EGREES_VLAN_PORT_MASK(i),
2977                                         QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
2978                         if (ret)
2979                                 return ret;
2980
2981                         ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
2982                                           QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
2983                                           QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
2984                         if (ret)
2985                                 return ret;
2986                 }
2987
2988                 /* Port 5 of the qca8337 has some problems under flood conditions. The
2989                  * original legacy driver had specific buffer and priority settings
2990                  * for the different ports, suggested by the QCA switch team. Add these
2991                  * missing settings to improve switch stability under load.
2992                  * This problem is limited to the qca8337; other qca8k switches are not affected.
2993                  */
2994                 if (priv->switch_id == QCA8K_ID_QCA8337) {
2995                         switch (i) {
2996                         /* The 2 CPU ports and port 5 require a different
2997                          * priority than the other ports.
2998                          */
2999                         case 0:
3000                         case 5:
3001                         case 6:
3002                                 mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
3003                                         QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
3004                                         QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
3005                                         QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
3006                                         QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
3007                                         QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
3008                                         QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
3009                                 break;
3010                         default:
3011                                 mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
3012                                         QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
3013                                         QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
3014                                         QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
3015                                         QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
3016                         }
3017                         qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask);
3018
3019                         mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
3020                         QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
3021                         QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
3022                         QCA8K_PORT_HOL_CTRL1_WRED_EN;
3023                         qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
3024                                   QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
3025                                   QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
3026                                   QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
3027                                   QCA8K_PORT_HOL_CTRL1_WRED_EN,
3028                                   mask);
3029                 }
3030
3031                 /* Set the initial MTU for every port.
3032                  * We only have a general MTU setting, so track every port
3033                  * and set the max across all ports.
3034                  * Set the per port MTU to 1500 as the MTU change function
3035                  * will add the overhead; if it were set to 1518 the
3036                  * overhead would be applied again and we would end up with
3037                  * an MTU of 1536 instead of 1518.
3038                  */
3039                 priv->port_mtu[i] = ETH_DATA_LEN;
3040         }
3041
3042         /* Special GLOBAL_FC_THRESH values are needed for the ar8327 switch */
3043         if (priv->switch_id == QCA8K_ID_QCA8327) {
3044                 mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
3045                        QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
3046                 qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
3047                           QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
3048                           QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
3049                           mask);
3050         }
3051
3052         /* Set up our port MTUs to match the power-on defaults */
3053         ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
3054         if (ret)
3055                 dev_warn(priv->dev, "failed setting the default MTU");
3056
3057         /* Flush the FDB table */
3058         qca8k_fdb_flush(priv);
3059
3060         /* Set the min and max ageing values supported */
3061         ds->ageing_time_min = 7000;
3062         ds->ageing_time_max = 458745000;
3063
3064         /* Set max number of LAGs supported */
3065         ds->num_lag_ids = QCA8K_NUM_LAGS;
3066
3067         return 0;
3068 }
3069
3070 static const struct dsa_switch_ops qca8k_switch_ops = {
3071         .get_tag_protocol       = qca8k_get_tag_protocol,
3072         .setup                  = qca8k_setup,
3073         .get_strings            = qca8k_get_strings,
3074         .get_ethtool_stats      = qca8k_get_ethtool_stats,
3075         .get_sset_count         = qca8k_get_sset_count,
3076         .set_ageing_time        = qca8k_set_ageing_time,
3077         .get_mac_eee            = qca8k_get_mac_eee,
3078         .set_mac_eee            = qca8k_set_mac_eee,
3079         .port_enable            = qca8k_port_enable,
3080         .port_disable           = qca8k_port_disable,
3081         .port_change_mtu        = qca8k_port_change_mtu,
3082         .port_max_mtu           = qca8k_port_max_mtu,
3083         .port_stp_state_set     = qca8k_port_stp_state_set,
3084         .port_bridge_join       = qca8k_port_bridge_join,
3085         .port_bridge_leave      = qca8k_port_bridge_leave,
3086         .port_fast_age          = qca8k_port_fast_age,
3087         .port_fdb_add           = qca8k_port_fdb_add,
3088         .port_fdb_del           = qca8k_port_fdb_del,
3089         .port_fdb_dump          = qca8k_port_fdb_dump,
3090         .port_mdb_add           = qca8k_port_mdb_add,
3091         .port_mdb_del           = qca8k_port_mdb_del,
3092         .port_mirror_add        = qca8k_port_mirror_add,
3093         .port_mirror_del        = qca8k_port_mirror_del,
3094         .port_vlan_filtering    = qca8k_port_vlan_filtering,
3095         .port_vlan_add          = qca8k_port_vlan_add,
3096         .port_vlan_del          = qca8k_port_vlan_del,
3097         .phylink_get_caps       = qca8k_phylink_get_caps,
3098         .phylink_mac_select_pcs = qca8k_phylink_mac_select_pcs,
3099         .phylink_mac_config     = qca8k_phylink_mac_config,
3100         .phylink_mac_link_down  = qca8k_phylink_mac_link_down,
3101         .phylink_mac_link_up    = qca8k_phylink_mac_link_up,
3102         .get_phy_flags          = qca8k_get_phy_flags,
3103         .port_lag_join          = qca8k_port_lag_join,
3104         .port_lag_leave         = qca8k_port_lag_leave,
3105         .master_state_change    = qca8k_master_change,
3106         .connect_tag_protocol   = qca8k_connect_tag_protocol,
3107 };
3108
3109 static int qca8k_read_switch_id(struct qca8k_priv *priv)
3110 {
3111         const struct qca8k_match_data *data;
3112         u32 val;
3113         u8 id;
3114         int ret;
3115
3116         /* Get the switch ID from the compatible string */
3117         data = of_device_get_match_data(priv->dev);
3118         if (!data)
3119                 return -ENODEV;
3120
3121         ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
3122         if (ret < 0)
3123                 return -ENODEV;
3124
3125         id = QCA8K_MASK_CTRL_DEVICE_ID(val);
3126         if (id != data->id) {
3127                 dev_err(priv->dev, "Switch id detected %x but expected %x", id, data->id);
3128                 return -ENODEV;
3129         }
3130
3131         priv->switch_id = id;
3132
3133         /* Save revision to communicate to the internal PHY driver */
3134         priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);
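        /* Both fields come from the same MASK_CTRL read; per the field macros
         * in qca8k.h the device ID appears to occupy bits 15:8 and the
         * revision bits 7:0 of the register.
         */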
3135
3136         return 0;
3137 }
3138
3139 static int
3140 qca8k_sw_probe(struct mdio_device *mdiodev)
3141 {
3142         struct qca8k_priv *priv;
3143         int ret;
3144
3145         /* Allocate the private data struct so that we can probe the switch's
3146          * ID register
3147          */
3148         priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
3149         if (!priv)
3150                 return -ENOMEM;
3151
3152         priv->bus = mdiodev->bus;
3153         priv->dev = &mdiodev->dev;
3154
3155         priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
3156                                                    GPIOD_ASIS);
3157         if (IS_ERR(priv->reset_gpio))
3158                 return PTR_ERR(priv->reset_gpio);
3159
3160         if (priv->reset_gpio) {
3161                 gpiod_set_value_cansleep(priv->reset_gpio, 1);
3162                 /* The active low duration must be greater than 10 ms
3163                  * and checkpatch.pl wants 20 ms.
3164                  */
3165                 msleep(20);
3166                 gpiod_set_value_cansleep(priv->reset_gpio, 0);
3167         }
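        /* gpiod_set_value_cansleep() takes the logical (asserted) value, so
         * writing 1 asserts reset here; the active-low polarity itself is
         * expected to come from the GPIO flags in the device tree.
         */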
3168
3169         /* Start by setting up the register mapping */
3170         priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv,
3171                                         &qca8k_regmap_config);
3172         if (IS_ERR(priv->regmap)) {
3173                 dev_err(priv->dev, "regmap initialization failed");
3174                 return PTR_ERR(priv->regmap);
3175         }
3176
3177         priv->mdio_cache.page = 0xffff;
3178         priv->mdio_cache.lo = 0xffff;
3179         priv->mdio_cache.hi = 0xffff;
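        /* 0xffff cannot match the 10-bit page number, so the first page switch
         * always reaches the hardware; presumably it was also chosen for the
         * lo/hi halves so the initial data writes are not skipped as cached.
         */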
3180
3181         /* Check the detected switch id */
3182         ret = qca8k_read_switch_id(priv);
3183         if (ret)
3184                 return ret;
3185
3186         priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
3187         if (!priv->ds)
3188                 return -ENOMEM;
3189
3190         mutex_init(&priv->mgmt_eth_data.mutex);
3191         init_completion(&priv->mgmt_eth_data.rw_done);
3192
3193         mutex_init(&priv->mib_eth_data.mutex);
3194         init_completion(&priv->mib_eth_data.rw_done);
3195
3196         priv->ds->dev = &mdiodev->dev;
3197         priv->ds->num_ports = QCA8K_NUM_PORTS;
3198         priv->ds->priv = priv;
3199         priv->ops = qca8k_switch_ops;
3200         priv->ds->ops = &priv->ops;
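        /* The ops table is copied into the private struct instead of being
         * referenced directly, presumably so it can be tweaked per device
         * instance before the switch is registered.
         */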
3201         mutex_init(&priv->reg_mutex);
3202         dev_set_drvdata(&mdiodev->dev, priv);
3203
3204         return dsa_register_switch(priv->ds);
3205 }
3206
3207 static void
3208 qca8k_sw_remove(struct mdio_device *mdiodev)
3209 {
3210         struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
3211         int i;
3212
3213         if (!priv)
3214                 return;
3215
3216         for (i = 0; i < QCA8K_NUM_PORTS; i++)
3217                 qca8k_port_set_status(priv, i, 0);
3218
3219         dsa_unregister_switch(priv->ds);
3220
3221         dev_set_drvdata(&mdiodev->dev, NULL);
3222 }
3223
3224 static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
3225 {
3226         struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
3227
3228         if (!priv)
3229                 return;
3230
3231         dsa_switch_shutdown(priv->ds);
3232
3233         dev_set_drvdata(&mdiodev->dev, NULL);
3234 }
3235
3236 #ifdef CONFIG_PM_SLEEP
3237 static void
3238 qca8k_set_pm(struct qca8k_priv *priv, int enable)
3239 {
3240         int i;
3241
3242         for (i = 0; i < QCA8K_NUM_PORTS; i++) {
3243                 if (!priv->port_sts[i].enabled)
3244                         continue;
3245
3246                 qca8k_port_set_status(priv, i, enable);
3247         }
3248 }
3249
3250 static int qca8k_suspend(struct device *dev)
3251 {
3252         struct qca8k_priv *priv = dev_get_drvdata(dev);
3253
3254         qca8k_set_pm(priv, 0);
3255
3256         return dsa_switch_suspend(priv->ds);
3257 }
3258
3259 static int qca8k_resume(struct device *dev)
3260 {
3261         struct qca8k_priv *priv = dev_get_drvdata(dev);
3262
3263         qca8k_set_pm(priv, 1);
3264
3265         return dsa_switch_resume(priv->ds);
3266 }
3267 #endif /* CONFIG_PM_SLEEP */
3268
3269 static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
3270                          qca8k_suspend, qca8k_resume);
3271
3272 static const struct qca8k_match_data qca8327 = {
3273         .id = QCA8K_ID_QCA8327,
3274         .reduced_package = true,
3275         .mib_count = QCA8K_QCA832X_MIB_COUNT,
3276 };
3277
3278 static const struct qca8k_match_data qca8328 = {
3279         .id = QCA8K_ID_QCA8327,
3280         .mib_count = QCA8K_QCA832X_MIB_COUNT,
3281 };
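/* Note: the two entries above share QCA8K_ID_QCA8327; the qca8327 one is
 * simply marked as the reduced-package variant of the same switch.
 */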
3282
3283 static const struct qca8k_match_data qca833x = {
3284         .id = QCA8K_ID_QCA8337,
3285         .mib_count = QCA8K_QCA833X_MIB_COUNT,
3286 };
3287
3288 static const struct of_device_id qca8k_of_match[] = {
3289         { .compatible = "qca,qca8327", .data = &qca8327 },
3290         { .compatible = "qca,qca8328", .data = &qca8328 },
3291         { .compatible = "qca,qca8334", .data = &qca833x },
3292         { .compatible = "qca,qca8337", .data = &qca833x },
3293         { /* sentinel */ },
3294 };
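/* Illustrative only: a board device tree would typically instantiate one of
 * these compatibles as a child of an MDIO bus, roughly along the lines of
 * (the MDIO address and node names are placeholders, see the qca8k binding):
 *
 *	&mdio {
 *		switch@10 {
 *			compatible = "qca,qca8337";
 *			reg = <0x10>;
 *			...
 *		};
 *	};
 */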
3295
3296 static struct mdio_driver qca8kmdio_driver = {
3297         .probe  = qca8k_sw_probe,
3298         .remove = qca8k_sw_remove,
3299         .shutdown = qca8k_sw_shutdown,
3300         .mdiodrv.driver = {
3301                 .name = "qca8k",
3302                 .of_match_table = qca8k_of_match,
3303                 .pm = &qca8k_pm_ops,
3304         },
3305 };
3306
3307 mdio_module_driver(qca8kmdio_driver);
3308
3309 MODULE_AUTHOR("Mathieu Olivari, John Crispin <[email protected]>");
3310 MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
3311 MODULE_LICENSE("GPL v2");
3312 MODULE_ALIAS("platform:qca8k");