2 * B53 switch driver main logic
7 * Permission to use, copy, modify, and/or distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22 #include <linux/delay.h>
23 #include <linux/export.h>
24 #include <linux/gpio.h>
25 #include <linux/kernel.h>
26 #include <linux/module.h>
27 #include <linux/platform_data/b53.h>
28 #include <linux/phy.h>
29 #include <linux/phylink.h>
30 #include <linux/etherdevice.h>
31 #include <linux/if_bridge.h>
43 /* BCM5365 MIB counters */
44 static const struct b53_mib_desc b53_mibs_65[] = {
45 { 8, 0x00, "TxOctets" },
46 { 4, 0x08, "TxDropPkts" },
47 { 4, 0x10, "TxBroadcastPkts" },
48 { 4, 0x14, "TxMulticastPkts" },
49 { 4, 0x18, "TxUnicastPkts" },
50 { 4, 0x1c, "TxCollisions" },
51 { 4, 0x20, "TxSingleCollision" },
52 { 4, 0x24, "TxMultipleCollision" },
53 { 4, 0x28, "TxDeferredTransmit" },
54 { 4, 0x2c, "TxLateCollision" },
55 { 4, 0x30, "TxExcessiveCollision" },
56 { 4, 0x38, "TxPausePkts" },
57 { 8, 0x44, "RxOctets" },
58 { 4, 0x4c, "RxUndersizePkts" },
59 { 4, 0x50, "RxPausePkts" },
60 { 4, 0x54, "Pkts64Octets" },
61 { 4, 0x58, "Pkts65to127Octets" },
62 { 4, 0x5c, "Pkts128to255Octets" },
63 { 4, 0x60, "Pkts256to511Octets" },
64 { 4, 0x64, "Pkts512to1023Octets" },
65 { 4, 0x68, "Pkts1024to1522Octets" },
66 { 4, 0x6c, "RxOversizePkts" },
67 { 4, 0x70, "RxJabbers" },
68 { 4, 0x74, "RxAlignmentErrors" },
69 { 4, 0x78, "RxFCSErrors" },
70 { 8, 0x7c, "RxGoodOctets" },
71 { 4, 0x84, "RxDropPkts" },
72 { 4, 0x88, "RxUnicastPkts" },
73 { 4, 0x8c, "RxMulticastPkts" },
74 { 4, 0x90, "RxBroadcastPkts" },
75 { 4, 0x94, "RxSAChanges" },
76 { 4, 0x98, "RxFragments" },
79 #define B53_MIBS_65_SIZE ARRAY_SIZE(b53_mibs_65)
81 /* BCM63xx MIB counters */
82 static const struct b53_mib_desc b53_mibs_63xx[] = {
83 { 8, 0x00, "TxOctets" },
84 { 4, 0x08, "TxDropPkts" },
85 { 4, 0x0c, "TxQoSPkts" },
86 { 4, 0x10, "TxBroadcastPkts" },
87 { 4, 0x14, "TxMulticastPkts" },
88 { 4, 0x18, "TxUnicastPkts" },
89 { 4, 0x1c, "TxCollisions" },
90 { 4, 0x20, "TxSingleCollision" },
91 { 4, 0x24, "TxMultipleCollision" },
92 { 4, 0x28, "TxDeferredTransmit" },
93 { 4, 0x2c, "TxLateCollision" },
94 { 4, 0x30, "TxExcessiveCollision" },
95 { 4, 0x38, "TxPausePkts" },
96 { 8, 0x3c, "TxQoSOctets" },
97 { 8, 0x44, "RxOctets" },
98 { 4, 0x4c, "RxUndersizePkts" },
99 { 4, 0x50, "RxPausePkts" },
100 { 4, 0x54, "Pkts64Octets" },
101 { 4, 0x58, "Pkts65to127Octets" },
102 { 4, 0x5c, "Pkts128to255Octets" },
103 { 4, 0x60, "Pkts256to511Octets" },
104 { 4, 0x64, "Pkts512to1023Octets" },
105 { 4, 0x68, "Pkts1024to1522Octets" },
106 { 4, 0x6c, "RxOversizePkts" },
107 { 4, 0x70, "RxJabbers" },
108 { 4, 0x74, "RxAlignmentErrors" },
109 { 4, 0x78, "RxFCSErrors" },
110 { 8, 0x7c, "RxGoodOctets" },
111 { 4, 0x84, "RxDropPkts" },
112 { 4, 0x88, "RxUnicastPkts" },
113 { 4, 0x8c, "RxMulticastPkts" },
114 { 4, 0x90, "RxBroadcastPkts" },
115 { 4, 0x94, "RxSAChanges" },
116 { 4, 0x98, "RxFragments" },
117 { 4, 0xa0, "RxSymbolErrors" },
118 { 4, 0xa4, "RxQoSPkts" },
119 { 8, 0xa8, "RxQoSOctets" },
120 { 4, 0xb0, "Pkts1523to2047Octets" },
121 { 4, 0xb4, "Pkts2048to4095Octets" },
122 { 4, 0xb8, "Pkts4096to8191Octets" },
123 { 4, 0xbc, "Pkts8192to9728Octets" },
124 { 4, 0xc0, "RxDiscarded" },
127 #define B53_MIBS_63XX_SIZE ARRAY_SIZE(b53_mibs_63xx)
130 static const struct b53_mib_desc b53_mibs[] = {
131 { 8, 0x00, "TxOctets" },
132 { 4, 0x08, "TxDropPkts" },
133 { 4, 0x10, "TxBroadcastPkts" },
134 { 4, 0x14, "TxMulticastPkts" },
135 { 4, 0x18, "TxUnicastPkts" },
136 { 4, 0x1c, "TxCollisions" },
137 { 4, 0x20, "TxSingleCollision" },
138 { 4, 0x24, "TxMultipleCollision" },
139 { 4, 0x28, "TxDeferredTransmit" },
140 { 4, 0x2c, "TxLateCollision" },
141 { 4, 0x30, "TxExcessiveCollision" },
142 { 4, 0x38, "TxPausePkts" },
143 { 8, 0x50, "RxOctets" },
144 { 4, 0x58, "RxUndersizePkts" },
145 { 4, 0x5c, "RxPausePkts" },
146 { 4, 0x60, "Pkts64Octets" },
147 { 4, 0x64, "Pkts65to127Octets" },
148 { 4, 0x68, "Pkts128to255Octets" },
149 { 4, 0x6c, "Pkts256to511Octets" },
150 { 4, 0x70, "Pkts512to1023Octets" },
151 { 4, 0x74, "Pkts1024to1522Octets" },
152 { 4, 0x78, "RxOversizePkts" },
153 { 4, 0x7c, "RxJabbers" },
154 { 4, 0x80, "RxAlignmentErrors" },
155 { 4, 0x84, "RxFCSErrors" },
156 { 8, 0x88, "RxGoodOctets" },
157 { 4, 0x90, "RxDropPkts" },
158 { 4, 0x94, "RxUnicastPkts" },
159 { 4, 0x98, "RxMulticastPkts" },
160 { 4, 0x9c, "RxBroadcastPkts" },
161 { 4, 0xa0, "RxSAChanges" },
162 { 4, 0xa4, "RxFragments" },
163 { 4, 0xa8, "RxJumboPkts" },
164 { 4, 0xac, "RxSymbolErrors" },
165 { 4, 0xc0, "RxDiscarded" },
168 #define B53_MIBS_SIZE ARRAY_SIZE(b53_mibs)
170 static const struct b53_mib_desc b53_mibs_58xx[] = {
171 { 8, 0x00, "TxOctets" },
172 { 4, 0x08, "TxDropPkts" },
173 { 4, 0x0c, "TxQPKTQ0" },
174 { 4, 0x10, "TxBroadcastPkts" },
175 { 4, 0x14, "TxMulticastPkts" },
176 { 4, 0x18, "TxUnicastPKts" },
177 { 4, 0x1c, "TxCollisions" },
178 { 4, 0x20, "TxSingleCollision" },
179 { 4, 0x24, "TxMultipleCollision" },
180 { 4, 0x28, "TxDeferredCollision" },
181 { 4, 0x2c, "TxLateCollision" },
182 { 4, 0x30, "TxExcessiveCollision" },
183 { 4, 0x34, "TxFrameInDisc" },
184 { 4, 0x38, "TxPausePkts" },
185 { 4, 0x3c, "TxQPKTQ1" },
186 { 4, 0x40, "TxQPKTQ2" },
187 { 4, 0x44, "TxQPKTQ3" },
188 { 4, 0x48, "TxQPKTQ4" },
189 { 4, 0x4c, "TxQPKTQ5" },
190 { 8, 0x50, "RxOctets" },
191 { 4, 0x58, "RxUndersizePkts" },
192 { 4, 0x5c, "RxPausePkts" },
193 { 4, 0x60, "RxPkts64Octets" },
194 { 4, 0x64, "RxPkts65to127Octets" },
195 { 4, 0x68, "RxPkts128to255Octets" },
196 { 4, 0x6c, "RxPkts256to511Octets" },
197 { 4, 0x70, "RxPkts512to1023Octets" },
198 { 4, 0x74, "RxPkts1024toMaxPktsOctets" },
199 { 4, 0x78, "RxOversizePkts" },
200 { 4, 0x7c, "RxJabbers" },
201 { 4, 0x80, "RxAlignmentErrors" },
202 { 4, 0x84, "RxFCSErrors" },
203 { 8, 0x88, "RxGoodOctets" },
204 { 4, 0x90, "RxDropPkts" },
205 { 4, 0x94, "RxUnicastPkts" },
206 { 4, 0x98, "RxMulticastPkts" },
207 { 4, 0x9c, "RxBroadcastPkts" },
208 { 4, 0xa0, "RxSAChanges" },
209 { 4, 0xa4, "RxFragments" },
210 { 4, 0xa8, "RxJumboPkt" },
211 { 4, 0xac, "RxSymblErr" },
212 { 4, 0xb0, "InRangeErrCount" },
213 { 4, 0xb4, "OutRangeErrCount" },
214 { 4, 0xb8, "EEELpiEvent" },
215 { 4, 0xbc, "EEELpiDuration" },
216 { 4, 0xc0, "RxDiscard" },
217 { 4, 0xc8, "TxQPKTQ6" },
218 { 4, 0xcc, "TxQPKTQ7" },
219 { 4, 0xd0, "TxPkts64Octets" },
220 { 4, 0xd4, "TxPkts65to127Octets" },
221 { 4, 0xd8, "TxPkts128to255Octets" },
222 { 4, 0xdc, "TxPkts256to511Ocets" },
223 { 4, 0xe0, "TxPkts512to1023Ocets" },
224 { 4, 0xe4, "TxPkts1024toMaxPktOcets" },
227 #define B53_MIBS_58XX_SIZE ARRAY_SIZE(b53_mibs_58xx)
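/* Kick off an indirect VLAN table operation and poll for the hardware to
 * clear VTA_START_CMD (roughly 1-2ms worst case). Callers are expected to
 * program the VTA index/entry registers first, e.g.:
 *
 *	b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
 *	b53_write32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], entry);
 *	b53_do_vlan_op(dev, VTA_CMD_WRITE);
 */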
229 static int b53_do_vlan_op(struct b53_device *dev, u8 op)
233 b53_write8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], VTA_START_CMD | op);
235 for (i = 0; i < 10; i++) {
238 b53_read8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], &vta);
239 if (!(vta & VTA_START_CMD))
242 usleep_range(100, 200);
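/* Program one VLAN table entry. 5325 and 5365 use their direct
 * VLAN_WRITE/TABLE_ACCESS register pairs, everything else goes through
 * the indirect VTA registers and b53_do_vlan_op().
 */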
248 static void b53_set_vlan_entry(struct b53_device *dev, u16 vid,
249 struct b53_vlan *vlan)
255 entry = ((vlan->untag & VA_UNTAG_MASK_25) <<
256 VA_UNTAG_S_25) | vlan->members;
257 if (dev->core_rev >= 3)
258 entry |= VA_VALID_25_R4 | vid << VA_VID_HIGH_S;
260 entry |= VA_VALID_25;
263 b53_write32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, entry);
264 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid |
265 VTA_RW_STATE_WR | VTA_RW_OP_EN);
266 } else if (is5365(dev)) {
270 entry = ((vlan->untag & VA_UNTAG_MASK_65) <<
271 VA_UNTAG_S_65) | vlan->members | VA_VALID_65;
273 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, entry);
274 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid |
275 VTA_RW_STATE_WR | VTA_RW_OP_EN);
277 b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
278 b53_write32(dev, B53_ARLIO_PAGE, dev->vta_regs[2],
279 (vlan->untag << VTE_UNTAG_S) | vlan->members);
281 b53_do_vlan_op(dev, VTA_CMD_WRITE);
284 dev_dbg(dev->ds->dev, "VID: %d, members: 0x%04x, untag: 0x%04x\n",
285 vid, vlan->members, vlan->untag);
288 static void b53_get_vlan_entry(struct b53_device *dev, u16 vid,
289 struct b53_vlan *vlan)
294 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid |
295 VTA_RW_STATE_RD | VTA_RW_OP_EN);
296 b53_read32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, &entry);
298 if (dev->core_rev >= 3)
299 vlan->valid = !!(entry & VA_VALID_25_R4);
301 vlan->valid = !!(entry & VA_VALID_25);
302 vlan->members = entry & VA_MEMBER_MASK;
303 vlan->untag = (entry >> VA_UNTAG_S_25) & VA_UNTAG_MASK_25;
305 } else if (is5365(dev)) {
308 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid |
309 VTA_RW_STATE_WR | VTA_RW_OP_EN);
310 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, &entry);
312 vlan->valid = !!(entry & VA_VALID_65);
313 vlan->members = entry & VA_MEMBER_MASK;
314 vlan->untag = (entry >> VA_UNTAG_S_65) & VA_UNTAG_MASK_65;
318 b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
319 b53_do_vlan_op(dev, VTA_CMD_READ);
320 b53_read32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], &entry);
321 vlan->members = entry & VTE_MEMBERS;
322 vlan->untag = (entry >> VTE_UNTAG_S) & VTE_MEMBERS;
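/* Globally enable or disable frame forwarding. Unicast, multicast and
 * IP multicast lookup-based forwarding stay enabled so that flooding is
 * governed by the per-port flood masks (see b53_br_egress_floods()).
 */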
327 static void b53_set_forwarding(struct b53_device *dev, int enable)
331 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
334 mgmt |= SM_SW_FWD_EN;
336 mgmt &= ~SM_SW_FWD_EN;
338 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
340 /* Include IMP port in dumb forwarding mode
342 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
343 mgmt |= B53_MII_DUMB_FWDG_EN;
344 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
346 /* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether
347 * frames should be flooded or not.
349 b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
350 mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN;
351 b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
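/* Enable or disable 802.1Q VLAN operation. vc0/vc1 gate VLAN lookups and
 * reserved multicast handling, vc4 selects what to do with frames that
 * fail the ingress VID check, and vc5 controls dropping on VLAN table
 * misses; enable_filtering picks the drop vs. forward behaviour.
 */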
354 static void b53_enable_vlan(struct b53_device *dev, bool enable,
355 bool enable_filtering)
357 u8 mgmt, vc0, vc1, vc4 = 0, vc5;
359 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
360 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, &vc0);
361 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, &vc1);
363 if (is5325(dev) || is5365(dev)) {
364 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);
365 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, &vc5);
366 } else if (is63xx(dev)) {
367 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, &vc4);
368 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, &vc5);
370 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, &vc4);
371 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5);
375 vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
376 vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
377 vc4 &= ~VC4_ING_VID_CHECK_MASK;
378 if (enable_filtering) {
379 vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
380 vc5 |= VC5_DROP_VTABLE_MISS;
382 vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
383 vc5 &= ~VC5_DROP_VTABLE_MISS;
387 vc0 &= ~VC0_RESERVED_1;
389 if (is5325(dev) || is5365(dev))
390 vc1 |= VC1_RX_MCST_TAG_EN;
393 vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID);
394 vc1 &= ~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN);
395 vc4 &= ~VC4_ING_VID_CHECK_MASK;
396 vc5 &= ~VC5_DROP_VTABLE_MISS;
398 if (is5325(dev) || is5365(dev))
399 vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
401 vc4 |= VC4_ING_VID_VIO_TO_IMP << VC4_ING_VID_CHECK_S;
403 if (is5325(dev) || is5365(dev))
404 vc1 &= ~VC1_RX_MCST_TAG_EN;
407 if (!is5325(dev) && !is5365(dev))
408 vc5 &= ~VC5_VID_FFF_EN;
410 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, vc0);
411 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, vc1);
413 if (is5325(dev) || is5365(dev)) {
414 /* enable the high 8 bit vid check on 5325 */
415 if (is5325(dev) && enable)
416 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3,
419 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0);
421 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, vc4);
422 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, vc5);
423 } else if (is63xx(dev)) {
424 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3_63XX, 0);
425 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, vc4);
426 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, vc5);
428 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0);
429 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, vc4);
430 b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, vc5);
433 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
435 dev->vlan_enabled = enable;
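/* Enable jumbo frames on all enabled ports; 5325/5365 have no jumbo
 * support, and 10/100 ports are only included when allow_10_100 is set.
 */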
438 static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100)
441 u16 max_size = JMS_MIN_SIZE;
443 if (is5325(dev) || is5365(dev))
447 port_mask = dev->enabled_ports;
448 max_size = JMS_MAX_SIZE;
450 port_mask |= JPM_10_100_JUMBO_EN;
453 b53_write32(dev, B53_JUMBO_PAGE, dev->jumbo_pm_reg, port_mask);
454 return b53_write16(dev, B53_JUMBO_PAGE, dev->jumbo_size_reg, max_size);
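/* Fast-age ARL entries matching the given scope mask (static, per-port
 * or per-VLAN), poll until FAST_AGE_DONE clears, then restore the
 * default "age dynamic entries only" behaviour.
 */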
457 static int b53_flush_arl(struct b53_device *dev, u8 mask)
461 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
462 FAST_AGE_DONE | FAST_AGE_DYNAMIC | mask);
464 for (i = 0; i < 10; i++) {
467 b53_read8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
470 if (!(fast_age_ctrl & FAST_AGE_DONE))
478 /* Only age dynamic entries (default behavior) */
479 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, FAST_AGE_DYNAMIC);
483 static int b53_fast_age_port(struct b53_device *dev, int port)
485 b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_PORT_CTRL, port);
487 return b53_flush_arl(dev, FAST_AGE_PORT);
490 static int b53_fast_age_vlan(struct b53_device *dev, u16 vid)
492 b53_write16(dev, B53_CTRL_PAGE, B53_FAST_AGE_VID_CTRL, vid);
494 return b53_flush_arl(dev, FAST_AGE_VLAN);
497 void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
499 struct b53_device *dev = ds->priv;
/* Enable the IMP port to be in the same VLAN as the other ports
 * on a per-port basis such that we only have Port i and IMP in
 * the same VLAN.
 */
507 b53_for_each_port(dev, i) {
508 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &pvlan);
509 pvlan |= BIT(cpu_port);
510 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), pvlan);
513 EXPORT_SYMBOL(b53_imp_vlan_setup);
515 int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
517 struct b53_device *dev = ds->priv;
518 unsigned int cpu_port;
522 if (!dsa_is_user_port(ds, port))
525 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
527 b53_br_egress_floods(ds, port, true, true);
529 if (dev->ops->irq_enable)
530 ret = dev->ops->irq_enable(dev, port);
534 /* Clear the Rx and Tx disable bits and set to no spanning tree */
535 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0);
/* Set this port, and only this one, to be in the default VLAN;
 * if it is a member of a bridge, restore the membership it had
 * before the port was brought down.
 */
541 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
544 pvlan |= dev->ports[port].vlan_ctl_mask;
545 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
547 b53_imp_vlan_setup(ds, cpu_port);
549 /* If EEE was enabled, restore it */
550 if (dev->ports[port].eee.eee_enabled)
551 b53_eee_enable_set(ds, port, true);
555 EXPORT_SYMBOL(b53_enable_port);
557 void b53_disable_port(struct dsa_switch *ds, int port)
559 struct b53_device *dev = ds->priv;
562 /* Disable Tx/Rx for the port */
b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
564 reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE;
565 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
567 if (dev->ops->irq_disable)
568 dev->ops->irq_disable(dev, port);
570 EXPORT_SYMBOL(b53_disable_port);
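/* Configure Broadcom tag support on an IMP/CPU port: resolve the
 * per-port BRCM_HDR enable bit, switch to managed forwarding mode when
 * tagging is requested and, on newer devices, update the per-port RX/TX
 * Broadcom tag disable registers accordingly.
 */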
572 void b53_brcm_hdr_setup(struct dsa_switch *ds, int port)
574 struct b53_device *dev = ds->priv;
575 bool tag_en = !(dev->tag_protocol == DSA_TAG_PROTO_NONE);
579 /* Resolve which bit controls the Broadcom tag */
582 val = BRCM_HDR_P8_EN;
585 val = BRCM_HDR_P7_EN;
588 val = BRCM_HDR_P5_EN;
595 /* Enable management mode if tagging is requested */
596 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &hdr_ctl);
598 hdr_ctl |= SM_SW_FWD_MODE;
600 hdr_ctl &= ~SM_SW_FWD_MODE;
601 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, hdr_ctl);
603 /* Configure the appropriate IMP port */
604 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &hdr_ctl);
606 hdr_ctl |= GC_FRM_MGMT_PORT_MII;
608 hdr_ctl |= GC_FRM_MGMT_PORT_M;
609 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, hdr_ctl);
611 /* Enable Broadcom tags for IMP port */
612 b53_read8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, &hdr_ctl);
617 b53_write8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, hdr_ctl);
619 /* Registers below are only accessible on newer devices */
623 /* Enable reception Broadcom tag for CPU TX (switch RX) to
624 * allow us to tag outgoing frames
b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, &reg);
631 b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, reg);
633 /* Enable transmission of Broadcom tags from the switch (CPU RX) to
634 * allow delivering frames to the per-port net_devices
b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, &reg);
641 b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, reg);
643 EXPORT_SYMBOL(b53_brcm_hdr_setup);
645 static void b53_enable_cpu_port(struct b53_device *dev, int port)
649 /* BCM5325 CPU port is at 8 */
650 if ((is5325(dev) || is5365(dev)) && port == B53_CPU_PORT_25)
653 port_ctrl = PORT_CTRL_RX_BCST_EN |
654 PORT_CTRL_RX_MCST_EN |
655 PORT_CTRL_RX_UCST_EN;
656 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl);
658 b53_brcm_hdr_setup(dev->ds, port);
660 b53_br_egress_floods(dev->ds, port, true, true);
663 static void b53_enable_mib(struct b53_device *dev)
667 b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
668 gc &= ~(GC_RESET_MIB | GC_MIB_AC_EN);
669 b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
672 static u16 b53_default_pvid(struct b53_device *dev)
674 if (is5325(dev) || is5365(dev))
680 int b53_configure_vlan(struct dsa_switch *ds)
682 struct b53_device *dev = ds->priv;
683 struct b53_vlan vl = { 0 };
686 def_vid = b53_default_pvid(dev);
688 /* clear all vlan entries */
689 if (is5325(dev) || is5365(dev)) {
690 for (i = def_vid; i < dev->num_vlans; i++)
691 b53_set_vlan_entry(dev, i, &vl);
693 b53_do_vlan_op(dev, VTA_CMD_CLEAR);
696 b53_enable_vlan(dev, dev->vlan_enabled, ds->vlan_filtering);
698 b53_for_each_port(dev, i)
699 b53_write16(dev, B53_VLAN_PAGE,
700 B53_VLAN_PORT_DEF_TAG(i), def_vid);
702 if (!is5325(dev) && !is5365(dev))
703 b53_set_jumbo(dev, dev->enable_jumbo, false);
707 EXPORT_SYMBOL(b53_configure_vlan);
709 static void b53_switch_reset_gpio(struct b53_device *dev)
711 int gpio = dev->reset_gpio;
716 /* Reset sequence: RESET low(50ms)->high(20ms)
718 gpio_set_value(gpio, 0);
721 gpio_set_value(gpio, 1);
724 dev->current_page = 0xff;
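/* Full switch reset: pulse the optional reset GPIO, issue the software
 * reset sequence where the chip requires it, poll for completion, then
 * make sure frame forwarding is enabled and flush static ARL entries.
 */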
727 static int b53_switch_reset(struct b53_device *dev)
729 unsigned int timeout = 1000;
732 b53_switch_reset_gpio(dev);
735 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x83);
736 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00);
739 /* This is specific to 58xx devices here, do not use is58xx() which
* covers the larger Starfighter 2 family, including 7445/7278 which
741 * still use this driver as a library and need to perform the reset
744 if (dev->chip_id == BCM58XX_DEVICE_ID ||
745 dev->chip_id == BCM583XX_DEVICE_ID) {
b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
747 reg |= SW_RST | EN_SW_RST | EN_CH_RST;
748 b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, reg);
b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
755 usleep_range(1000, 2000);
756 } while (timeout-- > 0);
762 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
764 if (!(mgmt & SM_SW_FWD_EN)) {
765 mgmt &= ~SM_SW_FWD_MODE;
766 mgmt |= SM_SW_FWD_EN;
768 b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
769 b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
771 if (!(mgmt & SM_SW_FWD_EN)) {
772 dev_err(dev->dev, "Failed to enable switch!\n");
779 return b53_flush_arl(dev, FAST_AGE_STATIC);
782 static int b53_phy_read16(struct dsa_switch *ds, int addr, int reg)
784 struct b53_device *priv = ds->priv;
788 if (priv->ops->phy_read16)
789 ret = priv->ops->phy_read16(priv, addr, reg, &value);
791 ret = b53_read16(priv, B53_PORT_MII_PAGE(addr),
794 return ret ? ret : value;
797 static int b53_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
799 struct b53_device *priv = ds->priv;
801 if (priv->ops->phy_write16)
802 return priv->ops->phy_write16(priv, addr, reg, val);
804 return b53_write16(priv, B53_PORT_MII_PAGE(addr), reg * 2, val);
807 static int b53_reset_switch(struct b53_device *priv)
810 priv->enable_jumbo = false;
812 memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans);
813 memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports);
815 priv->serdes_lane = B53_INVALID_LANE;
817 return b53_switch_reset(priv);
820 static int b53_apply_config(struct b53_device *priv)
822 /* disable switching */
823 b53_set_forwarding(priv, 0);
825 b53_configure_vlan(priv->ds);
827 /* enable switching */
828 b53_set_forwarding(priv, 1);
833 static void b53_reset_mib(struct b53_device *priv)
837 b53_read8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
839 b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc | GC_RESET_MIB);
841 b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc & ~GC_RESET_MIB);
845 static const struct b53_mib_desc *b53_get_mib(struct b53_device *dev)
849 else if (is63xx(dev))
850 return b53_mibs_63xx;
851 else if (is58xx(dev))
852 return b53_mibs_58xx;
857 static unsigned int b53_get_mib_size(struct b53_device *dev)
860 return B53_MIBS_65_SIZE;
861 else if (is63xx(dev))
862 return B53_MIBS_63XX_SIZE;
863 else if (is58xx(dev))
864 return B53_MIBS_58XX_SIZE;
866 return B53_MIBS_SIZE;
869 static struct phy_device *b53_get_phy_device(struct dsa_switch *ds, int port)
871 /* These ports typically do not have built-in PHYs */
873 case B53_CPU_PORT_25:
879 return mdiobus_get_phy(ds->slave_mii_bus, port);
882 void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
885 struct b53_device *dev = ds->priv;
886 const struct b53_mib_desc *mibs = b53_get_mib(dev);
887 unsigned int mib_size = b53_get_mib_size(dev);
888 struct phy_device *phydev;
891 if (stringset == ETH_SS_STATS) {
892 for (i = 0; i < mib_size; i++)
893 strlcpy(data + i * ETH_GSTRING_LEN,
894 mibs[i].name, ETH_GSTRING_LEN);
895 } else if (stringset == ETH_SS_PHY_STATS) {
896 phydev = b53_get_phy_device(ds, port);
900 phy_ethtool_get_strings(phydev, data);
903 EXPORT_SYMBOL(b53_get_strings);
905 void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
907 struct b53_device *dev = ds->priv;
908 const struct b53_mib_desc *mibs = b53_get_mib(dev);
909 unsigned int mib_size = b53_get_mib_size(dev);
910 const struct b53_mib_desc *s;
914 if (is5365(dev) && port == 5)
917 mutex_lock(&dev->stats_mutex);
919 for (i = 0; i < mib_size; i++) {
923 b53_read64(dev, B53_MIB_PAGE(port), s->offset, &val);
927 b53_read32(dev, B53_MIB_PAGE(port), s->offset,
934 mutex_unlock(&dev->stats_mutex);
936 EXPORT_SYMBOL(b53_get_ethtool_stats);
938 void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data)
940 struct phy_device *phydev;
942 phydev = b53_get_phy_device(ds, port);
946 phy_ethtool_get_stats(phydev, NULL, data);
948 EXPORT_SYMBOL(b53_get_ethtool_phy_stats);
950 int b53_get_sset_count(struct dsa_switch *ds, int port, int sset)
952 struct b53_device *dev = ds->priv;
953 struct phy_device *phydev;
955 if (sset == ETH_SS_STATS) {
956 return b53_get_mib_size(dev);
957 } else if (sset == ETH_SS_PHY_STATS) {
958 phydev = b53_get_phy_device(ds, port);
962 return phy_ethtool_get_sset_count(phydev);
967 EXPORT_SYMBOL(b53_get_sset_count);
969 static int b53_setup(struct dsa_switch *ds)
971 struct b53_device *dev = ds->priv;
975 ret = b53_reset_switch(dev);
977 dev_err(ds->dev, "failed to reset switch\n");
983 ret = b53_apply_config(dev);
985 dev_err(ds->dev, "failed to apply configuration\n");
987 /* Configure IMP/CPU port, disable all other ports. Enabled
988 * ports will be configured with .port_enable
990 for (port = 0; port < dev->num_ports; port++) {
991 if (dsa_is_cpu_port(ds, port))
992 b53_enable_cpu_port(dev, port);
994 b53_disable_port(ds, port);
/* Let DSA handle the case where multiple bridges span the same switch
998 * device and different VLAN awareness settings are requested, which
999 * would be breaking filtering semantics for any of the other bridge
1000 * devices. (not hardware supported)
1002 ds->vlan_filtering_is_global = true;
1007 static void b53_force_link(struct b53_device *dev, int port, int link)
1011 /* Override the port settings */
1012 if (port == dev->cpu_port) {
1013 off = B53_PORT_OVERRIDE_CTRL;
1014 val = PORT_OVERRIDE_EN;
1016 off = B53_GMII_PORT_OVERRIDE_CTRL(port);
b53_read8(dev, B53_CTRL_PAGE, off, &reg);
1023 reg |= PORT_OVERRIDE_LINK;
1025 reg &= ~PORT_OVERRIDE_LINK;
1026 b53_write8(dev, B53_CTRL_PAGE, off, reg);
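/* Force speed, duplex and pause settings through the port override
 * register: the CPU port uses B53_PORT_OVERRIDE_CTRL (and needs
 * PORT_OVERRIDE_EN), other ports use their per-port GMII override
 * register.
 */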
1029 static void b53_force_port_config(struct b53_device *dev, int port,
1030 int speed, int duplex, int pause)
1034 /* Override the port settings */
1035 if (port == dev->cpu_port) {
1036 off = B53_PORT_OVERRIDE_CTRL;
1037 val = PORT_OVERRIDE_EN;
1039 off = B53_GMII_PORT_OVERRIDE_CTRL(port);
b53_read8(dev, B53_CTRL_PAGE, off, &reg);
1045 if (duplex == DUPLEX_FULL)
1046 reg |= PORT_OVERRIDE_FULL_DUPLEX;
1048 reg &= ~PORT_OVERRIDE_FULL_DUPLEX;
1052 reg |= PORT_OVERRIDE_SPEED_2000M;
1055 reg |= PORT_OVERRIDE_SPEED_1000M;
1058 reg |= PORT_OVERRIDE_SPEED_100M;
1061 reg |= PORT_OVERRIDE_SPEED_10M;
1064 dev_err(dev->dev, "unknown speed: %d\n", speed);
1068 if (pause & MLO_PAUSE_RX)
1069 reg |= PORT_OVERRIDE_RX_FLOW;
1070 if (pause & MLO_PAUSE_TX)
1071 reg |= PORT_OVERRIDE_TX_FLOW;
1073 b53_write8(dev, B53_CTRL_PAGE, off, reg);
1076 static void b53_adjust_link(struct dsa_switch *ds, int port,
1077 struct phy_device *phydev)
1079 struct b53_device *dev = ds->priv;
1080 struct ethtool_eee *p = &dev->ports[port].eee;
1081 u8 rgmii_ctrl = 0, reg = 0, off;
1084 if (!phy_is_pseudo_fixed_link(phydev))
1087 /* Enable flow control on BCM5301x's CPU port */
1088 if (is5301x(dev) && port == dev->cpu_port)
1089 pause = MLO_PAUSE_TXRX_MASK;
1091 if (phydev->pause) {
1092 if (phydev->asym_pause)
1093 pause |= MLO_PAUSE_TX;
1094 pause |= MLO_PAUSE_RX;
1097 b53_force_port_config(dev, port, phydev->speed, phydev->duplex, pause);
1098 b53_force_link(dev, port, phydev->link);
1100 if (is531x5(dev) && phy_interface_is_rgmii(phydev)) {
1102 off = B53_RGMII_CTRL_IMP;
1104 off = B53_RGMII_CTRL_P(port);
1106 /* Configure the port RGMII clock delay by DLL disabled and
1107 * tx_clk aligned timing (restoring to reset defaults)
1109 b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
1110 rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC |
1111 RGMII_CTRL_TIMING_SEL);
1113 /* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make
1114 * sure that we enable the port TX clock internal delay to
1115 * account for this internal delay that is inserted, otherwise
1116 * the switch won't be able to receive correctly.
1118 * PHY_INTERFACE_MODE_RGMII means that we are not introducing
1119 * any delay neither on transmission nor reception, so the
1120 * BCM53125 must also be configured accordingly to account for
* the lack of delay by inserting the RX and TX clock delays itself.
1123 * The BCM53125 switch has its RX clock and TX clock control
1124 * swapped, hence the reason why we modify the TX clock path in
1127 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
1128 rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
1129 if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
1130 rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC;
1131 rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;
1132 b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
1134 dev_info(ds->dev, "Configured port %d for %s\n", port,
1135 phy_modes(phydev->interface));
1138 /* configure MII port if necessary */
1140 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
1143 /* reverse mii needs to be enabled */
1144 if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
1145 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
1146 reg | PORT_OVERRIDE_RV_MII_25);
1147 b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
1150 if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
1152 "Failed to enable reverse MII mode\n");
1156 } else if (is5301x(dev)) {
1157 if (port != dev->cpu_port) {
1158 b53_force_port_config(dev, dev->cpu_port, 2000,
1159 DUPLEX_FULL, MLO_PAUSE_TXRX_MASK);
1160 b53_force_link(dev, dev->cpu_port, 1);
1164 /* Re-negotiate EEE if it was enabled already */
1165 p->eee_enabled = b53_eee_init(ds, port, phydev);
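/* Interrupt helper: sample this port's bit in the link status register
 * and report the new state to phylink via dsa_port_phylink_mac_change().
 */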
1168 void b53_port_event(struct dsa_switch *ds, int port)
1170 struct b53_device *dev = ds->priv;
1174 b53_read16(dev, B53_STAT_PAGE, B53_LINK_STAT, &sts);
1175 link = !!(sts & BIT(port));
1176 dsa_port_phylink_mac_change(ds, port, link);
1178 EXPORT_SYMBOL(b53_port_event);
1180 void b53_phylink_validate(struct dsa_switch *ds, int port,
1181 unsigned long *supported,
1182 struct phylink_link_state *state)
1184 struct b53_device *dev = ds->priv;
1185 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1187 if (dev->ops->serdes_phylink_validate)
1188 dev->ops->serdes_phylink_validate(dev, port, mask, state);
1190 /* Allow all the expected bits */
1191 phylink_set(mask, Autoneg);
1192 phylink_set_port_modes(mask);
1193 phylink_set(mask, Pause);
1194 phylink_set(mask, Asym_Pause);
1196 /* With the exclusion of 5325/5365, MII, Reverse MII and 802.3z, we
1197 * support Gigabit, including Half duplex.
1199 if (state->interface != PHY_INTERFACE_MODE_MII &&
1200 state->interface != PHY_INTERFACE_MODE_REVMII &&
1201 !phy_interface_mode_is_8023z(state->interface) &&
1202 !(is5325(dev) || is5365(dev))) {
1203 phylink_set(mask, 1000baseT_Full);
1204 phylink_set(mask, 1000baseT_Half);
1207 if (!phy_interface_mode_is_8023z(state->interface)) {
1208 phylink_set(mask, 10baseT_Half);
1209 phylink_set(mask, 10baseT_Full);
1210 phylink_set(mask, 100baseT_Half);
1211 phylink_set(mask, 100baseT_Full);
1214 bitmap_and(supported, supported, mask,
1215 __ETHTOOL_LINK_MODE_MASK_NBITS);
1216 bitmap_and(state->advertising, state->advertising, mask,
1217 __ETHTOOL_LINK_MODE_MASK_NBITS);
1219 phylink_helper_basex_speed(state);
1221 EXPORT_SYMBOL(b53_phylink_validate);
1223 int b53_phylink_mac_link_state(struct dsa_switch *ds, int port,
1224 struct phylink_link_state *state)
1226 struct b53_device *dev = ds->priv;
1227 int ret = -EOPNOTSUPP;
1229 if ((phy_interface_mode_is_8023z(state->interface) ||
1230 state->interface == PHY_INTERFACE_MODE_SGMII) &&
1231 dev->ops->serdes_link_state)
1232 ret = dev->ops->serdes_link_state(dev, port, state);
1236 EXPORT_SYMBOL(b53_phylink_mac_link_state);
1238 void b53_phylink_mac_config(struct dsa_switch *ds, int port,
1240 const struct phylink_link_state *state)
1242 struct b53_device *dev = ds->priv;
1244 if (mode == MLO_AN_PHY)
1247 if (mode == MLO_AN_FIXED) {
1248 b53_force_port_config(dev, port, state->speed,
1249 state->duplex, state->pause);
1253 if ((phy_interface_mode_is_8023z(state->interface) ||
1254 state->interface == PHY_INTERFACE_MODE_SGMII) &&
1255 dev->ops->serdes_config)
1256 dev->ops->serdes_config(dev, port, mode, state);
1258 EXPORT_SYMBOL(b53_phylink_mac_config);
1260 void b53_phylink_mac_an_restart(struct dsa_switch *ds, int port)
1262 struct b53_device *dev = ds->priv;
1264 if (dev->ops->serdes_an_restart)
1265 dev->ops->serdes_an_restart(dev, port);
1267 EXPORT_SYMBOL(b53_phylink_mac_an_restart);
1269 void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
1271 phy_interface_t interface)
1273 struct b53_device *dev = ds->priv;
1275 if (mode == MLO_AN_PHY)
1278 if (mode == MLO_AN_FIXED) {
1279 b53_force_link(dev, port, false);
1283 if (phy_interface_mode_is_8023z(interface) &&
1284 dev->ops->serdes_link_set)
1285 dev->ops->serdes_link_set(dev, port, mode, interface, false);
1287 EXPORT_SYMBOL(b53_phylink_mac_link_down);
1289 void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
1291 phy_interface_t interface,
1292 struct phy_device *phydev)
1294 struct b53_device *dev = ds->priv;
1296 if (mode == MLO_AN_PHY)
1299 if (mode == MLO_AN_FIXED) {
1300 b53_force_link(dev, port, true);
1304 if (phy_interface_mode_is_8023z(interface) &&
1305 dev->ops->serdes_link_set)
1306 dev->ops->serdes_link_set(dev, port, mode, interface, true);
1308 EXPORT_SYMBOL(b53_phylink_mac_link_up);
1310 int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
1312 struct b53_device *dev = ds->priv;
1315 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
1317 if (!vlan_filtering) {
1318 /* Filtering is currently enabled, use the default PVID since
1319 * the bridge does not expect tagging anymore
1321 dev->ports[port].pvid = pvid;
1322 new_pvid = b53_default_pvid(dev);
1324 /* Filtering is currently disabled, restore the previous PVID */
1325 new_pvid = dev->ports[port].pvid;
1328 if (pvid != new_pvid)
1329 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
1332 b53_enable_vlan(dev, dev->vlan_enabled, vlan_filtering);
1336 EXPORT_SYMBOL(b53_vlan_filtering);
1338 int b53_vlan_prepare(struct dsa_switch *ds, int port,
1339 const struct switchdev_obj_port_vlan *vlan)
1341 struct b53_device *dev = ds->priv;
1343 if ((is5325(dev) || is5365(dev)) && vlan->vid_begin == 0)
1346 if (vlan->vid_end > dev->num_vlans)
1349 b53_enable_vlan(dev, true, ds->vlan_filtering);
1353 EXPORT_SYMBOL(b53_vlan_prepare);
1355 void b53_vlan_add(struct dsa_switch *ds, int port,
1356 const struct switchdev_obj_port_vlan *vlan)
1358 struct b53_device *dev = ds->priv;
1359 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1360 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1361 struct b53_vlan *vl;
1364 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
1365 vl = &dev->vlans[vid];
1367 b53_get_vlan_entry(dev, vid, vl);
1369 vl->members |= BIT(port);
1370 if (untagged && !dsa_is_cpu_port(ds, port))
1371 vl->untag |= BIT(port);
1373 vl->untag &= ~BIT(port);
1375 b53_set_vlan_entry(dev, vid, vl);
1376 b53_fast_age_vlan(dev, vid);
1379 if (pvid && !dsa_is_cpu_port(ds, port)) {
1380 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
1382 b53_fast_age_vlan(dev, vid);
1385 EXPORT_SYMBOL(b53_vlan_add);
1387 int b53_vlan_del(struct dsa_switch *ds, int port,
1388 const struct switchdev_obj_port_vlan *vlan)
1390 struct b53_device *dev = ds->priv;
1391 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1392 struct b53_vlan *vl;
1396 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
1398 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
1399 vl = &dev->vlans[vid];
1401 b53_get_vlan_entry(dev, vid, vl);
1403 vl->members &= ~BIT(port);
1406 pvid = b53_default_pvid(dev);
1408 if (untagged && !dsa_is_cpu_port(ds, port))
1409 vl->untag &= ~(BIT(port));
1411 b53_set_vlan_entry(dev, vid, vl);
1412 b53_fast_age_vlan(dev, vid);
1415 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid);
1416 b53_fast_age_vlan(dev, pvid);
1420 EXPORT_SYMBOL(b53_vlan_del);
1422 /* Address Resolution Logic routines */
1423 static int b53_arl_op_wait(struct b53_device *dev)
1425 unsigned int timeout = 10;
b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, &reg);
1430 if (!(reg & ARLTBL_START_DONE))
1433 usleep_range(1000, 2000);
1434 } while (timeout--);
1436 dev_warn(dev->dev, "timeout waiting for ARL to finish: 0x%02x\n", reg);
1441 static int b53_arl_rw_op(struct b53_device *dev, unsigned int op)
b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, &reg);
1449 reg |= ARLTBL_START_DONE;
1454 b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg);
1456 return b53_arl_op_wait(dev);
1459 static int b53_arl_read(struct b53_device *dev, u64 mac,
1460 u16 vid, struct b53_arl_entry *ent, u8 *idx,
1466 ret = b53_arl_op_wait(dev);
1471 for (i = 0; i < dev->num_arl_entries; i++) {
1475 b53_read64(dev, B53_ARLIO_PAGE,
1476 B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid);
1477 b53_read32(dev, B53_ARLIO_PAGE,
1478 B53_ARLTBL_DATA_ENTRY(i), &fwd_entry);
1479 b53_arl_to_entry(ent, mac_vid, fwd_entry);
1481 if (!(fwd_entry & ARLTBL_VALID))
1483 if ((mac_vid & ARLTBL_MAC_MASK) != mac)
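/* Read-modify-write the ARL entry for a given {MAC, VID}: look it up
 * first, update its validity (or the port bitmask for multicast
 * addresses), then write it back through b53_arl_rw_op().
 */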
1491 static int b53_arl_op(struct b53_device *dev, int op, int port,
1492 const unsigned char *addr, u16 vid, bool is_valid)
1494 struct b53_arl_entry ent;
1496 u64 mac, mac_vid = 0;
1500 /* Convert the array into a 64-bit MAC */
1501 mac = ether_addr_to_u64(addr);
1503 /* Perform a read for the given MAC and VID */
1504 b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac);
1505 b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid);
1507 /* Issue a read operation for this MAC */
1508 ret = b53_arl_rw_op(dev, 1);
1512 ret = b53_arl_read(dev, mac, vid, &ent, &idx, is_valid);
1513 /* If this is a read, just finish now */
1517 /* We could not find a matching MAC, so reset to a new entry */
/* For a multicast address, the port field is a bitmask and the entry
 * stays valid as long as at least one port is still active
1526 if (!is_multicast_ether_addr(addr)) {
1528 ent.is_valid = is_valid;
1531 ent.port |= BIT(port);
1533 ent.port &= ~BIT(port);
1535 ent.is_valid = !!(ent.port);
1538 ent.is_valid = is_valid;
1540 ent.is_static = true;
1542 memcpy(ent.mac, addr, ETH_ALEN);
1543 b53_arl_from_entry(&mac_vid, &fwd_entry, &ent);
1545 b53_write64(dev, B53_ARLIO_PAGE,
1546 B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid);
1547 b53_write32(dev, B53_ARLIO_PAGE,
1548 B53_ARLTBL_DATA_ENTRY(idx), fwd_entry);
1550 return b53_arl_rw_op(dev, 0);
1553 int b53_fdb_add(struct dsa_switch *ds, int port,
1554 const unsigned char *addr, u16 vid)
1556 struct b53_device *priv = ds->priv;
1558 /* 5325 and 5365 require some more massaging, but could
1559 * be supported eventually
1561 if (is5325(priv) || is5365(priv))
1564 return b53_arl_op(priv, 0, port, addr, vid, true);
1566 EXPORT_SYMBOL(b53_fdb_add);
1568 int b53_fdb_del(struct dsa_switch *ds, int port,
1569 const unsigned char *addr, u16 vid)
1571 struct b53_device *priv = ds->priv;
1573 return b53_arl_op(priv, 0, port, addr, vid, false);
1575 EXPORT_SYMBOL(b53_fdb_del);
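/* FDB dump uses the hardware ARL search engine: start a search, wait for
 * each result to become valid, read one or two result slots per
 * iteration and pass matching entries to the DSA callback, bounded at
 * 1024 iterations.
 */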
1577 static int b53_arl_search_wait(struct b53_device *dev)
1579 unsigned int timeout = 1000;
b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, &reg);
1584 if (!(reg & ARL_SRCH_STDN))
1587 if (reg & ARL_SRCH_VLID)
1590 usleep_range(1000, 2000);
1591 } while (timeout--);
1596 static void b53_arl_search_rd(struct b53_device *dev, u8 idx,
1597 struct b53_arl_entry *ent)
1602 b53_read64(dev, B53_ARLIO_PAGE,
1603 B53_ARL_SRCH_RSTL_MACVID(idx), &mac_vid);
1604 b53_read32(dev, B53_ARLIO_PAGE,
1605 B53_ARL_SRCH_RSTL(idx), &fwd_entry);
1606 b53_arl_to_entry(ent, mac_vid, fwd_entry);
1609 static int b53_fdb_copy(int port, const struct b53_arl_entry *ent,
1610 dsa_fdb_dump_cb_t *cb, void *data)
1615 if (port != ent->port)
1618 return cb(ent->mac, ent->vid, ent->is_static, data);
1621 int b53_fdb_dump(struct dsa_switch *ds, int port,
1622 dsa_fdb_dump_cb_t *cb, void *data)
1624 struct b53_device *priv = ds->priv;
1625 struct b53_arl_entry results[2];
1626 unsigned int count = 0;
1630 /* Start search operation */
1631 reg = ARL_SRCH_STDN;
1632 b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg);
1635 ret = b53_arl_search_wait(priv);
1639 b53_arl_search_rd(priv, 0, &results[0]);
1640 ret = b53_fdb_copy(port, &results[0], cb, data);
1644 if (priv->num_arl_entries > 2) {
1645 b53_arl_search_rd(priv, 1, &results[1]);
1646 ret = b53_fdb_copy(port, &results[1], cb, data);
1650 if (!results[0].is_valid && !results[1].is_valid)
1654 } while (count++ < 1024);
1658 EXPORT_SYMBOL(b53_fdb_dump);
1660 int b53_mdb_prepare(struct dsa_switch *ds, int port,
1661 const struct switchdev_obj_port_mdb *mdb)
1663 struct b53_device *priv = ds->priv;
1665 /* 5325 and 5365 require some more massaging, but could
1666 * be supported eventually
1668 if (is5325(priv) || is5365(priv))
1673 EXPORT_SYMBOL(b53_mdb_prepare);
1675 void b53_mdb_add(struct dsa_switch *ds, int port,
1676 const struct switchdev_obj_port_mdb *mdb)
1678 struct b53_device *priv = ds->priv;
1681 ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true);
1683 dev_err(ds->dev, "failed to add MDB entry\n");
1685 EXPORT_SYMBOL(b53_mdb_add);
1687 int b53_mdb_del(struct dsa_switch *ds, int port,
1688 const struct switchdev_obj_port_mdb *mdb)
1690 struct b53_device *priv = ds->priv;
1693 ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, false);
1695 dev_err(ds->dev, "failed to delete MDB entry\n");
1699 EXPORT_SYMBOL(b53_mdb_del);
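/* Bridging is implemented with the port-based VLAN (PVLAN) masks:
 * joining a bridge adds this port to every other member's mask (and vice
 * versa) and takes it out of the "join all VLANs" mode, since proper
 * VLAN table entries are used from then on.
 */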
1701 int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
1703 struct b53_device *dev = ds->priv;
1704 s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
/* Make this port leave the "join all VLANs" mode since we will have
 * proper VLAN entries from now on
 */
b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
1714 if ((reg & BIT(cpu_port)) == BIT(cpu_port))
1715 reg &= ~BIT(cpu_port);
1716 b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
1719 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
1721 b53_for_each_port(dev, i) {
1722 if (dsa_to_port(ds, i)->bridge_dev != br)
1725 /* Add this local port to the remote port VLAN control
1726 * membership and update the remote port bitmask
b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
1730 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg);
1731 dev->ports[i].vlan_ctl_mask = reg;
1736 /* Configure the local port VLAN control membership to include
1737 * remote ports and update the local port bitmask
1739 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
1740 dev->ports[port].vlan_ctl_mask = pvlan;
1744 EXPORT_SYMBOL(b53_br_join);
1746 void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
1748 struct b53_device *dev = ds->priv;
1749 struct b53_vlan *vl = &dev->vlans[0];
1750 s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
1752 u16 pvlan, reg, pvid;
1754 b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
1756 b53_for_each_port(dev, i) {
1757 /* Don't touch the remaining ports */
1758 if (dsa_to_port(ds, i)->bridge_dev != br)
b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
1763 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg);
1764 dev->ports[port].vlan_ctl_mask = reg;
1766 /* Prevent self removal to preserve isolation */
1771 b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
1772 dev->ports[port].vlan_ctl_mask = pvlan;
1774 pvid = b53_default_pvid(dev);
1776 /* Make this port join all VLANs without VLAN entries */
b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
1780 if (!(reg & BIT(cpu_port)))
1781 reg |= BIT(cpu_port);
1782 b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
1784 b53_get_vlan_entry(dev, pvid, vl);
1785 vl->members |= BIT(port) | BIT(cpu_port);
1786 vl->untag |= BIT(port) | BIT(cpu_port);
1787 b53_set_vlan_entry(dev, pvid, vl);
1790 EXPORT_SYMBOL(b53_br_leave);
1792 void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state)
1794 struct b53_device *dev = ds->priv;
1799 case BR_STATE_DISABLED:
1800 hw_state = PORT_CTRL_DIS_STATE;
1802 case BR_STATE_LISTENING:
1803 hw_state = PORT_CTRL_LISTEN_STATE;
1805 case BR_STATE_LEARNING:
1806 hw_state = PORT_CTRL_LEARN_STATE;
1808 case BR_STATE_FORWARDING:
1809 hw_state = PORT_CTRL_FWD_STATE;
1811 case BR_STATE_BLOCKING:
1812 hw_state = PORT_CTRL_BLOCK_STATE;
1815 dev_err(ds->dev, "invalid STP state: %d\n", state);
b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
1820 reg &= ~PORT_CTRL_STP_STATE_MASK;
1822 b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
1824 EXPORT_SYMBOL(b53_br_set_stp_state);
1826 void b53_br_fast_age(struct dsa_switch *ds, int port)
1828 struct b53_device *dev = ds->priv;
1830 if (b53_fast_age_port(dev, port))
1831 dev_err(ds->dev, "fast ageing failed\n");
1833 EXPORT_SYMBOL(b53_br_fast_age);
1835 int b53_br_egress_floods(struct dsa_switch *ds, int port,
1836 bool unicast, bool multicast)
1838 struct b53_device *dev = ds->priv;
1841 b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
1846 b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
1848 b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
1853 b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);
1855 b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
1860 b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
1865 EXPORT_SYMBOL(b53_br_egress_floods);
1867 static bool b53_possible_cpu_port(struct dsa_switch *ds, int port)
1869 /* Broadcom switches will accept enabling Broadcom tags on the
1870 * following ports: 5, 7 and 8, any other port is not supported
1873 case B53_CPU_PORT_25:
1882 static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port,
1883 enum dsa_tag_protocol tag_protocol)
1885 bool ret = b53_possible_cpu_port(ds, port);
1888 dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n",
1893 switch (tag_protocol) {
1894 case DSA_TAG_PROTO_BRCM:
1895 case DSA_TAG_PROTO_BRCM_PREPEND:
1897 "Port %d is stacked to Broadcom tag switch\n", port);
1908 enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
1909 enum dsa_tag_protocol mprot)
1911 struct b53_device *dev = ds->priv;
1913 /* Older models (5325, 5365) support a different tag format that we do
1914 * not support in net/dsa/tag_brcm.c yet.
1916 if (is5325(dev) || is5365(dev) ||
1917 !b53_can_enable_brcm_tags(ds, port, mprot)) {
1918 dev->tag_protocol = DSA_TAG_PROTO_NONE;
1922 /* Broadcom BCM58xx chips have a flow accelerator on Port 8
1923 * which requires us to use the prepended Broadcom tag type
1925 if (dev->chip_id == BCM58XX_DEVICE_ID && port == B53_CPU_PORT) {
1926 dev->tag_protocol = DSA_TAG_PROTO_BRCM_PREPEND;
1930 dev->tag_protocol = DSA_TAG_PROTO_BRCM;
1932 return dev->tag_protocol;
1934 EXPORT_SYMBOL(b53_get_tag_protocol);
1936 int b53_mirror_add(struct dsa_switch *ds, int port,
1937 struct dsa_mall_mirror_tc_entry *mirror, bool ingress)
1939 struct b53_device *dev = ds->priv;
1943 loc = B53_IG_MIR_CTL;
1945 loc = B53_EG_MIR_CTL;
b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
1949 b53_write16(dev, B53_MGMT_PAGE, loc, reg);
b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
1952 reg &= ~CAP_PORT_MASK;
1953 reg |= mirror->to_local_port;
1955 b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);
1959 EXPORT_SYMBOL(b53_mirror_add);
1961 void b53_mirror_del(struct dsa_switch *ds, int port,
1962 struct dsa_mall_mirror_tc_entry *mirror)
1964 struct b53_device *dev = ds->priv;
1965 bool loc_disable = false, other_loc_disable = false;
1968 if (mirror->ingress)
1969 loc = B53_IG_MIR_CTL;
1971 loc = B53_EG_MIR_CTL;
1973 /* Update the desired ingress/egress register */
b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
1976 if (!(reg & MIRROR_MASK))
1978 b53_write16(dev, B53_MGMT_PAGE, loc, reg);
1980 /* Now look at the other one to know if we can disable mirroring
1983 if (mirror->ingress)
b53_read16(dev, B53_MGMT_PAGE, B53_EG_MIR_CTL, &reg);
b53_read16(dev, B53_MGMT_PAGE, B53_IG_MIR_CTL, &reg);
1987 if (!(reg & MIRROR_MASK))
1988 other_loc_disable = true;
b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
1991 /* Both no longer have ports, let's disable mirroring */
1992 if (loc_disable && other_loc_disable) {
1994 reg &= ~mirror->to_local_port;
1996 b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);
1998 EXPORT_SYMBOL(b53_mirror_del);
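/* EEE support: b53_eee_enable_set() toggles the per-port bit in the EEE
 * enable register; b53_eee_init() only turns it on once phy_init_eee()
 * reports that the PHY negotiated EEE successfully.
 */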
2000 void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
2002 struct b53_device *dev = ds->priv;
b53_read16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, &reg);
2010 b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg);
2012 EXPORT_SYMBOL(b53_eee_enable_set);
2015 /* Returns 0 if EEE was not enabled, or 1 otherwise
2017 int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
2021 ret = phy_init_eee(phy, 0);
2025 b53_eee_enable_set(ds, port, true);
2029 EXPORT_SYMBOL(b53_eee_init);
2031 int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
2033 struct b53_device *dev = ds->priv;
2034 struct ethtool_eee *p = &dev->ports[port].eee;
2037 if (is5325(dev) || is5365(dev))
b53_read16(dev, B53_EEE_PAGE, B53_EEE_LPI_INDICATE, &reg);
2041 e->eee_enabled = p->eee_enabled;
2042 e->eee_active = !!(reg & BIT(port));
2046 EXPORT_SYMBOL(b53_get_mac_eee);
2048 int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
2050 struct b53_device *dev = ds->priv;
2051 struct ethtool_eee *p = &dev->ports[port].eee;
2053 if (is5325(dev) || is5365(dev))
2056 p->eee_enabled = e->eee_enabled;
2057 b53_eee_enable_set(ds, port, e->eee_enabled);
2061 EXPORT_SYMBOL(b53_set_mac_eee);
2063 static const struct dsa_switch_ops b53_switch_ops = {
2064 .get_tag_protocol = b53_get_tag_protocol,
2066 .get_strings = b53_get_strings,
2067 .get_ethtool_stats = b53_get_ethtool_stats,
2068 .get_sset_count = b53_get_sset_count,
2069 .get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
2070 .phy_read = b53_phy_read16,
2071 .phy_write = b53_phy_write16,
2072 .adjust_link = b53_adjust_link,
2073 .phylink_validate = b53_phylink_validate,
2074 .phylink_mac_link_state = b53_phylink_mac_link_state,
2075 .phylink_mac_config = b53_phylink_mac_config,
2076 .phylink_mac_an_restart = b53_phylink_mac_an_restart,
2077 .phylink_mac_link_down = b53_phylink_mac_link_down,
2078 .phylink_mac_link_up = b53_phylink_mac_link_up,
2079 .port_enable = b53_enable_port,
2080 .port_disable = b53_disable_port,
2081 .get_mac_eee = b53_get_mac_eee,
2082 .set_mac_eee = b53_set_mac_eee,
2083 .port_bridge_join = b53_br_join,
2084 .port_bridge_leave = b53_br_leave,
2085 .port_stp_state_set = b53_br_set_stp_state,
2086 .port_fast_age = b53_br_fast_age,
2087 .port_egress_floods = b53_br_egress_floods,
2088 .port_vlan_filtering = b53_vlan_filtering,
2089 .port_vlan_prepare = b53_vlan_prepare,
2090 .port_vlan_add = b53_vlan_add,
2091 .port_vlan_del = b53_vlan_del,
2092 .port_fdb_dump = b53_fdb_dump,
2093 .port_fdb_add = b53_fdb_add,
2094 .port_fdb_del = b53_fdb_del,
2095 .port_mirror_add = b53_mirror_add,
2096 .port_mirror_del = b53_mirror_del,
2097 .port_mdb_prepare = b53_mdb_prepare,
2098 .port_mdb_add = b53_mdb_add,
2099 .port_mdb_del = b53_mdb_del,
2102 struct b53_chip_data {
2104 const char *dev_name;
2115 #define B53_VTA_REGS \
2116 { B53_VT_ACCESS, B53_VT_INDEX, B53_VT_ENTRY }
2117 #define B53_VTA_REGS_9798 \
2118 { B53_VT_ACCESS_9798, B53_VT_INDEX_9798, B53_VT_ENTRY_9798 }
2119 #define B53_VTA_REGS_63XX \
2120 { B53_VT_ACCESS_63XX, B53_VT_INDEX_63XX, B53_VT_ENTRY_63XX }
2122 static const struct b53_chip_data b53_switch_chips[] = {
2124 .chip_id = BCM5325_DEVICE_ID,
2125 .dev_name = "BCM5325",
2127 .enabled_ports = 0x1f,
2129 .cpu_port = B53_CPU_PORT_25,
2130 .duplex_reg = B53_DUPLEX_STAT_FE,
2133 .chip_id = BCM5365_DEVICE_ID,
2134 .dev_name = "BCM5365",
2136 .enabled_ports = 0x1f,
2138 .cpu_port = B53_CPU_PORT_25,
2139 .duplex_reg = B53_DUPLEX_STAT_FE,
2142 .chip_id = BCM5389_DEVICE_ID,
2143 .dev_name = "BCM5389",
2145 .enabled_ports = 0x1f,
2147 .cpu_port = B53_CPU_PORT,
2148 .vta_regs = B53_VTA_REGS,
2149 .duplex_reg = B53_DUPLEX_STAT_GE,
2150 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2151 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2154 .chip_id = BCM5395_DEVICE_ID,
2155 .dev_name = "BCM5395",
2157 .enabled_ports = 0x1f,
2159 .cpu_port = B53_CPU_PORT,
2160 .vta_regs = B53_VTA_REGS,
2161 .duplex_reg = B53_DUPLEX_STAT_GE,
2162 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2163 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2166 .chip_id = BCM5397_DEVICE_ID,
2167 .dev_name = "BCM5397",
2169 .enabled_ports = 0x1f,
2171 .cpu_port = B53_CPU_PORT,
2172 .vta_regs = B53_VTA_REGS_9798,
2173 .duplex_reg = B53_DUPLEX_STAT_GE,
2174 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2175 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2178 .chip_id = BCM5398_DEVICE_ID,
2179 .dev_name = "BCM5398",
2181 .enabled_ports = 0x7f,
2183 .cpu_port = B53_CPU_PORT,
2184 .vta_regs = B53_VTA_REGS_9798,
2185 .duplex_reg = B53_DUPLEX_STAT_GE,
2186 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2187 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2190 .chip_id = BCM53115_DEVICE_ID,
2191 .dev_name = "BCM53115",
2193 .enabled_ports = 0x1f,
2195 .vta_regs = B53_VTA_REGS,
2196 .cpu_port = B53_CPU_PORT,
2197 .duplex_reg = B53_DUPLEX_STAT_GE,
2198 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2199 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2202 .chip_id = BCM53125_DEVICE_ID,
2203 .dev_name = "BCM53125",
2205 .enabled_ports = 0xff,
2207 .cpu_port = B53_CPU_PORT,
2208 .vta_regs = B53_VTA_REGS,
2209 .duplex_reg = B53_DUPLEX_STAT_GE,
2210 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2211 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2214 .chip_id = BCM53128_DEVICE_ID,
2215 .dev_name = "BCM53128",
2217 .enabled_ports = 0x1ff,
2219 .cpu_port = B53_CPU_PORT,
2220 .vta_regs = B53_VTA_REGS,
2221 .duplex_reg = B53_DUPLEX_STAT_GE,
2222 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2223 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2226 .chip_id = BCM63XX_DEVICE_ID,
2227 .dev_name = "BCM63xx",
2229 .enabled_ports = 0, /* pdata must provide them */
2231 .cpu_port = B53_CPU_PORT,
2232 .vta_regs = B53_VTA_REGS_63XX,
2233 .duplex_reg = B53_DUPLEX_STAT_63XX,
2234 .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX,
2235 .jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX,
2238 .chip_id = BCM53010_DEVICE_ID,
2239 .dev_name = "BCM53010",
2241 .enabled_ports = 0x1f,
2243 .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
2244 .vta_regs = B53_VTA_REGS,
2245 .duplex_reg = B53_DUPLEX_STAT_GE,
2246 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2247 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2250 .chip_id = BCM53011_DEVICE_ID,
2251 .dev_name = "BCM53011",
2253 .enabled_ports = 0x1bf,
2255 .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
2256 .vta_regs = B53_VTA_REGS,
2257 .duplex_reg = B53_DUPLEX_STAT_GE,
2258 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2259 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2262 .chip_id = BCM53012_DEVICE_ID,
2263 .dev_name = "BCM53012",
2265 .enabled_ports = 0x1bf,
2267 .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
2268 .vta_regs = B53_VTA_REGS,
2269 .duplex_reg = B53_DUPLEX_STAT_GE,
2270 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2271 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2274 .chip_id = BCM53018_DEVICE_ID,
2275 .dev_name = "BCM53018",
2277 .enabled_ports = 0x1f,
2279 .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
2280 .vta_regs = B53_VTA_REGS,
2281 .duplex_reg = B53_DUPLEX_STAT_GE,
2282 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2283 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2286 .chip_id = BCM53019_DEVICE_ID,
2287 .dev_name = "BCM53019",
2289 .enabled_ports = 0x1f,
2291 .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
2292 .vta_regs = B53_VTA_REGS,
2293 .duplex_reg = B53_DUPLEX_STAT_GE,
2294 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2295 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2298 .chip_id = BCM58XX_DEVICE_ID,
2299 .dev_name = "BCM585xx/586xx/88312",
2301 .enabled_ports = 0x1ff,
2303 .cpu_port = B53_CPU_PORT,
2304 .vta_regs = B53_VTA_REGS,
2305 .duplex_reg = B53_DUPLEX_STAT_GE,
2306 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2307 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2310 .chip_id = BCM583XX_DEVICE_ID,
2311 .dev_name = "BCM583xx/11360",
2313 .enabled_ports = 0x103,
2315 .cpu_port = B53_CPU_PORT,
2316 .vta_regs = B53_VTA_REGS,
2317 .duplex_reg = B53_DUPLEX_STAT_GE,
2318 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2319 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2322 .chip_id = BCM7445_DEVICE_ID,
2323 .dev_name = "BCM7445",
2325 .enabled_ports = 0x1ff,
2327 .cpu_port = B53_CPU_PORT,
2328 .vta_regs = B53_VTA_REGS,
2329 .duplex_reg = B53_DUPLEX_STAT_GE,
2330 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2331 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2334 .chip_id = BCM7278_DEVICE_ID,
2335 .dev_name = "BCM7278",
2337 .enabled_ports = 0x1ff,
2339 .cpu_port = B53_CPU_PORT,
2340 .vta_regs = B53_VTA_REGS,
2341 .duplex_reg = B53_DUPLEX_STAT_GE,
2342 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2343 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
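/* Match dev->chip_id against b53_switch_chips[] and copy the per-chip
 * parameters (enabled ports, VLAN count, CPU port, register offsets)
 * into the device, then apply the runtime-probed quirks below.
 */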
2347 static int b53_switch_init(struct b53_device *dev)
2352 for (i = 0; i < ARRAY_SIZE(b53_switch_chips); i++) {
2353 const struct b53_chip_data *chip = &b53_switch_chips[i];
2355 if (chip->chip_id == dev->chip_id) {
2356 if (!dev->enabled_ports)
2357 dev->enabled_ports = chip->enabled_ports;
2358 dev->name = chip->dev_name;
2359 dev->duplex_reg = chip->duplex_reg;
2360 dev->vta_regs[0] = chip->vta_regs[0];
2361 dev->vta_regs[1] = chip->vta_regs[1];
2362 dev->vta_regs[2] = chip->vta_regs[2];
2363 dev->jumbo_pm_reg = chip->jumbo_pm_reg;
2364 dev->cpu_port = chip->cpu_port;
2365 dev->num_vlans = chip->vlans;
2366 dev->num_arl_entries = chip->arl_entries;
2371 /* check which BCM5325x version we have */
2375 b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);
2377 /* check reserved bits */
2383 /* BCM5325F - do not use port 4 */
2384 dev->enabled_ports &= ~BIT(4);
/* On the BCM47XX SoCs this is the supported internal switch. */
2388 #ifndef CONFIG_BCM47XX
2395 } else if (dev->chip_id == BCM53115_DEVICE_ID) {
2398 b53_read48(dev, B53_STAT_PAGE, B53_STRAP_VALUE, &strap_value);
2399 /* use second IMP port if GMII is enabled */
2400 if (strap_value & SV_GMII_CTRL_115)
2404 /* cpu port is always last */
2405 dev->num_ports = dev->cpu_port + 1;
2406 dev->enabled_ports |= BIT(dev->cpu_port);
2408 /* Include non standard CPU port built-in PHYs to be probed */
2409 if (is539x(dev) || is531x5(dev)) {
2410 for (i = 0; i < dev->num_ports; i++) {
2411 if (!(dev->ds->phys_mii_mask & BIT(i)) &&
2412 !b53_possible_cpu_port(dev->ds, i))
2413 dev->ds->phys_mii_mask |= BIT(i);
2417 dev->ports = devm_kcalloc(dev->dev,
2418 dev->num_ports, sizeof(struct b53_port),
2423 dev->vlans = devm_kcalloc(dev->dev,
2424 dev->num_vlans, sizeof(struct b53_vlan),
2429 dev->reset_gpio = b53_switch_get_reset_gpio(dev);
2430 if (dev->reset_gpio >= 0) {
2431 ret = devm_gpio_request_one(dev->dev, dev->reset_gpio,
2432 GPIOF_OUT_INIT_HIGH, "robo_reset");
2440 struct b53_device *b53_switch_alloc(struct device *base,
2441 const struct b53_io_ops *ops,
2444 struct dsa_switch *ds;
2445 struct b53_device *dev;
2447 ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
2452 ds->num_ports = DSA_MAX_PORTS;
2454 dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL);
2464 ds->ops = &b53_switch_ops;
2465 mutex_init(&dev->reg_mutex);
2466 mutex_init(&dev->stats_mutex);
2470 EXPORT_SYMBOL(b53_switch_alloc);
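/* Identify the switch: try the 8-bit and then the 32-bit device ID
 * registers, and tell BCM5325 apart from BCM5365 by checking whether the
 * 5325 VLAN table access register is writable (read-only on 5365).
 */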
2472 int b53_switch_detect(struct b53_device *dev)
2479 ret = b53_read8(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id8);
/* BCM5325 and BCM5365 do not have this register so reads
 * return 0. But the read operation did succeed, so assume this
 * is a BCM5325/BCM5365.
 *
 * Next check if we can write to the 5325's VTA register; for
 * 5365 it is read only.
 */
2492 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, 0xf);
2493 b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, &tmp);
2496 dev->chip_id = BCM5325_DEVICE_ID;
2498 dev->chip_id = BCM5365_DEVICE_ID;
2500 case BCM5389_DEVICE_ID:
2501 case BCM5395_DEVICE_ID:
2502 case BCM5397_DEVICE_ID:
2503 case BCM5398_DEVICE_ID:
2507 ret = b53_read32(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id32);
2512 case BCM53115_DEVICE_ID:
2513 case BCM53125_DEVICE_ID:
2514 case BCM53128_DEVICE_ID:
2515 case BCM53010_DEVICE_ID:
2516 case BCM53011_DEVICE_ID:
2517 case BCM53012_DEVICE_ID:
2518 case BCM53018_DEVICE_ID:
2519 case BCM53019_DEVICE_ID:
2520 dev->chip_id = id32;
2523 pr_err("unsupported switch detected (BCM53%02x/BCM%x)\n",
2529 if (dev->chip_id == BCM5325_DEVICE_ID)
2530 return b53_read8(dev, B53_STAT_PAGE, B53_REV_ID_25,
2533 return b53_read8(dev, B53_MGMT_PAGE, B53_REV_ID,
2536 EXPORT_SYMBOL(b53_switch_detect);
2538 int b53_switch_register(struct b53_device *dev)
2543 dev->chip_id = dev->pdata->chip_id;
2544 dev->enabled_ports = dev->pdata->enabled_ports;
2547 if (!dev->chip_id && b53_switch_detect(dev))
2550 ret = b53_switch_init(dev);
2554 pr_info("found switch: %s, rev %i\n", dev->name, dev->core_rev);
2556 return dsa_register_switch(dev->ds);
2558 EXPORT_SYMBOL(b53_switch_register);
2561 MODULE_DESCRIPTION("B53 switch library");
2562 MODULE_LICENSE("Dual BSD/GPL");