// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for the Marvell NETA network controller on Armada XP and
 * Armada 370 SoCs.
 *
 * U-Boot version:
 * Copyright (C) 2014-2015 Stefan Roese <sr@denx.de>
 *
 * Based on the Linux version which is:
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 */

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <log.h>
#include <net.h>
#include <netdev.h>
#include <config.h>
#include <malloc.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <phy.h>
#include <miiphy.h>
#include <watchdog.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include <linux/compat.h>
#include <linux/mbus.h>
#include <asm-generic/gpio.h>

DECLARE_GLOBAL_DATA_PTR;

#define MVNETA_NR_CPUS			1
#define ETH_HLEN			14	/* Total octets in header */

/* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */
#define WRAP				(2 + ETH_HLEN + 4 + 32)
#define MTU				1500
#define RX_BUFFER_SIZE			(ALIGN(MTU + WRAP, ARCH_DMA_MINALIGN))
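
/*
 * Worked example of the buffer size math above, assuming the usual
 * 32-byte ARCH_DMA_MINALIGN on these SoCs: WRAP = 2 + 14 + 4 + 32 = 52,
 * so MTU + WRAP = 1552, which ALIGN() rounds up to 1568 bytes per
 * receive buffer.
 */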

#define MVNETA_SMI_TIMEOUT		10000

/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q)	(0x1400 + ((q) << 2))
#define MVNETA_RXQ_HW_BUF_ALLOC		BIT(1)
#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK	(0xf << 8)
#define MVNETA_RXQ_PKT_OFFSET_MASK(offs)	((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q)	(0x14c0 + ((q) << 2))
#define MVNETA_RXQ_NON_OCCUPIED(v)	((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q)	(0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q)		(0x14a0 + ((q) << 2))
#define MVNETA_RXQ_BUF_SIZE_SHIFT	19
#define MVNETA_RXQ_BUF_SIZE_MASK	(0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q)	(0x14e0 + ((q) << 2))
#define MVNETA_RXQ_OCCUPIED_ALL_MASK	0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q)	(0x1500 + ((q) << 2))
#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT	16
#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX		255
#define MVNETA_PORT_RX_RESET		0x1cc0
#define MVNETA_PORT_RX_DMA_RESET	BIT(0)
#define MVNETA_PHY_ADDR			0x2000
#define MVNETA_PHY_ADDR_MASK		0x1f
#define MVNETA_SMI			0x2004
#define MVNETA_PHY_REG_MASK		0x1f
/* SMI register fields */
#define MVNETA_SMI_DATA_OFFS		0	/* Data */
#define MVNETA_SMI_DATA_MASK		(0xffff << MVNETA_SMI_DATA_OFFS)
#define MVNETA_SMI_DEV_ADDR_OFFS	16	/* PHY device address */
#define MVNETA_SMI_REG_ADDR_OFFS	21	/* PHY device reg addr */
#define MVNETA_SMI_OPCODE_OFFS		26	/* Write/Read opcode */
#define MVNETA_SMI_OPCODE_READ		(1 << MVNETA_SMI_OPCODE_OFFS)
#define MVNETA_SMI_READ_VALID		(1 << 27)	/* Read Valid */
#define MVNETA_SMI_BUSY			(1 << 28)	/* Busy */
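
/*
 * Illustration only (this driver lets the hardware poll the PHY rather
 * than issuing SMI transactions itself): an SMI read command word for
 * PHY address `phy` and register `reg` would be composed as
 *
 *	((phy & MVNETA_PHY_ADDR_MASK) << MVNETA_SMI_DEV_ADDR_OFFS) |
 *	((reg & MVNETA_PHY_REG_MASK) << MVNETA_SMI_REG_ADDR_OFFS) |
 *	MVNETA_SMI_OPCODE_READ
 *
 * written to MVNETA_SMI, after which a caller would poll MVNETA_SMI_BUSY
 * and MVNETA_SMI_READ_VALID before masking the result with
 * MVNETA_SMI_DATA_MASK.
 */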
#define MVNETA_MBUS_RETRY		0x2010
#define MVNETA_UNIT_INTR_CAUSE		0x2080
#define MVNETA_UNIT_CONTROL		0x20B0
#define MVNETA_PHY_POLLING_ENABLE	BIT(1)
#define MVNETA_WIN_BASE(w)		(0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w)		(0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w)		(0x2280 + ((w) << 2))
#define MVNETA_WIN_SIZE_MASK		(0xffff0000)
#define MVNETA_BASE_ADDR_ENABLE		0x2290
#define MVNETA_BASE_ADDR_ENABLE_BIT	0x1
#define MVNETA_PORT_ACCESS_PROTECT	0x2294
#define MVNETA_PORT_ACCESS_PROTECT_WIN0_RW	0x3
#define MVNETA_PORT_CONFIG		0x2400
#define MVNETA_UNI_PROMISC_MODE		BIT(0)
#define MVNETA_DEF_RXQ(q)		((q) << 1)
#define MVNETA_DEF_RXQ_ARP(q)		((q) << 4)
#define MVNETA_TX_UNSET_ERR_SUM		BIT(12)
#define MVNETA_DEF_RXQ_TCP(q)		((q) << 16)
#define MVNETA_DEF_RXQ_UDP(q)		((q) << 19)
#define MVNETA_DEF_RXQ_BPDU(q)		((q) << 22)
#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR	BIT(25)
#define MVNETA_PORT_CONFIG_DEFL_VALUE(q)	(MVNETA_DEF_RXQ(q) | \
						 MVNETA_DEF_RXQ_ARP(q) | \
						 MVNETA_DEF_RXQ_TCP(q) | \
						 MVNETA_DEF_RXQ_UDP(q) | \
						 MVNETA_DEF_RXQ_BPDU(q) | \
						 MVNETA_TX_UNSET_ERR_SUM | \
						 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND	0x2404
#define MVNETA_MAC_ADDR_LOW		0x2414
#define MVNETA_MAC_ADDR_HIGH		0x2418
#define MVNETA_SDMA_CONFIG		0x241c
#define MVNETA_SDMA_BRST_SIZE_16	4
#define MVNETA_RX_BRST_SZ_MASK(burst)	((burst) << 1)
#define MVNETA_RX_NO_DATA_SWAP		BIT(4)
#define MVNETA_TX_NO_DATA_SWAP		BIT(5)
#define MVNETA_DESC_SWAP		BIT(6)
#define MVNETA_TX_BRST_SZ_MASK(burst)	((burst) << 22)
#define MVNETA_PORT_STATUS		0x2444
#define MVNETA_TX_IN_PRGRS		BIT(1)
#define MVNETA_TX_FIFO_EMPTY		BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE	0x247c
#define MVNETA_SERDES_CFG		0x24A0
#define MVNETA_SGMII_SERDES_PROTO	0x0cc7
#define MVNETA_QSGMII_SERDES_PROTO	0x0667
#define MVNETA_TYPE_PRIO		0x24bc
#define MVNETA_FORCE_UNI		BIT(21)
#define MVNETA_TXQ_CMD_1		0x24e4
#define MVNETA_TXQ_CMD			0x2448
#define MVNETA_TXQ_DISABLE_SHIFT	8
#define MVNETA_TXQ_ENABLE_MASK		0x000000ff
#define MVNETA_ACC_MODE			0x2500
#define MVNETA_CPU_MAP(cpu)		(0x2540 + ((cpu) << 2))
#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK	0x000000ff
#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK	0x0000ff00
#define MVNETA_RXQ_TIME_COAL_REG(q)	(0x2580 + ((q) << 2))

/* Exception Interrupt Port/Queue Cause register */

#define MVNETA_INTR_NEW_CAUSE		0x25a0
#define MVNETA_INTR_NEW_MASK		0x25a4

/* bits 0..7   = TXQ SENT, one bit per queue.
 * bits 8..15  = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit 29      = OLD_REG_SUM, summary of the old-style cause register
 * bit 30      = TX_ERR_SUM, one bit for 4 ports
 * bit 31      = MISC_SUM, one bit for 4 ports
 */
#define MVNETA_TX_INTR_MASK(nr_txqs)	(((1 << nr_txqs) - 1) << 0)
#define MVNETA_TX_INTR_MASK_ALL		(0xff << 0)
#define MVNETA_RX_INTR_MASK(nr_rxqs)	(((1 << nr_rxqs) - 1) << 8)
#define MVNETA_RX_INTR_MASK_ALL		(0xff << 8)
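
/*
 * For example, with the single queue used by this driver,
 * MVNETA_TX_INTR_MASK(1) evaluates to 0x001 and MVNETA_RX_INTR_MASK(1)
 * to 0x100, i.e. one bit in the TXQ SENT field and one in RXQ OCCUP.
 */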

#define MVNETA_INTR_OLD_CAUSE		0x25a8
#define MVNETA_INTR_OLD_MASK		0x25ac

/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE		0x25b0
#define MVNETA_INTR_MISC_MASK		0x25b4
#define MVNETA_INTR_ENABLE		0x25b8

#define MVNETA_RXQ_CMD			0x2680
#define MVNETA_RXQ_DISABLE_SHIFT	8
#define MVNETA_RXQ_ENABLE_MASK		0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q)	(0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q)	(0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0		0x2c00
#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT	2
#define MVNETA_GMAC_MAX_RX_SIZE_MASK	0x7ffc
#define MVNETA_GMAC0_PORT_ENABLE	BIT(0)
#define MVNETA_GMAC_CTRL_2		0x2c08
#define MVNETA_GMAC2_PCS_ENABLE		BIT(3)
#define MVNETA_GMAC2_PORT_RGMII		BIT(4)
#define MVNETA_GMAC2_PORT_RESET		BIT(6)
#define MVNETA_GMAC_STATUS		0x2c10
#define MVNETA_GMAC_LINK_UP		BIT(0)
#define MVNETA_GMAC_SPEED_1000		BIT(1)
#define MVNETA_GMAC_SPEED_100		BIT(2)
#define MVNETA_GMAC_FULL_DUPLEX		BIT(3)
#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE	BIT(4)
#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE	BIT(5)
#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE	BIT(6)
#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE	BIT(7)
#define MVNETA_GMAC_AUTONEG_CONFIG	0x2c0c
#define MVNETA_GMAC_FORCE_LINK_DOWN	BIT(0)
#define MVNETA_GMAC_FORCE_LINK_PASS	BIT(1)
#define MVNETA_GMAC_IB_BYPASS_AN_EN	BIT(3)
#define MVNETA_GMAC_CONFIG_MII_SPEED	BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED	BIT(6)
#define MVNETA_GMAC_AN_SPEED_EN		BIT(7)
#define MVNETA_GMAC_SET_FC_EN		BIT(8)
#define MVNETA_GMAC_ADVERT_FC_EN	BIT(9)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX	BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN	BIT(13)
#define MVNETA_GMAC_SAMPLE_TX_CFG_EN	BIT(15)
#define MVNETA_MIB_COUNTERS_BASE	0x3080
#define MVNETA_MIB_LATE_COLLISION	0x7c
#define MVNETA_DA_FILT_SPEC_MCAST	0x3400
#define MVNETA_DA_FILT_OTH_MCAST	0x3500
#define MVNETA_DA_FILT_UCAST_BASE	0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q)	(0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q)		(0x3c20 + ((q) << 2))
#define MVNETA_TXQ_SENT_THRESH_ALL_MASK	0x3fff0000
#define MVNETA_TXQ_SENT_THRESH_MASK(coal)	((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q)	(0x3c60 + ((q) << 2))
#define MVNETA_TXQ_DEC_SENT_SHIFT	16
#define MVNETA_TXQ_STATUS_REG(q)	(0x3c40 + ((q) << 2))
#define MVNETA_TXQ_SENT_DESC_SHIFT	16
#define MVNETA_TXQ_SENT_DESC_MASK	0x3fff0000
#define MVNETA_PORT_TX_RESET		0x3cf0
#define MVNETA_PORT_TX_DMA_RESET	BIT(0)
#define MVNETA_TX_MTU			0x3e0c
#define MVNETA_TX_TOKEN_SIZE		0x3e14
#define MVNETA_TX_TOKEN_SIZE_MAX	0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q)	(0x3e40 + ((q) << 2))
#define MVNETA_TXQ_TOKEN_SIZE_MAX	0x7fffffff

/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
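
/*
 * Example: with the 16-entry rings used here (last_desc == 15), index 14
 * advances to 15 and index 15 wraps back to 0, giving a circular ring
 * without any modulo operation.
 */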

/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS		16
#define MVNETA_RX_COAL_PKTS		32
#define MVNETA_RX_COAL_USEC		100

/* The two-byte Marvell header. It either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled with zeroes automatically on
 * the RX side. Since those two bytes sit in front of the Ethernet
 * header, they automatically leave the IP header aligned on a 4-byte
 * boundary: the hardware skips those two bytes on its own.
 */
#define MVNETA_MH_SIZE			2

#define MVNETA_VLAN_TAG_LEN		4

#define MVNETA_CPU_D_CACHE_LINE_SIZE	32
#define MVNETA_TX_CSUM_MAX_SIZE		9800
#define MVNETA_ACC_MODE_EXT		1

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000

#define MVNETA_TX_MTU_MAX		0x3ffff

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD			16

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD			16

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE	32

struct mvneta_port {
	void __iomem *base;
	struct mvneta_rx_queue *rxqs;
	struct mvneta_tx_queue *txqs;

	u8 mcast_count[256];
	u16 tx_ring_size;
	u16 rx_ring_size;

	phy_interface_t phy_interface;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;

	int init;
	struct phy_device *phydev;
#if CONFIG_IS_ENABLED(DM_GPIO)
	struct gpio_desc phy_reset_gpio;
	struct gpio_desc sfp_tx_disable_gpio;
#endif
};

/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVNETA_TX_L3_OFF_SHIFT		0
#define MVNETA_TX_IP_HLEN_SHIFT		8
#define MVNETA_TX_L4_UDP		BIT(16)
#define MVNETA_TX_L3_IP6		BIT(17)
#define MVNETA_TXD_IP_CSUM		BIT(18)
#define MVNETA_TXD_Z_PAD		BIT(19)
#define MVNETA_TXD_L_DESC		BIT(20)
#define MVNETA_TXD_F_DESC		BIT(21)
#define MVNETA_TXD_FLZ_DESC		(MVNETA_TXD_Z_PAD | \
					 MVNETA_TXD_L_DESC | \
					 MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL		BIT(30)
#define MVNETA_TX_L4_CSUM_NOT		BIT(31)

#define MVNETA_RXD_ERR_CRC		0x0
#define MVNETA_RXD_ERR_SUMMARY		BIT(16)
#define MVNETA_RXD_ERR_OVERRUN		BIT(17)
#define MVNETA_RXD_ERR_LEN		BIT(18)
#define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4		BIT(25)
#define MVNETA_RXD_FIRST_LAST_DESC	(BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK		BIT(30)

struct mvneta_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting */
	u16 reserved1;		/* csum_l4 (for future use) */
	u16 data_size;		/* Data size of transmitted packet in bytes */
	u32 buf_phys_addr;	/* Physical addr of transmitted buffer */
	u32 reserved2;		/* hw_cmd - (for future use, PMT) */
	u32 reserved3[4];	/* Reserved - (for future use) */
};

struct mvneta_rx_desc {
	u32 status;		/* Info about received packet */
	u16 reserved1;		/* pnc_info - (for future use, PnC) */
	u16 data_size;		/* Size of received packet in bytes */

	u32 buf_phys_addr;	/* Physical address of the buffer */
	u32 reserved2;		/* pnc_flow_id (for future use, PnC) */

	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved3;		/* prefetch_cmd, for future use */
	u16 reserved4;		/* csum_l4 - (for future use, PnC) */

	u32 reserved5;		/* pnc_extra PnC (for future use, PnC) */
	u32 reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};
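
/*
 * Note: both descriptor layouts are 32 bytes (eight 32-bit words),
 * matching MVNETA_DESC_ALIGNED_SIZE and the 32-byte D-cache line size of
 * these CPUs, so a descriptor never straddles a cache line.
 */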

struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;
};

/* U-Boot doesn't use multiple queues, so set the queue count to 1 */
static int rxq_number = 1;
static int txq_number = 1;
static int rxq_def;

struct buffer_location {
	struct mvneta_tx_desc *tx_descs;
	struct mvneta_rx_desc *rx_descs;
	u32 rx_buffers;
};

/*
 * All 4 interfaces use the same global buffer, since only one interface
 * can be enabled at once
 */
static struct buffer_location buffer_loc;

/*
 * Page table entries are set to 1MB, or multiples of 1MB (not < 1MB).
 * The driver uses only a few buffer descriptors, so 1MB of BD space is
 * plenty.
 */
#define BD_SPACE	(1 << 20)
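
/*
 * Rough layout of this shared area as carved up in mvneta_probe()
 * (each region rounded up to ARCH_DMA_MINALIGN):
 *
 *	tx_descs:   MVNETA_MAX_TXD * 32 bytes
 *	rx_descs:   MVNETA_MAX_RXD * 32 bytes
 *	rx_buffers: MVNETA_MAX_RXD * RX_BUFFER_SIZE
 *
 * With 16 descriptors each and ~1.5 KiB buffers this is well under the
 * 1 MiB reserved above; the remainder stays unused.
 */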

/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}

/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;

	/* Perform dummy reads from MIB counters */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
}
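
/*
 * The "dummy" reads above suffice because the NETA MIB counters are
 * clear-on-read: walking every counter register once zeroes the whole
 * block without needing an explicit clear command.
 */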

/* Rx descriptors helper methods */

/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{
	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
		MVNETA_RXD_FIRST_LAST_DESC;
}

/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
{
	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
	 * be added at once
	 */
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}

	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}

/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}

/* Update the number of processed (rx_done) and refilled (rx_filled) RX
 * descriptors; called upon return from the rx path or from
 * mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
		      (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}
		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}
}

/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	return rxq->descs + rx_desc;
}

/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	u32 val;

	/* Only 255 descriptors can be added at once; assume the caller
	 * processes TX descriptors in quanta of less than 256
	 */
	val = pend_desc;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get pointer to next TX descriptor to be processed (sent) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}
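
/*
 * The buf_size >> 3 above reflects that the hardware buffer-size field
 * is expressed in units of 8 bytes; e.g. RX_BUFFER_SIZE = 1568 (with the
 * 32-byte alignment assumed earlier) is programmed as 196 into bits
 * 19..31 of the RXQ size register.
 */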

/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
	int queue;
	u32 q_map;

	/* Enable all initialized TXs. */
	mvneta_mib_counters_clear(pp);
	q_map = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		if (txq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

	/* Enable all initialized RXQs. */
	q_map = 0;
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		if (rxq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}

/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
	u32 val;
	int count;

	/* Stop Rx port activity. Check port Rx activity. */
	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

	/* Issue stop command for active channels only */
	if (val != 0)
		mvreg_write(pp, MVNETA_RXQ_CMD,
			    val << MVNETA_RXQ_DISABLE_SHIFT);

	/* Wait for all Rx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
			dev_warn(pp->phydev->dev,
				 "TIMEOUT waiting for RX to stop, rx_queue_cmd: 0x%08x\n",
				 val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_RXQ_CMD);
	} while (val & 0xff);

	/* Stop Tx port activity. Check port Tx activity. Issue stop
	 * command for active channels only
	 */
	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

	if (val != 0)
		mvreg_write(pp, MVNETA_TXQ_CMD,
			    (val << MVNETA_TXQ_DISABLE_SHIFT));

	/* Wait for all Tx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
			dev_warn(pp->phydev->dev,
				 "TIMEOUT waiting for TX to stop, status=0x%08x\n",
				 val);
			break;
		}
		mdelay(1);

		/* Check TX Command reg that all Txqs are stopped */
		val = mvreg_read(pp, MVNETA_TXQ_CMD);

	} while (val & 0xff);

	/* Double check to verify that TX FIFO is empty */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
			dev_warn(pp->phydev->dev,
				 "TX FIFO empty timeout, status=0x%08x\n",
				 val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_PORT_STATUS);
	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
		 (val & MVNETA_TX_IN_PRGRS));

	udelay(200);
}

/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
	u32 val;

	/* Enable port */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val |= MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
	u32 val;

	/* Reset the Enable bit in the Serial Control Register */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

	udelay(200);
}

/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
		val = 0;
	} else {
		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}

/* This method sets defaults to the NETA port:
 * Clears interrupt Cause and Mask registers.
 * Clears all MAC tables.
 * Sets defaults to all registers.
 * Resets RX and TX descriptor rings.
 * Resets PHY.
 * This method can be called after mvneta_port_down() to return the port
 * settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
	int cpu;
	int queue;
	u32 val;

	/* Clear all Cause registers */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);

	/* Mask all interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

	/* Set CPU queue access map - all CPUs have access to all RX
	 * queues and to all TX queues
	 */
	for (cpu = 0; cpu < MVNETA_NR_CPUS; cpu++)
		mvreg_write(pp, MVNETA_CPU_MAP(cpu),
			    (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
			     MVNETA_CPU_TXQ_ACCESS_ALL_MASK));

	/* Reset RX and TX DMAs */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
	val = MVNETA_ACC_MODE_EXT;
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	/* Update val of portCfg register accordingly with all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

	/* Build PORT_SDMA_CONFIG_REG */
	val = 0;

	/* Default burst size */
	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

	/* Enable PHY polling in hardware if not in fixed-link mode */
	if (!CONFIG_IS_ENABLED(PHY_FIXED) ||
	    pp->phydev->phy_id != PHY_FIXED_ID) {
		mvreg_write(pp, MVNETA_PHY_ADDR, pp->phydev->addr);

		val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
		val |= MVNETA_PHY_POLLING_ENABLE;
		mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
	}

	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);
}

/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
				  int queue)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the Unicast table entry */
	last_nibble = (0xf & last_nibble);

	/* offset from unicast tbl base */
	tbl_offset = (last_nibble / 4) * 4;

	/* offset within the above reg */
	reg_offset = last_nibble % 4;

	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

	if (queue == -1) {
		/* Clear accepts frame bit at specified unicast DA tbl entry */
		unicast_reg &= ~(0xff << (8 * reg_offset));
	} else {
		unicast_reg &= ~(0xff << (8 * reg_offset));
		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}
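
/*
 * Indexing example: for a MAC address ending in 0x5b, last_nibble is 0xb
 * (11), so tbl_offset = (11 / 4) * 4 = 8 (the third 32-bit table
 * register) and reg_offset = 11 % 4 = 3 (its top byte).
 */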

/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
				int queue)
{
	unsigned int mac_h;
	unsigned int mac_l;

	if (queue != -1) {
		mac_l = (addr[4] << 8) | (addr[5]);
		mac_h = (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | (addr[3] << 0);

		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
	}

	/* Accept frames of this address */
	mvneta_set_ucast_addr(pp, addr[5], queue);
}
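
/*
 * For example, the address 00:11:22:33:44:55 is written as
 * mac_h = 0x00112233 and mac_l = 0x00004455, i.e. the first four bytes
 * land in MVNETA_MAC_ADDR_HIGH and the last two in MVNETA_MAC_ADDR_LOW.
 */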

static int mvneta_write_hwaddr(struct udevice *dev)
{
	mvneta_mac_addr_set(dev_get_priv(dev),
			    ((struct eth_pdata *)dev_get_plat(dev))->enetaddr,
			    rxq_def);

	return 0;
}

/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
				u32 phys_addr, u32 cookie)
{
	rx_desc->buf_cookie = cookie;
	rx_desc->buf_phys_addr = phys_addr;
}

/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int sent_desc)
{
	u32 val;

	/* Only 255 TX descriptors can be updated at once */
	while (sent_desc > 0xff) {
		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		sent_desc = sent_desc - 0xff;
	}

	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
					struct mvneta_tx_queue *txq)
{
	u32 val;
	int sent_desc;

	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
		MVNETA_TXQ_SENT_DESC_SHIFT;

	return sent_desc;
}

/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	u32 status = rx_desc->status;

	if (!mvneta_rxq_desc_is_first_last(status)) {
		dev_err(pp->phydev->dev,
			"bad rx status %08x (buffer oversize), size=%d\n",
			status, rx_desc->data_size);
		return;
	}

	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
	case MVNETA_RXD_ERR_CRC:
		dev_err(pp->phydev->dev,
			"bad rx status %08x (crc error), size=%d\n", status,
			rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_OVERRUN:
		dev_err(pp->phydev->dev,
			"bad rx status %08x (overrun error), size=%d\n",
			status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_LEN:
		dev_err(pp->phydev->dev,
			"bad rx status %08x (max frame length error), size=%d\n",
			status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_RESOURCE:
		dev_err(pp->phydev->dev,
			"bad rx status %08x (resource error), size=%d\n",
			status, rx_desc->data_size);
		break;
	}
}

static struct mvneta_rx_queue *mvneta_rxq_handle_get(struct mvneta_port *pp,
						     int rxq)
{
	return &pp->rxqs[rxq];
}

/* Drop packets received by the RXQ and free buffers */
static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	int rx_done;

	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
	if (rx_done)
		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
}

/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
			   int num)
{
	int i;

	for (i = 0; i < num; i++) {
		u32 addr;

		/* U-Boot special: Fill in the rx buffer addresses */
		addr = buffer_loc.rx_buffers + (i * RX_BUFFER_SIZE);
		mvneta_rx_desc_fill(rxq->descs + i, addr, addr);
	}

	/* Add this number of RX descriptors as non occupied (ready to
	 * get packets)
	 */
	mvneta_rxq_non_occup_desc_add(pp, rxq, i);

	return 0;
}
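
/*
 * Note that mvneta_rxq_fill() passes the same value as both physical
 * address and cookie: the RX buffers live in the identity-mapped,
 * uncached buffer_loc area, so the "cookie" the RX path reads back is
 * simply the buffer address itself.
 */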

/* Rx/Tx queue initialization/cleanup methods */

/* Create a specified RX queue */
static int mvneta_rxq_init(struct mvneta_port *pp,
			   struct mvneta_rx_queue *rxq)
{
	rxq->size = pp->rx_ring_size;

	/* Descriptors were preallocated (buffer_loc); derive DMA address */
	rxq->descs_phys = (dma_addr_t)rxq->descs;
	if (rxq->descs == NULL)
		return -ENOMEM;

	WARN_ON(rxq->descs != PTR_ALIGN(rxq->descs, ARCH_DMA_MINALIGN));

	rxq->last_desc = rxq->size - 1;

	/* Set Rx descriptors queue starting address */
	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);

	/* Fill RXQ with buffers from RX pool */
	mvneta_rxq_buf_size_set(pp, rxq, RX_BUFFER_SIZE);
	mvneta_rxq_fill(pp, rxq, rxq->size);

	return 0;
}

/* Cleanup Rx queue */
static void mvneta_rxq_deinit(struct mvneta_port *pp,
			      struct mvneta_rx_queue *rxq)
{
	mvneta_rxq_drop_pkts(pp, rxq);

	rxq->descs = NULL;
	rxq->last_desc = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_phys = 0;
}

/* Create and initialize a tx queue */
static int mvneta_txq_init(struct mvneta_port *pp,
			   struct mvneta_tx_queue *txq)
{
	txq->size = pp->tx_ring_size;

	/* Descriptors were preallocated (buffer_loc); derive DMA address */
	txq->descs_phys = (dma_addr_t)txq->descs;
	if (txq->descs == NULL)
		return -ENOMEM;

	WARN_ON(txq->descs != PTR_ALIGN(txq->descs, ARCH_DMA_MINALIGN));

	txq->last_desc = txq->size - 1;

	/* Set maximum bandwidth for enabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);

	/* Set Tx descriptors queue starting address */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);

	return 0;
}

/* Free a TX queue's resources, e.g. when mvneta_txq_init() fails */
static void mvneta_txq_deinit(struct mvneta_port *pp,
			      struct mvneta_tx_queue *txq)
{
	txq->descs = NULL;
	txq->last_desc = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_phys = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
}

/* Cleanup all Tx queues */
static void mvneta_cleanup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_deinit(pp, &pp->txqs[queue]);
}

/* Cleanup all Rx queues */
static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
}

/* Init all Rx queues */
static int mvneta_setup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
		if (err) {
			dev_err(pp->phydev->dev, "%s: can't create rxq=%d\n",
				__func__, queue);
			mvneta_cleanup_rxqs(pp);
			return err;
		}
	}

	return 0;
}

/* Init all tx queues */
static int mvneta_setup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int err = mvneta_txq_init(pp, &pp->txqs[queue]);
		if (err) {
			dev_err(pp->phydev->dev, "%s: can't create txq=%d\n",
				__func__, queue);
			mvneta_cleanup_txqs(pp);
			return err;
		}
	}

	return 0;
}

static void mvneta_start_dev(struct mvneta_port *pp)
{
	/* start the Rx/Tx activity */
	mvneta_port_enable(pp);
}

static void mvneta_adjust_link(struct udevice *dev)
{
	struct mvneta_port *pp = dev_get_priv(dev);
	struct phy_device *phydev = pp->phydev;
	bool status_change = false;

	if (phydev->link &&
	    (pp->speed != phydev->speed || pp->duplex != phydev->duplex)) {
		u32 val;

		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
			 MVNETA_GMAC_CONFIG_GMII_SPEED |
			 MVNETA_GMAC_CONFIG_FULL_DUPLEX |
			 MVNETA_GMAC_AN_SPEED_EN |
			 MVNETA_GMAC_AN_DUPLEX_EN);

		/* FIXME: For the fixed-link case, these were the initial
		 * settings used before the code was converted to use
		 * PHY_FIXED. Some of these may look nonsensical (for example
		 * BYPASS_AN only makes sense for 1000base-x and 2500base-x
		 * modes, AFAIK), and in fact this may be changed in the
		 * future (when support for inband AN is added). Also, why is
		 * ADVERT_FC enabled if we don't enable inband AN at all?
		 */
		if (CONFIG_IS_ENABLED(PHY_FIXED) &&
		    pp->phydev->phy_id == PHY_FIXED_ID)
			val = MVNETA_GMAC_IB_BYPASS_AN_EN |
			      MVNETA_GMAC_SET_FC_EN |
			      MVNETA_GMAC_ADVERT_FC_EN |
			      MVNETA_GMAC_SAMPLE_TX_CFG_EN;

		if (phydev->duplex)
			val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

		if (phydev->speed == SPEED_1000)
			val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
		else if (phydev->speed == SPEED_100)
			val |= MVNETA_GMAC_CONFIG_MII_SPEED;

		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

		pp->duplex = phydev->duplex;
		pp->speed = phydev->speed;
	}

	if (phydev->link != pp->link) {
		if (!phydev->link) {
			pp->duplex = -1;
			pp->speed = 0;
		}

		pp->link = phydev->link;
		status_change = true;
	}

	if (status_change) {
		if (phydev->link) {
			u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
			val |= (MVNETA_GMAC_FORCE_LINK_PASS |
				MVNETA_GMAC_FORCE_LINK_DOWN);
			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
			mvneta_port_up(pp);
		} else {
			mvneta_port_down(pp);
		}
	}
}

static int mvneta_open(struct udevice *dev)
{
	struct mvneta_port *pp = dev_get_priv(dev);
	int ret;

	ret = mvneta_setup_rxqs(pp);
	if (ret)
		return ret;

	ret = mvneta_setup_txqs(pp);
	if (ret)
		return ret;

	mvneta_adjust_link(dev);

	mvneta_start_dev(pp);

	return 0;
}

/* Initialize hw */
static int mvneta_init2(struct mvneta_port *pp)
{
	int queue;

	/* Disable port */
	mvneta_port_disable(pp);

	/* Set port default values */
	mvneta_defaults_set(pp);

	pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue),
			   GFP_KERNEL);
	if (!pp->txqs)
		return -ENOMEM;

	/* U-Boot special: use preallocated area */
	pp->txqs[0].descs = buffer_loc.tx_descs;

	/* Initialize TX descriptor rings */
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		txq->id = queue;
		txq->size = pp->tx_ring_size;
	}

	pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue),
			   GFP_KERNEL);
	if (!pp->rxqs) {
		kfree(pp->txqs);
		return -ENOMEM;
	}

	/* U-Boot special: use preallocated area */
	pp->rxqs[0].descs = buffer_loc.rx_descs;

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		rxq->id = queue;
		rxq->size = pp->rx_ring_size;
	}

	return 0;
}

/* platform glue : initialize decoding windows */

/*
 * Unlike the Armada 380, the Armada 3700 has two layers of decode
 * windows for GBE: the first layer is the GbE address window that
 * resides inside the GBE unit; the second layer is the fabric address
 * window located in the NIC400 (South Fabric). To simplify the address
 * decode configuration for Armada 3700, we bypass the first layer of
 * the GBE decode window by setting the first window to 4GB.
 */
static void mvneta_bypass_mbus_windows(struct mvneta_port *pp)
{
	/*
	 * Set window size to 4GB, to bypass GBE address decode, leaving the
	 * work to the MBUS decode window
	 */
	mvreg_write(pp, MVNETA_WIN_SIZE(0), MVNETA_WIN_SIZE_MASK);

	/* Enable GBE address decode window 0 by setting bit 0 to 0 */
	clrbits_le32(pp->base + MVNETA_BASE_ADDR_ENABLE,
		     MVNETA_BASE_ADDR_ENABLE_BIT);

	/* Set GBE address decode window 0 to full access (read and write) */
	setbits_le32(pp->base + MVNETA_PORT_ACCESS_PROTECT,
		     MVNETA_PORT_ACCESS_PROTECT_WIN0_RW);
}

static void mvneta_conf_mbus_windows(struct mvneta_port *pp)
{
	const struct mbus_dram_target_info *dram;
	u32 win_enable;
	u32 win_protect;
	int i;

	dram = mvebu_mbus_dram_info();
	for (i = 0; i < 6; i++) {
		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

		if (i < 4)
			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
	}

	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;
		mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
			    (cs->mbus_attr << 8) | dram->mbus_dram_target_id);

		mvreg_write(pp, MVNETA_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}

	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
}

/* Power up the port */
static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
	u32 ctrl;

	/* MAC Cause register should be cleared */
	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);

	ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);

	/* Even though it might look weird, when we're configured in
	 * SGMII or QSGMII mode, the RGMII bit needs to be set.
	 */
	switch (phy_mode) {
	case PHY_INTERFACE_MODE_QSGMII:
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		ctrl |= MVNETA_GMAC2_PORT_RGMII;
		break;
	default:
		return -EINVAL;
	}

	/* Cancel Port Reset */
	ctrl &= ~MVNETA_GMAC2_PORT_RESET;
	mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);

	while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
		MVNETA_GMAC2_PORT_RESET) != 0)
		continue;

	return 0;
}

/* Device initialization routine */
static int mvneta_init(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct mvneta_port *pp = dev_get_priv(dev);
	int err;

	pp->tx_ring_size = MVNETA_MAX_TXD;
	pp->rx_ring_size = MVNETA_MAX_RXD;

	err = mvneta_init2(pp);
	if (err < 0) {
		dev_err(dev, "can't init eth hal\n");
		return err;
	}

	mvneta_mac_addr_set(pp, pdata->enetaddr, rxq_def);

	err = mvneta_port_power_up(pp, pp->phy_interface);
	if (err < 0) {
		dev_err(dev, "can't power up port\n");
		return err;
	}

	/* Call open() now as it needs to be done before running send() */
	mvneta_open(dev);

	return 0;
}

/* U-Boot only functions follow here */

static int mvneta_start(struct udevice *dev)
{
	struct mvneta_port *pp = dev_get_priv(dev);
	struct phy_device *phydev;

	mvneta_port_power_up(pp, pp->phy_interface);

	if (!pp->init || pp->link == 0) {
		phydev = dm_eth_phy_connect(dev);
		if (!phydev) {
			printf("dm_eth_phy_connect failed\n");
			return -ENODEV;
		}

		pp->phydev = phydev;
		phy_config(phydev);
		phy_startup(phydev);
		if (!phydev->link) {
			printf("%s: No link.\n", phydev->dev->name);
			return -1;
		}

		/* Full init on first call */
		mvneta_init(dev);
		pp->init = 1;
	} else {
		/* Upon all following calls, this is enough */
		mvneta_port_up(pp);
		mvneta_port_enable(pp);
	}

	return 0;
}

static int mvneta_send(struct udevice *dev, void *packet, int length)
{
	struct mvneta_port *pp = dev_get_priv(dev);
	struct mvneta_tx_queue *txq = &pp->txqs[0];
	struct mvneta_tx_desc *tx_desc;
	int sent_desc;
	u32 timeout = 0;

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvneta_txq_next_desc_get(txq);

	tx_desc->buf_phys_addr = (u32)(uintptr_t)packet;
	tx_desc->data_size = length;
	flush_dcache_range((ulong)packet,
			   (ulong)packet + ALIGN(length, PKTALIGN));

	/* First and Last descriptor */
	tx_desc->command = MVNETA_TX_L4_CSUM_NOT | MVNETA_TXD_FLZ_DESC;
	mvneta_txq_pend_desc_add(pp, txq, 1);

	/* Wait for packet to be sent (queue might help with speed here) */
	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
	while (!sent_desc) {
		if (timeout++ > 10000) {
			printf("timeout: packet not sent\n");
			return -1;
		}
		sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
	}

	/* txDone has increased - hw sent packet */
	mvneta_txq_sent_desc_dec(pp, txq, sent_desc);

	return 0;
}

static int mvneta_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mvneta_port *pp = dev_get_priv(dev);
	int rx_done;
	struct mvneta_rx_queue *rxq;
	int rx_bytes = 0;

	/* get rx queue */
	rxq = mvneta_rxq_handle_get(pp, rxq_def);
	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);

	if (rx_done) {
		struct mvneta_rx_desc *rx_desc;
		unsigned char *data;
		u32 rx_status;

		/*
		 * No cache invalidation needed here, since the descriptors
		 * are located in an uncached memory region
		 */
		rx_desc = mvneta_rxq_next_desc_get(rxq);

		rx_status = rx_desc->status;
		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
			mvneta_rx_error(pp, rx_desc);
			/* leave the descriptor untouched */
			return -EIO;
		}

		/* 2 bytes for marvell header. 4 bytes for crc */
		rx_bytes = rx_desc->data_size - 6;

		/* give packet to stack - skip the first 2 bytes */
		data = (u8 *)(uintptr_t)rx_desc->buf_cookie + 2;
		/*
		 * No cache invalidation needed here, since the rx buffers
		 * are located in an uncached memory region
		 */
		*packetp = data;

		/*
		 * Only mark one descriptor as free
		 * since only one was processed
		 */
		mvneta_rxq_desc_num_update(pp, rxq, 1, 1);
	}

	return rx_bytes;
}

static int mvneta_probe(struct udevice *dev)
{
	struct mvneta_port *pp = dev_get_priv(dev);
#if CONFIG_IS_ENABLED(DM_GPIO)
	struct ofnode_phandle_args sfp_args;
#endif
	void *bd_space;

	/*
	 * Allocate the buffer area for descriptors and rx_buffers. This is
	 * only done once for all interfaces, as only one interface can be
	 * active at a time. Make this area DMA-safe by disabling the
	 * D-cache.
	 */
	if (!buffer_loc.tx_descs) {
		u32 size;

		/* Align buffer area for descs and rx_buffers to 1MiB */
		bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
		flush_dcache_range((ulong)bd_space, (ulong)bd_space + BD_SPACE);
		mmu_set_region_dcache_behaviour((phys_addr_t)bd_space, BD_SPACE,
						DCACHE_OFF);
		buffer_loc.tx_descs = (struct mvneta_tx_desc *)bd_space;
		size = roundup(MVNETA_MAX_TXD * sizeof(struct mvneta_tx_desc),
			       ARCH_DMA_MINALIGN);
		memset(buffer_loc.tx_descs, 0, size);
		buffer_loc.rx_descs = (struct mvneta_rx_desc *)
			((phys_addr_t)bd_space + size);
		size += roundup(MVNETA_MAX_RXD * sizeof(struct mvneta_rx_desc),
				ARCH_DMA_MINALIGN);
		buffer_loc.rx_buffers = (phys_addr_t)(bd_space + size);
	}

	pp->base = dev_read_addr_ptr(dev);
	pp->phy_interface = dev_read_phy_mode(dev);
	if (pp->phy_interface == PHY_INTERFACE_MODE_NA)
		return -EINVAL;

	/* Configure MBUS address windows */
	if (device_is_compatible(dev, "marvell,armada-3700-neta"))
		mvneta_bypass_mbus_windows(pp);
	else
		mvneta_conf_mbus_windows(pp);

#if CONFIG_IS_ENABLED(DM_GPIO)
	if (!dev_read_phandle_with_args(dev, "sfp", NULL, 0, 0, &sfp_args) &&
	    ofnode_is_enabled(sfp_args.node))
		gpio_request_by_name_nodev(sfp_args.node, "tx-disable-gpio", 0,
					   &pp->sfp_tx_disable_gpio,
					   GPIOD_IS_OUT);

	gpio_request_by_name(dev, "phy-reset-gpios", 0,
			     &pp->phy_reset_gpio, GPIOD_IS_OUT);

	if (dm_gpio_is_valid(&pp->phy_reset_gpio)) {
		dm_gpio_set_value(&pp->phy_reset_gpio, 1);
		mdelay(10);
		dm_gpio_set_value(&pp->phy_reset_gpio, 0);
	}

	if (dm_gpio_is_valid(&pp->sfp_tx_disable_gpio))
		dm_gpio_set_value(&pp->sfp_tx_disable_gpio, 0);
#endif

	return 0;
}

static void mvneta_stop(struct udevice *dev)
{
	struct mvneta_port *pp = dev_get_priv(dev);

	mvneta_port_down(pp);
	mvneta_port_disable(pp);
}

static const struct eth_ops mvneta_ops = {
	.start		= mvneta_start,
	.send		= mvneta_send,
	.recv		= mvneta_recv,
	.stop		= mvneta_stop,
	.write_hwaddr	= mvneta_write_hwaddr,
};

static const struct udevice_id mvneta_ids[] = {
	{ .compatible = "marvell,armada-370-neta" },
	{ .compatible = "marvell,armada-xp-neta" },
	{ .compatible = "marvell,armada-3700-neta" },
	{ }
};

U_BOOT_DRIVER(mvneta) = {
	.name	= "mvneta",
	.id	= UCLASS_ETH,
	.of_match = mvneta_ids,
	.probe	= mvneta_probe,
	.ops	= &mvneta_ops,
	.priv_auto	= sizeof(struct mvneta_port),
	.plat_auto	= sizeof(struct eth_pdata),
};