/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * U-Boot version:
 * Copyright (C) 2014-2015 Stefan Roese <[email protected]>
 *
 * Based on the Linux version which is:
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <[email protected]>
 * Thomas Petazzoni <[email protected]>
 *
 * SPDX-License-Identifier: GPL-2.0
 */

#include <common.h>
#include <dm.h>
#include <net.h>
#include <netdev.h>
#include <config.h>
#include <malloc.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <phy.h>
#include <miiphy.h>
#include <watchdog.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include <linux/compat.h>
#include <linux/mbus.h>

DECLARE_GLOBAL_DATA_PTR;

#if !defined(CONFIG_PHYLIB)
# error Marvell mvneta requires PHYLIB
#endif

/* Some linux -> U-Boot compatibility stuff */
#define netdev_err(dev, fmt, args...)		\
	printf(fmt, ##args)
#define netdev_warn(dev, fmt, args...)		\
	printf(fmt, ##args)
#define netdev_info(dev, fmt, args...)		\
	printf(fmt, ##args)

#define CONFIG_NR_CPUS			1
#define ETH_HLEN			14	/* Total octets in header */

/* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */
#define WRAP				(2 + ETH_HLEN + 4 + 32)
#define MTU				1500
#define RX_BUFFER_SIZE			(ALIGN(MTU + WRAP, ARCH_DMA_MINALIGN))
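/*
 * Worked example: WRAP is 2 + 14 + 4 + 32 = 52 bytes, so with MTU = 1500
 * the unaligned buffer size is 1552 bytes. With a 32-byte
 * ARCH_DMA_MINALIGN this rounds up to 1568, with 64 bytes to 1600.
 */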

#define MVNETA_SMI_TIMEOUT		10000

/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q)		(0x1400 + ((q) << 2))
#define MVNETA_RXQ_HW_BUF_ALLOC			BIT(1)
#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK		(0xf << 8)
#define MVNETA_RXQ_PKT_OFFSET_MASK(offs)	((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q)		(0x14c0 + ((q) << 2))
#define MVNETA_RXQ_NON_OCCUPIED(v)		((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q)		(0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q)			(0x14a0 + ((q) << 2))
#define MVNETA_RXQ_BUF_SIZE_SHIFT		19
#define MVNETA_RXQ_BUF_SIZE_MASK		(0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q)		(0x14e0 + ((q) << 2))
#define MVNETA_RXQ_OCCUPIED_ALL_MASK		0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q)		(0x1500 + ((q) << 2))
#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT	16
#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX		255
#define MVNETA_PORT_RX_RESET			0x1cc0
#define MVNETA_PORT_RX_DMA_RESET		BIT(0)
#define MVNETA_PHY_ADDR				0x2000
#define MVNETA_PHY_ADDR_MASK			0x1f
#define MVNETA_SMI				0x2004
#define MVNETA_PHY_REG_MASK			0x1f
/* SMI register fields */
#define MVNETA_SMI_DATA_OFFS			0	/* Data */
#define MVNETA_SMI_DATA_MASK			(0xffff << MVNETA_SMI_DATA_OFFS)
#define MVNETA_SMI_DEV_ADDR_OFFS		16	/* PHY device address */
#define MVNETA_SMI_REG_ADDR_OFFS		21	/* PHY device reg addr*/
#define MVNETA_SMI_OPCODE_OFFS			26	/* Write/Read opcode */
#define MVNETA_SMI_OPCODE_READ			(1 << MVNETA_SMI_OPCODE_OFFS)
#define MVNETA_SMI_READ_VALID			(1 << 27)	/* Read Valid */
#define MVNETA_SMI_BUSY				(1 << 28)	/* Busy */
#define MVNETA_MBUS_RETRY			0x2010
#define MVNETA_UNIT_INTR_CAUSE			0x2080
#define MVNETA_UNIT_CONTROL			0x20B0
#define MVNETA_PHY_POLLING_ENABLE		BIT(1)
#define MVNETA_WIN_BASE(w)			(0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w)			(0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w)			(0x2280 + ((w) << 2))
#define MVNETA_WIN_SIZE_MASK			(0xffff0000)
#define MVNETA_BASE_ADDR_ENABLE			0x2290
#define MVNETA_BASE_ADDR_ENABLE_BIT		0x1
#define MVNETA_PORT_ACCESS_PROTECT		0x2294
#define MVNETA_PORT_ACCESS_PROTECT_WIN0_RW	0x3
#define MVNETA_PORT_CONFIG			0x2400
#define MVNETA_UNI_PROMISC_MODE			BIT(0)
#define MVNETA_DEF_RXQ(q)			((q) << 1)
#define MVNETA_DEF_RXQ_ARP(q)			((q) << 4)
#define MVNETA_TX_UNSET_ERR_SUM			BIT(12)
#define MVNETA_DEF_RXQ_TCP(q)			((q) << 16)
#define MVNETA_DEF_RXQ_UDP(q)			((q) << 19)
#define MVNETA_DEF_RXQ_BPDU(q)			((q) << 22)
#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR		BIT(25)
#define MVNETA_PORT_CONFIG_DEFL_VALUE(q)	(MVNETA_DEF_RXQ(q)	 | \
						 MVNETA_DEF_RXQ_ARP(q)	 | \
						 MVNETA_DEF_RXQ_TCP(q)	 | \
						 MVNETA_DEF_RXQ_UDP(q)	 | \
						 MVNETA_DEF_RXQ_BPDU(q)	 | \
						 MVNETA_TX_UNSET_ERR_SUM | \
						 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
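
/*
 * Worked example: for the default queue q == 0 all queue fields are zero,
 * so MVNETA_PORT_CONFIG_DEFL_VALUE(0) reduces to
 * MVNETA_TX_UNSET_ERR_SUM | MVNETA_RX_CSUM_WITH_PSEUDO_HDR == 0x02001000.
 */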
#define MVNETA_PORT_CONFIG_EXTEND		0x2404
#define MVNETA_MAC_ADDR_LOW			0x2414
#define MVNETA_MAC_ADDR_HIGH			0x2418
#define MVNETA_SDMA_CONFIG			0x241c
#define MVNETA_SDMA_BRST_SIZE_16		4
#define MVNETA_RX_BRST_SZ_MASK(burst)		((burst) << 1)
#define MVNETA_RX_NO_DATA_SWAP			BIT(4)
#define MVNETA_TX_NO_DATA_SWAP			BIT(5)
#define MVNETA_DESC_SWAP			BIT(6)
#define MVNETA_TX_BRST_SZ_MASK(burst)		((burst) << 22)
#define MVNETA_PORT_STATUS			0x2444
#define MVNETA_TX_IN_PRGRS			BIT(1)
#define MVNETA_TX_FIFO_EMPTY			BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE		0x247c
#define MVNETA_SERDES_CFG			0x24A0
#define MVNETA_SGMII_SERDES_PROTO		0x0cc7
#define MVNETA_QSGMII_SERDES_PROTO		0x0667
#define MVNETA_TYPE_PRIO			0x24bc
#define MVNETA_FORCE_UNI			BIT(21)
#define MVNETA_TXQ_CMD_1			0x24e4
#define MVNETA_TXQ_CMD				0x2448
#define MVNETA_TXQ_DISABLE_SHIFT		8
#define MVNETA_TXQ_ENABLE_MASK			0x000000ff
#define MVNETA_ACC_MODE				0x2500
#define MVNETA_CPU_MAP(cpu)			(0x2540 + ((cpu) << 2))
#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK		0x000000ff
#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK		0x0000ff00
#define MVNETA_RXQ_TIME_COAL_REG(q)		(0x2580 + ((q) << 2))

/* Exception Interrupt Port/Queue Cause register */

#define MVNETA_INTR_NEW_CAUSE			0x25a0
#define MVNETA_INTR_NEW_MASK			0x25a4

/* bits 0..7   = TXQ SENT, one bit per queue.
 * bits 8..15  = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit  29     = OLD_REG_SUM, see old reg ?
 * bit  30     = TX_ERR_SUM, one bit for 4 ports
 * bit  31     = MISC_SUM, one bit for 4 ports
 */
#define MVNETA_TX_INTR_MASK(nr_txqs)		(((1 << nr_txqs) - 1) << 0)
#define MVNETA_TX_INTR_MASK_ALL			(0xff << 0)
#define MVNETA_RX_INTR_MASK(nr_rxqs)		(((1 << nr_rxqs) - 1) << 8)
#define MVNETA_RX_INTR_MASK_ALL			(0xff << 8)
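
/*
 * Example: with the single queue used by this driver,
 * MVNETA_TX_INTR_MASK(1) == 0x001 and MVNETA_RX_INTR_MASK(1) == 0x100.
 */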

#define MVNETA_INTR_OLD_CAUSE			0x25a8
#define MVNETA_INTR_OLD_MASK			0x25ac

/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE			0x25b0
#define MVNETA_INTR_MISC_MASK			0x25b4
#define MVNETA_INTR_ENABLE			0x25b8

#define MVNETA_RXQ_CMD				0x2680
#define MVNETA_RXQ_DISABLE_SHIFT		8
#define MVNETA_RXQ_ENABLE_MASK			0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q)		(0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q)		(0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0			0x2c00
#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT		2
#define MVNETA_GMAC_MAX_RX_SIZE_MASK		0x7ffc
#define MVNETA_GMAC0_PORT_ENABLE		BIT(0)
#define MVNETA_GMAC_CTRL_2			0x2c08
#define MVNETA_GMAC2_PCS_ENABLE			BIT(3)
#define MVNETA_GMAC2_PORT_RGMII			BIT(4)
#define MVNETA_GMAC2_PORT_RESET			BIT(6)
#define MVNETA_GMAC_STATUS			0x2c10
#define MVNETA_GMAC_LINK_UP			BIT(0)
#define MVNETA_GMAC_SPEED_1000			BIT(1)
#define MVNETA_GMAC_SPEED_100			BIT(2)
#define MVNETA_GMAC_FULL_DUPLEX			BIT(3)
#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE		BIT(4)
#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE		BIT(5)
#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE		BIT(6)
#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE		BIT(7)
#define MVNETA_GMAC_AUTONEG_CONFIG		0x2c0c
#define MVNETA_GMAC_FORCE_LINK_DOWN		BIT(0)
#define MVNETA_GMAC_FORCE_LINK_PASS		BIT(1)
#define MVNETA_GMAC_FORCE_LINK_UP		(BIT(0) | BIT(1))
#define MVNETA_GMAC_IB_BYPASS_AN_EN		BIT(3)
#define MVNETA_GMAC_CONFIG_MII_SPEED		BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED		BIT(6)
#define MVNETA_GMAC_AN_SPEED_EN			BIT(7)
#define MVNETA_GMAC_SET_FC_EN			BIT(8)
#define MVNETA_GMAC_ADVERT_FC_EN		BIT(9)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX		BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN		BIT(13)
#define MVNETA_GMAC_SAMPLE_TX_CFG_EN		BIT(15)
#define MVNETA_MIB_COUNTERS_BASE		0x3080
#define MVNETA_MIB_LATE_COLLISION		0x7c
#define MVNETA_DA_FILT_SPEC_MCAST		0x3400
#define MVNETA_DA_FILT_OTH_MCAST		0x3500
#define MVNETA_DA_FILT_UCAST_BASE		0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q)		(0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q)			(0x3c20 + ((q) << 2))
#define MVNETA_TXQ_SENT_THRESH_ALL_MASK		0x3fff0000
#define MVNETA_TXQ_SENT_THRESH_MASK(coal)	((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q)		(0x3c60 + ((q) << 2))
#define MVNETA_TXQ_DEC_SENT_SHIFT		16
#define MVNETA_TXQ_STATUS_REG(q)		(0x3c40 + ((q) << 2))
#define MVNETA_TXQ_SENT_DESC_SHIFT		16
#define MVNETA_TXQ_SENT_DESC_MASK		0x3fff0000
#define MVNETA_PORT_TX_RESET			0x3cf0
#define MVNETA_PORT_TX_DMA_RESET		BIT(0)
#define MVNETA_TX_MTU				0x3e0c
#define MVNETA_TX_TOKEN_SIZE			0x3e14
#define MVNETA_TX_TOKEN_SIZE_MAX		0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q)		(0x3e40 + ((q) << 2))
#define MVNETA_TXQ_TOKEN_SIZE_MAX		0x7fffffff

/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
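
/*
 * Example: for a ring of size 16 (last_desc == 15) the macro yields
 * 0, 1, ..., 15 and then wraps back to 0, implementing a circular ring.
 */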

/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS		16
#define MVNETA_RX_COAL_PKTS		32
#define MVNETA_RX_COAL_USEC		100

/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically with zeroes on
 * the RX side. Since those two bytes sit in front of the Ethernet
 * header, they cause the IP header to be aligned on a 4-byte boundary
 * automatically: the hardware skips those two bytes on its own.
 */
#define MVNETA_MH_SIZE			2
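
/*
 * Alignment check: 2 (Marvell header) + 14 (ETH_HLEN) = 16 bytes, so the
 * IP header following the Ethernet header starts on a 4-byte boundary.
 */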

#define MVNETA_VLAN_TAG_LEN		4

#define MVNETA_CPU_D_CACHE_LINE_SIZE	32
#define MVNETA_TX_CSUM_MAX_SIZE		9800
#define MVNETA_ACC_MODE_EXT		1

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000

#define MVNETA_TX_MTU_MAX		0x3ffff

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD			16

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD			16

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE	32

struct mvneta_port {
	void __iomem *base;
	struct mvneta_rx_queue *rxqs;
	struct mvneta_tx_queue *txqs;

	u8 mcast_count[256];
	u16 tx_ring_size;
	u16 rx_ring_size;

	phy_interface_t phy_interface;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;

	int init;
	int phyaddr;
	struct phy_device *phydev;
	struct mii_dev *bus;
};

/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVNETA_TX_L3_OFF_SHIFT		0
#define MVNETA_TX_IP_HLEN_SHIFT		8
#define MVNETA_TX_L4_UDP		BIT(16)
#define MVNETA_TX_L3_IP6		BIT(17)
#define MVNETA_TXD_IP_CSUM		BIT(18)
#define MVNETA_TXD_Z_PAD		BIT(19)
#define MVNETA_TXD_L_DESC		BIT(20)
#define MVNETA_TXD_F_DESC		BIT(21)
#define MVNETA_TXD_FLZ_DESC		(MVNETA_TXD_Z_PAD  | \
					 MVNETA_TXD_L_DESC | \
					 MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL		BIT(30)
#define MVNETA_TX_L4_CSUM_NOT		BIT(31)

#define MVNETA_RXD_ERR_CRC		0x0
#define MVNETA_RXD_ERR_SUMMARY		BIT(16)
#define MVNETA_RXD_ERR_OVERRUN		BIT(17)
#define MVNETA_RXD_ERR_LEN		BIT(18)
#define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4		BIT(25)
#define MVNETA_RXD_FIRST_LAST_DESC	(BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK		BIT(30)

struct mvneta_tx_desc {
	u32  command;		/* Options used by HW for packet transmitting.*/
	u16  reserverd1;	/* csum_l4 (for future use) */
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer */
	u32  reserved2;		/* hw_cmd - (for future use, PMT) */
	u32  reserved3[4];	/* Reserved - (for future use) */
};

struct mvneta_rx_desc {
	u32  status;		/* Info about received packet */
	u16  reserved1;		/* pnc_info - (for future use, PnC) */
	u16  data_size;		/* Size of received packet in bytes */

	u32  buf_phys_addr;	/* Physical address of the buffer */
	u32  reserved2;		/* pnc_flow_id (for future use, PnC) */

	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
	u16  reserved3;		/* prefetch_cmd, for future use */
	u16  reserved4;		/* csum_l4 - (for future use, PnC) */

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC) */
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};

struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;
};

/* U-Boot doesn't use multiple queues, so set the number of queues to 1 */
static int rxq_number = 1;
static int txq_number = 1;
static int rxq_def;

struct buffer_location {
	struct mvneta_tx_desc *tx_descs;
	struct mvneta_rx_desc *rx_descs;
	u32 rx_buffers;
};

/*
 * All 4 interfaces use the same global buffer, since only one interface
 * can be enabled at once
 */
static struct buffer_location buffer_loc;

/*
 * Page table entries are set to 1MB, or multiples of 1MB (not smaller
 * than 1MB). The driver uses fewer buffer descriptors than that, so
 * 1MB of BD space is sufficient.
 */
#define BD_SPACE	(1 << 20)
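
/*
 * Layout note (sketch, not derived from code in this excerpt): the probe
 * code is expected to carve buffer_loc out of this 1MB area, placing the
 * TX descriptors first, the RX descriptors after them, and the RX
 * buffers behind both, matching struct buffer_location above.
 */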

/*
 * Dummy implementation that can be overwritten by a board
 * specific function
 */
__weak int board_network_enable(struct mii_dev *bus)
{
	return 0;
}

/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}

/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;

	/* Perform dummy reads from MIB counters */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
}

/* Rx descriptors helper methods */

/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{
	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
		MVNETA_RXD_FIRST_LAST_DESC;
}

/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
{
	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
	 * be added at once
	 */
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}

	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}

/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}

/* Update the number of RX descriptors; called upon return from the RX
 * path or from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
		  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}
		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}
}

/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	return rxq->descs + rx_desc;
}

/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	u32 val;

	/* Only 255 descriptors can be added at once; assume the caller
	 * processes TX descriptors in quanta of less than 256
	 */
	val = pend_desc;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	/* The hardware expects the buffer size in units of 8 bytes */
	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}

static int mvneta_port_is_fixed_link(struct mvneta_port *pp)
{
	/* phy_addr is set to invalid value for fixed link */
	return pp->phyaddr > PHY_MAX_ADDR;
}
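
/*
 * Note (assumption, the sentinel is assigned outside this excerpt): a
 * fixed-link port has no real PHY, so the probe code is expected to
 * store an address above PHY_MAX_ADDR in pp->phyaddr to make this
 * predicate fire.
 */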

/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
	int queue;
	u32 q_map;

	/* Enable all initialized TXs. */
	mvneta_mib_counters_clear(pp);
	q_map = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		if (txq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

	/* Enable all initialized RXQs. */
	q_map = 0;
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		if (rxq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}

/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
	u32 val;
	int count;

	/* Stop Rx port activity. Check port Rx activity. */
	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

	/* Issue stop command for active channels only */
	if (val != 0)
		mvreg_write(pp, MVNETA_RXQ_CMD,
			    val << MVNETA_RXQ_DISABLE_SHIFT);

	/* Wait for all Rx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for RX stopped! rx_queue_cmd: 0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_RXQ_CMD);
	} while (val & 0xff);

	/* Stop Tx port activity. Check port Tx activity. Issue stop
	 * command for active channels only
	 */
	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

	if (val != 0)
		mvreg_write(pp, MVNETA_TXQ_CMD,
			    (val << MVNETA_TXQ_DISABLE_SHIFT));

	/* Wait for all Tx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for TX stopped status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		/* Check TX Command reg that all Txqs are stopped */
		val = mvreg_read(pp, MVNETA_TXQ_CMD);

	} while (val & 0xff);

	/* Double check to verify that TX FIFO is empty */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
			netdev_warn(pp->dev,
				    "TX FIFO empty timeout status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_PORT_STATUS);
	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
		 (val & MVNETA_TX_IN_PRGRS));

	udelay(200);
}

/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
	u32 val;

	/* Enable port */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val |= MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
	u32 val;

	/* Reset the Enable bit in the Serial Control Register */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

	udelay(200);
}

/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
		val = 0;
	} else {
		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}

/* This method sets defaults to the NETA port:
 *	Clears interrupt Cause and Mask registers.
 *	Clears all MAC tables.
 *	Sets defaults to all registers.
 *	Resets RX and TX descriptor rings.
 *	Resets PHY.
 * This method can be called after mvneta_port_down() to return the port
 * settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
	int cpu;
	int queue;
	u32 val;

	/* Clear all Cause registers */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);

	/* Mask all interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

	/* Set CPU queue access map - all CPUs have access to all RX
	 * queues and to all TX queues
	 */
	for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
		mvreg_write(pp, MVNETA_CPU_MAP(cpu),
			    (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
			     MVNETA_CPU_TXQ_ACCESS_ALL_MASK));

	/* Reset RX and TX DMAs */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
	val = MVNETA_ACC_MODE_EXT;
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	/* Update val of portCfg register accordingly with all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

	/* Build PORT_SDMA_CONFIG_REG */
	val = 0;

	/* Default burst size */
	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

	/* Enable PHY polling in hardware if not in fixed-link mode */
	if (!mvneta_port_is_fixed_link(pp)) {
		val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
		val |= MVNETA_PHY_POLLING_ENABLE;
		mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
	}

	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);
}

/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
				  int queue)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the Unicast table entry */
	last_nibble = (0xf & last_nibble);

	/* offset from unicast tbl base */
	tbl_offset = (last_nibble / 4) * 4;

	/* offset within the above reg */
	reg_offset = last_nibble % 4;

	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

	if (queue == -1) {
		/* Clear accepts frame bit at specified unicast DA tbl entry */
		unicast_reg &= ~(0xff << (8 * reg_offset));
	} else {
		unicast_reg &= ~(0xff << (8 * reg_offset));
		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}

/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
				int queue)
{
	unsigned int mac_h;
	unsigned int mac_l;

	if (queue != -1) {
		mac_l = (addr[4] << 8) | (addr[5]);
		mac_h = (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | (addr[3] << 0);

		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
	}

	/* Accept frames of this address */
	mvneta_set_ucast_addr(pp, addr[5], queue);
}

/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
				u32 phys_addr, u32 cookie)
{
	rx_desc->buf_cookie = cookie;
	rx_desc->buf_phys_addr = phys_addr;
}

/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int sent_desc)
{
	u32 val;

	/* Only 255 TX descriptors can be updated at once */
	while (sent_desc > 0xff) {
		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		sent_desc = sent_desc - 0xff;
	}

	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
					struct mvneta_tx_queue *txq)
{
	u32 val;
	int sent_desc;

	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
		MVNETA_TXQ_SENT_DESC_SHIFT;

	return sent_desc;
}

/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	u32 status = rx_desc->status;

	if (!mvneta_rxq_desc_is_first_last(status)) {
		netdev_err(pp->dev,
			   "bad rx status %08x (buffer oversize), size=%d\n",
			   status, rx_desc->data_size);
		return;
	}

	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
	case MVNETA_RXD_ERR_CRC:
		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_OVERRUN:
		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_LEN:
		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_RESOURCE:
		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	}
}

static struct mvneta_rx_queue *mvneta_rxq_handle_get(struct mvneta_port *pp,
						     int rxq)
{
	return &pp->rxqs[rxq];
}

/* Drop packets received by the RXQ and free buffers */
static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	int rx_done;

	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
	if (rx_done)
		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
}

/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
			   int num)
{
	int i;

	for (i = 0; i < num; i++) {
		u32 addr;

		/* U-Boot special: Fill in the rx buffer addresses */
		addr = buffer_loc.rx_buffers + (i * RX_BUFFER_SIZE);
		mvneta_rx_desc_fill(rxq->descs + i, addr, addr);
	}

	/* Add this number of RX descriptors as non occupied (ready to
	 * get packets)
	 */
	mvneta_rxq_non_occup_desc_add(pp, rxq, i);

	return 0;
}
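
/*
 * Note: U-Boot runs with the RX buffers identity-mapped, which is why the
 * same address is used as both buf_phys_addr and buf_cookie above.
 */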

/* Rx/Tx queue initialization/cleanup methods */

/* Create a specified RX queue */
static int mvneta_rxq_init(struct mvneta_port *pp,
			   struct mvneta_rx_queue *rxq)
{
	rxq->size = pp->rx_ring_size;

	/* The RX descriptor area is preallocated in U-Boot; record its
	 * DMA address
	 */
	rxq->descs_phys = (dma_addr_t)rxq->descs;
	if (rxq->descs == NULL)
		return -ENOMEM;

	rxq->last_desc = rxq->size - 1;

	/* Set Rx descriptors queue starting address */
	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);

	/* Fill RXQ with buffers from RX pool */
	mvneta_rxq_buf_size_set(pp, rxq, RX_BUFFER_SIZE);
	mvneta_rxq_fill(pp, rxq, rxq->size);

	return 0;
}

/* Cleanup Rx queue */
static void mvneta_rxq_deinit(struct mvneta_port *pp,
			      struct mvneta_rx_queue *rxq)
{
	mvneta_rxq_drop_pkts(pp, rxq);

	rxq->descs = NULL;
	rxq->last_desc = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_phys = 0;
}

/* Create and initialize a tx queue */
static int mvneta_txq_init(struct mvneta_port *pp,
			   struct mvneta_tx_queue *txq)
{
	txq->size = pp->tx_ring_size;

	/* The TX descriptor area is preallocated in U-Boot; record its
	 * DMA address
	 */
	txq->descs_phys = (dma_addr_t)txq->descs;
	if (txq->descs == NULL)
		return -ENOMEM;

	txq->last_desc = txq->size - 1;

	/* Set maximum bandwidth for enabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);

	/* Set Tx descriptors queue starting address */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);

	return 0;
}

/* Free allocated resources when mvneta_txq_init() fails to allocate memory */
static void mvneta_txq_deinit(struct mvneta_port *pp,
			      struct mvneta_tx_queue *txq)
{
	txq->descs = NULL;
	txq->last_desc = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_phys = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
}

/* Cleanup all Tx queues */
static void mvneta_cleanup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_deinit(pp, &pp->txqs[queue]);
}

/* Cleanup all Rx queues */
static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
}

/* Init all Rx queues */
static int mvneta_setup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
		if (err) {
			netdev_err(pp->dev, "%s: can't create rxq=%d\n",
				   __func__, queue);
			mvneta_cleanup_rxqs(pp);
			return err;
		}
	}

	return 0;
}

/* Init all tx queues */
static int mvneta_setup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int err = mvneta_txq_init(pp, &pp->txqs[queue]);
		if (err) {
			netdev_err(pp->dev, "%s: can't create txq=%d\n",
				   __func__, queue);
			mvneta_cleanup_txqs(pp);
			return err;
		}
	}

	return 0;
}

static void mvneta_start_dev(struct mvneta_port *pp)
{
	/* start the Rx/Tx activity */
	mvneta_port_enable(pp);
}

static void mvneta_adjust_link(struct udevice *dev)
{
	struct mvneta_port *pp = dev_get_priv(dev);
	struct phy_device *phydev = pp->phydev;
	int status_change = 0;

	if (mvneta_port_is_fixed_link(pp)) {
		debug("Using fixed link, skip link adjust\n");
		return;
	}

	if (phydev->link) {
		if ((pp->speed != phydev->speed) ||
		    (pp->duplex != phydev->duplex)) {
			u32 val;

			val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
			val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
				 MVNETA_GMAC_CONFIG_GMII_SPEED |
				 MVNETA_GMAC_CONFIG_FULL_DUPLEX |
				 MVNETA_GMAC_AN_SPEED_EN |
				 MVNETA_GMAC_AN_DUPLEX_EN);

			if (phydev->duplex)
				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000)
				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
			else
				val |= MVNETA_GMAC_CONFIG_MII_SPEED;

			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

			pp->duplex = phydev->duplex;
			pp->speed = phydev->speed;
		}
	}

	if (phydev->link != pp->link) {
		if (!phydev->link) {
			pp->duplex = -1;
			pp->speed = 0;
		}

		pp->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		if (phydev->link) {
			u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
			val |= (MVNETA_GMAC_FORCE_LINK_PASS |
				MVNETA_GMAC_FORCE_LINK_DOWN);
			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
			mvneta_port_up(pp);
		} else {
			mvneta_port_down(pp);
		}
	}
}

static int mvneta_open(struct udevice *dev)
{
	struct mvneta_port *pp = dev_get_priv(dev);
	int ret;

	ret = mvneta_setup_rxqs(pp);
	if (ret)
		return ret;

	ret = mvneta_setup_txqs(pp);
	if (ret)
		return ret;

	mvneta_adjust_link(dev);

	mvneta_start_dev(pp);

	return 0;
}

/* Initialize hw */
static int mvneta_init2(struct mvneta_port *pp)
{
	int queue;

	/* Disable port */
	mvneta_port_disable(pp);

	/* Set port default values */
	mvneta_defaults_set(pp);

	pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue),
			   GFP_KERNEL);
	if (!pp->txqs)
		return -ENOMEM;

	/* U-Boot special: use preallocated area */
	pp->txqs[0].descs = buffer_loc.tx_descs;

	/* Initialize TX descriptor rings */
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		txq->id = queue;
		txq->size = pp->tx_ring_size;
	}

	pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue),
			   GFP_KERNEL);
	if (!pp->rxqs) {
		kfree(pp->txqs);
		return -ENOMEM;
	}

	/* U-Boot special: use preallocated area */
	pp->rxqs[0].descs = buffer_loc.rx_descs;

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		rxq->id = queue;
		rxq->size = pp->rx_ring_size;
	}

	return 0;
}

/* Platform glue: initialize decoding windows */

/*
 * Unlike the A380, the Armada 3700 has two layers of decode windows
 * for the GBE:
 * the first layer is the GbE address window that resides inside the GBE
 * unit; the second layer is the fabric address window, which is located
 * in the NIC400 (South Fabric).
 * To simplify the address decode configuration for Armada 3700, we bypass
 * the first layer of the GBE decode window by setting the first window
 * to 4GB.
 */
static void mvneta_bypass_mbus_windows(struct mvneta_port *pp)
{
	/*
	 * Set window size to 4GB, to bypass GBE address decode, leave the
	 * work to MBUS decode window
	 */
	mvreg_write(pp, MVNETA_WIN_SIZE(0), MVNETA_WIN_SIZE_MASK);

	/* Enable GBE address decode window 0 by setting bit 0 to 0 */
	clrbits_le32(pp->base + MVNETA_BASE_ADDR_ENABLE,
		     MVNETA_BASE_ADDR_ENABLE_BIT);

	/* Set GBE address decode window 0 to full access (read or write) */
	setbits_le32(pp->base + MVNETA_PORT_ACCESS_PROTECT,
		     MVNETA_PORT_ACCESS_PROTECT_WIN0_RW);
}

static void mvneta_conf_mbus_windows(struct mvneta_port *pp)
{
	const struct mbus_dram_target_info *dram;
	u32 win_enable;
	u32 win_protect;
	int i;

	dram = mvebu_mbus_dram_info();
	for (i = 0; i < 6; i++) {
		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

		if (i < 4)
			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
	}

	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;
		mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
			    (cs->mbus_attr << 8) | dram->mbus_dram_target_id);

		mvreg_write(pp, MVNETA_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}

	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
}
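
/*
 * Worked example (illustrative values): a single DRAM chip-select at base
 * 0x00000000 with size 256MiB programs WIN_SIZE(0) with
 * (0x10000000 - 1) & 0xffff0000 == 0x0fff0000, clears bit 0 of win_enable
 * and writes the board-specific target id and attribute into WIN_BASE(0).
 */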

/* Power up the port */
static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
	u32 ctrl;

	/* MAC Cause register should be cleared */
	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);

	ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);

	/* Even though it might look weird, when we're configured in
	 * SGMII or QSGMII mode, the RGMII bit needs to be set.
	 */
	switch (phy_mode) {
	case PHY_INTERFACE_MODE_QSGMII:
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		ctrl |= MVNETA_GMAC2_PORT_RGMII;
		break;
	default:
		return -EINVAL;
	}

	/* Cancel Port Reset */
	ctrl &= ~MVNETA_GMAC2_PORT_RESET;
	mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);

	while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
		MVNETA_GMAC2_PORT_RESET) != 0)
		continue;

	return 0;
}

/* Device initialization routine */
static int mvneta_init(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct mvneta_port *pp = dev_get_priv(dev);
	int err;

	pp->tx_ring_size = MVNETA_MAX_TXD;
	pp->rx_ring_size = MVNETA_MAX_RXD;

	err = mvneta_init2(pp);
	if (err < 0) {
		dev_err(dev, "can't init eth hal\n");
		return err;
	}

	mvneta_mac_addr_set(pp, pdata->enetaddr, rxq_def);

	err = mvneta_port_power_up(pp, pp->phy_interface);
	if (err < 0) {
		dev_err(dev, "can't power up port\n");
		return err;
	}

	/* Call open() now as it needs to be done before running send() */
	mvneta_open(dev);

	return 0;
}

/* U-Boot only functions follow here */

/* SMI / MDIO functions */

static int smi_wait_ready(struct mvneta_port *pp)
{
	u32 timeout = MVNETA_SMI_TIMEOUT;
	u32 smi_reg;

	/* wait till the SMI is not busy */
	do {
		/* read smi register */
		smi_reg = mvreg_read(pp, MVNETA_SMI);
		if (timeout-- == 0) {
			printf("Error: SMI busy timeout\n");
			return -EFAULT;
		}
	} while (smi_reg & MVNETA_SMI_BUSY);

	return 0;
}

/*
 * mvneta_mdio_read - miiphy_read callback function.
 *
 * Returns the 16-bit PHY register value, or a negative error code on
 * failure
 */
static int mvneta_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	struct mvneta_port *pp = bus->priv;
	u32 smi_reg;
	u32 timeout;

	/* check parameters */
	if (addr > MVNETA_PHY_ADDR_MASK) {
		printf("Error: Invalid PHY address %d\n", addr);
		return -EFAULT;
	}

	if (reg > MVNETA_PHY_REG_MASK) {
		printf("Err: Invalid register offset %d\n", reg);
		return -EFAULT;
	}

	/* wait till the SMI is not busy */
	if (smi_wait_ready(pp) < 0)
		return -EFAULT;

	/* fill the phy address and register offset and read opcode */
	smi_reg = (addr << MVNETA_SMI_DEV_ADDR_OFFS)
		| (reg << MVNETA_SMI_REG_ADDR_OFFS)
		| MVNETA_SMI_OPCODE_READ;

	/* write the smi register */
	mvreg_write(pp, MVNETA_SMI, smi_reg);

	/* wait till read value is ready */
	timeout = MVNETA_SMI_TIMEOUT;

	do {
		/* read smi register */
		smi_reg = mvreg_read(pp, MVNETA_SMI);
		if (timeout-- == 0) {
			printf("Err: SMI read ready timeout\n");
			return -EFAULT;
		}
	} while (!(smi_reg & MVNETA_SMI_READ_VALID));

	/* Wait for the data to update in the SMI register */
	for (timeout = 0; timeout < MVNETA_SMI_TIMEOUT; timeout++)
		;

	return mvreg_read(pp, MVNETA_SMI) & MVNETA_SMI_DATA_MASK;
}

/*
 * mvneta_mdio_write - miiphy_write callback function.
 *
 * Returns 0 if the write succeeded, or -EFAULT on bad parameters or
 * timeout
 */
static int mvneta_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			     u16 value)
{
	struct mvneta_port *pp = bus->priv;
	u32 smi_reg;

	/* check parameters */
	if (addr > MVNETA_PHY_ADDR_MASK) {
		printf("Error: Invalid PHY address %d\n", addr);
		return -EFAULT;
	}

	if (reg > MVNETA_PHY_REG_MASK) {
		printf("Err: Invalid register offset %d\n", reg);
		return -EFAULT;
	}

	/* wait till the SMI is not busy */
	if (smi_wait_ready(pp) < 0)
		return -EFAULT;

	/* fill the phy addr and reg offset and write opcode and data */
	smi_reg = value << MVNETA_SMI_DATA_OFFS;
	smi_reg |= (addr << MVNETA_SMI_DEV_ADDR_OFFS)
		| (reg << MVNETA_SMI_REG_ADDR_OFFS);
	smi_reg &= ~MVNETA_SMI_OPCODE_READ;

	/* write the smi register */
	mvreg_write(pp, MVNETA_SMI, smi_reg);

	return 0;
}
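
/*
 * Hookup sketch (assumption, the actual registration happens outside this
 * excerpt): these two callbacks are meant to be installed on a U-Boot MII
 * bus allocated in the probe path, roughly:
 *
 *	struct mii_dev *bus = mdio_alloc();
 *
 *	bus->read = mvneta_mdio_read;
 *	bus->write = mvneta_mdio_write;
 *	bus->priv = pp;
 *	mdio_register(bus);
 */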

static int mvneta_start(struct udevice *dev)
{
	struct mvneta_port *pp = dev_get_priv(dev);
	struct phy_device *phydev;

	mvneta_port_power_up(pp, pp->phy_interface);

	if (!pp->init || pp->link == 0) {
		if (mvneta_port_is_fixed_link(pp)) {
			u32 val;

			pp->init = 1;
			pp->link = 1;
			mvneta_init(dev);

			val = MVNETA_GMAC_FORCE_LINK_UP |
			      MVNETA_GMAC_IB_BYPASS_AN_EN |
			      MVNETA_GMAC_SET_FC_EN |
			      MVNETA_GMAC_ADVERT_FC_EN |
			      MVNETA_GMAC_SAMPLE_TX_CFG_EN;

			if (pp->duplex)
				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

			if (pp->speed == SPEED_1000)
				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
			else if (pp->speed == SPEED_100)
				val |= MVNETA_GMAC_CONFIG_MII_SPEED;

			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
		} else {
			/* Set phy address of the port */
			mvreg_write(pp, MVNETA_PHY_ADDR, pp->phyaddr);

			phydev = phy_connect(pp->bus, pp->phyaddr, dev,
					     pp->phy_interface);
			if (!phydev) {
				printf("%s: PHY not found\n", dev->name);
				return -ENODEV;
			}

			pp->phydev = phydev;
			phy_config(phydev);
			phy_startup(phydev);
			if (!phydev->link) {
				printf("%s: No link.\n", phydev->dev->name);
				return -1;
			}

			/* Full init on first call */
			mvneta_init(dev);
			pp->init = 1;
			return 0;
		}
	}

	/* On all subsequent calls, bringing the port back up is enough */
	mvneta_port_up(pp);
	mvneta_port_enable(pp);

	return 0;
}
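
/*
 * Worked example (illustrative): a fixed-link node with speed = <1000>
 * and full-duplex yields the value written above:
 *
 *	val = MVNETA_GMAC_FORCE_LINK_UP | MVNETA_GMAC_IB_BYPASS_AN_EN |
 *	      MVNETA_GMAC_SET_FC_EN | MVNETA_GMAC_ADVERT_FC_EN |
 *	      MVNETA_GMAC_SAMPLE_TX_CFG_EN |
 *	      MVNETA_GMAC_CONFIG_FULL_DUPLEX |
 *	      MVNETA_GMAC_CONFIG_GMII_SPEED;
 *
 * i.e. link, speed and duplex are forced and in-band autonegotiation
 * is bypassed, since there is no PHY to negotiate with.
 */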

static int mvneta_send(struct udevice *dev, void *packet, int length)
{
	struct mvneta_port *pp = dev_get_priv(dev);
	struct mvneta_tx_queue *txq = &pp->txqs[0];
	struct mvneta_tx_desc *tx_desc;
	int sent_desc;
	u32 timeout = 0;

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvneta_txq_next_desc_get(txq);

	tx_desc->buf_phys_addr = (u32)(uintptr_t)packet;
	tx_desc->data_size = length;
	flush_dcache_range((ulong)packet,
			   (ulong)packet + ALIGN(length, PKTALIGN));

	/* First and Last descriptor */
	tx_desc->command = MVNETA_TX_L4_CSUM_NOT | MVNETA_TXD_FLZ_DESC;
	mvneta_txq_pend_desc_add(pp, txq, 1);

	/* Wait for packet to be sent (queue might help with speed here) */
	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
	while (!sent_desc) {
		if (timeout++ > 10000) {
			printf("timeout: packet not sent\n");
			return -1;
		}
		sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
	}

	/* txDone has increased - hw sent packet */
	mvneta_txq_sent_desc_dec(pp, txq, sent_desc);

	return 0;
}
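
/*
 * TX flow in short (descriptive summary of the code above): the frame
 * is handed to the hardware by physical address in a single first+last
 * descriptor, flushed from D-cache so DMA reads the written bytes,
 * queued by bumping the pending-descriptor count, and completion is
 * detected by polling the sent-descriptor counter, which is then
 * decremented to re-arm it for the next packet.
 */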

static int mvneta_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mvneta_port *pp = dev_get_priv(dev);
	int rx_done;
	struct mvneta_rx_queue *rxq;
	int rx_bytes = 0;

	/* get rx queue */
	rxq = mvneta_rxq_handle_get(pp, rxq_def);
	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);

	if (rx_done) {
		struct mvneta_rx_desc *rx_desc;
		unsigned char *data;
		u32 rx_status;

		/*
		 * No cache invalidation needed here, since the
		 * descriptors are located in an uncached memory region
		 */
		rx_desc = mvneta_rxq_next_desc_get(rxq);

		rx_status = rx_desc->status;
		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
			mvneta_rx_error(pp, rx_desc);
			/* leave the descriptor untouched */
			return -EIO;
		}

		/* 2 bytes for the Marvell header, 4 bytes for the CRC */
		rx_bytes = rx_desc->data_size - 6;

		/* give the packet to the stack - skip the first 2 bytes */
		data = (u8 *)(uintptr_t)rx_desc->buf_cookie + 2;
		/*
		 * No cache invalidation needed here, since the
		 * rx_buffers are located in an uncached memory region
		 */
		*packetp = data;

		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
	}

	return rx_bytes;
}
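
/*
 * Return convention (descriptive): the DM eth uclass treats a positive
 * return as the length of the frame placed in *packetp, 0 as "nothing
 * received yet" (it keeps polling), and a negative value as an error.
 * Returning rx_bytes == 0 when no descriptor is ready therefore keeps
 * the receive loop spinning without raising an error.
 */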

static int mvneta_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct mvneta_port *pp = dev_get_priv(dev);
	void *blob = (void *)gd->fdt_blob;
	int node = dev_of_offset(dev);
	struct mii_dev *bus;
	unsigned long addr;
	void *bd_space;
	int ret;
	int fl_node;

	/*
	 * Allocate the buffer area for descriptors and rx_buffers. This
	 * is done only once for all interfaces, since only one interface
	 * can be active at a time. Make this area DMA-safe by disabling
	 * the D-cache.
	 */
	if (!buffer_loc.tx_descs) {
		/* Align buffer area for descs and rx_buffers to 1MiB */
		bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
		if (!bd_space)
			return -ENOMEM;
		mmu_set_region_dcache_behaviour((phys_addr_t)bd_space,
						BD_SPACE, DCACHE_OFF);
		buffer_loc.tx_descs = (struct mvneta_tx_desc *)bd_space;
		buffer_loc.rx_descs = (struct mvneta_rx_desc *)
			((phys_addr_t)bd_space +
			 MVNETA_MAX_TXD * sizeof(struct mvneta_tx_desc));
		buffer_loc.rx_buffers = (phys_addr_t)
			(bd_space +
			 MVNETA_MAX_TXD * sizeof(struct mvneta_tx_desc) +
			 MVNETA_MAX_RXD * sizeof(struct mvneta_rx_desc));
	}
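
	/*
	 * Resulting layout of the shared uncached area (descriptive):
	 *
	 *	bd_space + 0 ................................ TX descriptors
	 *	  + MVNETA_MAX_TXD * sizeof(tx desc) ........ RX descriptors
	 *	  + MVNETA_MAX_RXD * sizeof(rx desc) ........ RX buffers
	 */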

	pp->base = (void __iomem *)pdata->iobase;

	/* Configure MBUS address windows */
	if (device_is_compatible(dev, "marvell,armada-3700-neta"))
		mvneta_bypass_mbus_windows(pp);
	else
		mvneta_conf_mbus_windows(pp);

	/* PHY interface is already decoded in mvneta_ofdata_to_platdata() */
	pp->phy_interface = pdata->phy_interface;

	/* fetch the 'fixed-link' subnode of the 'neta' node */
	fl_node = fdt_subnode_offset(blob, node, "fixed-link");
	if (fl_node != -FDT_ERR_NOTFOUND) {
		/* set phy_addr to an invalid value for fixed link */
		pp->phyaddr = PHY_MAX_ADDR + 1;
		pp->duplex = fdtdec_get_bool(blob, fl_node, "full-duplex");
		pp->speed = fdtdec_get_int(blob, fl_node, "speed", 0);
	} else {
		/* Now read phyaddr from DT */
		addr = fdtdec_get_int(blob, node, "phy", 0);
		addr = fdt_node_offset_by_phandle(blob, addr);
		pp->phyaddr = fdtdec_get_int(blob, addr, "reg", 0);
	}

	bus = mdio_alloc();
	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -ENOMEM;
	}

	bus->read = mvneta_mdio_read;
	bus->write = mvneta_mdio_write;
	snprintf(bus->name, sizeof(bus->name), "%s", dev->name);
	bus->priv = (void *)pp;
	pp->bus = bus;

	ret = mdio_register(bus);
	if (ret)
		return ret;

	return board_network_enable(bus);
}
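
/*
 * Illustrative DT fragments (hypothetical node names and addresses)
 * matching the two probe paths above:
 *
 *	ethernet@70000 {
 *		compatible = "marvell,armada-370-neta";
 *		phy-mode = "rgmii-id";
 *		phy = <&phy0>;			<- PHY path
 *	};
 *
 *	ethernet@30000 {
 *		compatible = "marvell,armada-370-neta";
 *		phy-mode = "sgmii";
 *		fixed-link {			<- fixed-link path
 *			speed = <1000>;
 *			full-duplex;
 *		};
 *	};
 */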

static void mvneta_stop(struct udevice *dev)
{
	struct mvneta_port *pp = dev_get_priv(dev);

	mvneta_port_down(pp);
	mvneta_port_disable(pp);
}

static const struct eth_ops mvneta_ops = {
	.start	= mvneta_start,
	.send	= mvneta_send,
	.recv	= mvneta_recv,
	.stop	= mvneta_stop,
};

static int mvneta_ofdata_to_platdata(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	const char *phy_mode;

	pdata->iobase = devfdt_get_addr(dev);

	/* Get phy-mode / phy_interface from DT */
	pdata->phy_interface = -1;
	phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
			       NULL);
	if (phy_mode)
		pdata->phy_interface = phy_get_interface_by_name(phy_mode);
	if (pdata->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__,
		      phy_mode ? phy_mode : "(none)");
		return -EINVAL;
	}

	return 0;
}
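
/*
 * Example (descriptive): a DT property phy-mode = "rgmii" is mapped by
 * phy_get_interface_by_name() to PHY_INTERFACE_MODE_RGMII; a missing
 * or unrecognized string leaves phy_interface at -1, so binding fails
 * with -EINVAL. Every 'neta' node therefore needs a valid phy-mode.
 */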

static const struct udevice_id mvneta_ids[] = {
	{ .compatible = "marvell,armada-370-neta" },
	{ .compatible = "marvell,armada-xp-neta" },
	{ .compatible = "marvell,armada-3700-neta" },
	{ }
};

U_BOOT_DRIVER(mvneta) = {
	.name	= "mvneta",
	.id	= UCLASS_ETH,
	.of_match = mvneta_ids,
	.ofdata_to_platdata = mvneta_ofdata_to_platdata,
	.probe	= mvneta_probe,
	.ops	= &mvneta_ops,
	.priv_auto_alloc_size = sizeof(struct mvneta_port),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};