/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <[email protected]>
 * Thomas Petazzoni <[email protected]>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/inetdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
#include "mvneta_bm.h"
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>

/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q)	(0x1400 + ((q) << 2))
#define MVNETA_RXQ_HW_BUF_ALLOC		BIT(0)
#define MVNETA_RXQ_SHORT_POOL_ID_SHIFT	4
#define MVNETA_RXQ_SHORT_POOL_ID_MASK	0x30
#define MVNETA_RXQ_LONG_POOL_ID_SHIFT	6
#define MVNETA_RXQ_LONG_POOL_ID_MASK	0xc0
#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK	(0xf << 8)
#define MVNETA_RXQ_PKT_OFFSET_MASK(offs)	((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q)	(0x14c0 + ((q) << 2))
#define MVNETA_RXQ_NON_OCCUPIED(v)	((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q)	(0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q)		(0x14a0 + ((q) << 2))
#define MVNETA_RXQ_BUF_SIZE_SHIFT	19
#define MVNETA_RXQ_BUF_SIZE_MASK	(0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q)	(0x14e0 + ((q) << 2))
#define MVNETA_RXQ_OCCUPIED_ALL_MASK	0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q)	(0x1500 + ((q) << 2))
#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT	16
#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX		255
#define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool)	(0x1700 + ((pool) << 2))
#define MVNETA_PORT_POOL_BUFFER_SZ_SHIFT	3
#define MVNETA_PORT_POOL_BUFFER_SZ_MASK		0xfff8
#define MVNETA_PORT_RX_RESET		0x1cc0
#define MVNETA_PORT_RX_DMA_RESET	BIT(0)
#define MVNETA_PHY_ADDR			0x2000
#define MVNETA_PHY_ADDR_MASK		0x1f
#define MVNETA_MBUS_RETRY		0x2010
#define MVNETA_UNIT_INTR_CAUSE		0x2080
#define MVNETA_UNIT_CONTROL		0x20B0
#define MVNETA_PHY_POLLING_ENABLE	BIT(1)
#define MVNETA_WIN_BASE(w)		(0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w)		(0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w)		(0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE		0x2290
#define MVNETA_ACCESS_PROTECT_ENABLE	0x2294
#define MVNETA_PORT_CONFIG		0x2400
#define MVNETA_UNI_PROMISC_MODE		BIT(0)
#define MVNETA_DEF_RXQ(q)		((q) << 1)
#define MVNETA_DEF_RXQ_ARP(q)		((q) << 4)
#define MVNETA_TX_UNSET_ERR_SUM		BIT(12)
#define MVNETA_DEF_RXQ_TCP(q)		((q) << 16)
#define MVNETA_DEF_RXQ_UDP(q)		((q) << 19)
#define MVNETA_DEF_RXQ_BPDU(q)		((q) << 22)
#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR	BIT(25)
#define MVNETA_PORT_CONFIG_DEFL_VALUE(q)	(MVNETA_DEF_RXQ(q) | \
						 MVNETA_DEF_RXQ_ARP(q) | \
						 MVNETA_DEF_RXQ_TCP(q) | \
						 MVNETA_DEF_RXQ_UDP(q) | \
						 MVNETA_DEF_RXQ_BPDU(q) | \
						 MVNETA_TX_UNSET_ERR_SUM | \
						 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND	0x2404
#define MVNETA_MAC_ADDR_LOW		0x2414
#define MVNETA_MAC_ADDR_HIGH		0x2418
#define MVNETA_SDMA_CONFIG		0x241c
#define MVNETA_SDMA_BRST_SIZE_16	4
#define MVNETA_RX_BRST_SZ_MASK(burst)	((burst) << 1)
#define MVNETA_RX_NO_DATA_SWAP		BIT(4)
#define MVNETA_TX_NO_DATA_SWAP		BIT(5)
#define MVNETA_DESC_SWAP		BIT(6)
#define MVNETA_TX_BRST_SZ_MASK(burst)	((burst) << 22)
#define MVNETA_PORT_STATUS		0x2444
#define MVNETA_TX_IN_PRGRS		BIT(1)
#define MVNETA_TX_FIFO_EMPTY		BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE	0x247c
#define MVNETA_SERDES_CFG		0x24A0
#define MVNETA_SGMII_SERDES_PROTO	0x0cc7
#define MVNETA_QSGMII_SERDES_PROTO	0x0667
#define MVNETA_TYPE_PRIO		0x24bc
#define MVNETA_FORCE_UNI		BIT(21)
#define MVNETA_TXQ_CMD_1		0x24e4
#define MVNETA_TXQ_CMD			0x2448
#define MVNETA_TXQ_DISABLE_SHIFT	8
#define MVNETA_TXQ_ENABLE_MASK		0x000000ff
#define MVNETA_RX_DISCARD_FRAME_COUNT	0x2484
#define MVNETA_OVERRUN_FRAME_COUNT	0x2488
#define MVNETA_GMAC_CLOCK_DIVIDER	0x24f4
#define MVNETA_GMAC_1MS_CLOCK_ENABLE	BIT(31)
#define MVNETA_ACC_MODE			0x2500
#define MVNETA_BM_ADDRESS		0x2504
#define MVNETA_CPU_MAP(cpu)		(0x2540 + ((cpu) << 2))
#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK	0x000000ff
#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK	0x0000ff00
#define MVNETA_CPU_RXQ_ACCESS(rxq)	BIT(rxq)
#define MVNETA_CPU_TXQ_ACCESS(txq)	BIT(txq + 8)
#define MVNETA_RXQ_TIME_COAL_REG(q)	(0x2580 + ((q) << 2))

/* Exception Interrupt Port/Queue Cause register
 *
 * Their behavior depends on the mapping done using the PCPX2Q
 * registers. For a given CPU, if the bit associated with a queue is
 * not set, then reads of this register from that CPU always return 0
 * and writes have no effect.
 */

#define MVNETA_INTR_NEW_CAUSE		0x25a0
#define MVNETA_INTR_NEW_MASK		0x25a4

/* bits 0..7 = TXQ SENT, one bit per queue.
 * bits 8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit 29 = OLD_REG_SUM, see old reg ?
 * bit 30 = TX_ERR_SUM, one bit for 4 ports
 * bit 31 = MISC_SUM, one bit for 4 ports
 */
#define MVNETA_TX_INTR_MASK(nr_txqs)	(((1 << nr_txqs) - 1) << 0)
#define MVNETA_TX_INTR_MASK_ALL		(0xff << 0)
#define MVNETA_RX_INTR_MASK(nr_rxqs)	(((1 << nr_rxqs) - 1) << 8)
#define MVNETA_RX_INTR_MASK_ALL		(0xff << 8)
#define MVNETA_MISCINTR_INTR_MASK	BIT(31)

#define MVNETA_INTR_OLD_CAUSE		0x25a8
#define MVNETA_INTR_OLD_MASK		0x25ac

/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE		0x25b0
#define MVNETA_INTR_MISC_MASK		0x25b4

#define MVNETA_CAUSE_PHY_STATUS_CHANGE	BIT(0)
#define MVNETA_CAUSE_LINK_CHANGE	BIT(1)
#define MVNETA_CAUSE_PTP		BIT(4)

#define MVNETA_CAUSE_INTERNAL_ADDR_ERR	BIT(7)
#define MVNETA_CAUSE_RX_OVERRUN		BIT(8)
#define MVNETA_CAUSE_RX_CRC_ERROR	BIT(9)
#define MVNETA_CAUSE_RX_LARGE_PKT	BIT(10)
#define MVNETA_CAUSE_TX_UNDERUN		BIT(11)
#define MVNETA_CAUSE_PRBS_ERR		BIT(12)
#define MVNETA_CAUSE_PSC_SYNC_CHANGE	BIT(13)
#define MVNETA_CAUSE_SERDES_SYNC_ERR	BIT(14)

#define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT	16
#define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK	(0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
#define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool)	(1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))

#define MVNETA_CAUSE_TXQ_ERROR_SHIFT	24
#define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK	(0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
#define MVNETA_CAUSE_TXQ_ERROR_MASK(q)	(1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))

#define MVNETA_INTR_ENABLE		0x25b8
#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK	0x0000ff00
#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK	0x000000ff

#define MVNETA_RXQ_CMD			0x2680
#define MVNETA_RXQ_DISABLE_SHIFT	8
#define MVNETA_RXQ_ENABLE_MASK		0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q)	(0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q)	(0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0		0x2c00
#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT	2
#define MVNETA_GMAC_MAX_RX_SIZE_MASK	0x7ffc
#define MVNETA_GMAC0_PORT_1000BASE_X	BIT(1)
#define MVNETA_GMAC0_PORT_ENABLE	BIT(0)
#define MVNETA_GMAC_CTRL_2		0x2c08
#define MVNETA_GMAC2_INBAND_AN_ENABLE	BIT(0)
#define MVNETA_GMAC2_PCS_ENABLE		BIT(3)
#define MVNETA_GMAC2_PORT_RGMII		BIT(4)
#define MVNETA_GMAC2_PORT_RESET		BIT(6)
#define MVNETA_GMAC_STATUS		0x2c10
#define MVNETA_GMAC_LINK_UP		BIT(0)
#define MVNETA_GMAC_SPEED_1000		BIT(1)
#define MVNETA_GMAC_SPEED_100		BIT(2)
#define MVNETA_GMAC_FULL_DUPLEX		BIT(3)
#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE	BIT(4)
#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE	BIT(5)
#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE	BIT(6)
#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE	BIT(7)
#define MVNETA_GMAC_AN_COMPLETE		BIT(11)
#define MVNETA_GMAC_SYNC_OK		BIT(14)
#define MVNETA_GMAC_AUTONEG_CONFIG	0x2c0c
#define MVNETA_GMAC_FORCE_LINK_DOWN	BIT(0)
#define MVNETA_GMAC_FORCE_LINK_PASS	BIT(1)
#define MVNETA_GMAC_INBAND_AN_ENABLE	BIT(2)
#define MVNETA_GMAC_AN_BYPASS_ENABLE	BIT(3)
#define MVNETA_GMAC_INBAND_RESTART_AN	BIT(4)
#define MVNETA_GMAC_CONFIG_MII_SPEED	BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED	BIT(6)
#define MVNETA_GMAC_AN_SPEED_EN		BIT(7)
#define MVNETA_GMAC_CONFIG_FLOW_CTRL	BIT(8)
#define MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL	BIT(9)
#define MVNETA_GMAC_AN_FLOW_CTRL_EN	BIT(11)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX	BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN	BIT(13)
#define MVNETA_GMAC_CTRL_4		0x2c90
#define MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE	BIT(1)
#define MVNETA_MIB_COUNTERS_BASE	0x3000
#define MVNETA_MIB_LATE_COLLISION	0x7c
#define MVNETA_DA_FILT_SPEC_MCAST	0x3400
#define MVNETA_DA_FILT_OTH_MCAST	0x3500
#define MVNETA_DA_FILT_UCAST_BASE	0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q)	(0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q)		(0x3c20 + ((q) << 2))
#define MVNETA_TXQ_SENT_THRESH_ALL_MASK	0x3fff0000
#define MVNETA_TXQ_SENT_THRESH_MASK(coal)	((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q)	(0x3c60 + ((q) << 2))
#define MVNETA_TXQ_DEC_SENT_SHIFT	16
#define MVNETA_TXQ_DEC_SENT_MASK	0xff
#define MVNETA_TXQ_STATUS_REG(q)	(0x3c40 + ((q) << 2))
#define MVNETA_TXQ_SENT_DESC_SHIFT	16
#define MVNETA_TXQ_SENT_DESC_MASK	0x3fff0000
#define MVNETA_PORT_TX_RESET		0x3cf0
#define MVNETA_PORT_TX_DMA_RESET	BIT(0)
#define MVNETA_TX_MTU			0x3e0c
#define MVNETA_TX_TOKEN_SIZE		0x3e14
#define MVNETA_TX_TOKEN_SIZE_MAX	0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q)	(0x3e40 + ((q) << 2))
#define MVNETA_TXQ_TOKEN_SIZE_MAX	0x7fffffff

#define MVNETA_LPI_CTRL_0		0x2cc0
#define MVNETA_LPI_CTRL_1		0x2cc4
#define MVNETA_LPI_REQUEST_ENABLE	BIT(0)
#define MVNETA_LPI_CTRL_2		0x2cc8
#define MVNETA_LPI_STATUS		0x2ccc

#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)

/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS		0	/* interrupt per packet */
#define MVNETA_RX_COAL_PKTS		32
#define MVNETA_RX_COAL_USEC		100

/* The two-byte Marvell header. It either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled with zeroes automatically on
 * the RX side. Since those two bytes sit in front of the Ethernet
 * header, they leave the IP header aligned on a 4-byte boundary
 * automatically: the hardware skips those two bytes on its own.
 */
#define MVNETA_MH_SIZE			2

#define MVNETA_VLAN_TAG_LEN		4

#define MVNETA_TX_CSUM_DEF_SIZE		1600
#define MVNETA_TX_CSUM_MAX_SIZE		9800
#define MVNETA_ACC_MODE_EXT1		1
#define MVNETA_ACC_MODE_EXT2		2

#define MVNETA_MAX_DECODE_WIN		6

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000

#define MVNETA_TX_MTU_MAX		0x3ffff

/* The RSS lookup table actually has 256 entries but we do not use
 * them yet
 */
#define MVNETA_RSS_LU_TABLE_SIZE	1

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD			512

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD			1024

/* Max number of allowed TCP segments for software TSO */
#define MVNETA_MAX_TSO_SEGS		100

#define MVNETA_MAX_SKB_DESCS		(MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE	32

/* Number of bytes to be taken into account by HW when putting incoming data
 * into the buffers. It is needed in case NET_SKB_PAD exceeds the maximum
 * packet offset supported in the MVNETA_RXQ_CONFIG_REG(q) registers.
 */
#define MVNETA_RX_PKT_OFFSET_CORRECTION	64

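/* Total receive buffer length computed for a given MTU: it covers the
 * two-byte Marvell header, an optional VLAN tag, the Ethernet header and
 * FCS, rounded up to the cache line size so buffers stay cache aligned.
 */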
#define MVNETA_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, \
	      cache_line_size())

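/* TSO headers are carved out of a single per-queue DMA region
 * (txq->tso_hdrs / txq->tso_hdrs_phys), so a descriptor whose buffer
 * address falls inside that range must not be DMA-unmapped individually
 * on completion.
 */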
#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_phys) && \
	 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))

#define MVNETA_RX_GET_BM_POOL_ID(rxd) \
	(((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)

enum {
	ETHTOOL_STAT_EEE_WAKEUP,
	ETHTOOL_STAT_SKB_ALLOC_ERR,
	ETHTOOL_STAT_REFILL_ERR,
	ETHTOOL_MAX_STATS,
};

struct mvneta_statistic {
	unsigned short offset;
	unsigned short type;
	const char name[ETH_GSTRING_LEN];
};

#define T_REG_32	32
#define T_REG_64	64
#define T_SW		1

static const struct mvneta_statistic mvneta_statistics[] = {
	{ 0x3000, T_REG_64, "good_octets_received", },
	{ 0x3010, T_REG_32, "good_frames_received", },
	{ 0x3008, T_REG_32, "bad_octets_received", },
	{ 0x3014, T_REG_32, "bad_frames_received", },
	{ 0x3018, T_REG_32, "broadcast_frames_received", },
	{ 0x301c, T_REG_32, "multicast_frames_received", },
	{ 0x3050, T_REG_32, "unrec_mac_control_received", },
	{ 0x3058, T_REG_32, "good_fc_received", },
	{ 0x305c, T_REG_32, "bad_fc_received", },
	{ 0x3060, T_REG_32, "undersize_received", },
	{ 0x3064, T_REG_32, "fragments_received", },
	{ 0x3068, T_REG_32, "oversize_received", },
	{ 0x306c, T_REG_32, "jabber_received", },
	{ 0x3070, T_REG_32, "mac_receive_error", },
	{ 0x3074, T_REG_32, "bad_crc_event", },
	{ 0x3078, T_REG_32, "collision", },
	{ 0x307c, T_REG_32, "late_collision", },
	{ 0x2484, T_REG_32, "rx_discard", },
	{ 0x2488, T_REG_32, "rx_overrun", },
	{ 0x3020, T_REG_32, "frames_64_octets", },
	{ 0x3024, T_REG_32, "frames_65_to_127_octets", },
	{ 0x3028, T_REG_32, "frames_128_to_255_octets", },
	{ 0x302c, T_REG_32, "frames_256_to_511_octets", },
	{ 0x3030, T_REG_32, "frames_512_to_1023_octets", },
	{ 0x3034, T_REG_32, "frames_1024_to_max_octets", },
	{ 0x3038, T_REG_64, "good_octets_sent", },
	{ 0x3040, T_REG_32, "good_frames_sent", },
	{ 0x3044, T_REG_32, "excessive_collision", },
	{ 0x3048, T_REG_32, "multicast_frames_sent", },
	{ 0x304c, T_REG_32, "broadcast_frames_sent", },
	{ 0x3054, T_REG_32, "fc_sent", },
	{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
	{ ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
	{ ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", },
	{ ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
};

struct mvneta_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

struct mvneta_pcpu_port {
	/* Pointer to the shared port */
	struct mvneta_port *pp;

	/* Pointer to the CPU-local NAPI struct */
	struct napi_struct napi;

	/* Cause of the previous interrupt */
	u32 cause_rx_tx;
};

struct mvneta_port {
	u8 id;
	struct mvneta_pcpu_port __percpu *ports;
	struct mvneta_pcpu_stats __percpu *stats;

	int pkt_size;
	unsigned int frag_size;
	void __iomem *base;
	struct mvneta_rx_queue *rxqs;
	struct mvneta_tx_queue *txqs;
	struct net_device *dev;
	struct hlist_node node_online;
	struct hlist_node node_dead;
	int rxq_def;
	/* Protect the access to the percpu interrupt registers,
	 * ensuring that the configuration remains coherent.
	 */
	spinlock_t lock;
	bool is_stopped;

	u32 cause_rx_tx;
	struct napi_struct napi;

	/* Core clock */
	struct clk *clk;
	/* AXI clock */
	struct clk *clk_bus;
	u8 mcast_count[256];
	u16 tx_ring_size;
	u16 rx_ring_size;

	phy_interface_t phy_interface;
	struct device_node *dn;
	unsigned int tx_csum_limit;
	struct phylink *phylink;

	struct mvneta_bm *bm_priv;
	struct mvneta_bm_pool *pool_long;
	struct mvneta_bm_pool *pool_short;
	int bm_win_id;

	bool eee_enabled;
	bool eee_active;
	bool tx_lpi_enabled;

	u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];

	u32 indir[MVNETA_RSS_LU_TABLE_SIZE];

	/* Flags for special SoC configurations */
	bool neta_armada3700;
	u16 rx_offset_correction;
	const struct mbus_dram_target_info *dram_target_info;
};

/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVNETA_TX_L3_OFF_SHIFT	0
#define MVNETA_TX_IP_HLEN_SHIFT	8
#define MVNETA_TX_L4_UDP	BIT(16)
#define MVNETA_TX_L3_IP6	BIT(17)
#define MVNETA_TXD_IP_CSUM	BIT(18)
#define MVNETA_TXD_Z_PAD	BIT(19)
#define MVNETA_TXD_L_DESC	BIT(20)
#define MVNETA_TXD_F_DESC	BIT(21)
#define MVNETA_TXD_FLZ_DESC	(MVNETA_TXD_Z_PAD  | \
				 MVNETA_TXD_L_DESC | \
				 MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL	BIT(30)
#define MVNETA_TX_L4_CSUM_NOT	BIT(31)

#define MVNETA_RXD_ERR_CRC		0x0
#define MVNETA_RXD_BM_POOL_SHIFT	13
#define MVNETA_RXD_BM_POOL_MASK		(BIT(13) | BIT(14))
#define MVNETA_RXD_ERR_SUMMARY		BIT(16)
#define MVNETA_RXD_ERR_OVERRUN		BIT(17)
#define MVNETA_RXD_ERR_LEN		BIT(18)
#define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4		BIT(25)
#define MVNETA_RXD_LAST_DESC		BIT(26)
#define MVNETA_RXD_FIRST_DESC		BIT(27)
#define MVNETA_RXD_FIRST_LAST_DESC	(MVNETA_RXD_FIRST_DESC | \
					 MVNETA_RXD_LAST_DESC)
#define MVNETA_RXD_L4_CSUM_OK		BIT(30)

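/* On big-endian kernels the 16-bit and 32-bit descriptor fields are
 * declared in byte-swapped order; combined with MVNETA_DESC_SWAP in the
 * SDMA configuration this should present the same descriptor layout to
 * the hardware in both cases.
 */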
#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u16 reserved1;		/* csum_l4 (for future use) */
	u16 data_size;		/* Data size of transmitted packet in bytes */
	u32 buf_phys_addr;	/* Physical addr of transmitted buffer */
	u32 reserved2;		/* hw_cmd - (for future use, PMT) */
	u32 reserved3[4];	/* Reserved - (for future use) */
};

struct mvneta_rx_desc {
	u32 status;		/* Info about received packet */
	u16 reserved1;		/* pnc_info - (for future use, PnC) */
	u16 data_size;		/* Size of received packet in bytes */

	u32 buf_phys_addr;	/* Physical address of the buffer */
	u32 reserved2;		/* pnc_flow_id (for future use, PnC) */

	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved3;		/* prefetch_cmd, for future use */
	u16 reserved4;		/* csum_l4 - (for future use, PnC) */

	u32 reserved5;		/* pnc_extra PnC (for future use, PnC) */
	u32 reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};
#else
struct mvneta_tx_desc {
	u16 data_size;		/* Data size of transmitted packet in bytes */
	u16 reserved1;		/* csum_l4 (for future use) */
	u32 command;		/* Options used by HW for packet transmitting.*/
	u32 reserved2;		/* hw_cmd - (for future use, PMT) */
	u32 buf_phys_addr;	/* Physical addr of transmitted buffer */
	u32 reserved3[4];	/* Reserved - (for future use) */
};

struct mvneta_rx_desc {
	u16 data_size;		/* Size of received packet in bytes */
	u16 reserved1;		/* pnc_info - (for future use, PnC) */
	u32 status;		/* Info about received packet */

	u32 reserved2;		/* pnc_flow_id (for future use, PnC) */
	u32 buf_phys_addr;	/* Physical address of the buffer */

	u16 reserved4;		/* csum_l4 - (for future use, PnC) */
	u16 reserved3;		/* prefetch_cmd, for future use */
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */

	u32 reserved5;		/* pnc_extra PnC (for future use, PnC) */
	u32 reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};
#endif

struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used TX DMA descriptors in the
	 * descriptor ring
	 */
	int count;
	int pending;
	int tx_stop_threshold;
	int tx_wake_threshold;

	/* Array of transmitted skb */
	struct sk_buff **tx_skb;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	u32 done_pkts_coal;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;

	/* DMA buffers for TSO headers */
	char *tso_hdrs;

	/* DMA address of TSO headers */
	dma_addr_t tso_hdrs_phys;

	/* Affinity mask for CPUs */
	cpumask_t affinity_mask;
};

struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX buffer */
	void **buf_virt_addr;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* Index of first RX DMA descriptor to refill */
	int first_to_refill;
	u32 refill_num;

	/* pointer to incomplete skb buffer */
	struct sk_buff *skb;
	int left_size;

	/* error counters */
	u32 skb_alloc_err;
	u32 refill_err;
};

static enum cpuhp_state online_hpstate;
/* The hardware supports eight (8) rx queues; all of them are
 * allocated, and the default one is selected by rxq_def.
 */
static int rxq_number = 8;
static int txq_number = 8;

static int rxq_def;

static int rx_copybreak __read_mostly = 256;
static int rx_header_size __read_mostly = 128;

/* HW BM requires that each port be identified by a unique ID */
static int global_port_id;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"

/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}

/* Increment txq get counter */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
	txq->txq_get_index++;
	if (txq->txq_get_index == txq->size)
		txq->txq_get_index = 0;
}

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
	txq->txq_put_index++;
	if (txq->txq_put_index == txq->size)
		txq->txq_put_index = 0;
}


/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;
	u32 dummy;

	/* Perform dummy reads from MIB counters */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
	dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
	dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}

/* Get System Network Statistics */
static void
mvneta_get_stats64(struct net_device *dev,
		   struct rtnl_link_stats64 *stats)
{
	struct mvneta_port *pp = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvneta_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(pp->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
	}

	stats->rx_errors = dev->stats.rx_errors;
	stats->rx_dropped = dev->stats.rx_dropped;

	stats->tx_dropped = dev->stats.tx_dropped;
}

/* Rx descriptors helper methods */

/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{
	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
		MVNETA_RXD_FIRST_LAST_DESC;
}

/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
{
	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
	 * be added at once
	 */
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}

	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}

/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}

/* Update num of rx desc called upon return from rx path or
 * from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
		  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}
		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}
}

/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

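/* The max RX size field of MVNETA_GMAC_CTRL_0 excludes the two-byte
 * Marvell header and is programmed in units of 2 bytes, hence the
 * subtraction and division below.
 */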
/* Change maximum receive size of the port. */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}


/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq,
				  int offset)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

	/* Offset is programmed in units of 8 bytes */
	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}


/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	u32 val;

	pend_desc += txq->pending;

	/* Only 255 Tx descriptors can be added at once */
	do {
		val = min(pend_desc, 255);
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		pend_desc -= val;
	} while (pend_desc > 0);
	txq->pending = 0;
}

/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

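/* The buffer size field of MVNETA_RXQ_SIZE_REG is programmed in units of
 * 8 bytes (hence the >> 3 below).
 */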
/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}

/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Enable buffer management (BM) */
static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val |= MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Notify HW about port's assignment of pool for bigger packets */
static void mvneta_rxq_long_pool_set(struct mvneta_port *pp,
				     struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK;
	val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Notify HW about port's assignment of pool for smaller packets */
static void mvneta_rxq_short_pool_set(struct mvneta_port *pp,
				      struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK;
	val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Set port's receive buffer size for assigned BM pool */
static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp,
					      int buf_size,
					      u8 pool_id)
{
	u32 val;

	if (!IS_ALIGNED(buf_size, 8)) {
		dev_warn(pp->dev->dev.parent,
			 "illegal buf_size value %d, round to %d\n",
			 buf_size, ALIGN(buf_size, 8));
		buf_size = ALIGN(buf_size, 8);
	}

	val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id));
	val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK;
	mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);
}

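/* Note: a window is enabled below by clearing its bit in
 * MVNETA_BASE_ADDR_ENABLE, so a bit that is still set identifies a free
 * (disabled) window.
 */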
/* Configure MBUS window in order to enable access to the BM internal SRAM */
static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
				  u8 target, u8 attr)
{
	u32 win_enable, win_protect;
	int i;

	win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE);

	if (pp->bm_win_id < 0) {
		/* Find first not occupied window */
		for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) {
			if (win_enable & (1 << i)) {
				pp->bm_win_id = i;
				break;
			}
		}
		if (i == MVNETA_MAX_DECODE_WIN)
			return -ENOMEM;
	} else {
		i = pp->bm_win_id;
	}

	mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
	mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

	if (i < 4)
		mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);

	mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) |
		    (attr << 8) | target);

	mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000);

	win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
	win_protect |= 3 << (2 * i);
	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);

	win_enable &= ~(1 << i);
	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);

	return 0;
}

static int mvneta_bm_port_mbus_init(struct mvneta_port *pp)
{
	u32 wsize;
	u8 target, attr;
	int err;

	/* Get BM window information */
	err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
					 &target, &attr);
	if (err < 0)
		return err;

	pp->bm_win_id = -1;

	/* Open NETA -> BM window */
	err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
				     target, attr);
	if (err < 0) {
		netdev_info(pp->dev, "fail to configure mbus window to BM\n");
		return err;
	}
	return 0;
}

/* Assign and initialize pools for port. If this fails, the buffer
 * manager will remain disabled for the current port.
 */
static int mvneta_bm_port_init(struct platform_device *pdev,
			       struct mvneta_port *pp)
{
	struct device_node *dn = pdev->dev.of_node;
	u32 long_pool_id, short_pool_id;

	if (!pp->neta_armada3700) {
		int ret;

		ret = mvneta_bm_port_mbus_init(pp);
		if (ret)
			return ret;
	}

	if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) {
		netdev_info(pp->dev, "missing long pool id\n");
		return -EINVAL;
	}

	/* Create port's long pool depending on mtu */
	pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
					   MVNETA_BM_LONG, pp->id,
					   MVNETA_RX_PKT_SIZE(pp->dev->mtu));
	if (!pp->pool_long) {
		netdev_info(pp->dev, "fail to obtain long pool for port\n");
		return -ENOMEM;
	}

	pp->pool_long->port_map |= 1 << pp->id;

	mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size,
				   pp->pool_long->id);

	/* If short pool id is not defined, assume using single pool */
	if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id))
		short_pool_id = long_pool_id;

	/* Create port's short pool */
	pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id,
					    MVNETA_BM_SHORT, pp->id,
					    MVNETA_BM_SHORT_PKT_SIZE);
	if (!pp->pool_short) {
		netdev_info(pp->dev, "fail to obtain short pool for port\n");
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		return -ENOMEM;
	}

	if (short_pool_id != long_pool_id) {
		pp->pool_short->port_map |= 1 << pp->id;
		mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size,
					   pp->pool_short->id);
	}

	return 0;
}

/* Update settings of a pool for bigger packets */
static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
{
	struct mvneta_bm_pool *bm_pool = pp->pool_long;
	struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
	int num;

	/* Release all buffers from long pool */
	mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
	if (hwbm_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n",
		     bm_pool->id);
		goto bm_mtu_err;
	}

	bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu);
	bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size);
	hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
			SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));

	/* Fill entire long pool */
	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
	if (num != hwbm_pool->size) {
		WARN(1, "pool %d: %d of %d allocated\n",
		     bm_pool->id, num, hwbm_pool->size);
		goto bm_mtu_err;
	}
	mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);

	return;

bm_mtu_err:
	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);

	pp->bm_priv = NULL;
	mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
	netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
}

/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
	int queue;
	u32 q_map;

	/* Enable all initialized TXs. */
	q_map = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		if (txq->descs)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

	q_map = 0;
	/* Enable all initialized RXQs. */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		if (rxq->descs)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}

/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
	u32 val;
	int count;

	/* Stop Rx port activity. Check port Rx activity. */
	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

	/* Issue stop command for active channels only */
	if (val != 0)
		mvreg_write(pp, MVNETA_RXQ_CMD,
			    val << MVNETA_RXQ_DISABLE_SHIFT);

	/* Wait for all Rx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_RXQ_CMD);
	} while (val & MVNETA_RXQ_ENABLE_MASK);

	/* Stop Tx port activity. Check port Tx activity. Issue stop
	 * command for active channels only
	 */
	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

	if (val != 0)
		mvreg_write(pp, MVNETA_TXQ_CMD,
			    (val << MVNETA_TXQ_DISABLE_SHIFT));

	/* Wait for all Tx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for TX stopped status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		/* Check TX Command reg that all Txqs are stopped */
		val = mvreg_read(pp, MVNETA_TXQ_CMD);

	} while (val & MVNETA_TXQ_ENABLE_MASK);

	/* Double check to verify that TX FIFO is empty */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
			netdev_warn(pp->dev,
				    "TX FIFO empty timeout status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_PORT_STATUS);
	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
		 (val & MVNETA_TX_IN_PRGRS));

	udelay(200);
}

/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
	u32 val;

	/* Enable port */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val |= MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
	u32 val;

	/* Reset the Enable bit in the Serial Control Register */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

	udelay(200);
}

/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);

}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
		val = 0;
	} else {
		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}

static void mvneta_percpu_unmask_interrupt(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queues are unmasked, but actually only the ones
	 * mapped to this CPU will be unmasked
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
		    MVNETA_RX_INTR_MASK_ALL |
		    MVNETA_TX_INTR_MASK_ALL |
		    MVNETA_MISCINTR_INTR_MASK);
}

static void mvneta_percpu_mask_interrupt(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queues are masked, but actually only the ones
	 * mapped to this CPU will be masked
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
}

static void mvneta_percpu_clear_intr_cause(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queues are cleared, but actually only the ones
	 * mapped to this CPU will be cleared
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
}

/* This method sets defaults to the NETA port:
 *	Clears interrupt Cause and Mask registers.
 *	Clears all MAC tables.
 *	Sets defaults to all registers.
 *	Resets RX and TX descriptor rings.
 *	Resets PHY.
 * This method can be called after mvneta_port_down() to return the port
 *	settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
	int cpu;
	int queue;
	u32 val;
	int max_cpu = num_present_cpus();

	/* Clear all Cause registers */
	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);

	/* Mask all interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

	/* Set CPU queue access map. CPUs are assigned to the RX and
	 * TX queues modulo their number. If there is only one TX
	 * queue then it is assigned to the CPU associated with the
	 * default RX queue.
	 */
	for_each_present_cpu(cpu) {
		int rxq_map = 0, txq_map = 0;
		int rxq, txq;
		if (!pp->neta_armada3700) {
			for (rxq = 0; rxq < rxq_number; rxq++)
				if ((rxq % max_cpu) == cpu)
					rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);

			for (txq = 0; txq < txq_number; txq++)
				if ((txq % max_cpu) == cpu)
					txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);

			/* With only one TX queue we configure a special case
			 * which allows getting all the irqs on a single
			 * CPU
			 */
			if (txq_number == 1)
				txq_map = (cpu == pp->rxq_def) ?
					MVNETA_CPU_TXQ_ACCESS(1) : 0;

		} else {
			txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
			rxq_map = MVNETA_CPU_RXQ_ACCESS_ALL_MASK;
		}

		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
	}

	/* Reset RX and TX DMAs */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
	if (pp->bm_priv)
		/* HW buffer management + legacy parser */
		val = MVNETA_ACC_MODE_EXT2;
	else
		/* SW buffer management + legacy parser */
		val = MVNETA_ACC_MODE_EXT1;
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	if (pp->bm_priv)
		mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);

	/* Update val of portCfg register accordingly with all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

	/* Build PORT_SDMA_CONFIG_REG */
	val = 0;

	/* Default burst size */
	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

#if defined(__BIG_ENDIAN)
	val |= MVNETA_DESC_SWAP;
#endif

	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

	/* Disable PHY polling in hardware, since we're using the
	 * kernel phylib to do this.
	 */
	val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
	val &= ~MVNETA_PHY_POLLING_ENABLE;
	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);

	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);

	/* Set port interrupt enable register - default enable all */
	mvreg_write(pp, MVNETA_INTR_ENABLE,
		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));

	mvneta_mib_counters_clear(pp);
}

/* Set max sizes for tx queues */
static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
{
	u32 val, size, mtu;
	int queue;

	mtu = max_tx_size * 8;
	if (mtu > MVNETA_TX_MTU_MAX)
		mtu = MVNETA_TX_MTU_MAX;

	/* Set MTU */
	val = mvreg_read(pp, MVNETA_TX_MTU);
	val &= ~MVNETA_TX_MTU_MAX;
	val |= mtu;
	mvreg_write(pp, MVNETA_TX_MTU, val);

	/* TX token size and all TXQs token size must be larger than MTU */
	val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);

	size = val & MVNETA_TX_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
		val |= size;
		mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
	}
	for (queue = 0; queue < txq_number; queue++) {
		val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));

		size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
		if (size < mtu) {
			size = mtu;
			val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
		}
	}
}

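/* Each entry of the 16-entry unicast filter table is one byte: bit 0 is
 * the "accept" flag and bits 3:1 hold the rx queue. Four entries are
 * packed into each 32-bit register.
 */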
/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
				  int queue)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the Unicast table entry */
	last_nibble = (0xf & last_nibble);

	/* offset from unicast tbl base */
	tbl_offset = (last_nibble / 4) * 4;

	/* offset within the above reg */
	reg_offset = last_nibble % 4;

	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

	if (queue == -1) {
		/* Clear accepts frame bit at specified unicast DA tbl entry */
		unicast_reg &= ~(0xff << (8 * reg_offset));
	} else {
		unicast_reg &= ~(0xff << (8 * reg_offset));
		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}

/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
				int queue)
{
	unsigned int mac_h;
	unsigned int mac_l;

	if (queue != -1) {
		mac_l = (addr[4] << 8) | (addr[5]);
		mac_h = (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | (addr[3] << 0);

		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
	}

	/* Accept frames of this address */
	mvneta_set_ucast_addr(pp, addr[5], queue);
}

/* Set the number of packets that will be received before an RX interrupt
 * is generated by the HW.
 */
static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
		    value | MVNETA_RXQ_NON_OCCUPIED(0));
}

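/* The time-coalescing register holds the delay in core clock cycles, so
 * the microsecond value passed in is scaled by the core clock rate below.
 */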
6a20c175
TP
1580/* Set the time delay in usec before RX interrupt will be generated by
1581 * HW.
c5aff182
TP
1582 */
1583static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
1584 struct mvneta_rx_queue *rxq, u32 value)
1585{
189dd626
TP
1586 u32 val;
1587 unsigned long clk_rate;
1588
1589 clk_rate = clk_get_rate(pp->clk);
1590 val = (clk_rate / 1000000) * value;
c5aff182
TP
1591
1592 mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
c5aff182
TP
1593}
1594
1595/* Set threshold for TX_DONE pkts coalescing */
1596static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
1597 struct mvneta_tx_queue *txq, u32 value)
1598{
1599 u32 val;
1600
1601 val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));
1602
1603 val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
1604 val |= MVNETA_TXQ_SENT_THRESH_MASK(value);
1605
1606 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
c5aff182
TP
1607}
1608
c5aff182
TP
1609/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
1610static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
f88bee1c
GC
1611 u32 phys_addr, void *virt_addr,
1612 struct mvneta_rx_queue *rxq)
c5aff182 1613{
f88bee1c
GC
1614 int i;
1615
c5aff182 1616 rx_desc->buf_phys_addr = phys_addr;
f88bee1c
GC
1617 i = rx_desc - rxq->descs;
1618 rxq->buf_virt_addr[i] = virt_addr;
c5aff182
TP
1619}
1620
1621/* Decrement sent descriptors counter */
1622static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
1623 struct mvneta_tx_queue *txq,
1624 int sent_desc)
1625{
1626 u32 val;
1627
1628 /* Only 255 TX descriptors can be updated at once */
1629 while (sent_desc > 0xff) {
1630 val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
1631 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1632 sent_desc = sent_desc - 0xff;
1633 }
1634
1635 val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
1636 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1637}
1638
1639/* Get number of TX descriptors already sent by HW */
1640static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
1641 struct mvneta_tx_queue *txq)
1642{
1643 u32 val;
1644 int sent_desc;
1645
1646 val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
1647 sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
1648 MVNETA_TXQ_SENT_DESC_SHIFT;
1649
1650 return sent_desc;
1651}
1652
6a20c175 1653/* Get number of sent descriptors and decrement counter.
c5aff182
TP
1654 * The number of sent descriptors is returned.
1655 */
1656static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
1657 struct mvneta_tx_queue *txq)
1658{
1659 int sent_desc;
1660
1661 /* Get number of sent descriptors */
1662 sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
1663
1664 /* Decrement sent descriptors counter */
1665 if (sent_desc)
1666 mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
1667
1668 return sent_desc;
1669}
1670
1671/* Set TXQ descriptors fields relevant for CSUM calculation */
1672static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
1673 int ip_hdr_len, int l4_proto)
1674{
1675 u32 command;
1676
1677 /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
6a20c175
TP
1678 * G_L4_chk, L4_type; required only for checksum
1679 * calculation
1680 */
c5aff182
TP
1681 command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
1682 command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
1683
0a198587 1684 if (l3_proto == htons(ETH_P_IP))
c5aff182
TP
1685 command |= MVNETA_TXD_IP_CSUM;
1686 else
1687 command |= MVNETA_TX_L3_IP6;
1688
1689 if (l4_proto == IPPROTO_TCP)
1690 command |= MVNETA_TX_L4_CSUM_FULL;
1691 else if (l4_proto == IPPROTO_UDP)
1692 command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
1693 else
1694 command |= MVNETA_TX_L4_CSUM_NOT;
1695
1696 return command;
1697}
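
/* Worked example (illustrative, values assumed for a plain Ethernet TCP/IPv4
 * frame): the caller would pass roughly
 *
 *	l3_offs    = 14                  (ETH_HLEN)
 *	l3_proto   = htons(ETH_P_IP)
 *	ip_hdr_len = 5                   (IHL in 32-bit words, 20-byte header)
 *	l4_proto   = IPPROTO_TCP
 *
 * giving a command word with the L3 offset and IP header length fields set
 * plus MVNETA_TXD_IP_CSUM and MVNETA_TX_L4_CSUM_FULL.
 */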
1698
1699
1700/* Display more error info */
1701static void mvneta_rx_error(struct mvneta_port *pp,
1702 struct mvneta_rx_desc *rx_desc)
1703{
1704 u32 status = rx_desc->status;
1705
c5aff182
TP
1706 switch (status & MVNETA_RXD_ERR_CODE_MASK) {
1707 case MVNETA_RXD_ERR_CRC:
1708 netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
1709 status, rx_desc->data_size);
1710 break;
1711 case MVNETA_RXD_ERR_OVERRUN:
1712 netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
1713 status, rx_desc->data_size);
1714 break;
1715 case MVNETA_RXD_ERR_LEN:
1716 netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
1717 status, rx_desc->data_size);
1718 break;
1719 case MVNETA_RXD_ERR_RESOURCE:
1720 netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
1721 status, rx_desc->data_size);
1722 break;
1723 }
1724}
1725
5428213c 1726/* Handle RX checksum offload based on the descriptor's status */
1727static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
c5aff182
TP
1728 struct sk_buff *skb)
1729{
f945cec8
YK
1730 if ((pp->dev->features & NETIF_F_RXCSUM) &&
1731 (status & MVNETA_RXD_L3_IP4) &&
5428213c 1732 (status & MVNETA_RXD_L4_CSUM_OK)) {
c5aff182
TP
1733 skb->csum = 0;
1734 skb->ip_summed = CHECKSUM_UNNECESSARY;
1735 return;
1736 }
1737
1738 skb->ip_summed = CHECKSUM_NONE;
1739}
1740
6c498974 1741/* Return tx queue pointer (find last set bit) according to <cause> returned
 1742 * from the tx_done reg. <cause> must not be null. The returned pointer is
 1743 * always a valid queue, matching the highest-numbered queue set in <cause>.
1744 */
c5aff182
TP
1745static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
1746 u32 cause)
1747{
1748 int queue = fls(cause) - 1;
1749
6c498974 1750 return &pp->txqs[queue];
c5aff182
TP
1751}
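
/* Worked example (illustrative): cause = 0x06 means TX queues 1 and 2 are
 * pending; fls(0x06) - 1 = 2, so txqs[2] is returned first. The caller
 * (mvneta_tx_done_gbe) clears that queue's bit and loops until <cause> is
 * empty.
 */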
1752
1753/* Free tx queue skbuffs */
1754static void mvneta_txq_bufs_free(struct mvneta_port *pp,
a29b6235
MW
1755 struct mvneta_tx_queue *txq, int num,
1756 struct netdev_queue *nq)
c5aff182 1757{
a29b6235 1758 unsigned int bytes_compl = 0, pkts_compl = 0;
c5aff182
TP
1759 int i;
1760
1761 for (i = 0; i < num; i++) {
1762 struct mvneta_tx_desc *tx_desc = txq->descs +
1763 txq->txq_get_index;
1764 struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
1765
a29b6235
MW
1766 if (skb) {
1767 bytes_compl += skb->len;
1768 pkts_compl++;
1769 }
1770
c5aff182
TP
1771 mvneta_txq_inc_get(txq);
1772
2e3173a3
EG
1773 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
1774 dma_unmap_single(pp->dev->dev.parent,
1775 tx_desc->buf_phys_addr,
1776 tx_desc->data_size, DMA_TO_DEVICE);
ba7e46ef
EG
1777 if (!skb)
1778 continue;
c5aff182
TP
1779 dev_kfree_skb_any(skb);
1780 }
a29b6235
MW
1781
1782 netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
c5aff182
TP
1783}
1784
1785/* Handle end of transmission */
cd713199 1786static void mvneta_txq_done(struct mvneta_port *pp,
c5aff182
TP
1787 struct mvneta_tx_queue *txq)
1788{
1789 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
1790 int tx_done;
1791
1792 tx_done = mvneta_txq_sent_desc_proc(pp, txq);
cd713199
AE
1793 if (!tx_done)
1794 return;
1795
a29b6235 1796 mvneta_txq_bufs_free(pp, txq, tx_done, nq);
c5aff182
TP
1797
1798 txq->count -= tx_done;
1799
1800 if (netif_tx_queue_stopped(nq)) {
8eef5f97 1801 if (txq->count <= txq->tx_wake_threshold)
c5aff182
TP
1802 netif_tx_wake_queue(nq);
1803 }
c5aff182
TP
1804}
1805
dc35a10f 1806/* Refill processing for SW buffer management */
7e47fd84 1807/* Allocate page per descriptor */
c5aff182 1808static int mvneta_rx_refill(struct mvneta_port *pp,
f88bee1c 1809 struct mvneta_rx_desc *rx_desc,
7e47fd84
GC
1810 struct mvneta_rx_queue *rxq,
1811 gfp_t gfp_mask)
c5aff182
TP
1812{
1813 dma_addr_t phys_addr;
7e47fd84 1814 struct page *page;
c5aff182 1815
7e47fd84
GC
1816 page = __dev_alloc_page(gfp_mask);
1817 if (!page)
c5aff182
TP
1818 return -ENOMEM;
1819
7e47fd84
GC
1820 /* map page for use */
1821 phys_addr = dma_map_page(pp->dev->dev.parent, page, 0, PAGE_SIZE,
1822 DMA_FROM_DEVICE);
c5aff182 1823 if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
7e47fd84 1824 __free_page(page);
c5aff182
TP
1825 return -ENOMEM;
1826 }
1827
8d5047cf 1828 phys_addr += pp->rx_offset_correction;
7e47fd84 1829 mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
c5aff182
TP
1830 return 0;
1831}
1832
1833/* Handle tx checksum */
1834static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
1835{
1836 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1837 int ip_hdr_len = 0;
817dbfa5 1838 __be16 l3_proto = vlan_get_protocol(skb);
c5aff182
TP
1839 u8 l4_proto;
1840
817dbfa5 1841 if (l3_proto == htons(ETH_P_IP)) {
c5aff182
TP
1842 struct iphdr *ip4h = ip_hdr(skb);
1843
1844 /* Calculate IPv4 checksum and L4 checksum */
1845 ip_hdr_len = ip4h->ihl;
1846 l4_proto = ip4h->protocol;
817dbfa5 1847 } else if (l3_proto == htons(ETH_P_IPV6)) {
c5aff182
TP
1848 struct ipv6hdr *ip6h = ipv6_hdr(skb);
1849
1850 /* Read l4_protocol from one of IPv6 extra headers */
1851 if (skb_network_header_len(skb) > 0)
1852 ip_hdr_len = (skb_network_header_len(skb) >> 2);
1853 l4_proto = ip6h->nexthdr;
1854 } else
1855 return MVNETA_TX_L4_CSUM_NOT;
1856
1857 return mvneta_txq_desc_csum(skb_network_offset(skb),
817dbfa5 1858 l3_proto, ip_hdr_len, l4_proto);
c5aff182
TP
1859 }
1860
1861 return MVNETA_TX_L4_CSUM_NOT;
1862}
1863
c5aff182
TP
1864/* Drop packets received by the RXQ and free buffers */
1865static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
1866 struct mvneta_rx_queue *rxq)
1867{
1868 int rx_done, i;
1869
1870 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
dc35a10f
MW
1871 if (rx_done)
1872 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1873
1874 if (pp->bm_priv) {
1875 for (i = 0; i < rx_done; i++) {
1876 struct mvneta_rx_desc *rx_desc =
1877 mvneta_rxq_next_desc_get(rxq);
1878 u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
1879 struct mvneta_bm_pool *bm_pool;
1880
1881 bm_pool = &pp->bm_priv->bm_pools[pool_id];
1882 /* Return dropped buffer to the pool */
1883 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
1884 rx_desc->buf_phys_addr);
1885 }
1886 return;
1887 }
1888
c5aff182
TP
1889 for (i = 0; i < rxq->size; i++) {
1890 struct mvneta_rx_desc *rx_desc = rxq->descs + i;
f88bee1c 1891 void *data = rxq->buf_virt_addr[i];
562e2f46
YK
1892 if (!data || !(rx_desc->buf_phys_addr))
1893 continue;
c5aff182 1894
f4a51879
AT
1895 dma_unmap_page(pp->dev->dev.parent, rx_desc->buf_phys_addr,
1896 PAGE_SIZE, DMA_FROM_DEVICE);
7e47fd84 1897 __free_page(data);
c5aff182 1898 }
dc35a10f 1899}
c5aff182 1900
562e2f46
YK
1901static inline
1902int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
1903{
1904 struct mvneta_rx_desc *rx_desc;
1905 int curr_desc = rxq->first_to_refill;
1906 int i;
1907
1908 for (i = 0; (i < rxq->refill_num) && (i < 64); i++) {
1909 rx_desc = rxq->descs + curr_desc;
1910 if (!(rx_desc->buf_phys_addr)) {
1911 if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) {
1912 pr_err("Can't refill queue %d. Done %d from %d\n",
1913 rxq->id, i, rxq->refill_num);
1914 rxq->refill_err++;
1915 break;
1916 }
1917 }
1918 curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc);
1919 }
1920 rxq->refill_num -= i;
1921 rxq->first_to_refill = curr_desc;
1922
1923 return i;
1924}
1925
dc35a10f 1926/* Main rx processing when using software buffer management */
7a86f05f 1927static int mvneta_rx_swbm(struct napi_struct *napi,
562e2f46 1928 struct mvneta_port *pp, int budget,
dc35a10f
MW
1929 struct mvneta_rx_queue *rxq)
1930{
dc35a10f 1931 struct net_device *dev = pp->dev;
562e2f46
YK
1932 int rx_todo, rx_proc;
1933 int refill = 0;
dc35a10f
MW
1934 u32 rcvd_pkts = 0;
1935 u32 rcvd_bytes = 0;
1936
1937 /* Get number of received packets */
562e2f46
YK
1938 rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
1939 rx_proc = 0;
dc35a10f
MW
1940
1941 /* Fairness NAPI loop */
562e2f46 1942 while ((rcvd_pkts < budget) && (rx_proc < rx_todo)) {
dc35a10f 1943 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
dc35a10f 1944 unsigned char *data;
7e47fd84 1945 struct page *page;
dc35a10f 1946 dma_addr_t phys_addr;
562e2f46
YK
1947 u32 rx_status, index;
1948 int rx_bytes, skb_size, copy_size;
1949 int frag_num, frag_size, frag_offset;
dc35a10f 1950
f88bee1c 1951 index = rx_desc - rxq->descs;
7e47fd84
GC
1952 page = (struct page *)rxq->buf_virt_addr[index];
1953 data = page_address(page);
1954 /* Prefetch header */
1955 prefetch(data);
dc35a10f 1956
562e2f46
YK
1957 phys_addr = rx_desc->buf_phys_addr;
1958 rx_status = rx_desc->status;
1959 rx_proc++;
1960 rxq->refill_num++;
1961
1962 if (rx_status & MVNETA_RXD_FIRST_DESC) {
1963 /* Check errors only for FIRST descriptor */
1964 if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
1965 mvneta_rx_error(pp, rx_desc);
1966 dev->stats.rx_errors++;
1967 /* leave the descriptor untouched */
1968 continue;
1969 }
1970 rx_bytes = rx_desc->data_size -
1971 (ETH_FCS_LEN + MVNETA_MH_SIZE);
dc35a10f 1972
562e2f46
YK
1973 /* Allocate small skb for each new packet */
1974 skb_size = max(rx_copybreak, rx_header_size);
1975 rxq->skb = netdev_alloc_skb_ip_align(dev, skb_size);
1976 if (unlikely(!rxq->skb)) {
17a96da6
GC
1977 netdev_err(dev,
1978 "Can't allocate skb on queue %d\n",
1979 rxq->id);
562e2f46 1980 dev->stats.rx_dropped++;
17a96da6 1981 rxq->skb_alloc_err++;
562e2f46 1982 continue;
17a96da6 1983 }
562e2f46
YK
1984 copy_size = min(skb_size, rx_bytes);
1985
1986 /* Copy data from buffer to SKB, skip Marvell header */
1987 memcpy(rxq->skb->data, data + MVNETA_MH_SIZE,
1988 copy_size);
1989 skb_put(rxq->skb, copy_size);
1990 rxq->left_size = rx_bytes - copy_size;
1991
1992 mvneta_rx_csum(pp, rx_status, rxq->skb);
1993 if (rxq->left_size == 0) {
1994 int size = copy_size + MVNETA_MH_SIZE;
1995
1996 dma_sync_single_range_for_cpu(dev->dev.parent,
1997 phys_addr, 0,
1998 size,
1999 DMA_FROM_DEVICE);
2000
2001 /* leave the descriptor and buffer untouched */
2002 } else {
2003 /* refill descriptor with new buffer later */
2004 rx_desc->buf_phys_addr = 0;
2005
2006 frag_num = 0;
2007 frag_offset = copy_size + MVNETA_MH_SIZE;
2008 frag_size = min(rxq->left_size,
2009 (int)(PAGE_SIZE - frag_offset));
2010 skb_add_rx_frag(rxq->skb, frag_num, page,
2011 frag_offset, frag_size,
2012 PAGE_SIZE);
cf5cca6e
AT
2013 dma_unmap_page(dev->dev.parent, phys_addr,
2014 PAGE_SIZE, DMA_FROM_DEVICE);
562e2f46
YK
2015 rxq->left_size -= frag_size;
2016 }
2017 } else {
2018 /* Middle or Last descriptor */
2019 if (unlikely(!rxq->skb)) {
2020 pr_debug("no skb for rx_status 0x%x\n",
2021 rx_status);
2022 continue;
2023 }
2024 if (!rxq->left_size) {
2025 /* last descriptor has only FCS */
2026 /* and can be discarded */
2027 dma_sync_single_range_for_cpu(dev->dev.parent,
2028 phys_addr, 0,
2029 ETH_FCS_LEN,
2030 DMA_FROM_DEVICE);
2031 /* leave the descriptor and buffer untouched */
2032 } else {
2033 /* refill descriptor with new buffer later */
2034 rx_desc->buf_phys_addr = 0;
2035
2036 frag_num = skb_shinfo(rxq->skb)->nr_frags;
2037 frag_offset = 0;
2038 frag_size = min(rxq->left_size,
2039 (int)(PAGE_SIZE - frag_offset));
2040 skb_add_rx_frag(rxq->skb, frag_num, page,
2041 frag_offset, frag_size,
2042 PAGE_SIZE);
2043
f4a51879
AT
2044 dma_unmap_page(dev->dev.parent, phys_addr,
2045 PAGE_SIZE, DMA_FROM_DEVICE);
562e2f46
YK
2046
2047 rxq->left_size -= frag_size;
2048 }
2049 } /* Middle or Last descriptor */
dc35a10f 2050
562e2f46
YK
2051 if (!(rx_status & MVNETA_RXD_LAST_DESC))
2052 /* no last descriptor this time */
dc35a10f 2053 continue;
dc35a10f 2054
562e2f46
YK
2055 if (rxq->left_size) {
2056 pr_err("get last desc, but left_size (%d) != 0\n",
2057 rxq->left_size);
2058 dev_kfree_skb_any(rxq->skb);
2059 rxq->left_size = 0;
2060 rxq->skb = NULL;
2061 continue;
dc35a10f 2062 }
dc35a10f 2063 rcvd_pkts++;
562e2f46 2064 rcvd_bytes += rxq->skb->len;
dc35a10f
MW
2065
2066 /* Linux processing */
562e2f46 2067 rxq->skb->protocol = eth_type_trans(rxq->skb, dev);
dc35a10f 2068
d28118e3 2069 napi_gro_receive(napi, rxq->skb);
dc35a10f 2070
562e2f46
YK
2071 /* clean incomplete skb pointer in queue */
2072 rxq->skb = NULL;
2073 rxq->left_size = 0;
dc35a10f
MW
2074 }
2075
2076 if (rcvd_pkts) {
2077 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2078
2079 u64_stats_update_begin(&stats->syncp);
2080 stats->rx_packets += rcvd_pkts;
2081 stats->rx_bytes += rcvd_bytes;
2082 u64_stats_update_end(&stats->syncp);
2083 }
2084
562e2f46
YK
2085 /* return some buffers to the hardware queue; refilling one at a time is too slow */
2086 refill = mvneta_rx_refill_queue(pp, rxq);
2087
dc35a10f 2088 /* Update rxq management counters */
562e2f46 2089 mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);
dc35a10f 2090
562e2f46 2091 return rcvd_pkts;
c5aff182
TP
2092}
2093
dc35a10f 2094/* Main rx processing when using hardware buffer management */
7a86f05f
AL
2095static int mvneta_rx_hwbm(struct napi_struct *napi,
2096 struct mvneta_port *pp, int rx_todo,
dc35a10f 2097 struct mvneta_rx_queue *rxq)
c5aff182
TP
2098{
2099 struct net_device *dev = pp->dev;
a84e3289 2100 int rx_done;
dc4277dd 2101 u32 rcvd_pkts = 0;
2102 u32 rcvd_bytes = 0;
c5aff182
TP
2103
2104 /* Get number of received packets */
2105 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
2106
2107 if (rx_todo > rx_done)
2108 rx_todo = rx_done;
2109
2110 rx_done = 0;
c5aff182
TP
2111
2112 /* Fairness NAPI loop */
2113 while (rx_done < rx_todo) {
2114 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
dc35a10f 2115 struct mvneta_bm_pool *bm_pool = NULL;
c5aff182 2116 struct sk_buff *skb;
8ec2cd48 2117 unsigned char *data;
daf158d0 2118 dma_addr_t phys_addr;
dc35a10f 2119 u32 rx_status, frag_size;
c5aff182 2120 int rx_bytes, err;
dc35a10f 2121 u8 pool_id;
c5aff182 2122
c5aff182 2123 rx_done++;
c5aff182 2124 rx_status = rx_desc->status;
f19fadfc 2125 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
f88bee1c 2126 data = (u8 *)(uintptr_t)rx_desc->buf_cookie;
daf158d0 2127 phys_addr = rx_desc->buf_phys_addr;
dc35a10f
MW
2128 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
2129 bm_pool = &pp->bm_priv->bm_pools[pool_id];
c5aff182 2130
5428213c 2131 if (!mvneta_rxq_desc_is_first_last(rx_status) ||
f19fadfc 2132 (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
dc35a10f
MW
2133err_drop_frame_ret_pool:
2134 /* Return the buffer to the pool */
2135 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2136 rx_desc->buf_phys_addr);
2137err_drop_frame:
c5aff182
TP
2138 dev->stats.rx_errors++;
2139 mvneta_rx_error(pp, rx_desc);
8ec2cd48 2140 /* leave the descriptor untouched */
c5aff182
TP
2141 continue;
2142 }
2143
f19fadfc 2144 if (rx_bytes <= rx_copybreak) {
2145 /* better copy a small frame and not unmap the DMA region */
2146 skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
2147 if (unlikely(!skb))
dc35a10f 2148 goto err_drop_frame_ret_pool;
f19fadfc 2149
2150 dma_sync_single_range_for_cpu(dev->dev.parent,
2151 rx_desc->buf_phys_addr,
2152 MVNETA_MH_SIZE + NET_SKB_PAD,
2153 rx_bytes,
2154 DMA_FROM_DEVICE);
59ae1d12
JB
2155 skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
2156 rx_bytes);
f19fadfc 2157
2158 skb->protocol = eth_type_trans(skb, dev);
2159 mvneta_rx_csum(pp, rx_status, skb);
7a86f05f 2160 napi_gro_receive(napi, skb);
f19fadfc 2161
2162 rcvd_pkts++;
2163 rcvd_bytes += rx_bytes;
2164
dc35a10f
MW
2165 /* Return the buffer to the pool */
2166 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2167 rx_desc->buf_phys_addr);
2168
f19fadfc 2169 /* leave the descriptor and buffer untouched */
2170 continue;
2171 }
2172
a84e3289 2173 /* Refill processing */
baa11ebc 2174 err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
a84e3289
SG
2175 if (err) {
2176 netdev_err(dev, "Linux processing - Can't refill\n");
17a96da6 2177 rxq->refill_err++;
dc35a10f 2178 goto err_drop_frame_ret_pool;
a84e3289
SG
2179 }
2180
baa11ebc 2181 frag_size = bm_pool->hwbm_pool.frag_size;
dc35a10f
MW
2182
2183 skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
f19fadfc 2184
26c17a17
MW
2185 /* After refill the old buffer has to be unmapped regardless
2186 * of whether the skb was successfully built or not.
2187 */
dc35a10f
MW
2188 dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr,
2189 bm_pool->buf_size, DMA_FROM_DEVICE);
26c17a17
MW
2190 if (!skb)
2191 goto err_drop_frame;
2192
dc4277dd 2193 rcvd_pkts++;
2194 rcvd_bytes += rx_bytes;
c5aff182
TP
2195
2196 /* Linux processing */
8ec2cd48 2197 skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
c5aff182
TP
2198 skb_put(skb, rx_bytes);
2199
2200 skb->protocol = eth_type_trans(skb, dev);
2201
5428213c 2202 mvneta_rx_csum(pp, rx_status, skb);
c5aff182 2203
7a86f05f 2204 napi_gro_receive(napi, skb);
c5aff182
TP
2205 }
2206
dc4277dd 2207 if (rcvd_pkts) {
74c41b04 2208 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2209
2210 u64_stats_update_begin(&stats->syncp);
2211 stats->rx_packets += rcvd_pkts;
2212 stats->rx_bytes += rcvd_bytes;
2213 u64_stats_update_end(&stats->syncp);
dc4277dd 2214 }
2215
c5aff182 2216 /* Update rxq management counters */
a84e3289 2217 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
c5aff182
TP
2218
2219 return rx_done;
2220}
2221
2adb719d
EG
2222static inline void
2223mvneta_tso_put_hdr(struct sk_buff *skb,
2224 struct mvneta_port *pp, struct mvneta_tx_queue *txq)
2225{
2226 struct mvneta_tx_desc *tx_desc;
2227 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2228
2229 txq->tx_skb[txq->txq_put_index] = NULL;
2230 tx_desc = mvneta_txq_next_desc_get(txq);
2231 tx_desc->data_size = hdr_len;
2232 tx_desc->command = mvneta_skb_tx_csum(pp, skb);
2233 tx_desc->command |= MVNETA_TXD_F_DESC;
2234 tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
2235 txq->txq_put_index * TSO_HEADER_SIZE;
2236 mvneta_txq_inc_put(txq);
2237}
2238
2239static inline int
2240mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
2241 struct sk_buff *skb, char *data, int size,
2242 bool last_tcp, bool is_last)
2243{
2244 struct mvneta_tx_desc *tx_desc;
2245
2246 tx_desc = mvneta_txq_next_desc_get(txq);
2247 tx_desc->data_size = size;
2248 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
2249 size, DMA_TO_DEVICE);
2250 if (unlikely(dma_mapping_error(dev->dev.parent,
2251 tx_desc->buf_phys_addr))) {
2252 mvneta_txq_desc_put(txq);
2253 return -ENOMEM;
2254 }
2255
2256 tx_desc->command = 0;
2257 txq->tx_skb[txq->txq_put_index] = NULL;
2258
2259 if (last_tcp) {
2260 /* last descriptor in the TCP packet */
2261 tx_desc->command = MVNETA_TXD_L_DESC;
2262
2263 /* last descriptor in SKB */
2264 if (is_last)
2265 txq->tx_skb[txq->txq_put_index] = skb;
2266 }
2267 mvneta_txq_inc_put(txq);
2268 return 0;
2269}
2270
2271static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
2272 struct mvneta_tx_queue *txq)
2273{
2274 int total_len, data_left;
2275 int desc_count = 0;
2276 struct mvneta_port *pp = netdev_priv(dev);
2277 struct tso_t tso;
2278 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2279 int i;
2280
2281 /* Count needed descriptors */
2282 if ((txq->count + tso_count_descs(skb)) >= txq->size)
2283 return 0;
2284
2285 if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
2286 pr_info("*** Is this even possible???!?!?\n");
2287 return 0;
2288 }
2289
2290 /* Initialize the TSO handler, and prepare the first payload */
2291 tso_start(skb, &tso);
2292
2293 total_len = skb->len - hdr_len;
2294 while (total_len > 0) {
2295 char *hdr;
2296
2297 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
2298 total_len -= data_left;
2299 desc_count++;
2300
2301 /* prepare packet headers: MAC + IP + TCP */
2302 hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
2303 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
2304
2305 mvneta_tso_put_hdr(skb, pp, txq);
2306
2307 while (data_left > 0) {
2308 int size;
2309 desc_count++;
2310
2311 size = min_t(int, tso.size, data_left);
2312
2313 if (mvneta_tso_put_data(dev, txq, skb,
2314 tso.data, size,
2315 size == data_left,
2316 total_len == 0))
2317 goto err_release;
2318 data_left -= size;
2319
2320 tso_build_data(skb, &tso, size);
2321 }
2322 }
2323
2324 return desc_count;
2325
2326err_release:
2327 /* Release all used data descriptors; header descriptors must not
2328 * be DMA-unmapped.
2329 */
2330 for (i = desc_count - 1; i >= 0; i--) {
2331 struct mvneta_tx_desc *tx_desc = txq->descs + i;
2e3173a3 2332 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
2adb719d
EG
2333 dma_unmap_single(pp->dev->dev.parent,
2334 tx_desc->buf_phys_addr,
2335 tx_desc->data_size,
2336 DMA_TO_DEVICE);
2337 mvneta_txq_desc_put(txq);
2338 }
2339 return 0;
2340}
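
/* Worked example (illustrative): a TSO skb with gso_size = 1448 and 4344
 * bytes of TCP payload is split into 3 segments. Each segment takes one
 * header descriptor plus, assuming its payload is contiguous in the skb,
 * one data descriptor, so desc_count ends up at 6.
 */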
2341
c5aff182
TP
2342/* Handle tx fragmentation processing */
2343static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
2344 struct mvneta_tx_queue *txq)
2345{
2346 struct mvneta_tx_desc *tx_desc;
3d4ea02f 2347 int i, nr_frags = skb_shinfo(skb)->nr_frags;
c5aff182 2348
3d4ea02f 2349 for (i = 0; i < nr_frags; i++) {
c5aff182
TP
2350 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2351 void *addr = page_address(frag->page.p) + frag->page_offset;
2352
2353 tx_desc = mvneta_txq_next_desc_get(txq);
2354 tx_desc->data_size = frag->size;
2355
2356 tx_desc->buf_phys_addr =
2357 dma_map_single(pp->dev->dev.parent, addr,
2358 tx_desc->data_size, DMA_TO_DEVICE);
2359
2360 if (dma_mapping_error(pp->dev->dev.parent,
2361 tx_desc->buf_phys_addr)) {
2362 mvneta_txq_desc_put(txq);
2363 goto error;
2364 }
2365
3d4ea02f 2366 if (i == nr_frags - 1) {
c5aff182
TP
2367 /* Last descriptor */
2368 tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
c5aff182 2369 txq->tx_skb[txq->txq_put_index] = skb;
c5aff182
TP
2370 } else {
2371 /* Descriptor in the middle: Not First, Not Last */
2372 tx_desc->command = 0;
c5aff182 2373 txq->tx_skb[txq->txq_put_index] = NULL;
c5aff182 2374 }
3d4ea02f 2375 mvneta_txq_inc_put(txq);
c5aff182
TP
2376 }
2377
2378 return 0;
2379
2380error:
2381 /* Release all descriptors that were used to map fragments of
6a20c175
TP
2382 * this packet, as well as the corresponding DMA mappings
2383 */
c5aff182
TP
2384 for (i = i - 1; i >= 0; i--) {
2385 tx_desc = txq->descs + i;
2386 dma_unmap_single(pp->dev->dev.parent,
2387 tx_desc->buf_phys_addr,
2388 tx_desc->data_size,
2389 DMA_TO_DEVICE);
2390 mvneta_txq_desc_put(txq);
2391 }
2392
2393 return -ENOMEM;
2394}
2395
2396/* Main tx processing */
f03508ce 2397static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
c5aff182
TP
2398{
2399 struct mvneta_port *pp = netdev_priv(dev);
ee40a116
WT
2400 u16 txq_id = skb_get_queue_mapping(skb);
2401 struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
c5aff182 2402 struct mvneta_tx_desc *tx_desc;
5f478b41 2403 int len = skb->len;
c5aff182
TP
2404 int frags = 0;
2405 u32 tx_cmd;
2406
2407 if (!netif_running(dev))
2408 goto out;
2409
2adb719d
EG
2410 if (skb_is_gso(skb)) {
2411 frags = mvneta_tx_tso(skb, dev, txq);
2412 goto out;
2413 }
2414
c5aff182 2415 frags = skb_shinfo(skb)->nr_frags + 1;
c5aff182
TP
2416
2417 /* Get a descriptor for the first part of the packet */
2418 tx_desc = mvneta_txq_next_desc_get(txq);
2419
2420 tx_cmd = mvneta_skb_tx_csum(pp, skb);
2421
2422 tx_desc->data_size = skb_headlen(skb);
2423
2424 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
2425 tx_desc->data_size,
2426 DMA_TO_DEVICE);
2427 if (unlikely(dma_mapping_error(dev->dev.parent,
2428 tx_desc->buf_phys_addr))) {
2429 mvneta_txq_desc_put(txq);
2430 frags = 0;
2431 goto out;
2432 }
2433
2434 if (frags == 1) {
2435 /* First and Last descriptor */
2436 tx_cmd |= MVNETA_TXD_FLZ_DESC;
2437 tx_desc->command = tx_cmd;
2438 txq->tx_skb[txq->txq_put_index] = skb;
2439 mvneta_txq_inc_put(txq);
2440 } else {
2441 /* First but not Last */
2442 tx_cmd |= MVNETA_TXD_F_DESC;
2443 txq->tx_skb[txq->txq_put_index] = NULL;
2444 mvneta_txq_inc_put(txq);
2445 tx_desc->command = tx_cmd;
2446 /* Continue with other skb fragments */
2447 if (mvneta_tx_frag_process(pp, skb, txq)) {
2448 dma_unmap_single(dev->dev.parent,
2449 tx_desc->buf_phys_addr,
2450 tx_desc->data_size,
2451 DMA_TO_DEVICE);
2452 mvneta_txq_desc_put(txq);
2453 frags = 0;
2454 goto out;
2455 }
2456 }
2457
c5aff182
TP
2458out:
2459 if (frags > 0) {
74c41b04 2460 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
e19d2dda
EG
2461 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
2462
a29b6235
MW
2463 netdev_tx_sent_queue(nq, len);
2464
e19d2dda 2465 txq->count += frags;
8eef5f97 2466 if (txq->count >= txq->tx_stop_threshold)
e19d2dda 2467 netif_tx_stop_queue(nq);
c5aff182 2468
2a90f7e1
SG
2469 if (!skb->xmit_more || netif_xmit_stopped(nq) ||
2470 txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
2471 mvneta_txq_pend_desc_add(pp, txq, frags);
2472 else
2473 txq->pending += frags;
2474
74c41b04 2475 u64_stats_update_begin(&stats->syncp);
2476 stats->tx_packets++;
5f478b41 2477 stats->tx_bytes += len;
74c41b04 2478 u64_stats_update_end(&stats->syncp);
c5aff182
TP
2479 } else {
2480 dev->stats.tx_dropped++;
2481 dev_kfree_skb_any(skb);
2482 }
2483
c5aff182
TP
2484 return NETDEV_TX_OK;
2485}
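
/* Note on the xmit_more handling above (descriptive only): when the stack
 * indicates that more frames will follow and the queue is not stopped, the
 * new descriptors are only accumulated in txq->pending; the register write
 * that hands them to the hardware (mvneta_txq_pend_desc_add()) is deferred
 * to the last frame of the burst, or to the point where the pending count
 * would no longer fit in MVNETA_TXQ_DEC_SENT_MASK.
 */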
2486
2487
2488/* Free tx resources, when resetting a port */
2489static void mvneta_txq_done_force(struct mvneta_port *pp,
2490 struct mvneta_tx_queue *txq)
2491
2492{
a29b6235 2493 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
c5aff182
TP
2494 int tx_done = txq->count;
2495
a29b6235 2496 mvneta_txq_bufs_free(pp, txq, tx_done, nq);
c5aff182
TP
2497
2498 /* reset txq */
2499 txq->count = 0;
2500 txq->txq_put_index = 0;
2501 txq->txq_get_index = 0;
2502}
2503
6c498974 2504/* Handle tx done - called in softirq context. The <cause_tx_done> argument
2505 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
2506 */
0713a86a 2507static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
c5aff182
TP
2508{
2509 struct mvneta_tx_queue *txq;
c5aff182 2510 struct netdev_queue *nq;
bd9f1ee3 2511 int cpu = smp_processor_id();
c5aff182 2512
6c498974 2513 while (cause_tx_done) {
c5aff182 2514 txq = mvneta_tx_done_policy(pp, cause_tx_done);
c5aff182
TP
2515
2516 nq = netdev_get_tx_queue(pp->dev, txq->id);
bd9f1ee3 2517 __netif_tx_lock(nq, cpu);
c5aff182 2518
0713a86a
AE
2519 if (txq->count)
2520 mvneta_txq_done(pp, txq);
c5aff182
TP
2521
2522 __netif_tx_unlock(nq);
2523 cause_tx_done &= ~((1 << txq->id));
2524 }
c5aff182
TP
2525}
2526
6a20c175 2527/* Compute crc8 of the specified address, using a unique algorithm
c5aff182
TP
2528 * according to the hw spec, different from the generic crc8 algorithm
2529 */
2530static int mvneta_addr_crc(unsigned char *addr)
2531{
2532 int crc = 0;
2533 int i;
2534
2535 for (i = 0; i < ETH_ALEN; i++) {
2536 int j;
2537
2538 crc = (crc ^ addr[i]) << 8;
2539 for (j = 7; j >= 0; j--) {
2540 if (crc & (0x100 << j))
2541 crc ^= 0x107 << j;
2542 }
2543 }
2544
2545 return crc;
2546}
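
/* Descriptive note (a reading of the loop above, not taken from the hw
 * spec): this is a bitwise CRC-8 over polynomial 0x107 (x^8 + x^2 + x + 1),
 * processed MSB-first with a zero initial value and no final XOR; per the
 * comment above it intentionally differs from the generic crc8 algorithm.
 */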
2547
2548/* This method controls the net device special MAC multicast support.
2549 * The Special Multicast Table for MAC addresses supports MAC of the form
2550 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
2551 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
2552 * Table entries in the DA-Filter table. This method sets the appropriate
2553 * Special Multicast Table entry.
2554 */
2555static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
2556 unsigned char last_byte,
2557 int queue)
2558{
2559 unsigned int smc_table_reg;
2560 unsigned int tbl_offset;
2561 unsigned int reg_offset;
2562
2563 /* Register offset from SMC table base */
2564 tbl_offset = (last_byte / 4);
2565 /* Entry offset within the above reg */
2566 reg_offset = last_byte % 4;
2567
2568 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
2569 + tbl_offset * 4));
2570
2571 if (queue == -1)
2572 smc_table_reg &= ~(0xff << (8 * reg_offset));
2573 else {
2574 smc_table_reg &= ~(0xff << (8 * reg_offset));
2575 smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2576 }
2577
2578 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
2579 smc_table_reg);
2580}
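
/* Worked example (illustrative): for destination 01:00:5e:00:00:2a,
 * last_byte = 0x2a = 42, so tbl_offset = 42 / 4 = 10 (the 11th SMC register)
 * and reg_offset = 42 % 4 = 2; with queue = 0 the byte at that offset is set
 * to 0x01 (the accept bit; the queue number would occupy the bits above it).
 */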
2581
2582/* This method controls the network device Other MAC multicast support.
2583 * The Other Multicast Table is used for multicast of another type.
2584 * A CRC-8 is used as an index to the Other Multicast Table entries
2585 * in the DA-Filter table.
2586 * The method gets the CRC-8 value from the calling routine and
2587 * sets the appropriate Other Multicast Table entry according to the
2588 * specified CRC-8.
2589 */
2590static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
2591 unsigned char crc8,
2592 int queue)
2593{
2594 unsigned int omc_table_reg;
2595 unsigned int tbl_offset;
2596 unsigned int reg_offset;
2597
2598 tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
2599 reg_offset = crc8 % 4; /* Entry offset within the above reg */
2600
2601 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
2602
2603 if (queue == -1) {
2604 /* Clear accepts frame bit at specified Other DA table entry */
2605 omc_table_reg &= ~(0xff << (8 * reg_offset));
2606 } else {
2607 omc_table_reg &= ~(0xff << (8 * reg_offset));
2608 omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2609 }
2610
2611 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
2612}
2613
2614/* The network device supports multicast using two tables:
2615 * 1) Special Multicast Table for MAC addresses of the form
2616 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
2617 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
2618 * Table entries in the DA-Filter table.
2619 * 2) Other Multicast Table for multicast of another type. A CRC-8 value
2620 * is used as an index to the Other Multicast Table entries in the
2621 * DA-Filter table.
2622 */
2623static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
2624 int queue)
2625{
2626 unsigned char crc_result = 0;
2627
2628 if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
2629 mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
2630 return 0;
2631 }
2632
2633 crc_result = mvneta_addr_crc(p_addr);
2634 if (queue == -1) {
2635 if (pp->mcast_count[crc_result] == 0) {
2636 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
2637 crc_result);
2638 return -EINVAL;
2639 }
2640
2641 pp->mcast_count[crc_result]--;
2642 if (pp->mcast_count[crc_result] != 0) {
2643 netdev_info(pp->dev,
2644 "After delete there are %d valid Mcast for crc8=0x%02x\n",
2645 pp->mcast_count[crc_result], crc_result);
2646 return -EINVAL;
2647 }
2648 } else
2649 pp->mcast_count[crc_result]++;
2650
2651 mvneta_set_other_mcast_addr(pp, crc_result, queue);
2652
2653 return 0;
2654}
2655
2656/* Configure the filtering mode of the Ethernet port */
2657static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
2658 int is_promisc)
2659{
2660 u32 port_cfg_reg, val;
2661
2662 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
2663
2664 val = mvreg_read(pp, MVNETA_TYPE_PRIO);
2665
2666 /* Set / Clear UPM bit in port configuration register */
2667 if (is_promisc) {
2668 /* Accept all Unicast addresses */
2669 port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
2670 val |= MVNETA_FORCE_UNI;
2671 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
2672 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
2673 } else {
2674 /* Reject all Unicast addresses */
2675 port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
2676 val &= ~MVNETA_FORCE_UNI;
2677 }
2678
2679 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
2680 mvreg_write(pp, MVNETA_TYPE_PRIO, val);
2681}
2682
2683/* register unicast and multicast addresses */
2684static void mvneta_set_rx_mode(struct net_device *dev)
2685{
2686 struct mvneta_port *pp = netdev_priv(dev);
2687 struct netdev_hw_addr *ha;
2688
2689 if (dev->flags & IFF_PROMISC) {
2690 /* Accept all: Multicast + Unicast */
2691 mvneta_rx_unicast_promisc_set(pp, 1);
90b74c01
GC
2692 mvneta_set_ucast_table(pp, pp->rxq_def);
2693 mvneta_set_special_mcast_table(pp, pp->rxq_def);
2694 mvneta_set_other_mcast_table(pp, pp->rxq_def);
c5aff182
TP
2695 } else {
2696 /* Accept single Unicast */
2697 mvneta_rx_unicast_promisc_set(pp, 0);
2698 mvneta_set_ucast_table(pp, -1);
90b74c01 2699 mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);
c5aff182
TP
2700
2701 if (dev->flags & IFF_ALLMULTI) {
2702 /* Accept all multicast */
90b74c01
GC
2703 mvneta_set_special_mcast_table(pp, pp->rxq_def);
2704 mvneta_set_other_mcast_table(pp, pp->rxq_def);
c5aff182
TP
2705 } else {
2706 /* Accept only initialized multicast */
2707 mvneta_set_special_mcast_table(pp, -1);
2708 mvneta_set_other_mcast_table(pp, -1);
2709
2710 if (!netdev_mc_empty(dev)) {
2711 netdev_for_each_mc_addr(ha, dev) {
2712 mvneta_mcast_addr_set(pp, ha->addr,
90b74c01 2713 pp->rxq_def);
c5aff182
TP
2714 }
2715 }
2716 }
2717 }
2718}
2719
2720/* Interrupt handling - the callback for request_irq() */
2721static irqreturn_t mvneta_isr(int irq, void *dev_id)
2636ac3c
MW
2722{
2723 struct mvneta_port *pp = (struct mvneta_port *)dev_id;
2724
2725 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2726 napi_schedule(&pp->napi);
2727
2728 return IRQ_HANDLED;
2729}
2730
2731/* Interrupt handling - the callback for request_percpu_irq() */
2732static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
c5aff182 2733{
12bb03b4 2734 struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;
c5aff182 2735
12bb03b4 2736 disable_percpu_irq(port->pp->dev->irq);
12bb03b4 2737 napi_schedule(&port->napi);
c5aff182
TP
2738
2739 return IRQ_HANDLED;
2740}
2741
503f9aa9 2742static void mvneta_link_change(struct mvneta_port *pp)
898b2970 2743{
898b2970
SS
2744 u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
2745
503f9aa9 2746 phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP));
898b2970
SS
2747}
2748
c5aff182
TP
2749/* NAPI handler
2750 * Bits 0 - 7 of the causeRxTx register indicate that packets were transmitted
2751 * on the corresponding TXQ (Bit 0 is for TX queue 1).
2752 * Bits 8 - 15 of the causeRxTx register indicate that packets were received
2753 * on the corresponding RXQ (Bit 8 is for RX queue 0).
2754 * Each CPU has its own causeRxTx register
2755 */
2756static int mvneta_poll(struct napi_struct *napi, int budget)
2757{
2758 int rx_done = 0;
2759 u32 cause_rx_tx;
2dcf75e2 2760 int rx_queue;
c5aff182 2761 struct mvneta_port *pp = netdev_priv(napi->dev);
12bb03b4 2762 struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
c5aff182
TP
2763
2764 if (!netif_running(pp->dev)) {
2636ac3c 2765 napi_complete(napi);
c5aff182
TP
2766 return rx_done;
2767 }
2768
2769 /* Read cause register */
898b2970
SS
2770 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
2771 if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
2772 u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
2773
2774 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
503f9aa9
RK
2775
2776 if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE |
856b2cc5 2777 MVNETA_CAUSE_LINK_CHANGE))
503f9aa9 2778 mvneta_link_change(pp);
898b2970 2779 }
71f6d1b3 2780
2781 /* Release Tx descriptors */
2782 if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
0713a86a 2783 mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
71f6d1b3 2784 cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
2785 }
c5aff182 2786
6a20c175 2787 /* For the case where the last mvneta_poll did not process all
c5aff182
TP
2788 * RX packets
2789 */
2dcf75e2
GC
2790 rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
2791
2636ac3c
MW
2792 cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
2793 port->cause_rx_tx;
2dcf75e2
GC
2794
2795 if (rx_queue) {
2796 rx_queue = rx_queue - 1;
dc35a10f 2797 if (pp->bm_priv)
7a86f05f
AL
2798 rx_done = mvneta_rx_hwbm(napi, pp, budget,
2799 &pp->rxqs[rx_queue]);
dc35a10f 2800 else
7a86f05f
AL
2801 rx_done = mvneta_rx_swbm(napi, pp, budget,
2802 &pp->rxqs[rx_queue]);
2dcf75e2
GC
2803 }
2804
6ad20165 2805 if (rx_done < budget) {
c5aff182 2806 cause_rx_tx = 0;
6ad20165 2807 napi_complete_done(napi, rx_done);
2636ac3c
MW
2808
2809 if (pp->neta_armada3700) {
2810 unsigned long flags;
2811
2812 local_irq_save(flags);
2813 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2814 MVNETA_RX_INTR_MASK(rxq_number) |
2815 MVNETA_TX_INTR_MASK(txq_number) |
2816 MVNETA_MISCINTR_INTR_MASK);
2817 local_irq_restore(flags);
2818 } else {
2819 enable_percpu_irq(pp->dev->irq, 0);
2820 }
c5aff182
TP
2821 }
2822
2636ac3c
MW
2823 if (pp->neta_armada3700)
2824 pp->cause_rx_tx = cause_rx_tx;
2825 else
2826 port->cause_rx_tx = cause_rx_tx;
2827
c5aff182
TP
2828 return rx_done;
2829}
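
/* Worked example (illustrative): cause_rx_tx = 0x0300 means RX queues 0 and
 * 1 both have work; (0x0300 >> 8) & 0xff = 0x03 and fls(0x03) = 2, so
 * rx_queue becomes 1 and rxqs[1] is the queue polled on this pass. If the
 * budget runs out, the cause value is kept in port->cause_rx_tx (or
 * pp->cause_rx_tx on Armada 3700) and picked up by the next poll.
 */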
2830
c5aff182
TP
2831/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
2832static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2833 int num)
2834{
c5aff182
TP
2835 int i;
2836
2837 for (i = 0; i < num; i++) {
a1a65ab1 2838 memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
7e47fd84
GC
2839 if (mvneta_rx_refill(pp, rxq->descs + i, rxq,
2840 GFP_KERNEL) != 0) {
2841 netdev_err(pp->dev,
2842 "%s:rxq %d, %d of %d buffs filled\n",
2843 __func__, rxq->id, i, num);
c5aff182
TP
2844 break;
2845 }
c5aff182
TP
2846 }
2847
2848 /* Add this number of RX descriptors as non occupied (ready to
6a20c175
TP
2849 * get packets)
2850 */
c5aff182
TP
2851 mvneta_rxq_non_occup_desc_add(pp, rxq, i);
2852
2853 return i;
2854}
2855
2856/* Free all packets pending transmit from all TXQs and reset TX port */
2857static void mvneta_tx_reset(struct mvneta_port *pp)
2858{
2859 int queue;
2860
9672850b 2861 /* free the skbs in the tx ring */
c5aff182
TP
2862 for (queue = 0; queue < txq_number; queue++)
2863 mvneta_txq_done_force(pp, &pp->txqs[queue]);
2864
2865 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
2866 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
2867}
2868
2869static void mvneta_rx_reset(struct mvneta_port *pp)
2870{
2871 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
2872 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
2873}
2874
2875/* Rx/Tx queue initialization/cleanup methods */
2876
4a188a63
JZ
2877static int mvneta_rxq_sw_init(struct mvneta_port *pp,
2878 struct mvneta_rx_queue *rxq)
c5aff182
TP
2879{
2880 rxq->size = pp->rx_ring_size;
2881
2882 /* Allocate memory for RX descriptors */
2883 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2884 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2885 &rxq->descs_phys, GFP_KERNEL);
f95936cc 2886 if (!rxq->descs)
c5aff182 2887 return -ENOMEM;
c5aff182 2888
c5aff182
TP
2889 rxq->last_desc = rxq->size - 1;
2890
4a188a63
JZ
2891 return 0;
2892}
2893
2894static void mvneta_rxq_hw_init(struct mvneta_port *pp,
2895 struct mvneta_rx_queue *rxq)
2896{
c5aff182
TP
2897 /* Set Rx descriptors queue starting address */
2898 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
2899 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
2900
c5aff182
TP
2901 /* Set coalescing pkts and time */
2902 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
2903 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
2904
dc35a10f 2905 if (!pp->bm_priv) {
562e2f46
YK
2906 /* Set Offset */
2907 mvneta_rxq_offset_set(pp, rxq, 0);
2908 mvneta_rxq_buf_size_set(pp, rxq, pp->frag_size);
dc35a10f 2909 mvneta_rxq_bm_disable(pp, rxq);
e9f64999 2910 mvneta_rxq_fill(pp, rxq, rxq->size);
dc35a10f 2911 } else {
562e2f46
YK
2912 /* Set Offset */
2913 mvneta_rxq_offset_set(pp, rxq,
2914 NET_SKB_PAD - pp->rx_offset_correction);
2915
dc35a10f 2916 mvneta_rxq_bm_enable(pp, rxq);
562e2f46 2917 /* Fill RXQ with buffers from RX pool */
dc35a10f
MW
2918 mvneta_rxq_long_pool_set(pp, rxq);
2919 mvneta_rxq_short_pool_set(pp, rxq);
e9f64999 2920 mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
dc35a10f 2921 }
4a188a63
JZ
2922}
2923
2924/* Create a specified RX queue */
2925static int mvneta_rxq_init(struct mvneta_port *pp,
2926 struct mvneta_rx_queue *rxq)
2927
2928{
2929 int ret;
2930
2931 ret = mvneta_rxq_sw_init(pp, rxq);
2932 if (ret < 0)
2933 return ret;
2934
2935 mvneta_rxq_hw_init(pp, rxq);
dc35a10f 2936
c5aff182
TP
2937 return 0;
2938}
2939
2940/* Cleanup Rx queue */
2941static void mvneta_rxq_deinit(struct mvneta_port *pp,
2942 struct mvneta_rx_queue *rxq)
2943{
2944 mvneta_rxq_drop_pkts(pp, rxq);
2945
562e2f46
YK
2946 if (rxq->skb)
2947 dev_kfree_skb_any(rxq->skb);
2948
c5aff182
TP
2949 if (rxq->descs)
2950 dma_free_coherent(pp->dev->dev.parent,
2951 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2952 rxq->descs,
2953 rxq->descs_phys);
2954
2955 rxq->descs = NULL;
2956 rxq->last_desc = 0;
2957 rxq->next_desc_to_proc = 0;
2958 rxq->descs_phys = 0;
562e2f46
YK
2959 rxq->first_to_refill = 0;
2960 rxq->refill_num = 0;
2961 rxq->skb = NULL;
2962 rxq->left_size = 0;
c5aff182
TP
2963}
2964
4a188a63
JZ
2965static int mvneta_txq_sw_init(struct mvneta_port *pp,
2966 struct mvneta_tx_queue *txq)
c5aff182 2967{
50bf8cb6
GC
2968 int cpu;
2969
c5aff182
TP
2970 txq->size = pp->tx_ring_size;
2971
8eef5f97
EG
2972 /* A queue must always have room for at least one skb.
2973 * Therefore, stop the queue when the free entries reaches
2974 * the maximum number of descriptors per skb.
2975 */
2976 txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
2977 txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
2978
c5aff182
TP
2979 /* Allocate memory for TX descriptors */
2980 txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2981 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2982 &txq->descs_phys, GFP_KERNEL);
f95936cc 2983 if (!txq->descs)
c5aff182 2984 return -ENOMEM;
c5aff182 2985
c5aff182
TP
2986 txq->last_desc = txq->size - 1;
2987
d441b688
ME
2988 txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb),
2989 GFP_KERNEL);
f95936cc 2990 if (!txq->tx_skb) {
c5aff182
TP
2991 dma_free_coherent(pp->dev->dev.parent,
2992 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2993 txq->descs, txq->descs_phys);
2994 return -ENOMEM;
2995 }
2adb719d
EG
2996
2997 /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
2998 txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
2999 txq->size * TSO_HEADER_SIZE,
3000 &txq->tso_hdrs_phys, GFP_KERNEL);
f95936cc 3001 if (!txq->tso_hdrs) {
2adb719d
EG
3002 kfree(txq->tx_skb);
3003 dma_free_coherent(pp->dev->dev.parent,
3004 txq->size * MVNETA_DESC_ALIGNED_SIZE,
3005 txq->descs, txq->descs_phys);
3006 return -ENOMEM;
3007 }
c5aff182 3008
50bf8cb6
GC
3009 /* Setup XPS mapping */
3010 if (txq_number > 1)
3011 cpu = txq->id % num_present_cpus();
3012 else
3013 cpu = pp->rxq_def % num_present_cpus();
3014 cpumask_set_cpu(cpu, &txq->affinity_mask);
3015 netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);
3016
c5aff182
TP
3017 return 0;
3018}
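
/* Worked example (illustrative) for the XPS setup above: with txq_number = 8
 * and two present CPUs, TX queues 0, 2, 4 and 6 are mapped to CPU 0 and
 * queues 1, 3, 5 and 7 to CPU 1 (txq->id % num_present_cpus()).
 */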
3019
4a188a63
JZ
3020static void mvneta_txq_hw_init(struct mvneta_port *pp,
3021 struct mvneta_tx_queue *txq)
3022{
3023 /* Set maximum bandwidth for enabled TXQs */
3024 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
3025 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
3026
3027 /* Set Tx descriptors queue starting address */
3028 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
3029 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
3030
3031 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
3032}
3033
3034/* Create and initialize a tx queue */
3035static int mvneta_txq_init(struct mvneta_port *pp,
3036 struct mvneta_tx_queue *txq)
3037{
3038 int ret;
3039
3040 ret = mvneta_txq_sw_init(pp, txq);
3041 if (ret < 0)
3042 return ret;
3043
3044 mvneta_txq_hw_init(pp, txq);
3045
3046 return 0;
3047}
3048
c5aff182 3049/* Free allocated resources when mvneta_txq_init() fails to allocate memory*/
4a188a63
JZ
3050static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
3051 struct mvneta_tx_queue *txq)
c5aff182 3052{
a29b6235
MW
3053 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
3054
c5aff182
TP
3055 kfree(txq->tx_skb);
3056
2adb719d
EG
3057 if (txq->tso_hdrs)
3058 dma_free_coherent(pp->dev->dev.parent,
3059 txq->size * TSO_HEADER_SIZE,
3060 txq->tso_hdrs, txq->tso_hdrs_phys);
c5aff182
TP
3061 if (txq->descs)
3062 dma_free_coherent(pp->dev->dev.parent,
3063 txq->size * MVNETA_DESC_ALIGNED_SIZE,
3064 txq->descs, txq->descs_phys);
3065
a29b6235
MW
3066 netdev_tx_reset_queue(nq);
3067
c5aff182
TP
3068 txq->descs = NULL;
3069 txq->last_desc = 0;
3070 txq->next_desc_to_proc = 0;
3071 txq->descs_phys = 0;
4a188a63 3072}
c5aff182 3073
4a188a63
JZ
3074static void mvneta_txq_hw_deinit(struct mvneta_port *pp,
3075 struct mvneta_tx_queue *txq)
3076{
c5aff182
TP
3077 /* Set minimum bandwidth for disabled TXQs */
3078 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
3079 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
3080
3081 /* Set Tx descriptors queue starting address and size */
3082 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
3083 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
3084}
3085
4a188a63
JZ
3086static void mvneta_txq_deinit(struct mvneta_port *pp,
3087 struct mvneta_tx_queue *txq)
3088{
3089 mvneta_txq_sw_deinit(pp, txq);
3090 mvneta_txq_hw_deinit(pp, txq);
3091}
3092
c5aff182
TP
3093/* Cleanup all Tx queues */
3094static void mvneta_cleanup_txqs(struct mvneta_port *pp)
3095{
3096 int queue;
3097
3098 for (queue = 0; queue < txq_number; queue++)
3099 mvneta_txq_deinit(pp, &pp->txqs[queue]);
3100}
3101
3102/* Cleanup all Rx queues */
3103static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
3104{
2dcf75e2
GC
3105 int queue;
3106
ca5902a6 3107 for (queue = 0; queue < rxq_number; queue++)
2dcf75e2 3108 mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
c5aff182
TP
3109}
3110
3111
3112/* Init all Rx queues */
3113static int mvneta_setup_rxqs(struct mvneta_port *pp)
3114{
2dcf75e2
GC
3115 int queue;
3116
3117 for (queue = 0; queue < rxq_number; queue++) {
3118 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
3119
3120 if (err) {
3121 netdev_err(pp->dev, "%s: can't create rxq=%d\n",
3122 __func__, queue);
3123 mvneta_cleanup_rxqs(pp);
3124 return err;
3125 }
c5aff182
TP
3126 }
3127
3128 return 0;
3129}
3130
3131/* Init all tx queues */
3132static int mvneta_setup_txqs(struct mvneta_port *pp)
3133{
3134 int queue;
3135
3136 for (queue = 0; queue < txq_number; queue++) {
3137 int err = mvneta_txq_init(pp, &pp->txqs[queue]);
3138 if (err) {
3139 netdev_err(pp->dev, "%s: can't create txq=%d\n",
3140 __func__, queue);
3141 mvneta_cleanup_txqs(pp);
3142 return err;
3143 }
3144 }
3145
3146 return 0;
3147}
3148
3149static void mvneta_start_dev(struct mvneta_port *pp)
3150{
6b125d63 3151 int cpu;
12bb03b4 3152
c5aff182
TP
3153 mvneta_max_rx_size_set(pp, pp->pkt_size);
3154 mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
3155
3156 /* start the Rx/Tx activity */
3157 mvneta_port_enable(pp);
3158
2636ac3c
MW
3159 if (!pp->neta_armada3700) {
3160 /* Enable polling on the port */
3161 for_each_online_cpu(cpu) {
3162 struct mvneta_pcpu_port *port =
3163 per_cpu_ptr(pp->ports, cpu);
12bb03b4 3164
2636ac3c
MW
3165 napi_enable(&port->napi);
3166 }
3167 } else {
3168 napi_enable(&pp->napi);
12bb03b4 3169 }
c5aff182 3170
2dcf75e2 3171 /* Unmask interrupts. It has to be done from each CPU */
6b125d63
GC
3172 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3173
898b2970
SS
3174 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3175 MVNETA_CAUSE_PHY_STATUS_CHANGE |
856b2cc5 3176 MVNETA_CAUSE_LINK_CHANGE);
c5aff182 3177
503f9aa9 3178 phylink_start(pp->phylink);
c5aff182
TP
3179 netif_tx_start_all_queues(pp->dev);
3180}
3181
3182static void mvneta_stop_dev(struct mvneta_port *pp)
3183{
12bb03b4
MR
3184 unsigned int cpu;
3185
503f9aa9 3186 phylink_stop(pp->phylink);
c5aff182 3187
2636ac3c
MW
3188 if (!pp->neta_armada3700) {
3189 for_each_online_cpu(cpu) {
3190 struct mvneta_pcpu_port *port =
3191 per_cpu_ptr(pp->ports, cpu);
12bb03b4 3192
2636ac3c
MW
3193 napi_disable(&port->napi);
3194 }
3195 } else {
3196 napi_disable(&pp->napi);
12bb03b4 3197 }
c5aff182
TP
3198
3199 netif_carrier_off(pp->dev);
3200
3201 mvneta_port_down(pp);
3202 netif_tx_stop_all_queues(pp->dev);
3203
3204 /* Stop the port activity */
3205 mvneta_port_disable(pp);
3206
3207 /* Clear all ethernet port interrupts */
db488c10 3208 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
c5aff182
TP
3209
3210 /* Mask all ethernet port interrupts */
db488c10 3211 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
c5aff182
TP
3212
3213 mvneta_tx_reset(pp);
3214 mvneta_rx_reset(pp);
3215}
3216
db5dd0db
MW
3217static void mvneta_percpu_enable(void *arg)
3218{
3219 struct mvneta_port *pp = arg;
3220
3221 enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
3222}
3223
3224static void mvneta_percpu_disable(void *arg)
3225{
3226 struct mvneta_port *pp = arg;
3227
3228 disable_percpu_irq(pp->dev->irq);
3229}
3230
c5aff182
TP
3231/* Change the device mtu */
3232static int mvneta_change_mtu(struct net_device *dev, int mtu)
3233{
3234 struct mvneta_port *pp = netdev_priv(dev);
3235 int ret;
3236
5777987e
JW
3237 if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
3238 netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
3239 mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
3240 mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
3241 }
c5aff182
TP
3242
3243 dev->mtu = mtu;
3244
b65657fc 3245 if (!netif_running(dev)) {
dc35a10f
MW
3246 if (pp->bm_priv)
3247 mvneta_bm_update_mtu(pp, mtu);
3248
b65657fc 3249 netdev_update_features(dev);
c5aff182 3250 return 0;
b65657fc 3251 }
c5aff182 3252
6a20c175 3253 /* The interface is running, so we have to force a
a92dbd96 3254 * reallocation of the queues
c5aff182
TP
3255 */
3256 mvneta_stop_dev(pp);
db5dd0db 3257 on_each_cpu(mvneta_percpu_disable, pp, true);
c5aff182
TP
3258
3259 mvneta_cleanup_txqs(pp);
3260 mvneta_cleanup_rxqs(pp);
3261
dc35a10f
MW
3262 if (pp->bm_priv)
3263 mvneta_bm_update_mtu(pp, mtu);
3264
a92dbd96 3265 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
c5aff182
TP
3266
3267 ret = mvneta_setup_rxqs(pp);
3268 if (ret) {
a92dbd96 3269 netdev_err(dev, "unable to setup rxqs after MTU change\n");
c5aff182
TP
3270 return ret;
3271 }
3272
a92dbd96
EG
3273 ret = mvneta_setup_txqs(pp);
3274 if (ret) {
3275 netdev_err(dev, "unable to setup txqs after MTU change\n");
3276 return ret;
3277 }
c5aff182 3278
db5dd0db 3279 on_each_cpu(mvneta_percpu_enable, pp, true);
c5aff182 3280 mvneta_start_dev(pp);
c5aff182 3281
b65657fc
SG
3282 netdev_update_features(dev);
3283
c5aff182
TP
3284 return 0;
3285}
3286
b65657fc
SG
3287static netdev_features_t mvneta_fix_features(struct net_device *dev,
3288 netdev_features_t features)
3289{
3290 struct mvneta_port *pp = netdev_priv(dev);
3291
3292 if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
3293 features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
3294 netdev_info(dev,
3295 "Disable IP checksum for MTU greater than %dB\n",
3296 pp->tx_csum_limit);
3297 }
3298
3299 return features;
3300}
3301
8cc3e439
TP
3302/* Get mac address */
3303static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
3304{
3305 u32 mac_addr_l, mac_addr_h;
3306
3307 mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
3308 mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
3309 addr[0] = (mac_addr_h >> 24) & 0xFF;
3310 addr[1] = (mac_addr_h >> 16) & 0xFF;
3311 addr[2] = (mac_addr_h >> 8) & 0xFF;
3312 addr[3] = mac_addr_h & 0xFF;
3313 addr[4] = (mac_addr_l >> 8) & 0xFF;
3314 addr[5] = mac_addr_l & 0xFF;
3315}
3316
c5aff182
TP
3317/* Handle setting mac address */
3318static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
3319{
3320 struct mvneta_port *pp = netdev_priv(dev);
e68de360
EG
3321 struct sockaddr *sockaddr = addr;
3322 int ret;
c5aff182 3323
e68de360
EG
3324 ret = eth_prepare_mac_addr_change(dev, addr);
3325 if (ret < 0)
3326 return ret;
c5aff182
TP
3327 /* Remove previous address table entry */
3328 mvneta_mac_addr_set(pp, dev->dev_addr, -1);
3329
3330 /* Set new addr in hw */
90b74c01 3331 mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);
c5aff182 3332
e68de360 3333 eth_commit_mac_addr_change(dev, addr);
c5aff182
TP
3334 return 0;
3335}
3336
503f9aa9
RK
3337static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
3338 struct phylink_link_state *state)
3339{
3340 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
3341
22f4bf8a 3342 /* We only support QSGMII, SGMII, 802.3z and RGMII modes */
503f9aa9
RK
3343 if (state->interface != PHY_INTERFACE_MODE_NA &&
3344 state->interface != PHY_INTERFACE_MODE_QSGMII &&
3345 state->interface != PHY_INTERFACE_MODE_SGMII &&
22f4bf8a 3346 !phy_interface_mode_is_8023z(state->interface) &&
503f9aa9
RK
3347 !phy_interface_mode_is_rgmii(state->interface)) {
3348 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
3349 return;
3350 }
3351
3352 /* Allow all the expected bits */
3353 phylink_set(mask, Autoneg);
3354 phylink_set_port_modes(mask);
3355
4932a918
RK
3356 /* Asymmetric pause is unsupported */
3357 phylink_set(mask, Pause);
da58a931 3358
83e65df6
MC
3359 /* Half-duplex at speeds higher than 100Mbit is unsupported */
3360 phylink_set(mask, 1000baseT_Full);
3361 phylink_set(mask, 1000baseX_Full);
22f4bf8a
RK
3362
3363 if (!phy_interface_mode_is_8023z(state->interface)) {
3364 /* 10M and 100M are only supported in non-802.3z mode */
3365 phylink_set(mask, 10baseT_Half);
3366 phylink_set(mask, 10baseT_Full);
3367 phylink_set(mask, 100baseT_Half);
3368 phylink_set(mask, 100baseT_Full);
3369 }
503f9aa9
RK
3370
3371 bitmap_and(supported, supported, mask,
3372 __ETHTOOL_LINK_MODE_MASK_NBITS);
3373 bitmap_and(state->advertising, state->advertising, mask,
3374 __ETHTOOL_LINK_MODE_MASK_NBITS);
3375}
3376
3377static int mvneta_mac_link_state(struct net_device *ndev,
3378 struct phylink_link_state *state)
c5aff182
TP
3379{
3380 struct mvneta_port *pp = netdev_priv(ndev);
503f9aa9 3381 u32 gmac_stat;
c5aff182 3382
503f9aa9 3383 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
c5aff182 3384
503f9aa9
RK
3385 if (gmac_stat & MVNETA_GMAC_SPEED_1000)
3386 state->speed = SPEED_1000;
3387 else if (gmac_stat & MVNETA_GMAC_SPEED_100)
3388 state->speed = SPEED_100;
3389 else
3390 state->speed = SPEED_10;
c5aff182 3391
503f9aa9
RK
3392 state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE);
3393 state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
3394 state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
c5aff182 3395
503f9aa9 3396 state->pause = 0;
4932a918
RK
3397 if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE)
3398 state->pause |= MLO_PAUSE_RX;
3399 if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE)
3400 state->pause |= MLO_PAUSE_TX;
503f9aa9
RK
3401
3402 return 1;
3403}
3404
22f4bf8a
RK
3405static void mvneta_mac_an_restart(struct net_device *ndev)
3406{
3407 struct mvneta_port *pp = netdev_priv(ndev);
3408 u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3409
3410 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3411 gmac_an | MVNETA_GMAC_INBAND_RESTART_AN);
3412 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3413 gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN);
3414}
3415
503f9aa9
RK
3416static void mvneta_mac_config(struct net_device *ndev, unsigned int mode,
3417 const struct phylink_link_state *state)
3418{
3419 struct mvneta_port *pp = netdev_priv(ndev);
22f4bf8a 3420 u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
503f9aa9 3421 u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
da58a931 3422 u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4);
503f9aa9
RK
3423 u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
3424 u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3425
22f4bf8a 3426 new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X;
32699954
RK
3427 new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE |
3428 MVNETA_GMAC2_PORT_RESET);
da58a931 3429 new_ctrl4 = gmac_ctrl4 & ~(MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE);
503f9aa9
RK
3430 new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
3431 new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE |
3432 MVNETA_GMAC_INBAND_RESTART_AN |
3433 MVNETA_GMAC_CONFIG_MII_SPEED |
3434 MVNETA_GMAC_CONFIG_GMII_SPEED |
3435 MVNETA_GMAC_AN_SPEED_EN |
22f4bf8a
RK
3436 MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL |
3437 MVNETA_GMAC_CONFIG_FLOW_CTRL |
503f9aa9
RK
3438 MVNETA_GMAC_AN_FLOW_CTRL_EN |
3439 MVNETA_GMAC_CONFIG_FULL_DUPLEX |
3440 MVNETA_GMAC_AN_DUPLEX_EN);
3441
32699954
RK
3442 /* Even though it might look weird, when we're configured in
3443 * SGMII or QSGMII mode, the RGMII bit needs to be set.
3444 */
3445 new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII;
3446
3447 if (state->interface == PHY_INTERFACE_MODE_QSGMII ||
22f4bf8a
RK
3448 state->interface == PHY_INTERFACE_MODE_SGMII ||
3449 phy_interface_mode_is_8023z(state->interface))
32699954
RK
3450 new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE;
3451
4932a918
RK
3452 if (phylink_test(state->advertising, Pause))
3453 new_an |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
3454 if (state->pause & MLO_PAUSE_TXRX_MASK)
3455 new_an |= MVNETA_GMAC_CONFIG_FLOW_CTRL;
3456
503f9aa9
RK
3457 if (!phylink_autoneg_inband(mode)) {
3458 /* Phy or fixed speed */
3459 if (state->duplex)
3460 new_an |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
3461
da58a931 3462 if (state->speed == SPEED_1000 || state->speed == SPEED_2500)
503f9aa9
RK
3463 new_an |= MVNETA_GMAC_CONFIG_GMII_SPEED;
3464 else if (state->speed == SPEED_100)
3465 new_an |= MVNETA_GMAC_CONFIG_MII_SPEED;
22f4bf8a 3466 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
503f9aa9
RK
3467 /* SGMII mode receives the state from the PHY */
3468 new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE;
3469 new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
3470 new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
3471 MVNETA_GMAC_FORCE_LINK_PASS)) |
3472 MVNETA_GMAC_INBAND_AN_ENABLE |
3473 MVNETA_GMAC_AN_SPEED_EN |
3474 MVNETA_GMAC_AN_DUPLEX_EN;
22f4bf8a
RK
3475 } else {
3476 /* 802.3z negotiation - only 1000base-X */
3477 new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X;
3478 new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
3479 new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
3480 MVNETA_GMAC_FORCE_LINK_PASS)) |
3481 MVNETA_GMAC_INBAND_AN_ENABLE |
3482 MVNETA_GMAC_CONFIG_GMII_SPEED |
3483 /* The MAC only supports FD mode */
3484 MVNETA_GMAC_CONFIG_FULL_DUPLEX;
4932a918
RK
3485
3486 if (state->pause & MLO_PAUSE_AN && state->an_enabled)
3487 new_an |= MVNETA_GMAC_AN_FLOW_CTRL_EN;
503f9aa9 3488 }
c5aff182 3489
503f9aa9
RK
3490 /* Armada 370 documentation says we can only change the port mode
3491 * and in-band enable when the link is down, so force it down
3492 * while making these changes. We also do this for GMAC_CTRL2 */
22f4bf8a
RK
3493 if ((new_ctrl0 ^ gmac_ctrl0) & MVNETA_GMAC0_PORT_1000BASE_X ||
3494 (new_ctrl2 ^ gmac_ctrl2) & MVNETA_GMAC2_INBAND_AN_ENABLE ||
503f9aa9
RK
3495 (new_an ^ gmac_an) & MVNETA_GMAC_INBAND_AN_ENABLE) {
3496 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3497 (gmac_an & ~MVNETA_GMAC_FORCE_LINK_PASS) |
3498 MVNETA_GMAC_FORCE_LINK_DOWN);
fc548b99 3499 }
503f9aa9 3500
da58a931
MC
3501 /* When at 2.5G, the link partner can send frames with shortened
3502 * preambles.
3503 */
3504 if (state->speed == SPEED_2500)
3505 new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE;
3506
22f4bf8a
RK
3507 if (new_ctrl0 != gmac_ctrl0)
3508 mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
503f9aa9
RK
3509 if (new_ctrl2 != gmac_ctrl2)
3510 mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
da58a931
MC
3511 if (new_ctrl4 != gmac_ctrl4)
3512 mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4);
503f9aa9
RK
3513 if (new_clk != gmac_clk)
3514 mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk);
3515 if (new_an != gmac_an)
3516 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an);
32699954
RK
3517
3518 if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) {
3519 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
3520 MVNETA_GMAC2_PORT_RESET) != 0)
3521 continue;
3522 }
fc548b99 3523}
c5aff182 3524
6d81f451
RK
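/* Enable or disable LPI (EEE) requests via the LPI control 1 register. */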
3525static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
3526{
3527 u32 lpi_ctl1;
3528
3529 lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
3530 if (enable)
3531 lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE;
3532 else
3533 lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE;
3534 mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
3535}
3536
c6ab3008
FF
3537static void mvneta_mac_link_down(struct net_device *ndev, unsigned int mode,
3538 phy_interface_t interface)
fc548b99
RK
3539{
3540 struct mvneta_port *pp = netdev_priv(ndev);
3541 u32 val;
3542
503f9aa9
RK
3543 mvneta_port_down(pp);
3544
3545 if (!phylink_autoneg_inband(mode)) {
fc548b99
RK
3546 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3547 val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
3548 val |= MVNETA_GMAC_FORCE_LINK_DOWN;
3549 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
3550 }
6d81f451
RK
3551
3552 pp->eee_active = false;
3553 mvneta_set_eee(pp, false);
fc548b99
RK
3554}
3555
503f9aa9 3556static void mvneta_mac_link_up(struct net_device *ndev, unsigned int mode,
c6ab3008 3557 phy_interface_t interface,
503f9aa9 3558 struct phy_device *phy)
fc548b99
RK
3559{
3560 struct mvneta_port *pp = netdev_priv(ndev);
3561 u32 val;
3562
503f9aa9 3563 if (!phylink_autoneg_inband(mode)) {
fc548b99
RK
3564 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3565 val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
3566 val |= MVNETA_GMAC_FORCE_LINK_PASS;
3567 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
c5aff182
TP
3568 }
3569
fc548b99 3570 mvneta_port_up(pp);
6d81f451
RK
3571
3572 if (phy && pp->eee_enabled) {
3573 pp->eee_active = phy_init_eee(phy, 0) >= 0;
3574 mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
3575 }
fc548b99
RK
3576}
3577
503f9aa9
RK
3578static const struct phylink_mac_ops mvneta_phylink_ops = {
3579 .validate = mvneta_validate,
3580 .mac_link_state = mvneta_mac_link_state,
22f4bf8a 3581 .mac_an_restart = mvneta_mac_an_restart,
503f9aa9
RK
3582 .mac_config = mvneta_mac_config,
3583 .mac_link_down = mvneta_mac_link_down,
3584 .mac_link_up = mvneta_mac_link_up,
3585};
c5aff182
TP
3586
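/* Attach the PHY through phylink and propagate its Wake-on-LAN
 * capability to the net device.
 */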
3587static int mvneta_mdio_probe(struct mvneta_port *pp)
3588{
82960fff 3589 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
503f9aa9 3590 int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0);
c5aff182 3591
503f9aa9
RK
3592 if (err)
3593 netdev_err(pp->dev, "could not attach PHY: %d\n", err);
c5aff182 3594
503f9aa9 3595 phylink_ethtool_get_wol(pp->phylink, &wol);
82960fff
JZ
3596 device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
3597
503f9aa9 3598 return err;
c5aff182
TP
3599}
3600
3601static void mvneta_mdio_remove(struct mvneta_port *pp)
3602{
503f9aa9 3603 phylink_disconnect_phy(pp->phylink);
c5aff182
TP
3604}
3605
120cfa50
GC
3606/* Electing a CPU must be done in an atomic way: it should be done
3607 * after or before the removal/insertion of a CPU and this function is
3608 * not reentrant.
3609 */
f8642885
MR
3610static void mvneta_percpu_elect(struct mvneta_port *pp)
3611{
cad5d847
GC
3612 int elected_cpu = 0, max_cpu, cpu, i = 0;
3613
3614	/* Use the CPU associated with the rxq when it is online; in all
3615	 * other cases, use CPU 0, which can't be offline.
3616 */
3617 if (cpu_online(pp->rxq_def))
3618 elected_cpu = pp->rxq_def;
f8642885 3619
2dcf75e2 3620 max_cpu = num_present_cpus();
f8642885
MR
3621
3622 for_each_online_cpu(cpu) {
2dcf75e2
GC
3623 int rxq_map = 0, txq_map = 0;
3624 int rxq;
3625
3626 for (rxq = 0; rxq < rxq_number; rxq++)
3627 if ((rxq % max_cpu) == cpu)
3628 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
3629
cad5d847 3630 if (cpu == elected_cpu)
50bf8cb6
GC
3631			/* Map the default receive queue to the
3632 * elected CPU
f8642885 3633 */
2dcf75e2 3634 rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);
50bf8cb6
GC
3635
3636 /* We update the TX queue map only if we have one
3637 * queue. In this case we associate the TX queue to
3638 * the CPU bound to the default RX queue
3639 */
3640 if (txq_number == 1)
cad5d847 3641 txq_map = (cpu == elected_cpu) ?
50bf8cb6
GC
3642 MVNETA_CPU_TXQ_ACCESS(1) : 0;
3643 else
3644 txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
3645 MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
3646
2dcf75e2
GC
3647 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
3648
3649 /* Update the interrupt mask on each CPU according the
3650 * new mapping
3651 */
3652 smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
3653 pp, true);
f8642885 3654 i++;
2dcf75e2 3655
f8642885
MR
3656 }
3657 }
3658
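/* CPU hotplug "online" callback: quiesce the other CPUs' NAPI contexts,
 * enable NAPI and per-CPU interrupts on the new CPU, re-elect the
 * CPU/queue mapping, then unmask the port interrupts again.
 */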
84a3f4db 3659static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
f8642885 3660{
84a3f4db
SAS
3661 int other_cpu;
3662 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
3663 node_online);
f8642885
MR
3664 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
3665
f8642885 3666
84a3f4db
SAS
3667 spin_lock(&pp->lock);
3668 /*
3669 * Configuring the driver for a new CPU while the driver is
3670 * stopping is racy, so just avoid it.
3671 */
3672 if (pp->is_stopped) {
3673 spin_unlock(&pp->lock);
3674 return 0;
3675 }
3676 netif_tx_stop_all_queues(pp->dev);
f8642885 3677
84a3f4db
SAS
3678 /*
3679	 * We have to synchronise on the napi of each CPU except the one
3680 * just being woken up
3681 */
3682 for_each_online_cpu(other_cpu) {
3683 if (other_cpu != cpu) {
3684 struct mvneta_pcpu_port *other_port =
3685 per_cpu_ptr(pp->ports, other_cpu);
3686
3687 napi_synchronize(&other_port->napi);
f8642885 3688 }
84a3f4db 3689 }
f8642885 3690
84a3f4db
SAS
3691 /* Mask all ethernet port interrupts */
3692 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3693 napi_enable(&port->napi);
f8642885 3694
84a3f4db
SAS
3695 /*
3696 * Enable per-CPU interrupts on the CPU that is
3697 * brought up.
3698 */
3699 mvneta_percpu_enable(pp);
2dcf75e2 3700
84a3f4db
SAS
3701	/*
3702	 * Re-elect the CPU handling the default queues and update the
3703	 * per-CPU queue mapping accordingly.
3704	 */
3705 mvneta_percpu_elect(pp);
2dcf75e2 3706
84a3f4db
SAS
3707 /* Unmask all ethernet port interrupts */
3708 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3709 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3710 MVNETA_CAUSE_PHY_STATUS_CHANGE |
856b2cc5 3711 MVNETA_CAUSE_LINK_CHANGE);
84a3f4db
SAS
3712 netif_tx_start_all_queues(pp->dev);
3713 spin_unlock(&pp->lock);
3714 return 0;
3715}
f8642885 3716
84a3f4db
SAS
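/* CPU hotplug "down prepare" callback: mask the port interrupts, then
 * quiesce and disable this CPU's NAPI and per-CPU interrupts.
 */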
3717static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
3718{
3719 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
3720 node_online);
3721 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
f8642885 3722
84a3f4db
SAS
3723 /*
3724 * Thanks to this lock we are sure that any pending cpu election is
3725 * done.
3726 */
3727 spin_lock(&pp->lock);
3728 /* Mask all ethernet port interrupts */
3729 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3730 spin_unlock(&pp->lock);
f8642885 3731
84a3f4db
SAS
3732 napi_synchronize(&port->napi);
3733 napi_disable(&port->napi);
3734 /* Disable per-CPU interrupts on the CPU that is brought down. */
3735 mvneta_percpu_disable(pp);
3736 return 0;
3737}
3738
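/* CPU hotplug "dead" callback: re-elect the CPU/queue mapping, unmask
 * the port interrupts and restart the TX queues.
 */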
3739static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
3740{
3741 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
3742 node_dead);
3743
3744	/* Check if a new CPU must be elected now that this one is down */
3745 spin_lock(&pp->lock);
3746 mvneta_percpu_elect(pp);
3747 spin_unlock(&pp->lock);
3748 /* Unmask all ethernet port interrupts */
3749 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3750 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3751 MVNETA_CAUSE_PHY_STATUS_CHANGE |
856b2cc5 3752 MVNETA_CAUSE_LINK_CHANGE);
84a3f4db
SAS
3753 netif_tx_start_all_queues(pp->dev);
3754 return 0;
f8642885
MR
3755}
3756
c5aff182
TP
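/* Open the port: allocate the RX/TX queues, request the (per-CPU) IRQ,
 * register the CPU hotplug callbacks (except on Armada 3700), probe the
 * PHY and start the device.
 */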
3757static int mvneta_open(struct net_device *dev)
3758{
3759 struct mvneta_port *pp = netdev_priv(dev);
6b125d63 3760 int ret;
c5aff182 3761
c5aff182 3762 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
7e47fd84 3763 pp->frag_size = PAGE_SIZE;
c5aff182
TP
3764
3765 ret = mvneta_setup_rxqs(pp);
3766 if (ret)
3767 return ret;
3768
3769 ret = mvneta_setup_txqs(pp);
3770 if (ret)
3771 goto err_cleanup_rxqs;
3772
3773 /* Connect to port interrupt line */
2636ac3c
MW
3774 if (pp->neta_armada3700)
3775 ret = request_irq(pp->dev->irq, mvneta_isr, 0,
3776 dev->name, pp);
3777 else
3778 ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
3779 dev->name, pp->ports);
c5aff182
TP
3780 if (ret) {
3781 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
3782 goto err_cleanup_txqs;
3783 }
3784
2636ac3c
MW
3785 if (!pp->neta_armada3700) {
3786		/* Enable per-CPU interrupts on all the CPUs to handle our RX
3787 * queue interrupts
3788 */
3789 on_each_cpu(mvneta_percpu_enable, pp, true);
2dcf75e2 3790
2636ac3c
MW
3791 pp->is_stopped = false;
3792 /* Register a CPU notifier to handle the case where our CPU
3793 * might be taken offline.
3794 */
3795 ret = cpuhp_state_add_instance_nocalls(online_hpstate,
3796 &pp->node_online);
3797 if (ret)
3798 goto err_free_irq;
84a3f4db 3799
2636ac3c
MW
3800 ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
3801 &pp->node_dead);
3802 if (ret)
3803 goto err_free_online_hp;
3804 }
f8642885 3805
c5aff182
TP
3806 ret = mvneta_mdio_probe(pp);
3807 if (ret < 0) {
3808 netdev_err(dev, "cannot probe MDIO bus\n");
84a3f4db 3809 goto err_free_dead_hp;
c5aff182
TP
3810 }
3811
3812 mvneta_start_dev(pp);
3813
3814 return 0;
3815
84a3f4db 3816err_free_dead_hp:
2636ac3c
MW
3817 if (!pp->neta_armada3700)
3818 cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
3819 &pp->node_dead);
84a3f4db 3820err_free_online_hp:
2636ac3c
MW
3821 if (!pp->neta_armada3700)
3822 cpuhp_state_remove_instance_nocalls(online_hpstate,
3823 &pp->node_online);
c5aff182 3824err_free_irq:
2636ac3c
MW
3825 if (pp->neta_armada3700) {
3826 free_irq(pp->dev->irq, pp);
3827 } else {
3828 on_each_cpu(mvneta_percpu_disable, pp, true);
3829 free_percpu_irq(pp->dev->irq, pp->ports);
3830 }
c5aff182
TP
3831err_cleanup_txqs:
3832 mvneta_cleanup_txqs(pp);
3833err_cleanup_rxqs:
3834 mvneta_cleanup_rxqs(pp);
3835 return ret;
3836}
3837
3838/* Stop the port, free port interrupt line */
3839static int mvneta_stop(struct net_device *dev)
3840{
3841 struct mvneta_port *pp = netdev_priv(dev);
3842
2636ac3c
MW
3843 if (!pp->neta_armada3700) {
3844		/* Inform that we are stopping, so that we don't set up the
3845 * driver for new CPUs in the notifiers. The code of the
3846 * notifier for CPU online is protected by the same spinlock,
3847		 * so when we get the lock, the notifier work is done.
3848 */
3849 spin_lock(&pp->lock);
3850 pp->is_stopped = true;
3851 spin_unlock(&pp->lock);
1c2722a9 3852
2636ac3c
MW
3853 mvneta_stop_dev(pp);
3854 mvneta_mdio_remove(pp);
84a3f4db 3855
d26aac2d
DC
3856 cpuhp_state_remove_instance_nocalls(online_hpstate,
3857 &pp->node_online);
3858 cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
3859 &pp->node_dead);
2636ac3c
MW
3860 on_each_cpu(mvneta_percpu_disable, pp, true);
3861 free_percpu_irq(dev->irq, pp->ports);
3862 } else {
3863 mvneta_stop_dev(pp);
3864 mvneta_mdio_remove(pp);
3865 free_irq(dev->irq, pp);
3866 }
3867
c5aff182
TP
3868 mvneta_cleanup_rxqs(pp);
3869 mvneta_cleanup_txqs(pp);
c5aff182
TP
3870
3871 return 0;
3872}
3873
15f59456
TP
3874static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3875{
503f9aa9 3876 struct mvneta_port *pp = netdev_priv(dev);
15f59456 3877
503f9aa9 3878 return phylink_mii_ioctl(pp->phylink, ifr, cmd);
15f59456
TP
3879}
3880
c5aff182
TP
3881/* Ethtool methods */
3882
013ad40d 3883/* Set link ksettings (phy address, speed) for ethtool */
2dc0d2b4
BX
3884static int
3885mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
3886 const struct ethtool_link_ksettings *cmd)
c5aff182 3887{
013ad40d 3888 struct mvneta_port *pp = netdev_priv(ndev);
0c0744fc 3889
503f9aa9
RK
3890 return phylink_ethtool_ksettings_set(pp->phylink, cmd);
3891}
0c0744fc 3892
503f9aa9
RK
3893/* Get link ksettings for ethtool */
3894static int
3895mvneta_ethtool_get_link_ksettings(struct net_device *ndev,
3896 struct ethtool_link_ksettings *cmd)
3897{
3898 struct mvneta_port *pp = netdev_priv(ndev);
0c0744fc 3899
503f9aa9
RK
3900 return phylink_ethtool_ksettings_get(pp->phylink, cmd);
3901}
0c0744fc 3902
503f9aa9
RK
3903static int mvneta_ethtool_nway_reset(struct net_device *dev)
3904{
3905 struct mvneta_port *pp = netdev_priv(dev);
0c0744fc 3906
503f9aa9 3907 return phylink_ethtool_nway_reset(pp->phylink);
c5aff182
TP
3908}
3909
3910/* Set interrupt coalescing for ethtool */
3911static int mvneta_ethtool_set_coalesce(struct net_device *dev,
3912 struct ethtool_coalesce *c)
3913{
3914 struct mvneta_port *pp = netdev_priv(dev);
3915 int queue;
3916
3917 for (queue = 0; queue < rxq_number; queue++) {
3918 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
3919 rxq->time_coal = c->rx_coalesce_usecs;
3920 rxq->pkts_coal = c->rx_max_coalesced_frames;
3921 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
3922 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
3923 }
3924
3925 for (queue = 0; queue < txq_number; queue++) {
3926 struct mvneta_tx_queue *txq = &pp->txqs[queue];
3927 txq->done_pkts_coal = c->tx_max_coalesced_frames;
3928 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
3929 }
3930
3931 return 0;
3932}
3933
3934/* Get interrupt coalescing for ethtool */
3935static int mvneta_ethtool_get_coalesce(struct net_device *dev,
3936 struct ethtool_coalesce *c)
3937{
3938 struct mvneta_port *pp = netdev_priv(dev);
3939
3940 c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
3941 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;
3942
3943 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
3944 return 0;
3945}
3946
3947
3948static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
3949 struct ethtool_drvinfo *drvinfo)
3950{
3951 strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
3952 sizeof(drvinfo->driver));
3953 strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
3954 sizeof(drvinfo->version));
3955 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
3956 sizeof(drvinfo->bus_info));
3957}
3958
3959
3960static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
3961 struct ethtool_ringparam *ring)
3962{
3963 struct mvneta_port *pp = netdev_priv(netdev);
3964
3965 ring->rx_max_pending = MVNETA_MAX_RXD;
3966 ring->tx_max_pending = MVNETA_MAX_TXD;
3967 ring->rx_pending = pp->rx_ring_size;
3968 ring->tx_pending = pp->tx_ring_size;
3969}
3970
3971static int mvneta_ethtool_set_ringparam(struct net_device *dev,
3972 struct ethtool_ringparam *ring)
3973{
3974 struct mvneta_port *pp = netdev_priv(dev);
3975
3976 if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
3977 return -EINVAL;
3978 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
3979 ring->rx_pending : MVNETA_MAX_RXD;
8eef5f97
EG
3980
3981 pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
3982 MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
3983 if (pp->tx_ring_size != ring->tx_pending)
3984 netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
3985 pp->tx_ring_size, ring->tx_pending);
c5aff182
TP
3986
3987 if (netif_running(dev)) {
3988 mvneta_stop(dev);
3989 if (mvneta_open(dev)) {
3990 netdev_err(dev,
3991 "error on opening device after ring param change\n");
3992 return -ENOMEM;
3993 }
3994 }
3995
3996 return 0;
3997}
3998
4932a918
RK
3999static void mvneta_ethtool_get_pauseparam(struct net_device *dev,
4000 struct ethtool_pauseparam *pause)
4001{
4002 struct mvneta_port *pp = netdev_priv(dev);
4003
4004 phylink_ethtool_get_pauseparam(pp->phylink, pause);
4005}
4006
4007static int mvneta_ethtool_set_pauseparam(struct net_device *dev,
4008 struct ethtool_pauseparam *pause)
4009{
4010 struct mvneta_port *pp = netdev_priv(dev);
4011
4012 return phylink_ethtool_set_pauseparam(pp->phylink, pause);
4013}
4014
9b0cdefa
RK
4015static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
4016 u8 *data)
4017{
4018 if (sset == ETH_SS_STATS) {
4019 int i;
4020
4021 for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
4022 memcpy(data + i * ETH_GSTRING_LEN,
4023 mvneta_statistics[i].name, ETH_GSTRING_LEN);
4024 }
4025}
4026
4027static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
4028{
4029 const struct mvneta_statistic *s;
4030 void __iomem *base = pp->base;
6d81f451
RK
4031 u32 high, low;
4032 u64 val;
9b0cdefa
RK
4033 int i;
4034
4035 for (i = 0, s = mvneta_statistics;
4036 s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
4037 s++, i++) {
6d81f451
RK
4038 val = 0;
4039
9b0cdefa
RK
4040 switch (s->type) {
4041 case T_REG_32:
4042 val = readl_relaxed(base + s->offset);
4043 break;
4044 case T_REG_64:
4045 /* Docs say to read low 32-bit then high */
4046 low = readl_relaxed(base + s->offset);
4047 high = readl_relaxed(base + s->offset + 4);
6d81f451
RK
4048 val = (u64)high << 32 | low;
4049 break;
4050 case T_SW:
4051 switch (s->offset) {
4052 case ETHTOOL_STAT_EEE_WAKEUP:
4053 val = phylink_get_eee_err(pp->phylink);
4054 break;
17a96da6
GC
4055 case ETHTOOL_STAT_SKB_ALLOC_ERR:
4056 val = pp->rxqs[0].skb_alloc_err;
4057 break;
4058 case ETHTOOL_STAT_REFILL_ERR:
4059 val = pp->rxqs[0].refill_err;
4060 break;
6d81f451 4061 }
9b0cdefa
RK
4062 break;
4063 }
6d81f451
RK
4064
4065 pp->ethtool_stats[i] += val;
9b0cdefa
RK
4066 }
4067}
4068
4069static void mvneta_ethtool_get_stats(struct net_device *dev,
4070 struct ethtool_stats *stats, u64 *data)
4071{
4072 struct mvneta_port *pp = netdev_priv(dev);
4073 int i;
4074
4075 mvneta_ethtool_update_stats(pp);
4076
4077 for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
4078 *data++ = pp->ethtool_stats[i];
4079}
4080
4081static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
4082{
4083 if (sset == ETH_SS_STATS)
4084 return ARRAY_SIZE(mvneta_statistics);
4085 return -EOPNOTSUPP;
4086}
4087
9a401dea
GC
4088static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
4089{
4090 return MVNETA_RSS_LU_TABLE_SIZE;
4091}
4092
4093static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
4094 struct ethtool_rxnfc *info,
4095 u32 *rules __always_unused)
4096{
4097 switch (info->cmd) {
4098 case ETHTOOL_GRXRINGS:
4099 info->data = rxq_number;
4100 return 0;
4101 case ETHTOOL_GRXFH:
4102 return -EOPNOTSUPP;
4103 default:
4104 return -EOPNOTSUPP;
4105 }
4106}
4107
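/* Apply a new RSS indirection table: with NAPI quiesced, take the new
 * default RX queue from the first indirection entry, update the port
 * config and unicast mapping, and re-elect the owning CPU.
 */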
4108static int mvneta_config_rss(struct mvneta_port *pp)
4109{
4110 int cpu;
4111 u32 val;
4112
4113 netif_tx_stop_all_queues(pp->dev);
4114
6b125d63 4115 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
9a401dea 4116
0f5c6c30
JZ
4117 if (!pp->neta_armada3700) {
4118 /* We have to synchronise on the napi of each CPU */
4119 for_each_online_cpu(cpu) {
4120 struct mvneta_pcpu_port *pcpu_port =
4121 per_cpu_ptr(pp->ports, cpu);
9a401dea 4122
0f5c6c30
JZ
4123 napi_synchronize(&pcpu_port->napi);
4124 napi_disable(&pcpu_port->napi);
4125 }
4126 } else {
4127 napi_synchronize(&pp->napi);
4128 napi_disable(&pp->napi);
9a401dea
GC
4129 }
4130
4131 pp->rxq_def = pp->indir[0];
4132
4133 /* Update unicast mapping */
4134 mvneta_set_rx_mode(pp->dev);
4135
4136	/* Update the portCfg register with the new default RXQ for all traffic types */
4137 val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
4138 mvreg_write(pp, MVNETA_PORT_CONFIG, val);
4139
4140 /* Update the elected CPU matching the new rxq_def */
120cfa50 4141 spin_lock(&pp->lock);
9a401dea 4142 mvneta_percpu_elect(pp);
120cfa50 4143 spin_unlock(&pp->lock);
9a401dea 4144
0f5c6c30
JZ
4145 if (!pp->neta_armada3700) {
4146 /* We have to synchronise on the napi of each CPU */
4147 for_each_online_cpu(cpu) {
4148 struct mvneta_pcpu_port *pcpu_port =
4149 per_cpu_ptr(pp->ports, cpu);
9a401dea 4150
0f5c6c30
JZ
4151 napi_enable(&pcpu_port->napi);
4152 }
4153 } else {
4154 napi_enable(&pp->napi);
9a401dea
GC
4155 }
4156
4157 netif_tx_start_all_queues(pp->dev);
4158
4159 return 0;
4160}
4161
4162static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
4163 const u8 *key, const u8 hfunc)
4164{
4165 struct mvneta_port *pp = netdev_priv(dev);
2636ac3c
MW
4166
4167 /* Current code for Armada 3700 doesn't support RSS features yet */
4168 if (pp->neta_armada3700)
4169 return -EOPNOTSUPP;
4170
9a401dea
GC
4171 /* We require at least one supported parameter to be changed
4172 * and no change in any of the unsupported parameters
4173 */
4174 if (key ||
4175 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
4176 return -EOPNOTSUPP;
4177
4178 if (!indir)
4179 return 0;
4180
4181 memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);
4182
4183 return mvneta_config_rss(pp);
4184}
4185
4186static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
4187 u8 *hfunc)
4188{
4189 struct mvneta_port *pp = netdev_priv(dev);
4190
2636ac3c
MW
4191 /* Current code for Armada 3700 doesn't support RSS features yet */
4192 if (pp->neta_armada3700)
4193 return -EOPNOTSUPP;
4194
9a401dea
GC
4195 if (hfunc)
4196 *hfunc = ETH_RSS_HASH_TOP;
4197
4198 if (!indir)
4199 return 0;
4200
4201 memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);
4202
4203 return 0;
4204}
4205
b60a00f9
JH
4206static void mvneta_ethtool_get_wol(struct net_device *dev,
4207 struct ethtool_wolinfo *wol)
4208{
503f9aa9 4209 struct mvneta_port *pp = netdev_priv(dev);
b60a00f9 4210
503f9aa9 4211 phylink_ethtool_get_wol(pp->phylink, wol);
b60a00f9
JH
4212}
4213
4214static int mvneta_ethtool_set_wol(struct net_device *dev,
4215 struct ethtool_wolinfo *wol)
4216{
503f9aa9 4217 struct mvneta_port *pp = netdev_priv(dev);
82960fff
JZ
4218 int ret;
4219
503f9aa9 4220 ret = phylink_ethtool_set_wol(pp->phylink, wol);
82960fff
JZ
4221 if (!ret)
4222 device_set_wakeup_enable(&dev->dev, !!wol->wolopts);
4223
4224 return ret;
b60a00f9
JH
4225}
4226
6d81f451
RK
4227static int mvneta_ethtool_get_eee(struct net_device *dev,
4228 struct ethtool_eee *eee)
4229{
4230 struct mvneta_port *pp = netdev_priv(dev);
4231 u32 lpi_ctl0;
4232
4233 lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
4234
4235 eee->eee_enabled = pp->eee_enabled;
4236 eee->eee_active = pp->eee_active;
4237 eee->tx_lpi_enabled = pp->tx_lpi_enabled;
4238 eee->tx_lpi_timer = (lpi_ctl0) >> 8; // * scale;
4239
4240 return phylink_ethtool_get_eee(pp->phylink, eee);
4241}
4242
4243static int mvneta_ethtool_set_eee(struct net_device *dev,
4244 struct ethtool_eee *eee)
4245{
4246 struct mvneta_port *pp = netdev_priv(dev);
4247 u32 lpi_ctl0;
4248
4249 /* The Armada 37x documents do not give limits for this other than
4250 * it being an 8-bit register. */
4251 if (eee->tx_lpi_enabled &&
4252 (eee->tx_lpi_timer < 0 || eee->tx_lpi_timer > 255))
4253 return -EINVAL;
4254
4255 lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
4256 lpi_ctl0 &= ~(0xff << 8);
4257 lpi_ctl0 |= eee->tx_lpi_timer << 8;
4258 mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);
4259
4260 pp->eee_enabled = eee->eee_enabled;
4261 pp->tx_lpi_enabled = eee->tx_lpi_enabled;
4262
4263 mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);
4264
4265 return phylink_ethtool_set_eee(pp->phylink, eee);
4266}
4267
c5aff182
TP
4268static const struct net_device_ops mvneta_netdev_ops = {
4269 .ndo_open = mvneta_open,
4270 .ndo_stop = mvneta_stop,
4271 .ndo_start_xmit = mvneta_tx,
4272 .ndo_set_rx_mode = mvneta_set_rx_mode,
4273 .ndo_set_mac_address = mvneta_set_mac_addr,
4274 .ndo_change_mtu = mvneta_change_mtu,
b65657fc 4275 .ndo_fix_features = mvneta_fix_features,
c5aff182 4276 .ndo_get_stats64 = mvneta_get_stats64,
15f59456 4277 .ndo_do_ioctl = mvneta_ioctl,
c5aff182
TP
4278};
4279
4581be42 4280static const struct ethtool_ops mvneta_eth_tool_ops = {
503f9aa9 4281 .nway_reset = mvneta_ethtool_nway_reset,
c5aff182 4282 .get_link = ethtool_op_get_link,
c5aff182
TP
4283 .set_coalesce = mvneta_ethtool_set_coalesce,
4284 .get_coalesce = mvneta_ethtool_get_coalesce,
4285 .get_drvinfo = mvneta_ethtool_get_drvinfo,
4286 .get_ringparam = mvneta_ethtool_get_ringparam,
4287 .set_ringparam = mvneta_ethtool_set_ringparam,
4932a918
RK
4288 .get_pauseparam = mvneta_ethtool_get_pauseparam,
4289 .set_pauseparam = mvneta_ethtool_set_pauseparam,
9b0cdefa
RK
4290 .get_strings = mvneta_ethtool_get_strings,
4291 .get_ethtool_stats = mvneta_ethtool_get_stats,
4292 .get_sset_count = mvneta_ethtool_get_sset_count,
9a401dea
GC
4293 .get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
4294 .get_rxnfc = mvneta_ethtool_get_rxnfc,
4295 .get_rxfh = mvneta_ethtool_get_rxfh,
4296 .set_rxfh = mvneta_ethtool_set_rxfh,
503f9aa9 4297 .get_link_ksettings = mvneta_ethtool_get_link_ksettings,
013ad40d 4298 .set_link_ksettings = mvneta_ethtool_set_link_ksettings,
b60a00f9
JH
4299 .get_wol = mvneta_ethtool_get_wol,
4300 .set_wol = mvneta_ethtool_set_wol,
6d81f451
RK
4301 .get_eee = mvneta_ethtool_get_eee,
4302 .set_eee = mvneta_ethtool_set_eee,
c5aff182
TP
4303};
4304
4305/* Initialize hw */
9672850b 4306static int mvneta_init(struct device *dev, struct mvneta_port *pp)
c5aff182
TP
4307{
4308 int queue;
4309
4310 /* Disable port */
4311 mvneta_port_disable(pp);
4312
4313 /* Set port default values */
4314 mvneta_defaults_set(pp);
4315
5d6312ed 4316 pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL);
c5aff182
TP
4317 if (!pp->txqs)
4318 return -ENOMEM;
4319
4320 /* Initialize TX descriptor rings */
4321 for (queue = 0; queue < txq_number; queue++) {
4322 struct mvneta_tx_queue *txq = &pp->txqs[queue];
4323 txq->id = queue;
4324 txq->size = pp->tx_ring_size;
4325 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
4326 }
4327
5d6312ed 4328 pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL);
9672850b 4329 if (!pp->rxqs)
c5aff182 4330 return -ENOMEM;
c5aff182
TP
4331
4332 /* Create Rx descriptor rings */
4333 for (queue = 0; queue < rxq_number; queue++) {
4334 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
4335 rxq->id = queue;
4336 rxq->size = pp->rx_ring_size;
4337 rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
4338 rxq->time_coal = MVNETA_RX_COAL_USEC;
29110630
ME
4339 rxq->buf_virt_addr
4340 = devm_kmalloc_array(pp->dev->dev.parent,
4341 rxq->size,
4342 sizeof(*rxq->buf_virt_addr),
4343 GFP_KERNEL);
f88bee1c
GC
4344 if (!rxq->buf_virt_addr)
4345 return -ENOMEM;
c5aff182
TP
4346 }
4347
4348 return 0;
4349}
4350
c5aff182 4351/* platform glue : initialize decoding windows */
03ce758e
GK
4352static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
4353 const struct mbus_dram_target_info *dram)
c5aff182
TP
4354{
4355 u32 win_enable;
4356 u32 win_protect;
4357 int i;
4358
4359 for (i = 0; i < 6; i++) {
4360 mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
4361 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
4362
4363 if (i < 4)
4364 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
4365 }
4366
4367 win_enable = 0x3f;
4368 win_protect = 0;
4369
2636ac3c
MW
4370 if (dram) {
4371 for (i = 0; i < dram->num_cs; i++) {
4372 const struct mbus_dram_window *cs = dram->cs + i;
4373
4374 mvreg_write(pp, MVNETA_WIN_BASE(i),
4375 (cs->base & 0xffff0000) |
4376 (cs->mbus_attr << 8) |
4377 dram->mbus_dram_target_id);
c5aff182 4378
2636ac3c
MW
4379 mvreg_write(pp, MVNETA_WIN_SIZE(i),
4380 (cs->size - 1) & 0xffff0000);
c5aff182 4381
2636ac3c
MW
4382 win_enable &= ~(1 << i);
4383 win_protect |= 3 << (2 * i);
4384 }
4385 } else {
4386		/* For Armada3700, open the default 4GB Mbus window, leaving
4387 * arbitration of target/attribute to a different layer
4388 * of configuration.
4389 */
4390 mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000);
4391 win_enable &= ~BIT(0);
4392 win_protect = 3;
c5aff182
TP
4393 }
4394
4395 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
db6ba9a5 4396 mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
c5aff182
TP
4397}
4398
4399/* Power up the port */
3f1dd4bc 4400static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
c5aff182 4401{
c5aff182
TP
4402 /* MAC Cause register should be cleared */
4403 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
4404
32699954 4405 if (phy_mode == PHY_INTERFACE_MODE_QSGMII)
3f1dd4bc 4406 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
22f4bf8a
RK
4407 else if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
4408 phy_mode == PHY_INTERFACE_MODE_1000BASEX)
3f1dd4bc 4409 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
32699954 4410 else if (!phy_interface_mode_is_rgmii(phy_mode))
3f1dd4bc 4411 return -EINVAL;
3f1dd4bc
TP
4412
4413 return 0;
c5aff182
TP
4414}
4415
4416/* Device initialization routine */
03ce758e 4417static int mvneta_probe(struct platform_device *pdev)
c5aff182 4418{
c3f0dd38 4419 struct resource *res;
c5aff182 4420 struct device_node *dn = pdev->dev.of_node;
dc35a10f 4421 struct device_node *bm_node;
c5aff182
TP
4422 struct mvneta_port *pp;
4423 struct net_device *dev;
503f9aa9 4424 struct phylink *phylink;
8cc3e439
TP
4425 const char *dt_mac_addr;
4426 char hw_mac_addr[ETH_ALEN];
4427 const char *mac_from;
9110ee07 4428 int tx_csum_limit;
c5aff182
TP
4429 int phy_mode;
4430 int err;
12bb03b4 4431 int cpu;
c5aff182 4432
ee40a116 4433 dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
c5aff182
TP
4434 if (!dev)
4435 return -ENOMEM;
4436
4437 dev->irq = irq_of_parse_and_map(dn, 0);
4438 if (dev->irq == 0) {
4439 err = -EINVAL;
4440 goto err_free_netdev;
4441 }
4442
c5aff182
TP
4443 phy_mode = of_get_phy_mode(dn);
4444 if (phy_mode < 0) {
4445 dev_err(&pdev->dev, "incorrect phy-mode\n");
4446 err = -EINVAL;
503f9aa9
RK
4447 goto err_free_irq;
4448 }
4449
4450 phylink = phylink_create(dev, pdev->dev.fwnode, phy_mode,
4451 &mvneta_phylink_ops);
4452 if (IS_ERR(phylink)) {
4453 err = PTR_ERR(phylink);
4454 goto err_free_irq;
c5aff182
TP
4455 }
4456
c5aff182
TP
4457 dev->tx_queue_len = MVNETA_MAX_TXD;
4458 dev->watchdog_timeo = 5 * HZ;
4459 dev->netdev_ops = &mvneta_netdev_ops;
4460
7ad24ea4 4461 dev->ethtool_ops = &mvneta_eth_tool_ops;
c5aff182
TP
4462
4463 pp = netdev_priv(dev);
1c2722a9 4464 spin_lock_init(&pp->lock);
503f9aa9 4465 pp->phylink = phylink;
c5aff182 4466 pp->phy_interface = phy_mode;
503f9aa9 4467 pp->dn = dn;
c5aff182 4468
90b74c01 4469 pp->rxq_def = rxq_def;
9a401dea
GC
4470 pp->indir[0] = rxq_def;
4471
2636ac3c
MW
4472 /* Get special SoC configurations */
4473 if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
4474 pp->neta_armada3700 = true;
4475
2804ba4e
JZ
4476 pp->clk = devm_clk_get(&pdev->dev, "core");
4477 if (IS_ERR(pp->clk))
4478 pp->clk = devm_clk_get(&pdev->dev, NULL);
189dd626
TP
4479 if (IS_ERR(pp->clk)) {
4480 err = PTR_ERR(pp->clk);
503f9aa9 4481 goto err_free_phylink;
189dd626
TP
4482 }
4483
4484 clk_prepare_enable(pp->clk);
4485
15cc4a4a
JZ
4486 pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
4487 if (!IS_ERR(pp->clk_bus))
4488 clk_prepare_enable(pp->clk_bus);
4489
c3f0dd38
TP
4490 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4491 pp->base = devm_ioremap_resource(&pdev->dev, res);
4492 if (IS_ERR(pp->base)) {
4493 err = PTR_ERR(pp->base);
5445eaf3
AP
4494 goto err_clk;
4495 }
4496
12bb03b4
MR
4497 /* Alloc per-cpu port structure */
4498 pp->ports = alloc_percpu(struct mvneta_pcpu_port);
4499 if (!pp->ports) {
4500 err = -ENOMEM;
4501 goto err_clk;
4502 }
4503
74c41b04 4504 /* Alloc per-cpu stats */
1c213bd2 4505 pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
74c41b04 4506 if (!pp->stats) {
4507 err = -ENOMEM;
12bb03b4 4508 goto err_free_ports;
74c41b04 4509 }
4510
8cc3e439 4511 dt_mac_addr = of_get_mac_address(dn);
6c7a9a3c 4512 if (dt_mac_addr) {
8cc3e439
TP
4513 mac_from = "device tree";
4514 memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
4515 } else {
4516 mvneta_get_mac_addr(pp, hw_mac_addr);
4517 if (is_valid_ether_addr(hw_mac_addr)) {
4518 mac_from = "hardware";
4519 memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
4520 } else {
4521 mac_from = "random";
4522 eth_hw_addr_random(dev);
4523 }
4524 }
4525
9110ee07
MW
4526 if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
4527 if (tx_csum_limit < 0 ||
4528 tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
4529 tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
4530 dev_info(&pdev->dev,
4531 "Wrong TX csum limit in DT, set to %dB\n",
4532 MVNETA_TX_CSUM_DEF_SIZE);
4533 }
4534 } else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
4535 tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
4536 } else {
4537 tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
4538 }
4539
4540 pp->tx_csum_limit = tx_csum_limit;
b65657fc 4541
9768b45c 4542 pp->dram_target_info = mv_mbus_dram_info();
2636ac3c
MW
4543	/* Armada3700 requires setting the default configuration of Mbus
4544	 * windows, but without using a filled mbus_dram_target_info
4545	 * structure.
4546 */
9768b45c
JL
4547 if (pp->dram_target_info || pp->neta_armada3700)
4548 mvneta_conf_mbus_windows(pp, pp->dram_target_info);
dc35a10f 4549
c5aff182
TP
4550 pp->tx_ring_size = MVNETA_MAX_TXD;
4551 pp->rx_ring_size = MVNETA_MAX_RXD;
4552
4553 pp->dev = dev;
4554 SET_NETDEV_DEV(dev, &pdev->dev);
4555
dc35a10f 4556 pp->id = global_port_id++;
562e2f46 4557 pp->rx_offset_correction = 0; /* not relevant for SW BM */
dc35a10f
MW
4558
4559 /* Obtain access to BM resources if enabled and already initialized */
4560 bm_node = of_parse_phandle(dn, "buffer-manager", 0);
965cbbec
GC
4561 if (bm_node) {
4562 pp->bm_priv = mvneta_bm_get(bm_node);
4563 if (pp->bm_priv) {
4564 err = mvneta_bm_port_init(pdev, pp);
4565 if (err < 0) {
4566 dev_info(&pdev->dev,
4567 "use SW buffer management\n");
4568 mvneta_bm_put(pp->bm_priv);
4569 pp->bm_priv = NULL;
4570 }
dc35a10f 4571 }
562e2f46
YK
4572		/* Set RX packet offset correction for platforms whose
4573		 * NET_SKB_PAD exceeds 64B. It should be 64B for 64-bit
4574 * platforms and 0B for 32-bit ones.
4575 */
4576 pp->rx_offset_correction = max(0,
4577 NET_SKB_PAD -
4578 MVNETA_RX_PKT_OFFSET_CORRECTION);
dc35a10f 4579 }
d4e4da00 4580 of_node_put(bm_node);
dc35a10f 4581
9672850b
EG
4582 err = mvneta_init(&pdev->dev, pp);
4583 if (err < 0)
dc35a10f 4584 goto err_netdev;
3f1dd4bc
TP
4585
4586 err = mvneta_port_power_up(pp, phy_mode);
4587 if (err < 0) {
4588 dev_err(&pdev->dev, "can't power up port\n");
dc35a10f 4589 goto err_netdev;
3f1dd4bc 4590 }
c5aff182 4591
2636ac3c
MW
4592 /* Armada3700 network controller does not support per-cpu
4593 * operation, so only single NAPI should be initialized.
4594 */
4595 if (pp->neta_armada3700) {
4596 netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
4597 } else {
4598 for_each_present_cpu(cpu) {
4599 struct mvneta_pcpu_port *port =
4600 per_cpu_ptr(pp->ports, cpu);
12bb03b4 4601
2636ac3c
MW
4602 netif_napi_add(dev, &port->napi, mvneta_poll,
4603 NAPI_POLL_WEIGHT);
4604 port->pp = pp;
4605 }
12bb03b4 4606 }
c5aff182 4607
7772988a
JZ
4608 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4609 NETIF_F_TSO | NETIF_F_RXCSUM;
01ef26ca
EG
4610 dev->hw_features |= dev->features;
4611 dev->vlan_features |= dev->features;
97db8afa 4612 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
8eef5f97 4613 dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
b50b72de 4614
5777987e
JW
4615 /* MTU range: 68 - 9676 */
4616 dev->min_mtu = ETH_MIN_MTU;
4617 /* 9676 == 9700 - 20 and rounding to 8 */
4618 dev->max_mtu = 9676;
4619
c5aff182
TP
4620 err = register_netdev(dev);
4621 if (err < 0) {
4622 dev_err(&pdev->dev, "failed to register\n");
9672850b 4623 goto err_free_stats;
c5aff182
TP
4624 }
4625
8cc3e439
TP
4626 netdev_info(dev, "Using %s mac address %pM\n", mac_from,
4627 dev->dev_addr);
c5aff182
TP
4628
4629 platform_set_drvdata(pdev, pp->dev);
4630
4631 return 0;
4632
dc35a10f
MW
4633err_netdev:
4634 unregister_netdev(dev);
4635 if (pp->bm_priv) {
4636 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
4637 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
4638 1 << pp->id);
965cbbec 4639 mvneta_bm_put(pp->bm_priv);
dc35a10f 4640 }
74c41b04 4641err_free_stats:
4642 free_percpu(pp->stats);
12bb03b4
MR
4643err_free_ports:
4644 free_percpu(pp->ports);
5445eaf3 4645err_clk:
15cc4a4a 4646 clk_disable_unprepare(pp->clk_bus);
5445eaf3 4647 clk_disable_unprepare(pp->clk);
503f9aa9
RK
4648err_free_phylink:
4649 if (pp->phylink)
4650 phylink_destroy(pp->phylink);
c5aff182
TP
4651err_free_irq:
4652 irq_dispose_mapping(dev->irq);
4653err_free_netdev:
4654 free_netdev(dev);
4655 return err;
4656}
4657
4658/* Device removal routine */
03ce758e 4659static int mvneta_remove(struct platform_device *pdev)
c5aff182
TP
4660{
4661 struct net_device *dev = platform_get_drvdata(pdev);
4662 struct mvneta_port *pp = netdev_priv(dev);
4663
4664 unregister_netdev(dev);
15cc4a4a 4665 clk_disable_unprepare(pp->clk_bus);
189dd626 4666 clk_disable_unprepare(pp->clk);
12bb03b4 4667 free_percpu(pp->ports);
74c41b04 4668 free_percpu(pp->stats);
c5aff182 4669 irq_dispose_mapping(dev->irq);
503f9aa9 4670 phylink_destroy(pp->phylink);
c5aff182
TP
4671 free_netdev(dev);
4672
dc35a10f
MW
4673 if (pp->bm_priv) {
4674 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
4675 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
4676 1 << pp->id);
965cbbec 4677 mvneta_bm_put(pp->bm_priv);
dc35a10f
MW
4678 }
4679
c5aff182
TP
4680 return 0;
4681}
4682
9768b45c
JL
4683#ifdef CONFIG_PM_SLEEP
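/* System suspend: stop the device if it was running, drop pending RX
 * packets, deinit the TX queues and gate the clocks. The hardware state
 * is rebuilt in mvneta_resume().
 */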
4684static int mvneta_suspend(struct device *device)
4685{
1799cdd2 4686 int queue;
9768b45c
JL
4687 struct net_device *dev = dev_get_drvdata(device);
4688 struct mvneta_port *pp = netdev_priv(dev);
4689
1799cdd2
JZ
4690 if (!netif_running(dev))
4691 goto clean_exit;
4692
4693 if (!pp->neta_armada3700) {
4694 spin_lock(&pp->lock);
4695 pp->is_stopped = true;
4696 spin_unlock(&pp->lock);
4697
4698 cpuhp_state_remove_instance_nocalls(online_hpstate,
4699 &pp->node_online);
4700 cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
4701 &pp->node_dead);
4702 }
4703
3b8bc674 4704 rtnl_lock();
1799cdd2 4705 mvneta_stop_dev(pp);
3b8bc674 4706 rtnl_unlock();
1799cdd2
JZ
4707
4708 for (queue = 0; queue < rxq_number; queue++) {
4709 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
4710
4711 mvneta_rxq_drop_pkts(pp, rxq);
4712 }
4713
4714 for (queue = 0; queue < txq_number; queue++) {
4715 struct mvneta_tx_queue *txq = &pp->txqs[queue];
4716
4717 mvneta_txq_hw_deinit(pp, txq);
4718 }
4719
4720clean_exit:
9768b45c
JL
4721 netif_device_detach(dev);
4722 clk_disable_unprepare(pp->clk_bus);
4723 clk_disable_unprepare(pp->clk);
1799cdd2 4724
9768b45c
JL
4725 return 0;
4726}
4727
4728static int mvneta_resume(struct device *device)
4729{
4730 struct platform_device *pdev = to_platform_device(device);
4731 struct net_device *dev = dev_get_drvdata(device);
4732 struct mvneta_port *pp = netdev_priv(dev);
1799cdd2 4733 int err, queue;
9768b45c
JL
4734
4735 clk_prepare_enable(pp->clk);
4736 if (!IS_ERR(pp->clk_bus))
4737 clk_prepare_enable(pp->clk_bus);
4738 if (pp->dram_target_info || pp->neta_armada3700)
4739 mvneta_conf_mbus_windows(pp, pp->dram_target_info);
4740 if (pp->bm_priv) {
4741 err = mvneta_bm_port_init(pdev, pp);
4742 if (err < 0) {
4743 dev_info(&pdev->dev, "use SW buffer management\n");
4744 pp->bm_priv = NULL;
4745 }
4746 }
4747 mvneta_defaults_set(pp);
4748 err = mvneta_port_power_up(pp, pp->phy_interface);
4749 if (err < 0) {
4750 dev_err(device, "can't power up port\n");
4751 return err;
4752 }
4753
9768b45c 4754 netif_device_attach(dev);
1799cdd2
JZ
4755
4756 if (!netif_running(dev))
4757 return 0;
4758
4759 for (queue = 0; queue < rxq_number; queue++) {
4760 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
4761
4762 rxq->next_desc_to_proc = 0;
4763 mvneta_rxq_hw_init(pp, rxq);
4764 }
4765
4766 for (queue = 0; queue < txq_number; queue++) {
4767 struct mvneta_tx_queue *txq = &pp->txqs[queue];
4768
4769 txq->next_desc_to_proc = 0;
4770 mvneta_txq_hw_init(pp, txq);
d6956ac8 4771 }
1799cdd2
JZ
4772
4773 if (!pp->neta_armada3700) {
4774 spin_lock(&pp->lock);
4775 pp->is_stopped = false;
4776 spin_unlock(&pp->lock);
4777 cpuhp_state_add_instance_nocalls(online_hpstate,
4778 &pp->node_online);
4779 cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
4780 &pp->node_dead);
4781 }
4782
4783 rtnl_lock();
4784 mvneta_start_dev(pp);
3b8bc674 4785 rtnl_unlock();
1799cdd2 4786 mvneta_set_rx_mode(dev);
d6956ac8 4787
9768b45c
JL
4788 return 0;
4789}
4790#endif
4791
4792static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume);
4793
c5aff182
TP
4794static const struct of_device_id mvneta_match[] = {
4795 { .compatible = "marvell,armada-370-neta" },
f522a975 4796 { .compatible = "marvell,armada-xp-neta" },
2636ac3c 4797 { .compatible = "marvell,armada-3700-neta" },
c5aff182
TP
4798 { }
4799};
4800MODULE_DEVICE_TABLE(of, mvneta_match);
4801
4802static struct platform_driver mvneta_driver = {
4803 .probe = mvneta_probe,
03ce758e 4804 .remove = mvneta_remove,
c5aff182
TP
4805 .driver = {
4806 .name = MVNETA_DRIVER_NAME,
4807 .of_match_table = mvneta_match,
9768b45c 4808 .pm = &mvneta_pm_ops,
c5aff182
TP
4809 },
4810};
4811
84a3f4db
SAS
4812static int __init mvneta_driver_init(void)
4813{
4814 int ret;
4815
4816 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvmeta:online",
4817 mvneta_cpu_online,
4818 mvneta_cpu_down_prepare);
4819 if (ret < 0)
4820 goto out;
4821 online_hpstate = ret;
4822 ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead",
4823 NULL, mvneta_cpu_dead);
4824 if (ret)
4825 goto err_dead;
4826
4827 ret = platform_driver_register(&mvneta_driver);
4828 if (ret)
4829 goto err;
4830 return 0;
4831
4832err:
4833 cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
4834err_dead:
4835 cpuhp_remove_multi_state(online_hpstate);
4836out:
4837 return ret;
4838}
4839module_init(mvneta_driver_init);
4840
4841static void __exit mvneta_driver_exit(void)
4842{
4843 platform_driver_unregister(&mvneta_driver);
4844 cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
4845 cpuhp_remove_multi_state(online_hpstate);
4846}
4847module_exit(mvneta_driver_exit);
c5aff182
TP
4848
4849MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
4850MODULE_AUTHOR("Rami Rosen <[email protected]>, Thomas Petazzoni <[email protected]>");
4851MODULE_LICENSE("GPL");
4852
d3757ba4
JP
4853module_param(rxq_number, int, 0444);
4854module_param(txq_number, int, 0444);
c5aff182 4855
d3757ba4
JP
4856module_param(rxq_def, int, 0444);
4857module_param(rx_copybreak, int, 0644);
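/* The 0444 parameters above are read-only at runtime, while 0644 also
 * makes rx_copybreak writable through sysfs. Assuming the standard module
 * parameter interface and "mvneta" as the module name, they can be set at
 * load time (e.g. "modprobe mvneta rx_copybreak=128") or, when the driver
 * is built in, on the kernel command line ("mvneta.rx_copybreak=128").
 */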