1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5
6         Copyright(C) 2007-2011 STMicroelectronics Ltd
7
8
9   Author: Giuseppe Cavallaro <[email protected]>
10
11   Documentation available at:
12         http://www.stlinux.com
13   Support available at:
14         https://bugzilla.stlinux.com/
15 *******************************************************************************/
16
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/prefetch.h>
32 #include <linux/pinctrl/consumer.h>
33 #ifdef CONFIG_DEBUG_FS
34 #include <linux/debugfs.h>
35 #include <linux/seq_file.h>
36 #endif /* CONFIG_DEBUG_FS */
37 #include <linux/net_tstamp.h>
38 #include <linux/phylink.h>
39 #include <linux/udp.h>
40 #include <net/pkt_cls.h>
41 #include "stmmac_ptp.h"
42 #include "stmmac.h"
43 #include <linux/reset.h>
44 #include <linux/of_mdio.h>
45 #include "dwmac1000.h"
46 #include "dwxgmac2.h"
47 #include "hwif.h"
48
49 #define STMMAC_ALIGN(x)         ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
50 #define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
51
52 /* Module parameters */
53 #define TX_TIMEO        5000
54 static int watchdog = TX_TIMEO;
55 module_param(watchdog, int, 0644);
56 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
57
58 static int debug = -1;
59 module_param(debug, int, 0644);
60 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
61
62 static int phyaddr = -1;
63 module_param(phyaddr, int, 0444);
64 MODULE_PARM_DESC(phyaddr, "Physical device address");
65
66 #define STMMAC_TX_THRESH(x)     ((x)->dma_tx_size / 4)
67 #define STMMAC_RX_THRESH(x)     ((x)->dma_rx_size / 4)
68
69 static int flow_ctrl = FLOW_AUTO;
70 module_param(flow_ctrl, int, 0644);
71 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
72
73 static int pause = PAUSE_TIME;
74 module_param(pause, int, 0644);
75 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
76
77 #define TC_DEFAULT 64
78 static int tc = TC_DEFAULT;
79 module_param(tc, int, 0644);
80 MODULE_PARM_DESC(tc, "DMA threshold control value");
81
82 #define DEFAULT_BUFSIZE 1536
83 static int buf_sz = DEFAULT_BUFSIZE;
84 module_param(buf_sz, int, 0644);
85 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
86
87 #define STMMAC_RX_COPYBREAK     256
88
89 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
90                                       NETIF_MSG_LINK | NETIF_MSG_IFUP |
91                                       NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
92
93 #define STMMAC_DEFAULT_LPI_TIMER        1000
94 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
95 module_param(eee_timer, int, 0644);
96 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
97 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
98
99 /* By default the driver uses the ring mode to manage tx and rx descriptors,
100  * but the user can force the chain mode instead of the ring
101  */
102 static unsigned int chain_mode;
103 module_param(chain_mode, int, 0444);
104 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
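/* Illustrative only, not part of the original source: these parameters are
 * normally supplied at module load time, e.g. (hypothetical module name)
 *   insmod stmmac.ko buf_sz=4096 eee_timer=2000 chain_mode=1
 * and the 0644 parameters can also be changed at runtime through
 * /sys/module/stmmac/parameters/<name>.
 */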
105
106 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
107
108 #ifdef CONFIG_DEBUG_FS
109 static const struct net_device_ops stmmac_netdev_ops;
110 static void stmmac_init_fs(struct net_device *dev);
111 static void stmmac_exit_fs(struct net_device *dev);
112 #endif
113
114 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
115
116 /**
117  * stmmac_verify_args - verify the driver parameters.
118  * Description: it checks the driver parameters and sets a default in case of
119  * errors.
120  */
121 static void stmmac_verify_args(void)
122 {
123         if (unlikely(watchdog < 0))
124                 watchdog = TX_TIMEO;
125         if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
126                 buf_sz = DEFAULT_BUFSIZE;
127         if (unlikely(flow_ctrl > 1))
128                 flow_ctrl = FLOW_AUTO;
129         else if (likely(flow_ctrl < 0))
130                 flow_ctrl = FLOW_OFF;
131         if (unlikely((pause < 0) || (pause > 0xffff)))
132                 pause = PAUSE_TIME;
133         if (eee_timer < 0)
134                 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
135 }
136
137 /**
138  * stmmac_disable_all_queues - Disable all queues
139  * @priv: driver private structure
140  */
141 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
142 {
143         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
144         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
145         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
146         u32 queue;
147
148         for (queue = 0; queue < maxq; queue++) {
149                 struct stmmac_channel *ch = &priv->channel[queue];
150
151                 if (queue < rx_queues_cnt)
152                         napi_disable(&ch->rx_napi);
153                 if (queue < tx_queues_cnt)
154                         napi_disable(&ch->tx_napi);
155         }
156 }
157
158 /**
159  * stmmac_enable_all_queues - Enable all queues
160  * @priv: driver private structure
161  */
162 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
163 {
164         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
165         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
166         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
167         u32 queue;
168
169         for (queue = 0; queue < maxq; queue++) {
170                 struct stmmac_channel *ch = &priv->channel[queue];
171
172                 if (queue < rx_queues_cnt)
173                         napi_enable(&ch->rx_napi);
174                 if (queue < tx_queues_cnt)
175                         napi_enable(&ch->tx_napi);
176         }
177 }
178
179 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
180 {
181         if (!test_bit(STMMAC_DOWN, &priv->state) &&
182             !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
183                 queue_work(priv->wq, &priv->service_task);
184 }
185
186 static void stmmac_global_err(struct stmmac_priv *priv)
187 {
188         netif_carrier_off(priv->dev);
189         set_bit(STMMAC_RESET_REQUESTED, &priv->state);
190         stmmac_service_event_schedule(priv);
191 }
192
193 /**
194  * stmmac_clk_csr_set - dynamically set the MDC clock
195  * @priv: driver private structure
196  * Description: this is to dynamically set the MDC clock according to the csr
197  * clock input.
198  * Note:
199  *      If a specific clk_csr value is passed from the platform
200  *      this means that the CSR Clock Range selection cannot be
201  *      changed at run-time and it is fixed (as reported in the driver
202  *      documentation). Otherwise the driver will try to set the MDC
203  *      clock dynamically according to the actual clock input.
204  */
205 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
206 {
207         u32 clk_rate;
208
209         clk_rate = clk_get_rate(priv->plat->stmmac_clk);
210
211         /* Platform provided default clk_csr would be assumed valid
212          * for all other cases except for the below mentioned ones.
213          * For values higher than the IEEE 802.3 specified frequency
214          * we cannot estimate the proper divider because the frequency of
215          * clk_csr_i is not known, so we do not change the default
216          * divider.
217          */
218         if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
219                 if (clk_rate < CSR_F_35M)
220                         priv->clk_csr = STMMAC_CSR_20_35M;
221                 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
222                         priv->clk_csr = STMMAC_CSR_35_60M;
223                 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
224                         priv->clk_csr = STMMAC_CSR_60_100M;
225                 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
226                         priv->clk_csr = STMMAC_CSR_100_150M;
227                 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
228                         priv->clk_csr = STMMAC_CSR_150_250M;
229                 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
230                         priv->clk_csr = STMMAC_CSR_250_300M;
231         }
232
233         if (priv->plat->has_sun8i) {
234                 if (clk_rate > 160000000)
235                         priv->clk_csr = 0x03;
236                 else if (clk_rate > 80000000)
237                         priv->clk_csr = 0x02;
238                 else if (clk_rate > 40000000)
239                         priv->clk_csr = 0x01;
240                 else
241                         priv->clk_csr = 0;
242         }
243
244         if (priv->plat->has_xgmac) {
245                 if (clk_rate > 400000000)
246                         priv->clk_csr = 0x5;
247                 else if (clk_rate > 350000000)
248                         priv->clk_csr = 0x4;
249                 else if (clk_rate > 300000000)
250                         priv->clk_csr = 0x3;
251                 else if (clk_rate > 250000000)
252                         priv->clk_csr = 0x2;
253                 else if (clk_rate > 150000000)
254                         priv->clk_csr = 0x1;
255                 else
256                         priv->clk_csr = 0x0;
257         }
258 }
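/* Worked example (illustrative, assuming the CSR_F_* constants are Hz values):
 * with a 75 MHz csr clock and no fixed clk_csr supplied by the platform, the
 * 60-100 MHz range above matches and priv->clk_csr becomes STMMAC_CSR_60_100M,
 * i.e. the corresponding MDC clock divider is selected.
 */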
259
260 static void print_pkt(unsigned char *buf, int len)
261 {
262         pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
263         print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
264 }
265
266 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
267 {
268         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
269         u32 avail;
270
271         if (tx_q->dirty_tx > tx_q->cur_tx)
272                 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
273         else
274                 avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
275
276         return avail;
277 }
278
279 /**
280  * stmmac_rx_dirty - Get RX queue dirty
281  * @priv: driver private structure
282  * @queue: RX queue index
283  */
284 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
285 {
286         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
287         u32 dirty;
288
289         if (rx_q->dirty_rx <= rx_q->cur_rx)
290                 dirty = rx_q->cur_rx - rx_q->dirty_rx;
291         else
292                 dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
293
294         return dirty;
295 }
296
297 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
298 {
299         int tx_lpi_timer;
300
301         /* Clear/set the SW EEE timer flag based on LPI ET enablement */
302         priv->eee_sw_timer_en = en ? 0 : 1;
303         tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
304         stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
305 }
306
307 /**
308  * stmmac_enable_eee_mode - check and enter LPI mode
309  * @priv: driver private structure
310  * Description: this function checks that all TX queues are idle and, if so,
311  * enters LPI mode when EEE is enabled.
312  */
313 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
314 {
315         u32 tx_cnt = priv->plat->tx_queues_to_use;
316         u32 queue;
317
318         /* check if all TX queues have the work finished */
319         for (queue = 0; queue < tx_cnt; queue++) {
320                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
321
322                 if (tx_q->dirty_tx != tx_q->cur_tx)
323                         return; /* still unfinished work */
324         }
325
326         /* Check and enter in LPI mode */
327         if (!priv->tx_path_in_lpi_mode)
328                 stmmac_set_eee_mode(priv, priv->hw,
329                                 priv->plat->en_tx_lpi_clockgating);
330 }
331
332 /**
333  * stmmac_disable_eee_mode - disable and exit from LPI mode
334  * @priv: driver private structure
335  * Description: this function exits LPI and disables EEE when the LPI
336  * state is active. It is called from the xmit path.
337  */
338 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
339 {
340         if (!priv->eee_sw_timer_en) {
341                 stmmac_lpi_entry_timer_config(priv, 0);
342                 return;
343         }
344
345         stmmac_reset_eee_mode(priv, priv->hw);
346         del_timer_sync(&priv->eee_ctrl_timer);
347         priv->tx_path_in_lpi_mode = false;
348 }
349
350 /**
351  * stmmac_eee_ctrl_timer - EEE TX SW timer.
352  * @t:  timer_list struct containing private info
353  * Description:
354  *  if there is no data transfer and if we are not in LPI state,
355  *  then the MAC transmitter can be moved to the LPI state.
356  */
357 static void stmmac_eee_ctrl_timer(struct timer_list *t)
358 {
359         struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
360
361         stmmac_enable_eee_mode(priv);
362         mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
363 }
364
365 /**
366  * stmmac_eee_init - init EEE
367  * @priv: driver private structure
368  * Description:
369  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
370  *  can also manage EEE, this function enables the LPI state and starts the
371  *  related timer.
372  */
373 bool stmmac_eee_init(struct stmmac_priv *priv)
374 {
375         int eee_tw_timer = priv->eee_tw_timer;
376
377         /* Using PCS we cannot deal with the PHY registers at this stage,
378          * so we do not support extra features like EEE.
379          */
380         if (priv->hw->pcs == STMMAC_PCS_TBI ||
381             priv->hw->pcs == STMMAC_PCS_RTBI)
382                 return false;
383
384         /* Check if MAC core supports the EEE feature. */
385         if (!priv->dma_cap.eee)
386                 return false;
387
388         mutex_lock(&priv->lock);
389
390         /* Check if it needs to be deactivated */
391         if (!priv->eee_active) {
392                 if (priv->eee_enabled) {
393                         netdev_dbg(priv->dev, "disable EEE\n");
394                         stmmac_lpi_entry_timer_config(priv, 0);
395                         del_timer_sync(&priv->eee_ctrl_timer);
396                         stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
397                 }
398                 mutex_unlock(&priv->lock);
399                 return false;
400         }
401
402         if (priv->eee_active && !priv->eee_enabled) {
403                 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
404                 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
405                                      eee_tw_timer);
406         }
407
408         if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
409                 del_timer_sync(&priv->eee_ctrl_timer);
410                 priv->tx_path_in_lpi_mode = false;
411                 stmmac_lpi_entry_timer_config(priv, 1);
412         } else {
413                 stmmac_lpi_entry_timer_config(priv, 0);
414                 mod_timer(&priv->eee_ctrl_timer,
415                           STMMAC_LPI_T(priv->tx_lpi_timer));
416         }
417
418         mutex_unlock(&priv->lock);
419         netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
420         return true;
421 }
422
423 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
424  * @priv: driver private structure
425  * @p : descriptor pointer
426  * @skb : the socket buffer
427  * Description:
428  * This function reads the timestamp from the descriptor and passes it to the
429  * stack, performing some sanity checks along the way.
430  */
431 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
432                                    struct dma_desc *p, struct sk_buff *skb)
433 {
434         struct skb_shared_hwtstamps shhwtstamp;
435         bool found = false;
436         u64 ns = 0;
437
438         if (!priv->hwts_tx_en)
439                 return;
440
441         /* exit if skb doesn't support hw tstamp */
442         if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
443                 return;
444
445         /* check tx tstamp status */
446         if (stmmac_get_tx_timestamp_status(priv, p)) {
447                 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
448                 found = true;
449         } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
450                 found = true;
451         }
452
453         if (found) {
454                 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
455                 shhwtstamp.hwtstamp = ns_to_ktime(ns);
456
457                 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
458                 /* pass tstamp to stack */
459                 skb_tstamp_tx(skb, &shhwtstamp);
460         }
461 }
462
463 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
464  * @priv: driver private structure
465  * @p : descriptor pointer
466  * @np : next descriptor pointer
467  * @skb : the socket buffer
468  * Description:
469  * This function reads the received packet's timestamp from the descriptor
470  * and passes it to the stack. It also performs some sanity checks.
471  */
472 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
473                                    struct dma_desc *np, struct sk_buff *skb)
474 {
475         struct skb_shared_hwtstamps *shhwtstamp = NULL;
476         struct dma_desc *desc = p;
477         u64 ns = 0;
478
479         if (!priv->hwts_rx_en)
480                 return;
481         /* For GMAC4, the valid timestamp is from CTX next desc. */
482         if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
483                 desc = np;
484
485         /* Check if timestamp is available */
486         if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
487                 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
488                 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
489                 shhwtstamp = skb_hwtstamps(skb);
490                 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
491                 shhwtstamp->hwtstamp = ns_to_ktime(ns);
492         } else  {
493                 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
494         }
495 }
496
497 /**
498  *  stmmac_hwtstamp_set - control hardware timestamping.
499  *  @dev: device pointer.
500  *  @ifr: An IOCTL specific structure that can contain a pointer to
501  *  a proprietary structure used to pass information to the driver.
502  *  Description:
503  *  This function configures the MAC to enable/disable both outgoing (TX)
504  *  and incoming (RX) packet timestamping based on user input.
505  *  Return Value:
506  *  0 on success and an appropriate -ve integer on failure.
507  */
508 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
509 {
510         struct stmmac_priv *priv = netdev_priv(dev);
511         struct hwtstamp_config config;
512         struct timespec64 now;
513         u64 temp = 0;
514         u32 ptp_v2 = 0;
515         u32 tstamp_all = 0;
516         u32 ptp_over_ipv4_udp = 0;
517         u32 ptp_over_ipv6_udp = 0;
518         u32 ptp_over_ethernet = 0;
519         u32 snap_type_sel = 0;
520         u32 ts_master_en = 0;
521         u32 ts_event_en = 0;
522         u32 sec_inc = 0;
523         u32 value = 0;
524         bool xmac;
525
526         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
527
528         if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
529                 netdev_alert(priv->dev, "No support for HW time stamping\n");
530                 priv->hwts_tx_en = 0;
531                 priv->hwts_rx_en = 0;
532
533                 return -EOPNOTSUPP;
534         }
535
536         if (copy_from_user(&config, ifr->ifr_data,
537                            sizeof(config)))
538                 return -EFAULT;
539
540         netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
541                    __func__, config.flags, config.tx_type, config.rx_filter);
542
543         /* reserved for future extensions */
544         if (config.flags)
545                 return -EINVAL;
546
547         if (config.tx_type != HWTSTAMP_TX_OFF &&
548             config.tx_type != HWTSTAMP_TX_ON)
549                 return -ERANGE;
550
551         if (priv->adv_ts) {
552                 switch (config.rx_filter) {
553                 case HWTSTAMP_FILTER_NONE:
554                         /* time stamp no incoming packet at all */
555                         config.rx_filter = HWTSTAMP_FILTER_NONE;
556                         break;
557
558                 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
559                         /* PTP v1, UDP, any kind of event packet */
560                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
561                         /* 'xmac' hardware can support Sync, Pdelay_Req and
562                          * Pdelay_resp by setting bit14 and bits17/16 to 01.
563                          * This leaves Delay_Req timestamps out.
564                          * Enable all events *and* general purpose message
565                          * timestamping.
566                          */
567                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
568                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
569                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
570                         break;
571
572                 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
573                         /* PTP v1, UDP, Sync packet */
574                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
575                         /* take time stamp for SYNC messages only */
576                         ts_event_en = PTP_TCR_TSEVNTENA;
577
578                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
579                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
580                         break;
581
582                 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
583                         /* PTP v1, UDP, Delay_req packet */
584                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
585                         /* take time stamp for Delay_Req messages only */
586                         ts_master_en = PTP_TCR_TSMSTRENA;
587                         ts_event_en = PTP_TCR_TSEVNTENA;
588
589                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
590                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
591                         break;
592
593                 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
594                         /* PTP v2, UDP, any kind of event packet */
595                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
596                         ptp_v2 = PTP_TCR_TSVER2ENA;
597                         /* take time stamp for all event messages */
598                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
599
600                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
601                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
602                         break;
603
604                 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
605                         /* PTP v2, UDP, Sync packet */
606                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
607                         ptp_v2 = PTP_TCR_TSVER2ENA;
608                         /* take time stamp for SYNC messages only */
609                         ts_event_en = PTP_TCR_TSEVNTENA;
610
611                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
612                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
613                         break;
614
615                 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
616                         /* PTP v2, UDP, Delay_req packet */
617                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
618                         ptp_v2 = PTP_TCR_TSVER2ENA;
619                         /* take time stamp for Delay_Req messages only */
620                         ts_master_en = PTP_TCR_TSMSTRENA;
621                         ts_event_en = PTP_TCR_TSEVNTENA;
622
623                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
624                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
625                         break;
626
627                 case HWTSTAMP_FILTER_PTP_V2_EVENT:
628                         /* PTP v2/802.1AS any layer, any kind of event packet */
629                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
630                         ptp_v2 = PTP_TCR_TSVER2ENA;
631                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
632                         if (priv->synopsys_id != DWMAC_CORE_5_10)
633                                 ts_event_en = PTP_TCR_TSEVNTENA;
634                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
635                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
636                         ptp_over_ethernet = PTP_TCR_TSIPENA;
637                         break;
638
639                 case HWTSTAMP_FILTER_PTP_V2_SYNC:
640                         /* PTP v2/802.1AS, any layer, Sync packet */
641                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
642                         ptp_v2 = PTP_TCR_TSVER2ENA;
643                         /* take time stamp for SYNC messages only */
644                         ts_event_en = PTP_TCR_TSEVNTENA;
645
646                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
647                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
648                         ptp_over_ethernet = PTP_TCR_TSIPENA;
649                         break;
650
651                 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
652                         /* PTP v2/802.1AS, any layer, Delay_req packet */
653                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
654                         ptp_v2 = PTP_TCR_TSVER2ENA;
655                         /* take time stamp for Delay_Req messages only */
656                         ts_master_en = PTP_TCR_TSMSTRENA;
657                         ts_event_en = PTP_TCR_TSEVNTENA;
658
659                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
660                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
661                         ptp_over_ethernet = PTP_TCR_TSIPENA;
662                         break;
663
664                 case HWTSTAMP_FILTER_NTP_ALL:
665                 case HWTSTAMP_FILTER_ALL:
666                         /* time stamp any incoming packet */
667                         config.rx_filter = HWTSTAMP_FILTER_ALL;
668                         tstamp_all = PTP_TCR_TSENALL;
669                         break;
670
671                 default:
672                         return -ERANGE;
673                 }
674         } else {
675                 switch (config.rx_filter) {
676                 case HWTSTAMP_FILTER_NONE:
677                         config.rx_filter = HWTSTAMP_FILTER_NONE;
678                         break;
679                 default:
680                         /* PTP v1, UDP, any kind of event packet */
681                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
682                         break;
683                 }
684         }
685         priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
686         priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
687
688         if (!priv->hwts_tx_en && !priv->hwts_rx_en)
689                 stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
690         else {
691                 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
692                          tstamp_all | ptp_v2 | ptp_over_ethernet |
693                          ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
694                          ts_master_en | snap_type_sel);
695                 stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
696
697                 /* program Sub Second Increment reg */
698                 stmmac_config_sub_second_increment(priv,
699                                 priv->ptpaddr, priv->plat->clk_ptp_rate,
700                                 xmac, &sec_inc);
701                 temp = div_u64(1000000000ULL, sec_inc);
702
703                 /* Store sub second increment and flags for later use */
704                 priv->sub_second_inc = sec_inc;
705                 priv->systime_flags = value;
706
707                 /* calculate the default addend value:
708                  * the formula is:
709                  *   addend = (2^32) / freq_div_ratio;
710                  * where freq_div_ratio = 1e9ns / sec_inc
711                  */
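                /* Worked example (illustrative, not from the original source):
                 * assume a 50 MHz clk_ptp_rate and a 40 ns sub-second increment
                 * (i.e. twice the clock period, as used in fine-update mode).
                 * Then temp = 1e9 / 40 = 25,000,000 and
                 * addend = (25,000,000 << 32) / 50,000,000 = 2^31 = 0x80000000,
                 * so the 32-bit accumulator overflows (and the sub-second
                 * counter advances) every other PTP clock cycle.
                 */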
712                 temp = (u64)(temp << 32);
713                 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
714                 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
715
716                 /* initialize system time */
717                 ktime_get_real_ts64(&now);
718
719                 /* lower 32 bits of tv_sec are safe until y2106 */
720                 stmmac_init_systime(priv, priv->ptpaddr,
721                                 (u32)now.tv_sec, now.tv_nsec);
722         }
723
724         memcpy(&priv->tstamp_config, &config, sizeof(config));
725
726         return copy_to_user(ifr->ifr_data, &config,
727                             sizeof(config)) ? -EFAULT : 0;
728 }
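/* Userspace usage sketch (illustrative, not part of the driver): this handler
 * is reached through the standard SIOCSHWTSTAMP ioctl, roughly:
 *
 *   #include <linux/net_tstamp.h>    // struct hwtstamp_config
 *   #include <linux/sockios.h>       // SIOCSHWTSTAMP
 *
 *   struct hwtstamp_config cfg = { 0 };
 *   struct ifreq ifr = { 0 };
 *
 *   cfg.tx_type = HWTSTAMP_TX_ON;
 *   cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *   ifr.ifr_data = (void *)&cfg;
 *   ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * where "eth0" and fd are placeholders for the target interface name and an
 * open socket descriptor.
 */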
729
730 /**
731  *  stmmac_hwtstamp_get - read hardware timestamping.
732  *  @dev: device pointer.
733  *  @ifr: An IOCTL specific structure that can contain a pointer to
734  *  a proprietary structure used to pass information to the driver.
735  *  Description:
736  *  This function obtains the current hardware timestamping settings
737  *  as requested.
738  */
739 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
740 {
741         struct stmmac_priv *priv = netdev_priv(dev);
742         struct hwtstamp_config *config = &priv->tstamp_config;
743
744         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
745                 return -EOPNOTSUPP;
746
747         return copy_to_user(ifr->ifr_data, config,
748                             sizeof(*config)) ? -EFAULT : 0;
749 }
750
751 /**
752  * stmmac_init_ptp - init PTP
753  * @priv: driver private structure
754  * Description: this is to verify if the HW supports PTPv1 or PTPv2.
755  * This is done by looking at the HW cap. register.
756  * This function also registers the ptp driver.
757  */
758 static int stmmac_init_ptp(struct stmmac_priv *priv)
759 {
760         bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
761
762         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
763                 return -EOPNOTSUPP;
764
765         priv->adv_ts = 0;
766         /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
767         if (xmac && priv->dma_cap.atime_stamp)
768                 priv->adv_ts = 1;
769         /* Dwmac 3.x core with extend_desc can support adv_ts */
770         else if (priv->extend_desc && priv->dma_cap.atime_stamp)
771                 priv->adv_ts = 1;
772
773         if (priv->dma_cap.time_stamp)
774                 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
775
776         if (priv->adv_ts)
777                 netdev_info(priv->dev,
778                             "IEEE 1588-2008 Advanced Timestamp supported\n");
779
780         priv->hwts_tx_en = 0;
781         priv->hwts_rx_en = 0;
782
783         stmmac_ptp_register(priv);
784
785         return 0;
786 }
787
788 static void stmmac_release_ptp(struct stmmac_priv *priv)
789 {
790         clk_disable_unprepare(priv->plat->clk_ptp_ref);
791         stmmac_ptp_unregister(priv);
792 }
793
794 /**
795  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
796  *  @priv: driver private structure
797  *  @duplex: duplex passed to the next function
798  *  Description: It is used for configuring the flow control in all queues
799  */
800 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
801 {
802         u32 tx_cnt = priv->plat->tx_queues_to_use;
803
804         stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
805                         priv->pause, tx_cnt);
806 }
807
808 static void stmmac_validate(struct phylink_config *config,
809                             unsigned long *supported,
810                             struct phylink_link_state *state)
811 {
812         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
813         __ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
814         __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
815         int tx_cnt = priv->plat->tx_queues_to_use;
816         int max_speed = priv->plat->max_speed;
817
818         phylink_set(mac_supported, 10baseT_Half);
819         phylink_set(mac_supported, 10baseT_Full);
820         phylink_set(mac_supported, 100baseT_Half);
821         phylink_set(mac_supported, 100baseT_Full);
822         phylink_set(mac_supported, 1000baseT_Half);
823         phylink_set(mac_supported, 1000baseT_Full);
824         phylink_set(mac_supported, 1000baseKX_Full);
825
826         phylink_set(mac_supported, Autoneg);
827         phylink_set(mac_supported, Pause);
828         phylink_set(mac_supported, Asym_Pause);
829         phylink_set_port_modes(mac_supported);
830
831         /* Cut down 1G if asked to */
832         if ((max_speed > 0) && (max_speed < 1000)) {
833                 phylink_set(mask, 1000baseT_Full);
834                 phylink_set(mask, 1000baseX_Full);
835         } else if (priv->plat->has_xgmac) {
836                 if (!max_speed || (max_speed >= 2500)) {
837                         phylink_set(mac_supported, 2500baseT_Full);
838                         phylink_set(mac_supported, 2500baseX_Full);
839                 }
840                 if (!max_speed || (max_speed >= 5000)) {
841                         phylink_set(mac_supported, 5000baseT_Full);
842                 }
843                 if (!max_speed || (max_speed >= 10000)) {
844                         phylink_set(mac_supported, 10000baseSR_Full);
845                         phylink_set(mac_supported, 10000baseLR_Full);
846                         phylink_set(mac_supported, 10000baseER_Full);
847                         phylink_set(mac_supported, 10000baseLRM_Full);
848                         phylink_set(mac_supported, 10000baseT_Full);
849                         phylink_set(mac_supported, 10000baseKX4_Full);
850                         phylink_set(mac_supported, 10000baseKR_Full);
851                 }
852                 if (!max_speed || (max_speed >= 25000)) {
853                         phylink_set(mac_supported, 25000baseCR_Full);
854                         phylink_set(mac_supported, 25000baseKR_Full);
855                         phylink_set(mac_supported, 25000baseSR_Full);
856                 }
857                 if (!max_speed || (max_speed >= 40000)) {
858                         phylink_set(mac_supported, 40000baseKR4_Full);
859                         phylink_set(mac_supported, 40000baseCR4_Full);
860                         phylink_set(mac_supported, 40000baseSR4_Full);
861                         phylink_set(mac_supported, 40000baseLR4_Full);
862                 }
863                 if (!max_speed || (max_speed >= 50000)) {
864                         phylink_set(mac_supported, 50000baseCR2_Full);
865                         phylink_set(mac_supported, 50000baseKR2_Full);
866                         phylink_set(mac_supported, 50000baseSR2_Full);
867                         phylink_set(mac_supported, 50000baseKR_Full);
868                         phylink_set(mac_supported, 50000baseSR_Full);
869                         phylink_set(mac_supported, 50000baseCR_Full);
870                         phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
871                         phylink_set(mac_supported, 50000baseDR_Full);
872                 }
873                 if (!max_speed || (max_speed >= 100000)) {
874                         phylink_set(mac_supported, 100000baseKR4_Full);
875                         phylink_set(mac_supported, 100000baseSR4_Full);
876                         phylink_set(mac_supported, 100000baseCR4_Full);
877                         phylink_set(mac_supported, 100000baseLR4_ER4_Full);
878                         phylink_set(mac_supported, 100000baseKR2_Full);
879                         phylink_set(mac_supported, 100000baseSR2_Full);
880                         phylink_set(mac_supported, 100000baseCR2_Full);
881                         phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
882                         phylink_set(mac_supported, 100000baseDR2_Full);
883                 }
884         }
885
886         /* Half-Duplex can only work with a single queue */
887         if (tx_cnt > 1) {
888                 phylink_set(mask, 10baseT_Half);
889                 phylink_set(mask, 100baseT_Half);
890                 phylink_set(mask, 1000baseT_Half);
891         }
892
893         linkmode_and(supported, supported, mac_supported);
894         linkmode_andnot(supported, supported, mask);
895
896         linkmode_and(state->advertising, state->advertising, mac_supported);
897         linkmode_andnot(state->advertising, state->advertising, mask);
898
899         /* If PCS is supported, check which modes it supports. */
900         stmmac_xpcs_validate(priv, &priv->hw->xpcs_args, supported, state);
901 }
902
903 static void stmmac_mac_pcs_get_state(struct phylink_config *config,
904                                      struct phylink_link_state *state)
905 {
906         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
907
908         state->link = 0;
909         stmmac_xpcs_get_state(priv, &priv->hw->xpcs_args, state);
910 }
911
912 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
913                               const struct phylink_link_state *state)
914 {
915         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
916
917         stmmac_xpcs_config(priv, &priv->hw->xpcs_args, state);
918 }
919
920 static void stmmac_mac_an_restart(struct phylink_config *config)
921 {
922         /* Not Supported */
923 }
924
925 static void stmmac_mac_link_down(struct phylink_config *config,
926                                  unsigned int mode, phy_interface_t interface)
927 {
928         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
929
930         stmmac_mac_set(priv, priv->ioaddr, false);
931         priv->eee_active = false;
932         priv->tx_lpi_enabled = false;
933         stmmac_eee_init(priv);
934         stmmac_set_eee_pls(priv, priv->hw, false);
935 }
936
937 static void stmmac_mac_link_up(struct phylink_config *config,
938                                struct phy_device *phy,
939                                unsigned int mode, phy_interface_t interface,
940                                int speed, int duplex,
941                                bool tx_pause, bool rx_pause)
942 {
943         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
944         u32 ctrl;
945
946         stmmac_xpcs_link_up(priv, &priv->hw->xpcs_args, speed, interface);
947
948         ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
949         ctrl &= ~priv->hw->link.speed_mask;
950
951         if (interface == PHY_INTERFACE_MODE_USXGMII) {
952                 switch (speed) {
953                 case SPEED_10000:
954                         ctrl |= priv->hw->link.xgmii.speed10000;
955                         break;
956                 case SPEED_5000:
957                         ctrl |= priv->hw->link.xgmii.speed5000;
958                         break;
959                 case SPEED_2500:
960                         ctrl |= priv->hw->link.xgmii.speed2500;
961                         break;
962                 default:
963                         return;
964                 }
965         } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
966                 switch (speed) {
967                 case SPEED_100000:
968                         ctrl |= priv->hw->link.xlgmii.speed100000;
969                         break;
970                 case SPEED_50000:
971                         ctrl |= priv->hw->link.xlgmii.speed50000;
972                         break;
973                 case SPEED_40000:
974                         ctrl |= priv->hw->link.xlgmii.speed40000;
975                         break;
976                 case SPEED_25000:
977                         ctrl |= priv->hw->link.xlgmii.speed25000;
978                         break;
979                 case SPEED_10000:
980                         ctrl |= priv->hw->link.xgmii.speed10000;
981                         break;
982                 case SPEED_2500:
983                         ctrl |= priv->hw->link.speed2500;
984                         break;
985                 case SPEED_1000:
986                         ctrl |= priv->hw->link.speed1000;
987                         break;
988                 default:
989                         return;
990                 }
991         } else {
992                 switch (speed) {
993                 case SPEED_2500:
994                         ctrl |= priv->hw->link.speed2500;
995                         break;
996                 case SPEED_1000:
997                         ctrl |= priv->hw->link.speed1000;
998                         break;
999                 case SPEED_100:
1000                         ctrl |= priv->hw->link.speed100;
1001                         break;
1002                 case SPEED_10:
1003                         ctrl |= priv->hw->link.speed10;
1004                         break;
1005                 default:
1006                         return;
1007                 }
1008         }
1009
1010         priv->speed = speed;
1011
1012         if (priv->plat->fix_mac_speed)
1013                 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
1014
1015         if (!duplex)
1016                 ctrl &= ~priv->hw->link.duplex;
1017         else
1018                 ctrl |= priv->hw->link.duplex;
1019
1020         /* Flow Control operation */
1021         if (tx_pause && rx_pause)
1022                 stmmac_mac_flow_ctrl(priv, duplex);
1023
1024         writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1025
1026         stmmac_mac_set(priv, priv->ioaddr, true);
1027         if (phy && priv->dma_cap.eee) {
1028                 priv->eee_active = phy_init_eee(phy, 1) >= 0;
1029                 priv->eee_enabled = stmmac_eee_init(priv);
1030                 priv->tx_lpi_enabled = priv->eee_enabled;
1031                 stmmac_set_eee_pls(priv, priv->hw, true);
1032         }
1033 }
1034
1035 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1036         .validate = stmmac_validate,
1037         .mac_pcs_get_state = stmmac_mac_pcs_get_state,
1038         .mac_config = stmmac_mac_config,
1039         .mac_an_restart = stmmac_mac_an_restart,
1040         .mac_link_down = stmmac_mac_link_down,
1041         .mac_link_up = stmmac_mac_link_up,
1042 };
1043
1044 /**
1045  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1046  * @priv: driver private structure
1047  * Description: this is to verify if the HW supports the Physical Coding
1048  * Sublayer (PCS) interface, which can be used when the MAC is
1049  * configured for the TBI, RTBI, or SGMII PHY interface.
1050  */
1051 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1052 {
1053         int interface = priv->plat->interface;
1054
1055         if (priv->dma_cap.pcs) {
1056                 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1057                     (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1058                     (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1059                     (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1060                         netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1061                         priv->hw->pcs = STMMAC_PCS_RGMII;
1062                 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1063                         netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1064                         priv->hw->pcs = STMMAC_PCS_SGMII;
1065                 }
1066         }
1067 }
1068
1069 /**
1070  * stmmac_init_phy - PHY initialization
1071  * @dev: net device structure
1072  * Description: it initializes the driver's PHY state, and attaches the PHY
1073  * to the MAC driver.
1074  *  Return value:
1075  *  0 on success
1076  */
1077 static int stmmac_init_phy(struct net_device *dev)
1078 {
1079         struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1080         struct stmmac_priv *priv = netdev_priv(dev);
1081         struct device_node *node;
1082         int ret;
1083
1084         node = priv->plat->phylink_node;
1085
1086         if (node)
1087                 ret = phylink_of_phy_connect(priv->phylink, node, 0);
1088
1089         /* Some DT bindings do not set up the PHY handle. Let's try to
1090          * parse it manually.
1091          */
1092         if (!node || ret) {
1093                 int addr = priv->plat->phy_addr;
1094                 struct phy_device *phydev;
1095
1096                 phydev = mdiobus_get_phy(priv->mii, addr);
1097                 if (!phydev) {
1098                         netdev_err(priv->dev, "no phy at addr %d\n", addr);
1099                         return -ENODEV;
1100                 }
1101
1102                 ret = phylink_connect_phy(priv->phylink, phydev);
1103         }
1104
1105         phylink_ethtool_get_wol(priv->phylink, &wol);
1106         device_set_wakeup_capable(priv->device, !!wol.supported);
1107
1108         return ret;
1109 }
1110
1111 static int stmmac_phy_setup(struct stmmac_priv *priv)
1112 {
1113         struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
1114         int mode = priv->plat->phy_interface;
1115         struct phylink *phylink;
1116
1117         priv->phylink_config.dev = &priv->dev->dev;
1118         priv->phylink_config.type = PHYLINK_NETDEV;
1119         priv->phylink_config.pcs_poll = true;
1120
1121         if (!fwnode)
1122                 fwnode = dev_fwnode(priv->device);
1123
1124         phylink = phylink_create(&priv->phylink_config, fwnode,
1125                                  mode, &stmmac_phylink_mac_ops);
1126         if (IS_ERR(phylink))
1127                 return PTR_ERR(phylink);
1128
1129         priv->phylink = phylink;
1130         return 0;
1131 }
1132
1133 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1134 {
1135         u32 rx_cnt = priv->plat->rx_queues_to_use;
1136         unsigned int desc_size;
1137         void *head_rx;
1138         u32 queue;
1139
1140         /* Display RX rings */
1141         for (queue = 0; queue < rx_cnt; queue++) {
1142                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1143
1144                 pr_info("\tRX Queue %u rings\n", queue);
1145
1146                 if (priv->extend_desc) {
1147                         head_rx = (void *)rx_q->dma_erx;
1148                         desc_size = sizeof(struct dma_extended_desc);
1149                 } else {
1150                         head_rx = (void *)rx_q->dma_rx;
1151                         desc_size = sizeof(struct dma_desc);
1152                 }
1153
1154                 /* Display RX ring */
1155                 stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
1156                                     rx_q->dma_rx_phy, desc_size);
1157         }
1158 }
1159
1160 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1161 {
1162         u32 tx_cnt = priv->plat->tx_queues_to_use;
1163         unsigned int desc_size;
1164         void *head_tx;
1165         u32 queue;
1166
1167         /* Display TX rings */
1168         for (queue = 0; queue < tx_cnt; queue++) {
1169                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1170
1171                 pr_info("\tTX Queue %d rings\n", queue);
1172
1173                 if (priv->extend_desc) {
1174                         head_tx = (void *)tx_q->dma_etx;
1175                         desc_size = sizeof(struct dma_extended_desc);
1176                 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1177                         head_tx = (void *)tx_q->dma_entx;
1178                         desc_size = sizeof(struct dma_edesc);
1179                 } else {
1180                         head_tx = (void *)tx_q->dma_tx;
1181                         desc_size = sizeof(struct dma_desc);
1182                 }
1183
1184                 stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
1185                                     tx_q->dma_tx_phy, desc_size);
1186         }
1187 }
1188
1189 static void stmmac_display_rings(struct stmmac_priv *priv)
1190 {
1191         /* Display RX ring */
1192         stmmac_display_rx_rings(priv);
1193
1194         /* Display TX ring */
1195         stmmac_display_tx_rings(priv);
1196 }
1197
1198 static int stmmac_set_bfsize(int mtu, int bufsize)
1199 {
1200         int ret = bufsize;
1201
1202         if (mtu >= BUF_SIZE_8KiB)
1203                 ret = BUF_SIZE_16KiB;
1204         else if (mtu >= BUF_SIZE_4KiB)
1205                 ret = BUF_SIZE_8KiB;
1206         else if (mtu >= BUF_SIZE_2KiB)
1207                 ret = BUF_SIZE_4KiB;
1208         else if (mtu > DEFAULT_BUFSIZE)
1209                 ret = BUF_SIZE_2KiB;
1210         else
1211                 ret = DEFAULT_BUFSIZE;
1212
1213         return ret;
1214 }
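/* Worked example (illustrative, assuming the usual 2/4/8/16 KiB values for
 * the BUF_SIZE_* constants): an MTU of 3000 is below BUF_SIZE_8KiB and
 * BUF_SIZE_4KiB but at least BUF_SIZE_2KiB, so the DMA buffer size becomes
 * BUF_SIZE_4KiB; a standard MTU of 1500 is not greater than DEFAULT_BUFSIZE
 * (1536) and falls through to DEFAULT_BUFSIZE.
 */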
1215
1216 /**
1217  * stmmac_clear_rx_descriptors - clear RX descriptors
1218  * @priv: driver private structure
1219  * @queue: RX queue index
1220  * Description: this function is called to clear the RX descriptors
1221  * whether basic or extended descriptors are in use.
1222  */
1223 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1224 {
1225         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1226         int i;
1227
1228         /* Clear the RX descriptors */
1229         for (i = 0; i < priv->dma_rx_size; i++)
1230                 if (priv->extend_desc)
1231                         stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1232                                         priv->use_riwt, priv->mode,
1233                                         (i == priv->dma_rx_size - 1),
1234                                         priv->dma_buf_sz);
1235                 else
1236                         stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1237                                         priv->use_riwt, priv->mode,
1238                                         (i == priv->dma_rx_size - 1),
1239                                         priv->dma_buf_sz);
1240 }
1241
1242 /**
1243  * stmmac_clear_tx_descriptors - clear tx descriptors
1244  * @priv: driver private structure
1245  * @queue: TX queue index.
1246  * Description: this function is called to clear the TX descriptors
1247  * whether basic or extended descriptors are in use.
1248  */
1249 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1250 {
1251         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1252         int i;
1253
1254         /* Clear the TX descriptors */
1255         for (i = 0; i < priv->dma_tx_size; i++) {
1256                 int last = (i == (priv->dma_tx_size - 1));
1257                 struct dma_desc *p;
1258
1259                 if (priv->extend_desc)
1260                         p = &tx_q->dma_etx[i].basic;
1261                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1262                         p = &tx_q->dma_entx[i].basic;
1263                 else
1264                         p = &tx_q->dma_tx[i];
1265
1266                 stmmac_init_tx_desc(priv, p, priv->mode, last);
1267         }
1268 }
1269
1270 /**
1271  * stmmac_clear_descriptors - clear descriptors
1272  * @priv: driver private structure
1273  * Description: this function is called to clear the TX and RX descriptors
1274  * whether basic or extended descriptors are in use.
1275  */
1276 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1277 {
1278         u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1279         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1280         u32 queue;
1281
1282         /* Clear the RX descriptors */
1283         for (queue = 0; queue < rx_queue_cnt; queue++)
1284                 stmmac_clear_rx_descriptors(priv, queue);
1285
1286         /* Clear the TX descriptors */
1287         for (queue = 0; queue < tx_queue_cnt; queue++)
1288                 stmmac_clear_tx_descriptors(priv, queue);
1289 }
1290
1291 /**
1292  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1293  * @priv: driver private structure
1294  * @p: descriptor pointer
1295  * @i: descriptor index
1296  * @flags: gfp flag
1297  * @queue: RX queue index
1298  * Description: this function is called to allocate a receive buffer, perform
1299  * the DMA mapping and init the descriptor.
1300  */
1301 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1302                                   int i, gfp_t flags, u32 queue)
1303 {
1304         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1305         struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1306
1307         buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
1308         if (!buf->page)
1309                 return -ENOMEM;
1310
1311         if (priv->sph) {
1312                 buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
1313                 if (!buf->sec_page)
1314                         return -ENOMEM;
1315
1316                 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1317                 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1318         } else {
1319                 buf->sec_page = NULL;
1320                 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1321         }
1322
1323         buf->addr = page_pool_get_dma_addr(buf->page);
1324         stmmac_set_desc_addr(priv, p, buf->addr);
1325         if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1326                 stmmac_init_desc3(priv, p);
1327
1328         return 0;
1329 }
1330
1331 /**
1332  * stmmac_free_rx_buffer - free RX dma buffers
1333  * @priv: private structure
1334  * @queue: RX queue index
1335  * @i: buffer index.
1336  */
1337 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1338 {
1339         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1340         struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1341
1342         if (buf->page)
1343                 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1344         buf->page = NULL;
1345
1346         if (buf->sec_page)
1347                 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1348         buf->sec_page = NULL;
1349 }
1350
1351 /**
1352  * stmmac_free_tx_buffer - free TX dma buffers
1353  * @priv: private structure
1354  * @queue: TX queue index
1355  * @i: buffer index.
1356  */
1357 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1358 {
1359         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1360
1361         if (tx_q->tx_skbuff_dma[i].buf) {
1362                 if (tx_q->tx_skbuff_dma[i].map_as_page)
1363                         dma_unmap_page(priv->device,
1364                                        tx_q->tx_skbuff_dma[i].buf,
1365                                        tx_q->tx_skbuff_dma[i].len,
1366                                        DMA_TO_DEVICE);
1367                 else
1368                         dma_unmap_single(priv->device,
1369                                          tx_q->tx_skbuff_dma[i].buf,
1370                                          tx_q->tx_skbuff_dma[i].len,
1371                                          DMA_TO_DEVICE);
1372         }
1373
1374         if (tx_q->tx_skbuff[i]) {
1375                 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1376                 tx_q->tx_skbuff[i] = NULL;
1377                 tx_q->tx_skbuff_dma[i].buf = 0;
1378                 tx_q->tx_skbuff_dma[i].map_as_page = false;
1379         }
1380 }
1381
1382 /**
1383  * init_dma_rx_desc_rings - init the RX descriptor rings
1384  * @dev: net device structure
1385  * @flags: gfp flag.
1386  * Description: this function initializes the DMA RX descriptors
1387  * and allocates the receive buffers. It supports the chained and ring
1388  * modes.
1389  */
1390 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1391 {
1392         struct stmmac_priv *priv = netdev_priv(dev);
1393         u32 rx_count = priv->plat->rx_queues_to_use;
1394         int ret = -ENOMEM;
1395         int queue;
1396         int i;
1397
1398         /* RX INITIALIZATION */
1399         netif_dbg(priv, probe, priv->dev,
1400                   "SKB addresses:\nskb\t\tskb data\tdma data\n");
1401
1402         for (queue = 0; queue < rx_count; queue++) {
1403                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1404
1405                 netif_dbg(priv, probe, priv->dev,
1406                           "(%s) dma_rx_phy=0x%08x\n", __func__,
1407                           (u32)rx_q->dma_rx_phy);
1408
1409                 stmmac_clear_rx_descriptors(priv, queue);
1410
1411                 for (i = 0; i < priv->dma_rx_size; i++) {
1412                         struct dma_desc *p;
1413
1414                         if (priv->extend_desc)
1415                                 p = &((rx_q->dma_erx + i)->basic);
1416                         else
1417                                 p = rx_q->dma_rx + i;
1418
1419                         ret = stmmac_init_rx_buffers(priv, p, i, flags,
1420                                                      queue);
1421                         if (ret)
1422                                 goto err_init_rx_buffers;
1423                 }
1424
1425                 rx_q->cur_rx = 0;
1426                 rx_q->dirty_rx = (unsigned int)(i - priv->dma_rx_size);
1427
1428                 /* Setup the chained descriptor addresses */
1429                 if (priv->mode == STMMAC_CHAIN_MODE) {
1430                         if (priv->extend_desc)
1431                                 stmmac_mode_init(priv, rx_q->dma_erx,
1432                                                  rx_q->dma_rx_phy,
1433                                                  priv->dma_rx_size, 1);
1434                         else
1435                                 stmmac_mode_init(priv, rx_q->dma_rx,
1436                                                  rx_q->dma_rx_phy,
1437                                                  priv->dma_rx_size, 0);
1438                 }
1439         }
1440
1441         return 0;
1442
1443 err_init_rx_buffers:
1444         while (queue >= 0) {
1445                 while (--i >= 0)
1446                         stmmac_free_rx_buffer(priv, queue, i);
1447
1448                 if (queue == 0)
1449                         break;
1450
1451                 i = priv->dma_rx_size;
1452                 queue--;
1453         }
1454
1455         return ret;
1456 }
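/*
 * Note on the error path above: buffers are released in reverse order,
 * first the partially filled queue (indexes below i) and then every
 * already initialized queue, for which a full ring of priv->dma_rx_size
 * buffers is freed. On success, dirty_rx ends up as (i - priv->dma_rx_size),
 * which is 0 in unsigned arithmetic since i == priv->dma_rx_size after
 * the loop.
 */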
1457
1458 /**
1459  * init_dma_tx_desc_rings - init the TX descriptor rings
1460  * @dev: net device structure.
1461  * Description: this function initializes the DMA TX descriptors
1462  * and the per-entry TX bookkeeping. It supports the chained and ring
1463  * modes.
1464  */
1465 static int init_dma_tx_desc_rings(struct net_device *dev)
1466 {
1467         struct stmmac_priv *priv = netdev_priv(dev);
1468         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1469         u32 queue;
1470         int i;
1471
1472         for (queue = 0; queue < tx_queue_cnt; queue++) {
1473                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1474
1475                 netif_dbg(priv, probe, priv->dev,
1476                           "(%s) dma_tx_phy=0x%08x\n", __func__,
1477                          (u32)tx_q->dma_tx_phy);
1478
1479                 /* Setup the chained descriptor addresses */
1480                 if (priv->mode == STMMAC_CHAIN_MODE) {
1481                         if (priv->extend_desc)
1482                                 stmmac_mode_init(priv, tx_q->dma_etx,
1483                                                  tx_q->dma_tx_phy,
1484                                                  priv->dma_tx_size, 1);
1485                         else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1486                                 stmmac_mode_init(priv, tx_q->dma_tx,
1487                                                  tx_q->dma_tx_phy,
1488                                                  priv->dma_tx_size, 0);
1489                 }
1490
1491                 for (i = 0; i < priv->dma_tx_size; i++) {
1492                         struct dma_desc *p;
1493                         if (priv->extend_desc)
1494                                 p = &((tx_q->dma_etx + i)->basic);
1495                         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1496                                 p = &((tx_q->dma_entx + i)->basic);
1497                         else
1498                                 p = tx_q->dma_tx + i;
1499
1500                         stmmac_clear_desc(priv, p);
1501
1502                         tx_q->tx_skbuff_dma[i].buf = 0;
1503                         tx_q->tx_skbuff_dma[i].map_as_page = false;
1504                         tx_q->tx_skbuff_dma[i].len = 0;
1505                         tx_q->tx_skbuff_dma[i].last_segment = false;
1506                         tx_q->tx_skbuff[i] = NULL;
1507                 }
1508
1509                 tx_q->dirty_tx = 0;
1510                 tx_q->cur_tx = 0;
1511                 tx_q->mss = 0;
1512
1513                 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1514         }
1515
1516         return 0;
1517 }
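/*
 * Note: three TX descriptor layouts are handled above: extended
 * descriptors (dma_etx), enhanced descriptors used when TBS is available
 * (dma_entx) and basic descriptors (dma_tx). Chain mode linking is only
 * applied to non-TBS rings. netdev_tx_reset_queue() also clears the
 * byte queue limit (BQL) state so it matches the freshly reset
 * cur_tx/dirty_tx indexes.
 */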
1518
1519 /**
1520  * init_dma_desc_rings - init the RX/TX descriptor rings
1521  * @dev: net device structure
1522  * @flags: gfp flag.
1523  * Description: this function initializes the DMA RX/TX descriptors
1524  * and allocates the socket buffers. It supports the chained and ring
1525  * modes.
1526  */
1527 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1528 {
1529         struct stmmac_priv *priv = netdev_priv(dev);
1530         int ret;
1531
1532         ret = init_dma_rx_desc_rings(dev, flags);
1533         if (ret)
1534                 return ret;
1535
1536         ret = init_dma_tx_desc_rings(dev);
1537
1538         stmmac_clear_descriptors(priv);
1539
1540         if (netif_msg_hw(priv))
1541                 stmmac_display_rings(priv);
1542
1543         return ret;
1544 }
1545
1546 /**
1547  * dma_free_rx_skbufs - free RX dma buffers
1548  * @priv: private structure
1549  * @queue: RX queue index
1550  */
1551 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1552 {
1553         int i;
1554
1555         for (i = 0; i < priv->dma_rx_size; i++)
1556                 stmmac_free_rx_buffer(priv, queue, i);
1557 }
1558
1559 /**
1560  * dma_free_tx_skbufs - free TX dma buffers
1561  * @priv: private structure
1562  * @queue: TX queue index
1563  */
1564 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1565 {
1566         int i;
1567
1568         for (i = 0; i < priv->dma_tx_size; i++)
1569                 stmmac_free_tx_buffer(priv, queue, i);
1570 }
1571
1572 /**
1573  * stmmac_free_tx_skbufs - free TX skb buffers
1574  * @priv: private structure
1575  */
1576 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1577 {
1578         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1579         u32 queue;
1580
1581         for (queue = 0; queue < tx_queue_cnt; queue++)
1582                 dma_free_tx_skbufs(priv, queue);
1583 }
1584
1585 /**
1586  * free_dma_rx_desc_resources - free RX dma desc resources
1587  * @priv: private structure
1588  */
1589 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1590 {
1591         u32 rx_count = priv->plat->rx_queues_to_use;
1592         u32 queue;
1593
1594         /* Free RX queue resources */
1595         for (queue = 0; queue < rx_count; queue++) {
1596                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1597
1598                 /* Release the DMA RX socket buffers */
1599                 dma_free_rx_skbufs(priv, queue);
1600
1601                 /* Free DMA regions of consistent memory previously allocated */
1602                 if (!priv->extend_desc)
1603                         dma_free_coherent(priv->device, priv->dma_rx_size *
1604                                           sizeof(struct dma_desc),
1605                                           rx_q->dma_rx, rx_q->dma_rx_phy);
1606                 else
1607                         dma_free_coherent(priv->device, priv->dma_rx_size *
1608                                           sizeof(struct dma_extended_desc),
1609                                           rx_q->dma_erx, rx_q->dma_rx_phy);
1610
1611                 kfree(rx_q->buf_pool);
1612                 if (rx_q->page_pool)
1613                         page_pool_destroy(rx_q->page_pool);
1614         }
1615 }
1616
1617 /**
1618  * free_dma_tx_desc_resources - free TX dma desc resources
1619  * @priv: private structure
1620  */
1621 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1622 {
1623         u32 tx_count = priv->plat->tx_queues_to_use;
1624         u32 queue;
1625
1626         /* Free TX queue resources */
1627         for (queue = 0; queue < tx_count; queue++) {
1628                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1629                 size_t size;
1630                 void *addr;
1631
1632                 /* Release the DMA TX socket buffers */
1633                 dma_free_tx_skbufs(priv, queue);
1634
1635                 if (priv->extend_desc) {
1636                         size = sizeof(struct dma_extended_desc);
1637                         addr = tx_q->dma_etx;
1638                 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1639                         size = sizeof(struct dma_edesc);
1640                         addr = tx_q->dma_entx;
1641                 } else {
1642                         size = sizeof(struct dma_desc);
1643                         addr = tx_q->dma_tx;
1644                 }
1645
1646                 size *= priv->dma_tx_size;
1647
1648                 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1649
1650                 kfree(tx_q->tx_skbuff_dma);
1651                 kfree(tx_q->tx_skbuff);
1652         }
1653 }
1654
1655 /**
1656  * alloc_dma_rx_desc_resources - alloc RX resources.
1657  * @priv: private structure
1658  * Description: according to which descriptor type is in use (extended or
1659  * basic), this function allocates the RX path resources: a per-queue page
1660  * pool, the buf_pool bookkeeping array and the coherent DMA region holding
1661  * the RX descriptor ring, in order to allow a zero-copy receive path.
1662  */
1663 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1664 {
1665         u32 rx_count = priv->plat->rx_queues_to_use;
1666         int ret = -ENOMEM;
1667         u32 queue;
1668
1669         /* RX queues buffers and DMA */
1670         for (queue = 0; queue < rx_count; queue++) {
1671                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1672                 struct page_pool_params pp_params = { 0 };
1673                 unsigned int num_pages;
1674
1675                 rx_q->queue_index = queue;
1676                 rx_q->priv_data = priv;
1677
1678                 pp_params.flags = PP_FLAG_DMA_MAP;
1679                 pp_params.pool_size = priv->dma_rx_size;
1680                 num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
1681                 pp_params.order = ilog2(num_pages);
1682                 pp_params.nid = dev_to_node(priv->device);
1683                 pp_params.dev = priv->device;
1684                 pp_params.dma_dir = DMA_FROM_DEVICE;
1685
1686                 rx_q->page_pool = page_pool_create(&pp_params);
1687                 if (IS_ERR(rx_q->page_pool)) {
1688                         ret = PTR_ERR(rx_q->page_pool);
1689                         rx_q->page_pool = NULL;
1690                         goto err_dma;
1691                 }
1692
1693                 rx_q->buf_pool = kcalloc(priv->dma_rx_size,
1694                                          sizeof(*rx_q->buf_pool),
1695                                          GFP_KERNEL);
1696                 if (!rx_q->buf_pool)
1697                         goto err_dma;
1698
1699                 if (priv->extend_desc) {
1700                         rx_q->dma_erx = dma_alloc_coherent(priv->device,
1701                                                            priv->dma_rx_size *
1702                                                            sizeof(struct dma_extended_desc),
1703                                                            &rx_q->dma_rx_phy,
1704                                                            GFP_KERNEL);
1705                         if (!rx_q->dma_erx)
1706                                 goto err_dma;
1707
1708                 } else {
1709                         rx_q->dma_rx = dma_alloc_coherent(priv->device,
1710                                                           priv->dma_rx_size *
1711                                                           sizeof(struct dma_desc),
1712                                                           &rx_q->dma_rx_phy,
1713                                                           GFP_KERNEL);
1714                         if (!rx_q->dma_rx)
1715                                 goto err_dma;
1716                 }
1717         }
1718
1719         return 0;
1720
1721 err_dma:
1722         free_dma_rx_desc_resources(priv);
1723
1724         return ret;
1725 }
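/*
 * The page_pool order above is derived from the DMA buffer size. For
 * example, with 4 KiB pages, a dma_buf_sz of 1536 needs a single page
 * (order 0), while an 8 KiB buffer needs two contiguous pages (order 1),
 * since order = ilog2(DIV_ROUND_UP(dma_buf_sz, PAGE_SIZE)).
 */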
1726
1727 /**
1728  * alloc_dma_tx_desc_resources - alloc TX resources.
1729  * @priv: private structure
1730  * Description: according to which descriptor type is in use (extended or
1731  * basic), this function allocates the TX path resources: the tx_skbuff and
1732  * tx_skbuff_dma bookkeeping arrays and the coherent DMA region holding the
1733  * descriptor ring of each TX queue.
1734  */
1735 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1736 {
1737         u32 tx_count = priv->plat->tx_queues_to_use;
1738         int ret = -ENOMEM;
1739         u32 queue;
1740
1741         /* TX queues buffers and DMA */
1742         for (queue = 0; queue < tx_count; queue++) {
1743                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1744                 size_t size;
1745                 void *addr;
1746
1747                 tx_q->queue_index = queue;
1748                 tx_q->priv_data = priv;
1749
1750                 tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
1751                                               sizeof(*tx_q->tx_skbuff_dma),
1752                                               GFP_KERNEL);
1753                 if (!tx_q->tx_skbuff_dma)
1754                         goto err_dma;
1755
1756                 tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
1757                                           sizeof(struct sk_buff *),
1758                                           GFP_KERNEL);
1759                 if (!tx_q->tx_skbuff)
1760                         goto err_dma;
1761
1762                 if (priv->extend_desc)
1763                         size = sizeof(struct dma_extended_desc);
1764                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1765                         size = sizeof(struct dma_edesc);
1766                 else
1767                         size = sizeof(struct dma_desc);
1768
1769                 size *= priv->dma_tx_size;
1770
1771                 addr = dma_alloc_coherent(priv->device, size,
1772                                           &tx_q->dma_tx_phy, GFP_KERNEL);
1773                 if (!addr)
1774                         goto err_dma;
1775
1776                 if (priv->extend_desc)
1777                         tx_q->dma_etx = addr;
1778                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1779                         tx_q->dma_entx = addr;
1780                 else
1781                         tx_q->dma_tx = addr;
1782         }
1783
1784         return 0;
1785
1786 err_dma:
1787         free_dma_tx_desc_resources(priv);
1788         return ret;
1789 }
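/*
 * As in the RX path, the descriptor element size depends on the ring
 * flavour: struct dma_extended_desc for extended descriptors, struct
 * dma_edesc when TBS is available and plain struct dma_desc otherwise.
 * Each TX queue gets its whole ring as a single coherent DMA allocation.
 */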
1790
1791 /**
1792  * alloc_dma_desc_resources - alloc TX/RX resources.
1793  * @priv: private structure
1794  * Description: according to which descriptor type is in use (extended or
1795  * basic), this function allocates the resources for the TX and RX paths. In
1796  * case of reception, for example, it pre-allocates the RX socket buffers in
1797  * order to allow a zero-copy mechanism.
1798  */
1799 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1800 {
1801         /* RX Allocation */
1802         int ret = alloc_dma_rx_desc_resources(priv);
1803
1804         if (ret)
1805                 return ret;
1806
1807         ret = alloc_dma_tx_desc_resources(priv);
1808
1809         return ret;
1810 }
1811
1812 /**
1813  * free_dma_desc_resources - free dma desc resources
1814  * @priv: private structure
1815  */
1816 static void free_dma_desc_resources(struct stmmac_priv *priv)
1817 {
1818         /* Release the DMA RX socket buffers */
1819         free_dma_rx_desc_resources(priv);
1820
1821         /* Release the DMA TX socket buffers */
1822         free_dma_tx_desc_resources(priv);
1823 }
1824
1825 /**
1826  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1827  *  @priv: driver private structure
1828  *  Description: It is used for enabling the rx queues in the MAC
1829  */
1830 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1831 {
1832         u32 rx_queues_count = priv->plat->rx_queues_to_use;
1833         int queue;
1834         u8 mode;
1835
1836         for (queue = 0; queue < rx_queues_count; queue++) {
1837                 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1838                 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1839         }
1840 }
1841
1842 /**
1843  * stmmac_start_rx_dma - start RX DMA channel
1844  * @priv: driver private structure
1845  * @chan: RX channel index
1846  * Description:
1847  * This starts a RX DMA channel
1848  */
1849 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1850 {
1851         netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1852         stmmac_start_rx(priv, priv->ioaddr, chan);
1853 }
1854
1855 /**
1856  * stmmac_start_tx_dma - start TX DMA channel
1857  * @priv: driver private structure
1858  * @chan: TX channel index
1859  * Description:
1860  * This starts a TX DMA channel
1861  */
1862 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1863 {
1864         netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1865         stmmac_start_tx(priv, priv->ioaddr, chan);
1866 }
1867
1868 /**
1869  * stmmac_stop_rx_dma - stop RX DMA channel
1870  * @priv: driver private structure
1871  * @chan: RX channel index
1872  * Description:
1873  * This stops a RX DMA channel
1874  */
1875 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1876 {
1877         netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1878         stmmac_stop_rx(priv, priv->ioaddr, chan);
1879 }
1880
1881 /**
1882  * stmmac_stop_tx_dma - stop TX DMA channel
1883  * @priv: driver private structure
1884  * @chan: TX channel index
1885  * Description:
1886  * This stops a TX DMA channel
1887  */
1888 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1889 {
1890         netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1891         stmmac_stop_tx(priv, priv->ioaddr, chan);
1892 }
1893
1894 /**
1895  * stmmac_start_all_dma - start all RX and TX DMA channels
1896  * @priv: driver private structure
1897  * Description:
1898  * This starts all the RX and TX DMA channels
1899  */
1900 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1901 {
1902         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1903         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1904         u32 chan = 0;
1905
1906         for (chan = 0; chan < rx_channels_count; chan++)
1907                 stmmac_start_rx_dma(priv, chan);
1908
1909         for (chan = 0; chan < tx_channels_count; chan++)
1910                 stmmac_start_tx_dma(priv, chan);
1911 }
1912
1913 /**
1914  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1915  * @priv: driver private structure
1916  * Description:
1917  * This stops the RX and TX DMA channels
1918  */
1919 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1920 {
1921         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1922         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1923         u32 chan = 0;
1924
1925         for (chan = 0; chan < rx_channels_count; chan++)
1926                 stmmac_stop_rx_dma(priv, chan);
1927
1928         for (chan = 0; chan < tx_channels_count; chan++)
1929                 stmmac_stop_tx_dma(priv, chan);
1930 }
1931
1932 /**
1933  *  stmmac_dma_operation_mode - HW DMA operation mode
1934  *  @priv: driver private structure
1935  *  Description: it is used for configuring the DMA operation mode register in
1936  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1937  */
1938 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1939 {
1940         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1941         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1942         int rxfifosz = priv->plat->rx_fifo_size;
1943         int txfifosz = priv->plat->tx_fifo_size;
1944         u32 txmode = 0;
1945         u32 rxmode = 0;
1946         u32 chan = 0;
1947         u8 qmode = 0;
1948
1949         if (rxfifosz == 0)
1950                 rxfifosz = priv->dma_cap.rx_fifo_size;
1951         if (txfifosz == 0)
1952                 txfifosz = priv->dma_cap.tx_fifo_size;
1953
1954         /* Adjust for real per queue fifo size */
1955         rxfifosz /= rx_channels_count;
1956         txfifosz /= tx_channels_count;
1957
1958         if (priv->plat->force_thresh_dma_mode) {
1959                 txmode = tc;
1960                 rxmode = tc;
1961         } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1962                 /*
1963                  * In case of GMAC, SF mode can be enabled
1964                  * to perform the TX COE in HW. This depends on:
1965                  * 1) TX COE if actually supported
1966                  * 2) There is no bugged Jumbo frame support
1967                  *    that needs to not insert csum in the TDES.
1968                  */
1969                 txmode = SF_DMA_MODE;
1970                 rxmode = SF_DMA_MODE;
1971                 priv->xstats.threshold = SF_DMA_MODE;
1972         } else {
1973                 txmode = tc;
1974                 rxmode = SF_DMA_MODE;
1975         }
1976
1977         /* configure all channels */
1978         for (chan = 0; chan < rx_channels_count; chan++) {
1979                 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1980
1981                 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1982                                 rxfifosz, qmode);
1983                 stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
1984                                 chan);
1985         }
1986
1987         for (chan = 0; chan < tx_channels_count; chan++) {
1988                 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1989
1990                 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1991                                 txfifosz, qmode);
1992         }
1993 }
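/*
 * Example of the per-queue FIFO split above: a MAC with an 8 KiB RX FIFO
 * and 4 RX channels programs 2 KiB per channel. TX falls back to
 * Store-and-Forward (SF_DMA_MODE) whenever TX checksum offload is in use,
 * since the whole frame generally has to be buffered before the checksum
 * can be inserted; otherwise the 'tc' module parameter sets the threshold.
 */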
1994
1995 /**
1996  * stmmac_tx_clean - to manage the transmission completion
1997  * @priv: driver private structure
1998  * @budget: napi budget limiting this function's packet handling
1999  * @queue: TX queue index
2000  * Description: it reclaims the transmit resources after transmission completes.
2001  */
2002 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2003 {
2004         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2005         unsigned int bytes_compl = 0, pkts_compl = 0;
2006         unsigned int entry, count = 0;
2007
2008         __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2009
2010         priv->xstats.tx_clean++;
2011
2012         entry = tx_q->dirty_tx;
2013         while ((entry != tx_q->cur_tx) && (count < budget)) {
2014                 struct sk_buff *skb = tx_q->tx_skbuff[entry];
2015                 struct dma_desc *p;
2016                 int status;
2017
2018                 if (priv->extend_desc)
2019                         p = (struct dma_desc *)(tx_q->dma_etx + entry);
2020                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2021                         p = &tx_q->dma_entx[entry].basic;
2022                 else
2023                         p = tx_q->dma_tx + entry;
2024
2025                 status = stmmac_tx_status(priv, &priv->dev->stats,
2026                                 &priv->xstats, p, priv->ioaddr);
2027                 /* Check if the descriptor is owned by the DMA */
2028                 if (unlikely(status & tx_dma_own))
2029                         break;
2030
2031                 count++;
2032
2033                 /* Make sure descriptor fields are read after reading
2034                  * the own bit.
2035                  */
2036                 dma_rmb();
2037
2038                 /* Just consider the last segment and ...*/
2039                 if (likely(!(status & tx_not_ls))) {
2040                         /* ... verify the status error condition */
2041                         if (unlikely(status & tx_err)) {
2042                                 priv->dev->stats.tx_errors++;
2043                         } else {
2044                                 priv->dev->stats.tx_packets++;
2045                                 priv->xstats.tx_pkt_n++;
2046                         }
2047                         stmmac_get_tx_hwtstamp(priv, p, skb);
2048                 }
2049
2050                 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
2051                         if (tx_q->tx_skbuff_dma[entry].map_as_page)
2052                                 dma_unmap_page(priv->device,
2053                                                tx_q->tx_skbuff_dma[entry].buf,
2054                                                tx_q->tx_skbuff_dma[entry].len,
2055                                                DMA_TO_DEVICE);
2056                         else
2057                                 dma_unmap_single(priv->device,
2058                                                  tx_q->tx_skbuff_dma[entry].buf,
2059                                                  tx_q->tx_skbuff_dma[entry].len,
2060                                                  DMA_TO_DEVICE);
2061                         tx_q->tx_skbuff_dma[entry].buf = 0;
2062                         tx_q->tx_skbuff_dma[entry].len = 0;
2063                         tx_q->tx_skbuff_dma[entry].map_as_page = false;
2064                 }
2065
2066                 stmmac_clean_desc3(priv, tx_q, p);
2067
2068                 tx_q->tx_skbuff_dma[entry].last_segment = false;
2069                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2070
2071                 if (likely(skb != NULL)) {
2072                         pkts_compl++;
2073                         bytes_compl += skb->len;
2074                         dev_consume_skb_any(skb);
2075                         tx_q->tx_skbuff[entry] = NULL;
2076                 }
2077
2078                 stmmac_release_tx_desc(priv, p, priv->mode);
2079
2080                 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
2081         }
2082         tx_q->dirty_tx = entry;
2083
2084         netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2085                                   pkts_compl, bytes_compl);
2086
2087         if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2088                                                                 queue))) &&
2089             stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2090
2091                 netif_dbg(priv, tx_done, priv->dev,
2092                           "%s: restart transmit\n", __func__);
2093                 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2094         }
2095
2096         if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2097             priv->eee_sw_timer_en) {
2098                 stmmac_enable_eee_mode(priv);
2099                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2100         }
2101
2102         /* We still have pending packets, let's call for a new scheduling */
2103         if (tx_q->dirty_tx != tx_q->cur_tx)
2104                 hrtimer_start(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer),
2105                               HRTIMER_MODE_REL);
2106
2107         __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2108
2109         return count;
2110 }
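/*
 * Note: reclaim stops when the ring is empty (dirty_tx == cur_tx), when
 * the NAPI budget is exhausted or when a descriptor is still owned by the
 * DMA. The dma_rmb() above orders the read of the own bit against the
 * other descriptor fields. Completed packets/bytes are reported to BQL
 * via netdev_tx_completed_queue(), and the netdev queue is woken up once
 * more than STMMAC_TX_THRESH descriptors are free again.
 */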
2111
2112 /**
2113  * stmmac_tx_err - to manage the tx error
2114  * @priv: driver private structure
2115  * @chan: channel index
2116  * Description: it cleans the descriptors and restarts the transmission
2117  * in case of transmission errors.
2118  */
2119 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2120 {
2121         struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2122
2123         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2124
2125         stmmac_stop_tx_dma(priv, chan);
2126         dma_free_tx_skbufs(priv, chan);
2127         stmmac_clear_tx_descriptors(priv, chan);
2128         tx_q->dirty_tx = 0;
2129         tx_q->cur_tx = 0;
2130         tx_q->mss = 0;
2131         netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
2132         stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2133                             tx_q->dma_tx_phy, chan);
2134         stmmac_start_tx_dma(priv, chan);
2135
2136         priv->dev->stats.tx_errors++;
2137         netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2138 }
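/*
 * Recovery above: the netdev queue is stopped, the TX DMA channel halted,
 * every pending skb freed and the ring reset to a clean state (including
 * the BQL counters via netdev_tx_reset_queue()) before the channel is
 * reprogrammed and restarted.
 */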
2139
2140 /**
2141  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2142  *  @priv: driver private structure
2143  *  @txmode: TX operating mode
2144  *  @rxmode: RX operating mode
2145  *  @chan: channel index
2146  *  Description: it is used for configuring the DMA operation mode at
2147  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2148  *  mode.
2149  */
2150 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2151                                           u32 rxmode, u32 chan)
2152 {
2153         u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2154         u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2155         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2156         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2157         int rxfifosz = priv->plat->rx_fifo_size;
2158         int txfifosz = priv->plat->tx_fifo_size;
2159
2160         if (rxfifosz == 0)
2161                 rxfifosz = priv->dma_cap.rx_fifo_size;
2162         if (txfifosz == 0)
2163                 txfifosz = priv->dma_cap.tx_fifo_size;
2164
2165         /* Adjust for real per queue fifo size */
2166         rxfifosz /= rx_channels_count;
2167         txfifosz /= tx_channels_count;
2168
2169         stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2170         stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2171 }
2172
2173 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2174 {
2175         int ret;
2176
2177         ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2178                         priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2179         if (ret && (ret != -EINVAL)) {
2180                 stmmac_global_err(priv);
2181                 return true;
2182         }
2183
2184         return false;
2185 }
2186
2187 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
2188 {
2189         int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2190                                                  &priv->xstats, chan);
2191         struct stmmac_channel *ch = &priv->channel[chan];
2192         unsigned long flags;
2193
2194         if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2195                 if (napi_schedule_prep(&ch->rx_napi)) {
2196                         spin_lock_irqsave(&ch->lock, flags);
2197                         stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2198                         spin_unlock_irqrestore(&ch->lock, flags);
2199                         __napi_schedule(&ch->rx_napi);
2200                 }
2201         }
2202
2203         if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2204                 if (napi_schedule_prep(&ch->tx_napi)) {
2205                         spin_lock_irqsave(&ch->lock, flags);
2206                         stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2207                         spin_unlock_irqrestore(&ch->lock, flags);
2208                         __napi_schedule(&ch->tx_napi);
2209                 }
2210         }
2211
2212         return status;
2213 }
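/*
 * Before scheduling NAPI, the matching RX or TX DMA interrupt is masked
 * under ch->lock so the same event cannot be raised again while the poll
 * routine runs; the poll routine is expected to re-enable it once its
 * work is done.
 */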
2214
2215 /**
2216  * stmmac_dma_interrupt - DMA ISR
2217  * @priv: driver private structure
2218  * Description: this is the DMA ISR. It is called by the main ISR.
2219  * It calls the dwmac dma routine and schedules the poll method in case
2220  * some work can be done.
2221  */
2222 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2223 {
2224         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2225         u32 rx_channel_count = priv->plat->rx_queues_to_use;
2226         u32 channels_to_check = tx_channel_count > rx_channel_count ?
2227                                 tx_channel_count : rx_channel_count;
2228         u32 chan;
2229         int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2230
2231         /* Make sure we never check beyond our status buffer. */
2232         if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2233                 channels_to_check = ARRAY_SIZE(status);
2234
2235         for (chan = 0; chan < channels_to_check; chan++)
2236                 status[chan] = stmmac_napi_check(priv, chan);
2237
2238         for (chan = 0; chan < tx_channel_count; chan++) {
2239                 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2240                         /* Try to bump up the dma threshold on this failure */
2241                         if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2242                             (tc <= 256)) {
2243                                 tc += 64;
2244                                 if (priv->plat->force_thresh_dma_mode)
2245                                         stmmac_set_dma_operation_mode(priv,
2246                                                                       tc,
2247                                                                       tc,
2248                                                                       chan);
2249                                 else
2250                                         stmmac_set_dma_operation_mode(priv,
2251                                                                     tc,
2252                                                                     SF_DMA_MODE,
2253                                                                     chan);
2254                                 priv->xstats.threshold = tc;
2255                         }
2256                 } else if (unlikely(status[chan] == tx_hard_error)) {
2257                         stmmac_tx_err(priv, chan);
2258                 }
2259         }
2260 }
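/*
 * Threshold bump example: starting from the default tc of 64, each
 * tx_hard_error_bump_tc event adds 64 (128, 192, ...) as long as the
 * current value is still <= 256 and the driver is not already running in
 * Store-and-Forward mode; the new threshold is reprogrammed on the
 * failing channel and remembered in priv->xstats.threshold.
 */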
2261
2262 /**
2263  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2264  * @priv: driver private structure
2265  * Description: this masks the MMC irq, since the counters are managed in SW.
2266  */
2267 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2268 {
2269         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2270                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2271
2272         stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2273
2274         if (priv->dma_cap.rmon) {
2275                 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2276                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2277         } else
2278                 netdev_info(priv->dev, "No MAC Management Counters available\n");
2279 }
2280
2281 /**
2282  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2283  * @priv: driver private structure
2284  * Description:
2285  *  newer GMAC chip generations have a register to indicate the
2286  *  presence of the optional features/functions.
2287  *  This can also be used to override the value passed through the
2288  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2289  */
2290 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2291 {
2292         return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2293 }
2294
2295 /**
2296  * stmmac_check_ether_addr - check if the MAC addr is valid
2297  * @priv: driver private structure
2298  * Description:
2299  * it verifies that the MAC address is valid; in case of failure it
2300  * generates a random MAC address
2301  */
2302 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2303 {
2304         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2305                 stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2306                 if (!is_valid_ether_addr(priv->dev->dev_addr))
2307                         eth_hw_addr_random(priv->dev);
2308                 dev_info(priv->device, "device MAC address %pM\n",
2309                          priv->dev->dev_addr);
2310         }
2311 }
2312
2313 /**
2314  * stmmac_init_dma_engine - DMA init.
2315  * @priv: driver private structure
2316  * Description:
2317  * It inits the DMA invoking the specific MAC/GMAC callback.
2318  * Some DMA parameters can be passed from the platform;
2319  * in case these are not passed, a default is kept for the MAC or GMAC.
2320  */
2321 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2322 {
2323         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2324         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2325         u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2326         struct stmmac_rx_queue *rx_q;
2327         struct stmmac_tx_queue *tx_q;
2328         u32 chan = 0;
2329         int atds = 0;
2330         int ret = 0;
2331
2332         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2333                 dev_err(priv->device, "Invalid DMA configuration\n");
2334                 return -EINVAL;
2335         }
2336
2337         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2338                 atds = 1;
2339
2340         ret = stmmac_reset(priv, priv->ioaddr);
2341         if (ret) {
2342                 dev_err(priv->device, "Failed to reset the dma\n");
2343                 return ret;
2344         }
2345
2346         /* DMA Configuration */
2347         stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2348
2349         if (priv->plat->axi)
2350                 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2351
2352         /* DMA CSR Channel configuration */
2353         for (chan = 0; chan < dma_csr_ch; chan++)
2354                 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2355
2356         /* DMA RX Channel Configuration */
2357         for (chan = 0; chan < rx_channels_count; chan++) {
2358                 rx_q = &priv->rx_queue[chan];
2359
2360                 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2361                                     rx_q->dma_rx_phy, chan);
2362
2363                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2364                                      (priv->dma_rx_size *
2365                                       sizeof(struct dma_desc));
2366                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2367                                        rx_q->rx_tail_addr, chan);
2368         }
2369
2370         /* DMA TX Channel Configuration */
2371         for (chan = 0; chan < tx_channels_count; chan++) {
2372                 tx_q = &priv->tx_queue[chan];
2373
2374                 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2375                                     tx_q->dma_tx_phy, chan);
2376
2377                 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2378                 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2379                                        tx_q->tx_tail_addr, chan);
2380         }
2381
2382         return ret;
2383 }
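/*
 * Note on the tail pointers programmed above: the RX tail is set to the
 * end of the descriptor ring (base + ring size in bytes), making all
 * descriptors available to the hardware, while the TX tail starts at the
 * ring base since no frames have been queued yet.
 */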
2384
2385 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2386 {
2387         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2388
2389         hrtimer_start(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer),
2390                       HRTIMER_MODE_REL);
2391 }
2392
2393 /**
2394  * stmmac_tx_timer - mitigation sw timer for tx.
2395  * @t: pointer to the hrtimer embedded in the TX queue structure
2396  * Description:
2397  * This is the timer handler; it schedules the TX NAPI to run stmmac_tx_clean.
2398  */
2399 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
2400 {
2401         struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
2402         struct stmmac_priv *priv = tx_q->priv_data;
2403         struct stmmac_channel *ch;
2404
2405         ch = &priv->channel[tx_q->queue_index];
2406
2407         if (likely(napi_schedule_prep(&ch->tx_napi))) {
2408                 unsigned long flags;
2409
2410                 spin_lock_irqsave(&ch->lock, flags);
2411                 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
2412                 spin_unlock_irqrestore(&ch->lock, flags);
2413                 __napi_schedule(&ch->tx_napi);
2414         }
2415
2416         return HRTIMER_NORESTART;
2417 }
2418
2419 /**
2420  * stmmac_init_coalesce - init mitigation options.
2421  * @priv: driver private structure
2422  * Description:
2423  * This inits the coalesce parameters: i.e. timer rate,
2424  * timer handler and default threshold used for enabling the
2425  * interrupt on completion bit.
2426  */
2427 static void stmmac_init_coalesce(struct stmmac_priv *priv)
2428 {
2429         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2430         u32 chan;
2431
2432         priv->tx_coal_frames = STMMAC_TX_FRAMES;
2433         priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2434         priv->rx_coal_frames = STMMAC_RX_FRAMES;
2435
2436         for (chan = 0; chan < tx_channel_count; chan++) {
2437                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2438
2439                 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2440                 tx_q->txtimer.function = stmmac_tx_timer;
2441         }
2442 }
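/*
 * Each TX queue gets its own hrtimer; when it fires, stmmac_tx_timer()
 * masks the TX DMA interrupt for that channel and schedules the TX NAPI,
 * so pending descriptors are reclaimed even when no TX completion
 * interrupt has been raised (interrupt-on-completion coalescing).
 */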
2443
2444 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2445 {
2446         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2447         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2448         u32 chan;
2449
2450         /* set TX ring length */
2451         for (chan = 0; chan < tx_channels_count; chan++)
2452                 stmmac_set_tx_ring_len(priv, priv->ioaddr,
2453                                        (priv->dma_tx_size - 1), chan);
2454
2455         /* set RX ring length */
2456         for (chan = 0; chan < rx_channels_count; chan++)
2457                 stmmac_set_rx_ring_len(priv, priv->ioaddr,
2458                                        (priv->dma_rx_size - 1), chan);
2459 }
2460
2461 /**
2462  *  stmmac_set_tx_queue_weight - Set TX queue weight
2463  *  @priv: driver private structure
2464  *  Description: It is used for setting the TX queue weights
2465  */
2466 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2467 {
2468         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2469         u32 weight;
2470         u32 queue;
2471
2472         for (queue = 0; queue < tx_queues_count; queue++) {
2473                 weight = priv->plat->tx_queues_cfg[queue].weight;
2474                 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2475         }
2476 }
2477
2478 /**
2479  *  stmmac_configure_cbs - Configure CBS in TX queue
2480  *  @priv: driver private structure
2481  *  Description: It is used for configuring CBS in AVB TX queues
2482  */
2483 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2484 {
2485         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2486         u32 mode_to_use;
2487         u32 queue;
2488
2489         /* queue 0 is reserved for legacy traffic */
2490         for (queue = 1; queue < tx_queues_count; queue++) {
2491                 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2492                 if (mode_to_use == MTL_QUEUE_DCB)
2493                         continue;
2494
2495                 stmmac_config_cbs(priv, priv->hw,
2496                                 priv->plat->tx_queues_cfg[queue].send_slope,
2497                                 priv->plat->tx_queues_cfg[queue].idle_slope,
2498                                 priv->plat->tx_queues_cfg[queue].high_credit,
2499                                 priv->plat->tx_queues_cfg[queue].low_credit,
2500                                 queue);
2501         }
2502 }
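/*
 * Queue 0 is always left in its default (legacy) mode and any other queue
 * configured as MTL_QUEUE_DCB is skipped, so the CBS parameters
 * (send/idle slope, high/low credit) are only programmed on the AVB
 * queues declared by the platform.
 */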
2503
2504 /**
2505  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2506  *  @priv: driver private structure
2507  *  Description: It is used for mapping RX queues to RX dma channels
2508  */
2509 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2510 {
2511         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2512         u32 queue;
2513         u32 chan;
2514
2515         for (queue = 0; queue < rx_queues_count; queue++) {
2516                 chan = priv->plat->rx_queues_cfg[queue].chan;
2517                 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2518         }
2519 }
2520
2521 /**
2522  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2523  *  @priv: driver private structure
2524  *  Description: It is used for configuring the RX Queue Priority
2525  */
2526 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2527 {
2528         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2529         u32 queue;
2530         u32 prio;
2531
2532         for (queue = 0; queue < rx_queues_count; queue++) {
2533                 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2534                         continue;
2535
2536                 prio = priv->plat->rx_queues_cfg[queue].prio;
2537                 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2538         }
2539 }
2540
2541 /**
2542  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2543  *  @priv: driver private structure
2544  *  Description: It is used for configuring the TX Queue Priority
2545  */
2546 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2547 {
2548         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2549         u32 queue;
2550         u32 prio;
2551
2552         for (queue = 0; queue < tx_queues_count; queue++) {
2553                 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2554                         continue;
2555
2556                 prio = priv->plat->tx_queues_cfg[queue].prio;
2557                 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2558         }
2559 }
2560
2561 /**
2562  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2563  *  @priv: driver private structure
2564  *  Description: It is used for configuring the RX queue routing
2565  */
2566 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2567 {
2568         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2569         u32 queue;
2570         u8 packet;
2571
2572         for (queue = 0; queue < rx_queues_count; queue++) {
2573                 /* no specific packet type routing specified for the queue */
2574                 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2575                         continue;
2576
2577                 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2578                 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2579         }
2580 }
2581
2582 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
2583 {
2584         if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
2585                 priv->rss.enable = false;
2586                 return;
2587         }
2588
2589         if (priv->dev->features & NETIF_F_RXHASH)
2590                 priv->rss.enable = true;
2591         else
2592                 priv->rss.enable = false;
2593
2594         stmmac_rss_configure(priv, priv->hw, &priv->rss,
2595                              priv->plat->rx_queues_to_use);
2596 }
2597
2598 /**
2599  *  stmmac_mtl_configuration - Configure MTL
2600  *  @priv: driver private structure
2601  *  Description: It is used for configuring MTL
2602  */
2603 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2604 {
2605         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2606         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2607
2608         if (tx_queues_count > 1)
2609                 stmmac_set_tx_queue_weight(priv);
2610
2611         /* Configure MTL RX algorithms */
2612         if (rx_queues_count > 1)
2613                 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2614                                 priv->plat->rx_sched_algorithm);
2615
2616         /* Configure MTL TX algorithms */
2617         if (tx_queues_count > 1)
2618                 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2619                                 priv->plat->tx_sched_algorithm);
2620
2621         /* Configure CBS in AVB TX queues */
2622         if (tx_queues_count > 1)
2623                 stmmac_configure_cbs(priv);
2624
2625         /* Map RX MTL to DMA channels */
2626         stmmac_rx_queue_dma_chan_map(priv);
2627
2628         /* Enable MAC RX Queues */
2629         stmmac_mac_enable_rx_queues(priv);
2630
2631         /* Set RX priorities */
2632         if (rx_queues_count > 1)
2633                 stmmac_mac_config_rx_queues_prio(priv);
2634
2635         /* Set TX priorities */
2636         if (tx_queues_count > 1)
2637                 stmmac_mac_config_tx_queues_prio(priv);
2638
2639         /* Set RX routing */
2640         if (rx_queues_count > 1)
2641                 stmmac_mac_config_rx_queues_routing(priv);
2642
2643         /* Receive Side Scaling */
2644         if (rx_queues_count > 1)
2645                 stmmac_mac_config_rss(priv);
2646 }
2647
2648 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2649 {
2650         if (priv->dma_cap.asp) {
2651                 netdev_info(priv->dev, "Enabling Safety Features\n");
2652                 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2653         } else {
2654                 netdev_info(priv->dev, "No Safety Features support found\n");
2655         }
2656 }
2657
2658 /**
2659  * stmmac_hw_setup - setup mac in a usable state.
2660  *  @dev : pointer to the device structure.
2661  *  @init_ptp: initialize PTP if set
2662  *  Description:
2663  *  this is the main function to set up the HW in a usable state: the
2664  *  DMA engine is reset, the core registers are configured (e.g. AXI,
2665  *  checksum features, timers) and the DMA is ready to start receiving and
2666  *  transmitting.
2667  *  Return value:
2668  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2669  *  file on failure.
2670  */
2671 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2672 {
2673         struct stmmac_priv *priv = netdev_priv(dev);
2674         u32 rx_cnt = priv->plat->rx_queues_to_use;
2675         u32 tx_cnt = priv->plat->tx_queues_to_use;
2676         u32 chan;
2677         int ret;
2678
2679         /* DMA initialization and SW reset */
2680         ret = stmmac_init_dma_engine(priv);
2681         if (ret < 0) {
2682                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2683                            __func__);
2684                 return ret;
2685         }
2686
2687         /* Copy the MAC addr into the HW  */
2688         stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2689
2690         /* PS and related bits will be programmed according to the speed */
2691         if (priv->hw->pcs) {
2692                 int speed = priv->plat->mac_port_sel_speed;
2693
2694                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2695                     (speed == SPEED_1000)) {
2696                         priv->hw->ps = speed;
2697                 } else {
2698                         dev_warn(priv->device, "invalid port speed\n");
2699                         priv->hw->ps = 0;
2700                 }
2701         }
2702
2703         /* Initialize the MAC Core */
2704         stmmac_core_init(priv, priv->hw, dev);
2705
2706         /* Initialize MTL*/
2707         stmmac_mtl_configuration(priv);
2708
2709         /* Initialize Safety Features */
2710         stmmac_safety_feat_configuration(priv);
2711
2712         ret = stmmac_rx_ipc(priv, priv->hw);
2713         if (!ret) {
2714                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2715                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2716                 priv->hw->rx_csum = 0;
2717         }
2718
2719         /* Enable the MAC Rx/Tx */
2720         stmmac_mac_set(priv, priv->ioaddr, true);
2721
2722         /* Set the HW DMA mode and the COE */
2723         stmmac_dma_operation_mode(priv);
2724
2725         stmmac_mmc_setup(priv);
2726
2727         if (init_ptp) {
2728                 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2729                 if (ret < 0)
2730                         netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2731
2732                 ret = stmmac_init_ptp(priv);
2733                 if (ret == -EOPNOTSUPP)
2734                         netdev_warn(priv->dev, "PTP not supported by HW\n");
2735                 else if (ret)
2736                         netdev_warn(priv->dev, "PTP init failed\n");
2737         }
2738
2739         priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
2740
2741         /* Convert the timer from msec to usec */
2742         if (!priv->tx_lpi_timer)
2743                 priv->tx_lpi_timer = eee_timer * 1000;
2744
2745         if (priv->use_riwt) {
2746                 if (!priv->rx_riwt)
2747                         priv->rx_riwt = DEF_DMA_RIWT;
2748
2749                 ret = stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt);
2750         }
2751
2752         if (priv->hw->pcs)
2753                 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
2754
2755         /* set TX and RX rings length */
2756         stmmac_set_rings_length(priv);
2757
2758         /* Enable TSO */
2759         if (priv->tso) {
2760                 for (chan = 0; chan < tx_cnt; chan++)
2761                         stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2762         }
2763
2764         /* Enable Split Header */
2765         if (priv->sph && priv->hw->rx_csum) {
2766                 for (chan = 0; chan < rx_cnt; chan++)
2767                         stmmac_enable_sph(priv, priv->ioaddr, 1, chan);
2768         }
2769
2770         /* VLAN Tag Insertion */
2771         if (priv->dma_cap.vlins)
2772                 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
2773
2774         /* TBS */
2775         for (chan = 0; chan < tx_cnt; chan++) {
2776                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2777                 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
2778
2779                 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
2780         }
2781
2782         /* Configure real RX and TX queues */
2783         netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
2784         netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
2785
2786         /* Start the ball rolling... */
2787         stmmac_start_all_dma(priv);
2788
2789         return 0;
2790 }
2791
2792 static void stmmac_hw_teardown(struct net_device *dev)
2793 {
2794         struct stmmac_priv *priv = netdev_priv(dev);
2795
2796         clk_disable_unprepare(priv->plat->clk_ptp_ref);
2797 }
2798
2799 /**
2800  *  stmmac_open - open entry point of the driver
2801  *  @dev : pointer to the device structure.
2802  *  Description:
2803  *  This function is the open entry point of the driver.
2804  *  Return value:
2805  *  0 on success and an appropriate negative integer as defined in
2806  *  errno.h on failure.
2807  */
2808 static int stmmac_open(struct net_device *dev)
2809 {
2810         struct stmmac_priv *priv = netdev_priv(dev);
2811         int bfsize = 0;
2812         u32 chan;
2813         int ret;
2814
2815         if (priv->hw->pcs != STMMAC_PCS_TBI &&
2816             priv->hw->pcs != STMMAC_PCS_RTBI &&
2817             priv->hw->xpcs == NULL) {
2818                 ret = stmmac_init_phy(dev);
2819                 if (ret) {
2820                         netdev_err(priv->dev,
2821                                    "%s: Cannot attach to PHY (error: %d)\n",
2822                                    __func__, ret);
2823                         return ret;
2824                 }
2825         }
2826
2827         /* Extra statistics */
2828         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2829         priv->xstats.threshold = tc;
2830
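             /* Pick the RX DMA buffer size: use 16KiB buffers when the MTU
              * requires it and the HW supports it, otherwise derive the size
              * from the MTU.
              */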
2831         bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
2832         if (bfsize < 0)
2833                 bfsize = 0;
2834
2835         if (bfsize < BUF_SIZE_16KiB)
2836                 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
2837
2838         priv->dma_buf_sz = bfsize;
2839         buf_sz = bfsize;
2840
2841         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2842
2843         if (!priv->dma_tx_size)
2844                 priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
2845         if (!priv->dma_rx_size)
2846                 priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
2847
2848         /* Early per-queue check for TBS (Time Based Scheduling) availability */
2849         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
2850                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2851                 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
2852
2853                 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
2854                 if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan))
2855                         tx_q->tbs &= ~STMMAC_TBS_AVAIL;
2856         }
2857
2858         ret = alloc_dma_desc_resources(priv);
2859         if (ret < 0) {
2860                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2861                            __func__);
2862                 goto dma_desc_error;
2863         }
2864
2865         ret = init_dma_desc_rings(dev, GFP_KERNEL);
2866         if (ret < 0) {
2867                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2868                            __func__);
2869                 goto init_error;
2870         }
2871
2872         ret = stmmac_hw_setup(dev, true);
2873         if (ret < 0) {
2874                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2875                 goto init_error;
2876         }
2877
2878         stmmac_init_coalesce(priv);
2879
2880         phylink_start(priv->phylink);
2881         /* We may have called phylink_speed_down before */
2882         phylink_speed_up(priv->phylink);
2883
2884         /* Request the IRQ lines */
2885         ret = request_irq(dev->irq, stmmac_interrupt,
2886                           IRQF_SHARED, dev->name, dev);
2887         if (unlikely(ret < 0)) {
2888                 netdev_err(priv->dev,
2889                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2890                            __func__, dev->irq, ret);
2891                 goto irq_error;
2892         }
2893
2894         /* Request the Wake IRQ in case another line is used for WoL */
2895         if (priv->wol_irq != dev->irq) {
2896                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2897                                   IRQF_SHARED, dev->name, dev);
2898                 if (unlikely(ret < 0)) {
2899                         netdev_err(priv->dev,
2900                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2901                                    __func__, priv->wol_irq, ret);
2902                         goto wolirq_error;
2903                 }
2904         }
2905
2906         /* Request the LPI IRQ in case another line is used */
2907         if (priv->lpi_irq > 0) {
2908                 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2909                                   dev->name, dev);
2910                 if (unlikely(ret < 0)) {
2911                         netdev_err(priv->dev,
2912                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2913                                    __func__, priv->lpi_irq, ret);
2914                         goto lpiirq_error;
2915                 }
2916         }
2917
2918         stmmac_enable_all_queues(priv);
2919         netif_tx_start_all_queues(priv->dev);
2920
2921         return 0;
2922
2923 lpiirq_error:
2924         if (priv->wol_irq != dev->irq)
2925                 free_irq(priv->wol_irq, dev);
2926 wolirq_error:
2927         free_irq(dev->irq, dev);
2928 irq_error:
2929         phylink_stop(priv->phylink);
2930
2931         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2932                 hrtimer_cancel(&priv->tx_queue[chan].txtimer);
2933
2934         stmmac_hw_teardown(dev);
2935 init_error:
2936         free_dma_desc_resources(priv);
2937 dma_desc_error:
2938         phylink_disconnect_phy(priv->phylink);
2939         return ret;
2940 }
2941
2942 /**
2943  *  stmmac_release - close entry point of the driver
2944  *  @dev : device pointer.
2945  *  Description:
2946  *  This is the stop entry point of the driver.
2947  */
2948 static int stmmac_release(struct net_device *dev)
2949 {
2950         struct stmmac_priv *priv = netdev_priv(dev);
2951         u32 chan;
2952
2953         if (device_may_wakeup(priv->device))
2954                 phylink_speed_down(priv->phylink, false);
2955         /* Stop and disconnect the PHY */
2956         phylink_stop(priv->phylink);
2957         phylink_disconnect_phy(priv->phylink);
2958
2959         stmmac_disable_all_queues(priv);
2960
2961         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2962                 hrtimer_cancel(&priv->tx_queue[chan].txtimer);
2963
2964         /* Free the IRQ lines */
2965         free_irq(dev->irq, dev);
2966         if (priv->wol_irq != dev->irq)
2967                 free_irq(priv->wol_irq, dev);
2968         if (priv->lpi_irq > 0)
2969                 free_irq(priv->lpi_irq, dev);
2970
2971         if (priv->eee_enabled) {
2972                 priv->tx_path_in_lpi_mode = false;
2973                 del_timer_sync(&priv->eee_ctrl_timer);
2974         }
2975
2976         /* Stop TX/RX DMA and clear the descriptors */
2977         stmmac_stop_all_dma(priv);
2978
2979         /* Release and free the Rx/Tx resources */
2980         free_dma_desc_resources(priv);
2981
2982         /* Disable the MAC Rx/Tx */
2983         stmmac_mac_set(priv, priv->ioaddr, false);
2984
2985         netif_carrier_off(dev);
2986
2987         stmmac_release_ptp(priv);
2988
2989         return 0;
2990 }
2991
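/**
 *  stmmac_vlan_insert - offload VLAN tag insertion to the HW
 *  @priv: driver private structure
 *  @skb: the socket buffer
 *  @tx_q: TX queue to take the descriptor from
 *  Description: when the HW supports VLAN insertion and the skb carries a
 *  VLAN tag, consume one TX descriptor to program the tag (plus the inner
 *  tag for 802.1AD) and return true; otherwise return false.
 */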
2992 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
2993                                struct stmmac_tx_queue *tx_q)
2994 {
2995         u16 tag = 0x0, inner_tag = 0x0;
2996         u32 inner_type = 0x0;
2997         struct dma_desc *p;
2998
2999         if (!priv->dma_cap.vlins)
3000                 return false;
3001         if (!skb_vlan_tag_present(skb))
3002                 return false;
3003         if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3004                 inner_tag = skb_vlan_tag_get(skb);
3005                 inner_type = STMMAC_VLAN_INSERT;
3006         }
3007
3008         tag = skb_vlan_tag_get(skb);
3009
3010         if (tx_q->tbs & STMMAC_TBS_AVAIL)
3011                 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3012         else
3013                 p = &tx_q->dma_tx[tx_q->cur_tx];
3014
3015         if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
3016                 return false;
3017
3018         stmmac_set_tx_owner(priv, p);
3019         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
3020         return true;
3021 }
3022
3023 /**
3024  *  stmmac_tso_allocator - allocate TX descriptors for a TSO payload
3025  *  @priv: driver private structure
3026  *  @des: buffer start address
3027  *  @total_len: total length to fill in descriptors
3028  *  @last_segment: condition for the last descriptor
3029  *  @queue: TX queue index
3030  *  Description:
3031  *  This function fills the descriptors with the payload, taking new
3032  *  descriptors whenever the remaining length exceeds the maximum buffer size
3033  */
3034 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
3035                                  int total_len, bool last_segment, u32 queue)
3036 {
3037         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3038         struct dma_desc *desc;
3039         u32 buff_size;
3040         int tmp_len;
3041
3042         tmp_len = total_len;
3043
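             /* Split the payload into chunks of at most TSO_MAX_BUFF_SIZE
              * bytes, consuming one TX descriptor per chunk.
              */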
3044         while (tmp_len > 0) {
3045                 dma_addr_t curr_addr;
3046
3047                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3048                                                 priv->dma_tx_size);
3049                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3050
3051                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3052                         desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3053                 else
3054                         desc = &tx_q->dma_tx[tx_q->cur_tx];
3055
3056                 curr_addr = des + (total_len - tmp_len);
3057                 if (priv->dma_cap.addr64 <= 32)
3058                         desc->des0 = cpu_to_le32(curr_addr);
3059                 else
3060                         stmmac_set_desc_addr(priv, desc, curr_addr);
3061
3062                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
3063                             TSO_MAX_BUFF_SIZE : tmp_len;
3064
3065                 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
3066                                 0, 1,
3067                                 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
3068                                 0, 0);
3069
3070                 tmp_len -= TSO_MAX_BUFF_SIZE;
3071         }
3072 }
3073
3074 /**
3075  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
3076  *  @skb : the socket buffer
3077  *  @dev : device pointer
3078  *  Description: this is the transmit function that is called on TSO frames
3079  *  (support available on GMAC4 and newer chips).
3080  *  The diagram below shows the ring programming in case of TSO frames:
3081  *
3082  *  First Descriptor
3083  *   --------
3084  *   | DES0 |---> buffer1 = L2/L3/L4 header
3085  *   | DES1 |---> TCP Payload (can continue on next descr...)
3086  *   | DES2 |---> buffer 1 and 2 len
3087  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
3088  *   --------
3089  *      |
3090  *     ...
3091  *      |
3092  *   --------
3093  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
3094  *   | DES1 | --|
3095  *   | DES2 | --> buffer 1 and 2 len
3096  *   | DES3 |
3097  *   --------
3098  *
3099  * The MSS is fixed while TSO is enabled, so the TDES3 ctx field only needs to be programmed when the MSS changes.
3100  */
3101 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
3102 {
3103         struct dma_desc *desc, *first, *mss_desc = NULL;
3104         struct stmmac_priv *priv = netdev_priv(dev);
3105         int desc_size, tmp_pay_len = 0, first_tx;
3106         int nfrags = skb_shinfo(skb)->nr_frags;
3107         u32 queue = skb_get_queue_mapping(skb);
3108         unsigned int first_entry, tx_packets;
3109         struct stmmac_tx_queue *tx_q;
3110         bool has_vlan, set_ic;
3111         u8 proto_hdr_len, hdr;
3112         u32 pay_len, mss;
3113         dma_addr_t des;
3114         int i;
3115
3116         tx_q = &priv->tx_queue[queue];
3117         first_tx = tx_q->cur_tx;
3118
3119         /* Compute header lengths */
3120         if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
3121                 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
3122                 hdr = sizeof(struct udphdr);
3123         } else {
3124                 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3125                 hdr = tcp_hdrlen(skb);
3126         }
3127
3128         /* Descriptor availability based on the threshold should be safe enough */
3129         if (unlikely(stmmac_tx_avail(priv, queue) <
3130                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
3131                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3132                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3133                                                                 queue));
3134                         /* This is a hard error, log it. */
3135                         netdev_err(priv->dev,
3136                                    "%s: Tx Ring full when queue awake\n",
3137                                    __func__);
3138                 }
3139                 return NETDEV_TX_BUSY;
3140         }
3141
3142         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
3143
3144         mss = skb_shinfo(skb)->gso_size;
3145
3146         /* set new MSS value if needed */
3147         if (mss != tx_q->mss) {
3148                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3149                         mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3150                 else
3151                         mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
3152
3153                 stmmac_set_mss(priv, mss_desc, mss);
3154                 tx_q->mss = mss;
3155                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3156                                                 priv->dma_tx_size);
3157                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3158         }
3159
3160         if (netif_msg_tx_queued(priv)) {
3161                 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
3162                         __func__, hdr, proto_hdr_len, pay_len, mss);
3163                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
3164                         skb->data_len);
3165         }
3166
3167         /* Check if VLAN can be inserted by HW */
3168         has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
3169
3170         first_entry = tx_q->cur_tx;
3171         WARN_ON(tx_q->tx_skbuff[first_entry]);
3172
3173         if (tx_q->tbs & STMMAC_TBS_AVAIL)
3174                 desc = &tx_q->dma_entx[first_entry].basic;
3175         else
3176                 desc = &tx_q->dma_tx[first_entry];
3177         first = desc;
3178
3179         if (has_vlan)
3180                 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
3181
3182         /* first descriptor: fill Headers on Buf1 */
3183         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
3184                              DMA_TO_DEVICE);
3185         if (dma_mapping_error(priv->device, des))
3186                 goto dma_map_err;
3187
3188         tx_q->tx_skbuff_dma[first_entry].buf = des;
3189         tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
3190
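             /* With <= 32-bit descriptor addressing the first descriptor carries
              * the headers in buffer 1 and the start of the payload in buffer 2;
              * with wider addressing the whole payload is mapped by the
              * descriptors built in stmmac_tso_allocator().
              */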
3191         if (priv->dma_cap.addr64 <= 32) {
3192                 first->des0 = cpu_to_le32(des);
3193
3194                 /* Fill start of payload in buff2 of first descriptor */
3195                 if (pay_len)
3196                         first->des1 = cpu_to_le32(des + proto_hdr_len);
3197
3198                 /* If needed take extra descriptors to fill the remaining payload */
3199                 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
3200         } else {
3201                 stmmac_set_desc_addr(priv, first, des);
3202                 tmp_pay_len = pay_len;
3203                 des += proto_hdr_len;
3204                 pay_len = 0;
3205         }
3206
3207         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
3208
3209         /* Prepare fragments */
3210         for (i = 0; i < nfrags; i++) {
3211                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3212
3213                 des = skb_frag_dma_map(priv->device, frag, 0,
3214                                        skb_frag_size(frag),
3215                                        DMA_TO_DEVICE);
3216                 if (dma_mapping_error(priv->device, des))
3217                         goto dma_map_err;
3218
3219                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
3220                                      (i == nfrags - 1), queue);
3221
3222                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
3223                 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
3224                 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
3225         }
3226
3227         tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
3228
3229         /* Only the last descriptor gets to point to the skb. */
3230         tx_q->tx_skbuff[tx_q->cur_tx] = skb;
3231
3232         /* Manage tx mitigation */
3233         tx_packets = (tx_q->cur_tx + 1) - first_tx;
3234         tx_q->tx_count_frames += tx_packets;
3235
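             /* Decide whether this frame should raise a TX completion interrupt:
              * always when a HW timestamp is requested, never when frame
              * coalescing is disabled, otherwise once the coalesce frame
              * threshold has been crossed.
              */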
3236         if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3237                 set_ic = true;
3238         else if (!priv->tx_coal_frames)
3239                 set_ic = false;
3240         else if (tx_packets > priv->tx_coal_frames)
3241                 set_ic = true;
3242         else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
3243                 set_ic = true;
3244         else
3245                 set_ic = false;
3246
3247         if (set_ic) {
3248                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3249                         desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3250                 else
3251                         desc = &tx_q->dma_tx[tx_q->cur_tx];
3252
3253                 tx_q->tx_count_frames = 0;
3254                 stmmac_set_tx_ic(priv, desc);
3255                 priv->xstats.tx_set_ic_bit++;
3256         }
3257
3258         /* We've used all descriptors we need for this skb, however,
3259          * advance cur_tx so that it references a fresh descriptor.
3260          * ndo_start_xmit will fill this descriptor the next time it's
3261          * called and stmmac_tx_clean may clean up to this descriptor.
3262          */
3263         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
3264
3265         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3266                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3267                           __func__);
3268                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3269         }
3270
3271         dev->stats.tx_bytes += skb->len;
3272         priv->xstats.tx_tso_frames++;
3273         priv->xstats.tx_tso_nfrags += nfrags;
3274
3275         if (priv->sarc_type)
3276                 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
3277
3278         skb_tx_timestamp(skb);
3279
3280         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3281                      priv->hwts_tx_en)) {
3282                 /* declare that device is doing timestamping */
3283                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3284                 stmmac_enable_tx_timestamp(priv, first);
3285         }
3286
3287         /* Complete the first descriptor before granting the DMA */
3288         stmmac_prepare_tso_tx_desc(priv, first, 1,
3289                         proto_hdr_len,
3290                         pay_len,
3291                         1, tx_q->tx_skbuff_dma[first_entry].last_segment,
3292                         hdr / 4, (skb->len - proto_hdr_len));
3293
3294         /* If context desc is used to change MSS */
3295         if (mss_desc) {
3296                 /* Make sure that the first descriptor has been completely
3297                  * written, including its OWN bit. The MSS descriptor actually
3298                  * precedes the first descriptor, so its OWN bit must be the
3299                  * last thing written.
3300                  */
3301                 dma_wmb();
3302                 stmmac_set_tx_owner(priv, mss_desc);
3303         }
3304
3305         /* The OWN bit must be the last thing written when preparing the
3306          * descriptor, and a barrier is needed to make sure that
3307          * everything is coherent before granting the DMA engine.
3308          */
3309         wmb();
3310
3311         if (netif_msg_pktdata(priv)) {
3312                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
3313                         __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3314                         tx_q->cur_tx, first, nfrags);
3315                 pr_info(">>> frame to be transmitted: ");
3316                 print_pkt(skb->data, skb_headlen(skb));
3317         }
3318
3319         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3320
3321         if (tx_q->tbs & STMMAC_TBS_AVAIL)
3322                 desc_size = sizeof(struct dma_edesc);
3323         else
3324                 desc_size = sizeof(struct dma_desc);
3325
3326         tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3327         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3328         stmmac_tx_timer_arm(priv, queue);
3329
3330         return NETDEV_TX_OK;
3331
3332 dma_map_err:
3333         dev_err(priv->device, "Tx dma map failed\n");
3334         dev_kfree_skb(skb);
3335         priv->dev->stats.tx_dropped++;
3336         return NETDEV_TX_OK;
3337 }
3338
3339 /**
3340  *  stmmac_xmit - Tx entry point of the driver
3341  *  @skb : the socket buffer
3342  *  @dev : device pointer
3343  *  Description: this is the tx entry point of the driver.
3344  *  It programs the chain or the ring and supports oversized frames
3345  *  and the SG feature.
3346  */
3347 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3348 {
3349         unsigned int first_entry, tx_packets, enh_desc;
3350         struct stmmac_priv *priv = netdev_priv(dev);
3351         unsigned int nopaged_len = skb_headlen(skb);
3352         int i, csum_insertion = 0, is_jumbo = 0;
3353         u32 queue = skb_get_queue_mapping(skb);
3354         int nfrags = skb_shinfo(skb)->nr_frags;
3355         int gso = skb_shinfo(skb)->gso_type;
3356         struct dma_edesc *tbs_desc = NULL;
3357         int entry, desc_size, first_tx;
3358         struct dma_desc *desc, *first;
3359         struct stmmac_tx_queue *tx_q;
3360         bool has_vlan, set_ic;
3361         dma_addr_t des;
3362
3363         tx_q = &priv->tx_queue[queue];
3364         first_tx = tx_q->cur_tx;
3365
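             /* Leave low-power idle (EEE) before queueing the frame when the
              * SW LPI timer is managing entry/exit.
              */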
3366         if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
3367                 stmmac_disable_eee_mode(priv);
3368
3369         /* Manage oversized TCP/UDP frames for GMAC4 devices */
3370         if (skb_is_gso(skb) && priv->tso) {
3371                 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3372                         return stmmac_tso_xmit(skb, dev);
3373                 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
3374                         return stmmac_tso_xmit(skb, dev);
3375         }
3376
3377         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3378                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3379                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3380                                                                 queue));
3381                         /* This is a hard error, log it. */
3382                         netdev_err(priv->dev,
3383                                    "%s: Tx Ring full when queue awake\n",
3384                                    __func__);
3385                 }
3386                 return NETDEV_TX_BUSY;
3387         }
3388
3389         /* Check if VLAN can be inserted by HW */
3390         has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
3391
3392         entry = tx_q->cur_tx;
3393         first_entry = entry;
3394         WARN_ON(tx_q->tx_skbuff[first_entry]);
3395
3396         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3397
3398         if (likely(priv->extend_desc))
3399                 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3400         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3401                 desc = &tx_q->dma_entx[entry].basic;
3402         else
3403                 desc = tx_q->dma_tx + entry;
3404
3405         first = desc;
3406
3407         if (has_vlan)
3408                 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
3409
3410         enh_desc = priv->plat->enh_desc;
3411         /* To program the descriptors according to the size of the frame */
3412         if (enh_desc)
3413                 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3414
3415         if (unlikely(is_jumbo)) {
3416                 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3417                 if (unlikely(entry < 0) && (entry != -EINVAL))
3418                         goto dma_map_err;
3419         }
3420
3421         for (i = 0; i < nfrags; i++) {
3422                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3423                 int len = skb_frag_size(frag);
3424                 bool last_segment = (i == (nfrags - 1));
3425
3426                 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
3427                 WARN_ON(tx_q->tx_skbuff[entry]);
3428
3429                 if (likely(priv->extend_desc))
3430                         desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3431                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3432                         desc = &tx_q->dma_entx[entry].basic;
3433                 else
3434                         desc = tx_q->dma_tx + entry;
3435
3436                 des = skb_frag_dma_map(priv->device, frag, 0, len,
3437                                        DMA_TO_DEVICE);
3438                 if (dma_mapping_error(priv->device, des))
3439                         goto dma_map_err; /* should reuse desc w/o issues */
3440
3441                 tx_q->tx_skbuff_dma[entry].buf = des;
3442
3443                 stmmac_set_desc_addr(priv, desc, des);
3444
3445                 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3446                 tx_q->tx_skbuff_dma[entry].len = len;
3447                 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3448
3449                 /* Prepare the descriptor and set the own bit too */
3450                 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3451                                 priv->mode, 1, last_segment, skb->len);
3452         }
3453
3454         /* Only the last descriptor gets to point to the skb. */
3455         tx_q->tx_skbuff[entry] = skb;
3456
3457         /* According to the coalesce parameter, the IC bit for the latest
3458          * segment is reset and the timer is restarted to clean the tx status.
3459          * This approach takes care of the fragments: desc is the first
3460          * element in case of no SG.
3461          */
3462         tx_packets = (entry + 1) - first_tx;
3463         tx_q->tx_count_frames += tx_packets;
3464
3465         if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3466                 set_ic = true;
3467         else if (!priv->tx_coal_frames)
3468                 set_ic = false;
3469         else if (tx_packets > priv->tx_coal_frames)
3470                 set_ic = true;
3471         else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
3472                 set_ic = true;
3473         else
3474                 set_ic = false;
3475
3476         if (set_ic) {
3477                 if (likely(priv->extend_desc))
3478                         desc = &tx_q->dma_etx[entry].basic;
3479                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3480                         desc = &tx_q->dma_entx[entry].basic;
3481                 else
3482                         desc = &tx_q->dma_tx[entry];
3483
3484                 tx_q->tx_count_frames = 0;
3485                 stmmac_set_tx_ic(priv, desc);
3486                 priv->xstats.tx_set_ic_bit++;
3487         }
3488
3489         /* We've used all descriptors we need for this skb, however,
3490          * advance cur_tx so that it references a fresh descriptor.
3491          * ndo_start_xmit will fill this descriptor the next time it's
3492          * called and stmmac_tx_clean may clean up to this descriptor.
3493          */
3494         entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
3495         tx_q->cur_tx = entry;
3496
3497         if (netif_msg_pktdata(priv)) {
3498                 netdev_dbg(priv->dev,
3499                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3500                            __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3501                            entry, first, nfrags);
3502
3503                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3504                 print_pkt(skb->data, skb->len);
3505         }
3506
3507         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3508                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3509                           __func__);
3510                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3511         }
3512
3513         dev->stats.tx_bytes += skb->len;
3514
3515         if (priv->sarc_type)
3516                 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
3517
3518         skb_tx_timestamp(skb);
3519
3520         /* Ready to fill the first descriptor and set the OWN bit w/o any
3521          * problems because all the descriptors are actually ready to be
3522          * passed to the DMA engine.
3523          */
3524         if (likely(!is_jumbo)) {
3525                 bool last_segment = (nfrags == 0);
3526
3527                 des = dma_map_single(priv->device, skb->data,
3528                                      nopaged_len, DMA_TO_DEVICE);
3529                 if (dma_mapping_error(priv->device, des))
3530                         goto dma_map_err;
3531
3532                 tx_q->tx_skbuff_dma[first_entry].buf = des;
3533
3534                 stmmac_set_desc_addr(priv, first, des);
3535
3536                 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3537                 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3538
3539                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3540                              priv->hwts_tx_en)) {
3541                         /* declare that device is doing timestamping */
3542                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3543                         stmmac_enable_tx_timestamp(priv, first);
3544                 }
3545
3546                 /* Prepare the first descriptor setting the OWN bit too */
3547                 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3548                                 csum_insertion, priv->mode, 0, last_segment,
3549                                 skb->len);
3550         }
3551
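             /* With Time Based Scheduling enabled, program the frame launch
              * time from skb->tstamp into the enhanced descriptor.
              */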
3552         if (tx_q->tbs & STMMAC_TBS_EN) {
3553                 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
3554
3555                 tbs_desc = &tx_q->dma_entx[first_entry];
3556                 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
3557         }
3558
3559         stmmac_set_tx_owner(priv, first);
3560
3561         /* The OWN bit must be the last thing written when preparing the
3562          * descriptor, and a barrier is needed to make sure that
3563          * everything is coherent before granting the DMA engine.
3564          */
3565         wmb();
3566
3567         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3568
3569         stmmac_enable_dma_transmission(priv, priv->ioaddr);
3570
3571         if (likely(priv->extend_desc))
3572                 desc_size = sizeof(struct dma_extended_desc);
3573         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3574                 desc_size = sizeof(struct dma_edesc);
3575         else
3576                 desc_size = sizeof(struct dma_desc);
3577
3578         tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3579         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3580         stmmac_tx_timer_arm(priv, queue);
3581
3582         return NETDEV_TX_OK;
3583
3584 dma_map_err:
3585         netdev_err(priv->dev, "Tx DMA map failed\n");
3586         dev_kfree_skb(skb);
3587         priv->dev->stats.tx_dropped++;
3588         return NETDEV_TX_OK;
3589 }
3590
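/**
 *  stmmac_rx_vlan - strip the VLAN tag from a received frame
 *  @dev: device pointer
 *  @skb: the received socket buffer
 *  Description: if the frame carries an 802.1Q or 802.1AD tag and the
 *  matching RX VLAN offload feature is enabled, pop the tag from the packet
 *  data and record it in the skb for the stack.
 */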
3591 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3592 {
3593         struct vlan_ethhdr *veth;
3594         __be16 vlan_proto;
3595         u16 vlanid;
3596
3597         veth = (struct vlan_ethhdr *)skb->data;
3598         vlan_proto = veth->h_vlan_proto;
3599
3600         if ((vlan_proto == htons(ETH_P_8021Q) &&
3601              dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3602             (vlan_proto == htons(ETH_P_8021AD) &&
3603              dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3604                 /* pop the vlan tag */
3605                 vlanid = ntohs(veth->h_vlan_TCI);
3606                 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3607                 skb_pull(skb, VLAN_HLEN);
3608                 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3609         }
3610 }
3611
3612 /**
3613  * stmmac_rx_refill - refill the used RX buffers
3614  * @priv: driver private structure
3615  * @queue: RX queue index
3616  * Description: this reallocates the page-pool RX buffers used by the
3617  * zero-copy reception process.
3618  */
3619 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3620 {
3621         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3622         int len, dirty = stmmac_rx_dirty(priv, queue);
3623         unsigned int entry = rx_q->dirty_rx;
3624
3625         len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
3626
3627         while (dirty-- > 0) {
3628                 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
3629                 struct dma_desc *p;
3630                 bool use_rx_wd;
3631
3632                 if (priv->extend_desc)
3633                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3634                 else
3635                         p = rx_q->dma_rx + entry;
3636
3637                 if (!buf->page) {
3638                         buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
3639                         if (!buf->page)
3640                                 break;
3641                 }
3642
3643                 if (priv->sph && !buf->sec_page) {
3644                         buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
3645                         if (!buf->sec_page)
3646                                 break;
3647
3648                         buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
3649
3650                         dma_sync_single_for_device(priv->device, buf->sec_addr,
3651                                                    len, DMA_FROM_DEVICE);
3652                 }
3653
3654                 buf->addr = page_pool_get_dma_addr(buf->page);
3655
3656                 /* Sync whole allocation to device. This will invalidate old
3657                  * data.
3658                  */
3659                 dma_sync_single_for_device(priv->device, buf->addr, len,
3660                                            DMA_FROM_DEVICE);
3661
3662                 stmmac_set_desc_addr(priv, p, buf->addr);
3663                 if (priv->sph)
3664                         stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
3665                 else
3666                         stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
3667                 stmmac_refill_desc3(priv, rx_q, p);
3668
3669                 rx_q->rx_count_frames++;
3670                 rx_q->rx_count_frames += priv->rx_coal_frames;
3671                 if (rx_q->rx_count_frames > priv->rx_coal_frames)
3672                         rx_q->rx_count_frames = 0;
3673
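                     /* Only defer completion to the RX interrupt watchdog when
                      * RIWT is in use; otherwise every descriptor raises an
                      * interrupt on completion.
                      */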
3674                 use_rx_wd = !priv->rx_coal_frames;
3675                 use_rx_wd |= rx_q->rx_count_frames > 0;
3676                 if (!priv->use_riwt)
3677                         use_rx_wd = false;
3678
3679                 dma_wmb();
3680                 stmmac_set_rx_owner(priv, p, use_rx_wd);
3681
3682                 entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
3683         }
3684         rx_q->dirty_rx = entry;
3685         rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3686                             (rx_q->dirty_rx * sizeof(struct dma_desc));
3687         stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
3688 }
3689
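/* Work out how many bytes of the frame landed in buffer 1 of this
 * descriptor, taking split header and last-descriptor status into account.
 */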
3690 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
3691                                        struct dma_desc *p,
3692                                        int status, unsigned int len)
3693 {
3694         unsigned int plen = 0, hlen = 0;
3695         int coe = priv->hw->rx_csum;
3696
3697         /* Not first descriptor, buffer is always zero */
3698         if (priv->sph && len)
3699                 return 0;
3700
3701         /* First descriptor, get split header length */
3702         stmmac_get_rx_header_len(priv, p, &hlen);
3703         if (priv->sph && hlen) {
3704                 priv->xstats.rx_split_hdr_pkt_n++;
3705                 return hlen;
3706         }
3707
3708         /* First descriptor, not last descriptor and not split header */
3709         if (status & rx_not_ls)
3710                 return priv->dma_buf_sz;
3711
3712         plen = stmmac_get_rx_frame_len(priv, p, coe);
3713
3714         /* First descriptor and last descriptor and not split header */
3715         return min_t(unsigned int, priv->dma_buf_sz, plen);
3716 }
3717
3718 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
3719                                        struct dma_desc *p,
3720                                        int status, unsigned int len)
3721 {
3722         int coe = priv->hw->rx_csum;
3723         unsigned int plen = 0;
3724
3725         /* Not split header, buffer is not available */
3726         if (!priv->sph)
3727                 return 0;
3728
3729         /* Not last descriptor */
3730         if (status & rx_not_ls)
3731                 return priv->dma_buf_sz;
3732
3733         plen = stmmac_get_rx_frame_len(priv, p, coe);
3734
3735         /* Last descriptor */
3736         return plen - len;
3737 }
3738
3739 /**
3740  * stmmac_rx - manage the receive process
3741  * @priv: driver private structure
3742  * @limit: NAPI budget
3743  * @queue: RX queue index.
3744  * Description: this is the function called by the NAPI poll method.
3745  * It gets all the frames inside the ring.
3746  */
3747 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3748 {
3749         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3750         struct stmmac_channel *ch = &priv->channel[queue];
3751         unsigned int count = 0, error = 0, len = 0;
3752         int status = 0, coe = priv->hw->rx_csum;
3753         unsigned int next_entry = rx_q->cur_rx;
3754         unsigned int desc_size;
3755         struct sk_buff *skb = NULL;
3756
3757         if (netif_msg_rx_status(priv)) {
3758                 void *rx_head;
3759
3760                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3761                 if (priv->extend_desc) {
3762                         rx_head = (void *)rx_q->dma_erx;
3763                         desc_size = sizeof(struct dma_extended_desc);
3764                 } else {
3765                         rx_head = (void *)rx_q->dma_rx;
3766                         desc_size = sizeof(struct dma_desc);
3767                 }
3768
3769                 stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
3770                                     rx_q->dma_rx_phy, desc_size);
3771         }
3772         while (count < limit) {
3773                 unsigned int buf1_len = 0, buf2_len = 0;
3774                 enum pkt_hash_types hash_type;
3775                 struct stmmac_rx_buffer *buf;
3776                 struct dma_desc *np, *p;
3777                 int entry;
3778                 u32 hash;
3779
3780                 if (!count && rx_q->state_saved) {
3781                         skb = rx_q->state.skb;
3782                         error = rx_q->state.error;
3783                         len = rx_q->state.len;
3784                 } else {
3785                         rx_q->state_saved = false;
3786                         skb = NULL;
3787                         error = 0;
3788                         len = 0;
3789                 }
3790
3791                 if (count >= limit)
3792                         break;
3793
3794 read_again:
3795                 buf1_len = 0;
3796                 buf2_len = 0;
3797                 entry = next_entry;
3798                 buf = &rx_q->buf_pool[entry];
3799
3800                 if (priv->extend_desc)
3801                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3802                 else
3803                         p = rx_q->dma_rx + entry;
3804
3805                 /* read the status of the incoming frame */
3806                 status = stmmac_rx_status(priv, &priv->dev->stats,
3807                                 &priv->xstats, p);
3808                 /* check if the descriptor is still owned by the DMA, otherwise go ahead */
3809                 if (unlikely(status & dma_own))
3810                         break;
3811
3812                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
3813                                                 priv->dma_rx_size);
3814                 next_entry = rx_q->cur_rx;
3815
3816                 if (priv->extend_desc)
3817                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3818                 else
3819                         np = rx_q->dma_rx + next_entry;
3820
3821                 prefetch(np);
3822
3823                 if (priv->extend_desc)
3824                         stmmac_rx_extended_status(priv, &priv->dev->stats,
3825                                         &priv->xstats, rx_q->dma_erx + entry);
3826                 if (unlikely(status == discard_frame)) {
3827                         page_pool_recycle_direct(rx_q->page_pool, buf->page);
3828                         buf->page = NULL;
3829                         error = 1;
3830                         if (!priv->hwts_rx_en)
3831                                 priv->dev->stats.rx_errors++;
3832                 }
3833
3834                 if (unlikely(error && (status & rx_not_ls)))
3835                         goto read_again;
3836                 if (unlikely(error)) {
3837                         dev_kfree_skb(skb);
3838                         skb = NULL;
3839                         count++;
3840                         continue;
3841                 }
3842
3843                 /* Buffer is good. Go on. */
3844
3845                 prefetch(page_address(buf->page));
3846                 if (buf->sec_page)
3847                         prefetch(page_address(buf->sec_page));
3848
3849                 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
3850                 len += buf1_len;
3851                 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
3852                 len += buf2_len;
3853
3854                 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3855                  * Type frames (LLC/LLC-SNAP)
3856                  *
3857                  * llc_snap is never checked in GMAC >= 4, so this ACS
3858                  * feature is always disabled and packets need to be
3859                  * stripped manually.
3860                  */
3861                 if (likely(!(status & rx_not_ls)) &&
3862                     (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3863                      unlikely(status != llc_snap))) {
3864                         if (buf2_len)
3865                                 buf2_len -= ETH_FCS_LEN;
3866                         else
3867                                 buf1_len -= ETH_FCS_LEN;
3868
3869                         len -= ETH_FCS_LEN;
3870                 }
3871
3872                 if (!skb) {
3873                         skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
3874                         if (!skb) {
3875                                 priv->dev->stats.rx_dropped++;
3876                                 count++;
3877                                 goto drain_data;
3878                         }
3879
3880                         dma_sync_single_for_cpu(priv->device, buf->addr,
3881                                                 buf1_len, DMA_FROM_DEVICE);
3882                         skb_copy_to_linear_data(skb, page_address(buf->page),
3883                                                 buf1_len);
3884                         skb_put(skb, buf1_len);
3885
3886                         /* Data payload copied into SKB, page ready for recycle */
3887                         page_pool_recycle_direct(rx_q->page_pool, buf->page);
3888                         buf->page = NULL;
3889                 } else if (buf1_len) {
3890                         dma_sync_single_for_cpu(priv->device, buf->addr,
3891                                                 buf1_len, DMA_FROM_DEVICE);
3892                         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
3893                                         buf->page, 0, buf1_len,
3894                                         priv->dma_buf_sz);
3895
3896                         /* Data payload appended into SKB */
3897                         page_pool_release_page(rx_q->page_pool, buf->page);
3898                         buf->page = NULL;
3899                 }
3900
3901                 if (buf2_len) {
3902                         dma_sync_single_for_cpu(priv->device, buf->sec_addr,
3903                                                 buf2_len, DMA_FROM_DEVICE);
3904                         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
3905                                         buf->sec_page, 0, buf2_len,
3906                                         priv->dma_buf_sz);
3907
3908                         /* Data payload appended into SKB */
3909                         page_pool_release_page(rx_q->page_pool, buf->sec_page);
3910                         buf->sec_page = NULL;
3911                 }
3912
3913 drain_data:
3914                 if (likely(status & rx_not_ls))
3915                         goto read_again;
3916                 if (!skb)
3917                         continue;
3918
3919                 /* Got entire packet into SKB. Finish it. */
3920
3921                 stmmac_get_rx_hwtstamp(priv, p, np, skb);
3922                 stmmac_rx_vlan(priv->dev, skb);
3923                 skb->protocol = eth_type_trans(skb, priv->dev);
3924
3925                 if (unlikely(!coe))
3926                         skb_checksum_none_assert(skb);
3927                 else
3928                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3929
3930                 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
3931                         skb_set_hash(skb, hash, hash_type);
3932
3933                 skb_record_rx_queue(skb, queue);
3934                 napi_gro_receive(&ch->rx_napi, skb);
3935                 skb = NULL;
3936
3937                 priv->dev->stats.rx_packets++;
3938                 priv->dev->stats.rx_bytes += len;
3939                 count++;
3940         }
3941
3942         if (status & rx_not_ls || skb) {
3943                 rx_q->state_saved = true;
3944                 rx_q->state.skb = skb;
3945                 rx_q->state.error = error;
3946                 rx_q->state.len = len;
3947         }
3948
3949         stmmac_rx_refill(priv, queue);
3950
3951         priv->xstats.rx_pkt_n += count;
3952
3953         return count;
3954 }
3955
3956 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
3957 {
3958         struct stmmac_channel *ch =
3959                 container_of(napi, struct stmmac_channel, rx_napi);
3960         struct stmmac_priv *priv = ch->priv_data;
3961         u32 chan = ch->index;
3962         int work_done;
3963
3964         priv->xstats.napi_poll++;
3965
3966         work_done = stmmac_rx(priv, budget, chan);
3967         if (work_done < budget && napi_complete_done(napi, work_done)) {
3968                 unsigned long flags;
3969
3970                 spin_lock_irqsave(&ch->lock, flags);
3971                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
3972                 spin_unlock_irqrestore(&ch->lock, flags);
3973         }
3974
3975         return work_done;
3976 }
3977
3978 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
3979 {
3980         struct stmmac_channel *ch =
3981                 container_of(napi, struct stmmac_channel, tx_napi);
3982         struct stmmac_priv *priv = ch->priv_data;
3983         u32 chan = ch->index;
3984         int work_done;
3985
3986         priv->xstats.napi_poll++;
3987
3988         work_done = stmmac_tx_clean(priv, priv->dma_tx_size, chan);
3989         work_done = min(work_done, budget);
3990
3991         if (work_done < budget && napi_complete_done(napi, work_done)) {
3992                 unsigned long flags;
3993
3994                 spin_lock_irqsave(&ch->lock, flags);
3995                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
3996                 spin_unlock_irqrestore(&ch->lock, flags);
3997         }
3998
3999         return work_done;
4000 }
4001
4002 /**
4003  *  stmmac_tx_timeout
4004  *  @dev : Pointer to net device structure
4005  *  @txqueue: the index of the hanging transmit queue
4006  *  Description: this function is called when a packet transmission fails to
4007  *   complete within a reasonable time. The driver will mark the error in the
4008  *   netdev structure and arrange for the device to be reset to a sane state
4009  *   in order to transmit a new packet.
4010  */
4011 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
4012 {
4013         struct stmmac_priv *priv = netdev_priv(dev);
4014
4015         stmmac_global_err(priv);
4016 }
4017
4018 /**
4019  *  stmmac_set_rx_mode - entry point for multicast addressing
4020  *  @dev : pointer to the device structure
4021  *  Description:
4022  *  This function is a driver entry point which gets called by the kernel
4023  *  whenever multicast addresses must be enabled/disabled.
4024  *  Return value:
4025  *  void.
4026  */
4027 static void stmmac_set_rx_mode(struct net_device *dev)
4028 {
4029         struct stmmac_priv *priv = netdev_priv(dev);
4030
4031         stmmac_set_filter(priv, priv->hw, dev);
4032 }
4033
4034 /**
4035  *  stmmac_change_mtu - entry point to change MTU size for the device.
4036  *  @dev : device pointer.
4037  *  @new_mtu : the new MTU size for the device.
4038  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
4039  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
4040  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
4041  *  Return value:
4042  *  0 on success and an appropriate negative integer as defined in
4043  *  errno.h on failure.
4044  */
4045 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
4046 {
4047         struct stmmac_priv *priv = netdev_priv(dev);
4048         int txfifosz = priv->plat->tx_fifo_size;
4049         const int mtu = new_mtu;
4050
4051         if (txfifosz == 0)
4052                 txfifosz = priv->dma_cap.tx_fifo_size;
4053
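             /* The TX FIFO is shared across the TX queues, so each queue only
              * gets an equal slice of it for the MTU check below.
              */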
4054         txfifosz /= priv->plat->tx_queues_to_use;
4055
4056         if (netif_running(dev)) {
4057                 netdev_err(priv->dev, "must be stopped to change its MTU\n");
4058                 return -EBUSY;
4059         }
4060
4061         new_mtu = STMMAC_ALIGN(new_mtu);
4062
4063         /* If this condition is true, the FIFO is too small or the MTU is too large */
4064         if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
4065                 return -EINVAL;
4066
4067         dev->mtu = mtu;
4068
4069         netdev_update_features(dev);
4070
4071         return 0;
4072 }
4073
4074 static netdev_features_t stmmac_fix_features(struct net_device *dev,
4075                                              netdev_features_t features)
4076 {
4077         struct stmmac_priv *priv = netdev_priv(dev);
4078
4079         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
4080                 features &= ~NETIF_F_RXCSUM;
4081
4082         if (!priv->plat->tx_coe)
4083                 features &= ~NETIF_F_CSUM_MASK;
4084
4085         /* Some GMAC devices have buggy Jumbo frame support that
4086          * needs the Tx COE disabled for oversized frames
4087          * (due to limited buffer sizes). In this case we disable
4088          * the TX csum insertion in the TDES and do not use SF.
4089          */
4090         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
4091                 features &= ~NETIF_F_CSUM_MASK;
4092
4093         /* Enable/disable TSO according to what ethtool requested */
4094         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4095                 if (features & NETIF_F_TSO)
4096                         priv->tso = true;
4097                 else
4098                         priv->tso = false;
4099         }
4100
4101         return features;
4102 }
4103
4104 static int stmmac_set_features(struct net_device *netdev,
4105                                netdev_features_t features)
4106 {
4107         struct stmmac_priv *priv = netdev_priv(netdev);
4108         bool sph_en;
4109         u32 chan;
4110
4111         /* Keep the COE type if RX checksum offload is supported */
4112         if (features & NETIF_F_RXCSUM)
4113                 priv->hw->rx_csum = priv->plat->rx_coe;
4114         else
4115                 priv->hw->rx_csum = 0;
4116         /* No error check is needed because rx_coe has been set before and will
4117          * be fixed up if there is an issue.
4118          */
4119         stmmac_rx_ipc(priv, priv->hw);
4120
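             /* Split Header depends on RX checksum offload, so re-evaluate it
              * for every RX channel whenever the RXCSUM feature changes.
              */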
4121         sph_en = (priv->hw->rx_csum > 0) && priv->sph;
4122         for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
4123                 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
4124
4125         return 0;
4126 }
4127
4128 /**
4129  *  stmmac_interrupt - main ISR
4130  *  @irq: interrupt number.
4131  *  @dev_id: to pass the net device pointer (must be valid).
4132  *  Description: this is the main driver interrupt service routine.
4133  *  It can call:
4134  *  o DMA service routine (to manage incoming frame reception and transmission
4135  *    status)
4136  *  o Core interrupts to manage: remote wake-up, management counter, LPI
4137  *    interrupts.
4138  */
4139 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
4140 {
4141         struct net_device *dev = (struct net_device *)dev_id;
4142         struct stmmac_priv *priv = netdev_priv(dev);
4143         u32 rx_cnt = priv->plat->rx_queues_to_use;
4144         u32 tx_cnt = priv->plat->tx_queues_to_use;
4145         u32 queues_count;
4146         u32 queue;
4147         bool xmac;
4148
4149         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
4150         queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
4151
4152         if (priv->irq_wake)
4153                 pm_wakeup_event(priv->device, 0);
4154
4155         /* Check if adapter is up */
4156         if (test_bit(STMMAC_DOWN, &priv->state))
4157                 return IRQ_HANDLED;
4158         /* Check if a fatal error happened */
4159         if (stmmac_safety_feat_interrupt(priv))
4160                 return IRQ_HANDLED;
4161
4162         /* Handle the GMAC's own interrupts */
4163         if ((priv->plat->has_gmac) || xmac) {
4164                 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
4165                 int mtl_status;
4166
4167                 if (unlikely(status)) {
4168                         /* For LPI we need to save the tx status */
4169                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
4170                                 priv->tx_path_in_lpi_mode = true;
4171                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
4172                                 priv->tx_path_in_lpi_mode = false;
4173                 }
4174
4175                 for (queue = 0; queue < queues_count; queue++) {
4176                         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4177
4178                         mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
4179                                                                 queue);
4180                         if (mtl_status != -EINVAL)
4181                                 status |= mtl_status;
4182
4183                         if (status & CORE_IRQ_MTL_RX_OVERFLOW)
4184                                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
4185                                                        rx_q->rx_tail_addr,
4186                                                        queue);
4187                 }
4188
4189                 /* PCS link status */
4190                 if (priv->hw->pcs) {
4191                         if (priv->xstats.pcs_link)
4192                                 netif_carrier_on(dev);
4193                         else
4194                                 netif_carrier_off(dev);
4195                 }
4196         }
4197
4198         /* To handle DMA interrupts */
4199         stmmac_dma_interrupt(priv);
4200
4201         return IRQ_HANDLED;
4202 }
4203
4204 #ifdef CONFIG_NET_POLL_CONTROLLER
4205 /* Polling receive - used by NETCONSOLE and other diagnostic tools
4206  * to allow network I/O with interrupts disabled.
4207  */
4208 static void stmmac_poll_controller(struct net_device *dev)
4209 {
4210         disable_irq(dev->irq);
4211         stmmac_interrupt(dev->irq, dev);
4212         enable_irq(dev->irq);
4213 }
4214 #endif
4215
4216 /**
4217  *  stmmac_ioctl - Entry point for the Ioctl
4218  *  @dev: Device pointer.
4219  *  @rq: An IOCTL-specific structure that can contain a pointer to
4220  *  a proprietary structure used to pass information to the driver.
4221  *  @cmd: IOCTL command
4222  *  Description:
4223  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
4224  */
4225 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4226 {
4227         struct stmmac_priv *priv = netdev_priv(dev);
4228         int ret = -EOPNOTSUPP;
4229
4230         if (!netif_running(dev))
4231                 return -EINVAL;
4232
4233         switch (cmd) {
4234         case SIOCGMIIPHY:
4235         case SIOCGMIIREG:
4236         case SIOCSMIIREG:
4237                 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
4238                 break;
4239         case SIOCSHWTSTAMP:
4240                 ret = stmmac_hwtstamp_set(dev, rq);
4241                 break;
4242         case SIOCGHWTSTAMP:
4243                 ret = stmmac_hwtstamp_get(dev, rq);
4244                 break;
4245         default:
4246                 break;
4247         }
4248
4249         return ret;
4250 }
4251
4252 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
4253                                     void *cb_priv)
4254 {
4255         struct stmmac_priv *priv = cb_priv;
4256         int ret = -EOPNOTSUPP;
4257
4258         if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
4259                 return ret;
4260
4261         stmmac_disable_all_queues(priv);
4262
4263         switch (type) {
4264         case TC_SETUP_CLSU32:
4265                 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
4266                 break;
4267         case TC_SETUP_CLSFLOWER:
4268                 ret = stmmac_tc_setup_cls(priv, priv, type_data);
4269                 break;
4270         default:
4271                 break;
4272         }
4273
4274         stmmac_enable_all_queues(priv);
4275         return ret;
4276 }
4277
4278 static LIST_HEAD(stmmac_block_cb_list);
4279
4280 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
4281                            void *type_data)
4282 {
4283         struct stmmac_priv *priv = netdev_priv(ndev);
4284
4285         switch (type) {
4286         case TC_SETUP_BLOCK:
4287                 return flow_block_cb_setup_simple(type_data,
4288                                                   &stmmac_block_cb_list,
4289                                                   stmmac_setup_tc_block_cb,
4290                                                   priv, priv, true);
4291         case TC_SETUP_QDISC_CBS:
4292                 return stmmac_tc_setup_cbs(priv, priv, type_data);
4293         case TC_SETUP_QDISC_TAPRIO:
4294                 return stmmac_tc_setup_taprio(priv, priv, type_data);
4295         case TC_SETUP_QDISC_ETF:
4296                 return stmmac_tc_setup_etf(priv, priv, type_data);
4297         default:
4298                 return -EOPNOTSUPP;
4299         }
4300 }
4301
4302 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
4303                                struct net_device *sb_dev)
4304 {
4305         int gso = skb_shinfo(skb)->gso_type;
4306
4307         if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
4308                 /*
4309                  * There is no way to determine the number of TSO/USO
4310                  * capable Queues. Let's use always the Queue 0
4311                  * because if TSO/USO is supported then at least this
4312                  * one will be capable.
4313                  */
4314                 return 0;
4315         }
4316
4317         return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
4318 }
4319
4320 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
4321 {
4322         struct stmmac_priv *priv = netdev_priv(ndev);
4323         int ret = 0;
4324
4325         ret = eth_mac_addr(ndev, addr);
4326         if (ret)
4327                 return ret;
4328
4329         stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
4330
4331         return ret;
4332 }
4333
4334 #ifdef CONFIG_DEBUG_FS
4335 static struct dentry *stmmac_fs_dir;
4336
4337 static void sysfs_display_ring(void *head, int size, int extend_desc,
4338                                struct seq_file *seq, dma_addr_t dma_phy_addr)
4339 {
4340         int i;
4341         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
4342         struct dma_desc *p = (struct dma_desc *)head;
4343         dma_addr_t dma_addr;
4344
4345         for (i = 0; i < size; i++) {
4346                 if (extend_desc) {
4347                         dma_addr = dma_phy_addr + i * sizeof(*ep);
4348                         seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
4349                                    i, &dma_addr,
4350                                    le32_to_cpu(ep->basic.des0),
4351                                    le32_to_cpu(ep->basic.des1),
4352                                    le32_to_cpu(ep->basic.des2),
4353                                    le32_to_cpu(ep->basic.des3));
4354                         ep++;
4355                 } else {
4356                         dma_addr = dma_phy_addr + i * sizeof(*p);
4357                         seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
4358                                    i, &dma_addr,
4359                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
4360                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
4361                         p++;
4362                 }
4363                 seq_printf(seq, "\n");
4364         }
4365 }
4366
4367 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
4368 {
4369         struct net_device *dev = seq->private;
4370         struct stmmac_priv *priv = netdev_priv(dev);
4371         u32 rx_count = priv->plat->rx_queues_to_use;
4372         u32 tx_count = priv->plat->tx_queues_to_use;
4373         u32 queue;
4374
4375         if ((dev->flags & IFF_UP) == 0)
4376                 return 0;
4377
4378         for (queue = 0; queue < rx_count; queue++) {
4379                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4380
4381                 seq_printf(seq, "RX Queue %d:\n", queue);
4382
4383                 if (priv->extend_desc) {
4384                         seq_printf(seq, "Extended descriptor ring:\n");
4385                         sysfs_display_ring((void *)rx_q->dma_erx,
4386                                            priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
4387                 } else {
4388                         seq_printf(seq, "Descriptor ring:\n");
4389                         sysfs_display_ring((void *)rx_q->dma_rx,
4390                                            priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
4391                 }
4392         }
4393
4394         for (queue = 0; queue < tx_count; queue++) {
4395                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4396
4397                 seq_printf(seq, "TX Queue %d:\n", queue);
4398
4399                 if (priv->extend_desc) {
4400                         seq_printf(seq, "Extended descriptor ring:\n");
4401                         sysfs_display_ring((void *)tx_q->dma_etx,
4402                                            priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
4403                 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
4404                         seq_printf(seq, "Descriptor ring:\n");
4405                         sysfs_display_ring((void *)tx_q->dma_tx,
4406                                            priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
4407                 }
4408         }
4409
4410         return 0;
4411 }
4412 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
4413
4414 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
4415 {
4416         struct net_device *dev = seq->private;
4417         struct stmmac_priv *priv = netdev_priv(dev);
4418
4419         if (!priv->hw_cap_support) {
4420                 seq_printf(seq, "DMA HW features not supported\n");
4421                 return 0;
4422         }
4423
4424         seq_printf(seq, "==============================\n");
4425         seq_printf(seq, "\tDMA HW features\n");
4426         seq_printf(seq, "==============================\n");
4427
4428         seq_printf(seq, "\t10/100 Mbps: %s\n",
4429                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
4430         seq_printf(seq, "\t1000 Mbps: %s\n",
4431                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
4432         seq_printf(seq, "\tHalf duplex: %s\n",
4433                    (priv->dma_cap.half_duplex) ? "Y" : "N");
4434         seq_printf(seq, "\tHash Filter: %s\n",
4435                    (priv->dma_cap.hash_filter) ? "Y" : "N");
4436         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
4437                    (priv->dma_cap.multi_addr) ? "Y" : "N");
4438         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
4439                    (priv->dma_cap.pcs) ? "Y" : "N");
4440         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
4441                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
4442         seq_printf(seq, "\tPMT Remote wake up: %s\n",
4443                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
4444         seq_printf(seq, "\tPMT Magic Frame: %s\n",
4445                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
4446         seq_printf(seq, "\tRMON module: %s\n",
4447                    (priv->dma_cap.rmon) ? "Y" : "N");
4448         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
4449                    (priv->dma_cap.time_stamp) ? "Y" : "N");
4450         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
4451                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
4452         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
4453                    (priv->dma_cap.eee) ? "Y" : "N");
4454         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
4455         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
4456                    (priv->dma_cap.tx_coe) ? "Y" : "N");
4457         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4458                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
4459                            (priv->dma_cap.rx_coe) ? "Y" : "N");
4460         } else {
4461                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
4462                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
4463                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
4464                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
4465         }
4466         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
4467                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
4468         seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
4469                    priv->dma_cap.number_rx_channel);
4470         seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
4471                    priv->dma_cap.number_tx_channel);
4472         seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
4473                    priv->dma_cap.number_rx_queues);
4474         seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
4475                    priv->dma_cap.number_tx_queues);
4476         seq_printf(seq, "\tEnhanced descriptors: %s\n",
4477                    (priv->dma_cap.enh_desc) ? "Y" : "N");
4478         seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
4479         seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
4480         seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
4481         seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
4482         seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
4483                    priv->dma_cap.pps_out_num);
4484         seq_printf(seq, "\tSafety Features: %s\n",
4485                    priv->dma_cap.asp ? "Y" : "N");
4486         seq_printf(seq, "\tFlexible RX Parser: %s\n",
4487                    priv->dma_cap.frpsel ? "Y" : "N");
4488         seq_printf(seq, "\tEnhanced Addressing: %d\n",
4489                    priv->dma_cap.addr64);
4490         seq_printf(seq, "\tReceive Side Scaling: %s\n",
4491                    priv->dma_cap.rssen ? "Y" : "N");
4492         seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
4493                    priv->dma_cap.vlhash ? "Y" : "N");
4494         seq_printf(seq, "\tSplit Header: %s\n",
4495                    priv->dma_cap.sphen ? "Y" : "N");
4496         seq_printf(seq, "\tVLAN TX Insertion: %s\n",
4497                    priv->dma_cap.vlins ? "Y" : "N");
4498         seq_printf(seq, "\tDouble VLAN: %s\n",
4499                    priv->dma_cap.dvlan ? "Y" : "N");
4500         seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
4501                    priv->dma_cap.l3l4fnum);
4502         seq_printf(seq, "\tARP Offloading: %s\n",
4503                    priv->dma_cap.arpoffsel ? "Y" : "N");
4504         seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
4505                    priv->dma_cap.estsel ? "Y" : "N");
4506         seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
4507                    priv->dma_cap.fpesel ? "Y" : "N");
4508         seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
4509                    priv->dma_cap.tbssel ? "Y" : "N");
4510         return 0;
4511 }
4512 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
4513
4514 /* Use network device events to rename debugfs file entries.
4515  */
4516 static int stmmac_device_event(struct notifier_block *unused,
4517                                unsigned long event, void *ptr)
4518 {
4519         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4520         struct stmmac_priv *priv = netdev_priv(dev);
4521
4522         if (dev->netdev_ops != &stmmac_netdev_ops)
4523                 goto done;
4524
4525         switch (event) {
4526         case NETDEV_CHANGENAME:
4527                 if (priv->dbgfs_dir)
4528                         priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
4529                                                          priv->dbgfs_dir,
4530                                                          stmmac_fs_dir,
4531                                                          dev->name);
4532                 break;
4533         }
4534 done:
4535         return NOTIFY_DONE;
4536 }
4537
4538 static struct notifier_block stmmac_notifier = {
4539         .notifier_call = stmmac_device_event,
4540 };
4541
4542 static void stmmac_init_fs(struct net_device *dev)
4543 {
4544         struct stmmac_priv *priv = netdev_priv(dev);
4545
4546         rtnl_lock();
4547
4548         /* Create per netdev entries */
4549         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4550
4551         /* Entry to report DMA RX/TX rings */
4552         debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
4553                             &stmmac_rings_status_fops);
4554
4555         /* Entry to report the DMA HW features */
4556         debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
4557                             &stmmac_dma_cap_fops);
4558
4559         rtnl_unlock();
4560 }
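
/* Illustrative note: with debugfs mounted at /sys/kernel/debug and
 * assuming STMMAC_RESOURCE_NAME expands to "stmmaceth", the two files
 * created above show up as
 *   /sys/kernel/debug/stmmaceth/<ifname>/descriptors_status
 *   /sys/kernel/debug/stmmaceth/<ifname>/dma_cap
 * and can simply be read (e.g. with cat) to dump the DMA rings and the
 * DMA HW feature bits.
 */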
4561
4562 static void stmmac_exit_fs(struct net_device *dev)
4563 {
4564         struct stmmac_priv *priv = netdev_priv(dev);
4565
4566         debugfs_remove_recursive(priv->dbgfs_dir);
4567 }
4568 #endif /* CONFIG_DEBUG_FS */
4569
4570 static u32 stmmac_vid_crc32_le(__le16 vid_le)
4571 {
4572         unsigned char *data = (unsigned char *)&vid_le;
4573         unsigned char data_byte = 0;
4574         u32 crc = ~0x0;
4575         u32 temp = 0;
4576         int i, bits;
4577
4578         bits = get_bitmask_order(VLAN_VID_MASK);
4579         for (i = 0; i < bits; i++) {
4580                 if ((i % 8) == 0)
4581                         data_byte = data[i / 8];
4582
4583                 temp = ((crc & 1) ^ data_byte) & 1;
4584                 crc >>= 1;
4585                 data_byte >>= 1;
4586
4587                 if (temp)
4588                         crc ^= 0xedb88320;
4589         }
4590
4591         return crc;
4592 }
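
/* Worked example (illustrative only): the loop above is a plain bitwise
 * CRC-32 (reflected polynomial 0xedb88320) computed over just the
 * get_bitmask_order(VLAN_VID_MASK) = 12 low-order bits of the
 * little-endian VID; the caller then takes the top 4 bits of
 * bitrev32(~crc) as the index of the bit to set in the 16-bit VLAN
 * hash filter.
 */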
4593
4594 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
4595 {
4596         u32 crc, hash = 0;
4597         __le16 pmatch = 0;
4598         int count = 0;
4599         u16 vid = 0;
4600
4601         for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
4602                 __le16 vid_le = cpu_to_le16(vid);
4603                 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
4604                 hash |= (1 << crc);
4605                 count++;
4606         }
4607
4608         if (!priv->dma_cap.vlhash) {
4609                 if (count > 2) /* VID = 0 always passes filter */
4610                         return -EOPNOTSUPP;
4611
4612                 pmatch = cpu_to_le16(vid);
4613                 hash = 0;
4614         }
4615
4616         return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
4617 }
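
/* Illustrative note: when the MAC has no VLAN hash support
 * (dma_cap.vlhash == 0) the fallback above can only program a single
 * perfect-match VID; since VID 0 always passes the filter, anything
 * beyond two active VIDs cannot be represented and -EOPNOTSUPP is
 * returned.
 */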
4618
4619 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
4620 {
4621         struct stmmac_priv *priv = netdev_priv(ndev);
4622         bool is_double = false;
4623         int ret;
4624
4625         if (be16_to_cpu(proto) == ETH_P_8021AD)
4626                 is_double = true;
4627
4628         set_bit(vid, priv->active_vlans);
4629         ret = stmmac_vlan_update(priv, is_double);
4630         if (ret) {
4631                 clear_bit(vid, priv->active_vlans);
4632                 return ret;
4633         }
4634
4635         if (priv->hw->num_vlan) {
4636                 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
4637                 if (ret)
4638                         return ret;
4639         }
4640
4641         return 0;
4642 }
4643
4644 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
4645 {
4646         struct stmmac_priv *priv = netdev_priv(ndev);
4647         bool is_double = false;
4648         int ret;
4649
4650         if (be16_to_cpu(proto) == ETH_P_8021AD)
4651                 is_double = true;
4652
4653         clear_bit(vid, priv->active_vlans);
4654
4655         if (priv->hw->num_vlan) {
4656                 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
4657                 if (ret)
4658                         return ret;
4659         }
4660
4661         return stmmac_vlan_update(priv, is_double);
4662 }
4663
4664 static const struct net_device_ops stmmac_netdev_ops = {
4665         .ndo_open = stmmac_open,
4666         .ndo_start_xmit = stmmac_xmit,
4667         .ndo_stop = stmmac_release,
4668         .ndo_change_mtu = stmmac_change_mtu,
4669         .ndo_fix_features = stmmac_fix_features,
4670         .ndo_set_features = stmmac_set_features,
4671         .ndo_set_rx_mode = stmmac_set_rx_mode,
4672         .ndo_tx_timeout = stmmac_tx_timeout,
4673         .ndo_do_ioctl = stmmac_ioctl,
4674         .ndo_setup_tc = stmmac_setup_tc,
4675         .ndo_select_queue = stmmac_select_queue,
4676 #ifdef CONFIG_NET_POLL_CONTROLLER
4677         .ndo_poll_controller = stmmac_poll_controller,
4678 #endif
4679         .ndo_set_mac_address = stmmac_set_mac_address,
4680         .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
4681         .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
4682 };
4683
4684 static void stmmac_reset_subtask(struct stmmac_priv *priv)
4685 {
4686         if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4687                 return;
4688         if (test_bit(STMMAC_DOWN, &priv->state))
4689                 return;
4690
4691         netdev_err(priv->dev, "Reset adapter.\n");
4692
4693         rtnl_lock();
4694         netif_trans_update(priv->dev);
4695         while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4696                 usleep_range(1000, 2000);
4697
4698         set_bit(STMMAC_DOWN, &priv->state);
4699         dev_close(priv->dev);
4700         dev_open(priv->dev, NULL);
4701         clear_bit(STMMAC_DOWN, &priv->state);
4702         clear_bit(STMMAC_RESETING, &priv->state);
4703         rtnl_unlock();
4704 }
4705
4706 static void stmmac_service_task(struct work_struct *work)
4707 {
4708         struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4709                         service_task);
4710
4711         stmmac_reset_subtask(priv);
4712         clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4713 }
4714
4715 /**
4716  *  stmmac_hw_init - Init the MAC device
4717  *  @priv: driver private structure
4718  *  Description: this function is to configure the MAC device according to
4719  *  some platform parameters or the HW capability register. It prepares the
4720  *  driver to use either ring or chain modes and to setup either enhanced or
4721  *  normal descriptors.
4722  */
4723 static int stmmac_hw_init(struct stmmac_priv *priv)
4724 {
4725         int ret;
4726
4727         /* dwmac-sun8i only works in chain mode */
4728         if (priv->plat->has_sun8i)
4729                 chain_mode = 1;
4730         priv->chain_mode = chain_mode;
4731
4732         /* Initialize HW Interface */
4733         ret = stmmac_hwif_init(priv);
4734         if (ret)
4735                 return ret;
4736
4737         /* Get the HW capability (GMAC cores newer than 3.50a) */
4738         priv->hw_cap_support = stmmac_get_hw_features(priv);
4739         if (priv->hw_cap_support) {
4740                 dev_info(priv->device, "DMA HW capability register supported\n");
4741
4742                 /* We can override some gmac/dma configuration fields
4743                  * (e.g. enh_desc, tx_coe) that are passed through the
4744                  * platform with the values from the HW capability
4745                  * register (if supported).
4746                  */
4747                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
4748                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4749                 priv->hw->pmt = priv->plat->pmt;
4750                 if (priv->dma_cap.hash_tb_sz) {
4751                         priv->hw->multicast_filter_bins =
4752                                         (BIT(priv->dma_cap.hash_tb_sz) << 5);
4753                         priv->hw->mcast_bits_log2 =
4754                                         ilog2(priv->hw->multicast_filter_bins);
4755                 }
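                /* Worked example (illustrative): a reported hash_tb_sz of 2
                 * gives multicast_filter_bins = (BIT(2) << 5) = 128 and
                 * mcast_bits_log2 = ilog2(128) = 7.
                 */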
4756
4757                 /* TXCOE doesn't work in thresh DMA mode */
4758                 if (priv->plat->force_thresh_dma_mode)
4759                         priv->plat->tx_coe = 0;
4760                 else
4761                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
4762
4763                 /* In case of GMAC4 rx_coe is from HW cap register. */
4764                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
4765
4766                 if (priv->dma_cap.rx_coe_type2)
4767                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4768                 else if (priv->dma_cap.rx_coe_type1)
4769                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4770
4771         } else {
4772                 dev_info(priv->device, "No HW DMA feature register supported\n");
4773         }
4774
4775         if (priv->plat->rx_coe) {
4776                 priv->hw->rx_csum = priv->plat->rx_coe;
4777                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4778                 if (priv->synopsys_id < DWMAC_CORE_4_00)
4779                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4780         }
4781         if (priv->plat->tx_coe)
4782                 dev_info(priv->device, "TX Checksum insertion supported\n");
4783
4784         if (priv->plat->pmt) {
4785                 dev_info(priv->device, "Wake-Up On Lan supported\n");
4786                 device_set_wakeup_capable(priv->device, 1);
4787         }
4788
4789         if (priv->dma_cap.tsoen)
4790                 dev_info(priv->device, "TSO supported\n");
4791
4792         priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
4793         priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
4794
4795         /* Run HW quirks, if any */
4796         if (priv->hwif_quirks) {
4797                 ret = priv->hwif_quirks(priv);
4798                 if (ret)
4799                         return ret;
4800         }
4801
4802         /* Rx Watchdog is available in cores newer than 3.40.
4803          * In some cases, for example on buggy HW, this feature
4804          * has to be disabled and this can be done by passing the
4805          * riwt_off field from the platform.
4806          */
4807         if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
4808             (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
4809                 priv->use_riwt = 1;
4810                 dev_info(priv->device,
4811                          "Enable RX Mitigation via HW Watchdog Timer\n");
4812         }
4813
4814         return 0;
4815 }
4816
4817 static void stmmac_napi_add(struct net_device *dev)
4818 {
4819         struct stmmac_priv *priv = netdev_priv(dev);
4820         u32 queue, maxq;
4821
4822         maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4823
4824         for (queue = 0; queue < maxq; queue++) {
4825                 struct stmmac_channel *ch = &priv->channel[queue];
4826
4827                 ch->priv_data = priv;
4828                 ch->index = queue;
4829                 spin_lock_init(&ch->lock);
4830
4831                 if (queue < priv->plat->rx_queues_to_use) {
4832                         netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
4833                                        NAPI_POLL_WEIGHT);
4834                 }
4835                 if (queue < priv->plat->tx_queues_to_use) {
4836                         netif_tx_napi_add(dev, &ch->tx_napi,
4837                                           stmmac_napi_poll_tx,
4838                                           NAPI_POLL_WEIGHT);
4839                 }
4840         }
4841 }
4842
4843 static void stmmac_napi_del(struct net_device *dev)
4844 {
4845         struct stmmac_priv *priv = netdev_priv(dev);
4846         u32 queue, maxq;
4847
4848         maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4849
4850         for (queue = 0; queue < maxq; queue++) {
4851                 struct stmmac_channel *ch = &priv->channel[queue];
4852
4853                 if (queue < priv->plat->rx_queues_to_use)
4854                         netif_napi_del(&ch->rx_napi);
4855                 if (queue < priv->plat->tx_queues_to_use)
4856                         netif_napi_del(&ch->tx_napi);
4857         }
4858 }
4859
4860 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
4861 {
4862         struct stmmac_priv *priv = netdev_priv(dev);
4863         int ret = 0;
4864
4865         if (netif_running(dev))
4866                 stmmac_release(dev);
4867
4868         stmmac_napi_del(dev);
4869
4870         priv->plat->rx_queues_to_use = rx_cnt;
4871         priv->plat->tx_queues_to_use = tx_cnt;
4872
4873         stmmac_napi_add(dev);
4874
4875         if (netif_running(dev))
4876                 ret = stmmac_open(dev);
4877
4878         return ret;
4879 }
4880
4881 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
4882 {
4883         struct stmmac_priv *priv = netdev_priv(dev);
4884         int ret = 0;
4885
4886         if (netif_running(dev))
4887                 stmmac_release(dev);
4888
4889         priv->dma_rx_size = rx_size;
4890         priv->dma_tx_size = tx_size;
4891
4892         if (netif_running(dev))
4893                 ret = stmmac_open(dev);
4894
4895         return ret;
4896 }
4897
4898 /**
4899  * stmmac_dvr_probe
4900  * @device: device pointer
4901  * @plat_dat: platform data pointer
4902  * @res: stmmac resource pointer
4903  * Description: this is the main probe function used to
4904  * call alloc_etherdev and allocate the priv structure.
4905  * Return:
4906  * returns 0 on success, otherwise errno.
4907  */
4908 int stmmac_dvr_probe(struct device *device,
4909                      struct plat_stmmacenet_data *plat_dat,
4910                      struct stmmac_resources *res)
4911 {
4912         struct net_device *ndev = NULL;
4913         struct stmmac_priv *priv;
4914         u32 rxq;
4915         int i, ret = 0;
4916
4917         ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
4918                                        MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
4919         if (!ndev)
4920                 return -ENOMEM;
4921
4922         SET_NETDEV_DEV(ndev, device);
4923
4924         priv = netdev_priv(ndev);
4925         priv->device = device;
4926         priv->dev = ndev;
4927
4928         stmmac_set_ethtool_ops(ndev);
4929         priv->pause = pause;
4930         priv->plat = plat_dat;
4931         priv->ioaddr = res->addr;
4932         priv->dev->base_addr = (unsigned long)res->addr;
4933
4934         priv->dev->irq = res->irq;
4935         priv->wol_irq = res->wol_irq;
4936         priv->lpi_irq = res->lpi_irq;
4937
4938         if (!IS_ERR_OR_NULL(res->mac))
4939                 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4940
4941         dev_set_drvdata(device, priv->dev);
4942
4943         /* Verify driver arguments */
4944         stmmac_verify_args();
4945
4946         /* Allocate workqueue */
4947         priv->wq = create_singlethread_workqueue("stmmac_wq");
4948         if (!priv->wq) {
4949                 dev_err(priv->device, "failed to create workqueue\n");
4950                 return -ENOMEM;
4951         }
4952
4953         INIT_WORK(&priv->service_task, stmmac_service_task);
4954
4955         /* Override with kernel parameters if supplied XXX CRS XXX
4956          * this needs to have multiple instances
4957          */
4958         if ((phyaddr >= 0) && (phyaddr <= 31))
4959                 priv->plat->phy_addr = phyaddr;
4960
4961         if (priv->plat->stmmac_rst) {
4962                 ret = reset_control_assert(priv->plat->stmmac_rst);
4963                 reset_control_deassert(priv->plat->stmmac_rst);
4964                 /* Some reset controllers have only reset callback instead of
4965                  * assert + deassert callbacks pair.
4966                  */
4967                 if (ret == -ENOTSUPP)
4968                         reset_control_reset(priv->plat->stmmac_rst);
4969         }
4970
4971         /* Init MAC and get the capabilities */
4972         ret = stmmac_hw_init(priv);
4973         if (ret)
4974                 goto error_hw_init;
4975
4976         stmmac_check_ether_addr(priv);
4977
4978         ndev->netdev_ops = &stmmac_netdev_ops;
4979
4980         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4981                             NETIF_F_RXCSUM;
4982
4983         ret = stmmac_tc_init(priv, priv);
4984         if (!ret) {
4985                 ndev->hw_features |= NETIF_F_HW_TC;
4986         }
4987
4988         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4989                 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4990                 if (priv->plat->has_gmac4)
4991                         ndev->hw_features |= NETIF_F_GSO_UDP_L4;
4992                 priv->tso = true;
4993                 dev_info(priv->device, "TSO feature enabled\n");
4994         }
4995
4996         if (priv->dma_cap.sphen) {
4997                 ndev->hw_features |= NETIF_F_GRO;
4998                 priv->sph = true;
4999                 dev_info(priv->device, "SPH feature enabled\n");
5000         }
5001
5002         /* The current IP register MAC_HW_Feature1[ADDR64] only defines
5003          * 32/40/64 bit widths, but some SoCs support other widths; e.g.
5004          * i.MX8MP supports 34 bits, which maps to 40 bits in MAC_HW_Feature1[ADDR64].
5005          * So overwrite dma_cap.addr64 according to the real HW design.
5006          */
5007         if (priv->plat->addr64)
5008                 priv->dma_cap.addr64 = priv->plat->addr64;
5009
5010         if (priv->dma_cap.addr64) {
5011                 ret = dma_set_mask_and_coherent(device,
5012                                 DMA_BIT_MASK(priv->dma_cap.addr64));
5013                 if (!ret) {
5014                         dev_info(priv->device, "Using %d bits DMA width\n",
5015                                  priv->dma_cap.addr64);
5016
5017                         /*
5018                          * If more than 32 bits can be addressed, make sure to
5019                          * enable enhanced addressing mode.
5020                          */
5021                         if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
5022                                 priv->plat->dma_cfg->eame = true;
5023                 } else {
5024                         ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
5025                         if (ret) {
5026                                 dev_err(priv->device, "Failed to set DMA Mask\n");
5027                                 goto error_hw_init;
5028                         }
5029
5030                         priv->dma_cap.addr64 = 32;
5031                 }
5032         }
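
        /* Illustrative example: a core reporting dma_cap.addr64 = 40 asks for
         * DMA_BIT_MASK(40) = 0xff_ffff_ffff above; if the platform cannot
         * honour that mask, the fallback path sets a 32-bit mask and clamps
         * dma_cap.addr64 to 32.
         */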
5033
5034         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
5035         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
5036 #ifdef STMMAC_VLAN_TAG_USED
5037         /* Both mac100 and gmac support receive VLAN tag detection */
5038         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
5039         if (priv->dma_cap.vlhash) {
5040                 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
5041                 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
5042         }
5043         if (priv->dma_cap.vlins) {
5044                 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
5045                 if (priv->dma_cap.dvlan)
5046                         ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
5047         }
5048 #endif
5049         priv->msg_enable = netif_msg_init(debug, default_msg_level);
5050
5051         /* Initialize RSS */
5052         rxq = priv->plat->rx_queues_to_use;
5053         netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
5054         for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
5055                 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
5056
5057         if (priv->dma_cap.rssen && priv->plat->rss_en)
5058                 ndev->features |= NETIF_F_RXHASH;
5059
5060         /* MTU range: 46 - hw-specific max */
5061         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
5062         if (priv->plat->has_xgmac)
5063                 ndev->max_mtu = XGMAC_JUMBO_LEN;
5064         else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
5065                 ndev->max_mtu = JUMBO_LEN;
5066         else
5067                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
5068         /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
5069          * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
5070          */
5071         if ((priv->plat->maxmtu < ndev->max_mtu) &&
5072             (priv->plat->maxmtu >= ndev->min_mtu))
5073                 ndev->max_mtu = priv->plat->maxmtu;
5074         else if (priv->plat->maxmtu < ndev->min_mtu)
5075                 dev_warn(priv->device,
5076                          "%s: warning: maxmtu having invalid value (%d)\n",
5077                          __func__, priv->plat->maxmtu);
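
        /* Worked example (illustrative): ndev->min_mtu above evaluates to
         * ETH_ZLEN - ETH_HLEN = 60 - 14 = 46 bytes, matching the "MTU range"
         * comment; the maximum is the hw-specific jumbo limit, optionally
         * capped by a valid plat->maxmtu.
         */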
5078
5079         if (flow_ctrl)
5080                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
5081
5082         /* Setup channels NAPI */
5083         stmmac_napi_add(ndev);
5084
5085         mutex_init(&priv->lock);
5086
5087         /* If a specific clk_csr value is passed from the platform,
5088          * this means that the CSR Clock Range selection cannot be
5089          * changed at run-time and is fixed. Otherwise the driver will try to
5090          * set the MDC clock dynamically according to the actual csr
5091          * clock input.
5092          */
5093         if (priv->plat->clk_csr >= 0)
5094                 priv->clk_csr = priv->plat->clk_csr;
5095         else
5096                 stmmac_clk_csr_set(priv);
5097
5098         stmmac_check_pcs_mode(priv);
5099
5100         if (priv->hw->pcs != STMMAC_PCS_TBI &&
5101             priv->hw->pcs != STMMAC_PCS_RTBI) {
5102                 /* MDIO bus Registration */
5103                 ret = stmmac_mdio_register(ndev);
5104                 if (ret < 0) {
5105                         dev_err(priv->device,
5106                                 "%s: MDIO bus (id: %d) registration failed",
5107                                 __func__, priv->plat->bus_id);
5108                         goto error_mdio_register;
5109                 }
5110         }
5111
5112         ret = stmmac_phy_setup(priv);
5113         if (ret) {
5114                 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
5115                 goto error_phy_setup;
5116         }
5117
5118         ret = register_netdev(ndev);
5119         if (ret) {
5120                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
5121                         __func__, ret);
5122                 goto error_netdev_register;
5123         }
5124
5125         if (priv->plat->serdes_powerup) {
5126                 ret = priv->plat->serdes_powerup(ndev,
5127                                                  priv->plat->bsp_priv);
5128
5129                 if (ret < 0)
5130                         goto error_serdes_powerup;
5131         }
5132
5133 #ifdef CONFIG_DEBUG_FS
5134         stmmac_init_fs(ndev);
5135 #endif
5136
5137         return ret;
5138
5139 error_serdes_powerup:
5140         unregister_netdev(ndev);
5141 error_netdev_register:
5142         phylink_destroy(priv->phylink);
5143 error_phy_setup:
5144         if (priv->hw->pcs != STMMAC_PCS_TBI &&
5145             priv->hw->pcs != STMMAC_PCS_RTBI)
5146                 stmmac_mdio_unregister(ndev);
5147 error_mdio_register:
5148         stmmac_napi_del(ndev);
5149 error_hw_init:
5150         destroy_workqueue(priv->wq);
5151
5152         return ret;
5153 }
5154 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
5155
5156 /**
5157  * stmmac_dvr_remove
5158  * @dev: device pointer
5159  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
5160  * changes the link status, and releases the DMA descriptor rings.
5161  */
5162 int stmmac_dvr_remove(struct device *dev)
5163 {
5164         struct net_device *ndev = dev_get_drvdata(dev);
5165         struct stmmac_priv *priv = netdev_priv(ndev);
5166
5167         netdev_info(priv->dev, "%s: removing driver", __func__);
5168
5169         stmmac_stop_all_dma(priv);
5170
5171         if (priv->plat->serdes_powerdown)
5172                 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
5173
5174         stmmac_mac_set(priv, priv->ioaddr, false);
5175         netif_carrier_off(ndev);
5176         unregister_netdev(ndev);
5177 #ifdef CONFIG_DEBUG_FS
5178         stmmac_exit_fs(ndev);
5179 #endif
5180         phylink_destroy(priv->phylink);
5181         if (priv->plat->stmmac_rst)
5182                 reset_control_assert(priv->plat->stmmac_rst);
5183         clk_disable_unprepare(priv->plat->pclk);
5184         clk_disable_unprepare(priv->plat->stmmac_clk);
5185         if (priv->hw->pcs != STMMAC_PCS_TBI &&
5186             priv->hw->pcs != STMMAC_PCS_RTBI)
5187                 stmmac_mdio_unregister(ndev);
5188         destroy_workqueue(priv->wq);
5189         mutex_destroy(&priv->lock);
5190
5191         return 0;
5192 }
5193 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
5194
5195 /**
5196  * stmmac_suspend - suspend callback
5197  * @dev: device pointer
5198  * Description: this is the function to suspend the device and it is called
5199  * by the platform driver to stop the network queue, release the resources,
5200  * program the PMT register (for WoL), clean and release driver resources.
5201  */
5202 int stmmac_suspend(struct device *dev)
5203 {
5204         struct net_device *ndev = dev_get_drvdata(dev);
5205         struct stmmac_priv *priv = netdev_priv(ndev);
5206         u32 chan;
5207
5208         if (!ndev || !netif_running(ndev))
5209                 return 0;
5210
5211         phylink_mac_change(priv->phylink, false);
5212
5213         mutex_lock(&priv->lock);
5214
5215         netif_device_detach(ndev);
5216
5217         stmmac_disable_all_queues(priv);
5218
5219         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
5220                 hrtimer_cancel(&priv->tx_queue[chan].txtimer);
5221
5222         if (priv->eee_enabled) {
5223                 priv->tx_path_in_lpi_mode = false;
5224                 del_timer_sync(&priv->eee_ctrl_timer);
5225         }
5226
5227         /* Stop TX/RX DMA */
5228         stmmac_stop_all_dma(priv);
5229
5230         if (priv->plat->serdes_powerdown)
5231                 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
5232
5233         /* Enable Power down mode by programming the PMT regs */
5234         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
5235                 stmmac_pmt(priv, priv->hw, priv->wolopts);
5236                 priv->irq_wake = 1;
5237         } else {
5238                 mutex_unlock(&priv->lock);
5239                 rtnl_lock();
5240                 if (device_may_wakeup(priv->device))
5241                         phylink_speed_down(priv->phylink, false);
5242                 phylink_stop(priv->phylink);
5243                 rtnl_unlock();
5244                 mutex_lock(&priv->lock);
5245
5246                 stmmac_mac_set(priv, priv->ioaddr, false);
5247                 pinctrl_pm_select_sleep_state(priv->device);
5248                 /* Disable clocks in case PWM is off */
5249                 clk_disable_unprepare(priv->plat->clk_ptp_ref);
5250                 clk_disable_unprepare(priv->plat->pclk);
5251                 clk_disable_unprepare(priv->plat->stmmac_clk);
5252         }
5253         mutex_unlock(&priv->lock);
5254
5255         priv->speed = SPEED_UNKNOWN;
5256         return 0;
5257 }
5258 EXPORT_SYMBOL_GPL(stmmac_suspend);
5259
5260 /**
5261  * stmmac_reset_queues_param - reset queue parameters
5262  * @priv: device pointer
5263  */
5264 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
5265 {
5266         u32 rx_cnt = priv->plat->rx_queues_to_use;
5267         u32 tx_cnt = priv->plat->tx_queues_to_use;
5268         u32 queue;
5269
5270         for (queue = 0; queue < rx_cnt; queue++) {
5271                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5272
5273                 rx_q->cur_rx = 0;
5274                 rx_q->dirty_rx = 0;
5275         }
5276
5277         for (queue = 0; queue < tx_cnt; queue++) {
5278                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
5279
5280                 tx_q->cur_tx = 0;
5281                 tx_q->dirty_tx = 0;
5282                 tx_q->mss = 0;
5283
5284                 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
5285         }
5286 }
5287
5288 /**
5289  * stmmac_resume - resume callback
5290  * @dev: device pointer
5291  * Description: on resume this function is invoked to set up the DMA and CORE
5292  * in a usable state.
5293  */
5294 int stmmac_resume(struct device *dev)
5295 {
5296         struct net_device *ndev = dev_get_drvdata(dev);
5297         struct stmmac_priv *priv = netdev_priv(ndev);
5298         int ret;
5299
5300         if (!netif_running(ndev))
5301                 return 0;
5302
5303         /* The Power Down bit in the PM register is cleared
5304          * automatically as soon as a magic packet or a Wake-up frame
5305          * is received. Anyway, it's better to manually clear
5306          * this bit because it can generate problems while resuming
5307          * from other devices (e.g. serial console).
5308          */
5309         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
5310                 mutex_lock(&priv->lock);
5311                 stmmac_pmt(priv, priv->hw, 0);
5312                 mutex_unlock(&priv->lock);
5313                 priv->irq_wake = 0;
5314         } else {
5315                 pinctrl_pm_select_default_state(priv->device);
5316                 /* enable the clk previously disabled */
5317                 clk_prepare_enable(priv->plat->stmmac_clk);
5318                 clk_prepare_enable(priv->plat->pclk);
5319                 if (priv->plat->clk_ptp_ref)
5320                         clk_prepare_enable(priv->plat->clk_ptp_ref);
5321                 /* reset the phy so that it's ready */
5322                 if (priv->mii)
5323                         stmmac_mdio_reset(priv->mii);
5324         }
5325
5326         if (priv->plat->serdes_powerup) {
5327                 ret = priv->plat->serdes_powerup(ndev,
5328                                                  priv->plat->bsp_priv);
5329
5330                 if (ret < 0)
5331                         return ret;
5332         }
5333
5334         if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
5335                 rtnl_lock();
5336                 phylink_start(priv->phylink);
5337                 /* We may have called phylink_speed_down before */
5338                 phylink_speed_up(priv->phylink);
5339                 rtnl_unlock();
5340         }
5341
5342         rtnl_lock();
5343         mutex_lock(&priv->lock);
5344
5345         stmmac_reset_queues_param(priv);
5346
5347         stmmac_free_tx_skbufs(priv);
5348         stmmac_clear_descriptors(priv);
5349
5350         stmmac_hw_setup(ndev, false);
5351         stmmac_init_coalesce(priv);
5352         stmmac_set_rx_mode(ndev);
5353
5354         stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
5355
5356         stmmac_enable_all_queues(priv);
5357
5358         mutex_unlock(&priv->lock);
5359         rtnl_unlock();
5360
5361         phylink_mac_change(priv->phylink, true);
5362
5363         netif_device_attach(ndev);
5364
5365         return 0;
5366 }
5367 EXPORT_SYMBOL_GPL(stmmac_resume);
5368
5369 #ifndef MODULE
5370 static int __init stmmac_cmdline_opt(char *str)
5371 {
5372         char *opt;
5373
5374         if (!str || !*str)
5375                 return -EINVAL;
5376         while ((opt = strsep(&str, ",")) != NULL) {
5377                 if (!strncmp(opt, "debug:", 6)) {
5378                         if (kstrtoint(opt + 6, 0, &debug))
5379                                 goto err;
5380                 } else if (!strncmp(opt, "phyaddr:", 8)) {
5381                         if (kstrtoint(opt + 8, 0, &phyaddr))
5382                                 goto err;
5383                 } else if (!strncmp(opt, "buf_sz:", 7)) {
5384                         if (kstrtoint(opt + 7, 0, &buf_sz))
5385                                 goto err;
5386                 } else if (!strncmp(opt, "tc:", 3)) {
5387                         if (kstrtoint(opt + 3, 0, &tc))
5388                                 goto err;
5389                 } else if (!strncmp(opt, "watchdog:", 9)) {
5390                         if (kstrtoint(opt + 9, 0, &watchdog))
5391                                 goto err;
5392                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
5393                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
5394                                 goto err;
5395                 } else if (!strncmp(opt, "pause:", 6)) {
5396                         if (kstrtoint(opt + 6, 0, &pause))
5397                                 goto err;
5398                 } else if (!strncmp(opt, "eee_timer:", 10)) {
5399                         if (kstrtoint(opt + 10, 0, &eee_timer))
5400                                 goto err;
5401                 } else if (!strncmp(opt, "chain_mode:", 11)) {
5402                         if (kstrtoint(opt + 11, 0, &chain_mode))
5403                                 goto err;
5404                 }
5405         }
5406         return 0;
5407
5408 err:
5409         pr_err("%s: ERROR broken module parameter conversion", __func__);
5410         return -EINVAL;
5411 }
5412
5413 __setup("stmmaceth=", stmmac_cmdline_opt);
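
/* Example (illustrative): booting a built-in driver with
 *   stmmaceth=debug:16,phyaddr:1,buf_sz:2048,tc:256,watchdog:4000
 * overrides the corresponding module parameters from the kernel command
 * line; any value that fails kstrtoint() aborts the parse with -EINVAL.
 */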
5414 #endif /* MODULE */
5415
5416 static int __init stmmac_init(void)
5417 {
5418 #ifdef CONFIG_DEBUG_FS
5419         /* Create debugfs main directory if it doesn't exist yet */
5420         if (!stmmac_fs_dir)
5421                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
5422         register_netdevice_notifier(&stmmac_notifier);
5423 #endif
5424
5425         return 0;
5426 }
5427
5428 static void __exit stmmac_exit(void)
5429 {
5430 #ifdef CONFIG_DEBUG_FS
5431         unregister_netdevice_notifier(&stmmac_notifier);
5432         debugfs_remove_recursive(stmmac_fs_dir);
5433 #endif
5434 }
5435
5436 module_init(stmmac_init)
5437 module_exit(stmmac_exit)
5438
5439 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
5440 MODULE_AUTHOR("Giuseppe Cavallaro <[email protected]>");
5441 MODULE_LICENSE("GPL");