1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright Sunplus Technology Co., Ltd. */
6 #include <linux/platform_device.h>
7 #include <linux/etherdevice.h>
8 #include <linux/netdevice.h>
9 #include <linux/bitfield.h>
10 #include <linux/spinlock.h>
11 #include <linux/of_mdio.h>
13 #include "spl2sw_register.h"
14 #include "spl2sw_define.h"
15 #include "spl2sw_int.h"
/**
 * spl2sw_rx_poll() - NAPI poll handler for the receive path.
 * @napi:   NAPI context embedded in struct spl2sw_common (rx_napi).
 * @budget: Maximum number of packets this poll may process.
 *
 * Walks the RX descriptor rings, high-priority queue first and then the
 * low-priority queue, hands completed frames to the network stack and
 * re-arms each descriptor with a freshly allocated skb before giving
 * ownership back to hardware (RXD_OWN).
 *
 * Return: number of packets processed (budget - budget_left).
 */
17 int spl2sw_rx_poll(struct napi_struct *napi, int budget)
19 struct spl2sw_common *comm = container_of(napi, struct spl2sw_common, rx_napi);
20 struct spl2sw_mac_desc *desc, *h_desc;
21 struct net_device_stats *stats;
22 struct sk_buff *skb, *new_skb;
23 struct spl2sw_skb_info *sinfo;
24 int budget_left = budget;
34 /* Process high-priority queue and then low-priority queue. */
35 for (queue = 0; queue < RX_DESC_QUEUE_NUM; queue++) {
36 rx_pos = comm->rx_pos[queue];
37 rx_count = comm->rx_desc_num[queue];
39 for (num = 0; num < rx_count && budget_left; num++) {
40 sinfo = comm->rx_skb_info[queue] + rx_pos;
41 desc = comm->rx_desc[queue] + rx_pos;
/* The source-port field of the descriptor selects which net_device
 * the frame belongs to; ports without a registered net_device fall
 * through to the recycle path without touching stats.
 */
47 port = FIELD_GET(RXD_PKT_SP, cmd);
48 if (port < MAX_NETDEV_NUM && comm->ndev[port])
49 stats = &comm->ndev[port]->stats;
51 goto spl2sw_rx_poll_rec_err;
/* Drop frames flagged with a hardware error code or shorter than
 * the minimum Ethernet frame (ETH_ZLEN plus the 4-byte FCS).
 */
53 pkg_len = FIELD_GET(RXD_PKT_LEN, cmd);
54 if (unlikely((cmd & RXD_ERR_CODE) || pkg_len < ETH_ZLEN + 4)) {
55 stats->rx_length_errors++;
57 goto spl2sw_rx_poll_rec_err;
/* Hand the DMA buffer back to the CPU before the stack reads it. */
60 dma_unmap_single(&comm->pdev->dev, sinfo->mapping,
61 comm->rx_desc_buff_size, DMA_FROM_DEVICE);
64 skb_put(skb, pkg_len - 4); /* Minus FCS */
65 skb->ip_summed = CHECKSUM_NONE;
66 skb->protocol = eth_type_trans(skb, comm->ndev[port]);
68 netif_receive_skb(skb);
71 stats->rx_bytes += len;
73 /* Allocate a new skb for receiving. */
74 new_skb = netdev_alloc_skb(NULL, comm->rx_desc_buff_size);
75 if (unlikely(!new_skb)) {
/* Allocation failed: jump past the RXD_OWN handoff below so the
 * hardware is not re-armed with a stale buffer mapping.
 */
76 desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ?
81 goto spl2sw_rx_poll_alloc_err;
84 sinfo->mapping = dma_map_single(&comm->pdev->dev, new_skb->data,
85 comm->rx_desc_buff_size,
/* Mapping failed: free the replacement skb and, as above, skip the
 * ownership handoff for this descriptor.
 */
87 if (dma_mapping_error(&comm->pdev->dev, sinfo->mapping)) {
88 dev_kfree_skb_irq(new_skb);
89 desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ?
94 goto spl2sw_rx_poll_alloc_err;
98 desc->addr1 = sinfo->mapping;
100 spl2sw_rx_poll_rec_err:
/* RXD_EOR marks the ring's last descriptor so hardware wraps back
 * to the first one; cmd2 otherwise carries only the buffer size.
 */
101 desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ?
102 RXD_EOR | comm->rx_desc_buff_size :
103 comm->rx_desc_buff_size;
105 wmb(); /* Set RXD_OWN after other fields are effective. */
106 desc->cmd1 = RXD_OWN;
108 spl2sw_rx_poll_alloc_err:
109 /* Move rx_pos to next position */
110 rx_pos = ((rx_pos + 1) == comm->rx_desc_num[queue]) ? 0 : rx_pos + 1;
114 /* If there are packets in high-priority queue,
115 * stop processing low-priority queue.
117 if (queue == 1 && !(h_desc->cmd1 & RXD_OWN))
121 comm->rx_pos[queue] = rx_pos;
123 /* Save pointer to last rx descriptor of high-priority queue. */
125 h_desc = comm->rx_desc[queue] + rx_pos;
/* Read-modify-write of the interrupt mask under int_mask_lock;
 * presumably this re-enables RX interrupts now that polling is done —
 * the mask bit manipulation between the read and write is not visible
 * here. NOTE(review): confirm against the full file.
 */
128 spin_lock_irqsave(&comm->int_mask_lock, flags);
129 mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
131 writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
132 spin_unlock_irqrestore(&comm->int_mask_lock, flags);
135 return budget - budget_left;
/**
 * spl2sw_tx_poll() - NAPI poll handler for TX completion.
 * @napi:   NAPI context embedded in struct spl2sw_common (tx_napi).
 * @budget: Maximum number of TX descriptors to reclaim in this poll.
 *
 * Reclaims completed TX descriptors under tx_lock: updates per-port
 * statistics, unmaps and frees each transmitted skb, clears the
 * ring-full flag, and wakes any stopped TX queues.
 *
 * Return: number of descriptors processed (budget - budget_left).
 */
138 int spl2sw_tx_poll(struct napi_struct *napi, int budget)
140 struct spl2sw_common *comm = container_of(napi, struct spl2sw_common, tx_napi);
141 struct spl2sw_skb_info *skbinfo;
142 struct net_device_stats *stats;
143 int budget_left = budget;
150 spin_lock(&comm->tx_lock);
152 tx_done_pos = comm->tx_done_pos;
/* Reclaim until the consumer catches up with the producer (tx_pos) —
 * or, when the ring was marked full, until budget runs out.
 */
153 while (((tx_done_pos != comm->tx_pos) || (comm->tx_desc_full == 1)) && budget_left) {
154 cmd = comm->tx_desc[tx_done_pos].cmd1;
158 skbinfo = &comm->tx_temp_skb_info[tx_done_pos];
159 if (unlikely(!skbinfo->skb))
160 goto spl2sw_tx_poll_next;
/* TXD_VLAN appears to hold a per-port bitmap: ffs()-1 turns the
 * lowest set bit into a port index for the stats lookup — TODO
 * confirm against spl2sw_define.h.
 */
162 i = ffs(FIELD_GET(TXD_VLAN, cmd)) - 1;
163 if (i < MAX_NETDEV_NUM && comm->ndev[i])
164 stats = &comm->ndev[i]->stats;
166 goto spl2sw_tx_poll_unmap;
168 if (unlikely(cmd & (TXD_ERR_CODE))) {
172 stats->tx_bytes += skbinfo->len;
175 spl2sw_tx_poll_unmap:
176 dma_unmap_single(&comm->pdev->dev, skbinfo->mapping, skbinfo->len,
178 skbinfo->mapping = 0;
179 dev_kfree_skb_irq(skbinfo->skb);
183 /* Move tx_done_pos to next position */
184 tx_done_pos = ((tx_done_pos + 1) == TX_DESC_NUM) ? 0 : tx_done_pos + 1;
/* At least one descriptor was reclaimed, so the ring is no longer
 * full.
 */
186 if (comm->tx_desc_full == 1)
187 comm->tx_desc_full = 0;
192 comm->tx_done_pos = tx_done_pos;
/* Descriptors are free again: wake every TX queue that was stopped. */
193 if (!comm->tx_desc_full)
194 for (i = 0; i < MAX_NETDEV_NUM; i++)
196 if (netif_queue_stopped(comm->ndev[i]))
197 netif_wake_queue(comm->ndev[i]);
199 spin_unlock(&comm->tx_lock);
/* Read-modify-write of the interrupt mask under int_mask_lock;
 * presumably this re-enables TX interrupts now that polling is done —
 * the mask bit manipulation between the read and write is not visible
 * here. NOTE(review): confirm against the full file.
 */
201 spin_lock_irqsave(&comm->int_mask_lock, flags);
202 mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
204 writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
205 spin_unlock_irqrestore(&comm->int_mask_lock, flags);
208 return budget - budget_left;
211 irqreturn_t spl2sw_ethernet_interrupt(int irq, void *dev_id)
213 struct spl2sw_common *comm = (struct spl2sw_common *)dev_id;
218 status = readl(comm->l2sw_reg_base + L2SW_SW_INT_STATUS_0);
219 if (unlikely(!status)) {
220 dev_dbg(&comm->pdev->dev, "Interrupt status is null!\n");
221 goto spl2sw_ethernet_int_out;
223 writel(status, comm->l2sw_reg_base + L2SW_SW_INT_STATUS_0);
225 if (status & MAC_INT_RX) {
226 /* Disable RX interrupts. */
227 spin_lock(&comm->int_mask_lock);
228 mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
230 writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
231 spin_unlock(&comm->int_mask_lock);
233 if (unlikely(status & MAC_INT_RX_DES_ERR)) {
234 for (i = 0; i < MAX_NETDEV_NUM; i++)
236 comm->ndev[i]->stats.rx_fifo_errors++;
239 dev_dbg(&comm->pdev->dev, "Illegal RX Descriptor!\n");
242 napi_schedule(&comm->rx_napi);
245 if (status & MAC_INT_TX) {
246 /* Disable TX interrupts. */
247 spin_lock(&comm->int_mask_lock);
248 mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
250 writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
251 spin_unlock(&comm->int_mask_lock);
253 if (unlikely(status & MAC_INT_TX_DES_ERR)) {
254 for (i = 0; i < MAX_NETDEV_NUM; i++)
256 comm->ndev[i]->stats.tx_fifo_errors++;
259 dev_dbg(&comm->pdev->dev, "Illegal TX Descriptor Error\n");
261 spin_lock(&comm->int_mask_lock);
262 mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
264 writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
265 spin_unlock(&comm->int_mask_lock);
267 napi_schedule(&comm->tx_napi);
271 spl2sw_ethernet_int_out: