// SPDX-License-Identifier: GPL-2.0
/* Copyright Sunplus Technology Co., Ltd.
 *       All rights reserved.
 */

#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/bitfield.h>
#include <linux/spinlock.h>
#include <linux/of_mdio.h>

#include "spl2sw_register.h"
#include "spl2sw_define.h"
#include "spl2sw_int.h"

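/* NAPI RX poll handler.
 *
 * Reaps completed descriptors from the high-priority RX queue and then
 * the low-priority one, hands received frames to the network stack,
 * refills the ring with newly allocated buffers, and finally re-enables
 * the RX interrupt and completes NAPI.
 */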
int spl2sw_rx_poll(struct napi_struct *napi, int budget)
{
	struct spl2sw_common *comm = container_of(napi, struct spl2sw_common, rx_napi);
	struct spl2sw_mac_desc *desc, *h_desc;
	struct net_device_stats *stats;
	struct sk_buff *skb, *new_skb;
	struct spl2sw_skb_info *sinfo;
	int budget_left = budget;
	unsigned long flags;
	u32 rx_pos, pkg_len;
	u32 num, rx_count;
	s32 queue;
	u32 mask;
	int port;
	u32 cmd;
	u32 len;

	/* Process high-priority queue and then low-priority queue. */
	for (queue = 0; queue < RX_DESC_QUEUE_NUM; queue++) {
		rx_pos = comm->rx_pos[queue];
		rx_count = comm->rx_desc_num[queue];

		for (num = 0; num < rx_count && budget_left; num++) {
			sinfo = comm->rx_skb_info[queue] + rx_pos;
			desc = comm->rx_desc[queue] + rx_pos;
			cmd = desc->cmd1;

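			/* Hardware still owns this descriptor; no more
			 * completed frames in this queue.
			 */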
			if (cmd & RXD_OWN)
				break;

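			/* RXD_PKT_SP holds the switch port the frame arrived
			 * on; map it to the corresponding net device.
			 */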
			port = FIELD_GET(RXD_PKT_SP, cmd);
			if (port < MAX_NETDEV_NUM && comm->ndev[port])
				stats = &comm->ndev[port]->stats;
			else
				goto spl2sw_rx_poll_rec_err;

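			/* Drop frames that carry an error code or are shorter
			 * than a minimum Ethernet frame plus the 4-byte FCS.
			 */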
			pkg_len = FIELD_GET(RXD_PKT_LEN, cmd);
			if (unlikely((cmd & RXD_ERR_CODE) || pkg_len < ETH_ZLEN + 4)) {
				stats->rx_length_errors++;
				stats->rx_dropped++;
				goto spl2sw_rx_poll_rec_err;
			}

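			/* The buffer is complete; unmap it before handing the
			 * skb to the network stack.
			 */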
			dma_unmap_single(&comm->pdev->dev, sinfo->mapping,
					 comm->rx_desc_buff_size, DMA_FROM_DEVICE);

			skb = sinfo->skb;
			skb_put(skb, pkg_len - 4); /* Minus FCS */
			skb->ip_summed = CHECKSUM_NONE;
			skb->protocol = eth_type_trans(skb, comm->ndev[port]);
			len = skb->len;
			netif_receive_skb(skb);

			stats->rx_packets++;
			stats->rx_bytes += len;

			/* Allocate a new skb for receiving. */
			new_skb = netdev_alloc_skb(NULL, comm->rx_desc_buff_size);
			if (unlikely(!new_skb)) {
				desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ?
					     RXD_EOR : 0;
				sinfo->skb = NULL;
				sinfo->mapping = 0;
				desc->addr1 = 0;
				goto spl2sw_rx_poll_alloc_err;
			}

			sinfo->mapping = dma_map_single(&comm->pdev->dev, new_skb->data,
							comm->rx_desc_buff_size,
							DMA_FROM_DEVICE);
			if (dma_mapping_error(&comm->pdev->dev, sinfo->mapping)) {
				dev_kfree_skb_irq(new_skb);
				desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ?
					     RXD_EOR : 0;
				sinfo->skb = NULL;
				sinfo->mapping = 0;
				desc->addr1 = 0;
				goto spl2sw_rx_poll_alloc_err;
			}

			sinfo->skb = new_skb;
			desc->addr1 = sinfo->mapping;

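			/* Recycle the descriptor: restore the buffer size
			 * (plus RXD_EOR on the last descriptor of the ring)
			 * and hand ownership back to the hardware.
			 */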
spl2sw_rx_poll_rec_err:
			desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ?
				     RXD_EOR | comm->rx_desc_buff_size :
				     comm->rx_desc_buff_size;

			wmb();	/* Make sure the other fields are written before setting RXD_OWN. */
			desc->cmd1 = RXD_OWN;

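			/* On buffer-allocation failure the descriptor keeps no
			 * buffer (addr1 == 0) and is not returned to the
			 * hardware.
			 */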
spl2sw_rx_poll_alloc_err:
			/* Move rx_pos to the next position. */
			rx_pos = ((rx_pos + 1) == comm->rx_desc_num[queue]) ? 0 : rx_pos + 1;

			budget_left--;

			/* If a packet has arrived in the high-priority queue,
			 * stop processing the low-priority queue.
			 */
			if (queue == 1 && !(h_desc->cmd1 & RXD_OWN))
				break;
		}

		comm->rx_pos[queue] = rx_pos;

		/* Save a pointer to the next descriptor of the high-priority
		 * queue so that new arrivals there can be detected while the
		 * low-priority queue is being serviced.
		 */
		if (queue == 0)
			h_desc = comm->rx_desc[queue] + rx_pos;
	}

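	/* Re-enable the RX interrupt that was masked in the interrupt
	 * handler.
	 */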
	spin_lock_irqsave(&comm->int_mask_lock, flags);
	mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
	mask &= ~MAC_INT_RX;
	writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
	spin_unlock_irqrestore(&comm->int_mask_lock, flags);

	napi_complete(napi);
	return budget - budget_left;
}

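/* NAPI TX poll handler.
 *
 * Walks the TX ring from the last completed position, updates per-port
 * statistics, unmaps and frees transmitted skbs, wakes any stopped TX
 * queues and re-enables the TX interrupt.
 */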
int spl2sw_tx_poll(struct napi_struct *napi, int budget)
{
	struct spl2sw_common *comm = container_of(napi, struct spl2sw_common, tx_napi);
	struct spl2sw_skb_info *skbinfo;
	struct net_device_stats *stats;
	int budget_left = budget;
	unsigned long flags;
	u32 tx_done_pos;
	u32 mask;
	u32 cmd;
	int i;

	spin_lock(&comm->tx_lock);

	tx_done_pos = comm->tx_done_pos;
	while (((tx_done_pos != comm->tx_pos) || (comm->tx_desc_full == 1)) && budget_left) {
		cmd = comm->tx_desc[tx_done_pos].cmd1;
		if (cmd & TXD_OWN)
			break;

		skbinfo = &comm->tx_temp_skb_info[tx_done_pos];
		if (unlikely(!skbinfo->skb))
			goto spl2sw_tx_poll_next;

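		/* The descriptor's VLAN field was loaded with a one-hot port
		 * bitmap at transmit time; ffs() turns it back into the index
		 * of the net device that queued the frame.
		 */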
		i = ffs(FIELD_GET(TXD_VLAN, cmd)) - 1;
		if (i < MAX_NETDEV_NUM && comm->ndev[i])
			stats = &comm->ndev[i]->stats;
		else
			goto spl2sw_tx_poll_unmap;

		if (unlikely(cmd & (TXD_ERR_CODE))) {
			stats->tx_errors++;
		} else {
			stats->tx_packets++;
			stats->tx_bytes += skbinfo->len;
		}

spl2sw_tx_poll_unmap:
		dma_unmap_single(&comm->pdev->dev, skbinfo->mapping, skbinfo->len,
				 DMA_TO_DEVICE);
		skbinfo->mapping = 0;
		dev_kfree_skb_irq(skbinfo->skb);
		skbinfo->skb = NULL;

spl2sw_tx_poll_next:
		/* Move tx_done_pos to the next position. */
		tx_done_pos = ((tx_done_pos + 1) == TX_DESC_NUM) ? 0 : tx_done_pos + 1;

		if (comm->tx_desc_full == 1)
			comm->tx_desc_full = 0;

		budget_left--;
	}

	comm->tx_done_pos = tx_done_pos;
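	/* The ring has free descriptors again; wake any stopped TX queues. */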
	if (!comm->tx_desc_full)
		for (i = 0; i < MAX_NETDEV_NUM; i++)
			if (comm->ndev[i])
				if (netif_queue_stopped(comm->ndev[i]))
					netif_wake_queue(comm->ndev[i]);

	spin_unlock(&comm->tx_lock);

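	/* Re-enable the TX interrupt that was masked in the interrupt
	 * handler.
	 */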
	spin_lock_irqsave(&comm->int_mask_lock, flags);
	mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
	mask &= ~MAC_INT_TX;
	writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
	spin_unlock_irqrestore(&comm->int_mask_lock, flags);

	napi_complete(napi);
	return budget - budget_left;
}

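/* Top-half interrupt handler.
 *
 * Acknowledges the pending interrupt sources, masks the RX/TX
 * interrupts and defers the actual work to the RX/TX NAPI handlers.
 */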
irqreturn_t spl2sw_ethernet_interrupt(int irq, void *dev_id)
{
	struct spl2sw_common *comm = (struct spl2sw_common *)dev_id;
	u32 status;
	u32 mask;
	int i;

	status = readl(comm->l2sw_reg_base + L2SW_SW_INT_STATUS_0);
	if (unlikely(!status)) {
		dev_dbg(&comm->pdev->dev, "Interrupt status is zero!\n");
		goto spl2sw_ethernet_int_out;
	}
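	/* Acknowledge the pending interrupts by writing the status back. */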
	writel(status, comm->l2sw_reg_base + L2SW_SW_INT_STATUS_0);

	if (status & MAC_INT_RX) {
		/* Disable RX interrupts. */
		spin_lock(&comm->int_mask_lock);
		mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
		mask |= MAC_INT_RX;
		writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
		spin_unlock(&comm->int_mask_lock);

		if (unlikely(status & MAC_INT_RX_DES_ERR)) {
			for (i = 0; i < MAX_NETDEV_NUM; i++)
				if (comm->ndev[i]) {
					comm->ndev[i]->stats.rx_fifo_errors++;
					break;
				}
			dev_dbg(&comm->pdev->dev, "Illegal RX Descriptor!\n");
		}

		napi_schedule(&comm->rx_napi);
	}

	if (status & MAC_INT_TX) {
		/* Disable TX interrupts. */
		spin_lock(&comm->int_mask_lock);
		mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
		mask |= MAC_INT_TX;
		writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
		spin_unlock(&comm->int_mask_lock);

		if (unlikely(status & MAC_INT_TX_DES_ERR)) {
			for (i = 0; i < MAX_NETDEV_NUM; i++)
				if (comm->ndev[i]) {
					comm->ndev[i]->stats.tx_fifo_errors++;
					break;
				}
			dev_dbg(&comm->pdev->dev, "Illegal TX Descriptor!\n");

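			/* TX NAPI is not scheduled in this case, so unmask the
			 * TX interrupt again here.
			 */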
			spin_lock(&comm->int_mask_lock);
			mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
			mask &= ~MAC_INT_TX;
			writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
			spin_unlock(&comm->int_mask_lock);
		} else {
			napi_schedule(&comm->tx_napi);
		}
	}

spl2sw_ethernet_int_out:
	return IRQ_HANDLED;
}