// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/mfd/syscon.h>
#include <linux/debugfs.h>
#include <linux/soc/mediatek/mtk_wed.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>
#include "mtk_eth_soc.h"
#include "mtk_wed_regs.h"
#include "mtk_wed.h"
#include "mtk_ppe.h"
#include "mtk_wed_wo.h"

#define MTK_PCIE_BASE(n)                (0x1a143000 + (n) * 0x2000)

#define MTK_WED_PKT_SIZE                1900
#define MTK_WED_BUF_SIZE                2048
#define MTK_WED_BUF_PER_PAGE            (PAGE_SIZE / 2048)
#define MTK_WED_RX_RING_SIZE            1536

#define MTK_WED_TX_RING_SIZE            2048
#define MTK_WED_WDMA_RING_SIZE          1024
#define MTK_WED_MAX_GROUP_SIZE          0x100
#define MTK_WED_VLD_GROUP_SIZE          0x40
#define MTK_WED_PER_GROUP_PKT           128

#define MTK_WED_FBUF_SIZE               128
#define MTK_WED_MIOD_CNT                16
#define MTK_WED_FB_CMD_CNT              1024
#define MTK_WED_RRO_QUE_CNT             8192
#define MTK_WED_MIOD_ENTRY_CNT          128

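/* At most two WED hw blocks per SoC; entries may be NULL when only one
 * block is registered. hw_lock serializes attach/detach and the FE
 * reset paths that walk hw_list.
 */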
static struct mtk_wed_hw *hw_list[2];
static DEFINE_MUTEX(hw_lock);

struct mtk_wed_flow_block_priv {
        struct mtk_wed_hw *hw;
        struct net_device *dev;
};

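/* Register access helpers: wed_*() go through the WED regmap, wdma_*()
 * access the paired WDMA block and wifi_*() poke the WLAN core through
 * the MMIO base supplied by the WLAN driver. The *_m32() variants are
 * masked read-modify-write updates.
 */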
static void
wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
{
        regmap_update_bits(dev->hw->regs, reg, mask | val, val);
}

static void
wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
        return wed_m32(dev, reg, 0, mask);
}

static void
wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
        return wed_m32(dev, reg, mask, 0);
}

static void
wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
{
        wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val);
}

static void
wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
        wdma_m32(dev, reg, 0, mask);
}

static void
wdma_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
        wdma_m32(dev, reg, mask, 0);
}

static u32
wifi_r32(struct mtk_wed_device *dev, u32 reg)
{
        return readl(dev->wlan.base + reg);
}

static void
wifi_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
{
        writel(val, dev->wlan.base + reg);
}

static u32
mtk_wed_read_reset(struct mtk_wed_device *dev)
{
        return wed_r32(dev, MTK_WED_RESET);
}

static u32
mtk_wdma_read_reset(struct mtk_wed_device *dev)
{
        return wdma_r32(dev, MTK_WDMA_GLO_CFG);
}

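/* Disable WDMA rx DMA, wait for it to go idle, then toggle the rx reset
 * index. CPU indices are cleared only for rings that WED has not
 * claimed (no descriptor memory attached).
 */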
static int
mtk_wdma_rx_reset(struct mtk_wed_device *dev)
{
        u32 status, mask = MTK_WDMA_GLO_CFG_RX_DMA_BUSY;
        int i, ret;

        wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_DMA_EN);
        ret = readx_poll_timeout(mtk_wdma_read_reset, dev, status,
                                 !(status & mask), 0, 10000);
        if (ret)
                dev_err(dev->hw->dev, "rx reset failed\n");

        wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
        wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);

        for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) {
                if (dev->rx_wdma[i].desc)
                        continue;

                wdma_w32(dev,
                         MTK_WDMA_RING_RX(i) + MTK_WED_RING_OFS_CPU_IDX, 0);
        }

        return ret;
}

static void
mtk_wdma_tx_reset(struct mtk_wed_device *dev)
{
        u32 status, mask = MTK_WDMA_GLO_CFG_TX_DMA_BUSY;
        int i;

        wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
        if (readx_poll_timeout(mtk_wdma_read_reset, dev, status,
                               !(status & mask), 0, 10000))
                dev_err(dev->hw->dev, "tx reset failed\n");

        wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
        wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);

        for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
                wdma_w32(dev,
                         MTK_WDMA_RING_TX(i) + MTK_WED_RING_OFS_CPU_IDX, 0);
}

static void
mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
{
        u32 status;

        wed_w32(dev, MTK_WED_RESET, mask);
        if (readx_poll_timeout(mtk_wed_read_reset, dev, status,
                               !(status & mask), 0, 1000))
                WARN_ON_ONCE(1);
}

static u32
mtk_wed_wo_read_status(struct mtk_wed_device *dev)
{
        return wed_r32(dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_WO_STATUS);
}

static void
mtk_wed_wo_reset(struct mtk_wed_device *dev)
{
        struct mtk_wed_wo *wo = dev->hw->wed_wo;
        u8 state = MTK_WED_WO_STATE_DISABLE;
        void __iomem *reg;
        u32 val;

        mtk_wdma_tx_reset(dev);
        mtk_wed_reset(dev, MTK_WED_RESET_WED);

        if (mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
                                 MTK_WED_WO_CMD_CHANGE_STATE, &state,
                                 sizeof(state), false))
                return;

        if (readx_poll_timeout(mtk_wed_wo_read_status, dev, val,
                               val == MTK_WED_WOIF_DISABLE_DONE,
                               100, MTK_WOCPU_TIMEOUT))
                dev_err(dev->hw->dev, "failed to disable wed-wo\n");

        reg = ioremap(MTK_WED_WO_CPU_MCUSYS_RESET_ADDR, 4);

        val = readl(reg);
        switch (dev->hw->index) {
        case 0:
                val |= MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK;
                writel(val, reg);
                val &= ~MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK;
                writel(val, reg);
                break;
        case 1:
                val |= MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK;
                writel(val, reg);
                val &= ~MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK;
                writel(val, reg);
                break;
        default:
                break;
        }
        iounmap(reg);
}

void mtk_wed_fe_reset(void)
{
        int i;

        mutex_lock(&hw_lock);

        for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
                struct mtk_wed_hw *hw = hw_list[i];
                struct mtk_wed_device *dev;
                int err;

                if (!hw)
                        continue;

                dev = hw->wed_dev;
                if (!dev || !dev->wlan.reset)
                        continue;

                /* reset callback blocks until WLAN reset is completed */
                err = dev->wlan.reset(dev);
                if (err)
                        dev_err(dev->dev, "wlan reset failed: %d\n", err);
        }

        mutex_unlock(&hw_lock);
}

void mtk_wed_fe_reset_complete(void)
{
        int i;

        mutex_lock(&hw_lock);

        for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
                struct mtk_wed_hw *hw = hw_list[i];
                struct mtk_wed_device *dev;

                if (!hw)
                        continue;

                dev = hw->wed_dev;
                if (!dev || !dev->wlan.reset_complete)
                        continue;

                dev->wlan.reset_complete(dev);
        }

        mutex_unlock(&hw_lock);
}

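/* Pick a WED hw instance for a new device. For PCIe WLAN cards the PCI
 * domain selects the instance first; when that instance is already
 * busy, v1 hardware gives up while MT7986 may fall back to any free
 * instance. Called with hw_lock held.
 */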
static struct mtk_wed_hw *
mtk_wed_assign(struct mtk_wed_device *dev)
{
        struct mtk_wed_hw *hw;
        int i;

        if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
                hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
                if (!hw)
                        return NULL;

                if (!hw->wed_dev)
                        goto out;

                if (hw->version == 1)
                        return NULL;

                /* MT7986 WED devices do not have any pcie slot restrictions */
        }
        /* MT7986 PCIE or AXI */
        for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
                hw = hw_list[i];
                if (hw && !hw->wed_dev)
                        goto out;
        }

        return NULL;

out:
        hw->wed_dev = dev;
        return hw;
}

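/* Allocate the tx buffer ring shared with the WLAN hardware: whole
 * pages are carved into MTK_WED_BUF_SIZE buffers and each descriptor
 * points at the WLAN txd header (buf0, filled in by the init_buf
 * callback) followed by the packet area (buf1).
 */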
static int
mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
{
        struct mtk_wdma_desc *desc;
        dma_addr_t desc_phys;
        void **page_list;
        int token = dev->wlan.token_start;
        int ring_size;
        int n_pages;
        int i, page_idx;

        ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
        n_pages = ring_size / MTK_WED_BUF_PER_PAGE;

        page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
        if (!page_list)
                return -ENOMEM;

        dev->tx_buf_ring.size = ring_size;
        dev->tx_buf_ring.pages = page_list;

        desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
                                  &desc_phys, GFP_KERNEL);
        if (!desc)
                return -ENOMEM;

        dev->tx_buf_ring.desc = desc;
        dev->tx_buf_ring.desc_phys = desc_phys;

        for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
                dma_addr_t page_phys, buf_phys;
                struct page *page;
                void *buf;
                int s;

                page = __dev_alloc_pages(GFP_KERNEL, 0);
                if (!page)
                        return -ENOMEM;

                page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
                                         DMA_BIDIRECTIONAL);
                if (dma_mapping_error(dev->hw->dev, page_phys)) {
                        __free_page(page);
                        return -ENOMEM;
                }

                page_list[page_idx++] = page;
                dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
                                        DMA_BIDIRECTIONAL);

                buf = page_to_virt(page);
                buf_phys = page_phys;

                for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
                        u32 txd_size;
                        u32 ctrl;

                        txd_size = dev->wlan.init_buf(buf, buf_phys, token++);

                        desc->buf0 = cpu_to_le32(buf_phys);
                        desc->buf1 = cpu_to_le32(buf_phys + txd_size);

                        if (dev->hw->version == 1)
                                ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
                                       FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
                                                  MTK_WED_BUF_SIZE - txd_size) |
                                       MTK_WDMA_DESC_CTRL_LAST_SEG1;
                        else
                                ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
                                       FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2,
                                                  MTK_WED_BUF_SIZE - txd_size) |
                                       MTK_WDMA_DESC_CTRL_LAST_SEG0;
                        desc->ctrl = cpu_to_le32(ctrl);
                        desc->info = 0;
                        desc++;

                        buf += MTK_WED_BUF_SIZE;
                        buf_phys += MTK_WED_BUF_SIZE;
                }

                dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
                                           DMA_BIDIRECTIONAL);
        }

        return 0;
}

static void
mtk_wed_free_tx_buffer(struct mtk_wed_device *dev)
{
        struct mtk_wdma_desc *desc = dev->tx_buf_ring.desc;
        void **page_list = dev->tx_buf_ring.pages;
        int page_idx;
        int i;

        if (!page_list)
                return;

        if (!desc)
                goto free_pagelist;

        for (i = 0, page_idx = 0; i < dev->tx_buf_ring.size;
             i += MTK_WED_BUF_PER_PAGE) {
                void *page = page_list[page_idx++];
                dma_addr_t buf_addr;

                if (!page)
                        break;

                buf_addr = le32_to_cpu(desc[i].buf0);
                dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
                               DMA_BIDIRECTIONAL);
                __free_page(page);
        }

        dma_free_coherent(dev->hw->dev, dev->tx_buf_ring.size * sizeof(*desc),
                          desc, dev->tx_buf_ring.desc_phys);

free_pagelist:
        kfree(page_list);
}

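/* Only the rx buffer manager descriptors are allocated here; the
 * buffers themselves are attached by the WLAN driver through its
 * init_rx_buf()/release_rx_buf() callbacks.
 */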
static int
mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
{
        struct mtk_rxbm_desc *desc;
        dma_addr_t desc_phys;

        dev->rx_buf_ring.size = dev->wlan.rx_nbuf;
        desc = dma_alloc_coherent(dev->hw->dev,
                                  dev->wlan.rx_nbuf * sizeof(*desc),
                                  &desc_phys, GFP_KERNEL);
        if (!desc)
                return -ENOMEM;

        dev->rx_buf_ring.desc = desc;
        dev->rx_buf_ring.desc_phys = desc_phys;
        dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt);

        return 0;
}

static void
mtk_wed_free_rx_buffer(struct mtk_wed_device *dev)
{
        struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc;

        if (!desc)
                return;

        dev->wlan.release_rx_buf(dev);
        dma_free_coherent(dev->hw->dev, dev->rx_buf_ring.size * sizeof(*desc),
                          desc, dev->rx_buf_ring.desc_phys);
}

static void
mtk_wed_rx_buffer_hw_init(struct mtk_wed_device *dev)
{
        wed_w32(dev, MTK_WED_RX_BM_RX_DMAD,
                FIELD_PREP(MTK_WED_RX_BM_RX_DMAD_SDL0, dev->wlan.rx_size));
        wed_w32(dev, MTK_WED_RX_BM_BASE, dev->rx_buf_ring.desc_phys);
        wed_w32(dev, MTK_WED_RX_BM_INIT_PTR, MTK_WED_RX_BM_INIT_SW_TAIL |
                FIELD_PREP(MTK_WED_RX_BM_SW_TAIL, dev->wlan.rx_npkt));
        wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH,
                FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff));
        wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
}

static void
mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
{
        if (!ring->desc)
                return;

        dma_free_coherent(dev->hw->dev, ring->size * ring->desc_size,
                          ring->desc, ring->desc_phys);
}

static void
mtk_wed_free_rx_rings(struct mtk_wed_device *dev)
{
        mtk_wed_free_rx_buffer(dev);
        mtk_wed_free_ring(dev, &dev->rro.ring);
}

static void
mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
                mtk_wed_free_ring(dev, &dev->tx_ring[i]);
        for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
                mtk_wed_free_ring(dev, &dev->rx_wdma[i]);
}

static void
mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
{
        u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;

        if (dev->hw->version == 1)
                mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
        else
                mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
                        MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
                        MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
                        MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;

        if (!dev->hw->num_flows)
                mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;

        wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? mask : 0);
        wed_r32(dev, MTK_WED_EXT_INT_MASK);
}

static void
mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable)
{
        if (enable) {
                wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
                wed_w32(dev, MTK_WED_TXP_DW1,
                        FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103));
        } else {
                wed_w32(dev, MTK_WED_TXP_DW1,
                        FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100));
                wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
        }
}

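/* Wait for the WLAN driver to fill the WPDMA rx data ring (cpu index
 * reaching the end of the ring), then enable rx DMA in the WLAN core's
 * WPDMA glo config.
 */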
#define MTK_WFMDA_RX_DMA_EN     BIT(2)
static void
mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
{
        u32 val;
        int i;

        if (!(dev->rx_ring[idx].flags & MTK_WED_RING_CONFIGURED))
                return; /* queue is not configured by mt76 */

        for (i = 0; i < 3; i++) {
                u32 cur_idx;

                cur_idx = wed_r32(dev,
                                  MTK_WED_WPDMA_RING_RX_DATA(idx) +
                                  MTK_WED_RING_OFS_CPU_IDX);
                if (cur_idx == MTK_WED_RX_RING_SIZE - 1)
                        break;

                usleep_range(100000, 200000);
        }

        if (i == 3) {
                dev_err(dev->hw->dev, "rx dma enable failed\n");
                return;
        }

        val = wifi_r32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base) |
              MTK_WFMDA_RX_DMA_EN;
        wifi_w32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base, val);
}

static void
mtk_wed_dma_disable(struct mtk_wed_device *dev)
{
        wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
                MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
                MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);

        wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);

        wed_clr(dev, MTK_WED_GLO_CFG,
                MTK_WED_GLO_CFG_TX_DMA_EN |
                MTK_WED_GLO_CFG_RX_DMA_EN);

        wdma_clr(dev, MTK_WDMA_GLO_CFG,
                 MTK_WDMA_GLO_CFG_TX_DMA_EN |
                 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
                 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);

        if (dev->hw->version == 1) {
                regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
                wdma_clr(dev, MTK_WDMA_GLO_CFG,
                         MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
        } else {
                wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
                        MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
                        MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);

                wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
                        MTK_WED_WPDMA_RX_D_RX_DRV_EN);
                wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
                        MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
        }

        mtk_wed_set_512_support(dev, false);
}

static void
mtk_wed_stop(struct mtk_wed_device *dev)
{
        mtk_wed_set_ext_int(dev, false);

        wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
        wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
        wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
        wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
        wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);

        if (dev->hw->version == 1)
                return;

        wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
        wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0);
}

static void
mtk_wed_deinit(struct mtk_wed_device *dev)
{
        mtk_wed_stop(dev);
        mtk_wed_dma_disable(dev);

        wed_clr(dev, MTK_WED_CTRL,
                MTK_WED_CTRL_WDMA_INT_AGENT_EN |
                MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
                MTK_WED_CTRL_WED_TX_BM_EN |
                MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);

        if (dev->hw->version == 1)
                return;

        wed_clr(dev, MTK_WED_CTRL,
                MTK_WED_CTRL_RX_ROUTE_QM_EN |
                MTK_WED_CTRL_WED_RX_BM_EN |
                MTK_WED_CTRL_RX_RRO_QM_EN);
}

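/* Tear down roughly in reverse order of attach: stop DMA, reset WED and
 * the WO microcontroller, free rings and buffers, and hand the DMA
 * device back to the ethernet driver once no WED instance uses it.
 * Called with hw_lock held.
 */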
static void
__mtk_wed_detach(struct mtk_wed_device *dev)
{
        struct mtk_wed_hw *hw = dev->hw;

        mtk_wed_deinit(dev);

        mtk_wdma_rx_reset(dev);
        mtk_wed_reset(dev, MTK_WED_RESET_WED);
        mtk_wed_free_tx_buffer(dev);
        mtk_wed_free_tx_rings(dev);

        if (mtk_wed_get_rx_capa(dev)) {
                if (hw->wed_wo)
                        mtk_wed_wo_reset(dev);
                mtk_wed_free_rx_rings(dev);
                if (hw->wed_wo)
                        mtk_wed_wo_deinit(hw);
        }

        if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
                struct device_node *wlan_node;

                wlan_node = dev->wlan.pci_dev->dev.of_node;
                if (of_dma_is_coherent(wlan_node) && hw->hifsys)
                        regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
                                           BIT(hw->index), BIT(hw->index));
        }

        if ((!hw_list[!hw->index] || !hw_list[!hw->index]->wed_dev) &&
            hw->eth->dma_dev != hw->eth->dev)
                mtk_eth_set_dma_device(hw->eth, hw->eth->dev);

        memset(dev, 0, sizeof(*dev));
        module_put(THIS_MODULE);

        hw->wed_dev = NULL;
}

static void
mtk_wed_detach(struct mtk_wed_device *dev)
{
        mutex_lock(&hw_lock);
        __mtk_wed_detach(dev);
        mutex_unlock(&hw_lock);
}

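/* v2-only bus glue: route the WLAN interrupt either through the PCIe
 * interrupt trigger registers or, for AXI-attached WLAN cores, through
 * the WPDMA interrupt source selection.
 */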
#define PCIE_BASE_ADDR0         0x11280000
static void
mtk_wed_bus_init(struct mtk_wed_device *dev)
{
        switch (dev->wlan.bus_type) {
        case MTK_WED_BUS_PCIE: {
                struct device_node *np = dev->hw->eth->dev->of_node;
                struct regmap *regs;

                regs = syscon_regmap_lookup_by_phandle(np,
                                                       "mediatek,wed-pcie");
                if (IS_ERR(regs))
                        break;

                regmap_update_bits(regs, 0, BIT(0), BIT(0));

                wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
                        FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));

                /* pcie interrupt control: polarity/source selection */
                wed_set(dev, MTK_WED_PCIE_INT_CTRL,
                        MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
                        FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1));
                wed_r32(dev, MTK_WED_PCIE_INT_CTRL);

                wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180);
                wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184);

                /* pcie interrupt status trigger register */
                wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
                wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);

                /* polarity setting */
                wed_set(dev, MTK_WED_PCIE_INT_CTRL,
                        MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
                break;
        }
        case MTK_WED_BUS_AXI:
                wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
                        MTK_WED_WPDMA_INT_CTRL_SIG_SRC |
                        FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_SRC_SEL, 0));
                break;
        default:
                break;
        }
}

static void
mtk_wed_set_wpdma(struct mtk_wed_device *dev)
{
        if (dev->hw->version == 1) {
                wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
        } else {
                mtk_wed_bus_init(dev);

                wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
                wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
                wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
                wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
                wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
                wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
        }
}

static void
mtk_wed_hw_init_early(struct mtk_wed_device *dev)
{
        u32 mask, set;

        mtk_wed_deinit(dev);
        mtk_wed_reset(dev, MTK_WED_RESET_WED);
        mtk_wed_set_wpdma(dev);

        mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
               MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
               MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
        set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
              MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
              MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
        wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);

        if (dev->hw->version == 1) {
                u32 offset = dev->hw->index ? 0x04000400 : 0;

                wdma_set(dev, MTK_WDMA_GLO_CFG,
                         MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
                         MTK_WDMA_GLO_CFG_RX_INFO2_PRERES |
                         MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);

                wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
                wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
                wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
                        MTK_PCIE_BASE(dev->hw->index));
        } else {
                wed_w32(dev, MTK_WED_WDMA_CFG_BASE, dev->hw->wdma_phy);
                wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_ETH_DMAD_FMT);
                wed_w32(dev, MTK_WED_WDMA_OFFSET0,
                        FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_INTS,
                                   MTK_WDMA_INT_STATUS) |
                        FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_CFG,
                                   MTK_WDMA_GLO_CFG));

                wed_w32(dev, MTK_WED_WDMA_OFFSET1,
                        FIELD_PREP(MTK_WED_WDMA_OFST1_TX_CTRL,
                                   MTK_WDMA_RING_TX(0)) |
                        FIELD_PREP(MTK_WED_WDMA_OFST1_RX_CTRL,
                                   MTK_WDMA_RING_RX(0)));
        }
}

static int
mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
                       int size)
{
        ring->desc = dma_alloc_coherent(dev->hw->dev,
                                        size * sizeof(*ring->desc),
                                        &ring->desc_phys, GFP_KERNEL);
        if (!ring->desc)
                return -ENOMEM;

        ring->desc_size = sizeof(*ring->desc);
        ring->size = size;

        return 0;
}

#define MTK_WED_MIOD_COUNT      (MTK_WED_MIOD_ENTRY_CNT * MTK_WED_MIOD_CNT)
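/* The RRO MIOD and feedback rings live in the "wo-dlm" reserved memory
 * region shared with the WO firmware; only the RRO queue ring is
 * allocated from regular coherent memory.
 */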
static int
mtk_wed_rro_alloc(struct mtk_wed_device *dev)
{
        struct reserved_mem *rmem;
        struct device_node *np;
        int index;

        index = of_property_match_string(dev->hw->node, "memory-region-names",
                                         "wo-dlm");
        if (index < 0)
                return index;

        np = of_parse_phandle(dev->hw->node, "memory-region", index);
        if (!np)
                return -ENODEV;

        rmem = of_reserved_mem_lookup(np);
        of_node_put(np);

        if (!rmem)
                return -ENODEV;

        dev->rro.miod_phys = rmem->base;
        dev->rro.fdbk_phys = MTK_WED_MIOD_COUNT + dev->rro.miod_phys;

        return mtk_wed_rro_ring_alloc(dev, &dev->rro.ring,
                                      MTK_WED_RRO_QUE_CNT);
}

static int
mtk_wed_rro_cfg(struct mtk_wed_device *dev)
{
        struct mtk_wed_wo *wo = dev->hw->wed_wo;
        struct {
                struct {
                        __le32 base;
                        __le32 cnt;
                        __le32 unit;
                } ring[2];
                __le32 wed;
                u8 version;
        } req = {
                .ring[0] = {
                        .base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE),
                        .cnt = cpu_to_le32(MTK_WED_MIOD_CNT),
                        .unit = cpu_to_le32(MTK_WED_MIOD_ENTRY_CNT),
                },
                .ring[1] = {
                        .base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE +
                                            MTK_WED_MIOD_COUNT),
                        .cnt = cpu_to_le32(MTK_WED_FB_CMD_CNT),
                        .unit = cpu_to_le32(4),
                },
        };

        return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
                                    MTK_WED_WO_CMD_WED_CFG,
                                    &req, sizeof(req), true);
}

static void
mtk_wed_rro_hw_init(struct mtk_wed_device *dev)
{
        wed_w32(dev, MTK_WED_RROQM_MIOD_CFG,
                FIELD_PREP(MTK_WED_RROQM_MIOD_MID_DW, 0x70 >> 2) |
                FIELD_PREP(MTK_WED_RROQM_MIOD_MOD_DW, 0x10 >> 2) |
                FIELD_PREP(MTK_WED_RROQM_MIOD_ENTRY_DW,
                           MTK_WED_MIOD_ENTRY_CNT >> 2));

        wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL0, dev->rro.miod_phys);
        wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL1,
                FIELD_PREP(MTK_WED_RROQM_MIOD_CNT, MTK_WED_MIOD_CNT));
        wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL0, dev->rro.fdbk_phys);
        wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL1,
                FIELD_PREP(MTK_WED_RROQM_FDBK_CNT, MTK_WED_FB_CMD_CNT));
        wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL2, 0);
        wed_w32(dev, MTK_WED_RROQ_BASE_L, dev->rro.ring.desc_phys);

        wed_set(dev, MTK_WED_RROQM_RST_IDX,
                MTK_WED_RROQM_RST_IDX_MIOD |
                MTK_WED_RROQM_RST_IDX_FDBK);

        wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
        wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL2, MTK_WED_MIOD_CNT - 1);
        wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
}

static void
mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
{
        wed_w32(dev, MTK_WED_RESET, MTK_WED_RESET_RX_ROUTE_QM);

        for (;;) {
                usleep_range(100, 200);
                if (!(wed_r32(dev, MTK_WED_RESET) & MTK_WED_RESET_RX_ROUTE_QM))
                        break;
        }

        /* configure RX_ROUTE_QM */
        wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
        wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
        wed_set(dev, MTK_WED_RTQM_GLO_CFG,
                FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
        wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
        /* enable RX_ROUTE_QM */
        wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
}

static void
mtk_wed_hw_init(struct mtk_wed_device *dev)
{
        if (dev->init_done)
                return;

        dev->init_done = true;
        mtk_wed_set_ext_int(dev, false);
        wed_w32(dev, MTK_WED_TX_BM_CTRL,
                MTK_WED_TX_BM_CTRL_PAUSE |
                FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
                           dev->tx_buf_ring.size / 128) |
                FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
                           MTK_WED_TX_RING_SIZE / 256));

        wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys);

        wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);

        if (dev->hw->version == 1) {
                wed_w32(dev, MTK_WED_TX_BM_TKID,
                        FIELD_PREP(MTK_WED_TX_BM_TKID_START,
                                   dev->wlan.token_start) |
                        FIELD_PREP(MTK_WED_TX_BM_TKID_END,
                                   dev->wlan.token_start +
                                   dev->wlan.nbuf - 1));
                wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
                        FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
                        MTK_WED_TX_BM_DYN_THR_HI);
        } else {
                wed_w32(dev, MTK_WED_TX_BM_TKID_V2,
                        FIELD_PREP(MTK_WED_TX_BM_TKID_START,
                                   dev->wlan.token_start) |
                        FIELD_PREP(MTK_WED_TX_BM_TKID_END,
                                   dev->wlan.token_start +
                                   dev->wlan.nbuf - 1));
                wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
                        FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) |
                        MTK_WED_TX_BM_DYN_THR_HI_V2);
                wed_w32(dev, MTK_WED_TX_TKID_CTRL,
                        MTK_WED_TX_TKID_CTRL_PAUSE |
                        FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
                                   dev->tx_buf_ring.size / 128) |
                        FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
                                   dev->tx_buf_ring.size / 128));
                wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
                        FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
                        MTK_WED_TX_TKID_DYN_THR_HI);
        }

        mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);

        if (dev->hw->version == 1) {
                wed_set(dev, MTK_WED_CTRL,
                        MTK_WED_CTRL_WED_TX_BM_EN |
                        MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
        } else {
                wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
                /* rx hw init */
                wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
                        MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
                        MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
                wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);

                mtk_wed_rx_buffer_hw_init(dev);
                mtk_wed_rro_hw_init(dev);
                mtk_wed_route_qm_hw_init(dev);
        }

        wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
}

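/* Reinitialize all descriptors of a ring: tx descriptors are marked
 * DMA_DONE so the hardware treats them as empty, rx descriptors are
 * flagged to be handed back to the host.
 */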
static void
mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size, bool tx)
{
        void *head = (void *)ring->desc;
        int i;

        for (i = 0; i < size; i++) {
                struct mtk_wdma_desc *desc;

                desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size);
                desc->buf0 = 0;
                if (tx)
                        desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
                else
                        desc->ctrl = cpu_to_le32(MTK_WFDMA_DESC_CTRL_TO_HOST);
                desc->buf1 = 0;
                desc->info = 0;
        }
}

static u32
mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
        return !!(wed_r32(dev, reg) & mask);
}

static int
mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
        int sleep = 15000;
        int timeout = 100 * sleep;
        u32 val;

        return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
                                 timeout, false, dev, reg, mask);
}

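/* SER (system error recovery) rx reset: park the WO firmware, quiesce
 * and reset every rx-side block (WPDMA rx driver, RRO and route QMs,
 * WDMA tx, WED rx DMA, rx buffer manager), re-enable WO and clear the
 * host rx rings.
 */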
static int
mtk_wed_rx_reset(struct mtk_wed_device *dev)
{
        struct mtk_wed_wo *wo = dev->hw->wed_wo;
        u8 val = MTK_WED_WO_STATE_SER_RESET;
        int i, ret;

        ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
                                   MTK_WED_WO_CMD_CHANGE_STATE, &val,
                                   sizeof(val), true);
        if (ret)
                return ret;

        wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN);
        ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
                                MTK_WED_WPDMA_RX_D_RX_DRV_BUSY);
        if (ret) {
                mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
                mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV);
        } else {
                wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
                        MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
                        MTK_WED_WPDMA_RX_D_RST_DRV_IDX);

                wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
                        MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
                        MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);
                wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
                        MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
                        MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);

                wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
        }

        /* reset rro qm */
        wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
        ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
                                MTK_WED_CTRL_RX_RRO_QM_BUSY);
        if (ret) {
                mtk_wed_reset(dev, MTK_WED_RESET_RX_RRO_QM);
        } else {
                wed_set(dev, MTK_WED_RROQM_RST_IDX,
                        MTK_WED_RROQM_RST_IDX_MIOD |
                        MTK_WED_RROQM_RST_IDX_FDBK);
                wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
        }

        /* reset route qm */
        wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
        ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
                                MTK_WED_CTRL_RX_ROUTE_QM_BUSY);
        if (ret)
                mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
        else
                wed_set(dev, MTK_WED_RTQM_GLO_CFG,
                        MTK_WED_RTQM_Q_RST);

        /* reset tx wdma */
        mtk_wdma_tx_reset(dev);

        /* reset tx wdma drv */
        wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN);
        mtk_wed_poll_busy(dev, MTK_WED_CTRL,
                          MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
        mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV);

        /* reset wed rx dma */
        ret = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG,
                                MTK_WED_GLO_CFG_RX_DMA_BUSY);
        wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_RX_DMA_EN);
        if (ret) {
                mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA);
        } else {
                struct mtk_eth *eth = dev->hw->eth;

                if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
                        wed_set(dev, MTK_WED_RESET_IDX,
                                MTK_WED_RESET_IDX_RX_V2);
                else
                        wed_set(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_RX);
                wed_w32(dev, MTK_WED_RESET_IDX, 0);
        }

        /* reset rx bm */
        wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
        mtk_wed_poll_busy(dev, MTK_WED_CTRL,
                          MTK_WED_CTRL_WED_RX_BM_BUSY);
        mtk_wed_reset(dev, MTK_WED_RESET_RX_BM);

        /* wo change to enable state */
        val = MTK_WED_WO_STATE_ENABLE;
        ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
                                   MTK_WED_WO_CMD_CHANGE_STATE, &val,
                                   sizeof(val), true);
        if (ret)
                return ret;

        /* wed_rx_ring_reset */
        for (i = 0; i < ARRAY_SIZE(dev->rx_ring); i++) {
                if (!dev->rx_ring[i].desc)
                        continue;

                mtk_wed_ring_reset(&dev->rx_ring[i], MTK_WED_RX_RING_SIZE,
                                   false);
        }
        mtk_wed_free_rx_buffer(dev);

        return 0;
}

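/* Full tx-path DMA reset. Each stage first tries a graceful disable
 * and falls back to the hard reset bit if the block stays busy; on v2
 * hardware this ends with the rx-path reset above.
 */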
static void
mtk_wed_reset_dma(struct mtk_wed_device *dev)
{
        bool busy = false;
        u32 val;
        int i;

        for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) {
                if (!dev->tx_ring[i].desc)
                        continue;

                mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE,
                                   true);
        }

        /* 1. reset WED tx DMA */
        wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_EN);
        busy = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG,
                                 MTK_WED_GLO_CFG_TX_DMA_BUSY);
        if (busy) {
                mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
        } else {
                wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_TX);
                wed_w32(dev, MTK_WED_RESET_IDX, 0);
        }

        /* 2. reset WDMA rx DMA */
        busy = !!mtk_wdma_rx_reset(dev);
        wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
        if (!busy)
                busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG,
                                         MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY);

        if (busy) {
                mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
                mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
        } else {
                wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
                        MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
                wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0);

                wed_set(dev, MTK_WED_WDMA_GLO_CFG,
                        MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);

                wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
                        MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
        }

        /* 3. reset WED tx free agent and tx buffer manager */
        wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);

        for (i = 0; i < 100; i++) {
                val = wed_r32(dev, MTK_WED_TX_BM_INTF);
                if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
                        break;
        }

        mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
        wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_BM_EN);
        mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);

        /* 4. reset WED WPDMA tx */
        busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
                                 MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY);
        wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
                MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
                MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
        if (!busy)
                busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
                                         MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY);

        if (busy) {
                mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
                mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
                mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
        } else {
                wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
                        MTK_WED_WPDMA_RESET_IDX_TX |
                        MTK_WED_WPDMA_RESET_IDX_RX);
                wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
        }

        dev->init_done = false;
        if (dev->hw->version == 1)
                return;

        if (!busy) {
                wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_WPDMA_IDX_RX);
                wed_w32(dev, MTK_WED_RESET_IDX, 0);
        }

        mtk_wed_rx_reset(dev);
}

static int
mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
                   int size, u32 desc_size, bool tx)
{
        ring->desc = dma_alloc_coherent(dev->hw->dev, size * desc_size,
                                        &ring->desc_phys, GFP_KERNEL);
        if (!ring->desc)
                return -ENOMEM;

        ring->desc_size = desc_size;
        ring->size = size;
        mtk_wed_ring_reset(ring, size, tx);

        return 0;
}

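/* WDMA descriptors are twice as large on v2 hardware, hence the
 * desc_size scaled by the hw version below.
 */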
static int
mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
                           bool reset)
{
        u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
        struct mtk_wed_ring *wdma;

        if (idx >= ARRAY_SIZE(dev->rx_wdma))
                return -EINVAL;

        wdma = &dev->rx_wdma[idx];
        if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
                                         desc_size, true))
                return -ENOMEM;

        wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
                 wdma->desc_phys);
        wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
                 size);
        wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);

        wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
                wdma->desc_phys);
        wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
                size);

        return 0;
}

static int
mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
                           bool reset)
{
        u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
        struct mtk_wed_ring *wdma;

        if (idx >= ARRAY_SIZE(dev->tx_wdma))
                return -EINVAL;

        wdma = &dev->tx_wdma[idx];
        if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
                                         desc_size, true))
                return -ENOMEM;

        wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
                 wdma->desc_phys);
        wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
                 size);
        wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
        wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0);

        if (reset)
                mtk_wed_ring_reset(wdma, MTK_WED_WDMA_RING_SIZE, true);

        if (!idx) {
                wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_BASE,
                        wdma->desc_phys);
                wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_COUNT,
                        size);
                wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_CPU_IDX,
                        0);
                wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_DMA_IDX,
                        0);
        }

        return 0;
}

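/* rx path hook: packets that the PPE punted to the CPU because the
 * unbind rate was reached are fed back to the PPE so the flow can be
 * bound for offloading.
 */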
static void
mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb,
                  u32 reason, u32 hash)
{
        struct mtk_eth *eth = dev->hw->eth;
        struct ethhdr *eh;

        if (!skb)
                return;

        if (reason != MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
                return;

        skb_set_mac_header(skb, 0);
        eh = eth_hdr(skb);
        skb->protocol = eh->h_proto;
        mtk_ppe_check_skb(eth->ppe[dev->hw->index], skb, hash);
}

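/* Route WDMA and WPDMA interrupts through the WED interrupt agents and
 * program the trigger bits the WLAN driver registered for tx, txfree
 * and rx completion.
 */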
static void
mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
{
        u32 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));

        /* wed control cr set */
        wed_set(dev, MTK_WED_CTRL,
                MTK_WED_CTRL_WDMA_INT_AGENT_EN |
                MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
                MTK_WED_CTRL_WED_TX_BM_EN |
                MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);

        if (dev->hw->version == 1) {
                wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
                        MTK_WED_PCIE_INT_TRIGGER_STATUS);

                wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
                        MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
                        MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);

                wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
        } else {
                wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
                                        GENMASK(1, 0));
                /* initial tx interrupt trigger */
                wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
                        MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
                        MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR |
                        MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN |
                        MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR |
                        FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG,
                                   dev->wlan.tx_tbit[0]) |
                        FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG,
                                   dev->wlan.tx_tbit[1]));

                /* initial txfree interrupt trigger */
                wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE,
                        MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN |
                        MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR |
                        FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
                                   dev->wlan.txfree_tbit));

                wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
                        MTK_WED_WPDMA_INT_CTRL_RX0_EN |
                        MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
                        MTK_WED_WPDMA_INT_CTRL_RX1_EN |
                        MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
                        FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
                                   dev->wlan.rx_tbit[0]) |
                        FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
                                   dev->wlan.rx_tbit[1]));

                wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
                wed_set(dev, MTK_WED_WDMA_INT_CTRL,
                        FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL,
                                   dev->wdma_idx));
        }

        wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);

        wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
        wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);
        wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
        wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
}

static void
mtk_wed_dma_enable(struct mtk_wed_device *dev)
{
        wed_set(dev, MTK_WED_WPDMA_INT_CTRL, MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);

        wed_set(dev, MTK_WED_GLO_CFG,
                MTK_WED_GLO_CFG_TX_DMA_EN |
                MTK_WED_GLO_CFG_RX_DMA_EN);
        wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
                MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
                MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
        wed_set(dev, MTK_WED_WDMA_GLO_CFG,
                MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);

        wdma_set(dev, MTK_WDMA_GLO_CFG,
                 MTK_WDMA_GLO_CFG_TX_DMA_EN |
                 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
                 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);

        if (dev->hw->version == 1) {
                wdma_set(dev, MTK_WDMA_GLO_CFG,
                         MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
        } else {
                int i;

                wed_set(dev, MTK_WED_WPDMA_CTRL,
                        MTK_WED_WPDMA_CTRL_SDL1_FIXED);

                wed_set(dev, MTK_WED_WDMA_GLO_CFG,
                        MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
                        MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);

                wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
                        MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
                        MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);

                wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
                        MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
                        MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);

                wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
                        MTK_WED_WPDMA_RX_D_RX_DRV_EN |
                        FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
                        FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
                                   0x2));

                for (i = 0; i < MTK_WED_RX_QUEUES; i++)
                        mtk_wed_check_wfdma_rx_fill(dev, i);
        }
}

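/* Bring the device fully up: populate default WDMA rx rings, run the
 * one-time hw init, unmask interrupts and turn on all DMA engines.
 */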
static void
mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
{
        int i;

        if (mtk_wed_get_rx_capa(dev) && mtk_wed_rx_buffer_alloc(dev))
                return;

        for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
                if (!dev->rx_wdma[i].desc)
                        mtk_wed_wdma_rx_ring_setup(dev, i, 16, false);

        mtk_wed_hw_init(dev);
        mtk_wed_configure_irq(dev, irq_mask);

        mtk_wed_set_ext_int(dev, true);

        if (dev->hw->version == 1) {
                u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN |
                          FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID,
                                     dev->hw->index);

                val |= BIT(0) | (BIT(1) * !!dev->hw->index);
                regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
        } else {
                /* driver sets mid ready only once */
1469                 wed_w32(dev, MTK_WED_EXT_INT_MASK1,
1470                         MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
1471                 wed_w32(dev, MTK_WED_EXT_INT_MASK2,
1472                         MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
1473
1474                 wed_r32(dev, MTK_WED_EXT_INT_MASK1);
1475                 wed_r32(dev, MTK_WED_EXT_INT_MASK2);
1476
1477                 if (mtk_wed_rro_cfg(dev))
1478                         return;
1479
1480         }

	mtk_wed_set_512_support(dev, dev->wlan.wcid_512);

	mtk_wed_dma_enable(dev);
	dev->running = true;
}

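/* WLAN driver entry point, reached through the mtk_wed_device_attach()
 * wrapper that takes the RCU read lock before dereferencing the ops pointer;
 * the lock is released here. Binds the device to a free WED instance and
 * performs the one-time buffer and, on v2 hardware, RRO/WO firmware setup.
 */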
static int
mtk_wed_attach(struct mtk_wed_device *dev)
	__releases(RCU)
{
	struct mtk_wed_hw *hw;
	struct device *device;
	int ret = 0;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "mtk_wed_attach without holding the RCU read lock");

	if ((dev->wlan.bus_type == MTK_WED_BUS_PCIE &&
	     pci_domain_nr(dev->wlan.pci_dev->bus) > 1) ||
	    !try_module_get(THIS_MODULE))
		ret = -ENODEV;

	rcu_read_unlock();

	if (ret)
		return ret;

	mutex_lock(&hw_lock);

	hw = mtk_wed_assign(dev);
	if (!hw) {
		module_put(THIS_MODULE);
		ret = -ENODEV;
		goto unlock;
	}

	device = dev->wlan.bus_type == MTK_WED_BUS_PCIE
		? &dev->wlan.pci_dev->dev
		: &dev->wlan.platform_dev->dev;
	dev_info(device, "attaching wed device %d version %d\n",
		 hw->index, hw->version);

	dev->hw = hw;
	dev->dev = hw->dev;
	dev->irq = hw->irq;
	dev->wdma_idx = hw->index;
	dev->version = hw->version;

	if (hw->eth->dma_dev == hw->eth->dev &&
	    of_dma_is_coherent(hw->eth->dev->of_node))
		mtk_eth_set_dma_device(hw->eth, hw->dev);

	ret = mtk_wed_tx_buffer_alloc(dev);
	if (ret)
		goto out;

	if (mtk_wed_get_rx_capa(dev)) {
		ret = mtk_wed_rro_alloc(dev);
		if (ret)
			goto out;
	}

	mtk_wed_hw_init_early(dev);
	if (hw->version == 1) {
		regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
				   BIT(hw->index), 0);
	} else {
		dev->rev_id = wed_r32(dev, MTK_WED_REV_ID);
		ret = mtk_wed_wo_init(hw);
	}
out:
	if (ret) {
		dev_err(dev->hw->dev, "failed to attach wed device\n");
		__mtk_wed_detach(dev);
	}
unlock:
	mutex_unlock(&hw_lock);

	return ret;
}

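/* Rough attach/setup/start sequence as seen from a WLAN driver, using the
 * inline wrappers from include/linux/soc/mediatek/mtk_wed.h; the variable
 * names below are placeholders, not taken from a real driver:
 *
 *	if (mtk_wed_device_attach(&wed) == 0) {
 *		mtk_wed_device_tx_ring_setup(&wed, 0, tx_ring_regs, false);
 *		mtk_wed_device_txfree_ring_setup(&wed, txfree_regs);
 *		mtk_wed_device_start(&wed, irq_mask);
 *	}
 */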
static int
mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs,
		      bool reset)
{
	struct mtk_wed_ring *ring = &dev->tx_ring[idx];

	/*
	 * Tx ring redirection:
	 * Instead of configuring the WLAN PDMA TX ring directly, the DMA ring
	 * allocated by the WLAN driver is configured into the WED
	 * MTK_WED_RING_TX(n) registers.
	 *
	 * The WED driver posts its own DMA ring as the WLAN PDMA TX ring and
	 * configures it into the MTK_WED_WPDMA_RING_TX(n) registers.
	 * That ring is filled with packets picked up from the WED TX ring
	 * and from WDMA RX.
	 */

	if (WARN_ON(idx >= ARRAY_SIZE(dev->tx_ring)))
		return -EINVAL;

	if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE,
					 sizeof(*ring->desc), true))
		return -ENOMEM;

	if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE,
				       reset))
		return -ENOMEM;

	ring->reg_base = MTK_WED_RING_TX(idx);
	ring->wpdma = regs;

	/* WED -> WPDMA */
	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE);
	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0);

	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
		ring->desc_phys);
	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
		MTK_WED_TX_RING_SIZE);
	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);

	return 0;
}

static int
mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
{
	struct mtk_wed_ring *ring = &dev->txfree_ring;
	int i, index = dev->hw->version == 1;
	/*
	 * For txfree event handling, the same DMA ring is shared between WED
	 * and WLAN. The WLAN driver accesses the ring index registers through
	 * WED.
	 */
	ring->reg_base = MTK_WED_RING_RX(index);
	ring->wpdma = regs;

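	/* mirror the three ring words the WLAN driver already programmed;
	 * going by the MTK_WED_RING_OFS_* offsets these are base, count and
	 * CPU index
	 */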
	for (i = 0; i < 12; i += 4) {
		u32 val = readl(regs + i);

		wed_w32(dev, MTK_WED_RING_RX(index) + i, val);
		wed_w32(dev, MTK_WED_WPDMA_RING_RX(index) + i, val);
	}

	return 0;
}

static int
mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs,
		      bool reset)
{
	struct mtk_wed_ring *ring = &dev->rx_ring[idx];

	if (WARN_ON(idx >= ARRAY_SIZE(dev->rx_ring)))
		return -EINVAL;

	if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE,
					 sizeof(*ring->desc), false))
		return -ENOMEM;

	if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE,
				       reset))
		return -ENOMEM;

	ring->reg_base = MTK_WED_RING_RX_DATA(idx);
	ring->wpdma = regs;
	ring->flags |= MTK_WED_RING_CONFIGURED;

	/* WPDMA -> WED */
	wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
	wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_RX_RING_SIZE);

	wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_BASE,
		ring->desc_phys);
	wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_COUNT,
		MTK_WED_RX_RING_SIZE);

	return 0;
}

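/* Read and acknowledge the interrupt status on behalf of the WLAN driver.
 * Error conditions latched in MTK_WED_EXT_INT_STATUS are only logged
 * (rate-limited); the return value is the regular interrupt status filtered
 * by @mask.
 */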
static u32
mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
{
	u32 val, ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;

	if (dev->hw->version == 1)
		ext_mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
	else
		ext_mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
			    MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
			    MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
			    MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;

	val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
	wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
	val &= ext_mask;
	if (!dev->hw->num_flows)
		val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
	if (val && net_ratelimit())
		pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val);

	val = wed_r32(dev, MTK_WED_INT_STATUS);
	val &= mask;
	wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */

	return val;
}

static void
mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
{
	if (!dev->running)
		return;

	mtk_wed_set_ext_int(dev, !!mask);
	wed_w32(dev, MTK_WED_INT_MASK, mask);
}

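/* Reference-counted enabling of hardware flow offload: only the first flow
 * calls the WLAN driver's offload_enable() hook, subsequent flows merely
 * bump the counter.
 */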
int mtk_wed_flow_add(int index)
{
	struct mtk_wed_hw *hw = hw_list[index];
	int ret;

	if (!hw || !hw->wed_dev)
		return -ENODEV;

	if (hw->num_flows) {
		hw->num_flows++;
		return 0;
	}

	mutex_lock(&hw_lock);
	if (!hw->wed_dev) {
		ret = -ENODEV;
		goto out;
	}

	ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev);
	if (!ret)
		hw->num_flows++;
	mtk_wed_set_ext_int(hw->wed_dev, true);

out:
	mutex_unlock(&hw_lock);

	return ret;
}

void mtk_wed_flow_remove(int index)
{
	struct mtk_wed_hw *hw = hw_list[index];

	if (!hw)
		return;

	if (--hw->num_flows)
		return;

	mutex_lock(&hw_lock);
	if (!hw->wed_dev)
		goto out;

	hw->wed_dev->wlan.offload_disable(hw->wed_dev);
	mtk_wed_set_ext_int(hw->wed_dev, true);

out:
	mutex_unlock(&hw_lock);
}

static int
mtk_wed_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct mtk_wed_flow_block_priv *priv = cb_priv;
	struct flow_cls_offload *cls = type_data;
	struct mtk_wed_hw *hw = priv->hw;

	if (!tc_can_offload(priv->dev))
		return -EOPNOTSUPP;

	if (type != TC_SETUP_CLSFLOWER)
		return -EOPNOTSUPP;

	return mtk_flow_offload_cmd(hw->eth, cls, hw->index);
}

static int
mtk_wed_setup_tc_block(struct mtk_wed_hw *hw, struct net_device *dev,
		       struct flow_block_offload *f)
{
	struct mtk_wed_flow_block_priv *priv;
	static LIST_HEAD(block_cb_list);
	struct flow_block_cb *block_cb;
	struct mtk_eth *eth = hw->eth;
	flow_setup_cb_t *cb;

	if (!eth->soc->offload_version)
		return -EOPNOTSUPP;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	cb = mtk_wed_setup_tc_block_cb;
	f->driver_block_list = &block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (block_cb) {
			flow_block_cb_incref(block_cb);
			return 0;
		}

		priv = kzalloc(sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		priv->hw = hw;
		priv->dev = dev;
		block_cb = flow_block_cb_alloc(cb, dev, priv, NULL);
		if (IS_ERR(block_cb)) {
			kfree(priv);
			return PTR_ERR(block_cb);
		}

		flow_block_cb_incref(block_cb);
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		if (!flow_block_cb_decref(block_cb)) {
			flow_block_cb_remove(block_cb, f);
			list_del(&block_cb->driver_list);
			kfree(block_cb->cb_priv);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int
mtk_wed_setup_tc(struct mtk_wed_device *wed, struct net_device *dev,
		 enum tc_setup_type type, void *type_data)
{
	struct mtk_wed_hw *hw = wed->hw;

	if (hw->version < 2)
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_BLOCK:
	case TC_SETUP_FT:
		return mtk_wed_setup_tc_block(hw, dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

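/* Called from the mtk_eth_soc probe path for each WED node: publishes the
 * ops table via mtk_soc_wed_ops and records the per-instance resources
 * (register map, IRQ, WDMA base) in hw_list.
 */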
void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
		    void __iomem *wdma, phys_addr_t wdma_phy,
		    int index)
{
	static const struct mtk_wed_ops wed_ops = {
		.attach = mtk_wed_attach,
		.tx_ring_setup = mtk_wed_tx_ring_setup,
		.rx_ring_setup = mtk_wed_rx_ring_setup,
		.txfree_ring_setup = mtk_wed_txfree_ring_setup,
		.msg_update = mtk_wed_mcu_msg_update,
		.start = mtk_wed_start,
		.stop = mtk_wed_stop,
		.reset_dma = mtk_wed_reset_dma,
		.reg_read = wed_r32,
		.reg_write = wed_w32,
		.irq_get = mtk_wed_irq_get,
		.irq_set_mask = mtk_wed_irq_set_mask,
		.detach = mtk_wed_detach,
		.ppe_check = mtk_wed_ppe_check,
		.setup_tc = mtk_wed_setup_tc,
	};
	struct device_node *eth_np = eth->dev->of_node;
	struct platform_device *pdev;
	struct mtk_wed_hw *hw;
	struct regmap *regs;
	int irq;

	if (!np)
		return;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		goto err_of_node_put;

	get_device(&pdev->dev);
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		goto err_put_device;

	regs = syscon_regmap_lookup_by_phandle(np, NULL);
	if (IS_ERR(regs))
		goto err_put_device;

	rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);

	mutex_lock(&hw_lock);

	if (WARN_ON(hw_list[index]))
		goto unlock;

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		goto unlock;

	hw->node = np;
	hw->regs = regs;
	hw->eth = eth;
	hw->dev = &pdev->dev;
	hw->wdma_phy = wdma_phy;
	hw->wdma = wdma;
	hw->index = index;
	hw->irq = irq;
	hw->version = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;

	if (hw->version == 1) {
		hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
				"mediatek,pcie-mirror");
		hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
				"mediatek,hifsys");
		if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
			kfree(hw);
			goto unlock;
		}

		if (!index) {
			regmap_write(hw->mirror, 0, 0);
			regmap_write(hw->mirror, 4, 0);
		}
	}

	mtk_wed_hw_add_debugfs(hw);

	hw_list[index] = hw;

	mutex_unlock(&hw_lock);

	return;

unlock:
	mutex_unlock(&hw_lock);
err_put_device:
	put_device(&pdev->dev);
err_of_node_put:
	of_node_put(np);
}

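/* Clear the ops pointer and wait out a grace period so that no reader can
 * still call into the module, then release all per-instance resources.
 */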
void mtk_wed_exit(void)
{
	int i;

	rcu_assign_pointer(mtk_soc_wed_ops, NULL);

	synchronize_rcu();

	for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
		struct mtk_wed_hw *hw;

		hw = hw_list[i];
		if (!hw)
			continue;

		hw_list[i] = NULL;
		debugfs_remove(hw->debugfs_dir);
		put_device(hw->dev);
		of_node_put(hw->node);
		kfree(hw);
	}
}