// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2022 MediaTek Inc. */
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/of_irq.h>
#include <linux/bitfield.h>

#include "mtk_wed.h"
#include "mtk_wed_regs.h"
#include "mtk_wed_wo.h"
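/* Register accessors for the WO CCIF block, backed by the syscon regmap
 * looked up in mtk_wed_wo_hardware_init().
 */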
static u32
mtk_wed_mmio_r32(struct mtk_wed_wo *wo, u32 reg)
{
	u32 val;

	if (regmap_read(wo->mmio.regs, reg, &val))
		val = ~0;

	return val;
}
static void
mtk_wed_mmio_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
{
	regmap_write(wo->mmio.regs, reg, val);
}
static u32
mtk_wed_wo_get_isr(struct mtk_wed_wo *wo)
{
	u32 val = mtk_wed_mmio_r32(wo, MTK_WED_WO_CCIF_RCHNUM);

	return val & MTK_WED_WO_CCIF_RCHNUM_MASK;
}
static void
mtk_wed_wo_set_isr(struct mtk_wed_wo *wo, u32 mask)
{
	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask);
}
static void
mtk_wed_wo_set_ack(struct mtk_wed_wo *wo, u32 mask)
{
	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
}
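/* Update the cached interrupt mask under mmio.lock and write it back to
 * the hardware only when @set is true, so the enable path can leave the
 * actual unmasking to the irq tasklet.
 */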
static void
mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, u32 mask, u32 val, bool set)
{
	unsigned long flags;

	spin_lock_irqsave(&wo->mmio.lock, flags);
	wo->mmio.irq_mask &= ~mask;
	wo->mmio.irq_mask |= val;
	if (set)
		mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
	spin_unlock_irqrestore(&wo->mmio.lock, flags);
}
static void
mtk_wed_wo_irq_enable(struct mtk_wed_wo *wo, u32 mask)
{
	mtk_wed_wo_set_isr_mask(wo, 0, mask, false);
	tasklet_schedule(&wo->mmio.irq_tasklet);
}
static void
mtk_wed_wo_irq_disable(struct mtk_wed_wo *wo, u32 mask)
{
	mtk_wed_wo_set_isr_mask(wo, mask, 0, true);
}
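/* Notify the WO firmware that new data is pending on the tx channel by
 * ringing the CCIF doorbell registers.
 */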
static void
mtk_wed_wo_kickout(struct mtk_wed_wo *wo)
{
	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM);
	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM);
}
static void
mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
		      u32 val)
{
	/* make sure descriptor updates are visible before kicking the ring */
	wmb();
	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val);
}
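/* Pop one completed buffer from @q. With @flush set, pending descriptors
 * are treated as done so the queue can be drained at teardown; otherwise
 * stop at the first descriptor not yet marked DMA_DONE by the firmware.
 */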
static void *
mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len,
		   bool flush)
{
	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
	int index = (q->tail + 1) % q->n_desc;
	struct mtk_wed_wo_queue_entry *entry;
	struct mtk_wed_wo_queue_desc *desc;
	void *buf;

	if (!q->queued)
		return NULL;

	if (flush)
		q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE);
	else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE)))
		return NULL;

	q->tail = index;
	q->queued--;

	desc = &q->desc[index];
	entry = &q->entry[index];
	buf = entry->buf;
	if (len)
		*len = FIELD_GET(MTK_WED_WO_CTL_SD_LEN0,
				 le32_to_cpu(READ_ONCE(desc->ctrl)));
	if (buf)
		dma_unmap_single(wo->hw->dev, entry->addr, buf_len,
				 DMA_FROM_DEVICE);
	entry->buf = NULL;

	return buf;
}
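/* Fill free descriptors with page_frag buffers mapped for DMA. For rx
 * queues the descriptor is armed here; tx buffers are only staged and the
 * descriptor is written when a command is actually queued.
 */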
static int
mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
			bool rx)
{
	enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	int n_buf = 0;

	while (q->queued < q->n_desc) {
		struct mtk_wed_wo_queue_entry *entry;
		dma_addr_t addr;
		void *buf;

		buf = page_frag_alloc(&q->cache, q->buf_size,
				      GFP_ATOMIC | GFP_DMA32);
		if (!buf)
			break;

		addr = dma_map_single(wo->hw->dev, buf, q->buf_size, dir);
		if (unlikely(dma_mapping_error(wo->hw->dev, addr))) {
			skb_free_frag(buf);
			break;
		}

		q->head = (q->head + 1) % q->n_desc;
		entry = &q->entry[q->head];
		entry->addr = addr;
		entry->len = q->buf_size;
		q->entry[q->head].buf = buf;

		if (rx) {
			struct mtk_wed_wo_queue_desc *desc = &q->desc[q->head];
			u32 ctrl = MTK_WED_WO_CTL_LAST_SEC0 |
				   FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0,
					      entry->len);

			WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
			WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
		}
		q->queued++;
		n_buf++;
	}

	return n_buf;
}
static void
mtk_wed_wo_rx_complete(struct mtk_wed_wo *wo)
{
	mtk_wed_wo_set_ack(wo, MTK_WED_WO_RXCH_INT_MASK);
	mtk_wed_wo_irq_enable(wo, MTK_WED_WO_RXCH_INT_MASK);
}
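/* Drain completed rx buffers, wrap each one in an skb and hand it to the
 * MCU layer: command responses go to mtk_wed_mcu_rx_event(), everything
 * else to mtk_wed_mcu_rx_unsolicited_event(). Refill and kick the ring
 * afterwards.
 */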
static void
mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	for (;;) {
		struct mtk_wed_mcu_hdr *hdr;
		struct sk_buff *skb;
		void *data;
		u32 len;

		data = mtk_wed_wo_dequeue(wo, q, &len, false);
		if (!data)
			break;

		skb = build_skb(data, q->buf_size);
		if (!skb) {
			skb_free_frag(data);
			continue;
		}

		__skb_put(skb, len);
		if (mtk_wed_mcu_check_msg(wo, skb)) {
			dev_kfree_skb(skb);
			continue;
		}

		hdr = (struct mtk_wed_mcu_hdr *)skb->data;
		if (hdr->flag & cpu_to_le16(MTK_WED_WARP_CMD_FLAG_RSP))
			mtk_wed_mcu_rx_event(wo, skb);
		else
			mtk_wed_mcu_rx_unsolicited_event(wo, skb);
	}

	if (mtk_wed_wo_queue_refill(wo, q, true)) {
		u32 index = (q->head - 1) % q->n_desc;

		mtk_wed_wo_queue_kick(wo, q, index);
	}
}
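/* Hard irq handler: mask further WO interrupts and defer the actual work
 * to the irq tasklet.
 */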
static irqreturn_t
mtk_wed_wo_irq_handler(int irq, void *data)
{
	struct mtk_wed_wo *wo = data;

	mtk_wed_wo_set_isr(wo, 0);
	tasklet_schedule(&wo->mmio.irq_tasklet);

	return IRQ_HANDLED;
}
static void mtk_wed_wo_irq_tasklet(struct tasklet_struct *t)
{
	struct mtk_wed_wo *wo = from_tasklet(wo, t, mmio.irq_tasklet);
	u32 intr, mask;

	/* disable interrupts */
	mtk_wed_wo_set_isr(wo, 0);

	intr = mtk_wed_wo_get_isr(wo);
	intr &= wo->mmio.irq_mask;
	mask = intr & (MTK_WED_WO_RXCH_INT_MASK | MTK_WED_WO_EXCEPTION_INT_MASK);
	mtk_wed_wo_irq_disable(wo, mask);

	if (intr & MTK_WED_WO_RXCH_INT_MASK) {
		mtk_wed_wo_rx_run_queue(wo, &wo->q_rx);
		mtk_wed_wo_rx_complete(wo);
	}
}
/* mtk wed wo hw queues */
static int
mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
		       int n_desc, int buf_size, int index,
		       struct mtk_wed_wo_queue_regs *regs)
{
	q->regs = *regs;
	q->n_desc = n_desc;
	q->buf_size = buf_size;

	q->desc = dmam_alloc_coherent(wo->hw->dev, n_desc * sizeof(*q->desc),
				      &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	q->entry = devm_kzalloc(wo->hw->dev, n_desc * sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	return 0;
}
static void
mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
	dma_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc,
			  q->desc_dma);
}
static void
mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	int i;

	for (i = 0; i < q->n_desc; i++) {
		struct mtk_wed_wo_queue_entry *entry = &q->entry[i];

		if (!entry->buf)
			continue;

		dma_unmap_single(wo->hw->dev, entry->addr, entry->len,
				 DMA_TO_DEVICE);
		skb_free_frag(entry->buf);
		entry->buf = NULL;
	}

	page_frag_cache_drain(&q->cache);
}
static void
mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	for (;;) {
		void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);

		if (!buf)
			break;

		skb_free_frag(buf);
	}

	page_frag_cache_drain(&q->cache);
}
static void
mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
	mtk_wed_mmio_w32(wo, q->regs.desc_base, q->desc_dma);
	mtk_wed_mmio_w32(wo, q->regs.ring_size, q->n_desc);
}
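/* Copy @skb into the next free pre-mapped tx buffer, publish the
 * descriptor and kick both the ring and the WO firmware. The skb is
 * always consumed, even on error.
 */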
int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
			    struct sk_buff *skb)
{
	struct mtk_wed_wo_queue_entry *entry;
	struct mtk_wed_wo_queue_desc *desc;
	int ret = 0, index;
	u32 ctrl;

	q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx);
	index = (q->head + 1) % q->n_desc;
	if (q->tail == index) {
		ret = -ENOMEM;
		goto out;
	}

	entry = &q->entry[index];
	if (skb->len > entry->len) {
		ret = -ENOMEM;
		goto out;
	}

	desc = &q->desc[index];
	q->head = index;

	dma_sync_single_for_cpu(wo->hw->dev, entry->addr, skb->len,
				DMA_TO_DEVICE);
	memcpy(entry->buf, skb->data, skb->len);
	dma_sync_single_for_device(wo->hw->dev, entry->addr, skb->len,
				   DMA_TO_DEVICE);

	ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, skb->len) |
	       MTK_WED_WO_CTL_LAST_SEC0 | MTK_WED_WO_CTL_DMA_DONE;
	WRITE_ONCE(desc->buf0, cpu_to_le32(entry->addr));
	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

	mtk_wed_wo_queue_kick(wo, q, q->head);
	mtk_wed_wo_kickout(wo);
out:
	dev_kfree_skb(skb);

	return ret;
}
static int
mtk_wed_wo_exception_init(struct mtk_wed_wo *wo)
{
	return 0;
}
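/* Map the CCIF resources referenced by the "mediatek,wo-ccif" phandle,
 * request the WO interrupt and set up the tx/rx command rings.
 */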
static int
mtk_wed_wo_hardware_init(struct mtk_wed_wo *wo)
{
	struct mtk_wed_wo_queue_regs regs;
	struct device_node *np;
	int ret;

	np = of_parse_phandle(wo->hw->node, "mediatek,wo-ccif", 0);
	if (!np)
		return -ENODEV;

	wo->mmio.regs = syscon_regmap_lookup_by_phandle(np, NULL);
	if (IS_ERR(wo->mmio.regs)) {
		ret = PTR_ERR(wo->mmio.regs);
		goto error_put;
	}

	wo->mmio.irq = irq_of_parse_and_map(np, 0);
	wo->mmio.irq_mask = MTK_WED_WO_ALL_INT_MASK;
	spin_lock_init(&wo->mmio.lock);
	tasklet_setup(&wo->mmio.irq_tasklet, mtk_wed_wo_irq_tasklet);

	ret = devm_request_irq(wo->hw->dev, wo->mmio.irq,
			       mtk_wed_wo_irq_handler, IRQF_TRIGGER_HIGH,
			       KBUILD_MODNAME, wo);
	if (ret)
		goto error;

	regs.desc_base = MTK_WED_WO_CCIF_DUMMY1;
	regs.ring_size = MTK_WED_WO_CCIF_DUMMY2;
	regs.dma_idx = MTK_WED_WO_CCIF_SHADOW4;
	regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY3;

	ret = mtk_wed_wo_queue_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE,
				     MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM,
				     &regs);
	if (ret)
		goto error;

	mtk_wed_wo_queue_refill(wo, &wo->q_tx, false);
	mtk_wed_wo_queue_reset(wo, &wo->q_tx);

	regs.desc_base = MTK_WED_WO_CCIF_DUMMY5;
	regs.ring_size = MTK_WED_WO_CCIF_DUMMY6;
	regs.dma_idx = MTK_WED_WO_CCIF_SHADOW8;
	regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY7;

	ret = mtk_wed_wo_queue_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE,
				     MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM,
				     &regs);
	if (ret)
		goto error;

	mtk_wed_wo_queue_refill(wo, &wo->q_rx, true);
	mtk_wed_wo_queue_reset(wo, &wo->q_rx);

	/* rx queue irqmask */
	mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);

	return 0;

error:
	devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
error_put:
	of_node_put(np);
	return ret;
}
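/* Tear down in reverse order: mask and free the WO interrupt first, then
 * release the tx/rx queue buffers and descriptor rings.
 */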
static void
mtk_wed_wo_hw_deinit(struct mtk_wed_wo *wo)
{
	/* disable interrupts */
	mtk_wed_wo_set_isr(wo, 0);

	tasklet_disable(&wo->mmio.irq_tasklet);

	disable_irq(wo->mmio.irq);
	devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);

	mtk_wed_wo_queue_tx_clean(wo, &wo->q_tx);
	mtk_wed_wo_queue_rx_clean(wo, &wo->q_rx);
	mtk_wed_wo_queue_free(wo, &wo->q_tx);
	mtk_wed_wo_queue_free(wo, &wo->q_rx);
}
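/* Entry point called from the WED core: allocate the WO state, bring up
 * the CCIF hardware and initialize the WO MCU firmware interface.
 */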
int mtk_wed_wo_init(struct mtk_wed_hw *hw)
{
	struct mtk_wed_wo *wo;
	int ret;

	wo = devm_kzalloc(hw->dev, sizeof(*wo), GFP_KERNEL);
	if (!wo)
		return -ENOMEM;

	hw->wed_wo = wo;
	wo->hw = hw;

	ret = mtk_wed_wo_hardware_init(wo);
	if (ret)
		return ret;

	ret = mtk_wed_mcu_init(wo);
	if (ret)
		return ret;

	return mtk_wed_wo_exception_init(wo);
}
void mtk_wed_wo_deinit(struct mtk_wed_hw *hw)
{
	struct mtk_wed_wo *wo = hw->wed_wo;

	mtk_wed_wo_hw_deinit(wo);
}