// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for AMD Queue-based DMA Subsystem
 *
 * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
 */
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/platform_data/amd_qdma.h>
#include <linux/regmap.h>

#include "qdma.h"
#define CHAN_STR(q)		(((q)->dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H")
#define QDMA_REG_OFF(d, r)	((d)->roffs[r].off)
/* MMIO regmap config for all QDMA registers */
static const struct regmap_config qdma_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};
static inline struct qdma_queue *to_qdma_queue(struct dma_chan *chan)
{
	return container_of(chan, struct qdma_queue, vchan.chan);
}
static inline struct qdma_mm_vdesc *to_qdma_vdesc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct qdma_mm_vdesc, vdesc);
}
static inline u32 qdma_get_intr_ring_idx(struct qdma_device *qdev)
{
	u32 idx;

	idx = qdev->qintr_rings[qdev->qintr_ring_idx++].ridx;
	qdev->qintr_ring_idx %= qdev->qintr_ring_num;

	return idx;
}
static u64 qdma_get_field(const struct qdma_device *qdev, const u32 *data,
			  enum qdma_reg_fields field)
{
	const struct qdma_reg_field *f = &qdev->rfields[field];
	u16 low_pos, hi_pos, low_bit, hi_bit;
	u64 value = 0, mask;

	low_pos = f->lsb / BITS_PER_TYPE(*data);
	hi_pos = f->msb / BITS_PER_TYPE(*data);

	if (low_pos == hi_pos) {
		low_bit = f->lsb % BITS_PER_TYPE(*data);
		hi_bit = f->msb % BITS_PER_TYPE(*data);
		mask = GENMASK(hi_bit, low_bit);
		value = (data[low_pos] & mask) >> low_bit;
	} else if (hi_pos == low_pos + 1) {
		low_bit = f->lsb % BITS_PER_TYPE(*data);
		hi_bit = low_bit + (f->msb - f->lsb);
		value = ((u64)data[hi_pos] << BITS_PER_TYPE(*data)) |
			data[low_pos];
		mask = GENMASK_ULL(hi_bit, low_bit);
		value = (value & mask) >> low_bit;
	} else {
		hi_bit = f->msb % BITS_PER_TYPE(*data);
		mask = GENMASK(hi_bit, 0);
		value = data[hi_pos] & mask;
		low_bit = f->msb - f->lsb - hi_bit;
		value <<= low_bit;
		low_bit -= 32;
		value |= (u64)data[hi_pos - 1] << low_bit;
		mask = GENMASK(31, 32 - low_bit);
		value |= (data[hi_pos - 2] & mask) >> low_bit;
	}

	return value;
}
static void qdma_set_field(const struct qdma_device *qdev, u32 *data,
			   enum qdma_reg_fields field, u64 value)
{
	const struct qdma_reg_field *f = &qdev->rfields[field];
	u16 low_pos, hi_pos, low_bit;

	low_pos = f->lsb / BITS_PER_TYPE(*data);
	hi_pos = f->msb / BITS_PER_TYPE(*data);
	low_bit = f->lsb % BITS_PER_TYPE(*data);

	data[low_pos++] |= value << low_bit;
	if (low_pos <= hi_pos)
		data[low_pos++] |= (u32)(value >> (32 - low_bit));
	if (low_pos <= hi_pos)
		data[low_pos] |= (u32)(value >> (64 - low_bit));
}
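
/*
 * Illustrative sketch (not part of the driver): packing and reading back a
 * field that spans more than one 32-bit context word with the helpers above.
 * QDMA_REGF_DESC_BASE is used here only as an example of a wide field; its
 * exact lsb/msb come from the qdma_regfs_default table.
 *
 *	u32 ctxt[QDMA_CTXT_REGMAP_LEN] = {0};
 *	u64 base = 0x123456789ULL;
 *
 *	qdma_set_field(qdev, ctxt, QDMA_REGF_DESC_BASE, base);
 *	WARN_ON(qdma_get_field(qdev, ctxt, QDMA_REGF_DESC_BASE) != base);
 */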
static inline int qdma_reg_write(const struct qdma_device *qdev,
				 const u32 *data, enum qdma_regs reg)
{
	const struct qdma_reg *r = &qdev->roffs[reg];
	int ret;

	if (r->count > 1)
		ret = regmap_bulk_write(qdev->regmap, r->off, data, r->count);
	else
		ret = regmap_write(qdev->regmap, r->off, *data);

	return ret;
}
static inline int qdma_reg_read(const struct qdma_device *qdev, u32 *data,
				enum qdma_regs reg)
{
	const struct qdma_reg *r = &qdev->roffs[reg];
	int ret;

	if (r->count > 1)
		ret = regmap_bulk_read(qdev->regmap, r->off, data, r->count);
	else
		ret = regmap_read(qdev->regmap, r->off, data);

	return ret;
}
static int qdma_context_cmd_execute(const struct qdma_device *qdev,
				    enum qdma_ctxt_type type,
				    enum qdma_ctxt_cmd cmd, u16 index)
{
	u32 value = 0;
	int ret;

	qdma_set_field(qdev, &value, QDMA_REGF_CMD_INDX, index);
	qdma_set_field(qdev, &value, QDMA_REGF_CMD_CMD, cmd);
	qdma_set_field(qdev, &value, QDMA_REGF_CMD_TYPE, type);

	ret = qdma_reg_write(qdev, &value, QDMA_REGO_CTXT_CMD);
	if (ret)
		return ret;

	ret = regmap_read_poll_timeout(qdev->regmap,
				       QDMA_REG_OFF(qdev, QDMA_REGO_CTXT_CMD),
				       value,
				       !qdma_get_field(qdev, &value,
						       QDMA_REGF_CMD_BUSY),
				       QDMA_POLL_INTRVL_US,
				       QDMA_POLL_TIMEOUT_US);
	if (ret)
		qdma_err(qdev, "Context command execution timed out");

	return ret;
}
static int qdma_context_write_data(const struct qdma_device *qdev,
				   const u32 *data)
{
	u32 mask[QDMA_CTXT_REGMAP_LEN];
	int ret;

	memset(mask, ~0, sizeof(mask));

	ret = qdma_reg_write(qdev, mask, QDMA_REGO_CTXT_MASK);
	if (ret)
		return ret;

	ret = qdma_reg_write(qdev, data, QDMA_REGO_CTXT_DATA);

	return ret;
}
static void qdma_prep_sw_desc_context(const struct qdma_device *qdev,
				      const struct qdma_ctxt_sw_desc *ctxt,
				      u32 *data)
{
	memset(data, 0, QDMA_CTXT_REGMAP_LEN * sizeof(*data));
	qdma_set_field(qdev, data, QDMA_REGF_DESC_BASE, ctxt->desc_base);
	qdma_set_field(qdev, data, QDMA_REGF_IRQ_VEC, ctxt->vec);
	qdma_set_field(qdev, data, QDMA_REGF_FUNCTION_ID, qdev->fid);

	qdma_set_field(qdev, data, QDMA_REGF_DESC_SIZE, QDMA_DESC_SIZE_32B);
	qdma_set_field(qdev, data, QDMA_REGF_RING_ID, QDMA_DEFAULT_RING_ID);
	qdma_set_field(qdev, data, QDMA_REGF_QUEUE_MODE, QDMA_QUEUE_OP_MM);
	qdma_set_field(qdev, data, QDMA_REGF_IRQ_ENABLE, 1);
	qdma_set_field(qdev, data, QDMA_REGF_WBK_ENABLE, 1);
	qdma_set_field(qdev, data, QDMA_REGF_WBI_CHECK, 1);
	qdma_set_field(qdev, data, QDMA_REGF_IRQ_ARM, 1);
	qdma_set_field(qdev, data, QDMA_REGF_IRQ_AGG, 1);
	qdma_set_field(qdev, data, QDMA_REGF_WBI_INTVL_ENABLE, 1);
	qdma_set_field(qdev, data, QDMA_REGF_QUEUE_ENABLE, 1);
	qdma_set_field(qdev, data, QDMA_REGF_MRKR_DISABLE, 1);
}
static void qdma_prep_intr_context(const struct qdma_device *qdev,
				   const struct qdma_ctxt_intr *ctxt,
				   u32 *data)
{
	memset(data, 0, QDMA_CTXT_REGMAP_LEN * sizeof(*data));
	qdma_set_field(qdev, data, QDMA_REGF_INTR_AGG_BASE, ctxt->agg_base);
	qdma_set_field(qdev, data, QDMA_REGF_INTR_VECTOR, ctxt->vec);
	qdma_set_field(qdev, data, QDMA_REGF_INTR_SIZE, ctxt->size);
	qdma_set_field(qdev, data, QDMA_REGF_INTR_VALID, ctxt->valid);
	qdma_set_field(qdev, data, QDMA_REGF_INTR_COLOR, ctxt->color);
	qdma_set_field(qdev, data, QDMA_REGF_INTR_FUNCTION_ID, qdev->fid);
}
static void qdma_prep_fmap_context(const struct qdma_device *qdev,
				   const struct qdma_ctxt_fmap *ctxt,
				   u32 *data)
{
	memset(data, 0, QDMA_CTXT_REGMAP_LEN * sizeof(*data));
	qdma_set_field(qdev, data, QDMA_REGF_QUEUE_BASE, ctxt->qbase);
	qdma_set_field(qdev, data, QDMA_REGF_QUEUE_MAX, ctxt->qmax);
}
/*
 * Program the indirect context register space
 *
 * Once the queue is enabled, the context is dynamically updated by hardware.
 * Any modification of the context through this API while the queue is enabled
 * can result in unexpected behavior. Reading the context while the queue is
 * enabled is not recommended as it can result in reduced performance.
 */
static int qdma_prog_context(struct qdma_device *qdev, enum qdma_ctxt_type type,
			     enum qdma_ctxt_cmd cmd, u16 index, u32 *ctxt)
{
	int ret;

	mutex_lock(&qdev->ctxt_lock);
	if (cmd == QDMA_CTXT_WRITE) {
		ret = qdma_context_write_data(qdev, ctxt);
		if (ret)
			goto failed;
	}

	ret = qdma_context_cmd_execute(qdev, type, cmd, index);
	if (ret)
		goto failed;

	if (cmd == QDMA_CTXT_READ) {
		ret = qdma_reg_read(qdev, ctxt, QDMA_REGO_CTXT_DATA);
		if (ret)
			goto failed;
	}

failed:
	mutex_unlock(&qdev->ctxt_lock);

	return ret;
}
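
/*
 * Illustrative sketch (not part of the driver): a context read through the
 * helper above only needs the command/busy-poll step; the helper then copies
 * QDMA_REGO_CTXT_DATA back into the caller's buffer, where it can be decoded
 * with qdma_get_field(), e.g.:
 *
 *	u32 data[QDMA_CTXT_REGMAP_LEN] = {0};
 *
 *	ret = qdma_prog_context(qdev, QDMA_CTXT_DESC_SW_H2C, QDMA_CTXT_READ,
 *				qid, data);
 *	if (!ret)
 *		enabled = qdma_get_field(qdev, data, QDMA_REGF_QUEUE_ENABLE);
 */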
static int qdma_check_queue_status(struct qdma_device *qdev,
				   enum dma_transfer_direction dir, u16 qid)
{
	u32 status, data[QDMA_CTXT_REGMAP_LEN] = {0};
	enum qdma_ctxt_type type;
	int ret;

	if (dir == DMA_MEM_TO_DEV)
		type = QDMA_CTXT_DESC_SW_H2C;
	else
		type = QDMA_CTXT_DESC_SW_C2H;
	ret = qdma_prog_context(qdev, type, QDMA_CTXT_READ, qid, data);
	if (ret)
		return ret;

	status = qdma_get_field(qdev, data, QDMA_REGF_QUEUE_ENABLE);
	if (status) {
		qdma_err(qdev, "queue %d already in use", qid);
		return -EBUSY;
	}

	return 0;
}
static int qdma_clear_queue_context(const struct qdma_queue *queue)
{
	enum qdma_ctxt_type h2c_types[] = { QDMA_CTXT_DESC_SW_H2C,
					    QDMA_CTXT_DESC_HW_H2C,
					    QDMA_CTXT_DESC_CR_H2C,
					    QDMA_CTXT_PFTCH, };
	enum qdma_ctxt_type c2h_types[] = { QDMA_CTXT_DESC_SW_C2H,
					    QDMA_CTXT_DESC_HW_C2H,
					    QDMA_CTXT_DESC_CR_C2H,
					    QDMA_CTXT_PFTCH, };
	struct qdma_device *qdev = queue->qdev;
	enum qdma_ctxt_type *type;
	int ret, num, i;

	if (queue->dir == DMA_MEM_TO_DEV) {
		type = h2c_types;
		num = ARRAY_SIZE(h2c_types);
	} else {
		type = c2h_types;
		num = ARRAY_SIZE(c2h_types);
	}
	for (i = 0; i < num; i++) {
		ret = qdma_prog_context(qdev, type[i], QDMA_CTXT_CLEAR,
					queue->qid, NULL);
		if (ret) {
			qdma_err(qdev, "Failed to clear ctxt %d", type[i]);
			return ret;
		}
	}

	return 0;
}
static int qdma_setup_fmap_context(struct qdma_device *qdev)
{
	u32 ctxt[QDMA_CTXT_REGMAP_LEN];
	struct qdma_ctxt_fmap fmap;
	int ret;

	ret = qdma_prog_context(qdev, QDMA_CTXT_FMAP, QDMA_CTXT_CLEAR,
				qdev->fid, NULL);
	if (ret) {
		qdma_err(qdev, "Failed to clear fmap context");
		return ret;
	}

	fmap.qbase = 0;
	fmap.qmax = qdev->chan_num * 2;
	qdma_prep_fmap_context(qdev, &fmap, ctxt);
	ret = qdma_prog_context(qdev, QDMA_CTXT_FMAP, QDMA_CTXT_WRITE,
				qdev->fid, ctxt);
	if (ret)
		qdma_err(qdev, "Failed to set up fmap context, ret %d", ret);

	return ret;
}
static int qdma_setup_queue_context(struct qdma_device *qdev,
				    const struct qdma_ctxt_sw_desc *sw_desc,
				    enum dma_transfer_direction dir, u16 qid)
{
	u32 ctxt[QDMA_CTXT_REGMAP_LEN];
	enum qdma_ctxt_type type;
	int ret;

	if (dir == DMA_MEM_TO_DEV)
		type = QDMA_CTXT_DESC_SW_H2C;
	else
		type = QDMA_CTXT_DESC_SW_C2H;

	qdma_prep_sw_desc_context(qdev, sw_desc, ctxt);
	/* Set up the SW descriptor context */
	ret = qdma_prog_context(qdev, type, QDMA_CTXT_WRITE, qid, ctxt);
	if (ret)
		qdma_err(qdev, "Failed to set up SW desc ctxt for queue: %d",
			 qid);

	return ret;
}
/*
 * Enable or disable the memory-mapped DMA engines
 * 1: enable, 0: disable
 */
static int qdma_sgdma_control(struct qdma_device *qdev, u32 ctrl)
{
	int ret;

	ret = qdma_reg_write(qdev, &ctrl, QDMA_REGO_MM_H2C_CTRL);
	ret |= qdma_reg_write(qdev, &ctrl, QDMA_REGO_MM_C2H_CTRL);

	return ret;
}
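
/*
 * Descriptive note (not part of the upstream comments): both directions are
 * toggled together, so qdma_device_setup() enables the engines with
 * qdma_sgdma_control(qdev, 1) and the remove/error paths disable them with
 * qdma_sgdma_control(qdev, 0).
 */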
static int qdma_get_hw_info(struct qdma_device *qdev)
{
	struct qdma_platdata *pdata = dev_get_platdata(&qdev->pdev->dev);
	u32 value = 0;
	int ret;

	ret = qdma_reg_read(qdev, &value, QDMA_REGO_QUEUE_COUNT);
	if (ret)
		return ret;

	value = qdma_get_field(qdev, &value, QDMA_REGF_QUEUE_COUNT) + 1;
	if (pdata->max_mm_channels * 2 > value) {
		qdma_err(qdev, "not enough hw queues %d", value);
		return -EINVAL;
	}
	qdev->chan_num = pdata->max_mm_channels;

	ret = qdma_reg_read(qdev, &qdev->fid, QDMA_REGO_FUNC_ID);
	if (ret)
		return ret;

	qdma_info(qdev, "max channel %d, function id %d",
		  qdev->chan_num, qdev->fid);

	return 0;
}
static inline int qdma_update_pidx(const struct qdma_queue *queue, u16 pidx)
{
	struct qdma_device *qdev = queue->qdev;

	return regmap_write(qdev->regmap, queue->pidx_reg,
			    pidx | QDMA_QUEUE_ARM_BIT);
}
static inline int qdma_update_cidx(const struct qdma_queue *queue,
				   u16 ridx, u16 cidx)
{
	struct qdma_device *qdev = queue->qdev;

	return regmap_write(qdev->regmap, queue->cidx_reg,
			    ((u32)ridx << 16) | cidx);
}
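
/*
 * Descriptive note (not part of the upstream comments): the two helpers above
 * ring the per-queue doorbells in the direct-mapped register space. The PIDX
 * write also sets QDMA_QUEUE_ARM_BIT to keep the queue interrupt armed, and
 * the CIDX write packs the interrupt ring index into the upper 16 bits and
 * the software consumer index into the lower 16 bits.
 */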
/**
 * qdma_free_vdesc - Free descriptor
 * @vdesc: Virtual DMA descriptor
 */
static void qdma_free_vdesc(struct virt_dma_desc *vdesc)
{
	struct qdma_mm_vdesc *vd = to_qdma_vdesc(vdesc);

	kfree(vd);
}
static int qdma_alloc_queues(struct qdma_device *qdev,
			     enum dma_transfer_direction dir)
{
	struct qdma_queue *q, **queues;
	u32 i, pidx_base;
	int ret;

	if (dir == DMA_MEM_TO_DEV) {
		queues = &qdev->h2c_queues;
		pidx_base = QDMA_REG_OFF(qdev, QDMA_REGO_H2C_PIDX);
	} else {
		queues = &qdev->c2h_queues;
		pidx_base = QDMA_REG_OFF(qdev, QDMA_REGO_C2H_PIDX);
	}

	*queues = devm_kcalloc(&qdev->pdev->dev, qdev->chan_num, sizeof(*q),
			       GFP_KERNEL);
	if (!*queues)
		return -ENOMEM;

	for (i = 0; i < qdev->chan_num; i++) {
		ret = qdma_check_queue_status(qdev, dir, i);
		if (ret)
			return ret;

		q = &(*queues)[i];
		q->ring_size = QDMA_DEFAULT_RING_SIZE;
		q->idx_mask = q->ring_size - 2;
		q->qdev = qdev;
		q->dir = dir;
		q->qid = i;
		q->pidx_reg = pidx_base + i * QDMA_DMAP_REG_STRIDE;
		q->cidx_reg = QDMA_REG_OFF(qdev, QDMA_REGO_INTR_CIDX) +
				i * QDMA_DMAP_REG_STRIDE;
		q->vchan.desc_free = qdma_free_vdesc;
		vchan_init(&q->vchan, &qdev->dma_dev);
	}

	return 0;
}
static int qdma_device_verify(struct qdma_device *qdev)
{
	u32 value;
	int ret;

	ret = regmap_read(qdev->regmap, QDMA_IDENTIFIER_REGOFF, &value);
	if (ret)
		return ret;

	value = FIELD_GET(QDMA_IDENTIFIER_MASK, value);
	if (value != QDMA_IDENTIFIER) {
		qdma_err(qdev, "Invalid identifier");
		return -ENODEV;
	}
	qdev->rfields = qdma_regfs_default;
	qdev->roffs = qdma_regos_default;

	return 0;
}
static int qdma_device_setup(struct qdma_device *qdev)
{
	u32 ring_sz = QDMA_DEFAULT_RING_SIZE;
	int ret;

	ret = qdma_setup_fmap_context(qdev);
	if (ret) {
		qdma_err(qdev, "Failed to set up fmap context");
		return ret;
	}

	/* Set up the global ring buffer size at the QDMA_DEFAULT_RING_ID index */
	ret = qdma_reg_write(qdev, &ring_sz, QDMA_REGO_RING_SIZE);
	if (ret) {
		qdma_err(qdev, "Failed to setup ring %d of size %ld",
			 QDMA_DEFAULT_RING_ID, QDMA_DEFAULT_RING_SIZE);
		return ret;
	}

	/* Enable the memory-mapped DMA engine in both directions */
	ret = qdma_sgdma_control(qdev, 1);
	if (ret) {
		qdma_err(qdev, "Failed to enable SGDMA, error %d", ret);
		return ret;
	}

	ret = qdma_alloc_queues(qdev, DMA_MEM_TO_DEV);
	if (ret) {
		qdma_err(qdev, "Failed to alloc H2C queues, ret %d", ret);
		return ret;
	}

	ret = qdma_alloc_queues(qdev, DMA_DEV_TO_MEM);
	if (ret) {
		qdma_err(qdev, "Failed to alloc C2H queues, ret %d", ret);
		return ret;
	}

	return 0;
}
/**
 * qdma_free_queue_resources() - Free queue resources
 * @chan: DMA channel
 */
static void qdma_free_queue_resources(struct dma_chan *chan)
{
	struct qdma_queue *queue = to_qdma_queue(chan);
	struct qdma_device *qdev = queue->qdev;
	struct qdma_platdata *pdata;

	qdma_clear_queue_context(queue);
	vchan_free_chan_resources(&queue->vchan);
	pdata = dev_get_platdata(&qdev->pdev->dev);
	dma_free_coherent(pdata->dma_dev, queue->ring_size * QDMA_MM_DESC_SIZE,
			  queue->desc_base, queue->dma_desc_base);
}
/**
 * qdma_alloc_queue_resources() - Allocate queue resources
 * @chan: DMA channel
 */
static int qdma_alloc_queue_resources(struct dma_chan *chan)
{
	struct qdma_queue *queue = to_qdma_queue(chan);
	struct qdma_device *qdev = queue->qdev;
	struct qdma_ctxt_sw_desc desc;
	struct qdma_platdata *pdata;
	size_t size;
	int ret;

	ret = qdma_clear_queue_context(queue);
	if (ret)
		return ret;

	pdata = dev_get_platdata(&qdev->pdev->dev);
	size = queue->ring_size * QDMA_MM_DESC_SIZE;
	queue->desc_base = dma_alloc_coherent(pdata->dma_dev, size,
					      &queue->dma_desc_base,
					      GFP_KERNEL);
	if (!queue->desc_base) {
		qdma_err(qdev, "Failed to allocate descriptor ring");
		return -ENOMEM;
	}

	/* Set up the SW descriptor queue context for DMA memory map mode */
	desc.vec = qdma_get_intr_ring_idx(qdev);
	desc.desc_base = queue->dma_desc_base;
	ret = qdma_setup_queue_context(qdev, &desc, queue->dir, queue->qid);
	if (ret) {
		qdma_err(qdev, "Failed to setup SW desc ctxt for %s",
			 chan->name);
		dma_free_coherent(pdata->dma_dev, size, queue->desc_base,
				  queue->dma_desc_base);
		return ret;
	}

	queue->pidx = 0;
	queue->cidx = 0;

	return 0;
}
static bool qdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct qdma_queue *queue = to_qdma_queue(chan);
	struct qdma_queue_info *info = param;

	return info->dir == queue->dir;
}
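
/*
 * Illustrative sketch (not part of the driver): channel lookup goes through
 * the dma_slave_map installed in probe (filter.map/filter.fn below). The
 * platform code provides entries whose .param points at a struct
 * qdma_queue_info carrying the wanted direction, and a client then requests
 * the mapped name. The names and devices here are placeholders:
 *
 *	static struct qdma_queue_info h2c_info = { .dir = DMA_MEM_TO_DEV };
 *	static const struct dma_slave_map map[] = {
 *		{ "client-dev", "h2c", &h2c_info },
 *	};
 *	...
 *	chan = dma_request_chan(client_dev, "h2c");
 */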
static int qdma_xfer_start(struct qdma_queue *queue)
{
	struct qdma_device *qdev = queue->qdev;
	int ret;

	if (!vchan_next_desc(&queue->vchan))
		return 0;

	qdma_dbg(qdev, "Txn kickoff with P: %d for %s%d",
		 queue->issued_vdesc->pidx, CHAN_STR(queue), queue->qid);

	ret = qdma_update_pidx(queue, queue->issued_vdesc->pidx);
	if (ret) {
		qdma_err(qdev, "Failed to update PIDX to %d for %s queue: %d",
			 queue->pidx, CHAN_STR(queue), queue->qid);
	}

	return ret;
}
static void qdma_issue_pending(struct dma_chan *chan)
{
	struct qdma_queue *queue = to_qdma_queue(chan);
	unsigned long flags;

	spin_lock_irqsave(&queue->vchan.lock, flags);
	if (vchan_issue_pending(&queue->vchan)) {
		if (queue->submitted_vdesc) {
			queue->issued_vdesc = queue->submitted_vdesc;
			queue->submitted_vdesc = NULL;
		}
		qdma_xfer_start(queue);
	}

	spin_unlock_irqrestore(&queue->vchan.lock, flags);
}
static struct qdma_mm_desc *qdma_get_desc(struct qdma_queue *q)
{
	struct qdma_mm_desc *desc;

	if (((q->pidx + 1) & q->idx_mask) == q->cidx)
		return NULL;

	desc = q->desc_base + q->pidx;
	q->pidx = (q->pidx + 1) & q->idx_mask;

	return desc;
}
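
/*
 * Descriptive note (not part of the upstream comments): the producer index
 * wraps with idx_mask = ring_size - 2 (see qdma_alloc_queues), so the last
 * ring entry is never handed out as a data descriptor, and the ring is
 * treated as full one slot early so that pidx == cidx always means "empty".
 */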
static int qdma_hw_enqueue(struct qdma_queue *q, struct qdma_mm_vdesc *vdesc)
{
	struct qdma_mm_desc *desc;
	struct scatterlist *sg;
	u64 addr, *src, *dst;
	u32 rest, len;
	int ret = 0;
	u32 i;

	if (!vdesc->sg_len)
		return 0;

	if (q->dir == DMA_MEM_TO_DEV) {
		dst = &vdesc->dev_addr;
		src = &addr;
	} else {
		dst = &addr;
		src = &vdesc->dev_addr;
	}

	for_each_sg(vdesc->sgl, sg, vdesc->sg_len, i) {
		addr = sg_dma_address(sg) + vdesc->sg_off;
		rest = sg_dma_len(sg) - vdesc->sg_off;
		while (rest) {
			len = min_t(u32, rest, QDMA_MM_DESC_MAX_LEN);
			desc = qdma_get_desc(q);
			if (!desc) {
				ret = -EBUSY;
				goto out;
			}

			desc->src_addr = cpu_to_le64(*src);
			desc->dst_addr = cpu_to_le64(*dst);
			desc->len = cpu_to_le32(len);

			vdesc->dev_addr += len;
			vdesc->sg_off += len;
			vdesc->pending_descs++;
			addr += len;
			rest -= len;
		}
		vdesc->sg_off = 0;
	}
out:
	vdesc->sg_len -= i;
	vdesc->pidx = q->pidx;

	return ret;
}
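
/*
 * Descriptive note (not part of the upstream comments): each scatterlist
 * entry is split into hardware descriptors of at most QDMA_MM_DESC_MAX_LEN
 * bytes; dev_addr, sg_off and pending_descs record how far the transfer got
 * so that an -EBUSY (ring full) return can be resumed from the same spot on
 * the next call.
 */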
static void qdma_fill_pending_vdesc(struct qdma_queue *q)
{
	struct virt_dma_chan *vc = &q->vchan;
	struct qdma_mm_vdesc *vdesc = NULL;
	struct virt_dma_desc *vd;
	int ret;

	if (!list_empty(&vc->desc_issued)) {
		vd = &q->issued_vdesc->vdesc;
		list_for_each_entry_from(vd, &vc->desc_issued, node) {
			vdesc = to_qdma_vdesc(vd);
			ret = qdma_hw_enqueue(q, vdesc);
			if (ret) {
				q->issued_vdesc = vdesc;
				return;
			}
		}
		q->issued_vdesc = vdesc;
	}

	if (list_empty(&vc->desc_submitted))
		return;

	if (q->submitted_vdesc)
		vd = &q->submitted_vdesc->vdesc;
	else
		vd = list_first_entry(&vc->desc_submitted, typeof(*vd), node);

	list_for_each_entry_from(vd, &vc->desc_submitted, node) {
		vdesc = to_qdma_vdesc(vd);
		ret = qdma_hw_enqueue(q, vdesc);
		if (ret)
			break;
	}
	q->submitted_vdesc = vdesc;
}
static dma_cookie_t qdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct qdma_queue *q = to_qdma_queue(&vc->chan);
	struct virt_dma_desc *vd;
	unsigned long flags;
	dma_cookie_t cookie;

	vd = container_of(tx, struct virt_dma_desc, tx);
	spin_lock_irqsave(&vc->lock, flags);
	cookie = dma_cookie_assign(tx);

	list_move_tail(&vd->node, &vc->desc_submitted);
	qdma_fill_pending_vdesc(q);
	spin_unlock_irqrestore(&vc->lock, flags);

	return cookie;
}
static struct dma_async_tx_descriptor *
qdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
		    unsigned int sg_len, enum dma_transfer_direction dir,
		    unsigned long flags, void *context)
{
	struct qdma_queue *q = to_qdma_queue(chan);
	struct dma_async_tx_descriptor *tx;
	struct qdma_mm_vdesc *vdesc;

	vdesc = kzalloc(sizeof(*vdesc), GFP_NOWAIT);
	if (!vdesc)
		return NULL;
	vdesc->sgl = sgl;
	vdesc->sg_len = sg_len;
	if (dir == DMA_MEM_TO_DEV)
		vdesc->dev_addr = q->cfg.dst_addr;
	else
		vdesc->dev_addr = q->cfg.src_addr;

	tx = vchan_tx_prep(&q->vchan, &vdesc->vdesc, flags);
	tx->tx_submit = qdma_tx_submit;

	return tx;
}
static int qdma_device_config(struct dma_chan *chan,
			      struct dma_slave_config *cfg)
{
	struct qdma_queue *q = to_qdma_queue(chan);

	memcpy(&q->cfg, cfg, sizeof(*cfg));

	return 0;
}
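
/*
 * Illustrative sketch (not part of the driver): a typical memory-mapped
 * transfer from a client driver sets the device-side address through
 * dmaengine_slave_config() and then submits a mapped scatterlist. The
 * variable names are placeholders:
 *
 *	struct dma_slave_config cfg = { .dst_addr = card_ddr_addr };
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	tx = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV, 0);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */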
static int qdma_arm_err_intr(const struct qdma_device *qdev)
{
	u32 value = 0;

	qdma_set_field(qdev, &value, QDMA_REGF_ERR_INT_FUNC, qdev->fid);
	qdma_set_field(qdev, &value, QDMA_REGF_ERR_INT_VEC, qdev->err_irq_idx);
	qdma_set_field(qdev, &value, QDMA_REGF_ERR_INT_ARM, 1);

	return qdma_reg_write(qdev, &value, QDMA_REGO_ERR_INT);
}
static irqreturn_t qdma_error_isr(int irq, void *data)
{
	struct qdma_device *qdev = data;
	u32 err_stat = 0;
	int ret;

	ret = qdma_reg_read(qdev, &err_stat, QDMA_REGO_ERR_STAT);
	if (ret) {
		qdma_err(qdev, "read error state failed, ret %d", ret);
		goto out;
	}

	qdma_err(qdev, "global error %d", err_stat);
	ret = qdma_reg_write(qdev, &err_stat, QDMA_REGO_ERR_STAT);
	if (ret)
		qdma_err(qdev, "clear error state failed, ret %d", ret);

out:
	qdma_arm_err_intr(qdev);

	return IRQ_HANDLED;
}
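
/*
 * Descriptive note (not part of the upstream comments): the queue interrupt
 * handler below walks one interrupt aggregation ring. Each 64-bit entry
 * encodes the queue id, direction, hardware CIDX and a color bit; entries are
 * consumed until the color stops matching the software's expected color,
 * which marks how far the hardware has written.
 */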
static irqreturn_t qdma_queue_isr(int irq, void *data)
{
	struct qdma_intr_ring *intr = data;
	struct qdma_queue *q = NULL;
	struct qdma_device *qdev;
	u32 index, comp_desc;
	u64 intr_ent;
	u8 color;
	int ret;
	u16 qid;

	qdev = intr->qdev;
	index = intr->cidx;
	while (1) {
		struct virt_dma_desc *vd;
		struct qdma_mm_vdesc *vdesc;
		unsigned long flags;
		u32 cidx;

		intr_ent = le64_to_cpu(intr->base[index]);
		color = FIELD_GET(QDMA_INTR_MASK_COLOR, intr_ent);
		if (color != intr->color)
			break;

		qid = FIELD_GET(QDMA_INTR_MASK_QID, intr_ent);
		if (FIELD_GET(QDMA_INTR_MASK_TYPE, intr_ent))
			q = qdev->c2h_queues;
		else
			q = qdev->h2c_queues;
		q += qid;

		cidx = FIELD_GET(QDMA_INTR_MASK_CIDX, intr_ent);

		spin_lock_irqsave(&q->vchan.lock, flags);
		comp_desc = (cidx - q->cidx) & q->idx_mask;

		vd = vchan_next_desc(&q->vchan);
		if (!vd)
			goto skip;

		vdesc = to_qdma_vdesc(vd);
		while (comp_desc > vdesc->pending_descs) {
			list_del(&vd->node);
			vchan_cookie_complete(vd);
			comp_desc -= vdesc->pending_descs;
			vd = vchan_next_desc(&q->vchan);
			vdesc = to_qdma_vdesc(vd);
		}
		vdesc->pending_descs -= comp_desc;
		if (!vdesc->pending_descs && QDMA_VDESC_QUEUED(vdesc)) {
			list_del(&vd->node);
			vchan_cookie_complete(vd);
		}
		q->cidx = cidx;

		qdma_fill_pending_vdesc(q);
		qdma_xfer_start(q);

skip:
		spin_unlock_irqrestore(&q->vchan.lock, flags);

		/*
		 * Wrap the index value and flip the expected color value if
		 * the interrupt aggregation PIDX has wrapped around.
		 */
		index++;
		index &= QDMA_INTR_RING_IDX_MASK;
		if (!index)
			intr->color = !intr->color;
	}

	/*
	 * Update the software interrupt aggregation ring CIDX if a valid entry
	 * was found.
	 */
	if (q) {
		qdma_dbg(qdev, "update intr ring%d %d", intr->ridx, index);

		/*
		 * Record the last read index of the status descriptor from the
		 * interrupt aggregation ring.
		 */
		intr->cidx = index;

		ret = qdma_update_cidx(q, intr->ridx, index);
		if (ret) {
			qdma_err(qdev, "Failed to update IRQ CIDX");
			return IRQ_NONE;
		}
	}

	return IRQ_HANDLED;
}
static int qdma_init_error_irq(struct qdma_device *qdev)
{
	struct device *dev = &qdev->pdev->dev;
	int ret;
	u32 vec;

	vec = qdev->queue_irq_start - 1;

	ret = devm_request_threaded_irq(dev, vec, NULL, qdma_error_isr,
					IRQF_ONESHOT, "amd-qdma-error", qdev);
	if (ret) {
		qdma_err(qdev, "Failed to request error IRQ vector: %d", vec);
		return ret;
	}

	ret = qdma_arm_err_intr(qdev);
	if (ret)
		qdma_err(qdev, "Failed to arm err interrupt, ret %d", ret);

	return ret;
}
static int qdmam_alloc_qintr_rings(struct qdma_device *qdev)
{
	struct qdma_platdata *pdata = dev_get_platdata(&qdev->pdev->dev);
	struct device *dev = &qdev->pdev->dev;
	u32 ctxt[QDMA_CTXT_REGMAP_LEN];
	struct qdma_intr_ring *ring;
	struct qdma_ctxt_intr intr_ctxt;
	u32 vector;
	int ret, i;

	qdev->qintr_ring_num = qdev->queue_irq_num;
	qdev->qintr_rings = devm_kcalloc(dev, qdev->qintr_ring_num,
					 sizeof(*qdev->qintr_rings),
					 GFP_KERNEL);
	if (!qdev->qintr_rings)
		return -ENOMEM;

	vector = qdev->queue_irq_start;
	for (i = 0; i < qdev->qintr_ring_num; i++, vector++) {
		ring = &qdev->qintr_rings[i];
		ring->qdev = qdev;
		ring->msix_id = qdev->err_irq_idx + i + 1;
		ring->ridx = i;
		ring->color = 1;
		ring->base = dmam_alloc_coherent(pdata->dma_dev,
						 QDMA_INTR_RING_SIZE,
						 &ring->dev_base, GFP_KERNEL);
		if (!ring->base) {
			qdma_err(qdev, "Failed to alloc intr ring %d", i);
			return -ENOMEM;
		}
		intr_ctxt.agg_base = QDMA_INTR_RING_BASE(ring->dev_base);
		intr_ctxt.size = (QDMA_INTR_RING_SIZE - 1) / 4096;
		intr_ctxt.vec = ring->msix_id;
		intr_ctxt.valid = true;
		intr_ctxt.color = true;
		ret = qdma_prog_context(qdev, QDMA_CTXT_INTR_COAL,
					QDMA_CTXT_CLEAR, ring->ridx, NULL);
		if (ret) {
			qdma_err(qdev, "Failed to clear intr ctxt, ret %d", ret);
			return ret;
		}

		qdma_prep_intr_context(qdev, &intr_ctxt, ctxt);
		ret = qdma_prog_context(qdev, QDMA_CTXT_INTR_COAL,
					QDMA_CTXT_WRITE, ring->ridx, ctxt);
		if (ret) {
			qdma_err(qdev, "Failed to set up intr ctxt, ret %d", ret);
			return ret;
		}

		ret = devm_request_threaded_irq(dev, vector, NULL,
						qdma_queue_isr, IRQF_ONESHOT,
						"amd-qdma-queue", ring);
		if (ret) {
			qdma_err(qdev, "Failed to request irq %d", vector);
			return ret;
		}
	}

	return 0;
}
static int qdma_intr_init(struct qdma_device *qdev)
{
	int ret;

	ret = qdma_init_error_irq(qdev);
	if (ret) {
		qdma_err(qdev, "Failed to init error IRQs, ret %d", ret);
		return ret;
	}

	ret = qdmam_alloc_qintr_rings(qdev);
	if (ret) {
		qdma_err(qdev, "Failed to init queue IRQs, ret %d", ret);
		return ret;
	}

	return 0;
}
static void amd_qdma_remove(struct platform_device *pdev)
{
	struct qdma_device *qdev = platform_get_drvdata(pdev);

	qdma_sgdma_control(qdev, 0);
	dma_async_device_unregister(&qdev->dma_dev);

	mutex_destroy(&qdev->ctxt_lock);
}
static int amd_qdma_probe(struct platform_device *pdev)
{
	struct qdma_platdata *pdata = dev_get_platdata(&pdev->dev);
	struct qdma_device *qdev;
	struct resource *res;
	void __iomem *regs;
	int ret;

	qdev = devm_kzalloc(&pdev->dev, sizeof(*qdev), GFP_KERNEL);
	if (!qdev)
		return -ENOMEM;

	platform_set_drvdata(pdev, qdev);
	qdev->pdev = pdev;
	mutex_init(&qdev->ctxt_lock);

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		qdma_err(qdev, "Failed to get IRQ resource");
		ret = -ENODEV;
		goto failed;
	}
	qdev->err_irq_idx = pdata->irq_index;
	qdev->queue_irq_start = res->start + 1;
	qdev->queue_irq_num = resource_size(res) - 1;

	regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(regs)) {
		ret = PTR_ERR(regs);
		qdma_err(qdev, "Failed to map IO resource, err %d", ret);
		goto failed;
	}

	qdev->regmap = devm_regmap_init_mmio(&pdev->dev, regs,
					     &qdma_regmap_config);
	if (IS_ERR(qdev->regmap)) {
		ret = PTR_ERR(qdev->regmap);
		qdma_err(qdev, "Regmap init failed, err %d", ret);
		goto failed;
	}

	ret = qdma_device_verify(qdev);
	if (ret)
		goto failed;

	ret = qdma_get_hw_info(qdev);
	if (ret)
		goto failed;

	INIT_LIST_HEAD(&qdev->dma_dev.channels);

	ret = qdma_device_setup(qdev);
	if (ret)
		goto failed;

	ret = qdma_intr_init(qdev);
	if (ret) {
		qdma_err(qdev, "Failed to initialize IRQs %d", ret);
		goto failed_disable_engine;
	}

	dma_cap_set(DMA_SLAVE, qdev->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, qdev->dma_dev.cap_mask);

	qdev->dma_dev.dev = &pdev->dev;
	qdev->dma_dev.filter.map = pdata->device_map;
	qdev->dma_dev.filter.mapcnt = qdev->chan_num * 2;
	qdev->dma_dev.filter.fn = qdma_filter_fn;
	qdev->dma_dev.device_alloc_chan_resources = qdma_alloc_queue_resources;
	qdev->dma_dev.device_free_chan_resources = qdma_free_queue_resources;
	qdev->dma_dev.device_prep_slave_sg = qdma_prep_device_sg;
	qdev->dma_dev.device_config = qdma_device_config;
	qdev->dma_dev.device_issue_pending = qdma_issue_pending;
	qdev->dma_dev.device_tx_status = dma_cookie_status;
	qdev->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);

	ret = dma_async_device_register(&qdev->dma_dev);
	if (ret) {
		qdma_err(qdev, "Failed to register AMD QDMA: %d", ret);
		goto failed_disable_engine;
	}

	return 0;

failed_disable_engine:
	qdma_sgdma_control(qdev, 0);
failed:
	mutex_destroy(&qdev->ctxt_lock);
	qdma_err(qdev, "Failed to probe AMD QDMA driver");

	return ret;
}
static struct platform_driver amd_qdma_driver = {
	.driver		= {
		.name = "amd-qdma",
	},
	.probe		= amd_qdma_probe,
	.remove		= amd_qdma_remove,
};

module_platform_driver(amd_qdma_driver);
MODULE_DESCRIPTION("AMD QDMA driver");
MODULE_LICENSE("GPL");