2 * QEMU model of Xilinx AXI-DMA block.
4 * Copyright (c) 2011 Edgar E. Iglesias.
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
25 #include "hw/sysbus.h"
26 #include "qemu/timer.h"
27 #include "hw/ptimer.h"
29 #include "hw/qdev-addr.h"
30 #include "qapi/qmp/qerror.h"
32 #include "hw/stream.h"
36 #define TYPE_XILINX_AXI_DMA "xlnx.axi-dma"
37 #define TYPE_XILINX_AXI_DMA_DATA_STREAM "xilinx-axi-dma-data-stream"
39 #define XILINX_AXI_DMA(obj) \
40 OBJECT_CHECK(XilinxAXIDMA, (obj), TYPE_XILINX_AXI_DMA)
42 #define XILINX_AXI_DMA_DATA_STREAM(obj) \
43 OBJECT_CHECK(XilinxAXIDMAStreamSlave, (obj),\
44 TYPE_XILINX_AXI_DMA_DATA_STREAM)
46 #define R_DMACR (0x00 / 4)
47 #define R_DMASR (0x04 / 4)
48 #define R_CURDESC (0x08 / 4)
49 #define R_TAILDESC (0x10 / 4)
50 #define R_MAX (0x30 / 4)
52 typedef struct XilinxAXIDMA XilinxAXIDMA;
53 typedef struct XilinxAXIDMAStreamSlave XilinxAXIDMAStreamSlave;
57 DMACR_TAILPTR_MODE = 2,
64 DMASR_IOC_IRQ = 1 << 12,
65 DMASR_DLY_IRQ = 1 << 13,
67 DMASR_IRQ_MASK = 7 << 12
72 uint64_t buffer_address;
80 SDESC_CTRL_EOF = (1 << 26),
81 SDESC_CTRL_SOF = (1 << 27),
83 SDESC_CTRL_LEN_MASK = (1 << 23) - 1
87 SDESC_STATUS_EOF = (1 << 26),
88 SDESC_STATUS_SOF_BIT = 27,
89 SDESC_STATUS_SOF = (1 << SDESC_STATUS_SOF_BIT),
90 SDESC_STATUS_COMPLETE = (1 << 31)
102 unsigned int complete_cnt;
103 uint32_t regs[R_MAX];
106 struct XilinxAXIDMAStreamSlave {
109 struct XilinxAXIDMA *dma;
112 struct XilinxAXIDMA {
117 XilinxAXIDMAStreamSlave rx_data_dev;
119 struct Stream streams[2];
123 * Helper calls to extract info from desriptors and other trivial
/* Nonzero when the descriptor's start-of-frame control bit is set. */
126 static inline int stream_desc_sof(struct SDesc *d)
128 return d->control & SDESC_CTRL_SOF;
/* Nonzero when the descriptor's end-of-frame control bit is set. */
131 static inline int stream_desc_eof(struct SDesc *d)
133 return d->control & SDESC_CTRL_EOF;
/* True (0/1) while a software reset requested via DMACR is in progress. */
136 static inline int stream_resetting(struct Stream *s)
138 return !!(s->regs[R_DMACR] & DMACR_RESET);
/* Nonzero when the channel's run/stop bit in DMACR is set. */
141 static inline int stream_running(struct Stream *s)
143 return s->regs[R_DMACR] & DMACR_RUNSTOP;
/* Nonzero when the channel reports halted in DMASR. */
146 static inline int stream_halted(struct Stream *s)
148 return s->regs[R_DMASR] & DMASR_HALTED;
/* True (0/1) when the channel has reached the tail descriptor and idled. */
151 static inline int stream_idle(struct Stream *s)
153 return !!(s->regs[R_DMASR] & DMASR_IDLE);
/* Put one DMA channel into its power-on register state. */
156 static void stream_reset(struct Stream *s)
158 s->regs[R_DMASR] = DMASR_HALTED; /* starts up halted. */
159 s->regs[R_DMACR] = 1 << 16; /* Starts with one in compl threshold. */
162 /* Map an offset addr into a channel index. */
/* NOTE(review): body not visible in this excerpt; presumably selects
 * stream 0 or 1 from the register offset — confirm against full file. */
163 static inline int streamid_from_addr(hwaddr addr)
173 static void stream_desc_show(struct SDesc *d)
175 qemu_log("buffer_addr = " PRIx64 "\n", d->buffer_address);
176 qemu_log("nxtdesc = " PRIx64 "\n", d->nxtdesc);
177 qemu_log("control = %x\n", d->control);
178 qemu_log("status = %x\n", d->status);
/* Fetch the descriptor at guest-physical 'addr' into s->desc, converting
 * every field from the bus's little-endian layout to host byte order. */
182 static void stream_desc_load(struct Stream *s, hwaddr addr)
184 struct SDesc *d = &s->desc;
187 cpu_physical_memory_read(addr, (void *) d, sizeof *d);
189 /* Convert from LE into host endianness. */
190 d->buffer_address = le64_to_cpu(d->buffer_address);
191 d->nxtdesc = le64_to_cpu(d->nxtdesc);
192 d->control = le32_to_cpu(d->control);
193 d->status = le32_to_cpu(d->status);
/* The app words are swapped individually. */
194 for (i = 0; i < ARRAY_SIZE(d->app); i++) {
195 d->app[i] = le32_to_cpu(d->app[i]);
/* Write s->desc back to guest-physical 'addr', converting every field from
 * host byte order to the bus's little-endian layout first.
 * NOTE(review): this mutates s->desc in place before the write, so the
 * in-memory copy is left little-endian afterwards — callers reload before
 * reuse, presumably; confirm against full file. */
199 static void stream_desc_store(struct Stream *s, hwaddr addr)
201 struct SDesc *d = &s->desc;
204 /* Convert from host endianness into LE. */
205 d->buffer_address = cpu_to_le64(d->buffer_address);
206 d->nxtdesc = cpu_to_le64(d->nxtdesc);
207 d->control = cpu_to_le32(d->control);
208 d->status = cpu_to_le32(d->status);
209 for (i = 0; i < ARRAY_SIZE(d->app); i++) {
210 d->app[i] = cpu_to_le32(d->app[i]);
212 cpu_physical_memory_write(addr, (void *) d, sizeof *d);
/* Recompute the channel's IRQ line: assert it only when an interrupt
 * condition is pending in DMASR AND the matching enable bit is set in
 * DMACR (the enable bits share the DMASR_IRQ_MASK bit positions). */
215 static void stream_update_irq(struct Stream *s)
217 unsigned int pending, mask, irq;
219 pending = s->regs[R_DMASR] & DMASR_IRQ_MASK;
220 mask = s->regs[R_DMACR] & DMASR_IRQ_MASK;
222 irq = pending & mask;
224 qemu_set_irq(s->irq, !!irq);
/* Reload the completion countdown from the IRQ-threshold field held in
 * DMACR bits [23:16]. */
227 static void stream_reload_complete_cnt(struct Stream *s)
229 unsigned int comp_th;
230 comp_th = (s->regs[R_DMACR] >> 16) & 0xff;
231 s->complete_cnt = comp_th;
/* Delay-timer callback: raise the delay interrupt, re-arm the completion
 * threshold counter and re-evaluate the IRQ line. */
234 static void timer_hit(void *opaque)
236 struct Stream *s = opaque;
238 stream_reload_complete_cnt(s);
239 s->regs[R_DMASR] |= DMASR_DLY_IRQ;
240 stream_update_irq(s);
/* Account for one completed descriptor: restart the delay timer with the
 * IRQDelay value from DMACR bits [31:24], and when the completion counter
 * reaches zero raise the IOC interrupt and re-arm the threshold.
 * NOTE(review): the decrement of complete_cnt is not visible in this
 * excerpt — confirm against full file. */
243 static void stream_complete(struct Stream *s)
245 unsigned int comp_delay;
247 /* Start the delayed timer. */
248 comp_delay = s->regs[R_DMACR] >> 24;
250 ptimer_stop(s->ptimer);
251 ptimer_set_count(s->ptimer, comp_delay);
252 ptimer_run(s->ptimer, 1);
256 if (s->complete_cnt == 0) {
257 /* Raise the IOC irq. */
258 s->regs[R_DMASR] |= DMASR_IOC_IRQ;
259 stream_reload_complete_cnt(s);
/* MEM2S (TX) engine: walk the descriptor ring starting at CURDESC, copy
 * each buffer from guest memory into the bounded txbuf, and push the
 * accumulated frame to the attached stream slave when EOF is reached.
 * Stops when halted, idle, or a descriptor already marked COMPLETE is hit
 * (ring exhausted). */
263 static void stream_process_mem2s(struct Stream *s,
267 unsigned char txbuf[16 * 1024];
271 if (!stream_running(s) || stream_idle(s)) {
276 stream_desc_load(s, s->regs[R_CURDESC]);
/* An already-completed descriptor means we caught up with ourselves. */
278 if (s->desc.status & SDESC_STATUS_COMPLETE) {
279 s->regs[R_DMASR] |= DMASR_HALTED;
/* SOF carries the app words (sideband data) for the whole frame. */
283 if (stream_desc_sof(&s->desc)) {
285 memcpy(app, s->desc.app, sizeof app);
288 txlen = s->desc.control & SDESC_CTRL_LEN_MASK;
/* The internal staging buffer is fixed-size; a frame larger than
 * sizeof txbuf is a fatal model limitation. */
289 if ((txlen + s->pos) > sizeof txbuf) {
290 hw_error("%s: too small internal txbuf! %d\n", __func__,
294 cpu_physical_memory_read(s->desc.buffer_address,
295 txbuf + s->pos, txlen);
/* EOF: hand the assembled frame to the connected stream device. */
298 if (stream_desc_eof(&s->desc)) {
299 stream_push(tx_dev, txbuf, s->pos, app);
304 /* Update the descriptor. */
305 s->desc.status = txlen | SDESC_STATUS_COMPLETE;
306 stream_desc_store(s, s->regs[R_CURDESC]);
/* Advance; reaching TAILDESC idles the channel (tailptr mode). */
309 prev_d = s->regs[R_CURDESC];
310 s->regs[R_CURDESC] = s->desc.nxtdesc;
311 if (prev_d == s->regs[R_TAILDESC]) {
312 s->regs[R_DMASR] |= DMASR_IDLE;
/* S2MEM (RX) engine: scatter an incoming stream frame of 'len' bytes into
 * the descriptor ring's buffers, storing the app sideband words and the
 * SOF/EOF/COMPLETE status into each descriptor as it is consumed. */
318 static void stream_process_s2mem(struct Stream *s,
319 unsigned char *buf, size_t len, uint32_t *app)
326 if (!stream_running(s) || stream_idle(s)) {
331 stream_desc_load(s, s->regs[R_CURDESC]);
/* An already-completed descriptor means the ring is exhausted. */
333 if (s->desc.status & SDESC_STATUS_COMPLETE) {
334 s->regs[R_DMASR] |= DMASR_HALTED;
338 rxlen = s->desc.control & SDESC_CTRL_LEN_MASK;
344 cpu_physical_memory_write(s->desc.buffer_address, buf + pos, rxlen);
348 /* Update the descriptor. */
/* Copy the 5 app sideband words into the descriptor. */
353 for (i = 0; i < 5; i++) {
354 s->desc.app[i] = app[i];
356 s->desc.status |= SDESC_STATUS_EOF;
359 s->desc.status |= sof << SDESC_STATUS_SOF_BIT;
360 s->desc.status |= SDESC_STATUS_COMPLETE;
361 stream_desc_store(s, s->regs[R_CURDESC]);
/* Advance; reaching TAILDESC idles the channel (tailptr mode). */
365 prev_d = s->regs[R_CURDESC];
366 s->regs[R_CURDESC] = s->desc.nxtdesc;
367 if (prev_d == s->regs[R_TAILDESC]) {
368 s->regs[R_DMASR] |= DMASR_IDLE;
/* Device-level reset: reset both channels (MM2S and S2MM). */
374 static void xilinx_axidma_reset(DeviceState *dev)
377 XilinxAXIDMA *s = XILINX_AXI_DMA(dev);
379 for (i = 0; i < 2; i++) {
380 stream_reset(&s->streams[i]);
/* StreamSlave push hook: receive a frame from the attached stream device
 * and DMA it to memory through the S2MM channel (streams[1]).
 * The app sideband words are mandatory for this model. */
385 xilinx_axidma_data_stream_push(StreamSlave *obj, unsigned char *buf, size_t len,
388 XilinxAXIDMAStreamSlave *ds = XILINX_AXI_DMA_DATA_STREAM(obj);
389 struct Stream *s = &ds->dma->streams[1];
392 hw_error("No stream app data!\n");
394 stream_process_s2mem(s, buf, len, app);
395 stream_update_irq(s);
/* MMIO read handler. Selects the channel from the offset, then (in a
 * switch over the register index, partly elided here) composes the
 * register value. */
399 static uint64_t axidma_read(void *opaque, hwaddr addr,
402 XilinxAXIDMA *d = opaque;
407 sid = streamid_from_addr(addr);
408 s = &d->streams[sid];
/* DMACR: reading clears the self-resetting RESET bit after one access. */
414 /* Simulate one cycles reset delay. */
415 s->regs[addr] &= ~DMACR_RESET;
/* DMASR: live-compose the threshold counter (bits 23:16) and the
 * remaining delay-timer count (bits 31:24) on top of the status bits. */
419 s->regs[addr] &= 0xffff;
420 s->regs[addr] |= (s->complete_cnt & 0xff) << 16;
421 s->regs[addr] |= (ptimer_get_count(s->ptimer) & 0xff) << 24;
426 D(qemu_log("%s ch=%d addr=" TARGET_FMT_plx " v=%x\n",
427 __func__, sid, addr * 4, r));
/* MMIO write handler. Selects the channel from the offset, then (in a
 * switch over the register index, partly elided here) dispatches per
 * register; always re-evaluates the IRQ line before returning. */
434 static void axidma_write(void *opaque, hwaddr addr,
435 uint64_t value, unsigned size)
437 XilinxAXIDMA *d = opaque;
441 sid = streamid_from_addr(addr);
442 s = &d->streams[sid];
/* DMACR: force tailptr mode, keep the in-flight RESET state, then
 * either begin a reset or start the channel. */
448 /* Tailptr mode is always on. */
449 value |= DMACR_TAILPTR_MODE;
450 /* Remember our previous reset state. */
451 value |= (s->regs[addr] & DMACR_RESET);
452 s->regs[addr] = value;
454 if (value & DMACR_RESET) {
/* RUNSTOP set and not resetting: clear halted/idle and go. */
458 if ((value & 1) && !stream_resetting(s)) {
459 /* Start processing. */
460 s->regs[R_DMASR] &= ~(DMASR_HALTED | DMASR_IDLE);
462 stream_reload_complete_cnt(s);
/* DMASR: IRQ bits are write-one-to-clear — bits written as 1 in the
 * IRQ field are masked out of the stored value. */
466 /* Mask away write to clear irq lines. */
467 value &= ~(value & DMASR_IRQ_MASK);
468 s->regs[addr] = value;
/* TAILDESC: storing the tail pointer kicks the MM2S engine. */
472 s->regs[addr] = value;
473 s->regs[R_DMASR] &= ~DMASR_IDLE; /* Not idle. */
475 stream_process_mem2s(s, d->tx_dev);
479 D(qemu_log("%s: ch=%d addr=" TARGET_FMT_plx " v=%x\n",
480 __func__, sid, addr * 4, (unsigned)value));
481 s->regs[addr] = value;
484 stream_update_irq(s);
/* MMIO dispatch table for the register block. */
487 static const MemoryRegionOps axidma_ops = {
489 .write = axidma_write,
490 .endianness = DEVICE_NATIVE_ENDIAN,
/* Realize: wire the embedded rx stream-slave object back to this DMA via
 * a QOM link, then initialize both channels' delay ptimers at freqhz. */
493 static void xilinx_axidma_realize(DeviceState *dev, Error **errp)
495 XilinxAXIDMA *s = XILINX_AXI_DMA(dev);
496 XilinxAXIDMAStreamSlave *ds = XILINX_AXI_DMA_DATA_STREAM(&s->rx_data_dev);
497 Error *local_errp = NULL;
/* Create and set the "dma" back-link on the rx stream-slave child. */
499 object_property_add_link(OBJECT(ds), "dma", TYPE_XILINX_AXI_DMA,
500 (Object **)&ds->dma, &local_errp);
502 goto xilinx_axidma_realize_fail;
504 object_property_set_link(OBJECT(ds), OBJECT(s), "dma", &local_errp);
506 goto xilinx_axidma_realize_fail;
/* Per-channel delay timer driven from a bottom half. */
511 for (i = 0; i < 2; i++) {
512 s->streams[i].nr = i;
513 s->streams[i].bh = qemu_bh_new(timer_hit, &s->streams[i]);
514 s->streams[i].ptimer = ptimer_init(s->streams[i].bh);
515 ptimer_set_freq(s->streams[i].ptimer, s->freqhz);
519 xilinx_axidma_realize_fail:
/* Instance init: expose the tx link property, create the rx stream-slave
 * child, and register the two channel IRQs plus the MMIO region
 * (two channels, R_MAX registers each, 4 bytes per register). */
525 static void xilinx_axidma_init(Object *obj)
527 XilinxAXIDMA *s = XILINX_AXI_DMA(obj);
528 SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
/* Link to the downstream device receiving MM2S frames. */
531 object_property_add_link(obj, "axistream-connected", TYPE_STREAM_SLAVE,
532 (Object **) &s->tx_dev, NULL);
534 object_initialize(&s->rx_data_dev, TYPE_XILINX_AXI_DMA_DATA_STREAM);
535 object_property_add_child(OBJECT(s), "axistream-connected-target",
536 (Object *)&s->rx_data_dev, &errp);
537 assert_no_error(errp);
539 sysbus_init_irq(sbd, &s->streams[0].irq);
540 sysbus_init_irq(sbd, &s->streams[1].irq);
542 memory_region_init_io(&s->iomem, &axidma_ops, s,
543 "xlnx.axi-dma", R_MAX * 4 * 2);
544 sysbus_init_mmio(sbd, &s->iomem);
/* qdev properties: delay-timer clock frequency, default 50 MHz. */
547 static Property axidma_properties[] = {
548 DEFINE_PROP_UINT32("freqhz", XilinxAXIDMA, freqhz, 50000000),
549 DEFINE_PROP_END_OF_LIST(),
552 static void axidma_class_init(ObjectClass *klass, void *data)
554 DeviceClass *dc = DEVICE_CLASS(klass);
556 dc->realize = xilinx_axidma_realize,
557 dc->reset = xilinx_axidma_reset;
558 dc->props = axidma_properties;
/* Class init for the rx stream-slave type.
 * NOTE(review): the body (presumably ssc->push = data, wiring the push
 * hook passed via .class_data) is not visible in this excerpt — confirm
 * against full file. */
561 static void xilinx_axidma_stream_class_init(ObjectClass *klass, void *data)
563 StreamSlaveClass *ssc = STREAM_SLAVE_CLASS(klass);
/* QOM registration for the sysbus DMA device itself. */
568 static const TypeInfo axidma_info = {
569 .name = TYPE_XILINX_AXI_DMA,
570 .parent = TYPE_SYS_BUS_DEVICE,
571 .instance_size = sizeof(XilinxAXIDMA),
572 .class_init = axidma_class_init,
573 .instance_init = xilinx_axidma_init,
/* QOM registration for the embedded rx stream-slave object; the push
 * callback is smuggled to class_init through .class_data. */
576 static const TypeInfo xilinx_axidma_data_stream_info = {
577 .name = TYPE_XILINX_AXI_DMA_DATA_STREAM,
578 .parent = TYPE_OBJECT,
579 .instance_size = sizeof(struct XilinxAXIDMAStreamSlave),
580 .class_init = xilinx_axidma_stream_class_init,
581 .class_data = xilinx_axidma_data_stream_push,
582 .interfaces = (InterfaceInfo[]) {
583 { TYPE_STREAM_SLAVE },
/* Register both QOM types with the type system. */
588 static void xilinx_axidma_register_types(void)
590 type_register_static(&axidma_info);
591 type_register_static(&xilinx_axidma_data_stream_info);
594 type_init(xilinx_axidma_register_types)