/*
 * ASPEED XDMA Controller
 * Eddie James <[email protected]>
 *
 * Copyright (C) 2019 IBM Corp
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "hw/misc/aspeed_xdma.h"
#include "qapi/error.h"

#include "trace.h"

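/*
 * Register offsets (in bytes) within the XDMA MMIO region, with the write
 * masks and status bits used by this model.
 */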
#define XDMA_BMC_CMDQ_ADDR         0x10
#define XDMA_BMC_CMDQ_ENDP         0x14
#define XDMA_BMC_CMDQ_WRP          0x18
#define  XDMA_BMC_CMDQ_W_MASK      0x0003FFFF
#define XDMA_BMC_CMDQ_RDP          0x1C
#define  XDMA_BMC_CMDQ_RDP_MAGIC   0xEE882266
#define XDMA_IRQ_ENG_CTRL          0x20
#define  XDMA_IRQ_ENG_CTRL_US_COMP BIT(4)
#define  XDMA_IRQ_ENG_CTRL_DS_COMP BIT(5)
#define  XDMA_IRQ_ENG_CTRL_W_MASK  0xBFEFF07F
#define XDMA_IRQ_ENG_STAT          0x24
#define  XDMA_IRQ_ENG_STAT_US_COMP BIT(4)
#define  XDMA_IRQ_ENG_STAT_DS_COMP BIT(5)
#define  XDMA_IRQ_ENG_STAT_RESET   0xF8000000
#define XDMA_MEM_SIZE              0x1000

#define TO_REG(addr) ((addr) / sizeof(uint32_t))

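/* Read a 32-bit register; accesses outside the register file return 0. */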
static uint64_t aspeed_xdma_read(void *opaque, hwaddr addr, unsigned int size)
{
    uint32_t val = 0;
    AspeedXDMAState *xdma = opaque;

    if (addr < ASPEED_XDMA_REG_SIZE) {
        val = xdma->regs[TO_REG(addr)];
    }

    return (uint64_t)val;
}

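/*
 * Write handler for the BMC-side registers. The model does not perform any
 * actual DMA: a write to the command queue write pointer advances the read
 * pointer to match and immediately reports completion by setting the
 * upstream/downstream completion bits in the interrupt status register,
 * raising the IRQ if either completion interrupt is enabled. Writing the
 * magic value to the read pointer register suppresses the completion
 * signalling for the next write pointer update.
 */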
static void aspeed_xdma_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned int size)
{
    unsigned int idx;
    uint32_t val32 = (uint32_t)val;
    AspeedXDMAState *xdma = opaque;

    if (addr >= ASPEED_XDMA_REG_SIZE) {
        return;
    }

    switch (addr) {
    case XDMA_BMC_CMDQ_ENDP:
        xdma->regs[TO_REG(addr)] = val32 & XDMA_BMC_CMDQ_W_MASK;
        break;
    case XDMA_BMC_CMDQ_WRP:
        idx = TO_REG(addr);
        xdma->regs[idx] = val32 & XDMA_BMC_CMDQ_W_MASK;
        xdma->regs[TO_REG(XDMA_BMC_CMDQ_RDP)] = xdma->regs[idx];

        trace_aspeed_xdma_write(addr, val);

        if (xdma->bmc_cmdq_readp_set) {
            xdma->bmc_cmdq_readp_set = 0;
        } else {
            xdma->regs[TO_REG(XDMA_IRQ_ENG_STAT)] |=
                XDMA_IRQ_ENG_STAT_US_COMP | XDMA_IRQ_ENG_STAT_DS_COMP;

            if (xdma->regs[TO_REG(XDMA_IRQ_ENG_CTRL)] &
                (XDMA_IRQ_ENG_CTRL_US_COMP | XDMA_IRQ_ENG_CTRL_DS_COMP)) {
                qemu_irq_raise(xdma->irq);
            }
        }
        break;
    case XDMA_BMC_CMDQ_RDP:
        trace_aspeed_xdma_write(addr, val);

        if (val32 == XDMA_BMC_CMDQ_RDP_MAGIC) {
            xdma->bmc_cmdq_readp_set = 1;
        }
        break;
    case XDMA_IRQ_ENG_CTRL:
        xdma->regs[TO_REG(addr)] = val32 & XDMA_IRQ_ENG_CTRL_W_MASK;
        break;
    case XDMA_IRQ_ENG_STAT:
        trace_aspeed_xdma_write(addr, val);

        idx = TO_REG(addr);
        if (val32 & (XDMA_IRQ_ENG_STAT_US_COMP | XDMA_IRQ_ENG_STAT_DS_COMP)) {
            xdma->regs[idx] &=
                ~(XDMA_IRQ_ENG_STAT_US_COMP | XDMA_IRQ_ENG_STAT_DS_COMP);
            qemu_irq_lower(xdma->irq);
        }
        break;
    default:
        xdma->regs[TO_REG(addr)] = val32;
        break;
    }
}

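/* All registers are accessed as aligned 32-bit words. */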
static const MemoryRegionOps aspeed_xdma_ops = {
    .read = aspeed_xdma_read,
    .write = aspeed_xdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 4,
    .valid.max_access_size = 4,
};

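/* Map the register space and wire up the interrupt line. */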
static void aspeed_xdma_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    AspeedXDMAState *xdma = ASPEED_XDMA(dev);

    sysbus_init_irq(sbd, &xdma->irq);
    memory_region_init_io(&xdma->iomem, OBJECT(xdma), &aspeed_xdma_ops, xdma,
                          TYPE_ASPEED_XDMA, XDMA_MEM_SIZE);
    sysbus_init_mmio(sbd, &xdma->iomem);
}

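/* Return all registers to their reset values and lower the interrupt. */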
static void aspeed_xdma_reset(DeviceState *dev)
{
    AspeedXDMAState *xdma = ASPEED_XDMA(dev);

    xdma->bmc_cmdq_readp_set = 0;
    memset(xdma->regs, 0, ASPEED_XDMA_REG_SIZE);
    xdma->regs[TO_REG(XDMA_IRQ_ENG_STAT)] = XDMA_IRQ_ENG_STAT_RESET;

    qemu_irq_lower(xdma->irq);
}

static const VMStateDescription aspeed_xdma_vmstate = {
    .name = TYPE_ASPEED_XDMA,
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, AspeedXDMAState, ASPEED_XDMA_NUM_REGS),
        VMSTATE_END_OF_LIST(),
    },
};

static void aspeed_xdma_class_init(ObjectClass *classp, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(classp);

    dc->realize = aspeed_xdma_realize;
    dc->reset = aspeed_xdma_reset;
    dc->vmsd = &aspeed_xdma_vmstate;
}

static const TypeInfo aspeed_xdma_info = {
    .name          = TYPE_ASPEED_XDMA,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(AspeedXDMAState),
    .class_init    = aspeed_xdma_class_init,
};

static void aspeed_xdma_register_type(void)
{
    type_register_static(&aspeed_xdma_info);
}
type_init(aspeed_xdma_register_type);