2 * QEMU model of the Ibex SPI Controller
3 * SPEC Reference: https://docs.opentitan.org/hw/ip/spi_host/doc/
5 * Copyright (C) 2022 Western Digital
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26 #include "qemu/osdep.h"
28 #include "qemu/module.h"
29 #include "hw/ssi/ibex_spi_host.h"
31 #include "hw/qdev-properties.h"
32 #include "hw/qdev-properties-system.h"
33 #include "migration/vmstate.h"
36 REG32(INTR_STATE, 0x00)
37 FIELD(INTR_STATE, ERROR, 0, 1)
38 FIELD(INTR_STATE, SPI_EVENT, 1, 1)
39 REG32(INTR_ENABLE, 0x04)
40 FIELD(INTR_ENABLE, ERROR, 0, 1)
41 FIELD(INTR_ENABLE, SPI_EVENT, 1, 1)
42 REG32(INTR_TEST, 0x08)
43 FIELD(INTR_TEST, ERROR, 0, 1)
44 FIELD(INTR_TEST, SPI_EVENT, 1, 1)
45 REG32(ALERT_TEST, 0x0c)
46 FIELD(ALERT_TEST, FETAL_TEST, 0, 1)
48 FIELD(CONTROL, RX_WATERMARK, 0, 8)
49 FIELD(CONTROL, TX_WATERMARK, 1, 8)
50 FIELD(CONTROL, OUTPUT_EN, 29, 1)
51 FIELD(CONTROL, SW_RST, 30, 1)
52 FIELD(CONTROL, SPIEN, 31, 1)
54 FIELD(STATUS, TXQD, 0, 8)
55 FIELD(STATUS, RXQD, 18, 8)
56 FIELD(STATUS, CMDQD, 16, 3)
57 FIELD(STATUS, RXWM, 20, 1)
58 FIELD(STATUS, BYTEORDER, 22, 1)
59 FIELD(STATUS, RXSTALL, 23, 1)
60 FIELD(STATUS, RXEMPTY, 24, 1)
61 FIELD(STATUS, RXFULL, 25, 1)
62 FIELD(STATUS, TXWM, 26, 1)
63 FIELD(STATUS, TXSTALL, 27, 1)
64 FIELD(STATUS, TXEMPTY, 28, 1)
65 FIELD(STATUS, TXFULL, 29, 1)
66 FIELD(STATUS, ACTIVE, 30, 1)
67 FIELD(STATUS, READY, 31, 1)
68 REG32(CONFIGOPTS, 0x18)
69 FIELD(CONFIGOPTS, CLKDIV_0, 0, 16)
70 FIELD(CONFIGOPTS, CSNIDLE_0, 16, 4)
71 FIELD(CONFIGOPTS, CSNTRAIL_0, 20, 4)
72 FIELD(CONFIGOPTS, CSNLEAD_0, 24, 4)
73 FIELD(CONFIGOPTS, FULLCYC_0, 29, 1)
74 FIELD(CONFIGOPTS, CPHA_0, 30, 1)
75 FIELD(CONFIGOPTS, CPOL_0, 31, 1)
77 FIELD(CSID, CSID, 0, 32)
79 FIELD(COMMAND, LEN, 0, 8)
80 FIELD(COMMAND, CSAAT, 9, 1)
81 FIELD(COMMAND, SPEED, 10, 2)
82 FIELD(COMMAND, DIRECTION, 12, 2)
83 REG32(ERROR_ENABLE, 0x2c)
84 FIELD(ERROR_ENABLE, CMDBUSY, 0, 1)
85 FIELD(ERROR_ENABLE, OVERFLOW, 1, 1)
86 FIELD(ERROR_ENABLE, UNDERFLOW, 2, 1)
87 FIELD(ERROR_ENABLE, CMDINVAL, 3, 1)
88 FIELD(ERROR_ENABLE, CSIDINVAL, 4, 1)
89 REG32(ERROR_STATUS, 0x30)
90 FIELD(ERROR_STATUS, CMDBUSY, 0, 1)
91 FIELD(ERROR_STATUS, OVERFLOW, 1, 1)
92 FIELD(ERROR_STATUS, UNDERFLOW, 2, 1)
93 FIELD(ERROR_STATUS, CMDINVAL, 3, 1)
94 FIELD(ERROR_STATUS, CSIDINVAL, 4, 1)
95 FIELD(ERROR_STATUS, ACCESSINVAL, 5, 1)
96 REG32(EVENT_ENABLE, 0x30)
97 FIELD(EVENT_ENABLE, RXFULL, 0, 1)
98 FIELD(EVENT_ENABLE, TXEMPTY, 1, 1)
99 FIELD(EVENT_ENABLE, RXWM, 2, 1)
100 FIELD(EVENT_ENABLE, TXWM, 3, 1)
101 FIELD(EVENT_ENABLE, READY, 4, 1)
102 FIELD(EVENT_ENABLE, IDLE, 5, 1)
/*
 * Convert a byte count into a 32-bit-word count, rounding up.
 * Used to report FIFO depths (TXQD/RXQD) in words.
 */
static inline uint8_t div4_round_up(uint8_t dividend)
{
    return (dividend + 3) / 4;
}
109 static void ibex_spi_rxfifo_reset(IbexSPIHostState *s)
111 /* Empty the RX FIFO and assert RXEMPTY */
112 fifo8_reset(&s->rx_fifo);
113 s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_RXFULL_MASK;
114 s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_RXEMPTY_MASK;
117 static void ibex_spi_txfifo_reset(IbexSPIHostState *s)
119 /* Empty the TX FIFO and assert TXEMPTY */
120 fifo8_reset(&s->tx_fifo);
121 s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_TXFULL_MASK;
122 s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_TXEMPTY_MASK;
125 static void ibex_spi_host_reset(DeviceState *dev)
127 IbexSPIHostState *s = IBEX_SPI_HOST(dev);
128 trace_ibex_spi_host_reset("Resetting Ibex SPI");
130 /* SPI Host Register Reset */
131 s->regs[IBEX_SPI_HOST_INTR_STATE] = 0x00;
132 s->regs[IBEX_SPI_HOST_INTR_ENABLE] = 0x00;
133 s->regs[IBEX_SPI_HOST_INTR_TEST] = 0x00;
134 s->regs[IBEX_SPI_HOST_ALERT_TEST] = 0x00;
135 s->regs[IBEX_SPI_HOST_CONTROL] = 0x7f;
136 s->regs[IBEX_SPI_HOST_STATUS] = 0x00;
137 s->regs[IBEX_SPI_HOST_CONFIGOPTS] = 0x00;
138 s->regs[IBEX_SPI_HOST_CSID] = 0x00;
139 s->regs[IBEX_SPI_HOST_COMMAND] = 0x00;
140 /* RX/TX Modelled by FIFO */
141 s->regs[IBEX_SPI_HOST_RXDATA] = 0x00;
142 s->regs[IBEX_SPI_HOST_TXDATA] = 0x00;
144 s->regs[IBEX_SPI_HOST_ERROR_ENABLE] = 0x1F;
145 s->regs[IBEX_SPI_HOST_ERROR_STATUS] = 0x00;
146 s->regs[IBEX_SPI_HOST_EVENT_ENABLE] = 0x00;
148 ibex_spi_rxfifo_reset(s);
149 ibex_spi_txfifo_reset(s);
151 s->init_status = true;
/*
 * Check if we need to trigger an interrupt.
 * The two interrupt lines (host_err and event) can
 * be enabled separately in 'IBEX_SPI_HOST_INTR_ENABLE'.
 *
 * Interrupts are triggered based on the causes
 * enabled in `IBEX_SPI_HOST_EVENT_ENABLE` and `IBEX_SPI_HOST_ERROR_ENABLE`.
 */
163 static void ibex_spi_host_irq(IbexSPIHostState *s)
165 bool error_en = s->regs[IBEX_SPI_HOST_INTR_ENABLE]
166 & R_INTR_ENABLE_ERROR_MASK;
167 bool event_en = s->regs[IBEX_SPI_HOST_INTR_ENABLE]
168 & R_INTR_ENABLE_SPI_EVENT_MASK;
169 bool err_pending = s->regs[IBEX_SPI_HOST_INTR_STATE]
170 & R_INTR_STATE_ERROR_MASK;
171 bool status_pending = s->regs[IBEX_SPI_HOST_INTR_STATE]
172 & R_INTR_STATE_SPI_EVENT_MASK;
173 int err_irq = 0, event_irq = 0;
175 /* Error IRQ enabled and Error IRQ Cleared*/
176 if (error_en && !err_pending) {
177 /* Event enabled, Interrupt Test Error */
178 if (s->regs[IBEX_SPI_HOST_INTR_TEST] & R_INTR_TEST_ERROR_MASK) {
180 } else if ((s->regs[IBEX_SPI_HOST_ERROR_ENABLE]
181 & R_ERROR_ENABLE_CMDBUSY_MASK) &&
182 s->regs[IBEX_SPI_HOST_ERROR_STATUS]
183 & R_ERROR_STATUS_CMDBUSY_MASK) {
184 /* Wrote to COMMAND when not READY */
186 } else if ((s->regs[IBEX_SPI_HOST_ERROR_ENABLE]
187 & R_ERROR_ENABLE_CMDINVAL_MASK) &&
188 s->regs[IBEX_SPI_HOST_ERROR_STATUS]
189 & R_ERROR_STATUS_CMDINVAL_MASK) {
190 /* Invalid command segment */
192 } else if ((s->regs[IBEX_SPI_HOST_ERROR_ENABLE]
193 & R_ERROR_ENABLE_CSIDINVAL_MASK) &&
194 s->regs[IBEX_SPI_HOST_ERROR_STATUS]
195 & R_ERROR_STATUS_CSIDINVAL_MASK) {
196 /* Invalid value for CSID */
200 s->regs[IBEX_SPI_HOST_INTR_STATE] |= R_INTR_STATE_ERROR_MASK;
202 qemu_set_irq(s->host_err, err_irq);
205 /* Event IRQ Enabled and Event IRQ Cleared */
206 if (event_en && !status_pending) {
207 if (s->regs[IBEX_SPI_HOST_INTR_TEST] & R_INTR_TEST_SPI_EVENT_MASK) {
208 /* Event enabled, Interrupt Test Event */
210 } else if ((s->regs[IBEX_SPI_HOST_EVENT_ENABLE]
211 & R_EVENT_ENABLE_READY_MASK) &&
212 (s->regs[IBEX_SPI_HOST_STATUS] & R_STATUS_READY_MASK)) {
213 /* SPI Host ready for next command */
215 } else if ((s->regs[IBEX_SPI_HOST_EVENT_ENABLE]
216 & R_EVENT_ENABLE_TXEMPTY_MASK) &&
217 (s->regs[IBEX_SPI_HOST_STATUS] & R_STATUS_TXEMPTY_MASK)) {
218 /* SPI TXEMPTY, TXFIFO drained */
220 } else if ((s->regs[IBEX_SPI_HOST_EVENT_ENABLE]
221 & R_EVENT_ENABLE_RXFULL_MASK) &&
222 (s->regs[IBEX_SPI_HOST_STATUS] & R_STATUS_RXFULL_MASK)) {
223 /* SPI RXFULL, RXFIFO full */
227 s->regs[IBEX_SPI_HOST_INTR_STATE] |= R_INTR_STATE_SPI_EVENT_MASK;
229 qemu_set_irq(s->event, event_irq);
233 static void ibex_spi_host_transfer(IbexSPIHostState *s)
236 /* Get num of one byte transfers */
237 uint8_t segment_len = ((s->regs[IBEX_SPI_HOST_COMMAND] & R_COMMAND_LEN_MASK)
238 >> R_COMMAND_LEN_SHIFT);
239 while (segment_len > 0) {
240 if (fifo8_is_empty(&s->tx_fifo)) {
242 s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_TXSTALL_MASK;
244 } else if (fifo8_is_full(&s->rx_fifo)) {
246 s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_RXSTALL_MASK;
249 tx = fifo8_pop(&s->tx_fifo);
252 rx = ssi_transfer(s->ssi, tx);
254 trace_ibex_spi_host_transfer(tx, rx);
256 if (!fifo8_is_full(&s->rx_fifo)) {
257 fifo8_push(&s->rx_fifo, rx);
260 s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_RXFULL_MASK;
266 s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_READY_MASK;
268 s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_RXQD_MASK;
269 s->regs[IBEX_SPI_HOST_STATUS] |= (R_STATUS_RXQD_MASK
270 & div4_round_up(segment_len));
272 s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_TXQD_MASK;
273 s->regs[IBEX_SPI_HOST_STATUS] |= (fifo8_num_used(&s->tx_fifo) / 4)
274 & R_STATUS_TXQD_MASK;
276 s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_TXFULL_MASK;
277 /* Assert TXEMPTY and drop remaining bytes that exceed segment_len */
278 ibex_spi_txfifo_reset(s);
280 s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_RXEMPTY_MASK;
282 ibex_spi_host_irq(s);
285 static uint64_t ibex_spi_host_read(void *opaque, hwaddr addr,
288 IbexSPIHostState *s = opaque;
292 trace_ibex_spi_host_read(addr, size);
294 /* Match reg index */
297 /* Skipping any W/O registers */
298 case IBEX_SPI_HOST_INTR_STATE...IBEX_SPI_HOST_INTR_ENABLE:
299 case IBEX_SPI_HOST_CONTROL...IBEX_SPI_HOST_STATUS:
302 case IBEX_SPI_HOST_CSID:
305 case IBEX_SPI_HOST_CONFIGOPTS:
306 rc = s->config_opts[s->regs[IBEX_SPI_HOST_CSID]];
308 case IBEX_SPI_HOST_TXDATA:
311 case IBEX_SPI_HOST_RXDATA:
313 s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_RXFULL_MASK;
315 for (int i = 0; i < 4; ++i) {
316 if (fifo8_is_empty(&s->rx_fifo)) {
317 /* Assert RXEMPTY, no IRQ */
318 s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_RXEMPTY_MASK;
319 s->regs[IBEX_SPI_HOST_ERROR_STATUS] |=
320 R_ERROR_STATUS_UNDERFLOW_MASK;
323 rx_byte = fifo8_pop(&s->rx_fifo);
324 rc |= rx_byte << (i * 8);
327 case IBEX_SPI_HOST_ERROR_ENABLE...IBEX_SPI_HOST_EVENT_ENABLE:
331 qemu_log_mask(LOG_GUEST_ERROR, "Bad offset 0x%" HWADDR_PRIx "\n",
338 static void ibex_spi_host_write(void *opaque, hwaddr addr,
339 uint64_t val64, unsigned int size)
341 IbexSPIHostState *s = opaque;
342 uint32_t val32 = val64;
343 uint32_t shift_mask = 0xff;
346 trace_ibex_spi_host_write(addr, size, val64);
348 /* Match reg index */
352 /* Skipping any R/O registers */
353 case IBEX_SPI_HOST_INTR_STATE...IBEX_SPI_HOST_INTR_ENABLE:
354 s->regs[addr] = val32;
356 case IBEX_SPI_HOST_INTR_TEST:
357 s->regs[addr] = val32;
358 ibex_spi_host_irq(s);
360 case IBEX_SPI_HOST_ALERT_TEST:
361 s->regs[addr] = val32;
362 qemu_log_mask(LOG_UNIMP,
363 "%s: SPI_ALERT_TEST is not supported\n", __func__);
365 case IBEX_SPI_HOST_CONTROL:
366 s->regs[addr] = val32;
368 if (val32 & R_CONTROL_SW_RST_MASK) {
369 ibex_spi_host_reset((DeviceState *)s);
370 /* Clear active if any */
371 s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_ACTIVE_MASK;
374 if (val32 & R_CONTROL_OUTPUT_EN_MASK) {
375 qemu_log_mask(LOG_UNIMP,
376 "%s: CONTROL_OUTPUT_EN is not supported\n", __func__);
379 case IBEX_SPI_HOST_CONFIGOPTS:
380 /* Update the respective config-opts register based on CSIDth index */
381 s->config_opts[s->regs[IBEX_SPI_HOST_CSID]] = val32;
382 qemu_log_mask(LOG_UNIMP,
383 "%s: CONFIGOPTS Hardware settings not supported\n",
386 case IBEX_SPI_HOST_CSID:
387 if (val32 >= s->num_cs) {
388 /* CSID exceeds max num_cs */
389 s->regs[IBEX_SPI_HOST_ERROR_STATUS] |=
390 R_ERROR_STATUS_CSIDINVAL_MASK;
391 ibex_spi_host_irq(s);
394 s->regs[addr] = val32;
396 case IBEX_SPI_HOST_COMMAND:
397 s->regs[addr] = val32;
399 /* STALL, IP not enabled */
400 if (!(s->regs[IBEX_SPI_HOST_CONTROL] & R_CONTROL_SPIEN_MASK)) {
404 /* SPI not ready, IRQ Error */
405 if (!(s->regs[IBEX_SPI_HOST_STATUS] & R_STATUS_READY_MASK)) {
406 s->regs[IBEX_SPI_HOST_ERROR_STATUS] |= R_ERROR_STATUS_CMDBUSY_MASK;
407 ibex_spi_host_irq(s);
410 /* Assert Not Ready */
411 s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_READY_MASK;
413 if (((val32 & R_COMMAND_DIRECTION_MASK) >> R_COMMAND_DIRECTION_SHIFT)
414 != BIDIRECTIONAL_TRANSFER) {
415 qemu_log_mask(LOG_UNIMP,
416 "%s: Rx Only/Tx Only are not supported\n", __func__);
419 if (val32 & R_COMMAND_CSAAT_MASK) {
420 qemu_log_mask(LOG_UNIMP,
421 "%s: CSAAT is not supported\n", __func__);
423 if (val32 & R_COMMAND_SPEED_MASK) {
424 qemu_log_mask(LOG_UNIMP,
425 "%s: SPEED is not supported\n", __func__);
428 /* Set Transfer Callback */
429 timer_mod(s->fifo_trigger_handle,
430 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
431 (TX_INTERRUPT_TRIGGER_DELAY_NS));
434 case IBEX_SPI_HOST_TXDATA:
436 * This is a hardware `feature` where
437 * the first word written TXDATA after init is omitted entirely
439 if (s->init_status) {
440 s->init_status = false;
444 for (int i = 0; i < 4; ++i) {
445 /* Attempting to write when TXFULL */
446 if (fifo8_is_full(&s->tx_fifo)) {
447 /* Assert RXEMPTY, no IRQ */
448 s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_TXFULL_MASK;
449 s->regs[IBEX_SPI_HOST_ERROR_STATUS] |=
450 R_ERROR_STATUS_OVERFLOW_MASK;
451 ibex_spi_host_irq(s);
454 /* Byte ordering is set by the IP */
455 if ((s->regs[IBEX_SPI_HOST_STATUS] &
456 R_STATUS_BYTEORDER_MASK) == 0) {
457 /* LE: LSB transmitted first (default for ibex processor) */
458 shift_mask = 0xff << (i * 8);
460 /* BE: MSB transmitted first */
461 qemu_log_mask(LOG_UNIMP,
462 "%s: Big endian is not supported\n", __func__);
465 fifo8_push(&s->tx_fifo, (val32 & shift_mask) >> (i * 8));
469 s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_TXEMPTY_MASK;
471 txqd_len = (s->regs[IBEX_SPI_HOST_STATUS] &
472 R_STATUS_TXQD_MASK) >> R_STATUS_TXQD_SHIFT;
473 /* Partial bytes (size < 4) are padded, in words. */
475 s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_TXQD_MASK;
476 s->regs[IBEX_SPI_HOST_STATUS] |= txqd_len;
478 s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_READY_MASK;
480 case IBEX_SPI_HOST_ERROR_ENABLE:
481 s->regs[addr] = val32;
483 if (val32 & R_ERROR_ENABLE_CMDINVAL_MASK) {
484 qemu_log_mask(LOG_UNIMP,
485 "%s: Segment Length is not supported\n", __func__);
488 case IBEX_SPI_HOST_ERROR_STATUS:
490 * Indicates that any errors that have occurred.
491 * When an error occurs, the corresponding bit must be cleared
492 * here before issuing any further commands
494 s->regs[addr] = val32;
496 case IBEX_SPI_HOST_EVENT_ENABLE:
497 /* Controls which classes of SPI events raise an interrupt. */
498 s->regs[addr] = val32;
500 if (val32 & R_EVENT_ENABLE_RXWM_MASK) {
501 qemu_log_mask(LOG_UNIMP,
502 "%s: RXWM is not supported\n", __func__);
504 if (val32 & R_EVENT_ENABLE_TXWM_MASK) {
505 qemu_log_mask(LOG_UNIMP,
506 "%s: TXWM is not supported\n", __func__);
509 if (val32 & R_EVENT_ENABLE_IDLE_MASK) {
510 qemu_log_mask(LOG_UNIMP,
511 "%s: IDLE is not supported\n", __func__);
515 qemu_log_mask(LOG_GUEST_ERROR, "Bad offset 0x%" HWADDR_PRIx "\n",
520 static const MemoryRegionOps ibex_spi_ops = {
521 .read = ibex_spi_host_read,
522 .write = ibex_spi_host_write,
523 /* Ibex default LE */
524 .endianness = DEVICE_LITTLE_ENDIAN,
527 static Property ibex_spi_properties[] = {
528 DEFINE_PROP_UINT32("num_cs", IbexSPIHostState, num_cs, 1),
529 DEFINE_PROP_END_OF_LIST(),
532 static const VMStateDescription vmstate_ibex = {
533 .name = TYPE_IBEX_SPI_HOST,
535 .minimum_version_id = 1,
536 .fields = (VMStateField[]) {
537 VMSTATE_UINT32_ARRAY(regs, IbexSPIHostState, IBEX_SPI_HOST_MAX_REGS),
538 VMSTATE_VARRAY_UINT32(config_opts, IbexSPIHostState,
539 num_cs, 0, vmstate_info_uint32, uint32_t),
540 VMSTATE_FIFO8(rx_fifo, IbexSPIHostState),
541 VMSTATE_FIFO8(tx_fifo, IbexSPIHostState),
542 VMSTATE_TIMER_PTR(fifo_trigger_handle, IbexSPIHostState),
543 VMSTATE_BOOL(init_status, IbexSPIHostState),
544 VMSTATE_END_OF_LIST()
548 static void fifo_trigger_update(void *opaque)
550 IbexSPIHostState *s = opaque;
551 ibex_spi_host_transfer(s);
554 static void ibex_spi_host_realize(DeviceState *dev, Error **errp)
556 IbexSPIHostState *s = IBEX_SPI_HOST(dev);
559 s->ssi = ssi_create_bus(dev, "ssi");
560 s->cs_lines = g_new0(qemu_irq, s->num_cs);
562 for (i = 0; i < s->num_cs; ++i) {
563 sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->cs_lines[i]);
566 /* Setup CONFIGOPTS Multi-register */
567 s->config_opts = g_new0(uint32_t, s->num_cs);
569 /* Setup FIFO Interrupt Timer */
570 s->fifo_trigger_handle = timer_new_ns(QEMU_CLOCK_VIRTUAL,
571 fifo_trigger_update, s);
573 /* FIFO sizes as per OT Spec */
574 fifo8_create(&s->tx_fifo, IBEX_SPI_HOST_TXFIFO_LEN);
575 fifo8_create(&s->rx_fifo, IBEX_SPI_HOST_RXFIFO_LEN);
578 static void ibex_spi_host_init(Object *obj)
580 IbexSPIHostState *s = IBEX_SPI_HOST(obj);
582 sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->host_err);
583 sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->event);
585 memory_region_init_io(&s->mmio, obj, &ibex_spi_ops, s,
586 TYPE_IBEX_SPI_HOST, 0x1000);
587 sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio);
590 static void ibex_spi_host_class_init(ObjectClass *klass, void *data)
592 DeviceClass *dc = DEVICE_CLASS(klass);
593 dc->realize = ibex_spi_host_realize;
594 dc->reset = ibex_spi_host_reset;
595 dc->vmsd = &vmstate_ibex;
596 device_class_set_props(dc, ibex_spi_properties);
599 static const TypeInfo ibex_spi_host_info = {
600 .name = TYPE_IBEX_SPI_HOST,
601 .parent = TYPE_SYS_BUS_DEVICE,
602 .instance_size = sizeof(IbexSPIHostState),
603 .instance_init = ibex_spi_host_init,
604 .class_init = ibex_spi_host_class_init,
607 static void ibex_spi_host_register_types(void)
609 type_register_static(&ibex_spi_host_info);
612 type_init(ibex_spi_host_register_types)