// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2011-12 The Chromium OS Authors.
 *
 * This file is derived from the flashrom project.
 */

#define LOG_CATEGORY UCLASS_SPI
#include <common.h>
#include <bootstage.h>
#include <div64.h>
#include <dm.h>
#include <dt-structs.h>
#include <errno.h>
#include <log.h>
#include <pch.h>
#include <pci.h>
#include <spi.h>
#include <spi-mem.h>
#include <spi_flash.h>
#include <spl.h>
#include <asm/fast_spi.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/sizes.h>

#include "ich.h"
#ifdef DEBUG_TRACE
#define debug_trace(fmt, args...)	debug(fmt, ##args)
#else
#define debug_trace(x, args...)
#endif
struct ich_spi_platdata {
#if CONFIG_IS_ENABLED(OF_PLATDATA)
	struct dtd_intel_fast_spi dtplat;
#endif
	enum ich_version ich_version;	/* Controller version, 7 or 9 */
	bool lockdown;			/* lock down controller settings? */
	ulong mmio_base;		/* Base of MMIO registers */
	pci_dev_t bdf;			/* PCI address used by of-platdata */
	bool hwseq;			/* Use hardware sequencing (not s/w) */
};
static u8 ich_readb(struct ich_spi_priv *priv, int reg)
{
	u8 value = readb(priv->base + reg);

	debug_trace("read %2.2x from %4.4x\n", value, reg);

	return value;
}

static u16 ich_readw(struct ich_spi_priv *priv, int reg)
{
	u16 value = readw(priv->base + reg);

	debug_trace("read %4.4x from %4.4x\n", value, reg);

	return value;
}

static u32 ich_readl(struct ich_spi_priv *priv, int reg)
{
	u32 value = readl(priv->base + reg);

	debug_trace("read %8.8x from %4.4x\n", value, reg);

	return value;
}

static void ich_writeb(struct ich_spi_priv *priv, u8 value, int reg)
{
	writeb(value, priv->base + reg);
	debug_trace("wrote %2.2x to %4.4x\n", value, reg);
}

static void ich_writew(struct ich_spi_priv *priv, u16 value, int reg)
{
	writew(value, priv->base + reg);
	debug_trace("wrote %4.4x to %4.4x\n", value, reg);
}

static void ich_writel(struct ich_spi_priv *priv, u32 value, int reg)
{
	writel(value, priv->base + reg);
	debug_trace("wrote %8.8x to %4.4x\n", value, reg);
}
static void write_reg(struct ich_spi_priv *priv, const void *value,
		      int dest_reg, uint32_t size)
{
	memcpy_toio(priv->base + dest_reg, value, size);
}

static void read_reg(struct ich_spi_priv *priv, int src_reg, void *value,
		     uint32_t size)
{
	memcpy_fromio(value, priv->base + src_reg, size);
}
static void ich_set_bbar(struct ich_spi_priv *ctlr, uint32_t minaddr)
{
	const uint32_t bbar_mask = 0x00ffff00;
	uint32_t ichspi_bbar;

	if (ctlr->bbar) {
		minaddr &= bbar_mask;
		ichspi_bbar = ich_readl(ctlr, ctlr->bbar) & ~bbar_mask;
		ichspi_bbar |= minaddr;
		ich_writel(ctlr, ichspi_bbar, ctlr->bbar);
	}
}
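/*
 * ich_set_bbar() programs the minimum address (minaddr) the controller will
 * accept for software-sequenced cycles: the value is masked into bits 23:8
 * of the BBAR register (bbar_mask). ich_init_controller() calls
 * ich_set_bbar(ctlr, 0) so no lower limit is imposed, and controllers
 * without a BBAR (ctlr->bbar == 0) skip the write entirely.
 */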
/* @return 1 if the SPI flash supports the 33MHz speed */
static bool ich9_can_do_33mhz(struct udevice *dev)
{
	struct ich_spi_priv *priv = dev_get_priv(dev);
	u32 fdod, speed;

	if (!CONFIG_IS_ENABLED(PCI))
		return false;
	/* Observe SPI Descriptor Component Section 0 */
	dm_pci_write_config32(priv->pch, 0xb0, 0x1000);

	/* Extract the Write/Erase SPI Frequency from descriptor */
	dm_pci_read_config32(priv->pch, 0xb4, &fdod);

	/* Bits 23:21 have the fast read clock frequency, 0=20MHz, 1=33MHz */
	speed = (fdod >> 21) & 7;

	return speed == 1;
}
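/*
 * For example, if the descriptor dword read back above were 0x00200000,
 * bits 23:21 would decode to 1 and the function would report that 33MHz
 * operation is possible; a value of 0 in that field means only 20MHz is
 * supported. (Illustrative value only; the real FDOD contents depend on the
 * flash descriptor programmed into the board's SPI flash.)
 */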
static void spi_lock_down(struct ich_spi_platdata *plat, void *sbase)
{
	if (plat->ich_version == ICHV_7) {
		struct ich7_spi_regs *ich7_spi = sbase;

		setbits_le16(&ich7_spi->spis, SPIS_LOCK);
	} else if (plat->ich_version == ICHV_9) {
		struct ich9_spi_regs *ich9_spi = sbase;

		setbits_le16(&ich9_spi->hsfs, HSFS_FLOCKDN);
	}
}
static bool spi_lock_status(struct ich_spi_platdata *plat, void *sbase)
{
	int lock = 0;

	if (plat->ich_version == ICHV_7) {
		struct ich7_spi_regs *ich7_spi = sbase;

		lock = readw(&ich7_spi->spis) & SPIS_LOCK;
	} else if (plat->ich_version == ICHV_9) {
		struct ich9_spi_regs *ich9_spi = sbase;

		lock = readw(&ich9_spi->hsfs) & HSFS_FLOCKDN;
	}

	return lock != 0;
}
static int spi_setup_opcode(struct ich_spi_priv *ctlr, struct spi_trans *trans,
			    bool lock)
{
	uint16_t optypes;
	uint8_t opmenu[ctlr->menubytes];

	if (!lock) {
		/* The lock is off, so just use index 0. */
		ich_writeb(ctlr, trans->opcode, ctlr->opmenu);
		optypes = ich_readw(ctlr, ctlr->optype);
		optypes = (optypes & 0xfffc) | (trans->type & 0x3);
		ich_writew(ctlr, optypes, ctlr->optype);
		return 0;
	} else {
		/* The lock is on. See if what we need is on the menu. */
		uint8_t optype;
		uint16_t opcode_index;

		/* Write Enable is handled as atomic prefix */
		if (trans->opcode == SPI_OPCODE_WREN)
			return 0;

		read_reg(ctlr, ctlr->opmenu, opmenu, sizeof(opmenu));
		for (opcode_index = 0; opcode_index < ctlr->menubytes;
				opcode_index++) {
			if (opmenu[opcode_index] == trans->opcode)
				break;
		}

		if (opcode_index == ctlr->menubytes) {
			debug("ICH SPI: Opcode %x not found\n", trans->opcode);
			return -EINVAL;
		}

		optypes = ich_readw(ctlr, ctlr->optype);
		optype = (optypes >> (opcode_index * 2)) & 0x3;

		if (optype != trans->type) {
			debug("ICH SPI: Transaction doesn't fit type %d\n",
			      optype);
			return -ENOSPC;
		}
		return opcode_index;
	}
}
/*
 * Wait for up to 6s for the status register bit(s) to become 1 (when
 * wait_til_set is true) or 0. If the wait was for the bit(s) to become set,
 * write those bits back, which causes them to be cleared again.
 *
 * Return the last read status value on success or -ETIMEDOUT on timeout.
 */
static int ich_status_poll(struct ich_spi_priv *ctlr, u16 bitmask,
			   int wait_til_set)
{
	int timeout = 600000; /* This will result in 6s */
	u16 status = 0;

	while (timeout--) {
		status = ich_readw(ctlr, ctlr->status);
		if (wait_til_set ^ ((status & bitmask) == 0)) {
			if (wait_til_set) {
				ich_writew(ctlr, status & bitmask,
					   ctlr->status);
			}
			return status;
		}
		udelay(10);
	}
	debug("ICH SPI: SCIP timeout, read %x, expected %x, wts %x %x\n",
	      status, bitmask, wait_til_set, status & bitmask);

	return -ETIMEDOUT;
}
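/*
 * ich_spi_exec_op_swseq() below uses this helper in two ways:
 * ich_status_poll(ctlr, SPIS_SCIP, 0) waits for any previous cycle to finish
 * (SCIP clear) before a new one is programmed, while
 * ich_status_poll(ctlr, SPIS_CDS | SPIS_FCERR, 1) waits for the current
 * cycle to complete or fail; in that case the set bits are written back so
 * they are already cleared for the next cycle.
 */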
static void ich_spi_config_opcode(struct udevice *dev)
{
	struct ich_spi_priv *ctlr = dev_get_priv(dev);

	/*
	 * PREOP, OPTYPE, OPMENU1/OPMENU2 registers can be locked down
	 * to prevent accidental or intentional writes. Before they get
	 * locked down, these registers should be initialized properly.
	 */
	ich_writew(ctlr, SPI_OPPREFIX, ctlr->preop);
	ich_writew(ctlr, SPI_OPTYPE, ctlr->optype);
	ich_writel(ctlr, SPI_OPMENU_LOWER, ctlr->opmenu);
	ich_writel(ctlr, SPI_OPMENU_UPPER, ctlr->opmenu + sizeof(u32));
}
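/*
 * The opcode menu programmed here is a small table: OPMENU1/OPMENU2 hold one
 * opcode per byte (ctlr->menubytes entries) and OPTYPE holds a 2-bit type
 * (read/write, with/without address) per entry, which is why
 * spi_setup_opcode() above extracts it as (optypes >> (opcode_index * 2)) & 3.
 * The SPI_OPPREFIX, SPI_OPTYPE and SPI_OPMENU_* values come from ich.h.
 */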
static int ich_spi_exec_op_swseq(struct spi_slave *slave,
				 const struct spi_mem_op *op)
{
	struct udevice *bus = dev_get_parent(slave->dev);
	struct ich_spi_platdata *plat = dev_get_platdata(bus);
	struct ich_spi_priv *ctlr = dev_get_priv(bus);
	uint16_t control;
	int16_t opcode_index;
	int with_address;
	int status;
	struct spi_trans *trans = &ctlr->trans;
	bool lock = spi_lock_status(plat, ctlr->base);
	int ret = 0;

	trans->in = NULL;
	trans->out = NULL;
	trans->type = 0xFF;

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN) {
			trans->in = op->data.buf.in;
			trans->bytesin = op->data.nbytes;
		} else {
			trans->out = op->data.buf.out;
			trans->bytesout = op->data.nbytes;
		}
	}

	if (trans->opcode != op->cmd.opcode)
		trans->opcode = op->cmd.opcode;

	if (lock && trans->opcode == SPI_OPCODE_WRDIS)
		return 0;

	if (trans->opcode == SPI_OPCODE_WREN) {
		/*
		 * Treat Write Enable as Atomic Pre-Op if possible
		 * in order to prevent the Management Engine from
		 * issuing a transaction between WREN and DATA.
		 */
		if (!lock)
			ich_writew(ctlr, trans->opcode, ctlr->preop);
		return 0;
	}

	ret = ich_status_poll(ctlr, SPIS_SCIP, 0);
	if (ret < 0)
		return ret;

	if (plat->ich_version == ICHV_7)
		ich_writew(ctlr, SPIS_CDS | SPIS_FCERR, ctlr->status);
	else
		ich_writeb(ctlr, SPIS_CDS | SPIS_FCERR, ctlr->status);

	/* Try to guess spi transaction type */
	if (op->data.dir == SPI_MEM_DATA_OUT) {
		if (op->addr.nbytes)
			trans->type = SPI_OPCODE_TYPE_WRITE_WITH_ADDRESS;
		else
			trans->type = SPI_OPCODE_TYPE_WRITE_NO_ADDRESS;
	} else {
		if (op->addr.nbytes)
			trans->type = SPI_OPCODE_TYPE_READ_WITH_ADDRESS;
		else
			trans->type = SPI_OPCODE_TYPE_READ_NO_ADDRESS;
	}
	/* Special erase case handling */
	if (op->addr.nbytes && !op->data.buswidth)
		trans->type = SPI_OPCODE_TYPE_WRITE_WITH_ADDRESS;

	opcode_index = spi_setup_opcode(ctlr, trans, lock);
	if (opcode_index < 0)
		return -EINVAL;

	if (op->addr.nbytes) {
		trans->offset = op->addr.val;
		with_address = 1;
	}

	if (ctlr->speed && ctlr->max_speed >= 33000000) {
		int byte;

		byte = ich_readb(ctlr, ctlr->speed);
		if (ctlr->cur_speed >= 33000000)
			byte |= SSFC_SCF_33MHZ;
		else
			byte &= ~SSFC_SCF_33MHZ;
		ich_writeb(ctlr, byte, ctlr->speed);
	}

	/* Preset control fields */
	control = SPIC_SCGO | ((opcode_index & 0x07) << 4);

	/* Issue atomic preop cycle if needed */
	if (ich_readw(ctlr, ctlr->preop))
		control |= SPIC_ACS;

	if (!trans->bytesout && !trans->bytesin) {
		/* SPI addresses are 24 bit only */
		if (with_address) {
			ich_writel(ctlr, trans->offset & 0x00FFFFFF,
				   ctlr->addr);
		}
		/*
		 * This is a 'no data' command (like Write Enable), its
		 * bytesout size was 1, decremented to zero while executing
		 * spi_setup_opcode() above. Tell the chip to send the
		 * stored opcode.
		 */
		ich_writew(ctlr, control, ctlr->control);

		/* wait for the result */
		status = ich_status_poll(ctlr, SPIS_CDS | SPIS_FCERR, 1);
		if (status < 0)
			return status;

		if (status & SPIS_FCERR) {
			debug("ICH SPI: Command transaction error\n");
			return -EIO;
		}

		return 0;
	}

	while (trans->bytesout || trans->bytesin) {
		uint32_t data_length;

		/* SPI addresses are 24 bit only */
		ich_writel(ctlr, trans->offset & 0x00FFFFFF, ctlr->addr);

		if (trans->bytesout)
			data_length = min(trans->bytesout, ctlr->databytes);
		else
			data_length = min(trans->bytesin, ctlr->databytes);

		/* Program data into FDATA0 to N */
		if (trans->bytesout) {
			write_reg(ctlr, trans->out, ctlr->data, data_length);
			trans->bytesout -= data_length;
		}

		/* Add proper control fields' values */
		control &= ~((ctlr->databytes - 1) << 8);
		control |= SPIC_DS;
		control |= (data_length - 1) << 8;

		/* write it */
		ich_writew(ctlr, control, ctlr->control);

		/* Wait for Cycle Done Status or Flash Cycle Error */
		status = ich_status_poll(ctlr, SPIS_CDS | SPIS_FCERR, 1);
		if (status < 0)
			return status;

		if (status & SPIS_FCERR) {
			debug("ICH SPI: Data transaction error %x\n", status);
			return -EIO;
		}

		if (trans->bytesin) {
			read_reg(ctlr, ctlr->data, trans->in, data_length);
			trans->bytesin -= data_length;
		}
	}

	/* Clear atomic preop now that xfer is done */
	if (!lock)
		ich_writew(ctlr, 0, ctlr->preop);

	return 0;
}
/*
 * Ensure read/write xfer len is not greater than SPIBAR_FDATA_FIFO_SIZE and
 * that the operation does not cross page boundary.
 */
static uint get_xfer_len(u32 offset, int len, int page_size)
{
	uint xfer_len = min(len, SPIBAR_FDATA_FIFO_SIZE);
	uint bytes_left = ALIGN(offset, page_size) - offset;

	if (bytes_left)
		xfer_len = min(xfer_len, bytes_left);

	return xfer_len;
}
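/*
 * For example, assuming a 64-byte FDATA FIFO (SPIBAR_FDATA_FIFO_SIZE) and a
 * 256-byte page, a 100-byte write starting at offset 0x1f0 is clipped first
 * to 64 bytes by the FIFO size and then to 16 bytes (0x200 - 0x1f0) so it
 * stops at the page boundary; the caller's loop then continues from 0x200,
 * where the full FIFO size can be used again.
 */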
/* Fill FDATAn FIFO in preparation for a write transaction */
static void fill_xfer_fifo(struct fast_spi_regs *regs, const void *data,
			   uint len)
{
	memcpy(regs->fdata, data, len);
}

/* Drain FDATAn FIFO after a read transaction populates data */
static void drain_xfer_fifo(struct fast_spi_regs *regs, void *dest, uint len)
{
	memcpy(dest, regs->fdata, len);
}
/* Fire up a transfer using the hardware sequencer */
static void start_hwseq_xfer(struct fast_spi_regs *regs, uint hsfsts_cycle,
			     uint offset, uint len)
{
	/* Make sure all W1C status bits get cleared */
	u32 hsfsts;

	hsfsts = readl(&regs->hsfsts_ctl);
	hsfsts &= ~(HSFSTS_FCYCLE_MASK | HSFSTS_FDBC_MASK);
	hsfsts |= HSFSTS_AEL | HSFSTS_FCERR | HSFSTS_FDONE;

	/* Set up transaction parameters */
	hsfsts |= hsfsts_cycle << HSFSTS_FCYCLE_SHIFT;
	hsfsts |= ((len - 1) << HSFSTS_FDBC_SHIFT) & HSFSTS_FDBC_MASK;
	hsfsts |= HSFSTS_FGO;

	writel(offset, &regs->faddr);
	writel(hsfsts, &regs->hsfsts_ctl);
}
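/*
 * start_hwseq_xfer() builds a single HSFSTS_CTL value: writing back the W1C
 * status bits (FDONE, FCERR, AEL) clears any state left over from the
 * previous cycle, FCYCLE selects the cycle type, FDBC is programmed as
 * len - 1, and FGO starts the cycle. The flash address goes into FADDR just
 * before the control write; completion or error is then picked up by
 * wait_for_hwseq_xfer() below.
 */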
static int wait_for_hwseq_xfer(struct fast_spi_regs *regs, uint offset)
{
	ulong start;
	u32 hsfsts;

	start = get_timer(0);
	do {
		hsfsts = readl(&regs->hsfsts_ctl);
		if (hsfsts & HSFSTS_FCERR) {
			debug("SPI transaction error at offset %x HSFSTS = %08x\n",
			      offset, hsfsts);
			return -EIO;
		}
		if (hsfsts & HSFSTS_AEL)
			return -EPERM;

		if (hsfsts & HSFSTS_FDONE)
			return 0;
	} while (get_timer(start) < SPIBAR_HWSEQ_XFER_TIMEOUT_MS);

	debug("SPI transaction timeout at offset %x HSFSTS = %08x, timer %d\n",
	      offset, hsfsts, (uint)get_timer(start));

	return -ETIMEDOUT;
}
/**
 * exec_sync_hwseq_xfer() - Execute flash transfer by hardware sequencing
 *
 * This waits until complete or timeout
 *
 * @regs: SPI registers
 * @hsfsts_cycle: Cycle type (enum hsfsts_cycle_t)
 * @offset: Offset to access
 * @len: Number of bytes to transfer (can be 0)
 * @return 0 if OK, -EIO on flash-cycle error (FCERR), -EPERM on access error
 *	(AEL), -ETIMEDOUT on timeout
 */
static int exec_sync_hwseq_xfer(struct fast_spi_regs *regs, uint hsfsts_cycle,
				uint offset, uint len)
{
	start_hwseq_xfer(regs, hsfsts_cycle, offset, len);

	return wait_for_hwseq_xfer(regs, offset);
}
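/*
 * Example: a 4KiB sector erase carries no data, so the caller in
 * ich_spi_exec_op_hwseq() below simply issues
 * exec_sync_hwseq_xfer(regs, HSFSTS_CYCLE_4K_ERASE, offset, 0) and returns;
 * reads and writes instead go through the chunking loop so each cycle fits
 * the FDATA FIFO and stays within one flash page.
 */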
static int ich_spi_exec_op_hwseq(struct spi_slave *slave,
				 const struct spi_mem_op *op)
{
	struct spi_flash *flash = dev_get_uclass_priv(slave->dev);
	struct udevice *bus = dev_get_parent(slave->dev);
	struct ich_spi_priv *priv = dev_get_priv(bus);
	struct fast_spi_regs *regs = priv->base;
	uint page_size;
	uint offset;
	int cycle;
	uint len;
	bool out;
	int ret;
	u8 *buf;

	offset = op->addr.val;
	len = op->data.nbytes;

	switch (op->cmd.opcode) {
	case SPINOR_OP_RDID:
		cycle = HSFSTS_CYCLE_RDID;
		break;
	case SPINOR_OP_READ_FAST:
		cycle = HSFSTS_CYCLE_READ;
		break;
	case SPINOR_OP_PP:
		cycle = HSFSTS_CYCLE_WRITE;
		break;
	case SPINOR_OP_WREN:
		/* Nothing needs to be done */
		return 0;
	case SPINOR_OP_WRSR:
		cycle = HSFSTS_CYCLE_WR_STATUS;
		break;
	case SPINOR_OP_RDSR:
		cycle = HSFSTS_CYCLE_RD_STATUS;
		break;
	case SPINOR_OP_WRDI:
		return 0;  /* ignore */
	case SPINOR_OP_BE_4K:
		cycle = HSFSTS_CYCLE_4K_ERASE;
		ret = exec_sync_hwseq_xfer(regs, cycle, offset, 0);
		return ret;
	default:
		debug("Unknown cycle %x\n", op->cmd.opcode);
		return -EINVAL;
	}

	out = op->data.dir == SPI_MEM_DATA_OUT;
	buf = out ? (u8 *)op->data.buf.out : op->data.buf.in;
	page_size = flash->page_size ? : 256;

	while (len) {
		uint xfer_len = get_xfer_len(offset, len, page_size);

		if (out)
			fill_xfer_fifo(regs, buf, xfer_len);

		ret = exec_sync_hwseq_xfer(regs, cycle, offset, xfer_len);
		if (ret)
			return ret;

		if (!out)
			drain_xfer_fifo(regs, buf, xfer_len);

		offset += xfer_len;
		buf += xfer_len;
		len -= xfer_len;
	}

	return 0;
}
static int ich_spi_exec_op(struct spi_slave *slave, const struct spi_mem_op *op)
{
	struct udevice *bus = dev_get_parent(slave->dev);
	struct ich_spi_platdata *plat = dev_get_platdata(bus);
	int ret;

	bootstage_start(BOOTSTAGE_ID_ACCUM_SPI, "fast_spi");
	if (plat->hwseq)
		ret = ich_spi_exec_op_hwseq(slave, op);
	else
		ret = ich_spi_exec_op_swseq(slave, op);
	bootstage_accum(BOOTSTAGE_ID_ACCUM_SPI);

	return ret;
}
static int ich_get_mmap_bus(struct udevice *bus, ulong *map_basep,
			    uint *map_sizep, uint *offsetp)
{
	pci_dev_t spi_bdf;

#if !CONFIG_IS_ENABLED(OF_PLATDATA)
	struct pci_child_platdata *pplat = dev_get_parent_platdata(bus);

	spi_bdf = pplat->devfn;
#else
	struct ich_spi_platdata *plat = dev_get_platdata(bus);

	/*
	 * We cannot rely on plat->bdf being set up yet since this method can
	 * be called before the device is probed. Use the of-platdata directly
	 * instead.
	 */
	spi_bdf = pci_ofplat_get_devfn(plat->dtplat.reg[0]);
#endif

	return fast_spi_get_bios_mmap(spi_bdf, map_basep, map_sizep, offsetp);
}
static int ich_get_mmap(struct udevice *dev, ulong *map_basep, uint *map_sizep,
			uint *offsetp)
{
	struct udevice *bus = dev_get_parent(dev);

	return ich_get_mmap_bus(bus, map_basep, map_sizep, offsetp);
}
static int ich_spi_adjust_size(struct spi_slave *slave, struct spi_mem_op *op)
{
	unsigned int page_offset;
	int addr = op->addr.val;
	unsigned int byte_count = op->data.nbytes;

	if (hweight32(ICH_BOUNDARY) == 1) {
		page_offset = addr & (ICH_BOUNDARY - 1);
	} else {
		u64 aux = addr;

		page_offset = do_div(aux, ICH_BOUNDARY);
	}

	if (op->data.dir == SPI_MEM_DATA_IN) {
		if (slave->max_read_size) {
			op->data.nbytes = min(ICH_BOUNDARY - page_offset,
					      slave->max_read_size);
		}
	} else if (slave->max_write_size) {
		op->data.nbytes = min(ICH_BOUNDARY - page_offset,
				      slave->max_write_size);
	}

	op->data.nbytes = min(op->data.nbytes, byte_count);

	return 0;
}
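/*
 * Worked example, assuming ICH_BOUNDARY is 4KiB (0x1000): a 0x100-byte write
 * to address 0xff0 has page_offset = 0xff0, so nbytes is first limited to
 * ICH_BOUNDARY - page_offset = 0x10 (capped by max_write_size if that is
 * smaller) and then to the original byte_count; the caller then issues the
 * remaining 0xf0 bytes as follow-up operations starting at 0x1000.
 */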
static int ich_protect_lockdown(struct udevice *dev)
{
	struct ich_spi_platdata *plat = dev_get_platdata(dev);
	struct ich_spi_priv *priv = dev_get_priv(dev);
	int ret = -ENOSYS;

	/* Disable the BIOS write protect so write commands are allowed */
	if (priv->pch)
		ret = pch_set_spi_protect(priv->pch, false);
	if (ret == -ENOSYS) {
		u8 bios_cntl;

		bios_cntl = ich_readb(priv, priv->bcr);
		bios_cntl &= ~BIT(5);	/* clear Enable InSMM_STS (EISS) */
		bios_cntl |= 1;		/* Write Protect Disable (WPD) */
		ich_writeb(priv, bios_cntl, priv->bcr);
	} else if (ret) {
		debug("%s: Failed to disable write-protect: err=%d\n",
		      __func__, ret);
		return ret;
	}

	/* Lock down SPI controller settings if required */
	if (plat->lockdown) {
		ich_spi_config_opcode(dev);
		spi_lock_down(plat, priv->base);
	}

	return 0;
}
static int ich_init_controller(struct udevice *dev,
			       struct ich_spi_platdata *plat,
			       struct ich_spi_priv *ctlr)
{
	if (spl_phase() == PHASE_TPL) {
		struct ich_spi_platdata *plat = dev_get_platdata(dev);
		int ret;

		ret = fast_spi_early_init(plat->bdf, plat->mmio_base);
		if (ret)
			return ret;
	}

	ctlr->base = (void *)plat->mmio_base;
	if (plat->ich_version == ICHV_7) {
		struct ich7_spi_regs *ich7_spi = ctlr->base;

		ctlr->opmenu = offsetof(struct ich7_spi_regs, opmenu);
		ctlr->menubytes = sizeof(ich7_spi->opmenu);
		ctlr->optype = offsetof(struct ich7_spi_regs, optype);
		ctlr->addr = offsetof(struct ich7_spi_regs, spia);
		ctlr->data = offsetof(struct ich7_spi_regs, spid);
		ctlr->databytes = sizeof(ich7_spi->spid);
		ctlr->status = offsetof(struct ich7_spi_regs, spis);
		ctlr->control = offsetof(struct ich7_spi_regs, spic);
		ctlr->bbar = offsetof(struct ich7_spi_regs, bbar);
		ctlr->preop = offsetof(struct ich7_spi_regs, preop);
	} else if (plat->ich_version == ICHV_9) {
		struct ich9_spi_regs *ich9_spi = ctlr->base;

		ctlr->opmenu = offsetof(struct ich9_spi_regs, opmenu);
		ctlr->menubytes = sizeof(ich9_spi->opmenu);
		ctlr->optype = offsetof(struct ich9_spi_regs, optype);
		ctlr->addr = offsetof(struct ich9_spi_regs, faddr);
		ctlr->data = offsetof(struct ich9_spi_regs, fdata);
		ctlr->databytes = sizeof(ich9_spi->fdata);
		ctlr->status = offsetof(struct ich9_spi_regs, ssfs);
		ctlr->control = offsetof(struct ich9_spi_regs, ssfc);
		ctlr->speed = ctlr->control + 2;
		ctlr->bbar = offsetof(struct ich9_spi_regs, bbar);
		ctlr->preop = offsetof(struct ich9_spi_regs, preop);
		ctlr->bcr = offsetof(struct ich9_spi_regs, bcr);
		ctlr->pr = &ich9_spi->pr[0];
	} else if (plat->ich_version == ICHV_APL) {
	} else {
		debug("ICH SPI: Unrecognised ICH version %d\n",
		      plat->ich_version);
		return -EINVAL;
	}

	/* Work out the maximum speed we can support */
	ctlr->max_speed = 20000000;
	if (plat->ich_version == ICHV_9 && ich9_can_do_33mhz(dev))
		ctlr->max_speed = 33000000;
	debug("ICH SPI: Version ID %d detected at %lx, speed %ld\n",
	      plat->ich_version, plat->mmio_base, ctlr->max_speed);

	ich_set_bbar(ctlr, 0);

	return 0;
}
static int ich_cache_bios_region(struct udevice *dev)
{
	ulong map_base;
	uint map_size;
	uint offset;
	ulong base;
	int ret;

	ret = ich_get_mmap_bus(dev, &map_base, &map_size, &offset);
	if (ret)
		return ret;

	/* Don't use WRBACK since we are not supposed to write to SPI flash */
	base = SZ_4G - map_size;
	mtrr_set_next_var(MTRR_TYPE_WRPROT, base, map_size);
	log_debug("BIOS cache base=%lx, size=%x\n", base, (uint)map_size);

	return 0;
}
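/*
 * For example, with a 16MiB BIOS region mapped just below 4GiB, base works
 * out to SZ_4G - SZ_16M = 0xff000000, so the variable MTRR marks
 * 0xff000000..0xffffffff as write-protect: reads of the memory-mapped flash
 * are cached while writes are not, matching the comment above about not
 * using write-back.
 */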
static int ich_spi_probe(struct udevice *dev)
{
	struct ich_spi_platdata *plat = dev_get_platdata(dev);
	struct ich_spi_priv *priv = dev_get_priv(dev);
	int ret;

	ret = ich_init_controller(dev, plat, priv);
	if (ret)
		return ret;

	if (spl_phase() == PHASE_TPL) {
		/* Cache the BIOS to speed things up */
		ret = ich_cache_bios_region(dev);
		if (ret)
			return ret;
	} else {
		ret = ich_protect_lockdown(dev);
		if (ret)
			return ret;
	}
	priv->cur_speed = priv->max_speed;

	return 0;
}
static int ich_spi_remove(struct udevice *bus)
{
	/*
	 * Configure SPI controller so that the Linux MTD driver can fully
	 * access the SPI NOR chip
	 */
	ich_spi_config_opcode(bus);

	return 0;
}
static int ich_spi_set_speed(struct udevice *bus, uint speed)
{
	struct ich_spi_priv *priv = dev_get_priv(bus);

	priv->cur_speed = speed;

	return 0;
}
static int ich_spi_set_mode(struct udevice *bus, uint mode)
{
	debug("%s: mode=%d\n", __func__, mode);

	return 0;
}
static int ich_spi_child_pre_probe(struct udevice *dev)
{
	struct udevice *bus = dev_get_parent(dev);
	struct ich_spi_platdata *plat = dev_get_platdata(bus);
	struct ich_spi_priv *priv = dev_get_priv(bus);
	struct spi_slave *slave = dev_get_parent_priv(dev);

	/*
	 * Yes this controller can only write a small number of bytes at
	 * once! The limit is typically 64 bytes. For hardware sequencing a
	 * loop is used to get around this.
	 */
	if (!plat->hwseq)
		slave->max_write_size = priv->databytes;
	/*
	 * ICH 7 SPI controller only supports array read command
	 * and byte program command for SST flash
	 */
	if (plat->ich_version == ICHV_7)
		slave->mode = SPI_RX_SLOW | SPI_TX_BYTE;

	return 0;
}
static int ich_spi_ofdata_to_platdata(struct udevice *dev)
{
	struct ich_spi_platdata *plat = dev_get_platdata(dev);

#if !CONFIG_IS_ENABLED(OF_PLATDATA)
	struct ich_spi_priv *priv = dev_get_priv(dev);

	/* Find a PCH if there is one */
	uclass_first_device(UCLASS_PCH, &priv->pch);
	if (!priv->pch)
		priv->pch = dev_get_parent(dev);

	plat->ich_version = dev_get_driver_data(dev);
	plat->lockdown = dev_read_bool(dev, "intel,spi-lock-down");
	if (plat->ich_version == ICHV_APL) {
		plat->mmio_base = dm_pci_read_bar32(dev, 0);
	} else {
		/* SBASE is similar */
		pch_get_spi_base(priv->pch, &plat->mmio_base);
	}
	/*
	 * Use an int so that the property is present in of-platdata even
	 * when it is false.
	 */
	plat->hwseq = dev_read_u32_default(dev, "intel,hardware-seq", 0);
#else
	plat->ich_version = ICHV_APL;
	plat->mmio_base = plat->dtplat.early_regs[0];
	plat->bdf = pci_ofplat_get_devfn(plat->dtplat.reg[0]);
	plat->hwseq = plat->dtplat.intel_hardware_seq;
#endif
	debug("%s: mmio_base=%lx\n", __func__, plat->mmio_base);

	return 0;
}
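/*
 * Illustrative device-tree fragment for the properties read above (the node
 * name and early-regs address are placeholders, not taken from a real board
 * file):
 *
 *	fast-spi@d,2 {
 *		compatible = "intel,fast-spi";
 *		early-regs = <0xfe010000 0x1000>;
 *		intel,hardware-seq = <1>;
 *	};
 *
 * "intel,spi-lock-down" is a boolean property; when present, the controller
 * settings are locked after ich_protect_lockdown() runs.
 */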
static const struct spi_controller_mem_ops ich_controller_mem_ops = {
	.adjust_op_size	= ich_spi_adjust_size,
	.exec_op	= ich_spi_exec_op,
};
static const struct dm_spi_ops ich_spi_ops = {
	/* xfer is not supported */
	.set_speed	= ich_spi_set_speed,
	.set_mode	= ich_spi_set_mode,
	.mem_ops	= &ich_controller_mem_ops,
	.get_mmap	= ich_get_mmap,
	/*
	 * cs_info is not needed, since we require all chip selects to be
	 * in the device tree explicitly
	 */
};
static const struct udevice_id ich_spi_ids[] = {
	{ .compatible = "intel,ich7-spi", .data = ICHV_7 },
	{ .compatible = "intel,ich9-spi", .data = ICHV_9 },
	{ .compatible = "intel,fast-spi", .data = ICHV_APL },
	{ }
};
U_BOOT_DRIVER(intel_fast_spi) = {
	.name	= "intel_fast_spi",
	.id	= UCLASS_SPI,
	.of_match = ich_spi_ids,
	.ops	= &ich_spi_ops,
	.ofdata_to_platdata = ich_spi_ofdata_to_platdata,
	.platdata_auto_alloc_size = sizeof(struct ich_spi_platdata),
	.priv_auto_alloc_size = sizeof(struct ich_spi_priv),
	.child_pre_probe = ich_spi_child_pre_probe,
	.probe	= ich_spi_probe,
	.remove	= ich_spi_remove,
	.flags	= DM_FLAG_OS_PREPARE,
};