]> Git Repo - J-linux.git/blob - drivers/spi/spi-intel.c
Merge tag 'vfs-6.13-rc7.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs
[J-linux.git] / drivers / spi / spi-intel.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Intel PCH/PCU SPI flash driver.
4  *
5  * Copyright (C) 2016 - 2022, Intel Corporation
6  * Author: Mika Westerberg <[email protected]>
7  */
8
9 #include <linux/iopoll.h>
10 #include <linux/module.h>
11
12 #include <linux/mtd/partitions.h>
13 #include <linux/mtd/spi-nor.h>
14
15 #include <linux/spi/flash.h>
16 #include <linux/spi/spi.h>
17 #include <linux/spi/spi-mem.h>
18
19 #include "spi-intel.h"
20
21 /* Offsets are from @ispi->base */
22 #define BFPREG                          0x00
23
24 #define HSFSTS_CTL                      0x04
25 #define HSFSTS_CTL_FSMIE                BIT(31)
26 #define HSFSTS_CTL_FDBC_SHIFT           24
27 #define HSFSTS_CTL_FDBC_MASK            (0x3f << HSFSTS_CTL_FDBC_SHIFT)
28
29 #define HSFSTS_CTL_FCYCLE_SHIFT         17
30 #define HSFSTS_CTL_FCYCLE_MASK          (0x0f << HSFSTS_CTL_FCYCLE_SHIFT)
31 /* HW sequencer opcodes */
32 #define HSFSTS_CTL_FCYCLE_READ          (0x00 << HSFSTS_CTL_FCYCLE_SHIFT)
33 #define HSFSTS_CTL_FCYCLE_WRITE         (0x02 << HSFSTS_CTL_FCYCLE_SHIFT)
34 #define HSFSTS_CTL_FCYCLE_ERASE         (0x03 << HSFSTS_CTL_FCYCLE_SHIFT)
35 #define HSFSTS_CTL_FCYCLE_ERASE_64K     (0x04 << HSFSTS_CTL_FCYCLE_SHIFT)
36 #define HSFSTS_CTL_FCYCLE_RDSFDP        (0x05 << HSFSTS_CTL_FCYCLE_SHIFT)
37 #define HSFSTS_CTL_FCYCLE_RDID          (0x06 << HSFSTS_CTL_FCYCLE_SHIFT)
38 #define HSFSTS_CTL_FCYCLE_WRSR          (0x07 << HSFSTS_CTL_FCYCLE_SHIFT)
39 #define HSFSTS_CTL_FCYCLE_RDSR          (0x08 << HSFSTS_CTL_FCYCLE_SHIFT)
40
41 #define HSFSTS_CTL_FGO                  BIT(16)
42 #define HSFSTS_CTL_FLOCKDN              BIT(15)
43 #define HSFSTS_CTL_FDV                  BIT(14)
44 #define HSFSTS_CTL_SCIP                 BIT(5)
45 #define HSFSTS_CTL_AEL                  BIT(2)
46 #define HSFSTS_CTL_FCERR                BIT(1)
47 #define HSFSTS_CTL_FDONE                BIT(0)
48
49 #define FADDR                           0x08
50 #define DLOCK                           0x0c
51 #define FDATA(n)                        (0x10 + ((n) * 4))
52
53 #define FRACC                           0x50
54
55 #define FREG(n)                         (0x54 + ((n) * 4))
56 #define FREG_BASE_MASK                  GENMASK(14, 0)
57 #define FREG_LIMIT_SHIFT                16
58 #define FREG_LIMIT_MASK                 GENMASK(30, 16)
59
60 /* Offset is from @ispi->pregs */
61 #define PR(n)                           ((n) * 4)
62 #define PR_WPE                          BIT(31)
63 #define PR_LIMIT_SHIFT                  16
64 #define PR_LIMIT_MASK                   GENMASK(30, 16)
65 #define PR_RPE                          BIT(15)
66 #define PR_BASE_MASK                    GENMASK(14, 0)
67
68 /* Offsets are from @ispi->sregs */
69 #define SSFSTS_CTL                      0x00
70 #define SSFSTS_CTL_FSMIE                BIT(23)
71 #define SSFSTS_CTL_DS                   BIT(22)
72 #define SSFSTS_CTL_DBC_SHIFT            16
73 #define SSFSTS_CTL_SPOP                 BIT(11)
74 #define SSFSTS_CTL_ACS                  BIT(10)
75 #define SSFSTS_CTL_SCGO                 BIT(9)
76 #define SSFSTS_CTL_COP_SHIFT            12
77 #define SSFSTS_CTL_FRS                  BIT(7)
78 #define SSFSTS_CTL_DOFRS                BIT(6)
79 #define SSFSTS_CTL_AEL                  BIT(4)
80 #define SSFSTS_CTL_FCERR                BIT(3)
81 #define SSFSTS_CTL_FDONE                BIT(2)
82 #define SSFSTS_CTL_SCIP                 BIT(0)
83
84 #define PREOP_OPTYPE                    0x04
85 #define OPMENU0                         0x08
86 #define OPMENU1                         0x0c
87
88 #define OPTYPE_READ_NO_ADDR             0
89 #define OPTYPE_WRITE_NO_ADDR            1
90 #define OPTYPE_READ_WITH_ADDR           2
91 #define OPTYPE_WRITE_WITH_ADDR          3
92
93 /* CPU specifics */
94 #define BYT_PR                          0x74
95 #define BYT_SSFSTS_CTL                  0x90
96 #define BYT_FREG_NUM                    5
97 #define BYT_PR_NUM                      5
98
99 #define LPT_PR                          0x74
100 #define LPT_SSFSTS_CTL                  0x90
101 #define LPT_FREG_NUM                    5
102 #define LPT_PR_NUM                      5
103
104 #define BXT_PR                          0x84
105 #define BXT_SSFSTS_CTL                  0xa0
106 #define BXT_FREG_NUM                    12
107 #define BXT_PR_NUM                      5
108
109 #define CNL_PR                          0x84
110 #define CNL_FREG_NUM                    6
111 #define CNL_PR_NUM                      5
112
113 #define LVSCC                           0xc4
114 #define UVSCC                           0xc8
115 #define ERASE_OPCODE_SHIFT              8
116 #define ERASE_OPCODE_MASK               (0xff << ERASE_OPCODE_SHIFT)
117 #define ERASE_64K_OPCODE_SHIFT          16
118 #define ERASE_64K_OPCODE_MASK           (0xff << ERASE_64K_OPCODE_SHIFT)
119
120 /* Flash descriptor fields */
121 #define FLVALSIG_MAGIC                  0x0ff0a55a
122 #define FLMAP0_NC_MASK                  GENMASK(9, 8)
123 #define FLMAP0_NC_SHIFT                 8
124 #define FLMAP0_FCBA_MASK                GENMASK(7, 0)
125
126 #define FLCOMP_C0DEN_MASK               GENMASK(3, 0)
127 #define FLCOMP_C0DEN_512K               0x00
128 #define FLCOMP_C0DEN_1M                 0x01
129 #define FLCOMP_C0DEN_2M                 0x02
130 #define FLCOMP_C0DEN_4M                 0x03
131 #define FLCOMP_C0DEN_8M                 0x04
132 #define FLCOMP_C0DEN_16M                0x05
133 #define FLCOMP_C0DEN_32M                0x06
134 #define FLCOMP_C0DEN_64M                0x07
135
136 #define INTEL_SPI_TIMEOUT               5000 /* ms */
137 #define INTEL_SPI_FIFO_SZ               64
138
/**
 * struct intel_spi - Driver private data
 * @dev: Device pointer
 * @info: Pointer to board specific info
 * @base: Beginning of MMIO space
 * @pregs: Start of protection registers
 * @sregs: Start of software sequencer registers
 * @host: Pointer to the SPI controller structure
 * @nregions: Maximum number of regions
 * @pr_num: Maximum number of protected range registers
 * @chip0_size: Size of the first flash chip in bytes
 * @locked: Is SPI setting locked
 * @protected: Whether the regions are write protected
 * @bios_locked: Is BIOS region locked
 * @swseq_reg: Use SW sequencer in register reads/writes
 * @swseq_erase: Use SW sequencer in erase operation
 * @atomic_preopcode: Holds preopcode when atomic sequence is requested
 * @opcodes: Opcodes which are supported. These are programmed by BIOS
 *           before it locks down the controller.
 * @mem_ops: Pointer to SPI MEM ops supported by the controller
 */
struct intel_spi {
	struct device *dev;
	const struct intel_spi_boardinfo *info;
	void __iomem *base;
	void __iomem *pregs;
	void __iomem *sregs;
	struct spi_controller *host;
	size_t nregions;
	size_t pr_num;
	size_t chip0_size;
	bool locked;
	bool protected;
	bool bios_locked;
	bool swseq_reg;
	bool swseq_erase;
	u8 atomic_preopcode;
	u8 opcodes[8];
	const struct intel_spi_mem_op *mem_ops;
};
179
/**
 * struct intel_spi_mem_op - SPI MEM operation supported by the controller
 * @mem_op: SPI MEM operation template matched against incoming ops
 * @replacement_op: HSFSTS_CTL FCYCLE value used to run the operation with
 *		    the hardware sequencer (%0 when there is no HW
 *		    sequencer equivalent)
 * @exec_op: Executes the operation on the controller
 */
struct intel_spi_mem_op {
	struct spi_mem_op mem_op;
	u32 replacement_op;
	int (*exec_op)(struct intel_spi *ispi,
		       const struct spi_mem *mem,
		       const struct intel_spi_mem_op *iop,
		       const struct spi_mem_op *op);
};
188
189 static bool writeable;
190 module_param(writeable, bool, 0);
191 MODULE_PARM_DESC(writeable, "Enable write access to SPI flash chip (default=0)");
192
/*
 * intel_spi_dump_regs() - Dump controller registers for debugging
 * @ispi: Driver private data
 *
 * Reads back and dev_dbg()-prints the hardware sequencer registers, the
 * protected range registers and, when present, the software sequencer
 * registers, followed by a decoded summary of the protected ranges and
 * flash regions.
 */
static void intel_spi_dump_regs(struct intel_spi *ispi)
{
	u32 value;
	int i;

	dev_dbg(ispi->dev, "BFPREG=0x%08x\n", readl(ispi->base + BFPREG));

	value = readl(ispi->base + HSFSTS_CTL);
	dev_dbg(ispi->dev, "HSFSTS_CTL=0x%08x\n", value);
	if (value & HSFSTS_CTL_FLOCKDN)
		dev_dbg(ispi->dev, "-> Locked\n");

	dev_dbg(ispi->dev, "FADDR=0x%08x\n", readl(ispi->base + FADDR));
	dev_dbg(ispi->dev, "DLOCK=0x%08x\n", readl(ispi->base + DLOCK));

	for (i = 0; i < 16; i++)
		dev_dbg(ispi->dev, "FDATA(%d)=0x%08x\n",
			i, readl(ispi->base + FDATA(i)));

	dev_dbg(ispi->dev, "FRACC=0x%08x\n", readl(ispi->base + FRACC));

	for (i = 0; i < ispi->nregions; i++)
		dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i,
			readl(ispi->base + FREG(i)));
	for (i = 0; i < ispi->pr_num; i++)
		dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i,
			readl(ispi->pregs + PR(i)));

	/* SW sequencer registers exist only when @sregs is mapped */
	if (ispi->sregs) {
		value = readl(ispi->sregs + SSFSTS_CTL);
		dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value);
		dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n",
			readl(ispi->sregs + PREOP_OPTYPE));
		dev_dbg(ispi->dev, "OPMENU0=0x%08x\n",
			readl(ispi->sregs + OPMENU0));
		dev_dbg(ispi->dev, "OPMENU1=0x%08x\n",
			readl(ispi->sregs + OPMENU1));
	}

	dev_dbg(ispi->dev, "LVSCC=0x%08x\n", readl(ispi->base + LVSCC));
	dev_dbg(ispi->dev, "UVSCC=0x%08x\n", readl(ispi->base + UVSCC));

	dev_dbg(ispi->dev, "Protected regions:\n");
	for (i = 0; i < ispi->pr_num; i++) {
		u32 base, limit;

		value = readl(ispi->pregs + PR(i));
		/* A range matters only if read or write protection is set */
		if (!(value & (PR_WPE | PR_RPE)))
			continue;

		limit = (value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
		base = value & PR_BASE_MASK;

		/* Base/limit fields are in 4K units, hence the << 12 */
		dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x [%c%c]\n",
			i, base << 12, (limit << 12) | 0xfff,
			value & PR_WPE ? 'W' : '.', value & PR_RPE ? 'R' : '.');
	}

	dev_dbg(ispi->dev, "Flash regions:\n");
	for (i = 0; i < ispi->nregions; i++) {
		u32 region, base, limit;

		region = readl(ispi->base + FREG(i));
		base = region & FREG_BASE_MASK;
		limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;

		if (base >= limit || (i > 0 && limit == 0))
			dev_dbg(ispi->dev, " %02d disabled\n", i);
		else
			dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x\n",
				i, base << 12, (limit << 12) | 0xfff);
	}

	dev_dbg(ispi->dev, "Using %cW sequencer for register access\n",
		ispi->swseq_reg ? 'S' : 'H');
	dev_dbg(ispi->dev, "Using %cW sequencer for erase operation\n",
		ispi->swseq_erase ? 'S' : 'H');
}
271
272 /* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */
273 static int intel_spi_read_block(struct intel_spi *ispi, void *buf, size_t size)
274 {
275         size_t bytes;
276         int i = 0;
277
278         if (size > INTEL_SPI_FIFO_SZ)
279                 return -EINVAL;
280
281         while (size > 0) {
282                 bytes = min_t(size_t, size, 4);
283                 memcpy_fromio(buf, ispi->base + FDATA(i), bytes);
284                 size -= bytes;
285                 buf += bytes;
286                 i++;
287         }
288
289         return 0;
290 }
291
292 /* Writes max INTEL_SPI_FIFO_SZ bytes to the device fifo */
293 static int intel_spi_write_block(struct intel_spi *ispi, const void *buf,
294                                  size_t size)
295 {
296         size_t bytes;
297         int i = 0;
298
299         if (size > INTEL_SPI_FIFO_SZ)
300                 return -EINVAL;
301
302         while (size > 0) {
303                 bytes = min_t(size_t, size, 4);
304                 memcpy_toio(ispi->base + FDATA(i), buf, bytes);
305                 size -= bytes;
306                 buf += bytes;
307                 i++;
308         }
309
310         return 0;
311 }
312
313 static int intel_spi_wait_hw_busy(struct intel_spi *ispi)
314 {
315         u32 val;
316
317         return readl_poll_timeout(ispi->base + HSFSTS_CTL, val,
318                                   !(val & HSFSTS_CTL_SCIP), 0,
319                                   INTEL_SPI_TIMEOUT * 1000);
320 }
321
322 static int intel_spi_wait_sw_busy(struct intel_spi *ispi)
323 {
324         u32 val;
325
326         return readl_poll_timeout(ispi->sregs + SSFSTS_CTL, val,
327                                   !(val & SSFSTS_CTL_SCIP), 0,
328                                   INTEL_SPI_TIMEOUT * 1000);
329 }
330
331 static bool intel_spi_set_writeable(struct intel_spi *ispi)
332 {
333         if (!ispi->info->set_writeable)
334                 return false;
335
336         return ispi->info->set_writeable(ispi->base, ispi->info->data);
337 }
338
339 static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype)
340 {
341         int i;
342         int preop;
343
344         if (ispi->locked) {
345                 for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++)
346                         if (ispi->opcodes[i] == opcode)
347                                 return i;
348
349                 return -EINVAL;
350         }
351
352         /* The lock is off, so just use index 0 */
353         writel(opcode, ispi->sregs + OPMENU0);
354         preop = readw(ispi->sregs + PREOP_OPTYPE);
355         writel(optype << 16 | preop, ispi->sregs + PREOP_OPTYPE);
356
357         return 0;
358 }
359
360 static int intel_spi_hw_cycle(struct intel_spi *ispi,
361                               const struct intel_spi_mem_op *iop, size_t len)
362 {
363         u32 val, status;
364         int ret;
365
366         if (!iop->replacement_op)
367                 return -EINVAL;
368
369         val = readl(ispi->base + HSFSTS_CTL);
370         val &= ~(HSFSTS_CTL_FCYCLE_MASK | HSFSTS_CTL_FDBC_MASK);
371         val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT;
372         val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
373         val |= HSFSTS_CTL_FGO;
374         val |= iop->replacement_op;
375         writel(val, ispi->base + HSFSTS_CTL);
376
377         ret = intel_spi_wait_hw_busy(ispi);
378         if (ret)
379                 return ret;
380
381         status = readl(ispi->base + HSFSTS_CTL);
382         if (status & HSFSTS_CTL_FCERR)
383                 return -EIO;
384         else if (status & HSFSTS_CTL_AEL)
385                 return -EACCES;
386
387         return 0;
388 }
389
/*
 * intel_spi_sw_cycle() - Run an operation with the SW sequencer
 * @ispi: Driver private data
 * @opcode: Flash opcode to execute
 * @len: Number of data bytes to transfer (0 for no data)
 * @optype: One of the OPTYPE_* constants
 *
 * Returns %0 on success, -EIO on a flash cycle error, -EACCES on an
 * access error and -EINVAL when the opcode (or requested atomic
 * preopcode) cannot be used.
 */
static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, size_t len,
			      int optype)
{
	u32 val = 0, status;
	u8 atomic_preopcode;
	int ret;

	/* ret is the OPMENU slot index on success */
	ret = intel_spi_opcode_index(ispi, opcode, optype);
	if (ret < 0)
		return ret;

	/*
	 * Always clear it after each SW sequencer operation regardless
	 * of whether it is successful or not.
	 */
	atomic_preopcode = ispi->atomic_preopcode;
	ispi->atomic_preopcode = 0;

	/* Only mark 'Data Cycle' bit when there is data to be transferred */
	if (len > 0)
		val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
	val |= ret << SSFSTS_CTL_COP_SHIFT;
	/* Writing 1 to the status bits clears any stale error/done state */
	val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE;
	val |= SSFSTS_CTL_SCGO;
	if (atomic_preopcode) {
		u16 preop;

		switch (optype) {
		case OPTYPE_WRITE_NO_ADDR:
		case OPTYPE_WRITE_WITH_ADDR:
			/* Pick matching preopcode for the atomic sequence */
			preop = readw(ispi->sregs + PREOP_OPTYPE);
			if ((preop & 0xff) == atomic_preopcode)
				; /* Do nothing */
			else if ((preop >> 8) == atomic_preopcode)
				val |= SSFSTS_CTL_SPOP;
			else
				return -EINVAL;

			/* Enable atomic sequence */
			val |= SSFSTS_CTL_ACS;
			break;

		default:
			/* Atomic sequence only makes sense with write ops */
			return -EINVAL;
		}
	}
	writel(val, ispi->sregs + SSFSTS_CTL);

	ret = intel_spi_wait_sw_busy(ispi);
	if (ret)
		return ret;

	status = readl(ispi->sregs + SSFSTS_CTL);
	if (status & SSFSTS_CTL_FCERR)
		return -EIO;
	else if (status & SSFSTS_CTL_AEL)
		return -EACCES;

	return 0;
}
451
452 static u32 intel_spi_chip_addr(const struct intel_spi *ispi,
453                                const struct spi_mem *mem)
454 {
455         /* Pick up the correct start address */
456         if (!mem)
457                 return 0;
458         return (spi_get_chipselect(mem->spi, 0) == 1) ? ispi->chip0_size : 0;
459 }
460
461 static int intel_spi_read_reg(struct intel_spi *ispi, const struct spi_mem *mem,
462                               const struct intel_spi_mem_op *iop,
463                               const struct spi_mem_op *op)
464 {
465         u32 addr = intel_spi_chip_addr(ispi, mem) + op->addr.val;
466         size_t nbytes = op->data.nbytes;
467         u8 opcode = op->cmd.opcode;
468         int ret;
469
470         writel(addr, ispi->base + FADDR);
471
472         if (ispi->swseq_reg)
473                 ret = intel_spi_sw_cycle(ispi, opcode, nbytes,
474                                          OPTYPE_READ_NO_ADDR);
475         else
476                 ret = intel_spi_hw_cycle(ispi, iop, nbytes);
477
478         if (ret)
479                 return ret;
480
481         return intel_spi_read_block(ispi, op->data.buf.in, nbytes);
482 }
483
/*
 * intel_spi_write_reg() - Execute a register write operation
 * @ispi: Driver private data
 * @mem: SPI MEM device (may be %NULL for internal accesses)
 * @iop: Matched operation entry
 * @op: The operation to execute
 *
 * WREN and WRDI are special-cased (see comments below); everything else
 * fills the data FIFO and runs a SW or HW sequencer cycle.
 */
static int intel_spi_write_reg(struct intel_spi *ispi, const struct spi_mem *mem,
			       const struct intel_spi_mem_op *iop,
			       const struct spi_mem_op *op)
{
	u32 addr = intel_spi_chip_addr(ispi, mem) + op->addr.val;
	size_t nbytes = op->data.nbytes;
	u8 opcode = op->cmd.opcode;
	int ret;

	/*
	 * This is handled with atomic operation and preop code in Intel
	 * controller so we only verify that it is available. If the
	 * controller is not locked, program the opcode to the PREOP
	 * register for later use.
	 *
	 * When hardware sequencer is used there is no need to program
	 * any opcodes (it handles them automatically as part of a command).
	 */
	if (opcode == SPINOR_OP_WREN) {
		u16 preop;

		if (!ispi->swseq_reg)
			return 0;

		/* PREOP register holds two preopcodes, one in each byte */
		preop = readw(ispi->sregs + PREOP_OPTYPE);
		if ((preop & 0xff) != opcode && (preop >> 8) != opcode) {
			if (ispi->locked)
				return -EINVAL;
			writel(opcode, ispi->sregs + PREOP_OPTYPE);
		}

		/*
		 * This enables atomic sequence on next SW cycle. Will
		 * be cleared after next operation.
		 */
		ispi->atomic_preopcode = opcode;
		return 0;
	}

	/*
	 * We hope that HW sequencer will do the right thing automatically and
	 * with the SW sequencer we cannot use preopcode anyway, so just ignore
	 * the Write Disable operation and pretend it was completed
	 * successfully.
	 */
	if (opcode == SPINOR_OP_WRDI)
		return 0;

	writel(addr, ispi->base + FADDR);

	/* Write the value beforehand */
	ret = intel_spi_write_block(ispi, op->data.buf.out, nbytes);
	if (ret)
		return ret;

	if (ispi->swseq_reg)
		return intel_spi_sw_cycle(ispi, opcode, nbytes,
					  OPTYPE_WRITE_NO_ADDR);
	return intel_spi_hw_cycle(ispi, iop, nbytes);
}
544
/*
 * intel_spi_read() - Read flash contents using the HW sequencer
 * @ispi: Driver private data
 * @mem: SPI MEM device (may be %NULL for internal accesses)
 * @iop: Matched operation entry
 * @op: The read operation (address, length, buffer)
 *
 * Transfers the data in FIFO-sized chunks, never crossing a 4K boundary
 * within one cycle. Returns %0 on success or a negative errno.
 */
static int intel_spi_read(struct intel_spi *ispi, const struct spi_mem *mem,
			  const struct intel_spi_mem_op *iop,
			  const struct spi_mem_op *op)
{
	u32 addr = intel_spi_chip_addr(ispi, mem) + op->addr.val;
	size_t block_size, nbytes = op->data.nbytes;
	void *read_buf = op->data.buf.in;
	u32 val, status;
	int ret;

	/*
	 * Atomic sequence is not expected with HW sequencer reads. Make
	 * sure it is cleared regardless.
	 */
	if (WARN_ON_ONCE(ispi->atomic_preopcode))
		ispi->atomic_preopcode = 0;

	while (nbytes > 0) {
		block_size = min_t(size_t, nbytes, INTEL_SPI_FIFO_SZ);

		/* Read cannot cross 4K boundary */
		block_size = min_t(loff_t, addr + block_size,
				   round_up(addr + 1, SZ_4K)) - addr;

		writel(addr, ispi->base + FADDR);

		val = readl(ispi->base + HSFSTS_CTL);
		val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
		/* Writing 1 clears the stale status bits before the cycle */
		val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
		/* Byte count field is zero based */
		val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
		val |= HSFSTS_CTL_FCYCLE_READ;
		/* Setting FGO starts the cycle */
		val |= HSFSTS_CTL_FGO;
		writel(val, ispi->base + HSFSTS_CTL);

		ret = intel_spi_wait_hw_busy(ispi);
		if (ret)
			return ret;

		status = readl(ispi->base + HSFSTS_CTL);
		if (status & HSFSTS_CTL_FCERR)
			ret = -EIO;
		else if (status & HSFSTS_CTL_AEL)
			ret = -EACCES;

		if (ret < 0) {
			dev_err(ispi->dev, "read error: %x: %#x\n", addr, status);
			return ret;
		}

		/* Drain this chunk from the FIFO and advance */
		ret = intel_spi_read_block(ispi, read_buf, block_size);
		if (ret)
			return ret;

		nbytes -= block_size;
		addr += block_size;
		read_buf += block_size;
	}

	return 0;
}
605
/*
 * intel_spi_write() - Write flash contents using the HW sequencer
 * @ispi: Driver private data
 * @mem: SPI MEM device (may be %NULL for internal accesses)
 * @iop: Matched operation entry
 * @op: The write operation (address, length, buffer)
 *
 * Transfers the data in FIFO-sized chunks, never crossing a 4K boundary
 * within one cycle. The FIFO is filled before FGO starts each cycle.
 * Returns %0 on success or a negative errno.
 */
static int intel_spi_write(struct intel_spi *ispi, const struct spi_mem *mem,
			   const struct intel_spi_mem_op *iop,
			   const struct spi_mem_op *op)
{
	u32 addr = intel_spi_chip_addr(ispi, mem) + op->addr.val;
	size_t block_size, nbytes = op->data.nbytes;
	const void *write_buf = op->data.buf.out;
	u32 val, status;
	int ret;

	/* Not needed with HW sequencer write, make sure it is cleared */
	ispi->atomic_preopcode = 0;

	while (nbytes > 0) {
		block_size = min_t(size_t, nbytes, INTEL_SPI_FIFO_SZ);

		/* Write cannot cross 4K boundary */
		block_size = min_t(loff_t, addr + block_size,
				   round_up(addr + 1, SZ_4K)) - addr;

		writel(addr, ispi->base + FADDR);

		val = readl(ispi->base + HSFSTS_CTL);
		val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
		/* Writing 1 clears the stale status bits before the cycle */
		val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
		/* Byte count field is zero based */
		val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
		val |= HSFSTS_CTL_FCYCLE_WRITE;

		/* Fill the FIFO before the cycle is started */
		ret = intel_spi_write_block(ispi, write_buf, block_size);
		if (ret) {
			dev_err(ispi->dev, "failed to write block\n");
			return ret;
		}

		/* Start the write now */
		val |= HSFSTS_CTL_FGO;
		writel(val, ispi->base + HSFSTS_CTL);

		ret = intel_spi_wait_hw_busy(ispi);
		if (ret) {
			dev_err(ispi->dev, "timeout\n");
			return ret;
		}

		status = readl(ispi->base + HSFSTS_CTL);
		if (status & HSFSTS_CTL_FCERR)
			ret = -EIO;
		else if (status & HSFSTS_CTL_AEL)
			ret = -EACCES;

		if (ret < 0) {
			dev_err(ispi->dev, "write error: %x: %#x\n", addr, status);
			return ret;
		}

		nbytes -= block_size;
		addr += block_size;
		write_buf += block_size;
	}

	return 0;
}
668
/*
 * intel_spi_erase() - Erase a flash block
 * @ispi: Driver private data
 * @mem: SPI MEM device (may be %NULL for internal accesses)
 * @iop: Matched operation entry; @iop->replacement_op holds the HW
 *	 sequencer erase cycle when the HW path is taken
 * @op: The erase operation (opcode and address)
 *
 * Uses the SW sequencer when @swseq_erase is set, the HW sequencer
 * otherwise. Returns %0 on success or a negative errno.
 */
static int intel_spi_erase(struct intel_spi *ispi, const struct spi_mem *mem,
			   const struct intel_spi_mem_op *iop,
			   const struct spi_mem_op *op)
{
	u32 addr = intel_spi_chip_addr(ispi, mem) + op->addr.val;
	u8 opcode = op->cmd.opcode;
	u32 val, status;
	int ret;

	writel(addr, ispi->base + FADDR);

	if (ispi->swseq_erase)
		return intel_spi_sw_cycle(ispi, opcode, 0,
					  OPTYPE_WRITE_WITH_ADDR);

	/* Not needed with HW sequencer erase, make sure it is cleared */
	ispi->atomic_preopcode = 0;

	val = readl(ispi->base + HSFSTS_CTL);
	val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
	/* Writing 1 clears the stale status bits before the cycle */
	val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
	val |= HSFSTS_CTL_FGO;
	/*
	 * NOTE(review): unlike intel_spi_hw_cycle() this does not reject a
	 * zero @iop->replacement_op — presumably all erase entries in the
	 * mem_ops tables carry one; confirm against the table definitions.
	 */
	val |= iop->replacement_op;
	writel(val, ispi->base + HSFSTS_CTL);

	ret = intel_spi_wait_hw_busy(ispi);
	if (ret)
		return ret;

	status = readl(ispi->base + HSFSTS_CTL);
	if (status & HSFSTS_CTL_FCERR)
		return -EIO;
	if (status & HSFSTS_CTL_AEL)
		return -EACCES;

	return 0;
}
706
707 static int intel_spi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
708 {
709         op->data.nbytes = clamp_val(op->data.nbytes, 0, INTEL_SPI_FIFO_SZ);
710         return 0;
711 }
712
713 static bool intel_spi_cmp_mem_op(const struct intel_spi_mem_op *iop,
714                                  const struct spi_mem_op *op)
715 {
716         if (iop->mem_op.cmd.nbytes != op->cmd.nbytes ||
717             iop->mem_op.cmd.buswidth != op->cmd.buswidth ||
718             iop->mem_op.cmd.dtr != op->cmd.dtr)
719                 return false;
720
721         if (iop->mem_op.addr.nbytes != op->addr.nbytes ||
722             iop->mem_op.addr.dtr != op->addr.dtr)
723                 return false;
724
725         if (iop->mem_op.data.dir != op->data.dir ||
726             iop->mem_op.data.dtr != op->data.dtr)
727                 return false;
728
729         if (iop->mem_op.data.dir != SPI_MEM_NO_DATA) {
730                 if (iop->mem_op.data.buswidth != op->data.buswidth)
731                         return false;
732         }
733
734         return true;
735 }
736
737 static const struct intel_spi_mem_op *
738 intel_spi_match_mem_op(struct intel_spi *ispi, const struct spi_mem_op *op)
739 {
740         const struct intel_spi_mem_op *iop;
741
742         for (iop = ispi->mem_ops; iop->mem_op.cmd.opcode; iop++) {
743                 if (iop->mem_op.cmd.opcode == op->cmd.opcode &&
744                     intel_spi_cmp_mem_op(iop, op))
745                         return iop;
746         }
747
748         return NULL;
749 }
750
751 static bool intel_spi_supports_mem_op(struct spi_mem *mem,
752                                       const struct spi_mem_op *op)
753 {
754         struct intel_spi *ispi = spi_controller_get_devdata(mem->spi->controller);
755         const struct intel_spi_mem_op *iop;
756
757         iop = intel_spi_match_mem_op(ispi, op);
758         if (!iop) {
759                 dev_dbg(ispi->dev, "%#x not supported\n", op->cmd.opcode);
760                 return false;
761         }
762
763         /*
764          * For software sequencer check that the opcode is actually
765          * present in the opmenu if it is locked.
766          */
767         if (ispi->swseq_reg && ispi->locked) {
768                 int i;
769
770                 /* Check if it is in the locked opcodes list */
771                 for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++) {
772                         if (ispi->opcodes[i] == op->cmd.opcode)
773                                 return true;
774                 }
775
776                 dev_dbg(ispi->dev, "%#x not supported\n", op->cmd.opcode);
777                 return false;
778         }
779
780         return true;
781 }
782
783 static int intel_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
784 {
785         struct intel_spi *ispi = spi_controller_get_devdata(mem->spi->controller);
786         const struct intel_spi_mem_op *iop;
787
788         iop = intel_spi_match_mem_op(ispi, op);
789         if (!iop)
790                 return -EOPNOTSUPP;
791
792         return iop->exec_op(ispi, mem, iop, op);
793 }
794
795 static const char *intel_spi_get_name(struct spi_mem *mem)
796 {
797         const struct intel_spi *ispi = spi_controller_get_devdata(mem->spi->controller);
798
799         /*
800          * Return name of the flash controller device to be compatible
801          * with the MTD version.
802          */
803         return dev_name(ispi->dev);
804 }
805
806 static int intel_spi_dirmap_create(struct spi_mem_dirmap_desc *desc)
807 {
808         struct intel_spi *ispi = spi_controller_get_devdata(desc->mem->spi->controller);
809         const struct intel_spi_mem_op *iop;
810
811         iop = intel_spi_match_mem_op(ispi, &desc->info.op_tmpl);
812         if (!iop)
813                 return -EOPNOTSUPP;
814
815         desc->priv = (void *)iop;
816         return 0;
817 }
818
819 static ssize_t intel_spi_dirmap_read(struct spi_mem_dirmap_desc *desc, u64 offs,
820                                      size_t len, void *buf)
821 {
822         struct intel_spi *ispi = spi_controller_get_devdata(desc->mem->spi->controller);
823         const struct intel_spi_mem_op *iop = desc->priv;
824         struct spi_mem_op op = desc->info.op_tmpl;
825         int ret;
826
827         /* Fill in the gaps */
828         op.addr.val = offs;
829         op.data.nbytes = len;
830         op.data.buf.in = buf;
831
832         ret = iop->exec_op(ispi, desc->mem, iop, &op);
833         return ret ? ret : len;
834 }
835
836 static ssize_t intel_spi_dirmap_write(struct spi_mem_dirmap_desc *desc, u64 offs,
837                                       size_t len, const void *buf)
838 {
839         struct intel_spi *ispi = spi_controller_get_devdata(desc->mem->spi->controller);
840         const struct intel_spi_mem_op *iop = desc->priv;
841         struct spi_mem_op op = desc->info.op_tmpl;
842         int ret;
843
844         op.addr.val = offs;
845         op.data.nbytes = len;
846         op.data.buf.out = buf;
847
848         ret = iop->exec_op(ispi, desc->mem, iop, &op);
849         return ret ? ret : len;
850 }
851
/* spi-mem callbacks implemented by this controller driver */
static const struct spi_controller_mem_ops intel_spi_mem_ops = {
	.adjust_op_size = intel_spi_adjust_op_size,
	.supports_op = intel_spi_supports_mem_op,
	.exec_op = intel_spi_exec_mem_op,
	.get_name = intel_spi_get_name,
	.dirmap_create = intel_spi_dirmap_create,
	.dirmap_read = intel_spi_dirmap_read,
	.dirmap_write = intel_spi_dirmap_write,
};
861
/* Initializer for a spi_mem_op address phase of @__nbytes bytes */
#define INTEL_SPI_OP_ADDR(__nbytes)                                     \
	{                                                               \
		.nbytes = __nbytes,                                     \
	}

/* Initializer for a spi_mem_op with no data phase */
#define INTEL_SPI_OP_NO_DATA                                            \
	{                                                               \
		.dir = SPI_MEM_NO_DATA,                                 \
	}

/* Initializer for a spi_mem_op input data phase with @__buswidth lines */
#define INTEL_SPI_OP_DATA_IN(__buswidth)                                \
	{                                                               \
		.dir = SPI_MEM_DATA_IN,                                 \
		.buswidth = __buswidth,                                 \
	}

/* Initializer for a spi_mem_op output data phase with @__buswidth lines */
#define INTEL_SPI_OP_DATA_OUT(__buswidth)                               \
	{                                                               \
		.dir = SPI_MEM_DATA_OUT,                                \
		.buswidth = __buswidth,                                 \
	}

/* One supported operation together with the handler that executes it */
#define INTEL_SPI_MEM_OP(__cmd, __addr, __data, __exec_op)              \
	{                                                               \
		.mem_op = {                                             \
			.cmd = __cmd,                                   \
			.addr = __addr,                                 \
			.data = __data,                                 \
		},                                                      \
		.exec_op = __exec_op,                                   \
	}

/* As INTEL_SPI_MEM_OP but also carries a HW sequencer replacement cycle */
#define INTEL_SPI_MEM_OP_REPL(__cmd, __addr, __data, __exec_op, __repl) \
	{                                                               \
		.mem_op = {                                             \
			.cmd = __cmd,                                   \
			.addr = __addr,                                 \
			.data = __data,                                 \
		},                                                      \
		.exec_op = __exec_op,                                   \
		.replacement_op = __repl,                               \
	}
904
/*
 * Operations supported by every controller generation.
 *
 * The controller handles pretty much everything internally based on the
 * SFDP data but we want to make sure we only support the operations
 * actually possible. Only check buswidth and transfer direction, the
 * core validates data.
 */
#define INTEL_SPI_GENERIC_OPS                                           \
	/* Identification and status register operations */             \
	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1),        \
			      SPI_MEM_OP_NO_ADDR,                       \
			      INTEL_SPI_OP_DATA_IN(1),                  \
			      intel_spi_read_reg,                       \
			      HSFSTS_CTL_FCYCLE_RDID),                  \
	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1),        \
			      SPI_MEM_OP_NO_ADDR,                       \
			      INTEL_SPI_OP_DATA_IN(1),                  \
			      intel_spi_read_reg,                       \
			      HSFSTS_CTL_FCYCLE_RDSR),                  \
	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),        \
			      SPI_MEM_OP_NO_ADDR,                       \
			      INTEL_SPI_OP_DATA_OUT(1),                 \
			      intel_spi_write_reg,                      \
			      HSFSTS_CTL_FCYCLE_WRSR),                  \
	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_RDSFDP, 1),      \
			      INTEL_SPI_OP_ADDR(3),                     \
			      INTEL_SPI_OP_DATA_IN(1),                  \
			      intel_spi_read_reg,                       \
			      HSFSTS_CTL_FCYCLE_RDSFDP),                \
	/* Normal read */                                               \
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1),             \
			 INTEL_SPI_OP_ADDR(3),                          \
			 INTEL_SPI_OP_DATA_IN(1),                       \
			 intel_spi_read),                               \
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1),             \
			 INTEL_SPI_OP_ADDR(3),                          \
			 INTEL_SPI_OP_DATA_IN(2),                       \
			 intel_spi_read),                               \
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1),             \
			 INTEL_SPI_OP_ADDR(3),                          \
			 INTEL_SPI_OP_DATA_IN(4),                       \
			 intel_spi_read),                               \
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1),             \
			 INTEL_SPI_OP_ADDR(4),                          \
			 INTEL_SPI_OP_DATA_IN(1),                       \
			 intel_spi_read),                               \
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1),             \
			 INTEL_SPI_OP_ADDR(4),                          \
			 INTEL_SPI_OP_DATA_IN(2),                       \
			 intel_spi_read),                               \
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1),             \
			 INTEL_SPI_OP_ADDR(4),                          \
			 INTEL_SPI_OP_DATA_IN(4),                       \
			 intel_spi_read),                               \
	/* Fast read */                                                 \
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1),        \
			 INTEL_SPI_OP_ADDR(3),                          \
			 INTEL_SPI_OP_DATA_IN(1),                       \
			 intel_spi_read),                               \
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1),        \
			 INTEL_SPI_OP_ADDR(3),                          \
			 INTEL_SPI_OP_DATA_IN(2),                       \
			 intel_spi_read),                               \
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1),        \
			 INTEL_SPI_OP_ADDR(3),                          \
			 INTEL_SPI_OP_DATA_IN(4),                       \
			 intel_spi_read),                               \
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1),        \
			 INTEL_SPI_OP_ADDR(4),                          \
			 INTEL_SPI_OP_DATA_IN(1),                       \
			 intel_spi_read),                               \
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1),        \
			 INTEL_SPI_OP_ADDR(4),                          \
			 INTEL_SPI_OP_DATA_IN(2),                       \
			 intel_spi_read),                               \
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1),        \
			 INTEL_SPI_OP_ADDR(4),                          \
			 INTEL_SPI_OP_DATA_IN(4),                       \
			 intel_spi_read),                               \
	/* Read with 4-byte address opcode */                           \
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1),          \
			 INTEL_SPI_OP_ADDR(4),                          \
			 INTEL_SPI_OP_DATA_IN(1),                       \
			 intel_spi_read),                               \
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1),          \
			 INTEL_SPI_OP_ADDR(4),                          \
			 INTEL_SPI_OP_DATA_IN(2),                       \
			 intel_spi_read),                               \
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1),          \
			 INTEL_SPI_OP_ADDR(4),                          \
			 INTEL_SPI_OP_DATA_IN(4),                       \
			 intel_spi_read),                               \
	/* Fast read with 4-byte address opcode */                      \
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1),     \
			 INTEL_SPI_OP_ADDR(4),                          \
			 INTEL_SPI_OP_DATA_IN(1),                       \
			 intel_spi_read),                               \
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1),     \
			 INTEL_SPI_OP_ADDR(4),                          \
			 INTEL_SPI_OP_DATA_IN(2),                       \
			 intel_spi_read),                               \
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1),     \
			 INTEL_SPI_OP_ADDR(4),                          \
			 INTEL_SPI_OP_DATA_IN(4),                       \
			 intel_spi_read),                               \
	/* Write operations */                                          \
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP, 1),               \
			 INTEL_SPI_OP_ADDR(3),                          \
			 INTEL_SPI_OP_DATA_OUT(1),                      \
			 intel_spi_write),                              \
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP, 1),               \
			 INTEL_SPI_OP_ADDR(4),                          \
			 INTEL_SPI_OP_DATA_OUT(1),                      \
			 intel_spi_write),                              \
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP_4B, 1),            \
			 INTEL_SPI_OP_ADDR(4),                          \
			 INTEL_SPI_OP_DATA_OUT(1),                      \
			 intel_spi_write),                              \
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1),             \
			 SPI_MEM_OP_NO_ADDR,                            \
			 SPI_MEM_OP_NO_DATA,                            \
			 intel_spi_write_reg),                          \
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1),             \
			 SPI_MEM_OP_NO_ADDR,                            \
			 SPI_MEM_OP_NO_DATA,                            \
			 intel_spi_write_reg),                          \
	/* Erase operations */                                          \
	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K, 1),       \
			      INTEL_SPI_OP_ADDR(3),                     \
			      SPI_MEM_OP_NO_DATA,                       \
			      intel_spi_erase,                          \
			      HSFSTS_CTL_FCYCLE_ERASE),                 \
	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K, 1),       \
			      INTEL_SPI_OP_ADDR(4),                     \
			      SPI_MEM_OP_NO_DATA,                       \
			      intel_spi_erase,                          \
			      HSFSTS_CTL_FCYCLE_ERASE),                 \
	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K_4B, 1),    \
			      INTEL_SPI_OP_ADDR(4),                     \
			      SPI_MEM_OP_NO_DATA,                       \
			      intel_spi_erase,                          \
			      HSFSTS_CTL_FCYCLE_ERASE)                  \

/* Operation table for controllers without 64k erase support */
static const struct intel_spi_mem_op generic_mem_ops[] = {
	INTEL_SPI_GENERIC_OPS,
	{ },
};
1051
/* Operation table for controllers that also support 64k sector erase */
static const struct intel_spi_mem_op erase_64k_mem_ops[] = {
	INTEL_SPI_GENERIC_OPS,
	/* 64k sector erase operations */
	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE, 1),
			      INTEL_SPI_OP_ADDR(3),
			      SPI_MEM_OP_NO_DATA,
			      intel_spi_erase,
			      HSFSTS_CTL_FCYCLE_ERASE_64K),
	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE, 1),
			      INTEL_SPI_OP_ADDR(4),
			      SPI_MEM_OP_NO_DATA,
			      intel_spi_erase,
			      HSFSTS_CTL_FCYCLE_ERASE_64K),
	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE_4B, 1),
			      INTEL_SPI_OP_ADDR(4),
			      SPI_MEM_OP_NO_DATA,
			      intel_spi_erase,
			      HSFSTS_CTL_FCYCLE_ERASE_64K),
	{ },
};
1072
/*
 * intel_spi_init() - One-time initialization of the controller
 * @ispi: Intel SPI flash controller instance
 *
 * Selects the generation specific register layout, optionally lifts the
 * BIOS write protection, disables #SMI generation and decides whether
 * register/erase operations need the software sequencer. Returns %0 on
 * success and negative errno on failure.
 */
static int intel_spi_init(struct intel_spi *ispi)
{
	u32 opmenu0, opmenu1, lvscc, uvscc, val;
	bool erase_64k = false;
	int i;

	/* Per-generation register offsets and sequencer capabilities */
	switch (ispi->info->type) {
	case INTEL_SPI_BYT:
		ispi->sregs = ispi->base + BYT_SSFSTS_CTL;
		ispi->pregs = ispi->base + BYT_PR;
		ispi->nregions = BYT_FREG_NUM;
		ispi->pr_num = BYT_PR_NUM;
		ispi->swseq_reg = true;
		break;

	case INTEL_SPI_LPT:
		ispi->sregs = ispi->base + LPT_SSFSTS_CTL;
		ispi->pregs = ispi->base + LPT_PR;
		ispi->nregions = LPT_FREG_NUM;
		ispi->pr_num = LPT_PR_NUM;
		ispi->swseq_reg = true;
		break;

	case INTEL_SPI_BXT:
		ispi->sregs = ispi->base + BXT_SSFSTS_CTL;
		ispi->pregs = ispi->base + BXT_PR;
		ispi->nregions = BXT_FREG_NUM;
		ispi->pr_num = BXT_PR_NUM;
		erase_64k = true;
		break;

	case INTEL_SPI_CNL:
		/* CNL has no software sequencer registers */
		ispi->sregs = NULL;
		ispi->pregs = ispi->base + CNL_PR;
		ispi->nregions = CNL_FREG_NUM;
		ispi->pr_num = CNL_PR_NUM;
		erase_64k = true;
		break;

	default:
		return -EINVAL;
	}

	ispi->bios_locked = true;
	/* Try to disable BIOS write protection if user asked to do so */
	if (writeable) {
		if (intel_spi_set_writeable(ispi))
			ispi->bios_locked = false;
		else
			dev_warn(ispi->dev, "can't disable chip write protection\n");
	}

	/* Disable #SMI generation from HW sequencer */
	val = readl(ispi->base + HSFSTS_CTL);
	val &= ~HSFSTS_CTL_FSMIE;
	writel(val, ispi->base + HSFSTS_CTL);

	/*
	 * Determine whether erase operation should use HW or SW sequencer.
	 *
	 * The HW sequencer has a predefined list of opcodes, with only the
	 * erase opcode being programmable in LVSCC and UVSCC registers.
	 * If these registers don't contain a valid erase opcode, erase
	 * cannot be done using HW sequencer.
	 */
	lvscc = readl(ispi->base + LVSCC);
	uvscc = readl(ispi->base + UVSCC);
	if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK))
		ispi->swseq_erase = true;
	/* SPI controller on Intel BXT supports 64K erase opcode */
	if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase)
		if (!(lvscc & ERASE_64K_OPCODE_MASK) ||
		    !(uvscc & ERASE_64K_OPCODE_MASK))
			erase_64k = false;

	/* SW sequencer required but this generation does not have one */
	if (!ispi->sregs && (ispi->swseq_reg || ispi->swseq_erase)) {
		dev_err(ispi->dev, "software sequencer not supported, but required\n");
		return -EINVAL;
	}

	/*
	 * Some controllers can only do basic operations using hardware
	 * sequencer. All other operations are supposed to be carried out
	 * using software sequencer.
	 */
	if (ispi->swseq_reg) {
		/* Disable #SMI generation from SW sequencer */
		val = readl(ispi->sregs + SSFSTS_CTL);
		val &= ~SSFSTS_CTL_FSMIE;
		writel(val, ispi->sregs + SSFSTS_CTL);
	}

	/* Check controller's lock status */
	val = readl(ispi->base + HSFSTS_CTL);
	ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN);

	if (ispi->locked && ispi->sregs) {
		/*
		 * BIOS programs allowed opcodes and then locks down the
		 * register. So read back what opcodes it decided to support.
		 * That's the set we are going to support as well.
		 */
		opmenu0 = readl(ispi->sregs + OPMENU0);
		opmenu1 = readl(ispi->sregs + OPMENU1);

		if (opmenu0 && opmenu1) {
			/* Each OPMENU register packs four 8-bit opcodes */
			for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) {
				ispi->opcodes[i] = opmenu0 >> i * 8;
				ispi->opcodes[i + 4] = opmenu1 >> i * 8;
			}
		}
	}

	if (erase_64k) {
		dev_dbg(ispi->dev, "Using erase_64k memory operations");
		ispi->mem_ops = erase_64k_mem_ops;
	} else {
		dev_dbg(ispi->dev, "Using generic memory operations");
		ispi->mem_ops = generic_mem_ops;
	}

	intel_spi_dump_regs(ispi);
	return 0;
}
1197
1198 static bool intel_spi_is_protected(const struct intel_spi *ispi,
1199                                    unsigned int base, unsigned int limit)
1200 {
1201         int i;
1202
1203         for (i = 0; i < ispi->pr_num; i++) {
1204                 u32 pr_base, pr_limit, pr_value;
1205
1206                 pr_value = readl(ispi->pregs + PR(i));
1207                 if (!(pr_value & (PR_WPE | PR_RPE)))
1208                         continue;
1209
1210                 pr_limit = (pr_value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
1211                 pr_base = pr_value & PR_BASE_MASK;
1212
1213                 if (pr_base >= base && pr_limit <= limit)
1214                         return true;
1215         }
1216
1217         return false;
1218 }
1219
/*
 * There will be a single partition holding all enabled flash regions. We
 * call this "BIOS".
 */
static void intel_spi_fill_partition(struct intel_spi *ispi,
				     struct mtd_partition *part)
{
	u64 end;
	int i;

	memset(part, 0, sizeof(*part));

	/* Start from the mandatory descriptor region */
	part->size = 4096;
	part->name = "BIOS";

	/*
	 * Now try to find where this partition ends based on the flash
	 * region registers.
	 */
	for (i = 1; i < ispi->nregions; i++) {
		u32 region, base, limit;

		region = readl(ispi->base + FREG(i));
		base = region & FREG_BASE_MASK;
		limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;

		/* Skip regions that are disabled or inverted */
		if (base >= limit || limit == 0)
			continue;

		/*
		 * If any of the regions have protection bits set, make the
		 * whole partition read-only to be on the safe side.
		 *
		 * Also if the user did not ask the chip to be writeable
		 * mask the bit too.
		 */
		if (!writeable || intel_spi_is_protected(ispi, base, limit)) {
			part->mask_flags |= MTD_WRITEABLE;
			ispi->protected = true;
		}

		/* Limit is in 4K units; extend the partition to cover it */
		end = (limit << 12) + 4096;
		if (end > part->size)
			part->size = end;
	}

	/*
	 * Regions can refer to the second chip too so in this case we
	 * just make the BIOS partition to occupy the whole chip.
	 */
	if (ispi->chip0_size && part->size > ispi->chip0_size)
		part->size = MTDPART_SIZ_FULL;
}
1274
/*
 * intel_spi_read_desc() - Parse the Intel flash descriptor
 * @ispi: Intel SPI flash controller instance
 *
 * Reads the flash descriptor from the chip to determine the size of the
 * first flash component and the number of flash components (which sets
 * the number of chip selects). Returns %0 on success and negative errno
 * on failure.
 */
static int intel_spi_read_desc(struct intel_spi *ispi)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 0),
			   SPI_MEM_OP_ADDR(3, 0, 0),
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_IN(0, NULL, 0));
	u32 buf[2], nc, fcba, flcomp;
	ssize_t ret;

	/* FLVALSIG and FLMAP0 are read together from descriptor offset 0x10 */
	op.addr.val = 0x10;
	op.data.buf.in = buf;
	op.data.nbytes = sizeof(buf);

	ret = intel_spi_read(ispi, NULL, NULL, &op);
	if (ret) {
		dev_warn(ispi->dev, "failed to read descriptor\n");
		return ret;
	}

	dev_dbg(ispi->dev, "FLVALSIG=0x%08x\n", buf[0]);
	dev_dbg(ispi->dev, "FLMAP0=0x%08x\n", buf[1]);

	if (buf[0] != FLVALSIG_MAGIC) {
		dev_warn(ispi->dev, "descriptor signature not valid\n");
		return -ENODEV;
	}

	/* Flash component base address, in 16-byte units in FLMAP0 */
	fcba = (buf[1] & FLMAP0_FCBA_MASK) << 4;
	dev_dbg(ispi->dev, "FCBA=%#x\n", fcba);

	op.addr.val = fcba;
	op.data.buf.in = &flcomp;
	op.data.nbytes = sizeof(flcomp);

	ret = intel_spi_read(ispi, NULL, NULL, &op);
	if (ret) {
		dev_warn(ispi->dev, "failed to read FLCOMP\n");
		return -ENODEV;
	}

	dev_dbg(ispi->dev, "FLCOMP=0x%08x\n", flcomp);

	/* Component 0 density encodes the size of the first flash chip */
	switch (flcomp & FLCOMP_C0DEN_MASK) {
	case FLCOMP_C0DEN_512K:
		ispi->chip0_size = SZ_512K;
		break;
	case FLCOMP_C0DEN_1M:
		ispi->chip0_size = SZ_1M;
		break;
	case FLCOMP_C0DEN_2M:
		ispi->chip0_size = SZ_2M;
		break;
	case FLCOMP_C0DEN_4M:
		ispi->chip0_size = SZ_4M;
		break;
	case FLCOMP_C0DEN_8M:
		ispi->chip0_size = SZ_8M;
		break;
	case FLCOMP_C0DEN_16M:
		ispi->chip0_size = SZ_16M;
		break;
	case FLCOMP_C0DEN_32M:
		ispi->chip0_size = SZ_32M;
		break;
	case FLCOMP_C0DEN_64M:
		ispi->chip0_size = SZ_64M;
		break;
	default:
		return -EINVAL;
	}

	dev_dbg(ispi->dev, "chip0 size %zd KB\n", ispi->chip0_size / SZ_1K);

	/* NC field is the number of additional components beyond the first */
	nc = (buf[1] & FLMAP0_NC_MASK) >> FLMAP0_NC_SHIFT;
	if (!nc)
		ispi->host->num_chipselect = 1;
	else if (nc == 1)
		ispi->host->num_chipselect = 2;
	else
		return -EINVAL;

	dev_dbg(ispi->dev, "%u flash components found\n",
		ispi->host->num_chipselect);
	return 0;
}
1361
/*
 * intel_spi_populate_chip() - Create the SPI flash chip device(s)
 * @ispi: Intel SPI flash controller instance
 *
 * Creates a "spi-nor" device for the first flash chip with a single
 * "BIOS" MTD partition and, if the descriptor reported a second
 * component, another device exposing the full chip as "BIOS1".
 * Returns %0 on success and negative errno on failure.
 */
static int intel_spi_populate_chip(struct intel_spi *ispi)
{
	struct flash_platform_data *pdata;
	struct mtd_partition *parts;
	struct spi_board_info chip;
	int ret;

	ret = intel_spi_read_desc(ispi);
	if (ret)
		return ret;

	pdata = devm_kzalloc(ispi->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	pdata->nr_parts = 1;
	pdata->parts = devm_kcalloc(ispi->dev, pdata->nr_parts,
				    sizeof(*pdata->parts), GFP_KERNEL);
	if (!pdata->parts)
		return -ENOMEM;

	intel_spi_fill_partition(ispi, pdata->parts);

	memset(&chip, 0, sizeof(chip));
	snprintf(chip.modalias, 8, "spi-nor");
	chip.platform_data = pdata;

	if (!spi_new_device(ispi->host, &chip))
		return -ENODEV;

	/* Add the second chip if present */
	if (ispi->host->num_chipselect < 2)
		return 0;

	/* Separate platform data; the first one is owned by chip 0 now */
	pdata = devm_kzalloc(ispi->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	pdata->name = devm_kasprintf(ispi->dev, GFP_KERNEL, "%s-chip1",
				     dev_name(ispi->dev));
	if (!pdata->name)
		return -ENOMEM;

	pdata->nr_parts = 1;
	parts = devm_kcalloc(ispi->dev, pdata->nr_parts, sizeof(*parts),
			     GFP_KERNEL);
	if (!parts)
		return -ENOMEM;

	/* Second chip is exposed in full as a single partition */
	parts[0].size = MTDPART_SIZ_FULL;
	parts[0].name = "BIOS1";
	pdata->parts = parts;

	chip.platform_data = pdata;
	chip.chip_select = 1;

	if (!spi_new_device(ispi->host, &chip))
		return -ENODEV;
	return 0;
}
1422
1423 static ssize_t intel_spi_protected_show(struct device *dev,
1424                                         struct device_attribute *attr, char *buf)
1425 {
1426         struct intel_spi *ispi = dev_get_drvdata(dev);
1427
1428         return sysfs_emit(buf, "%d\n", ispi->protected);
1429 }
1430 static DEVICE_ATTR_ADMIN_RO(intel_spi_protected);
1431
1432 static ssize_t intel_spi_locked_show(struct device *dev,
1433                                      struct device_attribute *attr, char *buf)
1434 {
1435         struct intel_spi *ispi = dev_get_drvdata(dev);
1436
1437         return sysfs_emit(buf, "%d\n", ispi->locked);
1438 }
1439 static DEVICE_ATTR_ADMIN_RO(intel_spi_locked);
1440
1441 static ssize_t intel_spi_bios_locked_show(struct device *dev,
1442                                           struct device_attribute *attr, char *buf)
1443 {
1444         struct intel_spi *ispi = dev_get_drvdata(dev);
1445
1446         return sysfs_emit(buf, "%d\n", ispi->bios_locked);
1447 }
1448 static DEVICE_ATTR_ADMIN_RO(intel_spi_bios_locked);
1449
/* sysfs attributes common to all Intel SPI flash controllers */
static struct attribute *intel_spi_attrs[] = {
	&dev_attr_intel_spi_protected.attr,
	&dev_attr_intel_spi_locked.attr,
	&dev_attr_intel_spi_bios_locked.attr,
	NULL
};

static const struct attribute_group intel_spi_attr_group = {
	.attrs = intel_spi_attrs,
};

/* Attribute groups for the PCI/platform glue drivers to register */
const struct attribute_group *intel_spi_groups[] = {
	&intel_spi_attr_group,
	NULL
};
EXPORT_SYMBOL_GPL(intel_spi_groups);
1466
/**
 * intel_spi_probe() - Probe the Intel SPI flash controller
 * @dev: Pointer to the parent device
 * @mem: MMIO resource
 * @info: Platform specific information
 *
 * Probes Intel SPI flash controller and creates the flash chip device.
 * All resources are device-managed, so no explicit remove path is
 * needed. Returns %0 on success and negative errno in case of failure.
 */
int intel_spi_probe(struct device *dev, struct resource *mem,
		    const struct intel_spi_boardinfo *info)
{
	struct spi_controller *host;
	struct intel_spi *ispi;
	int ret;

	/* The intel_spi instance lives in the controller's devdata */
	host = devm_spi_alloc_host(dev, sizeof(*ispi));
	if (!host)
		return -ENOMEM;

	host->mem_ops = &intel_spi_mem_ops;

	ispi = spi_controller_get_devdata(host);

	ispi->base = devm_ioremap_resource(dev, mem);
	if (IS_ERR(ispi->base))
		return PTR_ERR(ispi->base);

	ispi->dev = dev;
	ispi->host = host;
	ispi->info = info;

	ret = intel_spi_init(ispi);
	if (ret)
		return ret;

	ret = devm_spi_register_controller(dev, host);
	if (ret)
		return ret;

	/* drvdata is needed by the sysfs show functions */
	dev_set_drvdata(dev, ispi);
	return intel_spi_populate_chip(ispi);
}
EXPORT_SYMBOL_GPL(intel_spi_probe);
1511
1512 MODULE_DESCRIPTION("Intel PCH/PCU SPI flash core driver");
1513 MODULE_AUTHOR("Mika Westerberg <[email protected]>");
1514 MODULE_LICENSE("GPL v2");
This page took 0.114513 seconds and 4 git commands to generate.