3 * This is the generic MTD driver for NAND flash devices. It should be
4 * capable of working with almost all NAND chips currently available.
6 * Additional technical information is available on
7 * http://www.linux-mtd.infradead.org/doc/nand.html
13 * Credits: David Woodhouse for adding multichip support
15 * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
16 * rework for 2K page size chips
19 * TODO: Enable cached programming for 2k page size chips
20 * Check, if mtd->ecctype should be set to MTD_ECC_HW
21 * if we have HW ECC support.
22 * BBT table is not serialized, has to be fixed
24 * This program is free software; you can redistribute it and/or modify
25 * it under the terms of the GNU General Public License version 2 as
26 * published by the Free Software Foundation.
30 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
32 #include <linux/module.h>
33 #include <linux/delay.h>
34 #include <linux/errno.h>
35 #include <linux/err.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
39 #include <linux/nmi.h>
40 #include <linux/types.h>
41 #include <linux/mtd/mtd.h>
42 #include <linux/mtd/rawnand.h>
43 #include <linux/mtd/nand_ecc.h>
44 #include <linux/mtd/nand_bch.h>
45 #include <linux/interrupt.h>
46 #include <linux/bitops.h>
48 #include <linux/mtd/partitions.h>
51 static int nand_get_device(struct mtd_info *mtd, int new_state);
53 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
54 struct mtd_oob_ops *ops);
56 /* Define default oob placement schemes for large and small page devices */
57 static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
58 struct mtd_oob_region *oobregion)
60 struct nand_chip *chip = mtd_to_nand(mtd);
61 struct nand_ecc_ctrl *ecc = &chip->ecc;
67 oobregion->offset = 0;
68 oobregion->length = 4;
70 oobregion->offset = 6;
71 oobregion->length = ecc->total - 4;
77 static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
78 struct mtd_oob_region *oobregion)
83 if (mtd->oobsize == 16) {
87 oobregion->length = 8;
88 oobregion->offset = 8;
90 oobregion->length = 2;
92 oobregion->offset = 3;
94 oobregion->offset = 6;
100 const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
101 .ecc = nand_ooblayout_ecc_sp,
102 .free = nand_ooblayout_free_sp,
104 EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
106 static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
107 struct mtd_oob_region *oobregion)
109 struct nand_chip *chip = mtd_to_nand(mtd);
110 struct nand_ecc_ctrl *ecc = &chip->ecc;
115 oobregion->length = ecc->total;
116 oobregion->offset = mtd->oobsize - oobregion->length;
121 static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
122 struct mtd_oob_region *oobregion)
124 struct nand_chip *chip = mtd_to_nand(mtd);
125 struct nand_ecc_ctrl *ecc = &chip->ecc;
130 oobregion->length = mtd->oobsize - ecc->total - 2;
131 oobregion->offset = 2;
136 const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
137 .ecc = nand_ooblayout_ecc_lp,
138 .free = nand_ooblayout_free_lp,
140 EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
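/*
 * A minimal sketch (hypothetical helper, not part of this driver) of how a
 * controller driver with no special OOB constraints might attach one of the
 * default layouts exported above, based on the detected page size.
 */
static inline void example_attach_default_ooblayout(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (mtd->writesize <= 512)
		mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
	else
		mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
}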
143 * Support the old "large page" layout used for 1-bit Hamming ECC where ECC
144 * bytes are placed at a fixed offset.
146 static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
147 struct mtd_oob_region *oobregion)
149 struct nand_chip *chip = mtd_to_nand(mtd);
150 struct nand_ecc_ctrl *ecc = &chip->ecc;
155 switch (mtd->oobsize) {
157 oobregion->offset = 40;
160 oobregion->offset = 80;
166 oobregion->length = ecc->total;
167 if (oobregion->offset + oobregion->length > mtd->oobsize)
173 static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
174 struct mtd_oob_region *oobregion)
176 struct nand_chip *chip = mtd_to_nand(mtd);
177 struct nand_ecc_ctrl *ecc = &chip->ecc;
180 if (section < 0 || section > 1)
183 switch (mtd->oobsize) {
195 oobregion->offset = 2;
196 oobregion->length = ecc_offset - 2;
198 oobregion->offset = ecc_offset + ecc->total;
199 oobregion->length = mtd->oobsize - oobregion->offset;
205 static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
206 .ecc = nand_ooblayout_ecc_lp_hamming,
207 .free = nand_ooblayout_free_lp_hamming,
210 static int check_offs_len(struct mtd_info *mtd,
211 loff_t ofs, uint64_t len)
213 struct nand_chip *chip = mtd_to_nand(mtd);
216 /* Start address must align on block boundary */
217 if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
218 pr_debug("%s: unaligned address\n", __func__);
222 /* Length must align on block boundary */
223 if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
224 pr_debug("%s: length not block aligned\n", __func__);
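	/*
	 * Worked example (assumed geometry): with a 128KiB erase block,
	 * phys_erase_shift is 17, so both ofs and len must be multiples of
	 * 0x20000 for the checks above to pass.
	 */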
232 * nand_release_device - [GENERIC] release chip
233 * @mtd: MTD device structure
235 * Release chip lock and wake up anyone waiting on the device.
237 static void nand_release_device(struct mtd_info *mtd)
239 struct nand_chip *chip = mtd_to_nand(mtd);
241 /* Release the controller and the chip */
242 spin_lock(&chip->controller->lock);
243 chip->controller->active = NULL;
244 chip->state = FL_READY;
245 wake_up(&chip->controller->wq);
246 spin_unlock(&chip->controller->lock);
250 * nand_read_byte - [DEFAULT] read one byte from the chip
251 * @mtd: MTD device structure
253 * Default read function for 8bit buswidth
255 static uint8_t nand_read_byte(struct mtd_info *mtd)
257 struct nand_chip *chip = mtd_to_nand(mtd);
258 return readb(chip->IO_ADDR_R);
262 * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
263 * @mtd: MTD device structure
265 * Default read function for 16bit buswidth with endianness conversion.
268 static uint8_t nand_read_byte16(struct mtd_info *mtd)
270 struct nand_chip *chip = mtd_to_nand(mtd);
271 return (uint8_t) cpu_to_le16(readw(chip->IO_ADDR_R));
275 * nand_read_word - [DEFAULT] read one word from the chip
276 * @mtd: MTD device structure
278 * Default read function for 16bit buswidth without endianness conversion.
280 static u16 nand_read_word(struct mtd_info *mtd)
282 struct nand_chip *chip = mtd_to_nand(mtd);
283 return readw(chip->IO_ADDR_R);
287 * nand_select_chip - [DEFAULT] control CE line
288 * @mtd: MTD device structure
289 * @chipnr: chip number to select, -1 for deselect
291 * Default select function for 1 chip devices.
293 static void nand_select_chip(struct mtd_info *mtd, int chipnr)
295 struct nand_chip *chip = mtd_to_nand(mtd);
299 chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
310 * nand_write_byte - [DEFAULT] write single byte to chip
311 * @mtd: MTD device structure
312 * @byte: value to write
314 * Default function to write a byte to I/O[7:0]
316 static void nand_write_byte(struct mtd_info *mtd, uint8_t byte)
318 struct nand_chip *chip = mtd_to_nand(mtd);
320 chip->write_buf(mtd, &byte, 1);
324 * nand_write_byte16 - [DEFAULT] write single byte to a chip with width 16
325 * @mtd: MTD device structure
326 * @byte: value to write
328 * Default function to write a byte to I/O[7:0] on a 16-bit wide chip.
330 static void nand_write_byte16(struct mtd_info *mtd, uint8_t byte)
332 struct nand_chip *chip = mtd_to_nand(mtd);
333 uint16_t word = byte;
336 * It's not entirely clear what should happen to I/O[15:8] when writing
337 * a byte. The ONFi spec (Revision 3.1; 2012-09-19, Section 2.16) reads:
339 * When the host supports a 16-bit bus width, only data is
340 * transferred at the 16-bit width. All address and command line
341 * transfers shall use only the lower 8-bits of the data bus. During
342 * command transfers, the host may place any value on the upper
343 * 8-bits of the data bus. During address transfers, the host shall
344 * set the upper 8-bits of the data bus to 00h.
346 * One user of the write_byte callback is nand_onfi_set_features. The
347 * four parameters are specified to be written to I/O[7:0], but this is
348 * neither an address nor a command transfer. Let's assume a 0 on the
349 * upper I/O lines is OK.
351 chip->write_buf(mtd, (uint8_t *)&word, 2);
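	/*
	 * Note: 'word' is just the zero-extended byte, so I/O[15:8] is driven
	 * to 0 while the payload appears on I/O[7:0], matching the assumption
	 * spelled out above.
	 */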
355 * nand_write_buf - [DEFAULT] write buffer to chip
356 * @mtd: MTD device structure
358 * @len: number of bytes to write
360 * Default write function for 8bit buswidth.
362 static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
364 struct nand_chip *chip = mtd_to_nand(mtd);
366 iowrite8_rep(chip->IO_ADDR_W, buf, len);
370 * nand_read_buf - [DEFAULT] read chip data into buffer
371 * @mtd: MTD device structure
372 * @buf: buffer to store data
373 * @len: number of bytes to read
375 * Default read function for 8bit buswidth.
377 static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
379 struct nand_chip *chip = mtd_to_nand(mtd);
381 ioread8_rep(chip->IO_ADDR_R, buf, len);
385 * nand_write_buf16 - [DEFAULT] write buffer to chip
386 * @mtd: MTD device structure
388 * @len: number of bytes to write
390 * Default write function for 16bit buswidth.
392 static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
394 struct nand_chip *chip = mtd_to_nand(mtd);
395 u16 *p = (u16 *) buf;
397 iowrite16_rep(chip->IO_ADDR_W, p, len >> 1);
401 * nand_read_buf16 - [DEFAULT] read chip data into buffer
402 * @mtd: MTD device structure
403 * @buf: buffer to store data
404 * @len: number of bytes to read
406 * Default read function for 16bit buswidth.
408 static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
410 struct nand_chip *chip = mtd_to_nand(mtd);
411 u16 *p = (u16 *) buf;
413 ioread16_rep(chip->IO_ADDR_R, p, len >> 1);
417 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
418 * @mtd: MTD device structure
419 * @ofs: offset from device start
421 * Check, if the block is bad.
423 static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
425 int page, page_end, res;
426 struct nand_chip *chip = mtd_to_nand(mtd);
429 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
430 ofs += mtd->erasesize - mtd->writesize;
432 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
433 page_end = page + (chip->bbt_options & NAND_BBT_SCAN2NDPAGE ? 2 : 1);
435 for (; page < page_end; page++) {
436 res = chip->ecc.read_oob(mtd, chip, page);
440 bad = chip->oob_poi[chip->badblockpos];
442 if (likely(chip->badblockbits == 8))
445 res = hweight8(bad) < chip->badblockbits;
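	/*
	 * Example: with badblockbits == 7, a marker byte needs at least seven
	 * bits set to still count as good, i.e. more than one bit flipped
	 * away from 0xff marks the block bad.
	 */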
454 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
455 * @mtd: MTD device structure
456 * @ofs: offset from device start
458 * This is the default implementation, which can be overridden by a hardware
459 * specific driver. It provides the details for writing a bad block marker to a
462 static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
464 struct nand_chip *chip = mtd_to_nand(mtd);
465 struct mtd_oob_ops ops;
466 uint8_t buf[2] = { 0, 0 };
467 int ret = 0, res, i = 0;
469 memset(&ops, 0, sizeof(ops));
471 ops.ooboffs = chip->badblockpos;
472 if (chip->options & NAND_BUSWIDTH_16) {
473 ops.ooboffs &= ~0x01;
474 ops.len = ops.ooblen = 2;
476 ops.len = ops.ooblen = 1;
478 ops.mode = MTD_OPS_PLACE_OOB;
480 /* Write to first/last page(s) if necessary */
481 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
482 ofs += mtd->erasesize - mtd->writesize;
484 res = nand_do_write_oob(mtd, ofs, &ops);
489 ofs += mtd->writesize;
490 } while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
496 * nand_block_markbad_lowlevel - mark a block bad
497 * @mtd: MTD device structure
498 * @ofs: offset from device start
500 * This function performs the generic NAND bad block marking steps (i.e., bad
501 * block table(s) and/or marker(s)). We only allow the hardware driver to
502 * specify how to write bad block markers to OOB (chip->block_markbad).
504 * We try operations in the following order:
506 * (1) erase the affected block, to allow OOB marker to be written cleanly
507 * (2) write bad block marker to OOB area of affected block (unless flag
508 * NAND_BBT_NO_OOB_BBM is present)
 * (3) update the bad block table (BBT)
511 * Note that we retain the first error encountered in (2) or (3), finish the
512 * procedures, and dump the error in the end.
514 static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
516 struct nand_chip *chip = mtd_to_nand(mtd);
519 if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
520 struct erase_info einfo;
522 /* Attempt erase before marking OOB */
523 memset(&einfo, 0, sizeof(einfo));
526 einfo.len = 1ULL << chip->phys_erase_shift;
527 nand_erase_nand(mtd, &einfo, 0);
529 /* Write bad block marker to OOB */
530 nand_get_device(mtd, FL_WRITING);
531 ret = chip->block_markbad(mtd, ofs);
532 nand_release_device(mtd);
535 /* Mark block bad in BBT */
537 res = nand_markbad_bbt(mtd, ofs);
543 mtd->ecc_stats.badblocks++;
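	/*
	 * Typical entry path for the steps above (sketch): an MTD user calls
	 * mtd_block_markbad(mtd, ofs), which goes through mtd->_block_markbad
	 * and ends up in this helper.
	 */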
549 * nand_check_wp - [GENERIC] check if the chip is write protected
550 * @mtd: MTD device structure
552 * Check, if the device is write protected. The function expects that the
553 * device is already selected.
555 static int nand_check_wp(struct mtd_info *mtd)
557 struct nand_chip *chip = mtd_to_nand(mtd);
559 /* Broken xD cards report WP despite being writable */
560 if (chip->options & NAND_BROKEN_XD)
563 /* Check the WP bit */
564 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
565 return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
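	/*
	 * NAND_STATUS_WP is set when the device is NOT write protected, so a
	 * set bit yields 0 (writable) and a cleared bit yields 1 (protected).
	 */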
569 * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
570 * @mtd: MTD device structure
571 * @ofs: offset from device start
573 * Check if the block is marked as reserved.
575 static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
577 struct nand_chip *chip = mtd_to_nand(mtd);
581 /* Return info from the table */
582 return nand_isreserved_bbt(mtd, ofs);
586 * nand_block_checkbad - [GENERIC] Check if a block is marked bad
587 * @mtd: MTD device structure
588 * @ofs: offset from device start
589 * @allowbbt: 1, if it is allowed to access the bbt area
591 * Check, if the block is bad. Either by reading the bad block table or by
592 * calling the scan function.
594 static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int allowbbt)
596 struct nand_chip *chip = mtd_to_nand(mtd);
599 return chip->block_bad(mtd, ofs);
601 /* Return info from the table */
602 return nand_isbad_bbt(mtd, ofs, allowbbt);
606 * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
607 * @mtd: MTD device structure
610 * Helper function for nand_wait_ready used when needing to wait in interrupt context.
613 static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
615 struct nand_chip *chip = mtd_to_nand(mtd);
618 /* Wait for the device to get ready */
619 for (i = 0; i < timeo; i++) {
620 if (chip->dev_ready(mtd))
622 touch_softlockup_watchdog();
628 * nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
629 * @mtd: MTD device structure
631 * Wait for the ready pin after a command, and warn if a timeout occurs.
633 void nand_wait_ready(struct mtd_info *mtd)
635 struct nand_chip *chip = mtd_to_nand(mtd);
636 unsigned long timeo = 400;
638 if (in_interrupt() || oops_in_progress)
639 return panic_nand_wait_ready(mtd, timeo);
641 /* Wait until command is processed or timeout occurs */
642 timeo = jiffies + msecs_to_jiffies(timeo);
644 if (chip->dev_ready(mtd))
647 } while (time_before(jiffies, timeo));
649 if (!chip->dev_ready(mtd))
650 pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
652 EXPORT_SYMBOL_GPL(nand_wait_ready);
655 * nand_wait_status_ready - [GENERIC] Wait for the ready status after commands.
656 * @mtd: MTD device structure
657 * @timeo: Timeout in ms
659 * Wait for status ready (i.e. command done) or timeout.
661 static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
663 register struct nand_chip *chip = mtd_to_nand(mtd);
665 timeo = jiffies + msecs_to_jiffies(timeo);
667 if ((chip->read_byte(mtd) & NAND_STATUS_READY))
669 touch_softlockup_watchdog();
670 } while (time_before(jiffies, timeo));
674 * nand_command - [DEFAULT] Send command to NAND device
675 * @mtd: MTD device structure
676 * @command: the command to be sent
677 * @column: the column address for this command, -1 if none
678 * @page_addr: the page address for this command, -1 if none
680 * Send command to NAND device. This function is used for small page devices
681 * (512 Bytes per page).
683 static void nand_command(struct mtd_info *mtd, unsigned int command,
684 int column, int page_addr)
686 register struct nand_chip *chip = mtd_to_nand(mtd);
687 int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
689 /* Write out the command to the device */
690 if (command == NAND_CMD_SEQIN) {
693 if (column >= mtd->writesize) {
695 column -= mtd->writesize;
696 readcmd = NAND_CMD_READOOB;
697 } else if (column < 256) {
698 /* First 256 bytes --> READ0 */
699 readcmd = NAND_CMD_READ0;
702 readcmd = NAND_CMD_READ1;
704 chip->cmd_ctrl(mtd, readcmd, ctrl);
705 ctrl &= ~NAND_CTRL_CHANGE;
707 chip->cmd_ctrl(mtd, command, ctrl);
709 /* Address cycle, when necessary */
710 ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
711 /* Serially input address */
713 /* Adjust columns for 16 bit buswidth */
714 if (chip->options & NAND_BUSWIDTH_16 &&
715 !nand_opcode_8bits(command))
717 chip->cmd_ctrl(mtd, column, ctrl);
718 ctrl &= ~NAND_CTRL_CHANGE;
720 if (page_addr != -1) {
721 chip->cmd_ctrl(mtd, page_addr, ctrl);
722 ctrl &= ~NAND_CTRL_CHANGE;
723 chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
724 /* One more address cycle for devices > 32MiB */
725 if (chip->chipsize > (32 << 20))
726 chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
728 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
731 * Program and erase have their own busy handlers; status and sequential in need no delay.
736 case NAND_CMD_PAGEPROG:
737 case NAND_CMD_ERASE1:
738 case NAND_CMD_ERASE2:
740 case NAND_CMD_STATUS:
741 case NAND_CMD_READID:
742 case NAND_CMD_SET_FEATURES:
748 udelay(chip->chip_delay);
749 chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
750 NAND_CTRL_CLE | NAND_CTRL_CHANGE);
752 NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
753 /* EZ-NAND can take up to 250ms as per ONFi v4.0 */
754 nand_wait_status_ready(mtd, 250);
757 /* This applies to read commands */
760 * READ0 is sometimes used to exit GET STATUS mode. When this
761 * is the case no address cycles are requested, and we can use
762 * this information to detect that we should not wait for the
763 * device to be ready.
765 if (column == -1 && page_addr == -1)
770 * If we don't have access to the busy pin, we apply the given command delay.
773 if (!chip->dev_ready) {
774 udelay(chip->chip_delay);
779 * Apply this short delay always to ensure that we do wait tWB in
780 * any case on any machine.
784 nand_wait_ready(mtd);
787 static void nand_ccs_delay(struct nand_chip *chip)
790 * The controller already takes care of waiting for tCCS when the RNDIN
791 * or RNDOUT command is sent, return directly.
793 if (!(chip->options & NAND_WAIT_TCCS))
797 * Wait tCCS_min if it is correctly defined, otherwise wait 500ns
798 * (which should be safe for all NANDs).
800 if (chip->data_interface && chip->data_interface->timings.sdr.tCCS_min)
801 ndelay(chip->data_interface->timings.sdr.tCCS_min / 1000);
807 * nand_command_lp - [DEFAULT] Send command to NAND large page device
808 * @mtd: MTD device structure
809 * @command: the command to be sent
810 * @column: the column address for this command, -1 if none
811 * @page_addr: the page address for this command, -1 if none
813 * Send command to NAND device. This is the version for the new large page
814 * devices. We don't have the separate regions as we have in the small page
815 * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
817 static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
818 int column, int page_addr)
820 register struct nand_chip *chip = mtd_to_nand(mtd);
822 /* Emulate NAND_CMD_READOOB */
823 if (command == NAND_CMD_READOOB) {
824 column += mtd->writesize;
825 command = NAND_CMD_READ0;
828 /* Command latch cycle */
829 chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
831 if (column != -1 || page_addr != -1) {
832 int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
834 /* Serially input address */
836 /* Adjust columns for 16 bit buswidth */
837 if (chip->options & NAND_BUSWIDTH_16 &&
838 !nand_opcode_8bits(command))
840 chip->cmd_ctrl(mtd, column, ctrl);
841 ctrl &= ~NAND_CTRL_CHANGE;
843 /* Only output a single addr cycle for 8bits opcodes. */
844 if (!nand_opcode_8bits(command))
845 chip->cmd_ctrl(mtd, column >> 8, ctrl);
847 if (page_addr != -1) {
848 chip->cmd_ctrl(mtd, page_addr, ctrl);
849 chip->cmd_ctrl(mtd, page_addr >> 8,
850 NAND_NCE | NAND_ALE);
851 /* One more address cycle for devices > 128MiB */
852 if (chip->chipsize > (128 << 20))
853 chip->cmd_ctrl(mtd, page_addr >> 16,
854 NAND_NCE | NAND_ALE);
857 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
860 * Program and erase have their own busy handlers; status and sequential
861 * in need no delay.
865 case NAND_CMD_CACHEDPROG:
866 case NAND_CMD_PAGEPROG:
867 case NAND_CMD_ERASE1:
868 case NAND_CMD_ERASE2:
870 case NAND_CMD_STATUS:
871 case NAND_CMD_READID:
872 case NAND_CMD_SET_FEATURES:
876 nand_ccs_delay(chip);
882 udelay(chip->chip_delay);
883 chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
884 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
885 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
886 NAND_NCE | NAND_CTRL_CHANGE);
887 /* EZ-NAND can take up to 250ms as per ONFi v4.0 */
888 nand_wait_status_ready(mtd, 250);
891 case NAND_CMD_RNDOUT:
892 /* No ready / busy check necessary */
893 chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART,
894 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
895 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
896 NAND_NCE | NAND_CTRL_CHANGE);
898 nand_ccs_delay(chip);
903 * READ0 is sometimes used to exit GET STATUS mode. When this
904 * is the case no address cycles are requested, and we can use
905 * this information to detect that READSTART should not be issued.
908 if (column == -1 && page_addr == -1)
911 chip->cmd_ctrl(mtd, NAND_CMD_READSTART,
912 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
913 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
914 NAND_NCE | NAND_CTRL_CHANGE);
916 /* This applies to read commands */
919 * If we don't have access to the busy pin, we apply the given command delay.
922 if (!chip->dev_ready) {
923 udelay(chip->chip_delay);
929 * Apply this short delay always to ensure that we do wait tWB in
930 * any case on any machine.
934 nand_wait_ready(mtd);
938 * panic_nand_get_device - [GENERIC] Get chip for selected access
939 * @chip: the nand chip descriptor
940 * @mtd: MTD device structure
941 * @new_state: the state which is requested
943 * Used when in panic, no locks are taken.
945 static void panic_nand_get_device(struct nand_chip *chip,
946 struct mtd_info *mtd, int new_state)
948 /* Hardware controller shared among independent devices */
949 chip->controller->active = chip;
950 chip->state = new_state;
954 * nand_get_device - [GENERIC] Get chip for selected access
955 * @mtd: MTD device structure
956 * @new_state: the state which is requested
958 * Get the device and lock it for exclusive access
961 nand_get_device(struct mtd_info *mtd, int new_state)
963 struct nand_chip *chip = mtd_to_nand(mtd);
964 spinlock_t *lock = &chip->controller->lock;
965 wait_queue_head_t *wq = &chip->controller->wq;
966 DECLARE_WAITQUEUE(wait, current);
970 /* Hardware controller shared among independent devices */
971 if (!chip->controller->active)
972 chip->controller->active = chip;
974 if (chip->controller->active == chip && chip->state == FL_READY) {
975 chip->state = new_state;
979 if (new_state == FL_PM_SUSPENDED) {
980 if (chip->controller->active->state == FL_PM_SUSPENDED) {
981 chip->state = FL_PM_SUSPENDED;
986 set_current_state(TASK_UNINTERRUPTIBLE);
987 add_wait_queue(wq, &wait);
990 remove_wait_queue(wq, &wait);
995 * panic_nand_wait - [GENERIC] wait until the command is done
996 * @mtd: MTD device structure
997 * @chip: NAND chip structure
1000 * Wait for command done. This is a helper function for nand_wait used when
1001 * we are in interrupt context. May happen when in panic and trying to write
1002 * an oops through mtdoops.
1004 static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
1005 unsigned long timeo)
1008 for (i = 0; i < timeo; i++) {
1009 if (chip->dev_ready) {
1010 if (chip->dev_ready(mtd))
1013 if (chip->read_byte(mtd) & NAND_STATUS_READY)
1021 * nand_wait - [DEFAULT] wait until the command is done
1022 * @mtd: MTD device structure
1023 * @chip: NAND chip structure
1025 * Wait for command done. This applies to erase and program only.
1027 static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
1031 unsigned long timeo = 400;
1034 * Apply this short delay always to ensure that we do wait tWB in any
1035 * case on any machine.
1039 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
1041 if (in_interrupt() || oops_in_progress)
1042 panic_nand_wait(mtd, chip, timeo);
1044 timeo = jiffies + msecs_to_jiffies(timeo);
1046 if (chip->dev_ready) {
1047 if (chip->dev_ready(mtd))
1050 if (chip->read_byte(mtd) & NAND_STATUS_READY)
1054 } while (time_before(jiffies, timeo));
1057 status = (int)chip->read_byte(mtd);
1058 /* This can happen in case of timeout or buggy dev_ready */
1059 WARN_ON(!(status & NAND_STATUS_READY));
1064 * nand_reset_data_interface - Reset data interface and timings
1065 * @chip: The NAND chip
1066 * @chipnr: Internal die id
1068 * Reset the Data interface and timings to ONFI mode 0.
1070 * Returns 0 for success or negative error code otherwise.
1072 static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
1074 struct mtd_info *mtd = nand_to_mtd(chip);
1075 const struct nand_data_interface *conf;
1078 if (!chip->setup_data_interface)
1082 * The ONFI specification says:
1084 * To transition from NV-DDR or NV-DDR2 to the SDR data
1085 * interface, the host shall use the Reset (FFh) command
1086 * using SDR timing mode 0. A device in any timing mode is
1087 * required to recognize Reset (FFh) command issued in SDR timing mode 0.
1091 * Configure the data interface in SDR mode and set the
1092 * timings to timing mode 0.
1095 conf = nand_get_default_data_interface();
1096 ret = chip->setup_data_interface(mtd, chipnr, conf);
1098 pr_err("Failed to configure data interface to SDR timing mode 0\n");
1104 * nand_setup_data_interface - Setup the best data interface and timings
1105 * @chip: The NAND chip
1106 * @chipnr: Internal die id
1108 * Find and configure the best data interface and NAND timings supported by
1109 * the chip and the driver.
1110 * First tries to retrieve supported timing modes from ONFI information,
1111 * and if the NAND chip does not support ONFI, relies on the
1112 * ->onfi_timing_mode_default specified in the nand_ids table.
1114 * Returns 0 for success or negative error code otherwise.
1116 static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
1118 struct mtd_info *mtd = nand_to_mtd(chip);
1121 if (!chip->setup_data_interface || !chip->data_interface)
1125 * Ensure the timing mode has been changed on the chip side
1126 * before changing timings on the controller side.
1128 if (chip->onfi_version) {
1129 u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
1130 chip->onfi_timing_mode_default,
1133 ret = chip->onfi_set_features(mtd, chip,
1134 ONFI_FEATURE_ADDR_TIMING_MODE,
1140 ret = chip->setup_data_interface(mtd, chipnr, chip->data_interface);
1146 * nand_init_data_interface - find the best data interface and timings
1147 * @chip: The NAND chip
1149 * Find the best data interface and NAND timings supported by the chip and the driver.
1151 * First tries to retrieve supported timing modes from ONFI information,
1152 * and if the NAND chip does not support ONFI, relies on the
1153 * ->onfi_timing_mode_default specified in the nand_ids table. After this
1154 * function nand_chip->data_interface is initialized with the best timing mode
1157 * Returns 0 for success or negative error code otherwise.
1159 static int nand_init_data_interface(struct nand_chip *chip)
1161 struct mtd_info *mtd = nand_to_mtd(chip);
1162 int modes, mode, ret;
1164 if (!chip->setup_data_interface)
1168 * First try to identify the best timings from ONFI parameters and
1169 * if the NAND does not support ONFI, fall back to the default ONFI timing mode.
1172 modes = onfi_get_async_timing_mode(chip);
1173 if (modes == ONFI_TIMING_MODE_UNKNOWN) {
1174 if (!chip->onfi_timing_mode_default)
1177 modes = GENMASK(chip->onfi_timing_mode_default, 0);
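	/*
	 * Example: onfi_timing_mode_default == 3 gives GENMASK(3, 0) == 0xf,
	 * i.e. timing modes 0 to 3 become candidates and the loop below tries
	 * the fastest one first.
	 */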
1180 chip->data_interface = kzalloc(sizeof(*chip->data_interface),
1182 if (!chip->data_interface)
1185 for (mode = fls(modes) - 1; mode >= 0; mode--) {
1186 ret = onfi_init_data_interface(chip, chip->data_interface,
1187 NAND_SDR_IFACE, mode);
1191 /* Pass NAND_DATA_IFACE_CHECK_ONLY to only check that the controller supports this mode, without applying it */
1192 ret = chip->setup_data_interface(mtd,
1193 NAND_DATA_IFACE_CHECK_ONLY,
1194 chip->data_interface);
1196 chip->onfi_timing_mode_default = mode;
1204 static void nand_release_data_interface(struct nand_chip *chip)
1206 kfree(chip->data_interface);
1210 * nand_reset - Reset and initialize a NAND device
1211 * @chip: The NAND chip
1212 * @chipnr: Internal die id
1214 * Returns 0 for success or negative error code otherwise
1216 int nand_reset(struct nand_chip *chip, int chipnr)
1218 struct mtd_info *mtd = nand_to_mtd(chip);
1221 ret = nand_reset_data_interface(chip, chipnr);
1226 * The CS line has to be released before we can apply the new NAND
1227 * interface settings, hence this weird ->select_chip() dance.
1229 chip->select_chip(mtd, chipnr);
1230 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
1231 chip->select_chip(mtd, -1);
1233 chip->select_chip(mtd, chipnr);
1234 ret = nand_setup_data_interface(chip, chipnr);
1235 chip->select_chip(mtd, -1);
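	/*
	 * Typical use at init time (sketch): the core resets every die of a
	 * multi-die package in turn, e.g.
	 *
	 *	for (i = 0; i < chip->numchips; i++)
	 *		nand_reset(chip, i);
	 */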
1243 * __nand_unlock - [REPLACEABLE] unlocks specified locked blocks
1245 * @ofs: offset to start unlock from
1246 * @len: length to unlock
1248 * @invert: when = 0, unlock the range of blocks within the lower and
1249 *          upper boundary address
1250 *          when = 1, unlock the range of blocks outside the boundaries
1251 *          of the lower and upper boundary address
1253 * Returns unlock status.
1255 static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
1256 uint64_t len, int invert)
1260 struct nand_chip *chip = mtd_to_nand(mtd);
1262 /* Submit address of first page to unlock */
1263 page = ofs >> chip->page_shift;
1264 chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask);
1266 /* Submit address of last page to unlock */
1267 page = (ofs + len) >> chip->page_shift;
1268 chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1,
1269 (page | invert) & chip->pagemask);
1271 /* Call wait ready function */
1272 status = chip->waitfunc(mtd, chip);
1273 /* See if device thinks it succeeded */
1274 if (status & NAND_STATUS_FAIL) {
1275 pr_debug("%s: error status = 0x%08x\n",
1284 * nand_unlock - [REPLACEABLE] unlocks specified locked blocks
1286 * @ofs: offset to start unlock from
1287 * @len: length to unlock
1289 * Returns unlock status.
1291 int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1295 struct nand_chip *chip = mtd_to_nand(mtd);
1297 pr_debug("%s: start = 0x%012llx, len = %llu\n",
1298 __func__, (unsigned long long)ofs, len);
1300 if (check_offs_len(mtd, ofs, len))
1303 /* Align to last block address if size addresses end of the device */
1304 if (ofs + len == mtd->size)
1305 len -= mtd->erasesize;
1307 nand_get_device(mtd, FL_UNLOCKING);
1309 /* Shift to get chip number */
1310 chipnr = ofs >> chip->chip_shift;
1314 * If we want to check WP through READ STATUS (bit 7 of the status
1315 * register) we must reset the chip first, because some operations
1316 * (e.g. erasing or programming a locked block) can also clear bit 7
1317 * of the status register.
1319 nand_reset(chip, chipnr);
1321 chip->select_chip(mtd, chipnr);
1323 /* Check, if it is write protected */
1324 if (nand_check_wp(mtd)) {
1325 pr_debug("%s: device is write protected!\n",
1331 ret = __nand_unlock(mtd, ofs, len, 0);
1334 chip->select_chip(mtd, -1);
1335 nand_release_device(mtd);
1339 EXPORT_SYMBOL(nand_unlock);
1342 * nand_lock - [REPLACEABLE] locks all blocks present in the device
1344 * @ofs: offset to start unlock from
1345 * @len: length to unlock
1347 * This feature is not supported in many NAND parts. 'Micron' NAND parts do
1348 * have this feature, but it allows only locking all blocks, not a specified
1349 * range of blocks. For now, the 'lock' feature is implemented by making use of 'unlock'.
1352 * Returns lock status.
1354 int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1357 int chipnr, status, page;
1358 struct nand_chip *chip = mtd_to_nand(mtd);
1360 pr_debug("%s: start = 0x%012llx, len = %llu\n",
1361 __func__, (unsigned long long)ofs, len);
1363 if (check_offs_len(mtd, ofs, len))
1366 nand_get_device(mtd, FL_LOCKING);
1368 /* Shift to get chip number */
1369 chipnr = ofs >> chip->chip_shift;
1373 * If we want to check WP through READ STATUS (bit 7 of the status
1374 * register) we must reset the chip first, because some operations
1375 * (e.g. erasing or programming a locked block) can also clear bit 7
1376 * of the status register.
1378 nand_reset(chip, chipnr);
1380 chip->select_chip(mtd, chipnr);
1382 /* Check, if it is write protected */
1383 if (nand_check_wp(mtd)) {
1384 pr_debug("%s: device is write protected!\n",
1386 status = MTD_ERASE_FAILED;
1391 /* Submit address of first page to lock */
1392 page = ofs >> chip->page_shift;
1393 chip->cmdfunc(mtd, NAND_CMD_LOCK, -1, page & chip->pagemask);
1395 /* Call wait ready function */
1396 status = chip->waitfunc(mtd, chip);
1397 /* See if device thinks it succeeded */
1398 if (status & NAND_STATUS_FAIL) {
1399 pr_debug("%s: error status = 0x%08x\n",
1405 ret = __nand_unlock(mtd, ofs, len, 0x1);
1408 chip->select_chip(mtd, -1);
1409 nand_release_device(mtd);
1413 EXPORT_SYMBOL(nand_lock);
1416 * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
1417 * @buf: buffer to test
1418 * @len: buffer length
1419 * @bitflips_threshold: maximum number of bitflips
1421 * Check if a buffer contains only 0xff, which means the underlying region
1422 * has been erased and is ready to be programmed.
1423 * The bitflips_threshold specifies the maximum number of bitflips before
1424 * considering the region as not erased.
1425 * Note: The logic of this function has been extracted from the memweight
1426 * implementation, except that nand_check_erased_buf exits before
1427 * testing the whole buffer if the number of bitflips exceeds the
1428 * bitflips_threshold value.
1430 * Returns a positive number of bitflips less than or equal to
1431 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the threshold.
1434 static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
1436 const unsigned char *bitmap = buf;
1440 for (; len && ((uintptr_t)bitmap) % sizeof(long);
1442 weight = hweight8(*bitmap);
1443 bitflips += BITS_PER_BYTE - weight;
1444 if (unlikely(bitflips > bitflips_threshold))
1448 for (; len >= sizeof(long);
1449 len -= sizeof(long), bitmap += sizeof(long)) {
1450 unsigned long d = *((unsigned long *)bitmap);
1453 weight = hweight_long(d);
1454 bitflips += BITS_PER_LONG - weight;
1455 if (unlikely(bitflips > bitflips_threshold))
1459 for (; len > 0; len--, bitmap++) {
1460 weight = hweight8(*bitmap);
1461 bitflips += BITS_PER_BYTE - weight;
1462 if (unlikely(bitflips > bitflips_threshold))
1470 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only 0xff data
1472 * @data: data buffer to test
1473 * @datalen: data length
 * @ecc: ECC buffer
1475 * @ecclen: ECC length
1476 * @extraoob: extra OOB buffer
1477 * @extraooblen: extra OOB length
1478 * @bitflips_threshold: maximum number of bitflips
1480 * Check if a data buffer and its associated ECC and OOB data contains only
1481 * 0xff pattern, which means the underlying region has been erased and is
1482 * ready to be programmed.
1483 * The bitflips_threshold specifies the maximum number of bitflips before
1484 * considering the region as not erased.
1487 * 1/ ECC algorithms are working on pre-defined block sizes which are usually
1488 * different from the NAND page size. When fixing bitflips, ECC engines will
1489 * report the number of errors per chunk, and the NAND core infrastructure
1490 * expects you to return the maximum number of bitflips for the whole page.
1491 * This is why you should always use this function on a single chunk and
1492 * not on the whole page. After checking each chunk you should update your
1493 * max_bitflips value accordingly.
1494 * 2/ When checking for bitflips in erased pages you should not only check
1495 * the payload data but also their associated ECC data, because a user might
1496 * have programmed almost all bits to 1 but a few. In this case, we
1497 * shouldn't consider the chunk as erased, and checking ECC bytes prevents this case from being falsely detected as erased.
1499 * 3/ The extraoob argument is optional, and should be used if some of your OOB
1500 * data are protected by the ECC engine.
1501 * It could also be used if you support subpages and want to attach some
1502 * extra OOB data to an ECC chunk.
1504 * Returns a positive number of bitflips less than or equal to
1505 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
1506 * threshold. In case of success, the passed buffers are filled with 0xff.
1508 int nand_check_erased_ecc_chunk(void *data, int datalen,
1509 void *ecc, int ecclen,
1510 void *extraoob, int extraooblen,
1511 int bitflips_threshold)
1513 int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
1515 data_bitflips = nand_check_erased_buf(data, datalen,
1516 bitflips_threshold);
1517 if (data_bitflips < 0)
1518 return data_bitflips;
1520 bitflips_threshold -= data_bitflips;
1522 ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
1523 if (ecc_bitflips < 0)
1524 return ecc_bitflips;
1526 bitflips_threshold -= ecc_bitflips;
1528 extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
1529 bitflips_threshold);
1530 if (extraoob_bitflips < 0)
1531 return extraoob_bitflips;
1534 memset(data, 0xff, datalen);
1537 memset(ecc, 0xff, ecclen);
1539 if (extraoob_bitflips)
1540 memset(extraoob, 0xff, extraooblen);
1542 return data_bitflips + ecc_bitflips + extraoob_bitflips;
1544 EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
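/*
 * A minimal sketch (hypothetical helper, not used by this driver) of the
 * per-chunk pattern described above: when the ECC engine reports an
 * uncorrectable chunk, re-check it as a possibly erased chunk before counting
 * it as a real failure, and fold the result into the page-wide max_bitflips.
 */
static inline int example_handle_chunk_result(struct mtd_info *mtd,
					      struct nand_chip *chip,
					      int stat, u8 *data, u8 *ecc,
					      unsigned int *max_bitflips)
{
	if (stat == -EBADMSG)
		stat = nand_check_erased_ecc_chunk(data, chip->ecc.size,
						   ecc, chip->ecc.bytes,
						   NULL, 0,
						   chip->ecc.strength);

	if (stat < 0) {
		mtd->ecc_stats.failed++;
		return stat;
	}

	mtd->ecc_stats.corrected += stat;
	*max_bitflips = max_t(unsigned int, *max_bitflips, stat);
	return 0;
}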
1547 * nand_read_page_raw - [INTERN] read raw page data without ecc
1548 * @mtd: mtd info structure
1549 * @chip: nand chip info structure
1550 * @buf: buffer to store read data
1551 * @oob_required: caller requires OOB data read to chip->oob_poi
1552 * @page: page number to read
1554 * Not for syndrome calculating ECC controllers, which use a special oob layout.
1556 int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1557 uint8_t *buf, int oob_required, int page)
1559 chip->read_buf(mtd, buf, mtd->writesize);
1561 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1564 EXPORT_SYMBOL(nand_read_page_raw);
1567 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
1568 * @mtd: mtd info structure
1569 * @chip: nand chip info structure
1570 * @buf: buffer to store read data
1571 * @oob_required: caller requires OOB data read to chip->oob_poi
1572 * @page: page number to read
1574 * We need a special oob layout and handling even when OOB isn't used.
1576 static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
1577 struct nand_chip *chip, uint8_t *buf,
1578 int oob_required, int page)
1580 int eccsize = chip->ecc.size;
1581 int eccbytes = chip->ecc.bytes;
1582 uint8_t *oob = chip->oob_poi;
1585 for (steps = chip->ecc.steps; steps > 0; steps--) {
1586 chip->read_buf(mtd, buf, eccsize);
1589 if (chip->ecc.prepad) {
1590 chip->read_buf(mtd, oob, chip->ecc.prepad);
1591 oob += chip->ecc.prepad;
1594 chip->read_buf(mtd, oob, eccbytes);
1597 if (chip->ecc.postpad) {
1598 chip->read_buf(mtd, oob, chip->ecc.postpad);
1599 oob += chip->ecc.postpad;
1603 size = mtd->oobsize - (oob - chip->oob_poi);
1605 chip->read_buf(mtd, oob, size);
1611 * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
1612 * @mtd: mtd info structure
1613 * @chip: nand chip info structure
1614 * @buf: buffer to store read data
1615 * @oob_required: caller requires OOB data read to chip->oob_poi
1616 * @page: page number to read
1618 static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1619 uint8_t *buf, int oob_required, int page)
1621 int i, eccsize = chip->ecc.size, ret;
1622 int eccbytes = chip->ecc.bytes;
1623 int eccsteps = chip->ecc.steps;
1625 uint8_t *ecc_calc = chip->buffers->ecccalc;
1626 uint8_t *ecc_code = chip->buffers->ecccode;
1627 unsigned int max_bitflips = 0;
1629 chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
1631 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
1632 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1634 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1639 eccsteps = chip->ecc.steps;
1642 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1645 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
1647 mtd->ecc_stats.failed++;
1649 mtd->ecc_stats.corrected += stat;
1650 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1653 return max_bitflips;
1657 * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
1658 * @mtd: mtd info structure
1659 * @chip: nand chip info structure
1660 * @data_offs: offset of requested data within the page
1661 * @readlen: data length
1662 * @bufpoi: buffer to store read data
1663 * @page: page number to read
1665 static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1666 uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi,
1669 int start_step, end_step, num_steps, ret;
1671 int data_col_addr, i, gaps = 0;
1672 int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
1673 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
1674 int index, section = 0;
1675 unsigned int max_bitflips = 0;
1676 struct mtd_oob_region oobregion = { };
1678 /* Column address within the page aligned to ECC size (256bytes) */
1679 start_step = data_offs / chip->ecc.size;
1680 end_step = (data_offs + readlen - 1) / chip->ecc.size;
1681 num_steps = end_step - start_step + 1;
1682 index = start_step * chip->ecc.bytes;
1684 /* Data size aligned to ECC ecc.size */
1685 datafrag_len = num_steps * chip->ecc.size;
1686 eccfrag_len = num_steps * chip->ecc.bytes;
1688 data_col_addr = start_step * chip->ecc.size;
1689 /* If we read not a page aligned data */
1690 if (data_col_addr != 0)
1691 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_col_addr, -1);
1693 p = bufpoi + data_col_addr;
1694 chip->read_buf(mtd, p, datafrag_len);
1697 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
1698 chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
1701 * The performance is faster if we position offsets according to
1702 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
1704 ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
1708 if (oobregion.length < eccfrag_len)
1712 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
1713 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1716 * Send the command to read the particular ECC bytes; take care
1717 * about buswidth alignment in read_buf.
1719 aligned_pos = oobregion.offset & ~(busw - 1);
1720 aligned_len = eccfrag_len;
1721 if (oobregion.offset & (busw - 1))
1723 if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
1727 chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
1728 mtd->writesize + aligned_pos, -1);
1729 chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
1732 ret = mtd_ooblayout_get_eccbytes(mtd, chip->buffers->ecccode,
1733 chip->oob_poi, index, eccfrag_len);
1737 p = bufpoi + data_col_addr;
1738 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
1741 stat = chip->ecc.correct(mtd, p,
1742 &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
1743 if (stat == -EBADMSG &&
1744 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1745 /* check for empty pages with bitflips */
1746 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
1747 &chip->buffers->ecccode[i],
1750 chip->ecc.strength);
1754 mtd->ecc_stats.failed++;
1756 mtd->ecc_stats.corrected += stat;
1757 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1760 return max_bitflips;
1764 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
1765 * @mtd: mtd info structure
1766 * @chip: nand chip info structure
1767 * @buf: buffer to store read data
1768 * @oob_required: caller requires OOB data read to chip->oob_poi
1769 * @page: page number to read
1771 * Not for syndrome calculating ECC controllers which need a special oob layout.
1773 static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1774 uint8_t *buf, int oob_required, int page)
1776 int i, eccsize = chip->ecc.size, ret;
1777 int eccbytes = chip->ecc.bytes;
1778 int eccsteps = chip->ecc.steps;
1780 uint8_t *ecc_calc = chip->buffers->ecccalc;
1781 uint8_t *ecc_code = chip->buffers->ecccode;
1782 unsigned int max_bitflips = 0;
1784 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1785 chip->ecc.hwctl(mtd, NAND_ECC_READ);
1786 chip->read_buf(mtd, p, eccsize);
1787 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1789 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1791 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1796 eccsteps = chip->ecc.steps;
1799 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1802 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
1803 if (stat == -EBADMSG &&
1804 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1805 /* check for empty pages with bitflips */
1806 stat = nand_check_erased_ecc_chunk(p, eccsize,
1807 &ecc_code[i], eccbytes,
1809 chip->ecc.strength);
1813 mtd->ecc_stats.failed++;
1815 mtd->ecc_stats.corrected += stat;
1816 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1819 return max_bitflips;
1823 * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
1824 * @mtd: mtd info structure
1825 * @chip: nand chip info structure
1826 * @buf: buffer to store read data
1827 * @oob_required: caller requires OOB data read to chip->oob_poi
1828 * @page: page number to read
1830 * Hardware ECC for large page chips which requires the OOB to be read first.
1831 * For this ECC mode, the write_page method is re-used from ECC_HW. These
1832 * methods read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME
1833 * support which, with multiple ECC steps, follows the "infix ECC" scheme and
1834 * reads/writes ECC from the data area, overwriting the NAND manufacturer's bad block markings.
1836 static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
1837 struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
1839 int i, eccsize = chip->ecc.size, ret;
1840 int eccbytes = chip->ecc.bytes;
1841 int eccsteps = chip->ecc.steps;
1843 uint8_t *ecc_code = chip->buffers->ecccode;
1844 uint8_t *ecc_calc = chip->buffers->ecccalc;
1845 unsigned int max_bitflips = 0;
1847 /* Read the OOB area first */
1848 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
1849 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1850 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
1852 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1857 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1860 chip->ecc.hwctl(mtd, NAND_ECC_READ);
1861 chip->read_buf(mtd, p, eccsize);
1862 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1864 stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
1865 if (stat == -EBADMSG &&
1866 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1867 /* check for empty pages with bitflips */
1868 stat = nand_check_erased_ecc_chunk(p, eccsize,
1869 &ecc_code[i], eccbytes,
1871 chip->ecc.strength);
1875 mtd->ecc_stats.failed++;
1877 mtd->ecc_stats.corrected += stat;
1878 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1881 return max_bitflips;
1885 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
1886 * @mtd: mtd info structure
1887 * @chip: nand chip info structure
1888 * @buf: buffer to store read data
1889 * @oob_required: caller requires OOB data read to chip->oob_poi
1890 * @page: page number to read
1892 * The hw generator calculates the error syndrome automatically. Therefore we
1893 * need a special oob layout and handling.
1895 static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1896 uint8_t *buf, int oob_required, int page)
1898 int i, eccsize = chip->ecc.size;
1899 int eccbytes = chip->ecc.bytes;
1900 int eccsteps = chip->ecc.steps;
1901 int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
1903 uint8_t *oob = chip->oob_poi;
1904 unsigned int max_bitflips = 0;
1906 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1909 chip->ecc.hwctl(mtd, NAND_ECC_READ);
1910 chip->read_buf(mtd, p, eccsize);
1912 if (chip->ecc.prepad) {
1913 chip->read_buf(mtd, oob, chip->ecc.prepad);
1914 oob += chip->ecc.prepad;
1917 chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
1918 chip->read_buf(mtd, oob, eccbytes);
1919 stat = chip->ecc.correct(mtd, p, oob, NULL);
1923 if (chip->ecc.postpad) {
1924 chip->read_buf(mtd, oob, chip->ecc.postpad);
1925 oob += chip->ecc.postpad;
1928 if (stat == -EBADMSG &&
1929 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1930 /* check for empty pages with bitflips */
1931 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
1935 chip->ecc.strength);
1939 mtd->ecc_stats.failed++;
1941 mtd->ecc_stats.corrected += stat;
1942 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1946 /* Calculate remaining oob bytes */
1947 i = mtd->oobsize - (oob - chip->oob_poi);
1949 chip->read_buf(mtd, oob, i);
1951 return max_bitflips;
1955 * nand_transfer_oob - [INTERN] Transfer oob to client buffer
1956 * @mtd: mtd info structure
1957 * @oob: oob destination address
1958 * @ops: oob ops structure
1959 * @len: size of oob to transfer
1961 static uint8_t *nand_transfer_oob(struct mtd_info *mtd, uint8_t *oob,
1962 struct mtd_oob_ops *ops, size_t len)
1964 struct nand_chip *chip = mtd_to_nand(mtd);
1967 switch (ops->mode) {
1969 case MTD_OPS_PLACE_OOB:
1971 memcpy(oob, chip->oob_poi + ops->ooboffs, len);
1974 case MTD_OPS_AUTO_OOB:
1975 ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
1987 * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
1988 * @mtd: MTD device structure
1989 * @retry_mode: the retry mode to use
1991 * Some vendors supply a special command to shift the Vt threshold, to be used
1992 * when there are too many bitflips in a page (i.e., ECC error). After setting
1993 * a new threshold, the host should retry reading the page.
1995 static int nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
1997 struct nand_chip *chip = mtd_to_nand(mtd);
1999 pr_debug("setting READ RETRY mode %d\n", retry_mode);
2001 if (retry_mode >= chip->read_retries)
2004 if (!chip->setup_read_retry)
2007 return chip->setup_read_retry(mtd, retry_mode);
2011 * nand_do_read_ops - [INTERN] Read data with ECC
2012 * @mtd: MTD device structure
2013 * @from: offset to read from
2014 * @ops: oob ops structure
2016 * Internal function. Called with chip held.
2018 static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
2019 struct mtd_oob_ops *ops)
2021 int chipnr, page, realpage, col, bytes, aligned, oob_required;
2022 struct nand_chip *chip = mtd_to_nand(mtd);
2024 uint32_t readlen = ops->len;
2025 uint32_t oobreadlen = ops->ooblen;
2026 uint32_t max_oobsize = mtd_oobavail(mtd, ops);
2028 uint8_t *bufpoi, *oob, *buf;
2030 unsigned int max_bitflips = 0;
2032 bool ecc_fail = false;
2034 chipnr = (int)(from >> chip->chip_shift);
2035 chip->select_chip(mtd, chipnr);
2037 realpage = (int)(from >> chip->page_shift);
2038 page = realpage & chip->pagemask;
2040 col = (int)(from & (mtd->writesize - 1));
2044 oob_required = oob ? 1 : 0;
2047 unsigned int ecc_failures = mtd->ecc_stats.failed;
2049 bytes = min(mtd->writesize - col, readlen);
2050 aligned = (bytes == mtd->writesize);
2054 else if (chip->options & NAND_USE_BOUNCE_BUFFER)
2055 use_bufpoi = !virt_addr_valid(buf) ||
2056 !IS_ALIGNED((unsigned long)buf,
2061 /* Is the current page in the buffer? */
2062 if (realpage != chip->pagebuf || oob) {
2063 bufpoi = use_bufpoi ? chip->buffers->databuf : buf;
2065 if (use_bufpoi && aligned)
2066 pr_debug("%s: using read bounce buffer for buf@%p\n",
2070 if (nand_standard_page_accessors(&chip->ecc))
2071 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
2074 * Now read the page into the buffer. Absent an error,
2075 * the read methods return max bitflips per ecc step.
2077 if (unlikely(ops->mode == MTD_OPS_RAW))
2078 ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
2081 else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
2083 ret = chip->ecc.read_subpage(mtd, chip,
2087 ret = chip->ecc.read_page(mtd, chip, bufpoi,
2088 oob_required, page);
2091 /* Invalidate page cache */
2096 /* Transfer unaligned data */
2098 if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
2099 !(mtd->ecc_stats.failed - ecc_failures) &&
2100 (ops->mode != MTD_OPS_RAW)) {
2101 chip->pagebuf = realpage;
2102 chip->pagebuf_bitflips = ret;
2104 /* Invalidate page cache */
2107 memcpy(buf, chip->buffers->databuf + col, bytes);
2110 if (unlikely(oob)) {
2111 int toread = min(oobreadlen, max_oobsize);
2114 oob = nand_transfer_oob(mtd,
2116 oobreadlen -= toread;
2120 if (chip->options & NAND_NEED_READRDY) {
2121 /* Apply delay or wait for ready/busy pin */
2122 if (!chip->dev_ready)
2123 udelay(chip->chip_delay);
2125 nand_wait_ready(mtd);
2128 if (mtd->ecc_stats.failed - ecc_failures) {
2129 if (retry_mode + 1 < chip->read_retries) {
2131 ret = nand_setup_read_retry(mtd,
2136 /* Reset failures; retry */
2137 mtd->ecc_stats.failed = ecc_failures;
2140 /* No more retry modes; real failure */
2146 max_bitflips = max_t(unsigned int, max_bitflips, ret);
2148 memcpy(buf, chip->buffers->databuf + col, bytes);
2150 max_bitflips = max_t(unsigned int, max_bitflips,
2151 chip->pagebuf_bitflips);
2156 /* Reset to retry mode 0 */
2158 ret = nand_setup_read_retry(mtd, 0);
2167 /* For subsequent reads align to page boundary */
2169 /* Increment page address */
2172 page = realpage & chip->pagemask;
2173 /* Check, if we cross a chip boundary */
2176 chip->select_chip(mtd, -1);
2177 chip->select_chip(mtd, chipnr);
2180 chip->select_chip(mtd, -1);
2182 ops->retlen = ops->len - (size_t) readlen;
2184 ops->oobretlen = ops->ooblen - oobreadlen;
2192 return max_bitflips;
2196 * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ecc
2197 * @mtd: MTD device structure
2198 * @from: offset to read from
2199 * @len: number of bytes to read
2200 * @retlen: pointer to variable to store the number of read bytes
2201 * @buf: the databuffer to put data
2203 * Get hold of the chip and call nand_do_read_ops.
2205 static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
2206 size_t *retlen, uint8_t *buf)
2208 struct mtd_oob_ops ops;
2211 nand_get_device(mtd, FL_READING);
2212 memset(&ops, 0, sizeof(ops));
2215 ops.mode = MTD_OPS_PLACE_OOB;
2216 ret = nand_do_read_ops(mtd, from, &ops);
2217 *retlen = ops.retlen;
2218 nand_release_device(mtd);
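	/*
	 * Consumer-side sketch: a user of the MTD API reaches this function
	 * through mtd_read(), e.g.
	 *
	 *	size_t retlen;
	 *	err = mtd_read(mtd, 0, mtd->writesize, &retlen, buf);
	 *
	 * (buf and err are assumed to be provided by the caller.)
	 */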
2223 * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
2224 * @mtd: mtd info structure
2225 * @chip: nand chip info structure
2226 * @page: page number to read
2228 int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
2230 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
2231 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
2234 EXPORT_SYMBOL(nand_read_oob_std);
2237 * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC with syndrome
2239 * @mtd: mtd info structure
2240 * @chip: nand chip info structure
2241 * @page: page number to read
2243 int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
2246 int length = mtd->oobsize;
2247 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
2248 int eccsize = chip->ecc.size;
2249 uint8_t *bufpoi = chip->oob_poi;
2250 int i, toread, sndrnd = 0, pos;
2252 chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page);
2253 for (i = 0; i < chip->ecc.steps; i++) {
2255 pos = eccsize + i * (eccsize + chunk);
2256 if (mtd->writesize > 512)
2257 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, pos, -1);
2259 chip->cmdfunc(mtd, NAND_CMD_READ0, pos, page);
2262 toread = min_t(int, length, chunk);
2263 chip->read_buf(mtd, bufpoi, toread);
2268 chip->read_buf(mtd, bufpoi, length);
2272 EXPORT_SYMBOL(nand_read_oob_syndrome);
2275 * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
2276 * @mtd: mtd info structure
2277 * @chip: nand chip info structure
2278 * @page: page number to write
2280 int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
2283 const uint8_t *buf = chip->oob_poi;
2284 int length = mtd->oobsize;
2286 chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
2287 chip->write_buf(mtd, buf, length);
2288 /* Send command to program the OOB data */
2289 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2291 status = chip->waitfunc(mtd, chip);
2293 return status & NAND_STATUS_FAIL ? -EIO : 0;
2295 EXPORT_SYMBOL(nand_write_oob_std);
2298 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
2299 * with syndrome - only for large page flash
2300 * @mtd: mtd info structure
2301 * @chip: nand chip info structure
2302 * @page: page number to write
2304 int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
2307 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
2308 int eccsize = chip->ecc.size, length = mtd->oobsize;
2309 int i, len, pos, status = 0, sndcmd = 0, steps = chip->ecc.steps;
2310 const uint8_t *bufpoi = chip->oob_poi;
2313 * data-ecc-data-ecc ... ecc-oob
2315 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
2317 if (!chip->ecc.prepad && !chip->ecc.postpad) {
2318 pos = steps * (eccsize + chunk);
2323 chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
2324 for (i = 0; i < steps; i++) {
2326 if (mtd->writesize <= 512) {
2327 uint32_t fill = 0xFFFFFFFF;
2331 int num = min_t(int, len, 4);
2332 chip->write_buf(mtd, (uint8_t *)&fill,
2337 pos = eccsize + i * (eccsize + chunk);
2338 chip->cmdfunc(mtd, NAND_CMD_RNDIN, pos, -1);
2342 len = min_t(int, length, chunk);
2343 chip->write_buf(mtd, bufpoi, len);
2348 chip->write_buf(mtd, bufpoi, length);
2350 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2351 status = chip->waitfunc(mtd, chip);
2353 return status & NAND_STATUS_FAIL ? -EIO : 0;
2355 EXPORT_SYMBOL(nand_write_oob_syndrome);
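/*
 * Editorial note (not from the original source): with the interleaved
 * syndrome layout described above, and assuming for illustration
 * ecc.size = 512, ecc.bytes = 8 and prepad = postpad = 0 (so chunk = 8),
 * OOB chunk i starts at column pos = 512 + i * (512 + 8) within the
 * page: 512 for step 0, 1032 for step 1, 1552 for step 2, and so on,
 * with any remaining free OOB bytes written after the last chunk.
 */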
2358 * nand_do_read_oob - [INTERN] NAND read out-of-band
2359 * @mtd: MTD device structure
2360 * @from: offset to read from
2361 * @ops: oob operations description structure
2363 * NAND read out-of-band data from the spare area.
2365 static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
2366 struct mtd_oob_ops *ops)
2368 int page, realpage, chipnr;
2369 struct nand_chip *chip = mtd_to_nand(mtd);
2370 struct mtd_ecc_stats stats;
2371 int readlen = ops->ooblen;
2373 uint8_t *buf = ops->oobbuf;
2376 pr_debug("%s: from = 0x%08Lx, len = %i\n",
2377 __func__, (unsigned long long)from, readlen);
2379 stats = mtd->ecc_stats;
2381 len = mtd_oobavail(mtd, ops);
2383 if (unlikely(ops->ooboffs >= len)) {
2384 pr_debug("%s: attempt to start read outside oob\n",
2389 /* Do not allow reads past end of device */
2390 if (unlikely(from >= mtd->size ||
2391 ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
2392 (from >> chip->page_shift)) * len)) {
2393 pr_debug("%s: attempt to read beyond end of device\n",
2398 chipnr = (int)(from >> chip->chip_shift);
2399 chip->select_chip(mtd, chipnr);
2401 /* Shift to get page */
2402 realpage = (int)(from >> chip->page_shift);
2403 page = realpage & chip->pagemask;
2406 if (ops->mode == MTD_OPS_RAW)
2407 ret = chip->ecc.read_oob_raw(mtd, chip, page);
2409 ret = chip->ecc.read_oob(mtd, chip, page);
2414 len = min(len, readlen);
2415 buf = nand_transfer_oob(mtd, buf, ops, len);
2417 if (chip->options & NAND_NEED_READRDY) {
2418 /* Apply delay or wait for ready/busy pin */
2419 if (!chip->dev_ready)
2420 udelay(chip->chip_delay);
2422 nand_wait_ready(mtd);
2429 /* Increment page address */
2432 page = realpage & chip->pagemask;
2433 /* Check, if we cross a chip boundary */
2436 chip->select_chip(mtd, -1);
2437 chip->select_chip(mtd, chipnr);
2440 chip->select_chip(mtd, -1);
2442 ops->oobretlen = ops->ooblen - readlen;
2447 if (mtd->ecc_stats.failed - stats.failed)
2450 return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
2454 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
2455 * @mtd: MTD device structure
2456 * @from: offset to read from
2457 * @ops: oob operation description structure
2459 * NAND read data and/or out-of-band data.
2461 static int nand_read_oob(struct mtd_info *mtd, loff_t from,
2462 struct mtd_oob_ops *ops)
2468 /* Do not allow reads past end of device */
2469 if (ops->datbuf && (from + ops->len) > mtd->size) {
2470 pr_debug("%s: attempt to read beyond end of device\n",
2475 if (ops->mode != MTD_OPS_PLACE_OOB &&
2476 ops->mode != MTD_OPS_AUTO_OOB &&
2477 ops->mode != MTD_OPS_RAW)
2480 nand_get_device(mtd, FL_READING);
2483 ret = nand_do_read_oob(mtd, from, ops);
2485 ret = nand_do_read_ops(mtd, from, ops);
2487 nand_release_device(mtd);
2493 * nand_write_page_raw - [INTERN] raw page write function
2494 * @mtd: mtd info structure
2495 * @chip: nand chip info structure
2497 * @oob_required: must write chip->oob_poi to OOB
2498 * @page: page number to write
2500 * Not for syndrome calculating ECC controllers, which use a special oob layout.
2502 int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
2503 const uint8_t *buf, int oob_required, int page)
2505 chip->write_buf(mtd, buf, mtd->writesize);
2507 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2511 EXPORT_SYMBOL(nand_write_page_raw);
2514 * nand_write_page_raw_syndrome - [INTERN] raw page write function
2515 * @mtd: mtd info structure
2516 * @chip: nand chip info structure
2518 * @oob_required: must write chip->oob_poi to OOB
2519 * @page: page number to write
2521 * We need a special oob layout and handling even when ECC isn't checked.
2523 static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
2524 struct nand_chip *chip,
2525 const uint8_t *buf, int oob_required,
2528 int eccsize = chip->ecc.size;
2529 int eccbytes = chip->ecc.bytes;
2530 uint8_t *oob = chip->oob_poi;
2533 for (steps = chip->ecc.steps; steps > 0; steps--) {
2534 chip->write_buf(mtd, buf, eccsize);
2537 if (chip->ecc.prepad) {
2538 chip->write_buf(mtd, oob, chip->ecc.prepad);
2539 oob += chip->ecc.prepad;
2542 chip->write_buf(mtd, oob, eccbytes);
2545 if (chip->ecc.postpad) {
2546 chip->write_buf(mtd, oob, chip->ecc.postpad);
2547 oob += chip->ecc.postpad;
2551 size = mtd->oobsize - (oob - chip->oob_poi);
2553 chip->write_buf(mtd, oob, size);
2558 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
2559 * @mtd: mtd info structure
2560 * @chip: nand chip info structure
2562 * @oob_required: must write chip->oob_poi to OOB
2563 * @page: page number to write
2565 static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
2566 const uint8_t *buf, int oob_required,
2569 int i, eccsize = chip->ecc.size, ret;
2570 int eccbytes = chip->ecc.bytes;
2571 int eccsteps = chip->ecc.steps;
2572 uint8_t *ecc_calc = chip->buffers->ecccalc;
2573 const uint8_t *p = buf;
2575 /* Software ECC calculation */
2576 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
2577 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2579 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2584 return chip->ecc.write_page_raw(mtd, chip, buf, 1, page);
2588 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
2589 * @mtd: mtd info structure
2590 * @chip: nand chip info structure
2592 * @oob_required: must write chip->oob_poi to OOB
2593 * @page: page number to write
2595 static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
2596 const uint8_t *buf, int oob_required,
2599 int i, eccsize = chip->ecc.size, ret;
2600 int eccbytes = chip->ecc.bytes;
2601 int eccsteps = chip->ecc.steps;
2602 uint8_t *ecc_calc = chip->buffers->ecccalc;
2603 const uint8_t *p = buf;
2605 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2606 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2607 chip->write_buf(mtd, p, eccsize);
2608 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2611 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2616 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2623 * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
2624 * @mtd: mtd info structure
2625 * @chip: nand chip info structure
2626 * @offset: column address of subpage within the page
2627 * @data_len: data length
2629 * @oob_required: must write chip->oob_poi to OOB
2630 * @page: page number to write
2632 static int nand_write_subpage_hwecc(struct mtd_info *mtd,
2633 struct nand_chip *chip, uint32_t offset,
2634 uint32_t data_len, const uint8_t *buf,
2635 int oob_required, int page)
2637 uint8_t *oob_buf = chip->oob_poi;
2638 uint8_t *ecc_calc = chip->buffers->ecccalc;
2639 int ecc_size = chip->ecc.size;
2640 int ecc_bytes = chip->ecc.bytes;
2641 int ecc_steps = chip->ecc.steps;
2642 uint32_t start_step = offset / ecc_size;
2643 uint32_t end_step = (offset + data_len - 1) / ecc_size;
2644 int oob_bytes = mtd->oobsize / ecc_steps;
2647 for (step = 0; step < ecc_steps; step++) {
2648 /* configure controller for WRITE access */
2649 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2651 /* write data (untouched subpages already masked by 0xFF) */
2652 chip->write_buf(mtd, buf, ecc_size);
2654 /* mask ECC of un-touched subpages by padding 0xFF */
2655 if ((step < start_step) || (step > end_step))
2656 memset(ecc_calc, 0xff, ecc_bytes);
2658 chip->ecc.calculate(mtd, buf, ecc_calc);
2660 /* mask OOB of un-touched subpages by padding 0xFF */
2661 /* if oob_required, preserve OOB metadata of written subpage */
2662 if (!oob_required || (step < start_step) || (step > end_step))
2663 memset(oob_buf, 0xff, oob_bytes);
2666 ecc_calc += ecc_bytes;
2667 oob_buf += oob_bytes;
2670 /* copy calculated ECC for whole page to chip->buffer->oob */
2671 /* this includes the masked value (0xFF) for unwritten subpages */
2672 ecc_calc = chip->buffers->ecccalc;
2673 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2678 /* write OOB buffer to NAND device */
2679 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
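/*
 * Worked example (illustrative, not from the original source): on a
 * 2048-byte page with ecc.size = 512 there are 4 ECC steps. Writing 512
 * bytes at offset = 1024 gives start_step = 1024 / 512 = 2 and
 * end_step = (1024 + 512 - 1) / 512 = 2, so only step 2 gets a real ECC
 * code; the ECC bytes and OOB chunks of steps 0, 1 and 3 are padded
 * with 0xFF so the untouched subpages are left undisturbed.
 */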
2686 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
2687 * @mtd: mtd info structure
2688 * @chip: nand chip info structure
2690 * @oob_required: must write chip->oob_poi to OOB
2691 * @page: page number to write
2693 * The hw generator calculates the error syndrome automatically. Therefore we
2694 * need a special oob layout and handling.
2696 static int nand_write_page_syndrome(struct mtd_info *mtd,
2697 struct nand_chip *chip,
2698 const uint8_t *buf, int oob_required,
2701 int i, eccsize = chip->ecc.size;
2702 int eccbytes = chip->ecc.bytes;
2703 int eccsteps = chip->ecc.steps;
2704 const uint8_t *p = buf;
2705 uint8_t *oob = chip->oob_poi;
2707 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2709 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2710 chip->write_buf(mtd, p, eccsize);
2712 if (chip->ecc.prepad) {
2713 chip->write_buf(mtd, oob, chip->ecc.prepad);
2714 oob += chip->ecc.prepad;
2717 chip->ecc.calculate(mtd, p, oob);
2718 chip->write_buf(mtd, oob, eccbytes);
2721 if (chip->ecc.postpad) {
2722 chip->write_buf(mtd, oob, chip->ecc.postpad);
2723 oob += chip->ecc.postpad;
2727 /* Calculate remaining oob bytes */
2728 i = mtd->oobsize - (oob - chip->oob_poi);
2730 chip->write_buf(mtd, oob, i);
2736 * nand_write_page - write one page
2737 * @mtd: MTD device structure
2738 * @chip: NAND chip descriptor
2739 * @offset: address offset within the page
2740 * @data_len: length of actual data to be written
2741 * @buf: the data to write
2742 * @oob_required: must write chip->oob_poi to OOB
2743 * @page: page number to write
2745 * @raw: use _raw version of write_page
2747 static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
2748 uint32_t offset, int data_len, const uint8_t *buf,
2749 int oob_required, int page, int raw)
2751 int status, subpage;
2753 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
2754 chip->ecc.write_subpage)
2755 subpage = offset || (data_len < mtd->writesize);
2759 if (nand_standard_page_accessors(&chip->ecc))
2760 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
2763 status = chip->ecc.write_page_raw(mtd, chip, buf,
2764 oob_required, page);
2766 status = chip->ecc.write_subpage(mtd, chip, offset, data_len,
2767 buf, oob_required, page);
2769 status = chip->ecc.write_page(mtd, chip, buf, oob_required,
2775 if (nand_standard_page_accessors(&chip->ecc)) {
2776 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2778 status = chip->waitfunc(mtd, chip);
2779 if (status & NAND_STATUS_FAIL)
2787 * nand_fill_oob - [INTERN] Transfer client buffer to oob
2788 * @mtd: MTD device structure
2789 * @oob: oob data buffer
2790 * @len: oob data write length
2791 * @ops: oob ops structure
2793 static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
2794 struct mtd_oob_ops *ops)
2796 struct nand_chip *chip = mtd_to_nand(mtd);
2800 * Initialise to all 0xFF, to avoid the possibility of left over OOB
2801 * data from a previous OOB read.
2803 memset(chip->oob_poi, 0xff, mtd->oobsize);
2805 switch (ops->mode) {
2807 case MTD_OPS_PLACE_OOB:
2809 memcpy(chip->oob_poi + ops->ooboffs, oob, len);
2812 case MTD_OPS_AUTO_OOB:
2813 ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
2824 #define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
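/*
 * Illustrative example (assumption, not in the original source): with
 * chip->subpagesize = 512, NOTALIGNED(0x200) evaluates to 0 (aligned)
 * while NOTALIGNED(0x300) is non-zero, so a write starting at 0x300 or
 * with a length of 0x300 is rejected by nand_do_write_ops() below.
 */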
2827 * nand_do_write_ops - [INTERN] NAND write with ECC
2828 * @mtd: MTD device structure
2829 * @to: offset to write to
2830 * @ops: oob operations description structure
2832 * NAND write with ECC.
2834 static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2835 struct mtd_oob_ops *ops)
2837 int chipnr, realpage, page, blockmask, column;
2838 struct nand_chip *chip = mtd_to_nand(mtd);
2839 uint32_t writelen = ops->len;
2841 uint32_t oobwritelen = ops->ooblen;
2842 uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
2844 uint8_t *oob = ops->oobbuf;
2845 uint8_t *buf = ops->datbuf;
2847 int oob_required = oob ? 1 : 0;
2853 /* Reject writes, which are not page aligned */
2854 if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
2855 pr_notice("%s: attempt to write non page aligned data\n",
2860 column = to & (mtd->writesize - 1);
2862 chipnr = (int)(to >> chip->chip_shift);
2863 chip->select_chip(mtd, chipnr);
2865 /* Check, if it is write protected */
2866 if (nand_check_wp(mtd)) {
2871 realpage = (int)(to >> chip->page_shift);
2872 page = realpage & chip->pagemask;
2873 blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
2875 /* Invalidate the page cache, when we write to the cached page */
2876 if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
2877 ((loff_t)chip->pagebuf << chip->page_shift) < (to + ops->len))
2880 /* Don't allow multipage oob writes with offset */
2881 if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
2887 int bytes = mtd->writesize;
2888 uint8_t *wbuf = buf;
2890 int part_pagewr = (column || writelen < mtd->writesize);
2894 else if (chip->options & NAND_USE_BOUNCE_BUFFER)
2895 use_bufpoi = !virt_addr_valid(buf) ||
2896 !IS_ALIGNED((unsigned long)buf,
2901 /* Partial page write, or need to use bounce buffer? */
2903 pr_debug("%s: using write bounce buffer for buf@%p\n",
2906 bytes = min_t(int, bytes - column, writelen);
2908 memset(chip->buffers->databuf, 0xff, mtd->writesize);
2909 memcpy(&chip->buffers->databuf[column], buf, bytes);
2910 wbuf = chip->buffers->databuf;
2913 if (unlikely(oob)) {
2914 size_t len = min(oobwritelen, oobmaxlen);
2915 oob = nand_fill_oob(mtd, oob, len, ops);
2918 /* We still need to erase leftover OOB data */
2919 memset(chip->oob_poi, 0xff, mtd->oobsize);
2922 ret = nand_write_page(mtd, chip, column, bytes, wbuf,
2924 (ops->mode == MTD_OPS_RAW));
2936 page = realpage & chip->pagemask;
2937 /* Check, if we cross a chip boundary */
2940 chip->select_chip(mtd, -1);
2941 chip->select_chip(mtd, chipnr);
2945 ops->retlen = ops->len - writelen;
2947 ops->oobretlen = ops->ooblen;
2950 chip->select_chip(mtd, -1);
2955 * panic_nand_write - [MTD Interface] NAND write with ECC
2956 * @mtd: MTD device structure
2957 * @to: offset to write to
2958 * @len: number of bytes to write
2959 * @retlen: pointer to variable to store the number of written bytes
2960 * @buf: the data to write
2962 * NAND write with ECC. Used when performing writes in interrupt context, this
2963 * may for example be called by mtdoops when writing an oops while in panic.
2965 static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2966 size_t *retlen, const uint8_t *buf)
2968 struct nand_chip *chip = mtd_to_nand(mtd);
2969 struct mtd_oob_ops ops;
2972 /* Wait for the device to get ready */
2973 panic_nand_wait(mtd, chip, 400);
2975 /* Grab the device */
2976 panic_nand_get_device(chip, mtd, FL_WRITING);
2978 memset(&ops, 0, sizeof(ops));
2980 ops.datbuf = (uint8_t *)buf;
2981 ops.mode = MTD_OPS_PLACE_OOB;
2983 ret = nand_do_write_ops(mtd, to, &ops);
2985 *retlen = ops.retlen;
2990 * nand_write - [MTD Interface] NAND write with ECC
2991 * @mtd: MTD device structure
2992 * @to: offset to write to
2993 * @len: number of bytes to write
2994 * @retlen: pointer to variable to store the number of written bytes
2995 * @buf: the data to write
2997 * NAND write with ECC.
2999 static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
3000 size_t *retlen, const uint8_t *buf)
3002 struct mtd_oob_ops ops;
3005 nand_get_device(mtd, FL_WRITING);
3006 memset(&ops, 0, sizeof(ops));
3008 ops.datbuf = (uint8_t *)buf;
3009 ops.mode = MTD_OPS_PLACE_OOB;
3010 ret = nand_do_write_ops(mtd, to, &ops);
3011 *retlen = ops.retlen;
3012 nand_release_device(mtd);
3017 * nand_do_write_oob - [INTERN] NAND write out-of-band
3018 * @mtd: MTD device structure
3019 * @to: offset to write to
3020 * @ops: oob operation description structure
3022 * NAND write out-of-band.
3024 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
3025 struct mtd_oob_ops *ops)
3027 int chipnr, page, status, len;
3028 struct nand_chip *chip = mtd_to_nand(mtd);
3030 pr_debug("%s: to = 0x%08x, len = %i\n",
3031 __func__, (unsigned int)to, (int)ops->ooblen);
3033 len = mtd_oobavail(mtd, ops);
3035 /* Do not allow write past end of page */
3036 if ((ops->ooboffs + ops->ooblen) > len) {
3037 pr_debug("%s: attempt to write past end of page\n",
3042 if (unlikely(ops->ooboffs >= len)) {
3043 pr_debug("%s: attempt to start write outside oob\n",
3048 /* Do not allow write past end of device */
3049 if (unlikely(to >= mtd->size ||
3050 ops->ooboffs + ops->ooblen >
3051 ((mtd->size >> chip->page_shift) -
3052 (to >> chip->page_shift)) * len)) {
3053 pr_debug("%s: attempt to write beyond end of device\n",
3058 chipnr = (int)(to >> chip->chip_shift);
3061 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
3062 * of my DiskOnChip 2000 test units) will clear the whole data page too
3063 * if we don't do this. I have no clue why, but I seem to have 'fixed'
3064 * it in the doc2000 driver in August 1999. dwmw2.
3066 nand_reset(chip, chipnr);
3068 chip->select_chip(mtd, chipnr);
3070 /* Shift to get page */
3071 page = (int)(to >> chip->page_shift);
3073 /* Check, if it is write protected */
3074 if (nand_check_wp(mtd)) {
3075 chip->select_chip(mtd, -1);
3079 /* Invalidate the page cache, if we write to the cached page */
3080 if (page == chip->pagebuf)
3083 nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
3085 if (ops->mode == MTD_OPS_RAW)
3086 status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask);
3088 status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
3090 chip->select_chip(mtd, -1);
3095 ops->oobretlen = ops->ooblen;
3101 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
3102 * @mtd: MTD device structure
3103 * @to: offset to write to
3104 * @ops: oob operation description structure
3106 static int nand_write_oob(struct mtd_info *mtd, loff_t to,
3107 struct mtd_oob_ops *ops)
3109 int ret = -ENOTSUPP;
3113 /* Do not allow writes past end of device */
3114 if (ops->datbuf && (to + ops->len) > mtd->size) {
3115 pr_debug("%s: attempt to write beyond end of device\n",
3120 nand_get_device(mtd, FL_WRITING);
3122 switch (ops->mode) {
3123 case MTD_OPS_PLACE_OOB:
3124 case MTD_OPS_AUTO_OOB:
3133 ret = nand_do_write_oob(mtd, to, ops);
3135 ret = nand_do_write_ops(mtd, to, ops);
3138 nand_release_device(mtd);
3143 * single_erase - [GENERIC] NAND standard block erase command function
3144 * @mtd: MTD device structure
3145 * @page: the page address of the block which will be erased
3147 * Standard erase command for NAND chips. Returns NAND status.
3149 static int single_erase(struct mtd_info *mtd, int page)
3151 struct nand_chip *chip = mtd_to_nand(mtd);
3152 /* Send commands to erase a block */
3153 chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
3154 chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
3156 return chip->waitfunc(mtd, chip);
3160 * nand_erase - [MTD Interface] erase block(s)
3161 * @mtd: MTD device structure
3162 * @instr: erase instruction
3164 * Erase one or more blocks.
3166 static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
3168 return nand_erase_nand(mtd, instr, 0);
3172 * nand_erase_nand - [INTERN] erase block(s)
3173 * @mtd: MTD device structure
3174 * @instr: erase instruction
3175 * @allowbbt: allow erasing the bbt area
3177 * Erase one or more blocks.
3179 int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
3182 int page, status, pages_per_block, ret, chipnr;
3183 struct nand_chip *chip = mtd_to_nand(mtd);
3186 pr_debug("%s: start = 0x%012llx, len = %llu\n",
3187 __func__, (unsigned long long)instr->addr,
3188 (unsigned long long)instr->len);
3190 if (check_offs_len(mtd, instr->addr, instr->len))
3193 /* Grab the lock and see if the device is available */
3194 nand_get_device(mtd, FL_ERASING);
3196 /* Shift to get first page */
3197 page = (int)(instr->addr >> chip->page_shift);
3198 chipnr = (int)(instr->addr >> chip->chip_shift);
3200 /* Calculate pages in each block */
3201 pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
3203 /* Select the NAND device */
3204 chip->select_chip(mtd, chipnr);
3206 /* Check, if it is write protected */
3207 if (nand_check_wp(mtd)) {
3208 pr_debug("%s: device is write protected!\n",
3210 instr->state = MTD_ERASE_FAILED;
3214 /* Loop through the pages */
3217 instr->state = MTD_ERASING;
3220 /* Check if we have a bad block; we do not erase bad blocks! */
3221 if (nand_block_checkbad(mtd, ((loff_t) page) <<
3222 chip->page_shift, allowbbt)) {
3223 pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
3225 instr->state = MTD_ERASE_FAILED;
3230 * Invalidate the page cache, if we erase the block which
3231 * contains the current cached page.
3233 if (page <= chip->pagebuf && chip->pagebuf <
3234 (page + pages_per_block))
3237 status = chip->erase(mtd, page & chip->pagemask);
3239 /* See if block erase succeeded */
3240 if (status & NAND_STATUS_FAIL) {
3241 pr_debug("%s: failed erase, page 0x%08x\n",
3243 instr->state = MTD_ERASE_FAILED;
3245 ((loff_t)page << chip->page_shift);
3249 /* Increment page address and decrement length */
3250 len -= (1ULL << chip->phys_erase_shift);
3251 page += pages_per_block;
3253 /* Check, if we cross a chip boundary */
3254 if (len && !(page & chip->pagemask)) {
3256 chip->select_chip(mtd, -1);
3257 chip->select_chip(mtd, chipnr);
3260 instr->state = MTD_ERASE_DONE;
3264 ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
3266 /* Deselect and wake up anyone waiting on the device */
3267 chip->select_chip(mtd, -1);
3268 nand_release_device(mtd);
3270 /* Do call back function */
3272 mtd_erase_callback(instr);
3274 /* Return more or less happy */
3279 * nand_sync - [MTD Interface] sync
3280 * @mtd: MTD device structure
3282 * Sync is actually a wait for chip ready function.
3284 static void nand_sync(struct mtd_info *mtd)
3286 pr_debug("%s: called\n", __func__);
3288 /* Grab the lock and see if the device is available */
3289 nand_get_device(mtd, FL_SYNCING);
3290 /* Release it and go back */
3291 nand_release_device(mtd);
3295 * nand_block_isbad - [MTD Interface] Check if block at offset is bad
3296 * @mtd: MTD device structure
3297 * @offs: offset relative to mtd start
3299 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
3301 struct nand_chip *chip = mtd_to_nand(mtd);
3302 int chipnr = (int)(offs >> chip->chip_shift);
3305 /* Select the NAND device */
3306 nand_get_device(mtd, FL_READING);
3307 chip->select_chip(mtd, chipnr);
3309 ret = nand_block_checkbad(mtd, offs, 0);
3311 chip->select_chip(mtd, -1);
3312 nand_release_device(mtd);
3318 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
3319 * @mtd: MTD device structure
3320 * @ofs: offset relative to mtd start
3322 static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
3326 ret = nand_block_isbad(mtd, ofs);
3328 /* If it was bad already, return success and do nothing */
3334 return nand_block_markbad_lowlevel(mtd, ofs);
3338 * nand_max_bad_blocks - [MTD Interface] Max number of bad blocks for an mtd
3339 * @mtd: MTD device structure
3340 * @ofs: offset relative to mtd start
3341 * @len: length of mtd
3343 static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
3345 struct nand_chip *chip = mtd_to_nand(mtd);
3346 u32 part_start_block;
3352 * max_bb_per_die and blocks_per_die are used to determine
3353 * the maximum bad block count.
3355 if (!chip->max_bb_per_die || !chip->blocks_per_die)
3358 /* Get the start and end of the partition in erase blocks. */
3359 part_start_block = mtd_div_by_eb(ofs, mtd);
3360 part_end_block = mtd_div_by_eb(len, mtd) + part_start_block - 1;
3362 /* Get the start and end LUNs of the partition. */
3363 part_start_die = part_start_block / chip->blocks_per_die;
3364 part_end_die = part_end_block / chip->blocks_per_die;
3367 * Look up the bad blocks per unit and multiply by the number of units
3368 * that the partition spans.
3370 return chip->max_bb_per_die * (part_end_die - part_start_die + 1);
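/*
 * Worked example (illustrative figures): with blocks_per_die = 1024 and
 * max_bb_per_die = 20, a partition covering erase blocks 1000..1100
 * starts on die 0 and ends on die 1, so the reported worst case is
 * 20 * (1 - 0 + 1) = 40 bad blocks.
 */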
3374 * nand_onfi_set_features - [REPLACEABLE] set features for ONFI nand
3375 * @mtd: MTD device structure
3376 * @chip: nand chip info structure
3377 * @addr: feature address.
3378 * @subfeature_param: the subfeature parameters, a four bytes array.
3380 static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
3381 int addr, uint8_t *subfeature_param)
3386 if (!chip->onfi_version ||
3387 !(le16_to_cpu(chip->onfi_params.opt_cmd)
3388 & ONFI_OPT_CMD_SET_GET_FEATURES))
3391 chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
3392 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
3393 chip->write_byte(mtd, subfeature_param[i]);
3395 status = chip->waitfunc(mtd, chip);
3396 if (status & NAND_STATUS_FAIL)
3402 * nand_onfi_get_features - [REPLACEABLE] get features for ONFI nand
3403 * @mtd: MTD device structure
3404 * @chip: nand chip info structure
3405 * @addr: feature address.
3406 * @subfeature_param: the subfeature parameters, a four bytes array.
3408 static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
3409 int addr, uint8_t *subfeature_param)
3413 if (!chip->onfi_version ||
3414 !(le16_to_cpu(chip->onfi_params.opt_cmd)
3415 & ONFI_OPT_CMD_SET_GET_FEATURES))
3418 chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1);
3419 for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
3420 *subfeature_param++ = chip->read_byte(mtd);
3425 * nand_onfi_get_set_features_notsupp - set/get features stub returning -ENOTSUPP
3427 * @mtd: MTD device structure
3428 * @chip: nand chip info structure
3429 * @addr: feature address.
3430 * @subfeature_param: the subfeature parameters, a four bytes array.
3432 * Should be used by NAND controller drivers that do not support the SET/GET
3433 * FEATURES operations.
3435 int nand_onfi_get_set_features_notsupp(struct mtd_info *mtd,
3436 struct nand_chip *chip, int addr,
3437 u8 *subfeature_param)
3441 EXPORT_SYMBOL(nand_onfi_get_set_features_notsupp);
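/*
 * Usage sketch (editorial, hypothetical driver code): a controller whose
 * command path cannot issue SET/GET FEATURES would hook the stub above
 * instead of the default implementations, e.g.:
 *
 *	chip->onfi_set_features = nand_onfi_get_set_features_notsupp;
 *	chip->onfi_get_features = nand_onfi_get_set_features_notsupp;
 */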
3444 * nand_suspend - [MTD Interface] Suspend the NAND flash
3445 * @mtd: MTD device structure
3447 static int nand_suspend(struct mtd_info *mtd)
3449 return nand_get_device(mtd, FL_PM_SUSPENDED);
3453 * nand_resume - [MTD Interface] Resume the NAND flash
3454 * @mtd: MTD device structure
3456 static void nand_resume(struct mtd_info *mtd)
3458 struct nand_chip *chip = mtd_to_nand(mtd);
3460 if (chip->state == FL_PM_SUSPENDED)
3461 nand_release_device(mtd);
3463 pr_err("%s called for a chip which is not in suspended state\n",
3468 * nand_shutdown - [MTD Interface] Finish the current NAND operation and
3469 * prevent further operations
3470 * @mtd: MTD device structure
3472 static void nand_shutdown(struct mtd_info *mtd)
3474 nand_get_device(mtd, FL_PM_SUSPENDED);
3477 /* Set default functions */
3478 static void nand_set_defaults(struct nand_chip *chip)
3480 unsigned int busw = chip->options & NAND_BUSWIDTH_16;
3482 /* check for proper chip_delay setup, set 20us if not */
3483 if (!chip->chip_delay)
3484 chip->chip_delay = 20;
3486 /* check if a user supplied command function was given */
3487 if (chip->cmdfunc == NULL)
3488 chip->cmdfunc = nand_command;
3490 /* check if a user supplied wait function was given */
3491 if (chip->waitfunc == NULL)
3492 chip->waitfunc = nand_wait;
3494 if (!chip->select_chip)
3495 chip->select_chip = nand_select_chip;
3497 /* set for ONFI nand */
3498 if (!chip->onfi_set_features)
3499 chip->onfi_set_features = nand_onfi_set_features;
3500 if (!chip->onfi_get_features)
3501 chip->onfi_get_features = nand_onfi_get_features;
3503 /* If called twice, pointers that depend on busw may need to be reset */
3504 if (!chip->read_byte || chip->read_byte == nand_read_byte)
3505 chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
3506 if (!chip->read_word)
3507 chip->read_word = nand_read_word;
3508 if (!chip->block_bad)
3509 chip->block_bad = nand_block_bad;
3510 if (!chip->block_markbad)
3511 chip->block_markbad = nand_default_block_markbad;
3512 if (!chip->write_buf || chip->write_buf == nand_write_buf)
3513 chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
3514 if (!chip->write_byte || chip->write_byte == nand_write_byte)
3515 chip->write_byte = busw ? nand_write_byte16 : nand_write_byte;
3516 if (!chip->read_buf || chip->read_buf == nand_read_buf)
3517 chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
3518 if (!chip->scan_bbt)
3519 chip->scan_bbt = nand_default_bbt;
3521 if (!chip->controller) {
3522 chip->controller = &chip->hwcontrol;
3523 nand_hw_control_init(chip->controller);
3526 if (!chip->buf_align)
3527 chip->buf_align = 1;
3530 /* Sanitize ONFI strings so we can safely print them */
3531 static void sanitize_string(uint8_t *s, size_t len)
3535 /* Null terminate */
3538 /* Remove non printable chars */
3539 for (i = 0; i < len - 1; i++) {
3540 if (s[i] < ' ' || s[i] > 127)
3544 /* Remove trailing spaces */
3548 static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
3553 for (i = 0; i < 8; i++)
3554 crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
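/*
 * Editorial note: this is the CRC-16 used by the ONFI and JEDEC parameter
 * pages, polynomial 0x8005 (x^16 + x^15 + x^2 + 1), processed MSB first.
 * Callers seed it with ONFI_CRC_BASE and run it over the parameter page
 * minus its trailing CRC field, e.g. onfi_crc16(ONFI_CRC_BASE,
 * (uint8_t *)p, 254) for the 256-byte ONFI parameter page.
 */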
3560 /* Parse the Extended Parameter Page. */
3561 static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
3562 struct nand_onfi_params *p)
3564 struct mtd_info *mtd = nand_to_mtd(chip);
3565 struct onfi_ext_param_page *ep;
3566 struct onfi_ext_section *s;
3567 struct onfi_ext_ecc_info *ecc;
3573 len = le16_to_cpu(p->ext_param_page_length) * 16;
3574 ep = kmalloc(len, GFP_KERNEL);
3578 /* Send our own NAND_CMD_PARAM. */
3579 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
3581 /* Use the Change Read Column command to skip the ONFI param pages. */
3582 chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
3583 sizeof(*p) * p->num_of_param_pages, -1);
3585 /* Read out the Extended Parameter Page. */
3586 chip->read_buf(mtd, (uint8_t *)ep, len);
3587 if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
3588 != le16_to_cpu(ep->crc))) {
3589 pr_debug("fail in the CRC.\n");
3594 * Check the signature.
3595 * This does not strictly follow the ONFI spec and may change in the future.
3597 if (strncmp(ep->sig, "EPPS", 4)) {
3598 pr_debug("The signature is invalid.\n");
3602 /* find the ECC section. */
3603 cursor = (uint8_t *)(ep + 1);
3604 for (i = 0; i < ONFI_EXT_SECTION_MAX; i++) {
3605 s = ep->sections + i;
3606 if (s->type == ONFI_SECTION_TYPE_2)
3608 cursor += s->length * 16;
3610 if (i == ONFI_EXT_SECTION_MAX) {
3611 pr_debug("We can not find the ECC section.\n");
3615 /* get the info we want. */
3616 ecc = (struct onfi_ext_ecc_info *)cursor;
3618 if (!ecc->codeword_size) {
3619 pr_debug("Invalid codeword size\n");
3623 chip->ecc_strength_ds = ecc->ecc_bits;
3624 chip->ecc_step_ds = 1 << ecc->codeword_size;
3633 * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
3635 static int nand_flash_detect_onfi(struct nand_chip *chip)
3637 struct mtd_info *mtd = nand_to_mtd(chip);
3638 struct nand_onfi_params *p = &chip->onfi_params;
3642 /* Try ONFI for unknown chip or LP */
3643 chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
3644 if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
3645 chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
3648 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
3649 for (i = 0; i < 3; i++) {
3650 for (j = 0; j < sizeof(*p); j++)
3651 ((uint8_t *)p)[j] = chip->read_byte(mtd);
3652 if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
3653 le16_to_cpu(p->crc)) {
3659 pr_err("Could not find valid ONFI parameter page; aborting\n");
3664 val = le16_to_cpu(p->revision);
3666 chip->onfi_version = 23;
3667 else if (val & (1 << 4))
3668 chip->onfi_version = 22;
3669 else if (val & (1 << 3))
3670 chip->onfi_version = 21;
3671 else if (val & (1 << 2))
3672 chip->onfi_version = 20;
3673 else if (val & (1 << 1))
3674 chip->onfi_version = 10;
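/*
 * Editorial note: the revision field is a bitmask of every ONFI revision
 * the chip supports (bit 1 = 1.0, bit 2 = 2.0, bit 3 = 2.1, bit 4 = 2.2,
 * ...), and the chain above keeps the highest one; a hypothetical
 * revision word of 0x001e would therefore select onfi_version = 22.
 */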
3676 if (!chip->onfi_version) {
3677 pr_info("unsupported ONFI version: %d\n", val);
3681 sanitize_string(p->manufacturer, sizeof(p->manufacturer));
3682 sanitize_string(p->model, sizeof(p->model));
3684 mtd->name = p->model;
3686 mtd->writesize = le32_to_cpu(p->byte_per_page);
3689 * pages_per_block and blocks_per_lun may not be a power-of-2 size
3690 * (don't ask me who thought of this...). MTD assumes that these
3691 * dimensions will be power-of-2, so just truncate the remaining area.
3693 mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
3694 mtd->erasesize *= mtd->writesize;
3696 mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
3698 /* See erasesize comment */
3699 chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
3700 chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
3701 chip->bits_per_cell = p->bits_per_cell;
3703 chip->max_bb_per_die = le16_to_cpu(p->bb_per_lun);
3704 chip->blocks_per_die = le32_to_cpu(p->blocks_per_lun);
3706 if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS)
3707 chip->options |= NAND_BUSWIDTH_16;
3709 if (p->ecc_bits != 0xff) {
3710 chip->ecc_strength_ds = p->ecc_bits;
3711 chip->ecc_step_ds = 512;
3712 } else if (chip->onfi_version >= 21 &&
3713 (onfi_feature(chip) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
3716 * nand_flash_detect_ext_param_page() uses the Change Read
3717 * Column command, which may not be supported by the current
3718 * chip->cmdfunc, so try to update chip->cmdfunc now. We do not
3719 * replace a user supplied command function.
3721 if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
3722 chip->cmdfunc = nand_command_lp;
3724 /* The Extended Parameter Page is supported since ONFI 2.1. */
3725 if (nand_flash_detect_ext_param_page(chip, p))
3726 pr_warn("Failed to detect ONFI extended param page\n");
3728 pr_warn("Could not retrieve ONFI ECC requirements\n");
3735 * Check if the NAND chip is JEDEC compliant, returns 1 if it is, 0 otherwise.
3737 static int nand_flash_detect_jedec(struct nand_chip *chip)
3739 struct mtd_info *mtd = nand_to_mtd(chip);
3740 struct nand_jedec_params *p = &chip->jedec_params;
3741 struct jedec_ecc_info *ecc;
3745 /* Try JEDEC for unknown chip or LP */
3746 chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1);
3747 if (chip->read_byte(mtd) != 'J' || chip->read_byte(mtd) != 'E' ||
3748 chip->read_byte(mtd) != 'D' || chip->read_byte(mtd) != 'E' ||
3749 chip->read_byte(mtd) != 'C')
3752 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0x40, -1);
3753 for (i = 0; i < 3; i++) {
3754 for (j = 0; j < sizeof(*p); j++)
3755 ((uint8_t *)p)[j] = chip->read_byte(mtd);
3757 if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
3758 le16_to_cpu(p->crc))
3763 pr_err("Could not find valid JEDEC parameter page; aborting\n");
3768 val = le16_to_cpu(p->revision);
3770 chip->jedec_version = 10;
3771 else if (val & (1 << 1))
3772 chip->jedec_version = 1; /* vendor specific version */
3774 if (!chip->jedec_version) {
3775 pr_info("unsupported JEDEC version: %d\n", val);
3779 sanitize_string(p->manufacturer, sizeof(p->manufacturer));
3780 sanitize_string(p->model, sizeof(p->model));
3782 mtd->name = p->model;
3784 mtd->writesize = le32_to_cpu(p->byte_per_page);
3786 * Please refer to the comment for nand_flash_detect_onfi.
3787 mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
3788 mtd->erasesize *= mtd->writesize;
3790 mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
3792 * Please refer to the comment for nand_flash_detect_onfi.
3793 chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
3794 chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
3795 chip->bits_per_cell = p->bits_per_cell;
3797 if (jedec_feature(chip) & JEDEC_FEATURE_16_BIT_BUS)
3798 chip->options |= NAND_BUSWIDTH_16;
3801 ecc = &p->ecc_info[0];
3803 if (ecc->codeword_size >= 9) {
3804 chip->ecc_strength_ds = ecc->ecc_bits;
3805 chip->ecc_step_ds = 1 << ecc->codeword_size;
3807 pr_warn("Invalid codeword size\n");
3814 * nand_id_has_period - Check if an ID string has a given wraparound period
3815 * @id_data: the ID string
3816 * @arrlen: the length of the @id_data array
3817 * @period: the period of repetition
3819 * Check if an ID string is repeated within a given sequence of bytes at a
3820 * specific repetition interval (e.g., {0x20,0x01,0x7F,0x20} has a
3821 * period of 3). This is a helper function for nand_id_len(). Returns non-zero
3822 * if the repetition has a period of @period; otherwise, returns zero.
3824 static int nand_id_has_period(u8 *id_data, int arrlen, int period)
3827 for (i = 0; i < period; i++)
3828 for (j = i + period; j < arrlen; j += period)
3829 if (id_data[i] != id_data[j])
3835 * nand_id_len - Get the length of an ID string returned by CMD_READID
3836 * @id_data: the ID string
3837 * @arrlen: the length of the @id_data array
3839 * Returns the length of the ID string, according to known wraparound/trailing
3840 * zero patterns. If no pattern exists, returns the length of the array.
3842 static int nand_id_len(u8 *id_data, int arrlen)
3844 int last_nonzero, period;
3846 /* Find last non-zero byte */
3847 for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
3848 if (id_data[last_nonzero])
3852 if (last_nonzero < 0)
3855 /* Calculate wraparound period */
3856 for (period = 1; period < arrlen; period++)
3857 if (nand_id_has_period(id_data, arrlen, period))
3860 /* There's a repeated pattern */
3861 if (period < arrlen)
3864 /* There are trailing zeros */
3865 if (last_nonzero < arrlen - 1)
3866 return last_nonzero + 1;
3868 /* No pattern detected */
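/*
 * Worked example (illustrative, made-up ID bytes): for
 * {0xec, 0xd3, 0x51, 0x95, 0x58, 0xec, 0xd3, 0x51} the string wraps with
 * a period of 5, so nand_id_len() reports 5; for
 * {0xec, 0xd3, 0x51, 0x95, 0x58, 0x00, 0x00, 0x00} there is no repeated
 * pattern but three trailing zeros, so the reported length is again 5.
 */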
3872 /* Extract the bits per cell from the 3rd byte of the extended ID */
3873 static int nand_get_bits_per_cell(u8 cellinfo)
3877 bits = cellinfo & NAND_CI_CELLTYPE_MSK;
3878 bits >>= NAND_CI_CELLTYPE_SHIFT;
3883 * Many newer NAND chips share similar device ID codes, which represent the size of the
3884 * chip. The rest of the parameters must be decoded according to generic or
3885 * manufacturer-specific "extended ID" decoding patterns.
3887 void nand_decode_ext_id(struct nand_chip *chip)
3889 struct mtd_info *mtd = nand_to_mtd(chip);
3891 u8 *id_data = chip->id.data;
3892 /* The 3rd id byte holds MLC / multichip data */
3893 chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
3894 /* The 4th id byte is the important one */
3898 mtd->writesize = 1024 << (extid & 0x03);
3901 mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
3903 /* Calc blocksize. Blocksize is multiples of 64KiB */
3904 mtd->erasesize = (64 * 1024) << (extid & 0x03);
3906 /* Get buswidth information */
3908 chip->options |= NAND_BUSWIDTH_16;
3910 EXPORT_SYMBOL_GPL(nand_decode_ext_id);
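/*
 * Worked example (illustrative; assumes the usual layout where extid is
 * shifted right by two bits between fields): for a 4th ID byte of 0x95,
 * writesize = 1024 << (0x95 & 0x03) = 2048 bytes, the next field gives
 * oobsize = (8 << 1) * (2048 >> 9) = 64 bytes, the following one gives
 * erasesize = 64 KiB << 1 = 128 KiB, and the buswidth bit is 0, i.e. an
 * 8-bit device.
 */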
3913 * Old devices have chip data hardcoded in the device ID table. nand_decode_id
3914 * decodes a matching ID table entry and assigns the MTD size parameters for
3917 static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
3919 struct mtd_info *mtd = nand_to_mtd(chip);
3921 mtd->erasesize = type->erasesize;
3922 mtd->writesize = type->pagesize;
3923 mtd->oobsize = mtd->writesize / 32;
3925 /* All legacy ID NAND are small-page, SLC */
3926 chip->bits_per_cell = 1;
3930 * Set the bad block marker/indicator (BBM/BBI) patterns according to some
3931 * heuristic patterns using various detected parameters (e.g., manufacturer,
3932 * page size, cell-type information).
3934 static void nand_decode_bbm_options(struct nand_chip *chip)
3936 struct mtd_info *mtd = nand_to_mtd(chip);
3938 /* Set the bad block position */
3939 if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
3940 chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
3942 chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
3945 static inline bool is_full_id_nand(struct nand_flash_dev *type)
3947 return type->id_len;
3950 static bool find_full_id_nand(struct nand_chip *chip,
3951 struct nand_flash_dev *type)
3953 struct mtd_info *mtd = nand_to_mtd(chip);
3954 u8 *id_data = chip->id.data;
3956 if (!strncmp(type->id, id_data, type->id_len)) {
3957 mtd->writesize = type->pagesize;
3958 mtd->erasesize = type->erasesize;
3959 mtd->oobsize = type->oobsize;
3961 chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
3962 chip->chipsize = (uint64_t)type->chipsize << 20;
3963 chip->options |= type->options;
3964 chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
3965 chip->ecc_step_ds = NAND_ECC_STEP(type);
3966 chip->onfi_timing_mode_default =
3967 type->onfi_timing_mode_default;
3970 mtd->name = type->name;
3978 * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
3979 * compliant and does not have a full-id or legacy-id entry in the nand_ids
3982 static void nand_manufacturer_detect(struct nand_chip *chip)
3985 * Try manufacturer detection if available and use
3986 * nand_decode_ext_id() otherwise.
3988 if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
3989 chip->manufacturer.desc->ops->detect)
3990 chip->manufacturer.desc->ops->detect(chip);
3992 nand_decode_ext_id(chip);
3996 * Manufacturer initialization. This function is called for all NANDs including
3997 * ONFI and JEDEC compliant ones.
3998 * Manufacturer drivers should put all their specific initialization code in
3999 * their ->init() hook.
4001 static int nand_manufacturer_init(struct nand_chip *chip)
4003 if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
4004 !chip->manufacturer.desc->ops->init)
4007 return chip->manufacturer.desc->ops->init(chip);
4011 * Manufacturer cleanup. This function is called for all NANDs including
4012 * ONFI and JEDEC compliant ones.
4013 * Manufacturer drivers should put all their specific cleanup code in their
4016 static void nand_manufacturer_cleanup(struct nand_chip *chip)
4018 /* Release manufacturer private data */
4019 if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4020 chip->manufacturer.desc->ops->cleanup)
4021 chip->manufacturer.desc->ops->cleanup(chip);
4025 * Get the flash and manufacturer id and lookup if the type is supported.
4027 static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
4029 const struct nand_manufacturer *manufacturer;
4030 struct mtd_info *mtd = nand_to_mtd(chip);
4033 u8 *id_data = chip->id.data;
4037 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
4040 nand_reset(chip, 0);
4042 /* Select the device */
4043 chip->select_chip(mtd, 0);
4045 /* Send the command for reading device ID */
4046 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
4048 /* Read manufacturer and device IDs */
4049 maf_id = chip->read_byte(mtd);
4050 dev_id = chip->read_byte(mtd);
4053 * Try again to make sure, as on some systems bus-hold or other
4054 * interface concerns can cause random data which looks like a
4055 * possibly credible NAND flash chip to appear. If the two results do
4056 * not match, ignore the device completely.
4059 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
4061 /* Read entire ID string */
4062 for (i = 0; i < 8; i++)
4063 id_data[i] = chip->read_byte(mtd);
4065 if (id_data[0] != maf_id || id_data[1] != dev_id) {
4066 pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
4067 maf_id, dev_id, id_data[0], id_data[1]);
4071 chip->id.len = nand_id_len(id_data, 8);
4073 /* Try to identify manufacturer */
4074 manufacturer = nand_get_manufacturer(maf_id);
4075 chip->manufacturer.desc = manufacturer;
4078 type = nand_flash_ids;
4081 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
4083 * This is required to make sure initial NAND bus width set by the
4084 * NAND controller driver is coherent with the real NAND bus width
4085 * (extracted by auto-detection code).
4087 busw = chip->options & NAND_BUSWIDTH_16;
4090 * The flag is only set (never cleared), reset it to its default value
4091 * before starting auto-detection.
4093 chip->options &= ~NAND_BUSWIDTH_16;
4095 for (; type->name != NULL; type++) {
4096 if (is_full_id_nand(type)) {
4097 if (find_full_id_nand(chip, type))
4099 } else if (dev_id == type->dev_id) {
4104 chip->onfi_version = 0;
4105 if (!type->name || !type->pagesize) {
4106 /* Check if the chip is ONFI compliant */
4107 if (nand_flash_detect_onfi(chip))
4110 /* Check if the chip is JEDEC compliant */
4111 if (nand_flash_detect_jedec(chip))
4119 mtd->name = type->name;
4121 chip->chipsize = (uint64_t)type->chipsize << 20;
4123 if (!type->pagesize)
4124 nand_manufacturer_detect(chip);
4126 nand_decode_id(chip, type);
4128 /* Get chip options */
4129 chip->options |= type->options;
4133 if (chip->options & NAND_BUSWIDTH_AUTO) {
4134 WARN_ON(busw & NAND_BUSWIDTH_16);
4135 nand_set_defaults(chip);
4136 } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
4138 * Check, if buswidth is correct. Hardware drivers should set
4141 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
4143 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4145 pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
4146 (chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
4150 nand_decode_bbm_options(chip);
4152 /* Calculate the address shift from the page size */
4153 chip->page_shift = ffs(mtd->writesize) - 1;
4154 /* Convert chipsize to number of pages per chip -1 */
4155 chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
4157 chip->bbt_erase_shift = chip->phys_erase_shift =
4158 ffs(mtd->erasesize) - 1;
4159 if (chip->chipsize & 0xffffffff)
4160 chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
4162 chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
4163 chip->chip_shift += 32 - 1;
4166 chip->badblockbits = 8;
4167 chip->erase = single_erase;
4169 /* Do not replace user supplied command function! */
4170 if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
4171 chip->cmdfunc = nand_command_lp;
4173 ret = nand_manufacturer_init(chip);
4177 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
4180 if (chip->onfi_version)
4181 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4182 chip->onfi_params.model);
4183 else if (chip->jedec_version)
4184 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4185 chip->jedec_params.model);
4187 pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
4190 pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
4191 (int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
4192 mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
4196 static const char * const nand_ecc_modes[] = {
4197 [NAND_ECC_NONE] = "none",
4198 [NAND_ECC_SOFT] = "soft",
4199 [NAND_ECC_HW] = "hw",
4200 [NAND_ECC_HW_SYNDROME] = "hw_syndrome",
4201 [NAND_ECC_HW_OOB_FIRST] = "hw_oob_first",
4202 [NAND_ECC_ON_DIE] = "on-die",
4205 static int of_get_nand_ecc_mode(struct device_node *np)
4210 err = of_property_read_string(np, "nand-ecc-mode", &pm);
4214 for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
4215 if (!strcasecmp(pm, nand_ecc_modes[i]))
4219 * For backward compatibility we support a few obsolete values that no
4220 * longer have mappings in nand_ecc_modes_t (they were merged
4221 * with other enums).
4223 if (!strcasecmp(pm, "soft_bch"))
4224 return NAND_ECC_SOFT;
4229 static const char * const nand_ecc_algos[] = {
4230 [NAND_ECC_HAMMING] = "hamming",
4231 [NAND_ECC_BCH] = "bch",
4234 static int of_get_nand_ecc_algo(struct device_node *np)
4239 err = of_property_read_string(np, "nand-ecc-algo", &pm);
4241 for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++)
4242 if (!strcasecmp(pm, nand_ecc_algos[i]))
4248 * For backward compatibility we also read "nand-ecc-mode", checking
4249 * for some obsolete values that specified the ECC algorithm.
4251 err = of_property_read_string(np, "nand-ecc-mode", &pm);
4255 if (!strcasecmp(pm, "soft"))
4256 return NAND_ECC_HAMMING;
4257 else if (!strcasecmp(pm, "soft_bch"))
4258 return NAND_ECC_BCH;
4263 static int of_get_nand_ecc_step_size(struct device_node *np)
4268 ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
4269 return ret ? ret : val;
4272 static int of_get_nand_ecc_strength(struct device_node *np)
4277 ret = of_property_read_u32(np, "nand-ecc-strength", &val);
4278 return ret ? ret : val;
4281 static int of_get_nand_bus_width(struct device_node *np)
4285 if (of_property_read_u32(np, "nand-bus-width", &val))
4297 static bool of_get_nand_on_flash_bbt(struct device_node *np)
4299 return of_property_read_bool(np, "nand-on-flash-bbt");
4302 static int nand_dt_init(struct nand_chip *chip)
4304 struct device_node *dn = nand_get_flash_node(chip);
4305 int ecc_mode, ecc_algo, ecc_strength, ecc_step;
4310 if (of_get_nand_bus_width(dn) == 16)
4311 chip->options |= NAND_BUSWIDTH_16;
4313 if (of_get_nand_on_flash_bbt(dn))
4314 chip->bbt_options |= NAND_BBT_USE_FLASH;
4316 ecc_mode = of_get_nand_ecc_mode(dn);
4317 ecc_algo = of_get_nand_ecc_algo(dn);
4318 ecc_strength = of_get_nand_ecc_strength(dn);
4319 ecc_step = of_get_nand_ecc_step_size(dn);
4322 chip->ecc.mode = ecc_mode;
4325 chip->ecc.algo = ecc_algo;
4327 if (ecc_strength >= 0)
4328 chip->ecc.strength = ecc_strength;
4331 chip->ecc.size = ecc_step;
4333 if (of_property_read_bool(dn, "nand-ecc-maximize"))
4334 chip->ecc.options |= NAND_ECC_MAXIMIZE;
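/*
 * Example (editorial; property names taken from the parsing code above):
 * a board device tree node might describe the chip as
 *
 *	nand-bus-width = <16>;
 *	nand-on-flash-bbt;
 *	nand-ecc-mode = "hw";
 *	nand-ecc-algo = "bch";
 *	nand-ecc-strength = <8>;
 *	nand-ecc-step-size = <512>;
 *
 * which nand_dt_init() translates into NAND_BUSWIDTH_16,
 * NAND_BBT_USE_FLASH and the corresponding chip->ecc settings.
 */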
4340 * nand_scan_ident - [NAND Interface] Scan for the NAND device
4341 * @mtd: MTD device structure
4342 * @maxchips: number of chips to scan for
4343 * @table: alternative NAND ID table
4345 * This is the first phase of the normal nand_scan() function. It reads the
4346 * flash ID and sets up MTD fields accordingly.
4349 int nand_scan_ident(struct mtd_info *mtd, int maxchips,
4350 struct nand_flash_dev *table)
4352 int i, nand_maf_id, nand_dev_id;
4353 struct nand_chip *chip = mtd_to_nand(mtd);
4356 ret = nand_dt_init(chip);
4360 if (!mtd->name && mtd->dev.parent)
4361 mtd->name = dev_name(mtd->dev.parent);
4363 if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) {
4365 * Default functions assigned for chip_select() and
4366 * cmdfunc() both expect cmd_ctrl() to be populated,
4367 * so we need to check that that's the case
4369 pr_err("chip.cmd_ctrl() callback is not provided");
4372 /* Set the default functions */
4373 nand_set_defaults(chip);
4375 /* Read the flash type */
4376 ret = nand_detect(chip, table);
4378 if (!(chip->options & NAND_SCAN_SILENT_NODEV))
4379 pr_warn("No NAND device found\n");
4380 chip->select_chip(mtd, -1);
4384 /* Initialize the ->data_interface field. */
4385 ret = nand_init_data_interface(chip);
4390 * Setup the data interface correctly on the chip and controller side.
4391 * This explicit call to nand_setup_data_interface() is only required
4392 * for the first die, because nand_reset() has been called before
4393 * ->data_interface and ->default_onfi_timing_mode were set.
4394 * For the other dies, nand_reset() will automatically switch to the
4397 ret = nand_setup_data_interface(chip, 0);
4401 nand_maf_id = chip->id.data[0];
4402 nand_dev_id = chip->id.data[1];
4404 chip->select_chip(mtd, -1);
4406 /* Check for a chip array */
4407 for (i = 1; i < maxchips; i++) {
4408 /* See comment in nand_detect() for reset */
4409 nand_reset(chip, i);
4411 chip->select_chip(mtd, i);
4412 /* Send the command for reading device ID */
4413 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
4414 /* Read manufacturer and device IDs */
4415 if (nand_maf_id != chip->read_byte(mtd) ||
4416 nand_dev_id != chip->read_byte(mtd)) {
4417 chip->select_chip(mtd, -1);
4420 chip->select_chip(mtd, -1);
4423 pr_info("%d chips detected\n", i);
4425 /* Store the number of chips and calc total size for mtd */
4427 mtd->size = i * chip->chipsize;
4432 /* Free manufacturer priv data. */
4433 nand_manufacturer_cleanup(chip);
4437 EXPORT_SYMBOL(nand_scan_ident);
4439 static int nand_set_ecc_soft_ops(struct mtd_info *mtd)
4441 struct nand_chip *chip = mtd_to_nand(mtd);
4442 struct nand_ecc_ctrl *ecc = &chip->ecc;
4444 if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
4447 switch (ecc->algo) {
4448 case NAND_ECC_HAMMING:
4449 ecc->calculate = nand_calculate_ecc;
4450 ecc->correct = nand_correct_data;
4451 ecc->read_page = nand_read_page_swecc;
4452 ecc->read_subpage = nand_read_subpage;
4453 ecc->write_page = nand_write_page_swecc;
4454 ecc->read_page_raw = nand_read_page_raw;
4455 ecc->write_page_raw = nand_write_page_raw;
4456 ecc->read_oob = nand_read_oob_std;
4457 ecc->write_oob = nand_write_oob_std;
4464 if (!mtd_nand_has_bch()) {
4465 WARN(1, "CONFIG_MTD_NAND_ECC_BCH not enabled\n");
4468 ecc->calculate = nand_bch_calculate_ecc;
4469 ecc->correct = nand_bch_correct_data;
4470 ecc->read_page = nand_read_page_swecc;
4471 ecc->read_subpage = nand_read_subpage;
4472 ecc->write_page = nand_write_page_swecc;
4473 ecc->read_page_raw = nand_read_page_raw;
4474 ecc->write_page_raw = nand_write_page_raw;
4475 ecc->read_oob = nand_read_oob_std;
4476 ecc->write_oob = nand_write_oob_std;
4479 * Board driver should supply ecc.size and ecc.strength
4480 * values to select how many bits are correctable.
4481 * Otherwise, default to 4 bits for large page devices.
4483 if (!ecc->size && (mtd->oobsize >= 64)) {
4489 * If no ECC placement scheme was provided, pick up the default
4492 if (!mtd->ooblayout) {
4493 /* handle large page devices only */
4494 if (mtd->oobsize < 64) {
4495 WARN(1, "OOB layout is required when using software BCH on small pages\n");
4499 mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
4504 * We can only maximize ECC config when the default layout is
4505 * used, otherwise we don't know how many bytes can really be
4508 if (mtd->ooblayout == &nand_ooblayout_lp_ops &&
4509 ecc->options & NAND_ECC_MAXIMIZE) {
4512 /* Always prefer 1k blocks over 512bytes ones */
4514 steps = mtd->writesize / ecc->size;
4516 /* Reserve 2 bytes for the BBM */
4517 bytes = (mtd->oobsize - 2) / steps;
4518 ecc->strength = bytes * 8 / fls(8 * ecc->size);
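/*
 * Worked example (illustrative): for a 2048-byte page with a 128-byte OOB
 * and ecc->size = 1024, steps = 2, bytes = (128 - 2) / 2 = 63 and
 * strength = 63 * 8 / fls(8192) = 504 / 14 = 36 correctable bits per step.
 */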
4521 /* See nand_bch_init() for details. */
4523 ecc->priv = nand_bch_init(mtd);
4525 WARN(1, "BCH ECC initialization failed!\n");
4530 WARN(1, "Unsupported ECC algorithm!\n");
4536 * nand_check_ecc_caps - check the sanity of preset ECC settings
4537 * @chip: nand chip info structure
4538 * @caps: ECC caps info structure
4539 * @oobavail: OOB size that the ECC engine can use
4541 * When ECC step size and strength are already set, check if they are supported
4542 * by the controller and the calculated ECC bytes fit within the chip's OOB.
4543 * On success, the calculated number of ECC bytes is set.
4545 int nand_check_ecc_caps(struct nand_chip *chip,
4546 const struct nand_ecc_caps *caps, int oobavail)
4548 struct mtd_info *mtd = nand_to_mtd(chip);
4549 const struct nand_ecc_step_info *stepinfo;
4550 int preset_step = chip->ecc.size;
4551 int preset_strength = chip->ecc.strength;
4552 int nsteps, ecc_bytes;
4555 if (WARN_ON(oobavail < 0))
4558 if (!preset_step || !preset_strength)
4561 nsteps = mtd->writesize / preset_step;
4563 for (i = 0; i < caps->nstepinfos; i++) {
4564 stepinfo = &caps->stepinfos[i];
4566 if (stepinfo->stepsize != preset_step)
4569 for (j = 0; j < stepinfo->nstrengths; j++) {
4570 if (stepinfo->strengths[j] != preset_strength)
4573 ecc_bytes = caps->calc_ecc_bytes(preset_step,
4575 if (WARN_ON_ONCE(ecc_bytes < 0))
4578 if (ecc_bytes * nsteps > oobavail) {
4579 pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
4580 preset_step, preset_strength);
4584 chip->ecc.bytes = ecc_bytes;
4590 pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
4591 preset_step, preset_strength);
4595 EXPORT_SYMBOL_GPL(nand_check_ecc_caps);

/**
 * nand_match_ecc_req - meet the chip's requirement with least ECC bytes
 * @chip: nand chip info structure
 * @caps: ECC engine caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * If a chip's ECC requirement is provided, try to meet it with the least
 * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
 * On success, the chosen ECC settings are set.
 */
int nand_match_ecc_req(struct nand_chip *chip,
                       const struct nand_ecc_caps *caps, int oobavail)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        const struct nand_ecc_step_info *stepinfo;
        int req_step = chip->ecc_step_ds;
        int req_strength = chip->ecc_strength_ds;
        int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
        int best_step, best_strength, best_ecc_bytes;
        int best_ecc_bytes_total = INT_MAX;
        int i, j;

        if (WARN_ON(oobavail < 0))
                return -EINVAL;

        /* No information provided by the NAND chip */
        if (!req_step || !req_strength)
                return -ENOTSUPP;

        /* number of correctable bits the chip requires in a page */
        req_corr = mtd->writesize / req_step * req_strength;

        for (i = 0; i < caps->nstepinfos; i++) {
                stepinfo = &caps->stepinfos[i];
                step_size = stepinfo->stepsize;

                for (j = 0; j < stepinfo->nstrengths; j++) {
                        strength = stepinfo->strengths[j];

                        /*
                         * If both step size and strength are smaller than the
                         * chip's requirement, it is not easy to compare the
                         * resulting reliability.
                         */
                        if (step_size < req_step && strength < req_strength)
                                continue;

                        if (mtd->writesize % step_size)
                                continue;

                        nsteps = mtd->writesize / step_size;

                        ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
                        if (WARN_ON_ONCE(ecc_bytes < 0))
                                continue;
                        ecc_bytes_total = ecc_bytes * nsteps;

                        if (ecc_bytes_total > oobavail ||
                            strength * nsteps < req_corr)
                                continue;

                        /*
                         * We assume the best is to meet the chip's requirement
                         * with the least number of ECC bytes.
                         */
                        if (ecc_bytes_total < best_ecc_bytes_total) {
                                best_ecc_bytes_total = ecc_bytes_total;
                                best_step = step_size;
                                best_strength = strength;
                                best_ecc_bytes = ecc_bytes;
                        }
                }
        }

        if (best_ecc_bytes_total == INT_MAX)
                return -ENOTSUPP;

        chip->ecc.size = best_step;
        chip->ecc.strength = best_strength;
        chip->ecc.bytes = best_ecc_bytes;

        return 0;
}
EXPORT_SYMBOL_GPL(nand_match_ecc_req);
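
/*
 * Worked example for nand_match_ecc_req() (illustrative numbers only):
 * a chip requiring 8 bits per 512 bytes (ecc_step_ds = 512,
 * ecc_strength_ds = 8) with a 2048-byte page and 60 usable OOB bytes,
 * against a hypothetical engine offering 512-byte steps at strengths
 * {4, 8, 16} costing DIV_ROUND_UP(14 * strength, 8) bytes per step:
 *
 *   req_corr = 2048 / 512 * 8 = 32 correctable bits per page
 *   strength 4:  4 steps * 4  bits  = 16 < 32            -> rejected
 *   strength 8:  4 steps * 14 bytes = 56 <= 60, 32 >= 32  -> chosen
 *   strength 16: 4 steps * 28 bytes = 112 > 60            -> rejected
 *
 * so the helper settles on ecc.size = 512, ecc.strength = 8, ecc.bytes = 14,
 * the cheapest setting that still meets the chip's requirement.
 */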

/**
 * nand_maximize_ecc - choose the max ECC strength available
 * @chip: nand chip info structure
 * @caps: ECC engine caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * Choose the max ECC strength that is supported on the controller, and can fit
 * within the chip's OOB. On success, the chosen ECC settings are set.
 */
int nand_maximize_ecc(struct nand_chip *chip,
                      const struct nand_ecc_caps *caps, int oobavail)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        const struct nand_ecc_step_info *stepinfo;
        int step_size, strength, nsteps, ecc_bytes, corr;
        int best_corr = 0;
        int best_step = 0;
        int best_strength, best_ecc_bytes;
        int i, j;

        if (WARN_ON(oobavail < 0))
                return -EINVAL;

        for (i = 0; i < caps->nstepinfos; i++) {
                stepinfo = &caps->stepinfos[i];
                step_size = stepinfo->stepsize;

                /* If chip->ecc.size is already set, respect it */
                if (chip->ecc.size && step_size != chip->ecc.size)
                        continue;

                for (j = 0; j < stepinfo->nstrengths; j++) {
                        strength = stepinfo->strengths[j];

                        if (mtd->writesize % step_size)
                                continue;

                        nsteps = mtd->writesize / step_size;

                        ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
                        if (WARN_ON_ONCE(ecc_bytes < 0))
                                continue;

                        if (ecc_bytes * nsteps > oobavail)
                                continue;

                        corr = strength * nsteps;

                        /*
                         * If the number of correctable bits is the same,
                         * bigger step_size has more reliability.
                         */
                        if (corr > best_corr ||
                            (corr == best_corr && step_size > best_step)) {
                                best_corr = corr;
                                best_step = step_size;
                                best_strength = strength;
                                best_ecc_bytes = ecc_bytes;
                        }
                }
        }

        if (!best_corr)
                return -ENOTSUPP;

        chip->ecc.size = best_step;
        chip->ecc.strength = best_strength;
        chip->ecc.bytes = best_ecc_bytes;

        return 0;
}
EXPORT_SYMBOL_GPL(nand_maximize_ecc);
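
/*
 * Illustrative sketch (not part of this file): how a controller driver might
 * combine the three helpers above once its usable OOB size is known. The
 * foo_ecc_caps descriptor is the hypothetical one from the example after
 * nand_check_ecc_caps(); error handling is trimmed for brevity.
 */
#if 0   /* example only */
static int foo_choose_ecc_conf(struct nand_chip *chip, int oobavail)
{
        /* The board explicitly asked for the strongest ECC that fits */
        if (chip->ecc.options & NAND_ECC_MAXIMIZE)
                return nand_maximize_ecc(chip, &foo_ecc_caps, oobavail);

        /* A preset ecc.size/ecc.strength pair only needs validating */
        if (chip->ecc.size && chip->ecc.strength)
                return nand_check_ecc_caps(chip, &foo_ecc_caps, oobavail);

        /*
         * Otherwise satisfy the chip's requirement as cheaply as possible,
         * falling back to the maximum strength if that is not possible.
         */
        if (!nand_match_ecc_req(chip, &foo_ecc_caps, oobavail))
                return 0;

        return nand_maximize_ecc(chip, &foo_ecc_caps, oobavail);
}
#endif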

/*
 * Check if the chip configuration meets the datasheet requirements.
 *
 * If our configuration corrects A bits per B bytes and the minimum
 * required correction level is X bits per Y bytes, then we must ensure
 * both of the following are true:
 *
 * (1) A / B >= X / Y
 * (2) A >= X
 *
 * Requirement (1) ensures we can correct for the required bitflip density.
 * Requirement (2) ensures we can correct even when all bitflips are clumped
 * in the same sector.
 */
static bool nand_ecc_strength_good(struct mtd_info *mtd)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct nand_ecc_ctrl *ecc = &chip->ecc;
        int corr, ds_corr;

        if (ecc->size == 0 || chip->ecc_step_ds == 0)
                /* Not enough information */
                return true;

        /*
         * We get the number of corrected bits per page to compare
         * the correction density.
         */
        corr = (mtd->writesize * ecc->strength) / ecc->size;
        ds_corr = (mtd->writesize * chip->ecc_strength_ds) / chip->ecc_step_ds;

        return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
}
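
/*
 * Worked example (illustrative numbers only): a chip requiring 4 bits per
 * 512 bytes, corrected with 8 bits per 1024 bytes on a 2048-byte page, gives
 *
 *   corr    = 2048 * 8 / 1024 = 16
 *   ds_corr = 2048 * 4 / 512  = 16
 *
 * so requirement (1) holds, and 8 >= 4 satisfies requirement (2): the
 * configuration is considered strong enough. Correcting 4 bits per 256
 * bytes against a requirement of 8 bits per 512 bytes has the same density
 * (32 bits per page each) but fails requirement (2), because 8 bitflips
 * allowed within one 512-byte region could all fall inside a single
 * 256-byte step and overwhelm its 4-bit correction.
 */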

static bool invalid_ecc_page_accessors(struct nand_chip *chip)
{
        struct nand_ecc_ctrl *ecc = &chip->ecc;

        if (nand_standard_page_accessors(ecc))
                return false;

        /*
         * The NAND_ECC_CUSTOM_PAGE_ACCESS flag is set; make sure the NAND
         * controller driver implements all the page accessors because
         * default helpers are not suitable when the core does not
         * send the READ0/PAGEPROG commands.
         */
        return (!ecc->read_page || !ecc->write_page ||
                !ecc->read_page_raw || !ecc->write_page_raw ||
                (NAND_HAS_SUBPAGE_READ(chip) && !ecc->read_subpage) ||
                (NAND_HAS_SUBPAGE_WRITE(chip) && !ecc->write_subpage &&
                 ecc->hwctl && ecc->calculate));
}
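
/*
 * Illustrative sketch (not part of this file): the minimum a controller
 * driver opting out of the standard READ0/PAGEPROG sequencing has to
 * provide so that the check above passes. The foo_*() page accessors are
 * hypothetical placeholders for driver-specific implementations.
 */
#if 0   /* example only */
static void foo_setup_custom_page_access(struct nand_chip *chip)
{
        struct nand_ecc_ctrl *ecc = &chip->ecc;

        /* Tell the core the controller sequences page accesses itself */
        ecc->options |= NAND_ECC_CUSTOM_PAGE_ACCESS;

        /* ...which means every page accessor must come from the driver */
        ecc->read_page = foo_read_page;
        ecc->write_page = foo_write_page;
        ecc->read_page_raw = foo_read_page_raw;
        ecc->write_page_raw = foo_write_page_raw;
}
#endif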

/**
 * nand_scan_tail - [NAND Interface] Scan for the NAND device
 * @mtd: MTD device structure
 *
 * This is the second phase of the normal nand_scan() function. It fills out
 * all the uninitialized function pointers with the defaults and scans for a
 * bad block table if appropriate.
 */
int nand_scan_tail(struct mtd_info *mtd)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct nand_ecc_ctrl *ecc = &chip->ecc;
        struct nand_buffers *nbuf = NULL;
        int ret;

        /* New bad blocks should be marked in OOB, flash-based BBT, or both */
        if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
                    !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
                return -EINVAL;
        }

        if (invalid_ecc_page_accessors(chip)) {
                pr_err("Invalid ECC page accessors setup\n");
                return -EINVAL;
        }

        if (!(chip->options & NAND_OWN_BUFFERS)) {
                nbuf = kzalloc(sizeof(*nbuf), GFP_KERNEL);
                if (!nbuf)
                        return -ENOMEM;
                nbuf->ecccalc = kmalloc(mtd->oobsize, GFP_KERNEL);
                if (!nbuf->ecccalc) {
                        ret = -ENOMEM;
                        goto err_free;
                }
                nbuf->ecccode = kmalloc(mtd->oobsize, GFP_KERNEL);
                if (!nbuf->ecccode) {
                        ret = -ENOMEM;
                        goto err_free;
                }
                nbuf->databuf = kmalloc(mtd->writesize + mtd->oobsize,
                                        GFP_KERNEL);
                if (!nbuf->databuf) {
                        ret = -ENOMEM;
                        goto err_free;
                }
                chip->buffers = nbuf;
        } else {
                if (!chip->buffers) {
                        return -ENOMEM;
                }
        }

        /* Set the internal oob buffer location, just after the page data */
        chip->oob_poi = chip->buffers->databuf + mtd->writesize;

        /*
         * If no default placement scheme is given, select an appropriate one.
         */
        if (!mtd->ooblayout &&
            !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
                switch (mtd->oobsize) {
                case 8:
                case 16:
                        mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
                        break;
                case 64:
                case 128:
                        mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
                        break;
                default:
                        WARN(1, "No oob scheme defined for oobsize %d\n",
                             mtd->oobsize);
                        ret = -EINVAL;
                        goto err_free;
                }
        }

        /*
         * Check ECC mode; default to software ECC if 3 byte/512 byte hardware
         * ECC is selected but the page size is only 256 bytes.
         */
        switch (ecc->mode) {
        case NAND_ECC_HW_OOB_FIRST:
                /* Similar to NAND_ECC_HW, but a separate read_page handle */
                if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
                        WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
                        ret = -EINVAL;
                        goto err_free;
                }
                if (!ecc->read_page)
                        ecc->read_page = nand_read_page_hwecc_oob_first;

        case NAND_ECC_HW:
                /* Use standard hwecc read page function? */
                if (!ecc->read_page)
                        ecc->read_page = nand_read_page_hwecc;
                if (!ecc->write_page)
                        ecc->write_page = nand_write_page_hwecc;
                if (!ecc->read_page_raw)
                        ecc->read_page_raw = nand_read_page_raw;
                if (!ecc->write_page_raw)
                        ecc->write_page_raw = nand_write_page_raw;
                if (!ecc->read_oob)
                        ecc->read_oob = nand_read_oob_std;
                if (!ecc->write_oob)
                        ecc->write_oob = nand_write_oob_std;
                if (!ecc->read_subpage)
                        ecc->read_subpage = nand_read_subpage;
                if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
                        ecc->write_subpage = nand_write_subpage_hwecc;

        case NAND_ECC_HW_SYNDROME:
                if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
                    (!ecc->read_page ||
                     ecc->read_page == nand_read_page_hwecc ||
                     !ecc->write_page ||
                     ecc->write_page == nand_write_page_hwecc)) {
                        WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
                        ret = -EINVAL;
                        goto err_free;
                }
                /* Use standard syndrome read/write page function? */
                if (!ecc->read_page)
                        ecc->read_page = nand_read_page_syndrome;
                if (!ecc->write_page)
                        ecc->write_page = nand_write_page_syndrome;
                if (!ecc->read_page_raw)
                        ecc->read_page_raw = nand_read_page_raw_syndrome;
                if (!ecc->write_page_raw)
                        ecc->write_page_raw = nand_write_page_raw_syndrome;
                if (!ecc->read_oob)
                        ecc->read_oob = nand_read_oob_syndrome;
                if (!ecc->write_oob)
                        ecc->write_oob = nand_write_oob_syndrome;

                if (mtd->writesize >= ecc->size) {
                        if (!ecc->strength) {
                                WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
                                ret = -EINVAL;
                                goto err_free;
                        }
                        break;
                }
                pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
                        ecc->size, mtd->writesize);
                ecc->mode = NAND_ECC_SOFT;
                ecc->algo = NAND_ECC_HAMMING;

        case NAND_ECC_SOFT:
                ret = nand_set_ecc_soft_ops(mtd);
                if (ret) {
                        ret = -EINVAL;
                        goto err_free;
                }
                break;

        case NAND_ECC_ON_DIE:
                if (!ecc->read_page || !ecc->write_page) {
                        WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
                        ret = -EINVAL;
                        goto err_free;
                }
                if (!ecc->read_oob)
                        ecc->read_oob = nand_read_oob_std;
                if (!ecc->write_oob)
                        ecc->write_oob = nand_write_oob_std;
                break;

        case NAND_ECC_NONE:
                pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
                ecc->read_page = nand_read_page_raw;
                ecc->write_page = nand_write_page_raw;
                ecc->read_oob = nand_read_oob_std;
                ecc->read_page_raw = nand_read_page_raw;
                ecc->write_page_raw = nand_write_page_raw;
                ecc->write_oob = nand_write_oob_std;
                ecc->size = mtd->writesize;
                ecc->bytes = 0;
                ecc->strength = 0;
                break;
        default:
                WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
                ret = -EINVAL;
                goto err_free;
        }

        /* For many systems, the standard OOB write also works for raw */
        if (!ecc->read_oob_raw)
                ecc->read_oob_raw = ecc->read_oob;
        if (!ecc->write_oob_raw)
                ecc->write_oob_raw = ecc->write_oob;

        /* propagate ecc info to mtd_info */
        mtd->ecc_strength = ecc->strength;
        mtd->ecc_step_size = ecc->size;

        /*
         * Set the number of read / write steps for one page depending on ECC
         * mode.
         */
        ecc->steps = mtd->writesize / ecc->size;
        if (ecc->steps * ecc->size != mtd->writesize) {
                WARN(1, "Invalid ECC parameters\n");
                ret = -EINVAL;
                goto err_free;
        }
        ecc->total = ecc->steps * ecc->bytes;
        if (ecc->total > mtd->oobsize) {
                WARN(1, "Total number of ECC bytes exceeded oobsize\n");
                ret = -EINVAL;
                goto err_free;
        }

        /*
         * The number of bytes available for a client to place data into
         * the out of band area.
         */
        ret = mtd_ooblayout_count_freebytes(mtd);
        if (ret < 0)
                ret = 0;
        mtd->oobavail = ret;

        /* ECC sanity check: warn if it's too weak */
        if (!nand_ecc_strength_good(mtd))
                pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
                        mtd->name);

        /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
        if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
                switch (ecc->steps) {
                case 2:
                        mtd->subpage_sft = 1;
                        break;
                case 4:
                case 8:
                case 16:
                        mtd->subpage_sft = 2;
                        break;
                }
        }
        chip->subpagesize = mtd->writesize >> mtd->subpage_sft;

        /* Initialize state */
        chip->state = FL_READY;

        /* Invalidate the pagebuffer reference */
        chip->pagebuf = -1;

        /* Large page NAND with SOFT_ECC should support subpage reads */
        switch (ecc->mode) {
        case NAND_ECC_SOFT:
                if (chip->page_shift > 9)
                        chip->options |= NAND_SUBPAGE_READ;
                break;
        default:
                break;
        }

        /* Fill in remaining MTD driver data */
        mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
        mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
                                                  MTD_CAP_NANDFLASH;
        mtd->_erase = nand_erase;
        mtd->_point = NULL;
        mtd->_unpoint = NULL;
        mtd->_read = nand_read;
        mtd->_write = nand_write;
        mtd->_panic_write = panic_nand_write;
        mtd->_read_oob = nand_read_oob;
        mtd->_write_oob = nand_write_oob;
        mtd->_sync = nand_sync;
        mtd->_lock = NULL;
        mtd->_unlock = NULL;
        mtd->_suspend = nand_suspend;
        mtd->_resume = nand_resume;
        mtd->_reboot = nand_shutdown;
        mtd->_block_isreserved = nand_block_isreserved;
        mtd->_block_isbad = nand_block_isbad;
        mtd->_block_markbad = nand_block_markbad;
        mtd->_max_bad_blocks = nand_max_bad_blocks;
        mtd->writebufsize = mtd->writesize;

        /*
         * Initialize bitflip_threshold to its default prior scan_bbt() call.
         * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
         * properly set.
         */
        if (!mtd->bitflip_threshold)
                mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);

        /* Check, if we should skip the bad block table scan */
        if (chip->options & NAND_SKIP_BBTSCAN)
                return 0;

        /* Build bad block table */
        ret = chip->scan_bbt(mtd);
        if (ret)
                goto err_free;
        return 0;

err_free:
        if (nbuf) {
                kfree(nbuf->databuf);
                kfree(nbuf->ecccode);
                kfree(nbuf->ecccalc);
                kfree(nbuf);
        }
        /* Clean up nand_scan_ident(): free manufacturer priv data. */
        nand_manufacturer_cleanup(chip);
        return ret;
}
EXPORT_SYMBOL(nand_scan_tail);
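
/*
 * Illustrative sketch (not part of this file): the usual two-phase probe a
 * controller driver uses when it must inspect the identified chip before
 * committing to an ECC configuration. foo_setup_ecc() is a hypothetical
 * driver helper (it could, for instance, call the nand_*_ecc helpers above);
 * everything else is the nand_scan_ident()/nand_scan_tail() API from this
 * file.
 */
#if 0   /* example only */
static int foo_scan(struct mtd_info *mtd)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        int ret;

        /* Phase 1: reset and identify the chip, read ONFI/JEDEC parameters */
        ret = nand_scan_ident(mtd, 1, NULL);
        if (ret)
                return ret;

        /* chip->ecc_step_ds/ecc_strength_ds etc. are now valid; pick ECC */
        ret = foo_setup_ecc(chip);
        if (ret)
                return ret;

        /* Phase 2: fill in default ops, allocate buffers, scan the BBT */
        return nand_scan_tail(mtd);
}
#endif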

/*
 * is_module_text_address() isn't exported, and it's mostly a pointless
 * test if this is a module _anyway_ -- they'd have to try _really_ hard
 * to call us from in-kernel code if the core NAND support is modular.
 */
#ifdef MODULE
#define caller_is_module() (1)
#else
#define caller_is_module() \
        is_module_text_address((unsigned long)__builtin_return_address(0))
#endif

/**
 * nand_scan - [NAND Interface] Scan for the NAND device
 * @mtd: MTD device structure
 * @maxchips: number of chips to scan for
 *
 * This fills out all the uninitialized function pointers with the defaults.
 * The flash ID is read and the mtd/chip structures are filled with the
 * appropriate values.
 */
int nand_scan(struct mtd_info *mtd, int maxchips)
{
        int ret;

        ret = nand_scan_ident(mtd, maxchips, NULL);
        if (!ret)
                ret = nand_scan_tail(mtd);
        return ret;
}
EXPORT_SYMBOL(nand_scan);
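
/*
 * Illustrative sketch (not part of this file): a minimal platform driver
 * probe/remove flow built on nand_scan(). All controller-specific setup is
 * omitted and the foo_* names are hypothetical.
 */
#if 0   /* example only */
static int foo_probe(struct platform_device *pdev)
{
        struct nand_chip *chip;
        struct mtd_info *mtd;
        int ret;

        chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
        if (!chip)
                return -ENOMEM;

        mtd = nand_to_mtd(chip);
        mtd->dev.parent = &pdev->dev;
        /* ... set up chip->cmd_ctrl, chip->ecc.mode, etc. here ... */

        ret = nand_scan(mtd, 1);
        if (ret)
                return ret;

        platform_set_drvdata(pdev, mtd);

        ret = mtd_device_register(mtd, NULL, 0);
        if (ret)
                nand_cleanup(chip);

        return ret;
}

static int foo_remove(struct platform_device *pdev)
{
        /* Unregisters the MTD device and frees what nand_scan() allocated */
        nand_release(platform_get_drvdata(pdev));

        return 0;
}
#endif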

/**
 * nand_cleanup - [NAND Interface] Free resources held by the NAND device
 * @chip: NAND chip object
 */
void nand_cleanup(struct nand_chip *chip)
{
        if (chip->ecc.mode == NAND_ECC_SOFT &&
            chip->ecc.algo == NAND_ECC_BCH)
                nand_bch_free((struct nand_bch_control *)chip->ecc.priv);

        nand_release_data_interface(chip);

        /* Free bad block table memory */
        kfree(chip->bbt);
        if (!(chip->options & NAND_OWN_BUFFERS) && chip->buffers) {
                kfree(chip->buffers->databuf);
                kfree(chip->buffers->ecccode);
                kfree(chip->buffers->ecccalc);
                kfree(chip->buffers);
        }

        /* Free bad block descriptor memory */
        if (chip->badblock_pattern && chip->badblock_pattern->options
                        & NAND_BBT_DYNAMICSTRUCT)
                kfree(chip->badblock_pattern);

        /* Free manufacturer priv data. */
        nand_manufacturer_cleanup(chip);
}
EXPORT_SYMBOL_GPL(nand_cleanup);

/**
 * nand_release - [NAND Interface] Unregister the MTD device and free resources
 *                held by the NAND device
 * @mtd: MTD device structure
 */
void nand_release(struct mtd_info *mtd)
{
        mtd_device_unregister(mtd);
        nand_cleanup(mtd_to_nand(mtd));
}
EXPORT_SYMBOL_GPL(nand_release);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
MODULE_DESCRIPTION("Generic NAND flash driver code");