};
};
+ usb@73f80000 {
+ compatible = "fsl,imx51-usb", "fsl,imx27-usb";
+ reg = <0x73f80000 0x0200>;
+ interrupts = <18>;
+ status = "disabled";
+ };
+
+ usb@73f80200 {
+ compatible = "fsl,imx51-usb", "fsl,imx27-usb";
+ reg = <0x73f80200 0x0200>;
+ interrupts = <14>;
+ status = "disabled";
+ };
+
+ usb@73f80400 {
+ compatible = "fsl,imx51-usb", "fsl,imx27-usb";
+ reg = <0x73f80400 0x0200>;
+ interrupts = <16>;
+ status = "disabled";
+ };
+
+ usb@73f80600 {
+ compatible = "fsl,imx51-usb", "fsl,imx27-usb";
+ reg = <0x73f80600 0x0200>;
+ interrupts = <17>;
+ status = "disabled";
+ };
+
gpio1: gpio@73f84000 {
compatible = "fsl,imx51-gpio", "fsl,imx35-gpio";
reg = <0x73f84000 0x4000>;
compatible = "fsl,imx51-wdt", "fsl,imx21-wdt";
reg = <0x73f98000 0x4000>;
interrupts = <58>;
- status = "disabled";
};
wdog@73f9c000 { /* WDOG2 */
status = "disabled";
};
+ iomuxc@73fa8000 {
+ compatible = "fsl,imx51-iomuxc";
+ reg = <0x73fa8000 0x4000>;
+
+ audmux {
+ pinctrl_audmux_1: audmuxgrp-1 {
+ fsl,pins = <
+ 384 0x80000000 /* MX51_PAD_AUD3_BB_TXD__AUD3_TXD */
+ 386 0x80000000 /* MX51_PAD_AUD3_BB_RXD__AUD3_RXD */
+ 389 0x80000000 /* MX51_PAD_AUD3_BB_CK__AUD3_TXC */
+ 391 0x80000000 /* MX51_PAD_AUD3_BB_FS__AUD3_TXFS */
+ >;
+ };
+ };
+
+ fec {
+ pinctrl_fec_1: fecgrp-1 {
+ fsl,pins = <
+ 128 0x80000000 /* MX51_PAD_EIM_EB2__FEC_MDIO */
+ 134 0x80000000 /* MX51_PAD_EIM_EB3__FEC_RDATA1 */
+ 146 0x80000000 /* MX51_PAD_EIM_CS2__FEC_RDATA2 */
+ 152 0x80000000 /* MX51_PAD_EIM_CS3__FEC_RDATA3 */
+ 158 0x80000000 /* MX51_PAD_EIM_CS4__FEC_RX_ER */
+ 165 0x80000000 /* MX51_PAD_EIM_CS5__FEC_CRS */
+ 206 0x80000000 /* MX51_PAD_NANDF_RB2__FEC_COL */
+ 213 0x80000000 /* MX51_PAD_NANDF_RB3__FEC_RX_CLK */
+ 293 0x80000000 /* MX51_PAD_NANDF_D9__FEC_RDATA0 */
+ 298 0x80000000 /* MX51_PAD_NANDF_D8__FEC_TDATA0 */
+ 225 0x80000000 /* MX51_PAD_NANDF_CS2__FEC_TX_ER */
+ 231 0x80000000 /* MX51_PAD_NANDF_CS3__FEC_MDC */
+ 237 0x80000000 /* MX51_PAD_NANDF_CS4__FEC_TDATA1 */
+ 243 0x80000000 /* MX51_PAD_NANDF_CS5__FEC_TDATA2 */
+ 250 0x80000000 /* MX51_PAD_NANDF_CS6__FEC_TDATA3 */
+ 255 0x80000000 /* MX51_PAD_NANDF_CS7__FEC_TX_EN */
+ 260 0x80000000 /* MX51_PAD_NANDF_RDY_INT__FEC_TX_CLK */
+ >;
+ };
+ };
+
+ ecspi1 {
+ pinctrl_ecspi1_1: ecspi1grp-1 {
+ fsl,pins = <
+ 398 0x185 /* MX51_PAD_CSPI1_MISO__ECSPI1_MISO */
+ 394 0x185 /* MX51_PAD_CSPI1_MOSI__ECSPI1_MOSI */
+ 409 0x185 /* MX51_PAD_CSPI1_SCLK__ECSPI1_SCLK */
+ >;
+ };
+ };
+
+ esdhc1 {
+ pinctrl_esdhc1_1: esdhc1grp-1 {
+ fsl,pins = <
+ 666 0x400020d5 /* MX51_PAD_SD1_CMD__SD1_CMD */
+ 669 0x20d5 /* MX51_PAD_SD1_CLK__SD1_CLK */
+ 672 0x20d5 /* MX51_PAD_SD1_DATA0__SD1_DATA0 */
+ 678 0x20d5 /* MX51_PAD_SD1_DATA1__SD1_DATA1 */
+ 684 0x20d5 /* MX51_PAD_SD1_DATA2__SD1_DATA2 */
+ 691 0x20d5 /* MX51_PAD_SD1_DATA3__SD1_DATA3 */
+ >;
+ };
+ };
+
+ esdhc2 {
+ pinctrl_esdhc2_1: esdhc2grp-1 {
+ fsl,pins = <
+ 704 0x400020d5 /* MX51_PAD_SD2_CMD__SD2_CMD */
+ 707 0x20d5 /* MX51_PAD_SD2_CLK__SD2_CLK */
+ 710 0x20d5 /* MX51_PAD_SD2_DATA0__SD2_DATA0 */
+ 712 0x20d5 /* MX51_PAD_SD2_DATA1__SD2_DATA1 */
+ 715 0x20d5 /* MX51_PAD_SD2_DATA2__SD2_DATA2 */
+ 719 0x20d5 /* MX51_PAD_SD2_DATA3__SD2_DATA3 */
+ >;
+ };
+ };
+
+ i2c2 {
+ pinctrl_i2c2_1: i2c2grp-1 {
+ fsl,pins = <
+ 449 0x400001ed /* MX51_PAD_KEY_COL4__I2C2_SCL */
+ 454 0x400001ed /* MX51_PAD_KEY_COL5__I2C2_SDA */
+ >;
+ };
+ };
+
+ uart1 {
+ pinctrl_uart1_1: uart1grp-1 {
+ fsl,pins = <
+ 413 0x1c5 /* MX51_PAD_UART1_RXD__UART1_RXD */
+ 416 0x1c5 /* MX51_PAD_UART1_TXD__UART1_TXD */
+ 418 0x1c5 /* MX51_PAD_UART1_RTS__UART1_RTS */
+ 420 0x1c5 /* MX51_PAD_UART1_CTS__UART1_CTS */
+ >;
+ };
+ };
+
+ uart2 {
+ pinctrl_uart2_1: uart2grp-1 {
+ fsl,pins = <
+ 423 0x1c5 /* MX51_PAD_UART2_RXD__UART2_RXD */
+ 426 0x1c5 /* MX51_PAD_UART2_TXD__UART2_TXD */
+ >;
+ };
+ };
+
+ uart3 {
+ pinctrl_uart3_1: uart3grp-1 {
+ fsl,pins = <
+ 54 0x1c5 /* MX51_PAD_EIM_D25__UART3_RXD */
+ 59 0x1c5 /* MX51_PAD_EIM_D26__UART3_TXD */
+ 65 0x1c5 /* MX51_PAD_EIM_D27__UART3_RTS */
+ 49 0x1c5 /* MX51_PAD_EIM_D24__UART3_CTS */
+ >;
+ };
+ };
+ };
+
uart1: serial@73fbc000 {
compatible = "fsl,imx51-uart", "fsl,imx21-uart";
reg = <0x73fbc000 0x4000>;
compatible = "fsl,imx51-sdma", "fsl,imx35-sdma";
reg = <0x83fb0000 0x4000>;
interrupts = <6>;
+ fsl,sdma-ram-script-name = "imx/sdma/sdma-imx51.bin";
};
cspi@83fc0000 {
status = "disabled";
};
+ nand@83fdb000 {
+ compatible = "fsl,imx51-nand";
+ reg = <0x83fdb000 0x1000 0xcfff0000 0x10000>;
+ interrupts = <8>;
+ status = "disabled";
+ };
+
ssi3: ssi@83fe8000 {
compatible = "fsl,imx51-ssi", "fsl,imx21-ssi";
reg = <0x83fe8000 0x4000>;
};
};
+ usb@53f80000 {
+ compatible = "fsl,imx53-usb", "fsl,imx27-usb";
+ reg = <0x53f80000 0x0200>;
+ interrupts = <18>;
+ status = "disabled";
+ };
+
+ usb@53f80200 {
+ compatible = "fsl,imx53-usb", "fsl,imx27-usb";
+ reg = <0x53f80200 0x0200>;
+ interrupts = <14>;
+ status = "disabled";
+ };
+
+ usb@53f80400 {
+ compatible = "fsl,imx53-usb", "fsl,imx27-usb";
+ reg = <0x53f80400 0x0200>;
+ interrupts = <16>;
+ status = "disabled";
+ };
+
+ usb@53f80600 {
+ compatible = "fsl,imx53-usb", "fsl,imx27-usb";
+ reg = <0x53f80600 0x0200>;
+ interrupts = <17>;
+ status = "disabled";
+ };
+
gpio1: gpio@53f84000 {
compatible = "fsl,imx53-gpio", "fsl,imx35-gpio";
reg = <0x53f84000 0x4000>;
compatible = "fsl,imx53-wdt", "fsl,imx21-wdt";
reg = <0x53f98000 0x4000>;
interrupts = <58>;
- status = "disabled";
};
wdog@53f9c000 { /* WDOG2 */
status = "disabled";
};
+ iomuxc@53fa8000 {
+ compatible = "fsl,imx53-iomuxc";
+ reg = <0x53fa8000 0x4000>;
+
+ audmux {
+ pinctrl_audmux_1: audmuxgrp-1 {
+ fsl,pins = <
+ 10 0x80000000 /* MX53_PAD_KEY_COL0__AUDMUX_AUD5_TXC */
+ 17 0x80000000 /* MX53_PAD_KEY_ROW0__AUDMUX_AUD5_TXD */
+ 23 0x80000000 /* MX53_PAD_KEY_COL1__AUDMUX_AUD5_TXFS */
+ 30 0x80000000 /* MX53_PAD_KEY_ROW1__AUDMUX_AUD5_RXD */
+ >;
+ };
+ };
+
+ fec {
+ pinctrl_fec_1: fecgrp-1 {
+ fsl,pins = <
+ 820 0x80000000 /* MX53_PAD_FEC_MDC__FEC_MDC */
+ 779 0x80000000 /* MX53_PAD_FEC_MDIO__FEC_MDIO */
+ 786 0x80000000 /* MX53_PAD_FEC_REF_CLK__FEC_TX_CLK */
+ 791 0x80000000 /* MX53_PAD_FEC_RX_ER__FEC_RX_ER */
+ 796 0x80000000 /* MX53_PAD_FEC_CRS_DV__FEC_RX_DV */
+ 799 0x80000000 /* MX53_PAD_FEC_RXD1__FEC_RDATA_1 */
+ 804 0x80000000 /* MX53_PAD_FEC_RXD0__FEC_RDATA_0 */
+ 808 0x80000000 /* MX53_PAD_FEC_TX_EN__FEC_TX_EN */
+ 811 0x80000000 /* MX53_PAD_FEC_TXD1__FEC_TDATA_1 */
+ 816 0x80000000 /* MX53_PAD_FEC_TXD0__FEC_TDATA_0 */
+ >;
+ };
+ };
+
+ ecspi1 {
+ pinctrl_ecspi1_1: ecspi1grp-1 {
+ fsl,pins = <
+ 433 0x80000000 /* MX53_PAD_EIM_D16__ECSPI1_SCLK */
+ 439 0x80000000 /* MX53_PAD_EIM_D17__ECSPI1_MISO */
+ 445 0x80000000 /* MX53_PAD_EIM_D18__ECSPI1_MOSI */
+ >;
+ };
+ };
+
+ esdhc1 {
+ pinctrl_esdhc1_1: esdhc1grp-1 {
+ fsl,pins = <
+ 995 0x1d5 /* MX53_PAD_SD1_DATA0__ESDHC1_DAT0 */
+ 1000 0x1d5 /* MX53_PAD_SD1_DATA1__ESDHC1_DAT1 */
+ 1010 0x1d5 /* MX53_PAD_SD1_DATA2__ESDHC1_DAT2 */
+ 1024 0x1d5 /* MX53_PAD_SD1_DATA3__ESDHC1_DAT3 */
+ 1005 0x1d5 /* MX53_PAD_SD1_CMD__ESDHC1_CMD */
+ 1018 0x1d5 /* MX53_PAD_SD1_CLK__ESDHC1_CLK */
+ >;
+ };
+
+ pinctrl_esdhc1_2: esdhc1grp-2 {
+ fsl,pins = <
+ 995 0x1d5 /* MX53_PAD_SD1_DATA0__ESDHC1_DAT0 */
+ 1000 0x1d5 /* MX53_PAD_SD1_DATA1__ESDHC1_DAT1 */
+ 1010 0x1d5 /* MX53_PAD_SD1_DATA2__ESDHC1_DAT2 */
+ 1024 0x1d5 /* MX53_PAD_SD1_DATA3__ESDHC1_DAT3 */
+ 941 0x1d5 /* MX53_PAD_PATA_DATA8__ESDHC1_DAT4 */
+ 948 0x1d5 /* MX53_PAD_PATA_DATA9__ESDHC1_DAT5 */
+ 955 0x1d5 /* MX53_PAD_PATA_DATA10__ESDHC1_DAT6 */
+ 962 0x1d5 /* MX53_PAD_PATA_DATA11__ESDHC1_DAT7 */
+ 1005 0x1d5 /* MX53_PAD_SD1_CMD__ESDHC1_CMD */
+ 1018 0x1d5 /* MX53_PAD_SD1_CLK__ESDHC1_CLK */
+ >;
+ };
+ };
+
+ esdhc2 {
+ pinctrl_esdhc2_1: esdhc2grp-1 {
+ fsl,pins = <
+ 1038 0x1d5 /* MX53_PAD_SD2_CMD__ESDHC2_CMD */
+ 1032 0x1d5 /* MX53_PAD_SD2_CLK__ESDHC2_CLK */
+ 1062 0x1d5 /* MX53_PAD_SD2_DATA0__ESDHC2_DAT0 */
+ 1056 0x1d5 /* MX53_PAD_SD2_DATA1__ESDHC2_DAT1 */
+ 1050 0x1d5 /* MX53_PAD_SD2_DATA2__ESDHC2_DAT2 */
+ 1044 0x1d5 /* MX53_PAD_SD2_DATA3__ESDHC2_DAT3 */
+ >;
+ };
+ };
+
+ esdhc3 {
+ pinctrl_esdhc3_1: esdhc3grp-1 {
+ fsl,pins = <
+ 943 0x1d5 /* MX53_PAD_PATA_DATA8__ESDHC3_DAT0 */
+ 950 0x1d5 /* MX53_PAD_PATA_DATA9__ESDHC3_DAT1 */
+ 957 0x1d5 /* MX53_PAD_PATA_DATA10__ESDHC3_DAT2 */
+ 964 0x1d5 /* MX53_PAD_PATA_DATA11__ESDHC3_DAT3 */
+ 893 0x1d5 /* MX53_PAD_PATA_DATA0__ESDHC3_DAT4 */
+ 900 0x1d5 /* MX53_PAD_PATA_DATA1__ESDHC3_DAT5 */
+ 906 0x1d5 /* MX53_PAD_PATA_DATA2__ESDHC3_DAT6 */
+ 912 0x1d5 /* MX53_PAD_PATA_DATA3__ESDHC3_DAT7 */
+ 857 0x1d5 /* MX53_PAD_PATA_RESET_B__ESDHC3_CMD */
+ 863 0x1d5 /* MX53_PAD_PATA_IORDY__ESDHC3_CLK */
+ >;
+ };
+ };
+
+ i2c1 {
+ pinctrl_i2c1_1: i2c1grp-1 {
+ fsl,pins = <
+ 333 0xc0000000 /* MX53_PAD_CSI0_DAT8__I2C1_SDA */
+ 341 0xc0000000 /* MX53_PAD_CSI0_DAT9__I2C1_SCL */
+ >;
+ };
+ };
+
+ i2c2 {
+ pinctrl_i2c2_1: i2c2grp-1 {
+ fsl,pins = <
+ 61 0xc0000000 /* MX53_PAD_KEY_ROW3__I2C2_SDA */
+ 53 0xc0000000 /* MX53_PAD_KEY_COL3__I2C2_SCL */
+ >;
+ };
+ };
+
+ uart1 {
+ pinctrl_uart1_1: uart1grp-1 {
+ fsl,pins = <
+ 346 0x1c5 /* MX53_PAD_CSI0_DAT10__UART1_TXD_MUX */
+ 354 0x1c5 /* MX53_PAD_CSI0_DAT11__UART1_RXD_MUX */
+ >;
+ };
+
+ pinctrl_uart1_2: uart1grp-2 {
+ fsl,pins = <
+ 828 0x1c5 /* MX53_PAD_PATA_DIOW__UART1_TXD_MUX */
+ 832 0x1c5 /* MX53_PAD_PATA_DMACK__UART1_RXD_MUX */
+ >;
+ };
+ };
+
+ uart2 {
+ pinctrl_uart2_1: uart2grp-1 {
+ fsl,pins = <
+ 841 0x1c5 /* MX53_PAD_PATA_BUFFER_EN__UART2_RXD_MUX */
+ 836 0x1c5 /* MX53_PAD_PATA_DMARQ__UART2_TXD_MUX */
+ >;
+ };
+ };
+
+ uart3 {
+ pinctrl_uart3_1: uart3grp-1 {
+ fsl,pins = <
+ 884 0x1c5 /* MX53_PAD_PATA_CS_0__UART3_TXD_MUX */
+ 888 0x1c5 /* MX53_PAD_PATA_CS_1__UART3_RXD_MUX */
+ 875 0x1c5 /* MX53_PAD_PATA_DA_1__UART3_CTS */
+ 880 0x1c5 /* MX53_PAD_PATA_DA_2__UART3_RTS */
+ >;
+ };
+ };
+ };
+
uart1: serial@53fbc000 {
compatible = "fsl,imx53-uart", "fsl,imx21-uart";
reg = <0x53fbc000 0x4000>;
status = "disabled";
};
+ can1: can@53fc8000 {
+ compatible = "fsl,imx53-flexcan", "fsl,p1010-flexcan";
+ reg = <0x53fc8000 0x4000>;
+ interrupts = <82>;
+ status = "disabled";
+ };
+
+ can2: can@53fcc000 {
+ compatible = "fsl,imx53-flexcan", "fsl,p1010-flexcan";
+ reg = <0x53fcc000 0x4000>;
+ interrupts = <83>;
+ status = "disabled";
+ };
+
gpio5: gpio@53fdc000 {
compatible = "fsl,imx53-gpio", "fsl,imx35-gpio";
reg = <0x53fdc000 0x4000>;
compatible = "fsl,imx53-sdma", "fsl,imx35-sdma";
reg = <0x63fb0000 0x4000>;
interrupts = <6>;
+ fsl,sdma-ram-script-name = "imx/sdma/sdma-imx53.bin";
};
cspi@63fc0000 {
status = "disabled";
};
+ nand@63fdb000 {
+ compatible = "fsl,imx53-nand";
+ reg = <0x63fdb000 0x1000 0xf7ff0000 0x10000>;
+ interrupts = <8>;
+ status = "disabled";
+ };
+
ssi3: ssi@63fe8000 {
compatible = "fsl,imx53-ssi", "fsl,imx21-ssi";
reg = <0x63fe8000 0x4000>;
static const char *emi_slow_sel[] = { "main_bus", "ahb", };
static const char *usb_phy_sel_str[] = { "osc", "usb_phy_podf", };
static const char *mx51_ipu_di0_sel[] = { "di_pred", "osc", "ckih1", "tve_di", };
- static const char *mx53_ipu_di0_sel[] = { "di_pred", "osc", "ckih1", "di_pll4_podf", "dummy", "ldb_di0", };
+ static const char *mx53_ipu_di0_sel[] = { "di_pred", "osc", "ckih1", "di_pll4_podf", "dummy", "ldb_di0_gate", };
static const char *mx53_ldb_di0_sel[] = { "pll3_sw", "pll4_sw", };
static const char *mx51_ipu_di1_sel[] = { "di_pred", "osc", "ckih1", "tve_di", "ipp_di1", };
- static const char *mx53_ipu_di1_sel[] = { "di_pred", "osc", "ckih1", "tve_di", "ipp_di1", "ldb_di1", };
+ static const char *mx53_ipu_di1_sel[] = { "di_pred", "osc", "ckih1", "tve_di", "ipp_di1", "ldb_di1_gate", };
static const char *mx53_ldb_di1_sel[] = { "pll3_sw", "pll4_sw", };
static const char *mx51_tve_ext_sel[] = { "osc", "ckih1", };
static const char *mx53_tve_ext_sel[] = { "pll4_sw", "ckih1", };
static const char *tve_sel[] = { "tve_pred", "tve_ext_sel", };
static const char *ipu_sel[] = { "axi_a", "axi_b", "emi_slow_gate", "ahb", };
static const char *vpu_sel[] = { "axi_a", "axi_b", "emi_slow_gate", "ahb", };
+ static const char *mx53_can_sel[] = { "ipg", "ckih1", "ckih2", "lp_apm", };
enum imx5_clks {
dummy, ckil, osc, ckih1, ckih2, ahb, ipg, axi_a, axi_b, uart_pred,
ssi_ext1_podf, ssi_ext2_pred, ssi_ext2_podf, ssi1_root_gate,
ssi2_root_gate, ssi3_root_gate, ssi_ext1_gate, ssi_ext2_gate,
epit1_ipg_gate, epit1_hf_gate, epit2_ipg_gate, epit2_hf_gate,
+ can_sel, can1_serial_gate, can1_ipg_gate,
clk_max
};
clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "83fcc000.ssi");
clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "70014000.ssi");
clk_register_clkdev(clk[ssi3_ipg_gate], NULL, "83fe8000.ssi");
+ clk_register_clkdev(clk[nfc_gate], NULL, "83fdb000.nand");
/* set the usboh3 parent to pll2_sw */
clk_set_parent(clk[usboh3_sel], clk[pll2_sw]);
clk[esdhc4_per_gate] = imx_clk_gate2("esdhc4_per_gate", "esdhc_d_sel", MXC_CCM_CCGR3, 14);
clk[usb_phy1_gate] = imx_clk_gate2("usb_phy1_gate", "usb_phy_sel", MXC_CCM_CCGR4, 10);
clk[usb_phy2_gate] = imx_clk_gate2("usb_phy2_gate", "usb_phy_sel", MXC_CCM_CCGR4, 12);
- clk[can2_serial_gate] = imx_clk_gate2("can2_serial_gate", "ipg", MXC_CCM_CCGR4, 6);
- clk[can2_ipg_gate] = imx_clk_gate2("can2_ipg_gate", "ipg", MXC_CCM_CCGR4, 8);
+ clk[can_sel] = imx_clk_mux("can_sel", MXC_CCM_CSCMR2, 6, 2,
+ mx53_can_sel, ARRAY_SIZE(mx53_can_sel));
+ clk[can1_serial_gate] = imx_clk_gate2("can1_serial_gate", "can_sel", MXC_CCM_CCGR6, 22);
+ clk[can1_ipg_gate] = imx_clk_gate2("can1_ipg_gate", "ipg", MXC_CCM_CCGR6, 20);
+ clk[can2_serial_gate] = imx_clk_gate2("can2_serial_gate", "can_sel", MXC_CCM_CCGR4, 8);
+ clk[can2_ipg_gate] = imx_clk_gate2("can2_ipg_gate", "ipg", MXC_CCM_CCGR4, 6);
clk[i2c3_gate] = imx_clk_gate2("i2c3_gate", "per_root", MXC_CCM_CCGR1, 22);
for (i = 0; i < ARRAY_SIZE(clk); i++)
clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "63fcc000.ssi");
clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "50014000.ssi");
clk_register_clkdev(clk[ssi3_ipg_gate], NULL, "63fd0000.ssi");
+ clk_register_clkdev(clk[nfc_gate], NULL, "63fdb000.nand");
+ clk_register_clkdev(clk[can1_ipg_gate], "ipg", "53fc8000.can");
+ clk_register_clkdev(clk[can1_serial_gate], "per", "53fc8000.can");
+ clk_register_clkdev(clk[can2_ipg_gate], "ipg", "53fcc000.can");
+ clk_register_clkdev(clk[can2_serial_gate], "per", "53fcc000.can");
/* set SDHC root clock to 200MHZ*/
clk_set_rate(clk[esdhc_a_podf], 200000000);
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
- # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+ CONFIG_PARTITION_ADVANCED=y
# CONFIG_PPC_CHRP is not set
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_83xx=y
CONFIG_QUICC_ENGINE=y
CONFIG_QE_GPIO=y
CONFIG_MATH_EMULATION=y
- CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+ CONFIG_DEVTMPFS=y
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
- CONFIG_MTD_PARTITIONS=y
- CONFIG_MTD_OF_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_VERIFY_WRITE=y
CONFIG_MTD_NAND_FSL_ELBC=y
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_SATA_FSL=y
CONFIG_SATA_SIL=y
CONFIG_NETDEVICES=y
+ CONFIG_MII=y
+ CONFIG_UCC_GETH=y
+ CONFIG_GIANFAR=y
CONFIG_MARVELL_PHY=y
CONFIG_DAVICOM_PHY=y
CONFIG_VITESSE_PHY=y
CONFIG_ICPLUS_PHY=y
CONFIG_FIXED_PHY=y
- CONFIG_NET_ETHERNET=y
- CONFIG_MII=y
- CONFIG_GIANFAR=y
- CONFIG_UCC_GETH=y
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
- CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
- CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
- CONFIG_PARTITION_ADVANCED=y
CONFIG_CRC_T10DIF=y
- # CONFIG_RCU_CPU_STALL_DETECTOR is not set
- CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_SHA256=y
}
EXPORT_SYMBOL_GPL(mtd_panic_write);
+int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
+{
+ int ret_code;
+ ops->retlen = ops->oobretlen = 0;
+ if (!mtd->_read_oob)
+ return -EOPNOTSUPP;
+ /*
+ * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
+ * similar to mtd->_read(), returning a non-negative integer
+ * representing max bitflips. In other cases, mtd->_read_oob() may
+ * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
+ */
+ ret_code = mtd->_read_oob(mtd, from, ops);
+ if (unlikely(ret_code < 0))
+ return ret_code;
+ if (mtd->ecc_strength == 0)
+ return 0; /* device lacks ecc */
+ return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
+}
+EXPORT_SYMBOL_GPL(mtd_read_oob);
+
/*
* Method to access the protection register area, present in some flash
* devices. The user data is one time programmable but the factory data is read
* until the request succeeds or until the allocation size falls below
* the system page size. This attempts to make sure it does not adversely
* impact system performance, so when allocating more than one page, we
- * ask the memory allocator to avoid re-trying, swapping, writing back
- * or performing I/O.
+ * ask the memory allocator to avoid re-trying.
*
* Note, this function also makes sure that the allocated buffer is aligned to
* the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
*/
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
{
- gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
- __GFP_NORETRY | __GFP_NO_KSWAPD;
+ gfp_t flags = __GFP_NOWARN | __GFP_WAIT | __GFP_NORETRY;
size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
void *kbuf;
cxt->nextpage = 0;
}
- while (1) {
- ret = mtd_block_isbad(mtd, cxt->nextpage * record_size);
- if (!ret)
- break;
- if (ret < 0) {
- printk(KERN_ERR "mtdoops: block_isbad failed, aborting\n");
- return;
- }
+ while ((ret = mtd_block_isbad(mtd, cxt->nextpage * record_size)) > 0) {
badblock:
printk(KERN_WARNING "mtdoops: bad block at %08lx\n",
cxt->nextpage * record_size);
}
}
+ if (ret < 0) {
+ printk(KERN_ERR "mtdoops: mtd_block_isbad failed, aborting\n");
+ return;
+ }
+
for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size);
printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n");
cxt->mtd = NULL;
- flush_work_sync(&cxt->work_erase);
- flush_work_sync(&cxt->work_write);
+ flush_work(&cxt->work_erase);
+ flush_work(&cxt->work_write);
}
* partition parsers, specified in @types. However, if @types is %NULL, then
* the default list of parsers is used. The default list contains only the
* "cmdlinepart" and "ofpart" parsers ATM.
+ * Note: If there is more than one parser in @types, the kernel only takes the
+ * partitions parsed out by the first parser.
*
* This function may return:
* o a negative error code in case of failure
if (!parser)
continue;
ret = (*parser->parse_fn)(master, pparts, data);
+ put_partition_parser(parser);
if (ret > 0) {
printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
ret, parser->name, master->name);
+ break;
}
- put_partition_parser(parser);
}
return ret;
}
- int mtd_is_partition(struct mtd_info *mtd)
+ int mtd_is_partition(const struct mtd_info *mtd)
{
struct mtd_part *part;
int ispart = 0;
return ispart;
}
EXPORT_SYMBOL_GPL(mtd_is_partition);
+
+ /* Returns the size of the entire flash chip */
+ uint64_t mtd_get_device_size(const struct mtd_info *mtd)
+ {
+ if (!mtd_is_partition(mtd))
+ return mtd->size;
+
+ return PART(mtd)->master->size;
+ }
+ EXPORT_SYMBOL_GPL(mtd_get_device_size);
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+ #include <linux/gpio.h>
+ #include <linux/platform_data/gpio-omap.h>
+
#include <asm/io.h>
- #include <mach/hardware.h>
#include <asm/sizes.h>
- #include <linux/gpio.h>
- #include <plat/board-ams-delta.h>
+
+ #include <mach/board-ams-delta.h>
+
+ #include <mach/hardware.h>
/*
* MTD structure for E3 (Delta)
buf[i] = ams_delta_read_byte(mtd);
}
-static int ams_delta_verify_buf(struct mtd_info *mtd, const u_char *buf,
- int len)
-{
- int i;
-
- for (i=0; i<len; i++)
- if (buf[i] != ams_delta_read_byte(mtd))
- return -EFAULT;
-
- return 0;
-}
-
/*
* Command control function
*
this->read_byte = ams_delta_read_byte;
this->write_buf = ams_delta_write_buf;
this->read_buf = ams_delta_read_buf;
- this->verify_buf = ams_delta_verify_buf;
this->cmd_ctrl = ams_delta_hwcontrol;
if (gpio_request(AMS_DELTA_GPIO_PIN_NAND_RB, "nand_rdy") == 0) {
this->dev_ready = ams_delta_nand_ready;
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
+#include <linux/of_device.h>
- #include <mach/nand.h>
- #include <mach/aemif.h>
+ #include <linux/platform_data/mtd-davinci.h>
+ #include <linux/platform_data/mtd-davinci-aemif.h>
/*
* This is a device driver for the NAND flash controller found on the
},
};
+#if defined(CONFIG_OF)
+static const struct of_device_id davinci_nand_of_match[] = {
+ {.compatible = "ti,davinci-nand", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
+
+static struct davinci_nand_pdata
+ *nand_davinci_get_pdata(struct platform_device *pdev)
+{
+ if (!pdev->dev.platform_data && pdev->dev.of_node) {
+ struct davinci_nand_pdata *pdata;
+ const char *mode;
+ u32 prop;
+ int len;
+
+ pdata = devm_kzalloc(&pdev->dev,
+ sizeof(struct davinci_nand_pdata),
+ GFP_KERNEL);
+ pdev->dev.platform_data = pdata;
+ if (!pdata)
+ return NULL;
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-chipselect", &prop))
+ pdev->id = prop;
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-mask-ale", &prop))
+ pdata->mask_ale = prop;
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-mask-cle", &prop))
+ pdata->mask_cle = prop;
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-mask-chipsel", &prop))
+ pdata->mask_chipsel = prop;
+ if (!of_property_read_string(pdev->dev.of_node,
+ "ti,davinci-ecc-mode", &mode)) {
+ if (!strncmp("none", mode, 4))
+ pdata->ecc_mode = NAND_ECC_NONE;
+ if (!strncmp("soft", mode, 4))
+ pdata->ecc_mode = NAND_ECC_SOFT;
+ if (!strncmp("hw", mode, 2))
+ pdata->ecc_mode = NAND_ECC_HW;
+ }
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-ecc-bits", &prop))
+ pdata->ecc_bits = prop;
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-nand-buswidth", &prop))
+ if (prop == 16)
+ pdata->options |= NAND_BUSWIDTH_16;
+ if (of_find_property(pdev->dev.of_node,
+ "ti,davinci-nand-use-bbt", &len))
+ pdata->bbt_options = NAND_BBT_USE_FLASH;
+ }
+
+ return pdev->dev.platform_data;
+}
+#else
+#define davinci_nand_of_match NULL
+static struct davinci_nand_pdata
+ *nand_davinci_get_pdata(struct platform_device *pdev)
+{
+ return pdev->dev.platform_data;
+}
+#endif
+
static int __init nand_davinci_probe(struct platform_device *pdev)
{
- struct davinci_nand_pdata *pdata = pdev->dev.platform_data;
+ struct davinci_nand_pdata *pdata;
struct davinci_nand_info *info;
struct resource *res1;
struct resource *res2;
uint32_t val;
nand_ecc_modes_t ecc_mode;
+ pdata = nand_davinci_get_pdata(pdev);
/* insist on board-specific configuration */
if (!pdata)
return -ENODEV;
goto err_clk;
}
- ret = clk_enable(info->clk);
+ ret = clk_prepare_enable(info->clk);
if (ret < 0) {
dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n",
ret);
err_scan:
err_timing:
- clk_disable(info->clk);
+ clk_disable_unprepare(info->clk);
err_clk_enable:
clk_put(info->clk);
nand_release(&info->mtd);
- clk_disable(info->clk);
+ clk_disable_unprepare(info->clk);
clk_put(info->clk);
kfree(info);
.remove = __exit_p(nand_davinci_remove),
.driver = {
.name = "davinci_nand",
+ .owner = THIS_MODULE,
+ .of_match_table = davinci_nand_of_match,
},
};
MODULE_ALIAS("platform:davinci_nand");
#include <linux/mtd/nand_ecc.h>
#include <asm/fsl_ifc.h>
+ #define FSL_IFC_V1_1_0 0x01010000
#define ERR_BYTE 0xFF /* Value returned for read
bytes when read failed */
#define IFC_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait
struct nand_chip *chip = mtd->priv;
struct fsl_ifc_mtd *priv = chip->priv;
u8 __iomem *addr = priv->vbase + bufnum * (mtd->writesize * 2);
- u32 __iomem *mainarea = (u32 *)addr;
+ u32 __iomem *mainarea = (u32 __iomem *)addr;
u8 __iomem *oob = addr + mtd->writesize;
int i;
* next byte.
*/
if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
- data = in_be16((uint16_t *)&ifc_nand_ctrl->
- addr[ifc_nand_ctrl->index]);
+ data = in_be16((uint16_t __iomem *)&ifc_nand_ctrl->
+ addr[ifc_nand_ctrl->index]);
ifc_nand_ctrl->index += 2;
return (uint8_t) data;
}
__func__, len, avail);
}
-/*
- * Verify buffer against the IFC Controller Data Buffer
- */
-static int fsl_ifc_verify_buf(struct mtd_info *mtd,
- const u_char *buf, int len)
-{
- struct nand_chip *chip = mtd->priv;
- struct fsl_ifc_mtd *priv = chip->priv;
- struct fsl_ifc_ctrl *ctrl = priv->ctrl;
- struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
- int i;
-
- if (len < 0) {
- dev_err(priv->dev, "%s: write_buf of %d bytes", __func__, len);
- return -EINVAL;
- }
-
- if ((unsigned int)len > nctrl->read_bytes - nctrl->index) {
- dev_err(priv->dev,
- "%s: beyond end of buffer (%d requested, %u available)\n",
- __func__, len, nctrl->read_bytes - nctrl->index);
-
- nctrl->index = nctrl->read_bytes;
- return -EINVAL;
- }
-
- for (i = 0; i < len; i++)
- if (in_8(&nctrl->addr[nctrl->index + i]) != buf[i])
- break;
-
- nctrl->index += len;
-
- if (i != len)
- return -EIO;
- if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
- return -EIO;
-
- return 0;
-}
-
/*
* This function is called after Program and Erase Operations to
* check for success or failure.
/* ECC will be calculated automatically, and errors will be detected in
* waitfunc.
*/
-static void fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+static int fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required)
{
fsl_ifc_write_buf(mtd, buf, mtd->writesize);
fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
}
static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)
return 0;
}
+ static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
+ {
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ uint32_t csor = 0, csor_8k = 0, csor_ext = 0;
+ uint32_t cs = priv->bank;
+
+ /* Save CSOR and CSOR_ext */
+ csor = in_be32(&ifc->csor_cs[cs].csor);
+ csor_ext = in_be32(&ifc->csor_cs[cs].csor_ext);
+
+ /* change PageSize 8K and SpareSize 1K */
+ csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000;
+ out_be32(&ifc->csor_cs[cs].csor, csor_8k);
+ out_be32(&ifc->csor_cs[cs].csor_ext, 0x0000400);
+
+ /* READID */
+ out_be32(&ifc->ifc_nand.nand_fir0,
+ (IFC_FIR_OP_CMD0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT));
+ out_be32(&ifc->ifc_nand.nand_fcr0,
+ NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT);
+ out_be32(&ifc->ifc_nand.row3, 0x0);
+
+ out_be32(&ifc->ifc_nand.nand_fbcr, 0x0);
+
+ /* Program ROW0/COL0 */
+ out_be32(&ifc->ifc_nand.row0, 0x0);
+ out_be32(&ifc->ifc_nand.col0, 0x0);
+
+ /* set the chip select for NAND Transaction */
+ out_be32(&ifc->ifc_nand.nand_csel, cs << IFC_NAND_CSEL_SHIFT);
+
+ /* start read seq */
+ out_be32(&ifc->ifc_nand.nandseq_strt, IFC_NAND_SEQ_STRT_FIR_STRT);
+
+ /* wait for command complete flag or timeout */
+ wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
+ IFC_TIMEOUT_MSECS * HZ/1000);
+
+ if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
+ printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n");
+
+ /* Restore CSOR and CSOR_ext */
+ out_be32(&ifc->csor_cs[cs].csor, csor);
+ out_be32(&ifc->csor_cs[cs].csor_ext, csor_ext);
+ }
+
static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
{
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
struct nand_chip *chip = &priv->chip;
struct nand_ecclayout *layout;
- u32 csor;
+ u32 csor, ver;
/* Fill in fsl_ifc_mtd structure */
priv->mtd.priv = chip;
chip->write_buf = fsl_ifc_write_buf;
chip->read_buf = fsl_ifc_read_buf;
- chip->verify_buf = fsl_ifc_verify_buf;
chip->select_chip = fsl_ifc_select_chip;
chip->cmdfunc = fsl_ifc_cmdfunc;
chip->waitfunc = fsl_ifc_wait;
out_be32(&ifc->ifc_nand.ncfgr, 0x0);
/* set up nand options */
- chip->options = NAND_NO_READRDY;
chip->bbt_options = NAND_BBT_USE_FLASH;
chip->ecc.mode = NAND_ECC_SOFT;
}
+ ver = in_be32(&ifc->ifc_rev);
+ if (ver == FSL_IFC_V1_1_0)
+ fsl_ifc_sram_init(priv);
+
return 0;
}
#include <linux/of_mtd.h>
#include <asm/mach/flash.h>
- #include <mach/mxc_nand.h>
+ #include <linux/platform_data/mtd-mxc_nand.h>
#include <mach/hardware.h>
#define DRIVER_NAME "mxc_nand"
#define nfc_is_v21() (cpu_is_mx25() || cpu_is_mx35())
#define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27() || cpu_is_mx21())
-#define nfc_is_v3_2() (cpu_is_mx51() || cpu_is_mx53())
-#define nfc_is_v3() nfc_is_v3_2()
+#define nfc_is_v3_2a() cpu_is_mx51()
+#define nfc_is_v3_2b() cpu_is_mx53()
/* Addresses for NFC registers */
#define NFC_V1_V2_BUF_SIZE (host->regs + 0x00)
#define NFC_V3_CONFIG2_2CMD_PHASES (1 << 4)
#define NFC_V3_CONFIG2_NUM_ADDR_PHASE0 (1 << 5)
#define NFC_V3_CONFIG2_ECC_MODE_8 (1 << 6)
-#define NFC_V3_CONFIG2_PPB(x) (((x) & 0x3) << 7)
+#define NFC_V3_CONFIG2_PPB(x, shift) (((x) & 0x3) << shift)
#define NFC_V3_CONFIG2_NUM_ADDR_PHASE1(x) (((x) & 0x3) << 12)
#define NFC_V3_CONFIG2_INT_MSK (1 << 15)
#define NFC_V3_CONFIG2_ST_CMD(x) (((x) & 0xff) << 24)
int spare_len;
int eccbytes;
int eccsize;
+ int ppb_shift;
};
struct mxc_nand_host {
host->buf_start += n;
}
-/* Used by the upper layer to verify the data in NAND Flash
- * with the data in the buf. */
-static int mxc_nand_verify_buf(struct mtd_info *mtd,
- const u_char *buf, int len)
-{
- return -EFAULT;
-}
-
/* This function is used by upper layer for select and
* deselect of the NAND chip */
static void mxc_nand_select_chip_v1_v3(struct mtd_info *mtd, int chip)
if (chip == -1) {
/* Disable the NFC clock */
if (host->clk_act) {
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk);
host->clk_act = 0;
}
return;
if (!host->clk_act) {
/* Enable the NFC clock */
- clk_enable(host->clk);
+ clk_prepare_enable(host->clk);
host->clk_act = 1;
}
}
if (mtd->writesize) {
- config2 |= NFC_V3_CONFIG2_PPB(ffs(mtd->erasesize / mtd->writesize) - 6);
+ config2 |= NFC_V3_CONFIG2_PPB(
+ ffs(mtd->erasesize / mtd->writesize) - 6,
+ host->devtype_data->ppb_shift);
host->eccsize = get_eccsize(mtd);
if (host->eccsize == 8)
config2 |= NFC_V3_CONFIG2_ECC_MODE_8;
.eccsize = 0,
};
-/* v3: i.MX51, i.MX53 */
+/* v3.2a: i.MX51 */
static const struct mxc_nand_devtype_data imx51_nand_devtype_data = {
.preset = preset_v3,
.send_cmd = send_cmd_v3,
.spare_len = 64,
.eccbytes = 0,
.eccsize = 0,
+ .ppb_shift = 7,
+};
+
+/* v3.2b: i.MX53 */
+static const struct mxc_nand_devtype_data imx53_nand_devtype_data = {
+ .preset = preset_v3,
+ .send_cmd = send_cmd_v3,
+ .send_addr = send_addr_v3,
+ .send_page = send_page_v3,
+ .send_read_id = send_read_id_v3,
+ .get_dev_status = get_dev_status_v3,
+ .check_int = check_int_v3,
+ .irq_control = irq_control_v3,
+ .get_ecc_status = get_ecc_status_v3,
+ .ecclayout_512 = &nandv2_hw_eccoob_smallpage,
+ .ecclayout_2k = &nandv2_hw_eccoob_largepage,
+ .ecclayout_4k = &nandv2_hw_eccoob_smallpage, /* XXX: needs fix */
+ .select_chip = mxc_nand_select_chip_v1_v3,
+ .correct_data = mxc_nand_correct_data_v2_v3,
+ .irqpending_quirk = 0,
+ .needs_ip = 1,
+ .regs_offset = 0,
+ .spare0_offset = 0x1000,
+ .axi_offset = 0x1e00,
+ .spare_len = 64,
+ .eccbytes = 0,
+ .eccsize = 0,
+ .ppb_shift = 8,
};
#ifdef CONFIG_OF_MTD
}, {
.compatible = "fsl,imx51-nand",
.data = &imx51_nand_devtype_data,
+ }, {
+ .compatible = "fsl,imx53-nand",
+ .data = &imx53_nand_devtype_data,
},
{ /* sentinel */ }
};
host->devtype_data = &imx27_nand_devtype_data;
} else if (nfc_is_v21()) {
host->devtype_data = &imx25_nand_devtype_data;
- } else if (nfc_is_v3_2()) {
+ } else if (nfc_is_v3_2a()) {
host->devtype_data = &imx51_nand_devtype_data;
+ } else if (nfc_is_v3_2b()) {
+ host->devtype_data = &imx53_nand_devtype_data;
} else
BUG();
return 0;
}
-static int __init mxcnd_probe(struct platform_device *pdev)
+static int __devinit mxcnd_probe(struct platform_device *pdev)
{
struct nand_chip *this;
struct mtd_info *mtd;
int err = 0;
/* Allocate memory for MTD device structure and private data */
- host = kzalloc(sizeof(struct mxc_nand_host) + NAND_MAX_PAGESIZE +
- NAND_MAX_OOBSIZE, GFP_KERNEL);
+ host = devm_kzalloc(&pdev->dev, sizeof(struct mxc_nand_host) +
+ NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE, GFP_KERNEL);
if (!host)
return -ENOMEM;
this->read_word = mxc_nand_read_word;
this->write_buf = mxc_nand_write_buf;
this->read_buf = mxc_nand_read_buf;
- this->verify_buf = mxc_nand_verify_buf;
- host->clk = clk_get(&pdev->dev, "nfc");
- if (IS_ERR(host->clk)) {
- err = PTR_ERR(host->clk);
- goto eclk;
- }
+ host->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(host->clk))
+ return PTR_ERR(host->clk);
- clk_prepare_enable(host->clk);
- host->clk_act = 1;
+ err = mxcnd_probe_dt(host);
+ if (err > 0)
+ err = mxcnd_probe_pdata(host);
+ if (err < 0)
+ return err;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- err = -ENODEV;
- goto eres;
- }
+ if (host->devtype_data->needs_ip) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ host->regs_ip = devm_request_and_ioremap(&pdev->dev, res);
+ if (!host->regs_ip)
+ return -ENOMEM;
- host->base = ioremap(res->start, resource_size(res));
- if (!host->base) {
- err = -ENOMEM;
- goto eres;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ } else {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
}
- host->main_area0 = host->base;
+ if (!res)
+ return -ENODEV;
- err = mxcnd_probe_dt(host);
- if (err > 0)
- err = mxcnd_probe_pdata(host);
- if (err < 0)
- goto eirq;
+ host->base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!host->base)
+ return -ENOMEM;
+
+ host->main_area0 = host->base;
if (host->devtype_data->regs_offset)
host->regs = host->base + host->devtype_data->regs_offset;
this->ecc.size = 512;
this->ecc.layout = host->devtype_data->ecclayout_512;
- if (host->devtype_data->needs_ip) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res) {
- err = -ENODEV;
- goto eirq;
- }
- host->regs_ip = ioremap(res->start, resource_size(res));
- if (!host->regs_ip) {
- err = -ENOMEM;
- goto eirq;
- }
- }
-
if (host->pdata.hw_ecc) {
this->ecc.calculate = mxc_nand_calculate_ecc;
this->ecc.hwctl = mxc_nand_enable_hwecc;
*/
host->devtype_data->irq_control(host, 0);
- err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host);
+ err = devm_request_irq(&pdev->dev, host->irq, mxc_nfc_irq,
+ IRQF_DISABLED, DRIVER_NAME, host);
if (err)
- goto eirq;
+ return err;
+
+ clk_prepare_enable(host->clk);
+ host->clk_act = 1;
/*
* Now that we "own" the interrupt make sure the interrupt mask bit is
return 0;
escan:
- free_irq(host->irq, host);
-eirq:
- if (host->regs_ip)
- iounmap(host->regs_ip);
- iounmap(host->base);
-eres:
- clk_put(host->clk);
-eclk:
- kfree(host);
+ clk_disable_unprepare(host->clk);
return err;
}
{
struct mxc_nand_host *host = platform_get_drvdata(pdev);
- clk_put(host->clk);
-
platform_set_drvdata(pdev, NULL);
nand_release(&host->mtd);
- free_irq(host->irq, host);
- if (host->regs_ip)
- iounmap(host->regs_ip);
- iounmap(host->base);
- kfree(host);
return 0;
}
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(mxcnd_dt_ids),
},
+ .probe = mxcnd_probe,
.remove = __devexit_p(mxcnd_remove),
};
-
-static int __init mxc_nd_init(void)
-{
- return platform_driver_probe(&mxcnd_driver, mxcnd_probe);
-}
-
-static void __exit mxc_nd_cleanup(void)
-{
- /* Unregister the device structure */
- platform_driver_unregister(&mxcnd_driver);
-}
-
-module_init(mxc_nd_init);
-module_exit(mxc_nd_cleanup);
+module_platform_driver(mxcnd_driver);
MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("MXC NAND MTD driver");
#include <plat/dma.h>
#include <plat/gpmc.h>
- #include <plat/nand.h>
+ #include <linux/platform_data/mtd-nand-omap2.h>
#define DRIVER_NAME "omap2-nand"
#define OMAP_NAND_TIMEOUT_MS 5000
#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
+ #define PREFETCH_CONFIG1_CS_SHIFT 24
+ #define ECC_CONFIG_CS_SHIFT 1
+ #define CS_MASK 0x7
+ #define ENABLE_PREFETCH (0x1 << 7)
+ #define DMA_MPU_MODE_SHIFT 2
+ #define ECCSIZE1_SHIFT 22
+ #define ECC1RESULTSIZE 0x1
+ #define ECCCLEAR 0x100
+ #define ECC1 0x1
+
/* oob info generated runtime depending on ecc algorithm and layout selected */
static struct nand_ecclayout omap_oobinfo;
/* Define some generic bad / good block scan pattern which are used
int gpmc_cs;
unsigned long phys_base;
+ unsigned long mem_size;
struct completion comp;
struct dma_chan *dma;
- int gpmc_irq;
+ int gpmc_irq_fifo;
+ int gpmc_irq_count;
enum {
OMAP_NAND_IO_READ = 0, /* read */
OMAP_NAND_IO_WRITE, /* write */
} iomode;
u_char *buf;
int buf_len;
+ struct gpmc_nand_regs reg;
#ifdef CONFIG_MTD_NAND_OMAP_BCH
struct bch_control *bch;
#endif
};
+ /**
+ * omap_prefetch_enable - configures and starts prefetch transfer
+ * @cs: cs (chip select) number
+ * @fifo_th: fifo threshold to be used for read/ write
+ * @dma_mode: dma mode enable (1) or disable (0)
+ * @u32_count: number of bytes to be transferred
+ * @is_write: prefetch read(0) or write post(1) mode
+ */
+ static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode,
+ unsigned int u32_count, int is_write, struct omap_nand_info *info)
+ {
+ u32 val;
+
+ if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX)
+ return -1;
+
+ if (readl(info->reg.gpmc_prefetch_control))
+ return -EBUSY;
+
+ /* Set the number of bytes to be prefetched */
+ writel(u32_count, info->reg.gpmc_prefetch_config2);
+
+ /* Set dma/mpu mode, the prefetch read / post write and
+ * enable the engine. Set which cs has requested it.
+ */
+ val = ((cs << PREFETCH_CONFIG1_CS_SHIFT) |
+ PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH |
+ (dma_mode << DMA_MPU_MODE_SHIFT) | (0x1 & is_write));
+ writel(val, info->reg.gpmc_prefetch_config1);
+
+ /* Start the prefetch engine */
+ writel(0x1, info->reg.gpmc_prefetch_control);
+
+ return 0;
+ }
+
+ /**
+ * omap_prefetch_reset - disables and stops the prefetch engine
+ */
+ static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
+ {
+ u32 config1;
+
+ /* check if the same module/cs is trying to reset */
+ config1 = readl(info->reg.gpmc_prefetch_config1);
+ if (((config1 >> PREFETCH_CONFIG1_CS_SHIFT) & CS_MASK) != cs)
+ return -EINVAL;
+
+ /* Stop the PFPW engine */
+ writel(0x0, info->reg.gpmc_prefetch_control);
+
+ /* Reset/disable the PFPW engine */
+ writel(0x0, info->reg.gpmc_prefetch_config1);
+
+ return 0;
+ }
+
/**
* omap_hwcontrol - hardware specific access to control-lines
* @mtd: MTD device structure
if (cmd != NAND_CMD_NONE) {
if (ctrl & NAND_CLE)
- gpmc_nand_write(info->gpmc_cs, GPMC_NAND_COMMAND, cmd);
+ writeb(cmd, info->reg.gpmc_nand_command);
else if (ctrl & NAND_ALE)
- gpmc_nand_write(info->gpmc_cs, GPMC_NAND_ADDRESS, cmd);
+ writeb(cmd, info->reg.gpmc_nand_address);
else /* NAND_NCE */
- gpmc_nand_write(info->gpmc_cs, GPMC_NAND_DATA, cmd);
+ writeb(cmd, info->reg.gpmc_nand_data);
}
}
iowrite8(*p++, info->nand.IO_ADDR_W);
/* wait until buffer is available for write */
do {
- status = gpmc_read_status(GPMC_STATUS_BUFFER);
+ status = readl(info->reg.gpmc_status) &
+ GPMC_STATUS_BUFF_EMPTY;
} while (!status);
}
}
iowrite16(*p++, info->nand.IO_ADDR_W);
/* wait until buffer is available for write */
do {
- status = gpmc_read_status(GPMC_STATUS_BUFFER);
+ status = readl(info->reg.gpmc_status) &
+ GPMC_STATUS_BUFF_EMPTY;
} while (!status);
}
}
}
/* configure and start prefetch transfer */
- ret = gpmc_prefetch_enable(info->gpmc_cs,
- PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0);
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0, info);
if (ret) {
/* PFPW engine is busy, use cpu copy method */
if (info->nand.options & NAND_BUSWIDTH_16)
omap_read_buf8(mtd, (u_char *)p, len);
} else {
do {
- r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+ r_count = readl(info->reg.gpmc_prefetch_status);
+ r_count = GPMC_PREFETCH_STATUS_FIFO_CNT(r_count);
r_count = r_count >> 2;
ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
p += r_count;
len -= r_count << 2;
} while (len);
/* disable and stop the PFPW engine */
- gpmc_prefetch_reset(info->gpmc_cs);
+ omap_prefetch_reset(info->gpmc_cs, info);
}
}
int i = 0, ret = 0;
u16 *p = (u16 *)buf;
unsigned long tim, limit;
+ u32 val;
/* take care of subpage writes */
if (len % 2 != 0) {
}
/* configure and start prefetch transfer */
- ret = gpmc_prefetch_enable(info->gpmc_cs,
- PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1);
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1, info);
if (ret) {
/* PFPW engine is busy, use cpu copy method */
if (info->nand.options & NAND_BUSWIDTH_16)
omap_write_buf8(mtd, (u_char *)p, len);
} else {
while (len) {
- w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+ w_count = readl(info->reg.gpmc_prefetch_status);
+ w_count = GPMC_PREFETCH_STATUS_FIFO_CNT(w_count);
w_count = w_count >> 1;
for (i = 0; (i < w_count) && len; i++, len -= 2)
iowrite16(*p++, info->nand.IO_ADDR_W);
tim = 0;
limit = (loops_per_jiffy *
msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
- while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+ do {
cpu_relax();
+ val = readl(info->reg.gpmc_prefetch_status);
+ val = GPMC_PREFETCH_STATUS_COUNT(val);
+ } while (val && (tim++ < limit));
/* disable and stop the PFPW engine */
- gpmc_prefetch_reset(info->gpmc_cs);
+ omap_prefetch_reset(info->gpmc_cs, info);
}
}
}
/*
- * omap_nand_dma_transfer: configer and start dma transfer
+ * omap_nand_dma_transfer: configure and start dma transfer
* @mtd: MTD device structure
* @addr: virtual address in RAM of source/destination
* @len: number of data bytes to be transferred
unsigned long tim, limit;
unsigned n;
int ret;
+ u32 val;
if (addr >= high_memory) {
struct page *p1;
tx->callback_param = &info->comp;
dmaengine_submit(tx);
- /* configure and start prefetch transfer */
- ret = gpmc_prefetch_enable(info->gpmc_cs,
- PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
+ /* configure and start prefetch transfer */
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write, info);
if (ret)
/* PFPW engine is busy, use cpu copy method */
goto out_copy_unmap;
wait_for_completion(&info->comp);
tim = 0;
limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
- while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+
+ do {
cpu_relax();
+ val = readl(info->reg.gpmc_prefetch_status);
+ val = GPMC_PREFETCH_STATUS_COUNT(val);
+ } while (val && (tim++ < limit));
/* disable and stop the PFPW engine */
- gpmc_prefetch_reset(info->gpmc_cs);
+ omap_prefetch_reset(info->gpmc_cs, info);
dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
return 0;
}
/*
- * omap_nand_irq - GMPC irq handler
+ * omap_nand_irq - GPMC irq handler
* @this_irq: gpmc irq number
* @dev: omap_nand_info structure pointer is passed here
*/
{
struct omap_nand_info *info = (struct omap_nand_info *) dev;
u32 bytes;
- u32 irq_stat;
- irq_stat = gpmc_read_status(GPMC_GET_IRQ_STATUS);
- bytes = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+ bytes = readl(info->reg.gpmc_prefetch_status);
+ bytes = GPMC_PREFETCH_STATUS_FIFO_CNT(bytes);
bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */
if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
- if (irq_stat & 0x2)
+ if (this_irq == info->gpmc_irq_count)
goto done;
if (info->buf_len && (info->buf_len < bytes))
(u32 *)info->buf, bytes >> 2);
info->buf = info->buf + bytes;
- if (irq_stat & 0x2)
+ if (this_irq == info->gpmc_irq_count)
goto done;
}
- gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
return IRQ_HANDLED;
done:
complete(&info->comp);
- /* disable irq */
- gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 0);
- /* clear status */
- gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
+ disable_irq_nosync(info->gpmc_irq_fifo);
+ disable_irq_nosync(info->gpmc_irq_count);
return IRQ_HANDLED;
}
init_completion(&info->comp);
/* configure and start prefetch transfer */
- ret = gpmc_prefetch_enable(info->gpmc_cs,
- PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0);
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0, info);
if (ret)
/* PFPW engine is busy, use cpu copy method */
goto out_copy;
info->buf_len = len;
- /* enable irq */
- gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
- (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+ enable_irq(info->gpmc_irq_count);
+ enable_irq(info->gpmc_irq_fifo);
/* waiting for read to complete */
wait_for_completion(&info->comp);
/* disable and stop the PFPW engine */
- gpmc_prefetch_reset(info->gpmc_cs);
+ omap_prefetch_reset(info->gpmc_cs, info);
return;
out_copy:
struct omap_nand_info, mtd);
int ret = 0;
unsigned long tim, limit;
+ u32 val;
if (len <= mtd->oobsize) {
omap_write_buf_pref(mtd, buf, len);
init_completion(&info->comp);
/* configure and start prefetch transfer : size=24 */
- ret = gpmc_prefetch_enable(info->gpmc_cs,
- (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1);
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1, info);
if (ret)
/* PFPW engine is busy, use cpu copy method */
goto out_copy;
info->buf_len = len;
- /* enable irq */
- gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
- (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+ enable_irq(info->gpmc_irq_count);
+ enable_irq(info->gpmc_irq_fifo);
/* waiting for write to complete */
wait_for_completion(&info->comp);
+
/* wait for data to be flushed out before resetting the prefetch engine */
tim = 0;
limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
- while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+ do {
+ val = readl(info->reg.gpmc_prefetch_status);
+ val = GPMC_PREFETCH_STATUS_COUNT(val);
cpu_relax();
+ } while (val && (tim++ < limit));
/* disable and stop the PFPW engine */
- gpmc_prefetch_reset(info->gpmc_cs);
+ omap_prefetch_reset(info->gpmc_cs, info);
return;
out_copy:
omap_write_buf8(mtd, buf, len);
}
-/**
- * omap_verify_buf - Verify chip data against buffer
- * @mtd: MTD device structure
- * @buf: buffer containing the data to compare
- * @len: number of bytes to compare
- */
-static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
-{
- struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
- mtd);
- u16 *p = (u16 *) buf;
-
- len >>= 1;
- while (len--) {
- if (*p++ != cpu_to_le16(readw(info->nand.IO_ADDR_R)))
- return -EFAULT;
- }
-
- return 0;
-}
-
/**
* gen_true_ecc - This function will generate true ECC value
* @ecc_buf: buffer to store ecc code
{
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
- return gpmc_calculate_ecc(info->gpmc_cs, dat, ecc_code);
+ u32 val;
+
+ val = readl(info->reg.gpmc_ecc_config);
+ if (((val >> ECC_CONFIG_CS_SHIFT) & ~CS_MASK) != info->gpmc_cs)
+ return -EINVAL;
+
+ /* read ecc result */
+ val = readl(info->reg.gpmc_ecc1_result);
+ *ecc_code++ = val; /* P128e, ..., P1e */
+ *ecc_code++ = val >> 16; /* P128o, ..., P1o */
+ /* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
+ *ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
+
+ return 0;
}
/**
mtd);
struct nand_chip *chip = mtd->priv;
unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+ u32 val;
+
+ /* clear ecc and enable bits */
+ val = ECCCLEAR | ECC1;
+ writel(val, info->reg.gpmc_ecc_control);
- gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size);
+ /* program ecc and result sizes */
+ val = ((((info->nand.ecc.size >> 1) - 1) << ECCSIZE1_SHIFT) |
+ ECC1RESULTSIZE);
+ writel(val, info->reg.gpmc_ecc_size_config);
+
+ switch (mode) {
+ case NAND_ECC_READ:
+ case NAND_ECC_WRITE:
+ writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
+ break;
+ case NAND_ECC_READSYN:
+ writel(ECCCLEAR, info->reg.gpmc_ecc_control);
+ break;
+ default:
+ dev_info(&info->pdev->dev,
+ "error: unrecognized Mode[%d]!\n", mode);
+ break;
+ }
+
+ /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
+ val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
+ writel(val, info->reg.gpmc_ecc_config);
}
/**
else
timeo += (HZ * 20) / 1000;
- gpmc_nand_write(info->gpmc_cs,
- GPMC_NAND_COMMAND, (NAND_CMD_STATUS & 0xFF));
+ writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command);
while (time_before(jiffies, timeo)) {
- status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
+ status = readb(info->reg.gpmc_nand_data);
if (status & NAND_STATUS_READY)
break;
cond_resched();
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
- val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
+ val = readl(info->reg.gpmc_status);
+
if ((val & 0x100) == 0x100) {
- /* Clear IRQ Interrupt */
- val |= 0x100;
- val &= ~(0x0);
- gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, val);
+ return 1;
} else {
- unsigned int cnt = 0;
- while (cnt++ < 0x1FF) {
- if ((val & 0x100) == 0x100)
- return 0;
- val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
- }
+ return 0;
}
-
- return 1;
}
#ifdef CONFIG_MTD_NAND_OMAP_BCH
int i, offset;
dma_cap_mask_t mask;
unsigned sig;
+ struct resource *res;
pdata = pdev->dev.platform_data;
if (pdata == NULL) {
info->pdev = pdev;
info->gpmc_cs = pdata->cs;
- info->phys_base = pdata->phys_base;
+ info->reg = pdata->reg;
info->mtd.priv = &info->nand;
info->mtd.name = dev_name(&pdev->dev);
info->nand.options = pdata->devsize;
info->nand.options |= NAND_SKIP_BBTSCAN;
- /* NAND write protect off */
- gpmc_cs_configure(info->gpmc_cs, GPMC_CONFIG_WP, 0);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ err = -EINVAL;
+ dev_err(&pdev->dev, "error getting memory resource\n");
+ goto out_free_info;
+ }
- if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
+ info->phys_base = res->start;
+ info->mem_size = resource_size(res);
+
+ if (!request_mem_region(info->phys_base, info->mem_size,
pdev->dev.driver->name)) {
err = -EBUSY;
goto out_free_info;
}
- info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE);
+ info->nand.IO_ADDR_R = ioremap(info->phys_base, info->mem_size);
if (!info->nand.IO_ADDR_R) {
err = -ENOMEM;
goto out_release_mem_region;
/*
* If RDY/BSY line is connected to OMAP then use the omap ready
- * funcrtion and the generic nand_wait function which reads the status
- * register after monitoring the RDY/BSY line.Otherwise use a standard
+ * function and the generic nand_wait function which reads the status
+ * register after monitoring the RDY/BSY line. Otherwise use a standard
* chip delay which is slightly more than tR (AC Timing) of the NAND
* device and read status register until you get a failure or success
*/
break;
case NAND_OMAP_PREFETCH_IRQ:
- err = request_irq(pdata->gpmc_irq,
- omap_nand_irq, IRQF_SHARED, "gpmc-nand", info);
+ info->gpmc_irq_fifo = platform_get_irq(pdev, 0);
+ if (info->gpmc_irq_fifo <= 0) {
+ dev_err(&pdev->dev, "error getting fifo irq\n");
+ err = -ENODEV;
+ goto out_release_mem_region;
+ }
+ err = request_irq(info->gpmc_irq_fifo, omap_nand_irq,
+ IRQF_SHARED, "gpmc-nand-fifo", info);
if (err) {
dev_err(&pdev->dev, "requesting irq(%d) error:%d",
- pdata->gpmc_irq, err);
+ info->gpmc_irq_fifo, err);
+ info->gpmc_irq_fifo = 0;
+ goto out_release_mem_region;
+ }
+
+ info->gpmc_irq_count = platform_get_irq(pdev, 1);
+ if (info->gpmc_irq_count <= 0) {
+ dev_err(&pdev->dev, "error getting count irq\n");
+ err = -ENODEV;
+ goto out_release_mem_region;
+ }
+ err = request_irq(info->gpmc_irq_count, omap_nand_irq,
+ IRQF_SHARED, "gpmc-nand-count", info);
+ if (err) {
+ dev_err(&pdev->dev, "requesting irq(%d) error:%d",
+ info->gpmc_irq_count, err);
+ info->gpmc_irq_count = 0;
goto out_release_mem_region;
- } else {
- info->gpmc_irq = pdata->gpmc_irq;
- info->nand.read_buf = omap_read_buf_irq_pref;
- info->nand.write_buf = omap_write_buf_irq_pref;
}
+
+ info->nand.read_buf = omap_read_buf_irq_pref;
+ info->nand.write_buf = omap_write_buf_irq_pref;
+
break;
default:
goto out_release_mem_region;
}
- info->nand.verify_buf = omap_verify_buf;
-
- /* selsect the ecc type */
+ /* select the ecc type */
if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT)
info->nand.ecc.mode = NAND_ECC_SOFT;
else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) ||
out_release_mem_region:
if (info->dma)
dma_release_channel(info->dma);
- release_mem_region(info->phys_base, NAND_IO_SIZE);
+ if (info->gpmc_irq_count > 0)
+ free_irq(info->gpmc_irq_count, info);
+ if (info->gpmc_irq_fifo > 0)
+ free_irq(info->gpmc_irq_fifo, info);
+ release_mem_region(info->phys_base, info->mem_size);
out_free_info:
kfree(info);
if (info->dma)
dma_release_channel(info->dma);
- if (info->gpmc_irq)
- free_irq(info->gpmc_irq, info);
+ if (info->gpmc_irq_count > 0)
+ free_irq(info->gpmc_irq_count, info);
+ if (info->gpmc_irq_fifo > 0)
+ free_irq(info->gpmc_irq_fifo, info);
/* Release NAND device, its internal structures and partitions */
nand_release(&info->mtd);
iounmap(info->nand.IO_ADDR_R);
- kfree(&info->mtd);
+ release_mem_region(info->phys_base, NAND_IO_SIZE);
+ kfree(info);
return 0;
}
#include <linux/err.h>
#include <asm/io.h>
#include <asm/sizes.h>
- #include <plat/orion_nand.h>
-#include <mach/hardware.h>
+ #include <linux/platform_data/mtd-orion_nand.h>
static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/slab.h>
+ #include <linux/of.h>
+ #include <linux/of_device.h>
#include <mach/dma.h>
- #include <plat/pxa3xx_nand.h>
+ #include <linux/platform_data/mtd-nand-pxa3xx.h>
#define CHIP_DELAY_TIMEOUT (2 * HZ/10)
#define NAND_STOP_DELAY (2 * HZ/50)
info->state = STATE_IDLE;
}
-static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
+static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
struct nand_chip *chip, const uint8_t *buf, int oob_required)
{
chip->write_buf(mtd, buf, mtd->writesize);
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
}
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
info->buf_start += real_len;
}
-static int pxa3xx_nand_verify_buf(struct mtd_info *mtd,
- const uint8_t *buf, int len)
-{
- return 0;
-}
-
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
return;
chip->ecc.size = host->page_size;
chip->ecc.strength = 1;
- chip->options |= NAND_NO_READRDY;
if (host->reg_ndcr & NDCR_DWIDTH_M)
chip->options |= NAND_BUSWIDTH_16;
struct pxa3xx_nand_platform_data *pdata;
struct pxa3xx_nand_info *info;
struct pxa3xx_nand_host *host;
- struct nand_chip *chip;
+ struct nand_chip *chip = NULL;
struct mtd_info *mtd;
struct resource *r;
int ret, irq, cs;
chip->read_byte = pxa3xx_nand_read_byte;
chip->read_buf = pxa3xx_nand_read_buf;
chip->write_buf = pxa3xx_nand_write_buf;
- chip->verify_buf = pxa3xx_nand_verify_buf;
}
spin_lock_init(&chip->controller->lock);
}
clk_enable(info->clk);
- r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (r == NULL) {
- dev_err(&pdev->dev, "no resource defined for data DMA\n");
- ret = -ENXIO;
- goto fail_put_clk;
- }
- info->drcmr_dat = r->start;
+ /*
+ * This is a dirty hack to make this driver work from devicetree
+ * bindings. It can be removed once we have a proper DMA controller
+ * framework for DT.
+ */
+ if (pdev->dev.of_node && cpu_is_pxa3xx()) {
+ info->drcmr_dat = 97;
+ info->drcmr_cmd = 99;
+ } else {
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "no resource defined for data DMA\n");
+ ret = -ENXIO;
+ goto fail_put_clk;
+ }
+ info->drcmr_dat = r->start;
- r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (r == NULL) {
- dev_err(&pdev->dev, "no resource defined for command DMA\n");
- ret = -ENXIO;
- goto fail_put_clk;
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "no resource defined for command DMA\n");
+ ret = -ENXIO;
+ goto fail_put_clk;
+ }
+ info->drcmr_cmd = r->start;
}
- info->drcmr_cmd = r->start;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
return 0;
}
+ #ifdef CONFIG_OF
+ static struct of_device_id pxa3xx_nand_dt_ids[] = {
+ { .compatible = "marvell,pxa3xx-nand" },
+ {}
+ };
+ MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
+
+ static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
+ {
+ struct pxa3xx_nand_platform_data *pdata;
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *of_id =
+ of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
+
+ if (!of_id)
+ return 0;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
+ pdata->enable_arbiter = 1;
+ if (of_get_property(np, "marvell,nand-keep-config", NULL))
+ pdata->keep_config = 1;
+ of_property_read_u32(np, "num-cs", &pdata->num_cs);
+
+ pdev->dev.platform_data = pdata;
+
+ return 0;
+ }
+ #else
+ static inline int pxa3xx_nand_probe_dt(struct platform_device *pdev)
+ {
+ return 0;
+ }
+ #endif
+
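For reference, a minimal sketch of a device tree node that the pxa3xx_nand_probe_dt() helper above would parse. Only the compatible string and the three optional properties come from the code; the node name, reg window and interrupt number are illustrative placeholders, not values taken from this patch.

	nand@43100000 {
		compatible = "marvell,pxa3xx-nand";
		reg = <0x43100000 90>;
		interrupts = <45>;
		marvell,nand-enable-arbiter;
		marvell,nand-keep-config;
		num-cs = <1>;
	};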
static int pxa3xx_nand_probe(struct platform_device *pdev)
{
struct pxa3xx_nand_platform_data *pdata;
+ struct mtd_part_parser_data ppdata = {};
struct pxa3xx_nand_info *info;
int ret, cs, probe_success;
+ ret = pxa3xx_nand_probe_dt(pdev);
+ if (ret)
+ return ret;
+
pdata = pdev->dev.platform_data;
if (!pdata) {
dev_err(&pdev->dev, "no platform data defined\n");
continue;
}
+ ppdata.of_node = pdev->dev.of_node;
ret = mtd_device_parse_register(info->host[cs]->mtd, NULL,
- NULL, pdata->parts[cs],
+ &ppdata, pdata->parts[cs],
pdata->nr_parts[cs]);
if (!ret)
probe_success = 1;
static struct platform_driver pxa3xx_nand_driver = {
.driver = {
.name = "pxa3xx-nand",
+ .of_match_table = of_match_ptr(pxa3xx_nand_dt_ids),
},
.probe = pxa3xx_nand_probe,
.remove = pxa3xx_nand_remove,
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) "nand-s3c2410: " fmt
+
#ifdef CONFIG_MTD_NAND_S3C2410_DEBUG
#define DEBUG
#endif
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
-#include <asm/io.h>
-
#include <plat/regs-nand.h>
- #include <plat/nand.h>
+ #include <linux/platform_data/mtd-nand-s3c2410.h>
-#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
-static int hardware_ecc = 1;
-#else
-static int hardware_ecc = 0;
-#endif
-
-#ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP
-static const int clock_stop = 1;
-#else
-static const int clock_stop = 0;
-#endif
-
-
/* new oob placement block for use with hardware ecc generation
*/
* @mtds: An array of MTD instances on this controller.
* @platform: The platform data for this board.
* @device: The platform device we bound to.
- * @area: The IO area resource that came from request_mem_region().
* @clk: The clock resource for this controller.
- * @regs: The area mapped for the hardware registers described by @area.
+ * @regs: The area mapped for the hardware registers.
* @sel_reg: Pointer to the register controlling the NAND selection.
* @sel_bit: The bit in @sel_reg to select the NAND chip.
* @mtd_count: The number of MTDs created from this controller.
/* device info */
struct device *device;
- struct resource *area;
struct clk *clk;
void __iomem *regs;
void __iomem *sel_reg;
static inline int allow_clk_suspend(struct s3c2410_nand_info *info)
{
- return clock_stop;
+#ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP
+ return 1;
+#else
+ return 0;
+#endif
}
/**
pr_debug("result %d from %ld, %d\n", result, clk, wanted);
if (result > max) {
- printk("%d ns is too big for current clock rate %ld\n", wanted, clk);
+ pr_err("%d ns is too big for current clock rate %ld\n",
+ wanted, clk);
return -1;
}
return result;
}
-#define to_ns(ticks,clk) (((ticks) * NS_IN_KHZ) / (unsigned int)(clk))
+#define to_ns(ticks, clk) (((ticks) * NS_IN_KHZ) / (unsigned int)(clk))
/* controller setup */
}
dev_info(info->device, "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n",
- tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate), twrph1, to_ns(twrph1, clkrate));
+ tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate),
+ twrph1, to_ns(twrph1, clkrate));
switch (info->cpu_type) {
case TYPE_S3C2410:
if (ret < 0)
return ret;
- switch (info->cpu_type) {
- case TYPE_S3C2410:
+ switch (info->cpu_type) {
+ case TYPE_S3C2410:
default:
break;
- case TYPE_S3C2440:
- case TYPE_S3C2412:
+ case TYPE_S3C2440:
+ case TYPE_S3C2412:
/* enable the controller and de-assert nFCE */
writel(S3C2440_NFCONT_ENABLE, info->regs + S3C2440_NFCONT);
/* ECC handling functions */
+#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
u_char *read_ecc, u_char *calc_ecc)
{
diff1 = read_ecc[1] ^ calc_ecc[1];
diff2 = read_ecc[2] ^ calc_ecc[2];
- pr_debug("%s: rd %02x%02x%02x calc %02x%02x%02x diff %02x%02x%02x\n",
- __func__,
- read_ecc[0], read_ecc[1], read_ecc[2],
- calc_ecc[0], calc_ecc[1], calc_ecc[2],
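+ /* %*phN is the kernel printk hex-dump specifier: the '*' width gives
+ * the number of bytes (3 here) printed from the buffer, no separators. */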
+ pr_debug("%s: rd %*phN calc %*phN diff %02x%02x%02x\n",
+ __func__, 3, read_ecc, 3, calc_ecc,
diff0, diff1, diff2);
if (diff0 == 0 && diff1 == 0 && diff2 == 0)
unsigned long ctrl;
ctrl = readl(info->regs + S3C2440_NFCONT);
- writel(ctrl | S3C2412_NFCONT_INIT_MAIN_ECC, info->regs + S3C2440_NFCONT);
+ writel(ctrl | S3C2412_NFCONT_INIT_MAIN_ECC,
+ info->regs + S3C2440_NFCONT);
}
static void s3c2440_nand_enable_hwecc(struct mtd_info *mtd, int mode)
writel(ctrl | S3C2440_NFCONT_INITECC, info->regs + S3C2440_NFCONT);
}
-static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
+static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
{
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
ecc_code[1] = readb(info->regs + S3C2410_NFECC + 1);
ecc_code[2] = readb(info->regs + S3C2410_NFECC + 2);
- pr_debug("%s: returning ecc %02x%02x%02x\n", __func__,
- ecc_code[0], ecc_code[1], ecc_code[2]);
+ pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code);
return 0;
}
-static int s3c2412_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
+static int s3c2412_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
{
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
unsigned long ecc = readl(info->regs + S3C2412_NFMECC0);
ecc_code[1] = ecc >> 8;
ecc_code[2] = ecc >> 16;
- pr_debug("calculate_ecc: returning ecc %02x,%02x,%02x\n", ecc_code[0], ecc_code[1], ecc_code[2]);
+ pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code);
return 0;
}
-static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
+static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
{
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
unsigned long ecc = readl(info->regs + S3C2440_NFMECC0);
return 0;
}
+#endif
/* over-ride the standard functions for a little more speed. We can
* use read/write block to move the data buffers to/from the controller
}
}
-static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf,
+ int len)
{
struct nand_chip *this = mtd->priv;
writesb(this->IO_ADDR_W, buf, len);
}
-static void s3c2440_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+static void s3c2440_nand_write_buf(struct mtd_info *mtd, const u_char *buf,
+ int len)
{
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
CPUFREQ_TRANSITION_NOTIFIER);
}
-static inline void s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
+static inline void
+s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
{
cpufreq_unregister_notifier(&info->freq_transition,
CPUFREQ_TRANSITION_NOTIFIER);
return 0;
}
-static inline void s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
+static inline void
+s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
{
}
#endif
pr_debug("releasing mtd %d (%p)\n", mtdno, ptr);
nand_release(&ptr->mtd);
}
-
- kfree(info->mtds);
}
/* free the common resources */
- if (!IS_ERR(info->clk)) {
+ if (!IS_ERR(info->clk))
s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
- clk_put(info->clk);
- }
-
- if (info->regs != NULL) {
- iounmap(info->regs);
- info->regs = NULL;
- }
-
- if (info->area != NULL) {
- release_resource(info->area);
- kfree(info->area);
- info->area = NULL;
- }
-
- kfree(info);
return 0;
}
dev_info(info->device, "System booted from NAND\n");
break;
- }
+ }
chip->IO_ADDR_R = chip->IO_ADDR_W;
nmtd->mtd.owner = THIS_MODULE;
nmtd->set = set;
- if (hardware_ecc) {
+#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
+ chip->ecc.calculate = s3c2410_nand_calculate_ecc;
+ chip->ecc.correct = s3c2410_nand_correct_data;
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.strength = 1;
+
+ switch (info->cpu_type) {
+ case TYPE_S3C2410:
+ chip->ecc.hwctl = s3c2410_nand_enable_hwecc;
chip->ecc.calculate = s3c2410_nand_calculate_ecc;
- chip->ecc.correct = s3c2410_nand_correct_data;
- chip->ecc.mode = NAND_ECC_HW;
- chip->ecc.strength = 1;
-
- switch (info->cpu_type) {
- case TYPE_S3C2410:
- chip->ecc.hwctl = s3c2410_nand_enable_hwecc;
- chip->ecc.calculate = s3c2410_nand_calculate_ecc;
- break;
-
- case TYPE_S3C2412:
- chip->ecc.hwctl = s3c2412_nand_enable_hwecc;
- chip->ecc.calculate = s3c2412_nand_calculate_ecc;
- break;
-
- case TYPE_S3C2440:
- chip->ecc.hwctl = s3c2440_nand_enable_hwecc;
- chip->ecc.calculate = s3c2440_nand_calculate_ecc;
- break;
+ break;
- }
- } else {
- chip->ecc.mode = NAND_ECC_SOFT;
+ case TYPE_S3C2412:
+ chip->ecc.hwctl = s3c2412_nand_enable_hwecc;
+ chip->ecc.calculate = s3c2412_nand_calculate_ecc;
+ break;
+
+ case TYPE_S3C2440:
+ chip->ecc.hwctl = s3c2440_nand_enable_hwecc;
+ chip->ecc.calculate = s3c2440_nand_calculate_ecc;
+ break;
}
+#else
+ chip->ecc.mode = NAND_ECC_SOFT;
+#endif
if (set->ecc_layout != NULL)
chip->ecc.layout = set->ecc_layout;
static int s3c24xx_nand_probe(struct platform_device *pdev)
{
struct s3c2410_platform_nand *plat = to_nand_plat(pdev);
- enum s3c_cpu_type cpu_type;
+ enum s3c_cpu_type cpu_type;
struct s3c2410_nand_info *info;
struct s3c2410_nand_mtd *nmtd;
struct s3c2410_nand_set *sets;
pr_debug("s3c2410_nand_probe(%p)\n", pdev);
- info = kzalloc(sizeof(*info), GFP_KERNEL);
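+ /* devm_-managed resources (devm_kzalloc, devm_clk_get and
+ * devm_request_and_ioremap below) are released automatically on probe
+ * failure and on removal, so the explicit cleanup in the error and
+ * remove paths can go away. */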
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (info == NULL) {
dev_err(&pdev->dev, "no memory for flash info\n");
err = -ENOMEM;
/* get the clock source and enable it */
- info->clk = clk_get(&pdev->dev, "nand");
+ info->clk = devm_clk_get(&pdev->dev, "nand");
if (IS_ERR(info->clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
err = -ENOENT;
/* allocate and map the resource */
/* currently we assume we have the one resource */
- res = pdev->resource;
+ res = pdev->resource;
size = resource_size(res);
- info->area = request_mem_region(res->start, size, pdev->name);
-
- if (info->area == NULL) {
- dev_err(&pdev->dev, "cannot reserve register region\n");
- err = -ENOENT;
- goto exit_error;
- }
-
- info->device = &pdev->dev;
- info->platform = plat;
- info->regs = ioremap(res->start, size);
- info->cpu_type = cpu_type;
+ info->device = &pdev->dev;
+ info->platform = plat;
+ info->cpu_type = cpu_type;
+ info->regs = devm_request_and_ioremap(&pdev->dev, res);
if (info->regs == NULL) {
dev_err(&pdev->dev, "cannot reserve register region\n");
err = -EIO;
/* allocate our information */
size = nr_sets * sizeof(*info->mtds);
- info->mtds = kzalloc(size, GFP_KERNEL);
+ info->mtds = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
if (info->mtds == NULL) {
dev_err(&pdev->dev, "failed to allocate mtd storage\n");
err = -ENOMEM;
nmtd = info->mtds;
for (setno = 0; setno < nr_sets; setno++, nmtd++) {
- pr_debug("initialising set %d (%p, info %p)\n", setno, nmtd, info);
+ pr_debug("initialising set %d (%p, info %p)\n",
+ setno, nmtd, info);
s3c2410_nand_init_chip(info, nmtd, sets);
},
};
-static int __init s3c2410_nand_init(void)
-{
- printk("S3C24XX NAND Driver, (c) 2004 Simtec Electronics\n");
-
- return platform_driver_register(&s3c24xx_nand_driver);
-}
-
-static void __exit s3c2410_nand_exit(void)
-{
- platform_driver_unregister(&s3c24xx_nand_driver);
-}
-
-module_init(s3c2410_nand_init);
-module_exit(s3c2410_nand_exit);
+module_platform_driver(s3c24xx_nand_driver);
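+/* module_platform_driver() expands to the module_init()/module_exit()
+ * registration boilerplate that was previously open-coded. */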
MODULE_LICENSE("GPL");
{
struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
+#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
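+ /* Make sure the delayed wbuf flush work is neither pending nor
+ * running before the write buffer is flushed and padded by hand. */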
+ cancel_delayed_work_sync(&c->wbuf_dwork);
+#endif
+
mutex_lock(&c->alloc_sem);
jffs2_flush_wbuf_pad(c);
mutex_unlock(&c->alloc_sem);
unregister_filesystem(&jffs2_fs_type);
jffs2_destroy_slab_caches();
jffs2_compressors_exit();
+
+ /*
+ * Make sure all delayed rcu free inodes are flushed before we
+ * destroy cache.
+ */
+ rcu_barrier();
kmem_cache_destroy(jffs2_inode_cachep);
}