// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mach-pxa/pxa3xx.c
 *
 * code specific to pxa3xx aka Monahans
 *
 * Copyright (C) 2006 Marvell International Ltd.
 */
#include <linux/dmaengine.h>
#include <linux/dma/pxa-dma.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gpio-pxa.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/syscore_ops.h>
#include <linux/platform_data/i2c-pxa.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/soc/pxa/cpu.h>

#include <asm/mach/map.h>
#include <asm/suspend.h>
#include <mach/pxa3xx-regs.h>
#include <mach/reset.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include <mach/addr-map.h>
#include <mach/smemc.h>
#include <mach/irqs.h>

#include "generic.h"
#include "devices.h"
#define PECR_IE(n)	((1 << ((n) * 2)) << 28)
#define PECR_IS(n)	((1 << ((n) * 2)) << 29)
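/*
 * Worked expansion of the two macros above for the external wakeup pins
 * handled later in this file (pure arithmetic on the definitions, shown
 * only as an illustration):
 *   PECR_IE(0) == (1 << 0) << 28 == BIT(28), PECR_IE(1) == (1 << 2) << 28 == BIT(30)
 *   PECR_IS(0) == (1 << 0) << 29 == BIT(29), PECR_IS(1) == (1 << 2) << 29 == BIT(31)
 */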
extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int));
/*
 * NAND NFC: DFI bus arbitration subset
 */
#define NDCR			(*(volatile u32 __iomem *)(NAND_VIRT + 0))
#define NDCR_ND_ARB_EN		(1 << 12)
#define NDCR_ND_ARB_CNTL	(1 << 19)
#ifdef CONFIG_PM

#define ISRAM_START	0x5c000000
#define ISRAM_SIZE	SZ_256K
static void __iomem *sram;
static unsigned long wakeup_src;
/*
 * Enter a standby mode (S0D1C2 or S0D2C2). Upon wakeup, the dynamic
 * memory controller has to be reinitialised, so we place some code
 * in the SRAM to perform this function.
 *
 * We disable FIQs across the standby - otherwise, we might receive a
 * FIQ while the SDRAM is unavailable.
 */
static void pxa3xx_cpu_standby(unsigned int pwrmode)
{
	void (*fn)(unsigned int) = (void __force *)(sram + 0x8000);

	memcpy_toio(sram + 0x8000, pm_enter_standby_start,
		    pm_enter_standby_end - pm_enter_standby_start);
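	/*
	 * Remainder of this routine (a sketch inferred from the comment
	 * above, not shown here verbatim): FIQs are disabled, the routine
	 * relocated into SRAM is invoked, and FIQs are re-enabled, e.g.:
	 *
	 *	local_fiq_disable();
	 *	fn(pwrmode);
	 *	local_fiq_enable();
	 */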
/*
 * NOTE: currently, the OBM (OEM Boot Module) binary that comes along with
 * the PXA3xx development kits assumes that the resuming process continues
 * with the address stored within the first 4 bytes of SDRAM. The PSPR
 * register is used privately by BootROM and OBM, and _must_ be set to
 * 0x5c014000 for the moment.
 */
static void pxa3xx_cpu_pm_suspend(void)
{
	volatile unsigned long *p = (volatile void *)0xc0000000;
	unsigned long saved_data = *p;
#ifndef CONFIG_IWMMXT
	u64 acc0;

	asm volatile(".arch_extension xscale\n\t"
		     "mra %Q0, %R0, acc0" : "=r" (acc0));
#endif

	/* resuming from D2 requires the HSIO2/BOOT/TPM clocks enabled */
	CKENA |= (1 << CKEN_BOOT) | (1 << CKEN_TPM);
	CKENB |= 1 << (CKEN_HSIO2 & 0x1f);

	/* clear and setup wakeup source */

	PCFR |= (1u << 13);			/* L1_DIS */
	PCFR &= ~((1u << 12) | (1u << 1));	/* L0_EN | SL_ROD */

	PSPR = 0x5c014000;

	/* overwrite with the resume address */
	*p = __pa_symbol(cpu_resume);

	cpu_suspend(0, pxa3xx_finish_suspend);

	/* restore the first word of SDRAM saved above */
	*p = saved_data;

#ifndef CONFIG_IWMMXT
	asm volatile(".arch_extension xscale\n\t"
		     "mar acc0, %Q0, %R0" : : "r" (acc0));
#endif
}
static void pxa3xx_cpu_pm_enter(suspend_state_t state)
{
	/* Don't sleep if no wakeup sources are defined */
	if (wakeup_src == 0) {
		printk(KERN_ERR "Not suspending: no wakeup sources\n");
		return;
	}

	switch (state) {
	case PM_SUSPEND_STANDBY:
		pxa3xx_cpu_standby(PXA3xx_PM_S0D2C2);
		break;
	case PM_SUSPEND_MEM:
		pxa3xx_cpu_pm_suspend();
		break;
	}
}
static int pxa3xx_cpu_pm_valid(suspend_state_t state)
{
	return state == PM_SUSPEND_MEM || state == PM_SUSPEND_STANDBY;
}

static struct pxa_cpu_pm_fns pxa3xx_cpu_pm_fns = {
	.valid		= pxa3xx_cpu_pm_valid,
	.enter		= pxa3xx_cpu_pm_enter,
};
static void __init pxa3xx_init_pm(void)
{
	sram = ioremap(ISRAM_START, ISRAM_SIZE);
	if (!sram) {
		printk(KERN_ERR "Unable to map ISRAM: disabling standby/suspend\n");
		return;
	}

	/*
	 * Since we copy wakeup code into the SRAM, we need to ensure
	 * that it is preserved over the low power modes. Note: bit 8
	 * is undocumented in the developer manual, but must be set.
	 */
	AD1R |= ADXR_L2 | ADXR_R0;
	AD2R |= ADXR_L2 | ADXR_R0;
	AD3R |= ADXR_L2 | ADXR_R0;

	/*
	 * Clear the resume enable registers.
	 */
	AD1D0ER = 0;
	AD2D0ER = 0;
	AD2D1ER = 0;
	AD3ER = 0;

	pxa_cpu_pm_fns = &pxa3xx_cpu_pm_fns;
}
static int pxa3xx_set_wake(struct irq_data *d, unsigned int on)
{
	unsigned long flags, mask = 0;
		mask = ADXER_MFP_WSSP3;
		mask = ADXER_MFP_WAC97;
		mask = ADXER_MFP_WSSP2;
		mask = ADXER_MFP_WI2C;
		mask = ADXER_MFP_WUART3;
		mask = ADXER_MFP_WUART2;
		mask = ADXER_MFP_WUART1;
		mask = ADXER_MFP_WMMC1;
		mask = ADXER_MFP_WSSP1;
		mask = ADXER_MFP_WSSP4;
		mask = ADXER_MFP_WMMC2;
		mask = ADXER_MFP_WFLASH;
		mask = ADXER_WEXTWAKE0;
		mask = ADXER_WEXTWAKE1;
		mask = ADXER_MFP_GEN12;
	local_irq_save(flags);
	if (on)
		ADXER |= mask;
	else
		ADXER &= ~mask;
	local_irq_restore(flags);

	return 0;
}

#else
static inline void pxa3xx_init_pm(void) {}
#define pxa3xx_set_wake	NULL
#endif
static void pxa_ack_ext_wakeup(struct irq_data *d)
{
	PECR |= PECR_IS(d->irq - IRQ_WAKEUP0);
}

static void pxa_mask_ext_wakeup(struct irq_data *d)
{
	PECR &= ~PECR_IE(d->irq - IRQ_WAKEUP0);
}

static void pxa_unmask_ext_wakeup(struct irq_data *d)
{
	PECR |= PECR_IE(d->irq - IRQ_WAKEUP0);
}
static int pxa_set_ext_wakeup_type(struct irq_data *d, unsigned int flow_type)
{
	if (flow_type & IRQ_TYPE_EDGE_RISING)
		PWER |= 1 << (d->irq - IRQ_WAKEUP0);

	if (flow_type & IRQ_TYPE_EDGE_FALLING)
		PWER |= 1 << (d->irq - IRQ_WAKEUP0 + 2);

	return 0;
}
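/*
 * In pxa_set_ext_wakeup_type() above, (d->irq - IRQ_WAKEUP0) evaluates to
 * 0 or 1, so PWER bits 0/1 select rising-edge wake for WAKEUP0/WAKEUP1 and
 * bits 2/3 select the corresponding falling-edge wake.
 */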
static struct irq_chip pxa_ext_wakeup_chip = {
	.irq_ack	= pxa_ack_ext_wakeup,
	.irq_mask	= pxa_mask_ext_wakeup,
	.irq_unmask	= pxa_unmask_ext_wakeup,
	.irq_set_type	= pxa_set_ext_wakeup_type,
};
static void __init pxa_init_ext_wakeup_irq(int (*fn)(struct irq_data *,
					   unsigned int))
{
	int irq;

	for (irq = IRQ_WAKEUP0; irq <= IRQ_WAKEUP1; irq++) {
		irq_set_chip_and_handler(irq, &pxa_ext_wakeup_chip,
					 handle_edge_irq);
		irq_clear_status_flags(irq, IRQ_NOREQUEST);
	}

	pxa_ext_wakeup_chip.irq_set_wake = fn;
}
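/*
 * Minimal usage sketch for the two external wakeup interrupts set up above
 * (the handler name is illustrative, not part of this file); this file
 * itself arms them for wakeup in pxa3xx_init() via enable_irq_wake():
 *
 *	ret = request_irq(IRQ_WAKEUP0, board_wake_handler,
 *			  IRQF_TRIGGER_RISING, "ext-wakeup0", NULL);
 *	if (!ret)
 *		enable_irq_wake(IRQ_WAKEUP0);
 */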
static void __init __pxa3xx_init_irq(void)
{
	u32 value;

	/* enable CP6 access */
	__asm__ __volatile__("mrc p15, 0, %0, c15, c1, 0\n" : "=r"(value));
	value |= (1 << 6);
	__asm__ __volatile__("mcr p15, 0, %0, c15, c1, 0\n" : : "r"(value));

	pxa_init_ext_wakeup_irq(pxa3xx_set_wake);
}
void __init pxa3xx_init_irq(void)
{
	__pxa3xx_init_irq();
	pxa_init_irq(56, pxa3xx_set_wake);
}
#ifdef CONFIG_OF
static int __init
pxa3xx_dt_init_irq(struct device_node *node, struct device_node *parent)
{
	__pxa3xx_init_irq();
	pxa_dt_irq_init(pxa3xx_set_wake);
	set_handle_irq(ichp_handle_irq);

	return 0;
}
IRQCHIP_DECLARE(pxa3xx_intc, "marvell,pxa-intc", pxa3xx_dt_init_irq);
#endif /* CONFIG_OF */
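/*
 * Illustrative device tree node matched by the "marvell,pxa-intc" compatible
 * above; the unit address, register size and cell count shown here are
 * assumptions based on a typical PXA3xx description, not definitive:
 *
 *	interrupt-controller@40d00000 {
 *		compatible = "marvell,pxa-intc";
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *		reg = <0x40d00000 0xd4>;
 *	};
 */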
static struct map_desc pxa3xx_io_desc[] __initdata = {
	{	/* Static Memory Controller */
		.virtual	= (unsigned long)SMEMC_VIRT,
		.pfn		= __phys_to_pfn(PXA3XX_SMEMC_BASE),
		.length		= SMEMC_SIZE,
		.type		= MT_DEVICE,
	}, {	/* NAND controller (for NDCR above) */
		.virtual	= (unsigned long)NAND_VIRT,
		.pfn		= __phys_to_pfn(NAND_PHYS),
		.length		= NAND_SIZE,
		.type		= MT_DEVICE,
	},
};
void __init pxa3xx_map_io(void)
{
	pxa_map_io();
	iotable_init(ARRAY_AND_SIZE(pxa3xx_io_desc));
	pxa3xx_get_clk_frequency_khz(1);
}
/*
 * device registration specific to PXA3xx.
 */
void __init pxa3xx_set_i2c_power_info(struct i2c_pxa_platform_data *info)
{
	pxa_register_device(&pxa3xx_device_i2c_power, info);
}
static struct pxa_gpio_platform_data pxa3xx_gpio_pdata = {
	.irq_base	= PXA_GPIO_TO_IRQ(0),
};
static struct platform_device *devices[] __initdata = {
	&pxa_device_asoc_ssp1,
	&pxa_device_asoc_ssp2,
	&pxa_device_asoc_ssp3,
	&pxa_device_asoc_ssp4,
	&pxa_device_asoc_platform,
static const struct dma_slave_map pxa3xx_slave_map[] = {
	/* PXA25x, PXA27x and PXA3xx common entries */
	{ "pxa2xx-ac97", "pcm_pcm_mic_mono", PDMA_FILTER_PARAM(LOWEST, 8) },
	{ "pxa2xx-ac97", "pcm_pcm_aux_mono_in", PDMA_FILTER_PARAM(LOWEST, 9) },
	{ "pxa2xx-ac97", "pcm_pcm_aux_mono_out",
	  PDMA_FILTER_PARAM(LOWEST, 10) },
	{ "pxa2xx-ac97", "pcm_pcm_stereo_in", PDMA_FILTER_PARAM(LOWEST, 11) },
	{ "pxa2xx-ac97", "pcm_pcm_stereo_out", PDMA_FILTER_PARAM(LOWEST, 12) },
	{ "pxa-ssp-dai.0", "rx", PDMA_FILTER_PARAM(LOWEST, 13) },
	{ "pxa-ssp-dai.0", "tx", PDMA_FILTER_PARAM(LOWEST, 14) },
	{ "pxa-ssp-dai.1", "rx", PDMA_FILTER_PARAM(LOWEST, 15) },
	{ "pxa-ssp-dai.1", "tx", PDMA_FILTER_PARAM(LOWEST, 16) },
	{ "pxa2xx-ir", "rx", PDMA_FILTER_PARAM(LOWEST, 17) },
	{ "pxa2xx-ir", "tx", PDMA_FILTER_PARAM(LOWEST, 18) },
	{ "pxa2xx-mci.0", "rx", PDMA_FILTER_PARAM(LOWEST, 21) },
	{ "pxa2xx-mci.0", "tx", PDMA_FILTER_PARAM(LOWEST, 22) },
	{ "pxa-ssp-dai.2", "rx", PDMA_FILTER_PARAM(LOWEST, 66) },
	{ "pxa-ssp-dai.2", "tx", PDMA_FILTER_PARAM(LOWEST, 67) },

	/* PXA3xx specific map */
	{ "pxa-ssp-dai.3", "rx", PDMA_FILTER_PARAM(LOWEST, 2) },
	{ "pxa-ssp-dai.3", "tx", PDMA_FILTER_PARAM(LOWEST, 3) },
	{ "pxa2xx-mci.1", "rx", PDMA_FILTER_PARAM(LOWEST, 93) },
	{ "pxa2xx-mci.1", "tx", PDMA_FILTER_PARAM(LOWEST, 94) },
	{ "pxa3xx-nand", "data", PDMA_FILTER_PARAM(LOWEST, 97) },
	{ "pxa2xx-mci.2", "rx", PDMA_FILTER_PARAM(LOWEST, 100) },
	{ "pxa2xx-mci.2", "tx", PDMA_FILTER_PARAM(LOWEST, 101) },
};
static struct mmp_dma_platdata pxa3xx_dma_pdata = {
	.dma_channels	= 32,
	.nb_requestors	= 100,
	.slave_map	= pxa3xx_slave_map,
	.slave_map_cnt	= ARRAY_SIZE(pxa3xx_slave_map),
};
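/*
 * With the slave map above, a client driver can look up its channel by
 * name through the dmaengine core; a minimal sketch (the device instance
 * and channel name are illustrative):
 *
 *	struct dma_chan *chan = dma_request_chan(&pdev->dev, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */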
static int __init pxa3xx_init(void)
{
	int ret = 0;

	if (cpu_is_pxa3xx()) {
		/*
		 * clear RDH bit every time after reset
		 *
		 * Note: the last 3 bits DxS are write-1-to-clear so carefully
		 * preserve them here in case they are referenced later
		 */
		ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S);

		/*
		 * Disable DFI bus arbitration, to prevent a system bus lock if
		 * somebody disables the NAND clock (unused clock) while this
		 * bit remains set.
		 */
		NDCR = (NDCR & ~NDCR_ND_ARB_EN) | NDCR_ND_ARB_CNTL;

		pxa3xx_init_pm();

		enable_irq_wake(IRQ_WAKEUP0);
		if (cpu_is_pxa320())
			enable_irq_wake(IRQ_WAKEUP1);

		register_syscore_ops(&pxa_irq_syscore_ops);
		register_syscore_ops(&pxa3xx_mfp_syscore_ops);

		if (of_have_populated_dt())
			return 0;

		pxa2xx_set_dmac_info(&pxa3xx_dma_pdata);
		ret = platform_add_devices(devices, ARRAY_SIZE(devices));
		if (ret)
			return ret;

		if (cpu_is_pxa300() || cpu_is_pxa310() || cpu_is_pxa320()) {
			platform_device_add_data(&pxa3xx_device_gpio,
						 &pxa3xx_gpio_pdata,
						 sizeof(pxa3xx_gpio_pdata));
			ret = platform_device_register(&pxa3xx_device_gpio);
		}
	}

	return ret;
}
postcore_initcall(pxa3xx_init);