/*
 * linux/arch/arm/mach-pxa/pxa3xx.c
 *
 * code specific to pxa3xx aka Monahans
 *
 * Copyright (C) 2006 Marvell International Ltd.
 *
 * 2007-09-02: eric miao <eric.miao@marvell.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gpio-pxa.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/syscore_ops.h>
#include <linux/i2c/pxa-i2c.h>

#include <asm/mach/map.h>
#include <asm/suspend.h>
#include <mach/hardware.h>
#include <mach/pxa3xx-regs.h>
#include <mach/reset.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include <mach/smemc.h>
#include <mach/irqs.h>
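
/*
 * PECR bits for the external wakeup pins: PECR_IE(n) is the interrupt
 * enable bit and PECR_IS(n) the interrupt status bit for wakeup pin n.
 */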
#define PECR_IE(n)	((1 << ((n) * 2)) << 28)
#define PECR_IS(n)	((1 << ((n) * 2)) << 29)

extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int));

#ifdef CONFIG_PM
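
/* on-chip SRAM; the standby entry code is copied here (see pxa3xx_cpu_standby) */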
#define ISRAM_START	0x5c000000
#define ISRAM_SIZE	SZ_256K

static void __iomem *sram;
static unsigned long wakeup_src;

/*
 * Enter a standby mode (S0D1C2 or S0D2C2). Upon wakeup, the dynamic
 * memory controller has to be reinitialised, so we place some code
 * in the SRAM to perform this function.
 *
 * We disable FIQs across the standby - otherwise, we might receive a
 * FIQ while the SDRAM is unavailable.
 */
static void pxa3xx_cpu_standby(unsigned int pwrmode)
{
	extern const char pm_enter_standby_start[], pm_enter_standby_end[];
	void (*fn)(unsigned int) = (void __force *)(sram + 0x8000);
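
	/*
	 * Copy the standby entry code into SRAM and run it from there;
	 * the SDRAM is unavailable while the system is in standby.
	 */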
	memcpy_toio(sram + 0x8000, pm_enter_standby_start,
		    pm_enter_standby_end - pm_enter_standby_start);

/*
 * NOTE: currently, the OBM (OEM Boot Module) binary that comes along
 * with PXA3xx development kits assumes that the resuming process
 * continues with the address stored within the first 4 bytes of SDRAM.
 * The PSPR register is used privately by BootROM and OBM, and _must_
 * be set to 0x5c014000 for the moment.
 */
static void pxa3xx_cpu_pm_suspend(void)
{
	volatile unsigned long *p = (volatile void *)0xc0000000;
	unsigned long saved_data = *p;
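
	/*
	 * Keep a copy of the first SDRAM word (it is overwritten below
	 * with the resume address) and, when iWMMXt is not built in, of
	 * the accumulator acc0, so both can be restored after resume.
	 */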
#ifndef CONFIG_IWMMXT
	u64 acc0;

	asm volatile("mra %Q0, %R0, acc0" : "=r" (acc0));
#endif

	extern int pxa3xx_finish_suspend(unsigned long);

	/* resuming from D2 requires the HSIO2/BOOT/TPM clocks enabled */
	CKENA |= (1 << CKEN_BOOT) | (1 << CKEN_TPM);
	CKENB |= 1 << (CKEN_HSIO2 & 0x1f);

	/* clear and setup wakeup source */

	PCFR |= (1u << 13);			/* L1_DIS */
	PCFR &= ~((1u << 12) | (1u << 1));	/* L0_EN | SL_ROD */

	/* overwrite with the resume address */
	*p = virt_to_phys(cpu_resume);

	cpu_suspend(0, pxa3xx_finish_suspend);
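
	/*
	 * After wakeup the BootROM/OBM jumps to the address stored above
	 * (cpu_resume) and execution eventually continues here.
	 */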
	*p = saved_data;

#ifndef CONFIG_IWMMXT
	asm volatile("mar acc0, %Q0, %R0" : : "r" (acc0));
#endif
}

static void pxa3xx_cpu_pm_enter(suspend_state_t state)
{
	/*
	 * Don't sleep if no wakeup sources are defined
	 */
	if (wakeup_src == 0) {
		printk(KERN_ERR "Not suspending: no wakeup sources\n");
		return;
	}

	switch (state) {
	case PM_SUSPEND_STANDBY:
		pxa3xx_cpu_standby(PXA3xx_PM_S0D2C2);
		break;

	case PM_SUSPEND_MEM:
		pxa3xx_cpu_pm_suspend();
		break;
	}
}

static int pxa3xx_cpu_pm_valid(suspend_state_t state)
{
	return state == PM_SUSPEND_MEM || state == PM_SUSPEND_STANDBY;
}

static struct pxa_cpu_pm_fns pxa3xx_cpu_pm_fns = {
	.valid		= pxa3xx_cpu_pm_valid,
	.enter		= pxa3xx_cpu_pm_enter,
};

static void __init pxa3xx_init_pm(void)
{
	sram = ioremap(ISRAM_START, ISRAM_SIZE);
	if (!sram) {
		printk(KERN_ERR "Unable to map ISRAM: disabling standby/suspend\n");
		return;
	}

	/*
	 * Since we copy wakeup code into the SRAM, we need to ensure
	 * that it is preserved over the low power modes. Note: bit 8
	 * is undocumented in the developer manual, but must be set.
	 */
	AD1R |= ADXR_L2 | ADXR_R0;
	AD2R |= ADXR_L2 | ADXR_R0;
	AD3R |= ADXR_L2 | ADXR_R0;

	/*
	 * Clear the resume enable registers.
	 */

	pxa_cpu_pm_fns = &pxa3xx_cpu_pm_fns;
}

static int pxa3xx_set_wake(struct irq_data *d, unsigned int on)
{
	unsigned long flags, mask = 0;

	switch (d->irq) {
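	/* each wake-capable interrupt maps to one ADXER wakeup enable bit */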
		mask = ADXER_MFP_WSSP3;
		mask = ADXER_MFP_WAC97;
		mask = ADXER_MFP_WSSP2;
		mask = ADXER_MFP_WI2C;
		mask = ADXER_MFP_WUART3;
		mask = ADXER_MFP_WUART2;
		mask = ADXER_MFP_WUART1;
		mask = ADXER_MFP_WMMC1;
		mask = ADXER_MFP_WSSP1;
		mask = ADXER_MFP_WSSP4;
		mask = ADXER_MFP_WMMC2;
		mask = ADXER_MFP_WFLASH;
		mask = ADXER_WEXTWAKE0;
		mask = ADXER_WEXTWAKE1;
		mask = ADXER_MFP_GEN12;
	}

	local_irq_save(flags);
	if (on)
		ADXER |= mask;
	else
		ADXER &= ~mask;
	local_irq_restore(flags);

	return 0;
}
#else
static inline void pxa3xx_init_pm(void) {}
#define pxa3xx_set_wake	NULL
#endif	/* CONFIG_PM */

static void pxa_ack_ext_wakeup(struct irq_data *d)
{
	PECR |= PECR_IS(d->irq - IRQ_WAKEUP0);
}

static void pxa_mask_ext_wakeup(struct irq_data *d)
{
	PECR &= ~PECR_IE(d->irq - IRQ_WAKEUP0);
}

static void pxa_unmask_ext_wakeup(struct irq_data *d)
{
	PECR |= PECR_IE(d->irq - IRQ_WAKEUP0);
}
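
/*
 * PWER selects the wakeup edge for the external wakeup pins: bits 0-1
 * enable rising-edge wake, bits 2-3 enable falling-edge wake.
 */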
static int pxa_set_ext_wakeup_type(struct irq_data *d, unsigned int flow_type)
{
	if (flow_type & IRQ_TYPE_EDGE_RISING)
		PWER |= 1 << (d->irq - IRQ_WAKEUP0);

	if (flow_type & IRQ_TYPE_EDGE_FALLING)
		PWER |= 1 << (d->irq - IRQ_WAKEUP0 + 2);

	return 0;
}

static struct irq_chip pxa_ext_wakeup_chip = {
	.irq_ack	= pxa_ack_ext_wakeup,
	.irq_mask	= pxa_mask_ext_wakeup,
	.irq_unmask	= pxa_unmask_ext_wakeup,
	.irq_set_type	= pxa_set_ext_wakeup_type,
};

static void __init pxa_init_ext_wakeup_irq(int (*fn)(struct irq_data *,
					   unsigned int))
{
	int irq;

	for (irq = IRQ_WAKEUP0; irq <= IRQ_WAKEUP1; irq++) {
		irq_set_chip_and_handler(irq, &pxa_ext_wakeup_chip,
					 handle_edge_irq);
		irq_clear_status_flags(irq, IRQ_NOREQUEST);
	}

	pxa_ext_wakeup_chip.irq_set_wake = fn;
}
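
/*
 * The PXA3xx interrupt controller is accessed through coprocessor 6
 * (CP6), so grant CP6 access before the IRQ code touches it.
 */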
static void __init __pxa3xx_init_irq(void)
{
	/* enable CP6 access */
	u32 value;

	__asm__ __volatile__("mrc p15, 0, %0, c15, c1, 0\n": "=r"(value));
	value |= (1 << 6);
	__asm__ __volatile__("mcr p15, 0, %0, c15, c1, 0\n": :"r"(value));

	pxa_init_ext_wakeup_irq(pxa3xx_set_wake);
}

void __init pxa3xx_init_irq(void)
{
	__pxa3xx_init_irq();
	pxa_init_irq(56, pxa3xx_set_wake);
}

#ifdef CONFIG_OF
void __init pxa3xx_dt_init_irq(void)
{
	__pxa3xx_init_irq();
	pxa_dt_irq_init(pxa3xx_set_wake);
}
#endif	/* CONFIG_OF */
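
/* static mapping for the PXA3xx static memory controller registers */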
static struct map_desc pxa3xx_io_desc[] __initdata = {
	{
		.virtual	= (unsigned long)SMEMC_VIRT,
		.pfn		= __phys_to_pfn(PXA3XX_SMEMC_BASE),
		.length		= SMEMC_SIZE,
	},
};

void __init pxa3xx_map_io(void)
{
	iotable_init(ARRAY_AND_SIZE(pxa3xx_io_desc));
	pxa3xx_get_clk_frequency_khz(1);
}

/*
 * device registration specific to PXA3xx.
 */
void __init pxa3xx_set_i2c_power_info(struct i2c_pxa_platform_data *info)
{
	pxa_register_device(&pxa3xx_device_i2c_power, info);
}

static struct pxa_gpio_platform_data pxa3xx_gpio_pdata = {
	.irq_base	= PXA_GPIO_TO_IRQ(0),
};

static struct platform_device *devices[] __initdata = {
	&pxa_device_asoc_ssp1,
	&pxa_device_asoc_ssp2,
	&pxa_device_asoc_ssp3,
	&pxa_device_asoc_ssp4,
	&pxa_device_asoc_platform,

static int __init pxa3xx_init(void)
{
	int ret = 0;

	if (cpu_is_pxa3xx()) {

		/*
		 * clear RDH bit every time after reset
		 *
		 * Note: the last 3 bits DxS are write-1-to-clear so carefully
		 * preserve them here in case they will be referenced later
		 */
		ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S);

		if ((ret = pxa_init_dma(IRQ_DMA, 32)))
			return ret;

		register_syscore_ops(&pxa_irq_syscore_ops);
		register_syscore_ops(&pxa3xx_mfp_syscore_ops);

		if (of_have_populated_dt())
			return 0;

		pxa2xx_set_dmac_info(32);
		ret = platform_add_devices(devices, ARRAY_SIZE(devices));
		if (cpu_is_pxa300() || cpu_is_pxa310() || cpu_is_pxa320()) {
			platform_device_add_data(&pxa3xx_device_gpio,
						 &pxa3xx_gpio_pdata,
						 sizeof(pxa3xx_gpio_pdata));
			ret = platform_device_register(&pxa3xx_device_gpio);
		}
	}

	return ret;
}

postcore_initcall(pxa3xx_init);