/*
 * Copyright (c) 2012 Linaro : Daniel Lezcano <[email protected]> (IBM)
 *
 * Based on the work of Rickard Andersson <[email protected]>
 * and Jonas Aaberg <[email protected]>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/cpuidle.h>
#include <linux/clockchips.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/smp.h>
#include <linux/mfd/dbx500-prcmu.h>

#include <asm/cpuidle.h>
#include <asm/proc-fns.h>

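/*
 * 'master' counts the cpus currently inside the idle routine; the last
 * cpu to enter becomes the master and may prepare the transition to
 * retention, serialized by 'master_lock'.
 */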
static atomic_t master = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(master_lock);
static DEFINE_PER_CPU(struct cpuidle_device, ux500_cpuidle_device);

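/*
 * Idle handler for the "ApIdle" state: every cpu switches its local
 * timer to the broadcast timer and goes to WFI; the last cpu to enter
 * additionally decouples the gic and asks the prcmu to put the AP in
 * retention once all cpus are in WFI.
 */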
static inline int ux500_enter_idle(struct cpuidle_device *dev,
                                   struct cpuidle_driver *drv, int index)
{
        int this_cpu = smp_processor_id();
        bool recouple = false;

        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &this_cpu);

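        /* The last cpu to enter idle is the candidate master */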
        if (atomic_inc_return(&master) == num_online_cpus()) {

                /* With this lock, we prevent the other cpu from exiting
                 * and entering this function again and becoming the
                 * master */
                if (!spin_trylock(&master_lock))
                        goto wfi;

                /* decouple the gic from the A9 cores; if that fails,
                 * release the master lock and bail out */
                if (prcmu_gic_decouple()) {
                        spin_unlock(&master_lock);
                        goto out;
                }

                /* If an error occurs, we will have to recouple the gic
                 * manually */
                recouple = true;

                /* At this point, as the gic is decoupled, if the other
                 * cpu is in WFI we have the guarantee it won't be woken
                 * up, so we can safely go to retention */
                if (!prcmu_is_cpu_in_wfi(this_cpu ? 0 : 1))
                        goto out;

                /* The prcmu will be in charge of watching the interrupts
                 * and waking up the cpus */
                if (prcmu_copy_gic_settings())
                        goto out;

                /* Check that in the meantime an interrupt did
                 * not occur on the gic ... */
                if (prcmu_gic_pending_irq())
                        goto out;

                /* ... and the prcmu */
                if (prcmu_pending_irq())
                        goto out;

                /* Go to the retention state; the prcmu will wait for
                 * this cpu to enter WFI, which is what happens after
                 * exiting this 'master' critical section */
                if (prcmu_set_power_state(PRCMU_AP_IDLE, true, true))
                        goto out;

                /* When we switch to retention, the prcmu is in charge
                 * of recoupling the gic automatically */
                recouple = false;

                spin_unlock(&master_lock);
        }
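        /* All cpus, the master included on success, end up in WFI; the
         * prcmu then completes the transition to retention */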
wfi:
        cpu_do_idle();
out:
        atomic_dec(&master);

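        /* A step failed after the gic was decoupled: recouple it by hand
         * and release the master lock taken above */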
        if (recouple) {
                prcmu_gic_recouple();
                spin_unlock(&master_lock);
        }

        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &this_cpu);

        return index;
}

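/*
 * State 0 is the standard ARM WFI state; state 1, "ApIdle", puts the
 * AP in retention once all online cpus are idle.
 */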
static struct cpuidle_driver ux500_idle_driver = {
        .name = "ux500_idle",
        .owner = THIS_MODULE,
        .en_core_tk_irqen = 1,
        .states = {
                ARM_CPUIDLE_WFI_STATE,
                {
                        .enter            = ux500_enter_idle,
                        .exit_latency     = 70,
                        .target_residency = 260,
                        .flags            = CPUIDLE_FLAG_TIME_VALID,
                        .name             = "ApIdle",
                        .desc             = "ARM Retention",
                },
        },
        .safe_state_index = 0,
        .state_count = 2,
};

/*
 * For each cpu, set up the broadcast timer because we will
 * need to migrate the timers for the states >= ApIdle.
 */
static void ux500_setup_broadcast_timer(void *arg)
{
        int cpu = smp_processor_id();

        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
}

int __init ux500_idle_init(void)
{
        int ret, cpu;
        struct cpuidle_device *device;

        /* Configure wake up reasons */
        prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
                             PRCMU_WAKEUP(ABB));

        /*
         * Configure the timer broadcast for each cpu, which must be
         * done from cpu context, so we use an smp cross call with
         * 'on_each_cpu'.
         */
        on_each_cpu(ux500_setup_broadcast_timer, NULL, 1);

        ret = cpuidle_register_driver(&ux500_idle_driver);
        if (ret) {
                printk(KERN_ERR "failed to register ux500 idle driver\n");
                return ret;
        }

        for_each_online_cpu(cpu) {
                device = &per_cpu(ux500_cpuidle_device, cpu);
                device->cpu = cpu;
                ret = cpuidle_register_device(device);
                if (ret) {
                        printk(KERN_ERR "Failed to register cpuidle device for cpu%d\n",
                               cpu);
                        goto out_unregister;
                }
        }
out:
        return ret;

out_unregister:
        for_each_online_cpu(cpu) {
                device = &per_cpu(ux500_cpuidle_device, cpu);
                cpuidle_unregister_device(device);
        }

        cpuidle_unregister_driver(&ux500_idle_driver);
        goto out;
}

device_initcall(ux500_idle_init);