mm: thp: set the accessed flag for old pages on access fault
[pandora-kernel.git] / drivers / mfd / db8500-prcmu.c
1 /*
2  * Copyright (C) STMicroelectronics 2009
3  * Copyright (C) ST-Ericsson SA 2010
4  *
5  * License Terms: GNU General Public License v2
6  * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com>
7  * Author: Sundar Iyer <sundar.iyer@stericsson.com>
8  * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com>
9  *
10  * U8500 PRCM Unit interface driver
11  *
12  */
13 #include <linux/module.h>
14 #include <linux/kernel.h>
15 #include <linux/delay.h>
16 #include <linux/errno.h>
17 #include <linux/err.h>
18 #include <linux/spinlock.h>
19 #include <linux/io.h>
20 #include <linux/slab.h>
21 #include <linux/mutex.h>
22 #include <linux/completion.h>
23 #include <linux/irq.h>
24 #include <linux/jiffies.h>
25 #include <linux/bitops.h>
26 #include <linux/fs.h>
27 #include <linux/platform_device.h>
28 #include <linux/uaccess.h>
29 #include <linux/mfd/core.h>
30 #include <linux/mfd/dbx500-prcmu.h>
31 #include <linux/regulator/db8500-prcmu.h>
32 #include <linux/regulator/machine.h>
33 #include <mach/hardware.h>
34 #include <mach/irqs.h>
35 #include <mach/db8500-regs.h>
36 #include <mach/id.h>
37 #include "dbx500-prcmu-regs.h"
38
39 /* Offset for the firmware version within the TCPM */
40 #define PRCMU_FW_VERSION_OFFSET 0xA4
41
42 /* PRCMU project numbers, defined by PRCMU FW */
43 #define PRCMU_PROJECT_ID_8500V1_0 1
44 #define PRCMU_PROJECT_ID_8500V2_0 2
45 #define PRCMU_PROJECT_ID_8400V2_0 3
46
47 /* Index of different voltages to be used when accessing AVSData */
48 #define PRCM_AVS_BASE           0x2FC
49 #define PRCM_AVS_VBB_RET        (PRCM_AVS_BASE + 0x0)
50 #define PRCM_AVS_VBB_MAX_OPP    (PRCM_AVS_BASE + 0x1)
51 #define PRCM_AVS_VBB_100_OPP    (PRCM_AVS_BASE + 0x2)
52 #define PRCM_AVS_VBB_50_OPP     (PRCM_AVS_BASE + 0x3)
53 #define PRCM_AVS_VARM_MAX_OPP   (PRCM_AVS_BASE + 0x4)
54 #define PRCM_AVS_VARM_100_OPP   (PRCM_AVS_BASE + 0x5)
55 #define PRCM_AVS_VARM_50_OPP    (PRCM_AVS_BASE + 0x6)
56 #define PRCM_AVS_VARM_RET       (PRCM_AVS_BASE + 0x7)
57 #define PRCM_AVS_VAPE_100_OPP   (PRCM_AVS_BASE + 0x8)
58 #define PRCM_AVS_VAPE_50_OPP    (PRCM_AVS_BASE + 0x9)
59 #define PRCM_AVS_VMOD_100_OPP   (PRCM_AVS_BASE + 0xA)
60 #define PRCM_AVS_VMOD_50_OPP    (PRCM_AVS_BASE + 0xB)
61 #define PRCM_AVS_VSAFE          (PRCM_AVS_BASE + 0xC)
62
63 #define PRCM_AVS_VOLTAGE                0
64 #define PRCM_AVS_VOLTAGE_MASK           0x3f
65 #define PRCM_AVS_ISSLOWSTARTUP          6
66 #define PRCM_AVS_ISSLOWSTARTUP_MASK     (1 << PRCM_AVS_ISSLOWSTARTUP)
67 #define PRCM_AVS_ISMODEENABLE           7
68 #define PRCM_AVS_ISMODEENABLE_MASK      (1 << PRCM_AVS_ISMODEENABLE)
69
70 #define PRCM_BOOT_STATUS        0xFFF
71 #define PRCM_ROMCODE_A2P        0xFFE
72 #define PRCM_ROMCODE_P2A        0xFFD
73 #define PRCM_XP70_CUR_PWR_STATE 0xFFC      /* 4 BYTES */
74
75 #define PRCM_SW_RST_REASON 0xFF8 /* 2 bytes */
76
77 #define _PRCM_MBOX_HEADER               0xFE8 /* 16 bytes */
78 #define PRCM_MBOX_HEADER_REQ_MB0        (_PRCM_MBOX_HEADER + 0x0)
79 #define PRCM_MBOX_HEADER_REQ_MB1        (_PRCM_MBOX_HEADER + 0x1)
80 #define PRCM_MBOX_HEADER_REQ_MB2        (_PRCM_MBOX_HEADER + 0x2)
81 #define PRCM_MBOX_HEADER_REQ_MB3        (_PRCM_MBOX_HEADER + 0x3)
82 #define PRCM_MBOX_HEADER_REQ_MB4        (_PRCM_MBOX_HEADER + 0x4)
83 #define PRCM_MBOX_HEADER_REQ_MB5        (_PRCM_MBOX_HEADER + 0x5)
84 #define PRCM_MBOX_HEADER_ACK_MB0        (_PRCM_MBOX_HEADER + 0x8)
85
86 /* Req Mailboxes */
87 #define PRCM_REQ_MB0 0xFDC /* 12 bytes  */
88 #define PRCM_REQ_MB1 0xFD0 /* 12 bytes  */
89 #define PRCM_REQ_MB2 0xFC0 /* 16 bytes  */
90 #define PRCM_REQ_MB3 0xE4C /* 372 bytes  */
91 #define PRCM_REQ_MB4 0xE48 /* 4 bytes  */
92 #define PRCM_REQ_MB5 0xE44 /* 4 bytes  */
93
94 /* Ack Mailboxes */
95 #define PRCM_ACK_MB0 0xE08 /* 52 bytes  */
96 #define PRCM_ACK_MB1 0xE04 /* 4 bytes */
97 #define PRCM_ACK_MB2 0xE00 /* 4 bytes */
98 #define PRCM_ACK_MB3 0xDFC /* 4 bytes */
99 #define PRCM_ACK_MB4 0xDF8 /* 4 bytes */
100 #define PRCM_ACK_MB5 0xDF4 /* 4 bytes */
101
102 /* Mailbox 0 headers */
103 #define MB0H_POWER_STATE_TRANS          0
104 #define MB0H_CONFIG_WAKEUPS_EXE         1
105 #define MB0H_READ_WAKEUP_ACK            3
106 #define MB0H_CONFIG_WAKEUPS_SLEEP       4
107
108 #define MB0H_WAKEUP_EXE 2
109 #define MB0H_WAKEUP_SLEEP 5
110
111 /* Mailbox 0 REQs */
112 #define PRCM_REQ_MB0_AP_POWER_STATE     (PRCM_REQ_MB0 + 0x0)
113 #define PRCM_REQ_MB0_AP_PLL_STATE       (PRCM_REQ_MB0 + 0x1)
114 #define PRCM_REQ_MB0_ULP_CLOCK_STATE    (PRCM_REQ_MB0 + 0x2)
115 #define PRCM_REQ_MB0_DO_NOT_WFI         (PRCM_REQ_MB0 + 0x3)
116 #define PRCM_REQ_MB0_WAKEUP_8500        (PRCM_REQ_MB0 + 0x4)
117 #define PRCM_REQ_MB0_WAKEUP_4500        (PRCM_REQ_MB0 + 0x8)
118
119 /* Mailbox 0 ACKs */
120 #define PRCM_ACK_MB0_AP_PWRSTTR_STATUS  (PRCM_ACK_MB0 + 0x0)
121 #define PRCM_ACK_MB0_READ_POINTER       (PRCM_ACK_MB0 + 0x1)
122 #define PRCM_ACK_MB0_WAKEUP_0_8500      (PRCM_ACK_MB0 + 0x4)
123 #define PRCM_ACK_MB0_WAKEUP_0_4500      (PRCM_ACK_MB0 + 0x8)
124 #define PRCM_ACK_MB0_WAKEUP_1_8500      (PRCM_ACK_MB0 + 0x1C)
125 #define PRCM_ACK_MB0_WAKEUP_1_4500      (PRCM_ACK_MB0 + 0x20)
126 #define PRCM_ACK_MB0_EVENT_4500_NUMBERS 20
127
128 /* Mailbox 1 headers */
129 #define MB1H_ARM_APE_OPP 0x0
130 #define MB1H_RESET_MODEM 0x2
131 #define MB1H_REQUEST_APE_OPP_100_VOLT 0x3
132 #define MB1H_RELEASE_APE_OPP_100_VOLT 0x4
133 #define MB1H_RELEASE_USB_WAKEUP 0x5
134 #define MB1H_PLL_ON_OFF 0x6
135
136 /* Mailbox 1 Requests */
137 #define PRCM_REQ_MB1_ARM_OPP                    (PRCM_REQ_MB1 + 0x0)
138 #define PRCM_REQ_MB1_APE_OPP                    (PRCM_REQ_MB1 + 0x1)
139 #define PRCM_REQ_MB1_PLL_ON_OFF                 (PRCM_REQ_MB1 + 0x4)
140 #define PLL_SOC1_OFF    0x4
141 #define PLL_SOC1_ON     0x8
142
143 /* Mailbox 1 ACKs */
144 #define PRCM_ACK_MB1_CURRENT_ARM_OPP    (PRCM_ACK_MB1 + 0x0)
145 #define PRCM_ACK_MB1_CURRENT_APE_OPP    (PRCM_ACK_MB1 + 0x1)
146 #define PRCM_ACK_MB1_APE_VOLTAGE_STATUS (PRCM_ACK_MB1 + 0x2)
147 #define PRCM_ACK_MB1_DVFS_STATUS        (PRCM_ACK_MB1 + 0x3)
148
149 /* Mailbox 2 headers */
150 #define MB2H_DPS        0x0
151 #define MB2H_AUTO_PWR   0x1
152
153 /* Mailbox 2 REQs */
154 #define PRCM_REQ_MB2_SVA_MMDSP          (PRCM_REQ_MB2 + 0x0)
155 #define PRCM_REQ_MB2_SVA_PIPE           (PRCM_REQ_MB2 + 0x1)
156 #define PRCM_REQ_MB2_SIA_MMDSP          (PRCM_REQ_MB2 + 0x2)
157 #define PRCM_REQ_MB2_SIA_PIPE           (PRCM_REQ_MB2 + 0x3)
158 #define PRCM_REQ_MB2_SGA                (PRCM_REQ_MB2 + 0x4)
159 #define PRCM_REQ_MB2_B2R2_MCDE          (PRCM_REQ_MB2 + 0x5)
160 #define PRCM_REQ_MB2_ESRAM12            (PRCM_REQ_MB2 + 0x6)
161 #define PRCM_REQ_MB2_ESRAM34            (PRCM_REQ_MB2 + 0x7)
162 #define PRCM_REQ_MB2_AUTO_PM_SLEEP      (PRCM_REQ_MB2 + 0x8)
163 #define PRCM_REQ_MB2_AUTO_PM_IDLE       (PRCM_REQ_MB2 + 0xC)
164
165 /* Mailbox 2 ACKs */
166 #define PRCM_ACK_MB2_DPS_STATUS (PRCM_ACK_MB2 + 0x0)
167 #define HWACC_PWR_ST_OK 0xFE
168
169 /* Mailbox 3 headers */
170 #define MB3H_ANC        0x0
171 #define MB3H_SIDETONE   0x1
172 #define MB3H_SYSCLK     0xE
173
174 /* Mailbox 3 Requests */
175 #define PRCM_REQ_MB3_ANC_FIR_COEFF      (PRCM_REQ_MB3 + 0x0)
176 #define PRCM_REQ_MB3_ANC_IIR_COEFF      (PRCM_REQ_MB3 + 0x20)
177 #define PRCM_REQ_MB3_ANC_SHIFTER        (PRCM_REQ_MB3 + 0x60)
178 #define PRCM_REQ_MB3_ANC_WARP           (PRCM_REQ_MB3 + 0x64)
179 #define PRCM_REQ_MB3_SIDETONE_FIR_GAIN  (PRCM_REQ_MB3 + 0x68)
180 #define PRCM_REQ_MB3_SIDETONE_FIR_COEFF (PRCM_REQ_MB3 + 0x6C)
181 #define PRCM_REQ_MB3_SYSCLK_MGT         (PRCM_REQ_MB3 + 0x16C)
182
183 /* Mailbox 4 headers */
184 #define MB4H_DDR_INIT   0x0
185 #define MB4H_MEM_ST     0x1
186 #define MB4H_HOTDOG     0x12
187 #define MB4H_HOTMON     0x13
188 #define MB4H_HOT_PERIOD 0x14
189 #define MB4H_A9WDOG_CONF 0x16
190 #define MB4H_A9WDOG_EN   0x17
191 #define MB4H_A9WDOG_DIS  0x18
192 #define MB4H_A9WDOG_LOAD 0x19
193 #define MB4H_A9WDOG_KICK 0x20
194
195 /* Mailbox 4 Requests */
196 #define PRCM_REQ_MB4_DDR_ST_AP_SLEEP_IDLE       (PRCM_REQ_MB4 + 0x0)
197 #define PRCM_REQ_MB4_DDR_ST_AP_DEEP_IDLE        (PRCM_REQ_MB4 + 0x1)
198 #define PRCM_REQ_MB4_ESRAM0_ST                  (PRCM_REQ_MB4 + 0x3)
199 #define PRCM_REQ_MB4_HOTDOG_THRESHOLD           (PRCM_REQ_MB4 + 0x0)
200 #define PRCM_REQ_MB4_HOTMON_LOW                 (PRCM_REQ_MB4 + 0x0)
201 #define PRCM_REQ_MB4_HOTMON_HIGH                (PRCM_REQ_MB4 + 0x1)
202 #define PRCM_REQ_MB4_HOTMON_CONFIG              (PRCM_REQ_MB4 + 0x2)
203 #define PRCM_REQ_MB4_HOT_PERIOD                 (PRCM_REQ_MB4 + 0x0)
204 #define HOTMON_CONFIG_LOW                       BIT(0)
205 #define HOTMON_CONFIG_HIGH                      BIT(1)
206 #define PRCM_REQ_MB4_A9WDOG_0                   (PRCM_REQ_MB4 + 0x0)
207 #define PRCM_REQ_MB4_A9WDOG_1                   (PRCM_REQ_MB4 + 0x1)
208 #define PRCM_REQ_MB4_A9WDOG_2                   (PRCM_REQ_MB4 + 0x2)
209 #define PRCM_REQ_MB4_A9WDOG_3                   (PRCM_REQ_MB4 + 0x3)
210 #define A9WDOG_AUTO_OFF_EN                      BIT(7)
211 #define A9WDOG_AUTO_OFF_DIS                     0
212 #define A9WDOG_ID_MASK                          0xf
213
214 /* Mailbox 5 Requests */
215 #define PRCM_REQ_MB5_I2C_SLAVE_OP       (PRCM_REQ_MB5 + 0x0)
216 #define PRCM_REQ_MB5_I2C_HW_BITS        (PRCM_REQ_MB5 + 0x1)
217 #define PRCM_REQ_MB5_I2C_REG            (PRCM_REQ_MB5 + 0x2)
218 #define PRCM_REQ_MB5_I2C_VAL            (PRCM_REQ_MB5 + 0x3)
219 #define PRCMU_I2C_WRITE(slave) \
220         (((slave) << 1) | (cpu_is_u8500v2() ? BIT(6) : 0))
221 #define PRCMU_I2C_READ(slave) \
222         (((slave) << 1) | BIT(0) | (cpu_is_u8500v2() ? BIT(6) : 0))
223 #define PRCMU_I2C_STOP_EN               BIT(3)
224
225 /* Mailbox 5 ACKs */
226 #define PRCM_ACK_MB5_I2C_STATUS (PRCM_ACK_MB5 + 0x1)
227 #define PRCM_ACK_MB5_I2C_VAL    (PRCM_ACK_MB5 + 0x3)
228 #define I2C_WR_OK 0x1
229 #define I2C_RD_OK 0x2
230
231 #define NUM_MB 8
232 #define MBOX_BIT BIT
233 #define ALL_MBOX_BITS (MBOX_BIT(NUM_MB) - 1)
234
235 /*
236  * Wakeups/IRQs
237  */
238
239 #define WAKEUP_BIT_RTC BIT(0)
240 #define WAKEUP_BIT_RTT0 BIT(1)
241 #define WAKEUP_BIT_RTT1 BIT(2)
242 #define WAKEUP_BIT_HSI0 BIT(3)
243 #define WAKEUP_BIT_HSI1 BIT(4)
244 #define WAKEUP_BIT_CA_WAKE BIT(5)
245 #define WAKEUP_BIT_USB BIT(6)
246 #define WAKEUP_BIT_ABB BIT(7)
247 #define WAKEUP_BIT_ABB_FIFO BIT(8)
248 #define WAKEUP_BIT_SYSCLK_OK BIT(9)
249 #define WAKEUP_BIT_CA_SLEEP BIT(10)
250 #define WAKEUP_BIT_AC_WAKE_ACK BIT(11)
251 #define WAKEUP_BIT_SIDE_TONE_OK BIT(12)
252 #define WAKEUP_BIT_ANC_OK BIT(13)
253 #define WAKEUP_BIT_SW_ERROR BIT(14)
254 #define WAKEUP_BIT_AC_SLEEP_ACK BIT(15)
255 #define WAKEUP_BIT_ARM BIT(17)
256 #define WAKEUP_BIT_HOTMON_LOW BIT(18)
257 #define WAKEUP_BIT_HOTMON_HIGH BIT(19)
258 #define WAKEUP_BIT_MODEM_SW_RESET_REQ BIT(20)
259 #define WAKEUP_BIT_GPIO0 BIT(23)
260 #define WAKEUP_BIT_GPIO1 BIT(24)
261 #define WAKEUP_BIT_GPIO2 BIT(25)
262 #define WAKEUP_BIT_GPIO3 BIT(26)
263 #define WAKEUP_BIT_GPIO4 BIT(27)
264 #define WAKEUP_BIT_GPIO5 BIT(28)
265 #define WAKEUP_BIT_GPIO6 BIT(29)
266 #define WAKEUP_BIT_GPIO7 BIT(30)
267 #define WAKEUP_BIT_GPIO8 BIT(31)
268
269 /*
270  * This vector maps irq numbers to the bits in the bit field used in
271  * communication with the PRCMU firmware.
272  *
273  * The reason for having this is to keep the irq numbers contiguous even though
274  * the bits in the bit field are not. (The bits also have a tendency to move
275  * around, to further complicate matters.)
276  */
277 #define IRQ_INDEX(_name) ((IRQ_PRCMU_##_name) - IRQ_PRCMU_BASE)
278 #define IRQ_ENTRY(_name)[IRQ_INDEX(_name)] = (WAKEUP_BIT_##_name)
279 static u32 prcmu_irq_bit[NUM_PRCMU_WAKEUPS] = {
280         IRQ_ENTRY(RTC),
281         IRQ_ENTRY(RTT0),
282         IRQ_ENTRY(RTT1),
283         IRQ_ENTRY(HSI0),
284         IRQ_ENTRY(HSI1),
285         IRQ_ENTRY(CA_WAKE),
286         IRQ_ENTRY(USB),
287         IRQ_ENTRY(ABB),
288         IRQ_ENTRY(ABB_FIFO),
289         IRQ_ENTRY(CA_SLEEP),
290         IRQ_ENTRY(ARM),
291         IRQ_ENTRY(HOTMON_LOW),
292         IRQ_ENTRY(HOTMON_HIGH),
293         IRQ_ENTRY(MODEM_SW_RESET_REQ),
294         IRQ_ENTRY(GPIO0),
295         IRQ_ENTRY(GPIO1),
296         IRQ_ENTRY(GPIO2),
297         IRQ_ENTRY(GPIO3),
298         IRQ_ENTRY(GPIO4),
299         IRQ_ENTRY(GPIO5),
300         IRQ_ENTRY(GPIO6),
301         IRQ_ENTRY(GPIO7),
302         IRQ_ENTRY(GPIO8)
303 };
304
305 #define VALID_WAKEUPS (BIT(NUM_PRCMU_WAKEUP_INDICES) - 1)
306 #define WAKEUP_ENTRY(_name)[PRCMU_WAKEUP_INDEX_##_name] = (WAKEUP_BIT_##_name)
307 static u32 prcmu_wakeup_bit[NUM_PRCMU_WAKEUP_INDICES] = {
308         WAKEUP_ENTRY(RTC),
309         WAKEUP_ENTRY(RTT0),
310         WAKEUP_ENTRY(RTT1),
311         WAKEUP_ENTRY(HSI0),
312         WAKEUP_ENTRY(HSI1),
313         WAKEUP_ENTRY(USB),
314         WAKEUP_ENTRY(ABB),
315         WAKEUP_ENTRY(ABB_FIFO),
316         WAKEUP_ENTRY(ARM)
317 };
318
319 /*
320  * mb0_transfer - state needed for mailbox 0 communication.
321  * @lock:               The transaction lock.
322  * @dbb_events_lock:    A lock used to handle concurrent access to (parts of)
323  *                      the request data.
324  * @mask_work:          Work structure used for (un)masking wakeup interrupts.
325  * @req:                Request data that need to persist between requests.
326  */
327 static struct {
328         spinlock_t lock;
329         spinlock_t dbb_irqs_lock;
330         struct work_struct mask_work;
331         struct mutex ac_wake_lock;
332         struct completion ac_wake_work;
333         struct {
334                 u32 dbb_irqs;
335                 u32 dbb_wakeups;
336                 u32 abb_events;
337         } req;
338 } mb0_transfer;
339
340 /*
341  * mb1_transfer - state needed for mailbox 1 communication.
342  * @lock:       The transaction lock.
343  * @work:       The transaction completion structure.
344  * @ack:        Reply ("acknowledge") data.
345  */
346 static struct {
347         struct mutex lock;
348         struct completion work;
349         struct {
350                 u8 header;
351                 u8 arm_opp;
352                 u8 ape_opp;
353                 u8 ape_voltage_status;
354         } ack;
355 } mb1_transfer;
356
357 /*
358  * mb2_transfer - state needed for mailbox 2 communication.
359  * @lock:            The transaction lock.
360  * @work:            The transaction completion structure.
361  * @auto_pm_lock:    The autonomous power management configuration lock.
362  * @auto_pm_enabled: A flag indicating whether autonomous PM is enabled.
363  * @req:             Request data that need to persist between requests.
364  * @ack:             Reply ("acknowledge") data.
365  */
366 static struct {
367         struct mutex lock;
368         struct completion work;
369         spinlock_t auto_pm_lock;
370         bool auto_pm_enabled;
371         struct {
372                 u8 status;
373         } ack;
374 } mb2_transfer;
375
376 /*
377  * mb3_transfer - state needed for mailbox 3 communication.
378  * @lock:               The request lock.
379  * @sysclk_lock:        A lock used to handle concurrent sysclk requests.
380  * @sysclk_work:        Work structure used for sysclk requests.
381  */
382 static struct {
383         spinlock_t lock;
384         struct mutex sysclk_lock;
385         struct completion sysclk_work;
386 } mb3_transfer;
387
388 /*
389  * mb4_transfer - state needed for mailbox 4 communication.
390  * @lock:       The transaction lock.
391  * @work:       The transaction completion structure.
392  */
393 static struct {
394         struct mutex lock;
395         struct completion work;
396 } mb4_transfer;
397
398 /*
399  * mb5_transfer - state needed for mailbox 5 communication.
400  * @lock:       The transaction lock.
401  * @work:       The transaction completion structure.
402  * @ack:        Reply ("acknowledge") data.
403  */
404 static struct {
405         struct mutex lock;
406         struct completion work;
407         struct {
408                 u8 status;
409                 u8 value;
410         } ack;
411 } mb5_transfer;
412
413 static atomic_t ac_wake_req_state = ATOMIC_INIT(0);
414
415 /* Spinlocks */
416 static DEFINE_SPINLOCK(clkout_lock);
417 static DEFINE_SPINLOCK(gpiocr_lock);
418
419 /* Global var to runtime determine TCDM base for v2 or v1 */
420 static __iomem void *tcdm_base;
421
422 struct clk_mgt {
423         unsigned int offset;
424         u32 pllsw;
425 };
426
427 static DEFINE_SPINLOCK(clk_mgt_lock);
428
429 #define CLK_MGT_ENTRY(_name)[PRCMU_##_name] = { (PRCM_##_name##_MGT_OFF), 0 }
430 struct clk_mgt clk_mgt[PRCMU_NUM_REG_CLOCKS] = {
431         CLK_MGT_ENTRY(SGACLK),
432         CLK_MGT_ENTRY(UARTCLK),
433         CLK_MGT_ENTRY(MSP02CLK),
434         CLK_MGT_ENTRY(MSP1CLK),
435         CLK_MGT_ENTRY(I2CCLK),
436         CLK_MGT_ENTRY(SDMMCCLK),
437         CLK_MGT_ENTRY(SLIMCLK),
438         CLK_MGT_ENTRY(PER1CLK),
439         CLK_MGT_ENTRY(PER2CLK),
440         CLK_MGT_ENTRY(PER3CLK),
441         CLK_MGT_ENTRY(PER5CLK),
442         CLK_MGT_ENTRY(PER6CLK),
443         CLK_MGT_ENTRY(PER7CLK),
444         CLK_MGT_ENTRY(LCDCLK),
445         CLK_MGT_ENTRY(BMLCLK),
446         CLK_MGT_ENTRY(HSITXCLK),
447         CLK_MGT_ENTRY(HSIRXCLK),
448         CLK_MGT_ENTRY(HDMICLK),
449         CLK_MGT_ENTRY(APEATCLK),
450         CLK_MGT_ENTRY(APETRACECLK),
451         CLK_MGT_ENTRY(MCDECLK),
452         CLK_MGT_ENTRY(IPI2CCLK),
453         CLK_MGT_ENTRY(DSIALTCLK),
454         CLK_MGT_ENTRY(DMACLK),
455         CLK_MGT_ENTRY(B2R2CLK),
456         CLK_MGT_ENTRY(TVCLK),
457         CLK_MGT_ENTRY(SSPCLK),
458         CLK_MGT_ENTRY(RNGCLK),
459         CLK_MGT_ENTRY(UICCCLK),
460 };
461
462 static struct regulator *hwacc_regulator[NUM_HW_ACC];
463 static struct regulator *hwacc_ret_regulator[NUM_HW_ACC];
464
465 static bool hwacc_enabled[NUM_HW_ACC];
466 static bool hwacc_ret_enabled[NUM_HW_ACC];
467
468 static const char *hwacc_regulator_name[NUM_HW_ACC] = {
469         [HW_ACC_SVAMMDSP]       = "hwacc-sva-mmdsp",
470         [HW_ACC_SVAPIPE]        = "hwacc-sva-pipe",
471         [HW_ACC_SIAMMDSP]       = "hwacc-sia-mmdsp",
472         [HW_ACC_SIAPIPE]        = "hwacc-sia-pipe",
473         [HW_ACC_SGA]            = "hwacc-sga",
474         [HW_ACC_B2R2]           = "hwacc-b2r2",
475         [HW_ACC_MCDE]           = "hwacc-mcde",
476         [HW_ACC_ESRAM1]         = "hwacc-esram1",
477         [HW_ACC_ESRAM2]         = "hwacc-esram2",
478         [HW_ACC_ESRAM3]         = "hwacc-esram3",
479         [HW_ACC_ESRAM4]         = "hwacc-esram4",
480 };
481
482 static const char *hwacc_ret_regulator_name[NUM_HW_ACC] = {
483         [HW_ACC_SVAMMDSP]       = "hwacc-sva-mmdsp-ret",
484         [HW_ACC_SIAMMDSP]       = "hwacc-sia-mmdsp-ret",
485         [HW_ACC_ESRAM1]         = "hwacc-esram1-ret",
486         [HW_ACC_ESRAM2]         = "hwacc-esram2-ret",
487         [HW_ACC_ESRAM3]         = "hwacc-esram3-ret",
488         [HW_ACC_ESRAM4]         = "hwacc-esram4-ret",
489 };
490
491 /*
492 * Used by MCDE to setup all necessary PRCMU registers
493 */
494 #define PRCMU_RESET_DSIPLL              0x00004000
495 #define PRCMU_UNCLAMP_DSIPLL            0x00400800
496
497 #define PRCMU_CLK_PLL_DIV_SHIFT         0
498 #define PRCMU_CLK_PLL_SW_SHIFT          5
499 #define PRCMU_CLK_38                    (1 << 9)
500 #define PRCMU_CLK_38_SRC                (1 << 10)
501 #define PRCMU_CLK_38_DIV                (1 << 11)
502
503 /* PLLDIV=12, PLLSW=4 (PLLDDR) */
504 #define PRCMU_DSI_CLOCK_SETTING         0x0000008C
505
506 /* PLLDIV=8, PLLSW=4 (PLLDDR) */
507 #define PRCMU_DSI_CLOCK_SETTING_U8400   0x00000088
508
509 /* DPI 50000000 Hz */
510 #define PRCMU_DPI_CLOCK_SETTING         ((1 << PRCMU_CLK_PLL_SW_SHIFT) | \
511                                           (16 << PRCMU_CLK_PLL_DIV_SHIFT))
512 #define PRCMU_DSI_LP_CLOCK_SETTING      0x00000E00
513
514 /* D=101, N=1, R=4, SELDIV2=0 */
515 #define PRCMU_PLLDSI_FREQ_SETTING       0x00040165
516
517 /* D=70, N=1, R=3, SELDIV2=0 */
518 #define PRCMU_PLLDSI_FREQ_SETTING_U8400 0x00030146
519
520 #define PRCMU_ENABLE_PLLDSI             0x00000001
521 #define PRCMU_DISABLE_PLLDSI            0x00000000
522 #define PRCMU_RELEASE_RESET_DSS         0x0000400C
523 #define PRCMU_DSI_PLLOUT_SEL_SETTING    0x00000202
524 /* ESC clk, div0=1, div1=1, div2=3 */
525 #define PRCMU_ENABLE_ESCAPE_CLOCK_DIV   0x07030101
526 #define PRCMU_DISABLE_ESCAPE_CLOCK_DIV  0x00030101
527 #define PRCMU_DSI_RESET_SW              0x00000007
528
529 #define PRCMU_PLLDSI_LOCKP_LOCKED       0x3
530
531 static struct {
532         u8 project_number;
533         u8 api_version;
534         u8 func_version;
535         u8 errata;
536 } prcmu_version;
537
538
539 int db8500_prcmu_enable_dsipll(void)
540 {
541         int i;
542         unsigned int plldsifreq;
543
544         /* Clear DSIPLL_RESETN */
545         writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_CLR);
546         /* Unclamp DSIPLL in/out */
547         writel(PRCMU_UNCLAMP_DSIPLL, PRCM_MMIP_LS_CLAMP_CLR);
548
549         if (prcmu_is_u8400())
550                 plldsifreq = PRCMU_PLLDSI_FREQ_SETTING_U8400;
551         else
552                 plldsifreq = PRCMU_PLLDSI_FREQ_SETTING;
553         /* Set DSI PLL FREQ */
554         writel(plldsifreq, PRCM_PLLDSI_FREQ);
555         writel(PRCMU_DSI_PLLOUT_SEL_SETTING, PRCM_DSI_PLLOUT_SEL);
556         /* Enable Escape clocks */
557         writel(PRCMU_ENABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV);
558
559         /* Start DSI PLL */
560         writel(PRCMU_ENABLE_PLLDSI, PRCM_PLLDSI_ENABLE);
561         /* Reset DSI PLL */
562         writel(PRCMU_DSI_RESET_SW, PRCM_DSI_SW_RESET);
563         for (i = 0; i < 10; i++) {
564                 if ((readl(PRCM_PLLDSI_LOCKP) & PRCMU_PLLDSI_LOCKP_LOCKED)
565                                         == PRCMU_PLLDSI_LOCKP_LOCKED)
566                         break;
567                 udelay(100);
568         }
569         /* Set DSIPLL_RESETN */
570         writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_SET);
571         return 0;
572 }
573
574 int db8500_prcmu_disable_dsipll(void)
575 {
576         /* Disable dsi pll */
577         writel(PRCMU_DISABLE_PLLDSI, PRCM_PLLDSI_ENABLE);
578         /* Disable  escapeclock */
579         writel(PRCMU_DISABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV);
580         return 0;
581 }
582
583 int db8500_prcmu_set_display_clocks(void)
584 {
585         unsigned long flags;
586         unsigned int dsiclk;
587
588         if (prcmu_is_u8400())
589                 dsiclk = PRCMU_DSI_CLOCK_SETTING_U8400;
590         else
591                 dsiclk = PRCMU_DSI_CLOCK_SETTING;
592
593         spin_lock_irqsave(&clk_mgt_lock, flags);
594
595         /* Grab the HW semaphore. */
596         while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
597                 cpu_relax();
598
599         writel(dsiclk, PRCM_HDMICLK_MGT);
600         writel(PRCMU_DSI_LP_CLOCK_SETTING, PRCM_TVCLK_MGT);
601         writel(PRCMU_DPI_CLOCK_SETTING, PRCM_LCDCLK_MGT);
602
603         /* Release the HW semaphore. */
604         writel(0, PRCM_SEM);
605
606         spin_unlock_irqrestore(&clk_mgt_lock, flags);
607
608         return 0;
609 }
610
611 /**
612  * prcmu_enable_spi2 - Enables pin muxing for SPI2 on OtherAlternateC1.
613  */
614 void prcmu_enable_spi2(void)
615 {
616         u32 reg;
617         unsigned long flags;
618
619         spin_lock_irqsave(&gpiocr_lock, flags);
620         reg = readl(PRCM_GPIOCR);
621         writel(reg | PRCM_GPIOCR_SPI2_SELECT, PRCM_GPIOCR);
622         spin_unlock_irqrestore(&gpiocr_lock, flags);
623 }
624
625 /**
626  * prcmu_disable_spi2 - Disables pin muxing for SPI2 on OtherAlternateC1.
627  */
628 void prcmu_disable_spi2(void)
629 {
630         u32 reg;
631         unsigned long flags;
632
633         spin_lock_irqsave(&gpiocr_lock, flags);
634         reg = readl(PRCM_GPIOCR);
635         writel(reg & ~PRCM_GPIOCR_SPI2_SELECT, PRCM_GPIOCR);
636         spin_unlock_irqrestore(&gpiocr_lock, flags);
637 }
638
639 bool prcmu_has_arm_maxopp(void)
640 {
641         return (readb(tcdm_base + PRCM_AVS_VARM_MAX_OPP) &
642                 PRCM_AVS_ISMODEENABLE_MASK) == PRCM_AVS_ISMODEENABLE_MASK;
643 }
644
645 bool prcmu_is_u8400(void)
646 {
647         return prcmu_version.project_number == PRCMU_PROJECT_ID_8400V2_0;
648 }
649
650 /**
651  * prcmu_get_boot_status - PRCMU boot status checking
652  * Returns: the current PRCMU boot status
653  */
654 int prcmu_get_boot_status(void)
655 {
656         return readb(tcdm_base + PRCM_BOOT_STATUS);
657 }
658
659 /**
660  * prcmu_set_rc_a2p - This function is used to run few power state sequences
661  * @val: Value to be set, i.e. transition requested
662  * Returns: 0 on success, -EINVAL on invalid argument
663  *
664  * This function is used to run the following power state sequences -
665  * any state to ApReset,  ApDeepSleep to ApExecute, ApExecute to ApDeepSleep
666  */
667 int prcmu_set_rc_a2p(enum romcode_write val)
668 {
669         if (val < RDY_2_DS || val > RDY_2_XP70_RST)
670                 return -EINVAL;
671         writeb(val, (tcdm_base + PRCM_ROMCODE_A2P));
672         return 0;
673 }
674
675 /**
676  * prcmu_get_rc_p2a - This function is used to get power state sequences
677  * Returns: the power transition that has last happened
678  *
679  * This function can return the following transitions-
680  * any state to ApReset,  ApDeepSleep to ApExecute, ApExecute to ApDeepSleep
681  */
682 enum romcode_read prcmu_get_rc_p2a(void)
683 {
684         return readb(tcdm_base + PRCM_ROMCODE_P2A);
685 }
686
687 /**
688  * prcmu_get_current_mode - Return the current XP70 power mode
689  * Returns: Returns the current AP(ARM) power mode: init,
690  * apBoot, apExecute, apDeepSleep, apSleep, apIdle, apReset
691  */
692 enum ap_pwrst prcmu_get_xp70_current_state(void)
693 {
694         return readb(tcdm_base + PRCM_XP70_CUR_PWR_STATE);
695 }
696
697 /**
698  * prcmu_config_clkout - Configure one of the programmable clock outputs.
699  * @clkout:     The CLKOUT number (0 or 1).
700  * @source:     The clock to be used (one of the PRCMU_CLKSRC_*).
701  * @div:        The divider to be applied.
702  *
703  * Configures one of the programmable clock outputs (CLKOUTs).
704  * @div should be in the range [1,63] to request a configuration, or 0 to
705  * inform that the configuration is no longer requested.
706  */
707 int prcmu_config_clkout(u8 clkout, u8 source, u8 div)
708 {
709         static int requests[2];
710         int r = 0;
711         unsigned long flags;
712         u32 val;
713         u32 bits;
714         u32 mask;
715         u32 div_mask;
716
717         BUG_ON(clkout > 1);
718         BUG_ON(div > 63);
719         BUG_ON((clkout == 0) && (source > PRCMU_CLKSRC_CLK009));
720
721         if (!div && !requests[clkout])
722                 return -EINVAL;
723
724         switch (clkout) {
725         case 0:
726                 div_mask = PRCM_CLKOCR_CLKODIV0_MASK;
727                 mask = (PRCM_CLKOCR_CLKODIV0_MASK | PRCM_CLKOCR_CLKOSEL0_MASK);
728                 bits = ((source << PRCM_CLKOCR_CLKOSEL0_SHIFT) |
729                         (div << PRCM_CLKOCR_CLKODIV0_SHIFT));
730                 break;
731         case 1:
732                 div_mask = PRCM_CLKOCR_CLKODIV1_MASK;
733                 mask = (PRCM_CLKOCR_CLKODIV1_MASK | PRCM_CLKOCR_CLKOSEL1_MASK |
734                         PRCM_CLKOCR_CLK1TYPE);
735                 bits = ((source << PRCM_CLKOCR_CLKOSEL1_SHIFT) |
736                         (div << PRCM_CLKOCR_CLKODIV1_SHIFT));
737                 break;
738         }
739         bits &= mask;
740
741         spin_lock_irqsave(&clkout_lock, flags);
742
743         val = readl(PRCM_CLKOCR);
744         if (val & div_mask) {
745                 if (div) {
746                         if ((val & mask) != bits) {
747                                 r = -EBUSY;
748                                 goto unlock_and_return;
749                         }
750                 } else {
751                         if ((val & mask & ~div_mask) != bits) {
752                                 r = -EINVAL;
753                                 goto unlock_and_return;
754                         }
755                 }
756         }
757         writel((bits | (val & ~mask)), PRCM_CLKOCR);
758         requests[clkout] += (div ? 1 : -1);
759
760 unlock_and_return:
761         spin_unlock_irqrestore(&clkout_lock, flags);
762
763         return r;
764 }
765
766 int db8500_prcmu_set_power_state(u8 state, bool keep_ulp_clk, bool keep_ap_pll)
767 {
768         unsigned long flags;
769
770         BUG_ON((state < PRCMU_AP_SLEEP) || (PRCMU_AP_DEEP_IDLE < state));
771
772         spin_lock_irqsave(&mb0_transfer.lock, flags);
773
774         while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
775                 cpu_relax();
776
777         writeb(MB0H_POWER_STATE_TRANS, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0));
778         writeb(state, (tcdm_base + PRCM_REQ_MB0_AP_POWER_STATE));
779         writeb((keep_ap_pll ? 1 : 0), (tcdm_base + PRCM_REQ_MB0_AP_PLL_STATE));
780         writeb((keep_ulp_clk ? 1 : 0),
781                 (tcdm_base + PRCM_REQ_MB0_ULP_CLOCK_STATE));
782         writeb(0, (tcdm_base + PRCM_REQ_MB0_DO_NOT_WFI));
783         writel(MBOX_BIT(0), PRCM_MBOX_CPU_SET);
784
785         spin_unlock_irqrestore(&mb0_transfer.lock, flags);
786
787         return 0;
788 }
789
790 /* This function should only be called while mb0_transfer.lock is held. */
791 static void config_wakeups(void)
792 {
793         const u8 header[2] = {
794                 MB0H_CONFIG_WAKEUPS_EXE,
795                 MB0H_CONFIG_WAKEUPS_SLEEP
796         };
797         static u32 last_dbb_events;
798         static u32 last_abb_events;
799         u32 dbb_events;
800         u32 abb_events;
801         unsigned int i;
802
803         dbb_events = mb0_transfer.req.dbb_irqs | mb0_transfer.req.dbb_wakeups;
804         dbb_events |= (WAKEUP_BIT_AC_WAKE_ACK | WAKEUP_BIT_AC_SLEEP_ACK);
805
806         abb_events = mb0_transfer.req.abb_events;
807
808         if ((dbb_events == last_dbb_events) && (abb_events == last_abb_events))
809                 return;
810
811         for (i = 0; i < 2; i++) {
812                 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
813                         cpu_relax();
814                 writel(dbb_events, (tcdm_base + PRCM_REQ_MB0_WAKEUP_8500));
815                 writel(abb_events, (tcdm_base + PRCM_REQ_MB0_WAKEUP_4500));
816                 writeb(header[i], (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0));
817                 writel(MBOX_BIT(0), PRCM_MBOX_CPU_SET);
818         }
819         last_dbb_events = dbb_events;
820         last_abb_events = abb_events;
821 }
822
823 void db8500_prcmu_enable_wakeups(u32 wakeups)
824 {
825         unsigned long flags;
826         u32 bits;
827         int i;
828
829         BUG_ON(wakeups != (wakeups & VALID_WAKEUPS));
830
831         for (i = 0, bits = 0; i < NUM_PRCMU_WAKEUP_INDICES; i++) {
832                 if (wakeups & BIT(i))
833                         bits |= prcmu_wakeup_bit[i];
834         }
835
836         spin_lock_irqsave(&mb0_transfer.lock, flags);
837
838         mb0_transfer.req.dbb_wakeups = bits;
839         config_wakeups();
840
841         spin_unlock_irqrestore(&mb0_transfer.lock, flags);
842 }
843
844 void db8500_prcmu_config_abb_event_readout(u32 abb_events)
845 {
846         unsigned long flags;
847
848         spin_lock_irqsave(&mb0_transfer.lock, flags);
849
850         mb0_transfer.req.abb_events = abb_events;
851         config_wakeups();
852
853         spin_unlock_irqrestore(&mb0_transfer.lock, flags);
854 }
855
856 void db8500_prcmu_get_abb_event_buffer(void __iomem **buf)
857 {
858         if (readb(tcdm_base + PRCM_ACK_MB0_READ_POINTER) & 1)
859                 *buf = (tcdm_base + PRCM_ACK_MB0_WAKEUP_1_4500);
860         else
861                 *buf = (tcdm_base + PRCM_ACK_MB0_WAKEUP_0_4500);
862 }
863
864 /**
865  * db8500_prcmu_set_arm_opp - set the appropriate ARM OPP
866  * @opp: The new ARM operating point to which transition is to be made
867  * Returns: 0 on success, non-zero on failure
868  *
869  * This function sets the the operating point of the ARM.
870  */
871 int db8500_prcmu_set_arm_opp(u8 opp)
872 {
873         int r;
874
875         if (opp < ARM_NO_CHANGE || opp > ARM_EXTCLK)
876                 return -EINVAL;
877
878         r = 0;
879
880         mutex_lock(&mb1_transfer.lock);
881
882         while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
883                 cpu_relax();
884
885         writeb(MB1H_ARM_APE_OPP, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
886         writeb(opp, (tcdm_base + PRCM_REQ_MB1_ARM_OPP));
887         writeb(APE_NO_CHANGE, (tcdm_base + PRCM_REQ_MB1_APE_OPP));
888
889         writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
890         wait_for_completion(&mb1_transfer.work);
891
892         if ((mb1_transfer.ack.header != MB1H_ARM_APE_OPP) ||
893                 (mb1_transfer.ack.arm_opp != opp))
894                 r = -EIO;
895
896         mutex_unlock(&mb1_transfer.lock);
897
898         return r;
899 }
900
901 /**
902  * db8500_prcmu_get_arm_opp - get the current ARM OPP
903  *
904  * Returns: the current ARM OPP
905  */
906 int db8500_prcmu_get_arm_opp(void)
907 {
908         return readb(tcdm_base + PRCM_ACK_MB1_CURRENT_ARM_OPP);
909 }
910
911 /**
912  * prcmu_get_ddr_opp - get the current DDR OPP
913  *
914  * Returns: the current DDR OPP
915  */
916 int prcmu_get_ddr_opp(void)
917 {
918         return readb(PRCM_DDR_SUBSYS_APE_MINBW);
919 }
920
921 /**
922  * set_ddr_opp - set the appropriate DDR OPP
923  * @opp: The new DDR operating point to which transition is to be made
924  * Returns: 0 on success, non-zero on failure
925  *
926  * This function sets the operating point of the DDR.
927  */
928 int prcmu_set_ddr_opp(u8 opp)
929 {
930         if (opp < DDR_100_OPP || opp > DDR_25_OPP)
931                 return -EINVAL;
932         /* Changing the DDR OPP can hang the hardware pre-v21 */
933         if (cpu_is_u8500v20_or_later() && !cpu_is_u8500v20())
934                 writeb(opp, PRCM_DDR_SUBSYS_APE_MINBW);
935
936         return 0;
937 }
938 /**
939  * set_ape_opp - set the appropriate APE OPP
940  * @opp: The new APE operating point to which transition is to be made
941  * Returns: 0 on success, non-zero on failure
942  *
943  * This function sets the operating point of the APE.
944  */
945 int prcmu_set_ape_opp(u8 opp)
946 {
947         int r = 0;
948
949         mutex_lock(&mb1_transfer.lock);
950
951         while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
952                 cpu_relax();
953
954         writeb(MB1H_ARM_APE_OPP, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
955         writeb(ARM_NO_CHANGE, (tcdm_base + PRCM_REQ_MB1_ARM_OPP));
956         writeb(opp, (tcdm_base + PRCM_REQ_MB1_APE_OPP));
957
958         writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
959         wait_for_completion(&mb1_transfer.work);
960
961         if ((mb1_transfer.ack.header != MB1H_ARM_APE_OPP) ||
962                 (mb1_transfer.ack.ape_opp != opp))
963                 r = -EIO;
964
965         mutex_unlock(&mb1_transfer.lock);
966
967         return r;
968 }
969
970 /**
971  * prcmu_get_ape_opp - get the current APE OPP
972  *
973  * Returns: the current APE OPP
974  */
975 int prcmu_get_ape_opp(void)
976 {
977         return readb(tcdm_base + PRCM_ACK_MB1_CURRENT_APE_OPP);
978 }
979
980 /**
981  * prcmu_request_ape_opp_100_voltage - Request APE OPP 100% voltage
982  * @enable: true to request the higher voltage, false to drop a request.
983  *
984  * Calls to this function to enable and disable requests must be balanced.
985  */
986 int prcmu_request_ape_opp_100_voltage(bool enable)
987 {
988         int r = 0;
989         u8 header;
990         static unsigned int requests;
991
992         mutex_lock(&mb1_transfer.lock);
993
994         if (enable) {
995                 if (0 != requests++)
996                         goto unlock_and_return;
997                 header = MB1H_REQUEST_APE_OPP_100_VOLT;
998         } else {
999                 if (requests == 0) {
1000                         r = -EIO;
1001                         goto unlock_and_return;
1002                 } else if (1 != requests--) {
1003                         goto unlock_and_return;
1004                 }
1005                 header = MB1H_RELEASE_APE_OPP_100_VOLT;
1006         }
1007
1008         while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
1009                 cpu_relax();
1010
1011         writeb(header, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
1012
1013         writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
1014         wait_for_completion(&mb1_transfer.work);
1015
1016         if ((mb1_transfer.ack.header != header) ||
1017                 ((mb1_transfer.ack.ape_voltage_status & BIT(0)) != 0))
1018                 r = -EIO;
1019
1020 unlock_and_return:
1021         mutex_unlock(&mb1_transfer.lock);
1022
1023         return r;
1024 }
1025
1026 /**
1027  * prcmu_release_usb_wakeup_state - release the state required by a USB wakeup
1028  *
1029  * This function releases the power state requirements of a USB wakeup.
1030  */
1031 int prcmu_release_usb_wakeup_state(void)
1032 {
1033         int r = 0;
1034
1035         mutex_lock(&mb1_transfer.lock);
1036
1037         while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
1038                 cpu_relax();
1039
1040         writeb(MB1H_RELEASE_USB_WAKEUP,
1041                 (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
1042
1043         writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
1044         wait_for_completion(&mb1_transfer.work);
1045
1046         if ((mb1_transfer.ack.header != MB1H_RELEASE_USB_WAKEUP) ||
1047                 ((mb1_transfer.ack.ape_voltage_status & BIT(0)) != 0))
1048                 r = -EIO;
1049
1050         mutex_unlock(&mb1_transfer.lock);
1051
1052         return r;
1053 }
1054
1055 static int request_pll(u8 clock, bool enable)
1056 {
1057         int r = 0;
1058
1059         if (clock == PRCMU_PLLSOC1)
1060                 clock = (enable ? PLL_SOC1_ON : PLL_SOC1_OFF);
1061         else
1062                 return -EINVAL;
1063
1064         mutex_lock(&mb1_transfer.lock);
1065
1066         while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
1067                 cpu_relax();
1068
1069         writeb(MB1H_PLL_ON_OFF, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
1070         writeb(clock, (tcdm_base + PRCM_REQ_MB1_PLL_ON_OFF));
1071
1072         writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
1073         wait_for_completion(&mb1_transfer.work);
1074
1075         if (mb1_transfer.ack.header != MB1H_PLL_ON_OFF)
1076                 r = -EIO;
1077
1078         mutex_unlock(&mb1_transfer.lock);
1079
1080         return r;
1081 }
1082
1083 /**
1084  * prcmu_set_hwacc - set the power state of a h/w accelerator
1085  * @hwacc_dev: The hardware accelerator (enum hw_acc_dev).
1086  * @state: The new power state (enum hw_acc_state).
1087  *
1088  * This function sets the power state of a hardware accelerator.
1089  * This function should not be called from interrupt context.
1090  *
1091  * NOTE! Deprecated, to be removed when all users switched over to use the
1092  * regulator framework API.
1093  */
1094 int prcmu_set_hwacc(u16 hwacc_dev, u8 state)
1095 {
1096         int r = 0;
1097         bool ram_retention = false;
1098         bool enable, enable_ret;
1099
1100         /* check argument */
1101         BUG_ON(hwacc_dev >= NUM_HW_ACC);
1102
1103         /* get state of switches */
1104         enable = hwacc_enabled[hwacc_dev];
1105         enable_ret = hwacc_ret_enabled[hwacc_dev];
1106
1107         /* set flag if retention is possible */
1108         switch (hwacc_dev) {
1109         case HW_ACC_SVAMMDSP:
1110         case HW_ACC_SIAMMDSP:
1111         case HW_ACC_ESRAM1:
1112         case HW_ACC_ESRAM2:
1113         case HW_ACC_ESRAM3:
1114         case HW_ACC_ESRAM4:
1115                 ram_retention = true;
1116                 break;
1117         }
1118
1119         /* check argument */
1120         BUG_ON(state > HW_ON);
1121         BUG_ON(state == HW_OFF_RAMRET && !ram_retention);
1122
1123         /* modify enable flags */
1124         switch (state) {
1125         case HW_OFF:
1126                 enable_ret = false;
1127                 enable = false;
1128                 break;
1129         case HW_ON:
1130                 enable = true;
1131                 break;
1132         case HW_OFF_RAMRET:
1133                 enable_ret = true;
1134                 enable = false;
1135                 break;
1136         }
1137
1138         /* get regulator (lazy) */
1139         if (hwacc_regulator[hwacc_dev] == NULL) {
1140                 hwacc_regulator[hwacc_dev] = regulator_get(NULL,
1141                         hwacc_regulator_name[hwacc_dev]);
1142                 if (IS_ERR(hwacc_regulator[hwacc_dev])) {
1143                         pr_err("prcmu: failed to get supply %s\n",
1144                                 hwacc_regulator_name[hwacc_dev]);
1145                         r = PTR_ERR(hwacc_regulator[hwacc_dev]);
1146                         goto out;
1147                 }
1148         }
1149
1150         if (ram_retention) {
1151                 if (hwacc_ret_regulator[hwacc_dev] == NULL) {
1152                         hwacc_ret_regulator[hwacc_dev] = regulator_get(NULL,
1153                                 hwacc_ret_regulator_name[hwacc_dev]);
1154                         if (IS_ERR(hwacc_ret_regulator[hwacc_dev])) {
1155                                 pr_err("prcmu: failed to get supply %s\n",
1156                                         hwacc_ret_regulator_name[hwacc_dev]);
1157                                 r = PTR_ERR(hwacc_ret_regulator[hwacc_dev]);
1158                                 goto out;
1159                         }
1160                 }
1161         }
1162
1163         /* set regulators */
1164         if (ram_retention) {
1165                 if (enable_ret && !hwacc_ret_enabled[hwacc_dev]) {
1166                         r = regulator_enable(hwacc_ret_regulator[hwacc_dev]);
1167                         if (r < 0) {
1168                                 pr_err("prcmu_set_hwacc: ret enable failed\n");
1169                                 goto out;
1170                         }
1171                         hwacc_ret_enabled[hwacc_dev] = true;
1172                 }
1173         }
1174
1175         if (enable && !hwacc_enabled[hwacc_dev]) {
1176                 r = regulator_enable(hwacc_regulator[hwacc_dev]);
1177                 if (r < 0) {
1178                         pr_err("prcmu_set_hwacc: enable failed\n");
1179                         goto out;
1180                 }
1181                 hwacc_enabled[hwacc_dev] = true;
1182         }
1183
1184         if (!enable && hwacc_enabled[hwacc_dev]) {
1185                 r = regulator_disable(hwacc_regulator[hwacc_dev]);
1186                 if (r < 0) {
1187                         pr_err("prcmu_set_hwacc: disable failed\n");
1188                         goto out;
1189                 }
1190                 hwacc_enabled[hwacc_dev] = false;
1191         }
1192
1193         if (ram_retention) {
1194                 if (!enable_ret && hwacc_ret_enabled[hwacc_dev]) {
1195                         r = regulator_disable(hwacc_ret_regulator[hwacc_dev]);
1196                         if (r < 0) {
1197                                 pr_err("prcmu_set_hwacc: ret disable failed\n");
1198                                 goto out;
1199                         }
1200                         hwacc_ret_enabled[hwacc_dev] = false;
1201                 }
1202         }
1203
1204 out:
1205         return r;
1206 }
1207 EXPORT_SYMBOL(prcmu_set_hwacc);
1208
1209 /**
1210  * db8500_prcmu_set_epod - set the state of a EPOD (power domain)
1211  * @epod_id: The EPOD to set
1212  * @epod_state: The new EPOD state
1213  *
1214  * This function sets the state of a EPOD (power domain). It may not be called
1215  * from interrupt context.
1216  */
1217 int db8500_prcmu_set_epod(u16 epod_id, u8 epod_state)
1218 {
1219         int r = 0;
1220         bool ram_retention = false;
1221         int i;
1222
1223         /* check argument */
1224         BUG_ON(epod_id >= NUM_EPOD_ID);
1225
1226         /* set flag if retention is possible */
1227         switch (epod_id) {
1228         case EPOD_ID_SVAMMDSP:
1229         case EPOD_ID_SIAMMDSP:
1230         case EPOD_ID_ESRAM12:
1231         case EPOD_ID_ESRAM34:
1232                 ram_retention = true;
1233                 break;
1234         }
1235
1236         /* check argument */
1237         BUG_ON(epod_state > EPOD_STATE_ON);
1238         BUG_ON(epod_state == EPOD_STATE_RAMRET && !ram_retention);
1239
1240         /* get lock */
1241         mutex_lock(&mb2_transfer.lock);
1242
1243         /* wait for mailbox */
1244         while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(2))
1245                 cpu_relax();
1246
1247         /* fill in mailbox */
1248         for (i = 0; i < NUM_EPOD_ID; i++)
1249                 writeb(EPOD_STATE_NO_CHANGE, (tcdm_base + PRCM_REQ_MB2 + i));
1250         writeb(epod_state, (tcdm_base + PRCM_REQ_MB2 + epod_id));
1251
1252         writeb(MB2H_DPS, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB2));
1253
1254         writel(MBOX_BIT(2), PRCM_MBOX_CPU_SET);
1255
1256         /*
1257          * The current firmware version does not handle errors correctly,
1258          * and we cannot recover if there is an error.
1259          * This is expected to change when the firmware is updated.
1260          */
1261         if (!wait_for_completion_timeout(&mb2_transfer.work,
1262                         msecs_to_jiffies(20000))) {
1263                 pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
1264                         __func__);
1265                 r = -EIO;
1266                 goto unlock_and_return;
1267         }
1268
1269         if (mb2_transfer.ack.status != HWACC_PWR_ST_OK)
1270                 r = -EIO;
1271
1272 unlock_and_return:
1273         mutex_unlock(&mb2_transfer.lock);
1274         return r;
1275 }
1276
1277 /**
1278  * prcmu_configure_auto_pm - Configure autonomous power management.
1279  * @sleep: Configuration for ApSleep.
1280  * @idle:  Configuration for ApIdle.
1281  */
1282 void prcmu_configure_auto_pm(struct prcmu_auto_pm_config *sleep,
1283         struct prcmu_auto_pm_config *idle)
1284 {
1285         u32 sleep_cfg;
1286         u32 idle_cfg;
1287         unsigned long flags;
1288
1289         BUG_ON((sleep == NULL) || (idle == NULL));
1290
1291         sleep_cfg = (sleep->sva_auto_pm_enable & 0xF);
1292         sleep_cfg = ((sleep_cfg << 4) | (sleep->sia_auto_pm_enable & 0xF));
1293         sleep_cfg = ((sleep_cfg << 8) | (sleep->sva_power_on & 0xFF));
1294         sleep_cfg = ((sleep_cfg << 8) | (sleep->sia_power_on & 0xFF));
1295         sleep_cfg = ((sleep_cfg << 4) | (sleep->sva_policy & 0xF));
1296         sleep_cfg = ((sleep_cfg << 4) | (sleep->sia_policy & 0xF));
1297
1298         idle_cfg = (idle->sva_auto_pm_enable & 0xF);
1299         idle_cfg = ((idle_cfg << 4) | (idle->sia_auto_pm_enable & 0xF));
1300         idle_cfg = ((idle_cfg << 8) | (idle->sva_power_on & 0xFF));
1301         idle_cfg = ((idle_cfg << 8) | (idle->sia_power_on & 0xFF));
1302         idle_cfg = ((idle_cfg << 4) | (idle->sva_policy & 0xF));
1303         idle_cfg = ((idle_cfg << 4) | (idle->sia_policy & 0xF));
1304
1305         spin_lock_irqsave(&mb2_transfer.auto_pm_lock, flags);
1306
1307         /*
1308          * The autonomous power management configuration is done through
1309          * fields in mailbox 2, but these fields are only used as shared
1310          * variables - i.e. there is no need to send a message.
1311          */
1312         writel(sleep_cfg, (tcdm_base + PRCM_REQ_MB2_AUTO_PM_SLEEP));
1313         writel(idle_cfg, (tcdm_base + PRCM_REQ_MB2_AUTO_PM_IDLE));
1314
1315         mb2_transfer.auto_pm_enabled =
1316                 ((sleep->sva_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
1317                  (sleep->sia_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
1318                  (idle->sva_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
1319                  (idle->sia_auto_pm_enable == PRCMU_AUTO_PM_ON));
1320
1321         spin_unlock_irqrestore(&mb2_transfer.auto_pm_lock, flags);
1322 }
1323 EXPORT_SYMBOL(prcmu_configure_auto_pm);
1324
1325 bool prcmu_is_auto_pm_enabled(void)
1326 {
1327         return mb2_transfer.auto_pm_enabled;
1328 }
1329
1330 static int request_sysclk(bool enable)
1331 {
1332         int r;
1333         unsigned long flags;
1334
1335         r = 0;
1336
1337         mutex_lock(&mb3_transfer.sysclk_lock);
1338
1339         spin_lock_irqsave(&mb3_transfer.lock, flags);
1340
1341         while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(3))
1342                 cpu_relax();
1343
1344         writeb((enable ? ON : OFF), (tcdm_base + PRCM_REQ_MB3_SYSCLK_MGT));
1345
1346         writeb(MB3H_SYSCLK, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB3));
1347         writel(MBOX_BIT(3), PRCM_MBOX_CPU_SET);
1348
1349         spin_unlock_irqrestore(&mb3_transfer.lock, flags);
1350
1351         /*
1352          * The firmware only sends an ACK if we want to enable the
1353          * SysClk, and it succeeds.
1354          */
1355         if (enable && !wait_for_completion_timeout(&mb3_transfer.sysclk_work,
1356                         msecs_to_jiffies(20000))) {
1357                 pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
1358                         __func__);
1359                 r = -EIO;
1360         }
1361
1362         mutex_unlock(&mb3_transfer.sysclk_lock);
1363
1364         return r;
1365 }
1366
1367 static int request_timclk(bool enable)
1368 {
1369         u32 val = (PRCM_TCR_DOZE_MODE | PRCM_TCR_TENSEL_MASK);
1370
1371         if (!enable)
1372                 val |= PRCM_TCR_STOP_TIMERS;
1373         writel(val, PRCM_TCR);
1374
1375         return 0;
1376 }
1377
1378 static int request_reg_clock(u8 clock, bool enable)
1379 {
1380         u32 val;
1381         unsigned long flags;
1382
1383         spin_lock_irqsave(&clk_mgt_lock, flags);
1384
1385         /* Grab the HW semaphore. */
1386         while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
1387                 cpu_relax();
1388
1389         val = readl(_PRCMU_BASE + clk_mgt[clock].offset);
1390         if (enable) {
1391                 val |= (PRCM_CLK_MGT_CLKEN | clk_mgt[clock].pllsw);
1392         } else {
1393                 clk_mgt[clock].pllsw = (val & PRCM_CLK_MGT_CLKPLLSW_MASK);
1394                 val &= ~(PRCM_CLK_MGT_CLKEN | PRCM_CLK_MGT_CLKPLLSW_MASK);
1395         }
1396         writel(val, (_PRCMU_BASE + clk_mgt[clock].offset));
1397
1398         /* Release the HW semaphore. */
1399         writel(0, PRCM_SEM);
1400
1401         spin_unlock_irqrestore(&clk_mgt_lock, flags);
1402
1403         return 0;
1404 }
1405
1406 static int request_sga_clock(u8 clock, bool enable)
1407 {
1408         u32 val;
1409         int ret;
1410
1411         if (enable) {
1412                 val = readl(PRCM_CGATING_BYPASS);
1413                 writel(val | PRCM_CGATING_BYPASS_ICN2, PRCM_CGATING_BYPASS);
1414         }
1415
1416         ret = request_reg_clock(clock, enable);
1417
1418         if (!ret && !enable) {
1419                 val = readl(PRCM_CGATING_BYPASS);
1420                 writel(val & ~PRCM_CGATING_BYPASS_ICN2, PRCM_CGATING_BYPASS);
1421         }
1422
1423         return ret;
1424 }
1425
1426 /**
1427  * db8500_prcmu_request_clock() - Request for a clock to be enabled or disabled.
1428  * @clock:      The clock for which the request is made.
1429  * @enable:     Whether the clock should be enabled (true) or disabled (false).
1430  *
1431  * This function should only be used by the clock implementation.
1432  * Do not use it from any other place!
1433  */
1434 int db8500_prcmu_request_clock(u8 clock, bool enable)
1435 {
1436         switch(clock) {
1437         case PRCMU_SGACLK:
1438                 return request_sga_clock(clock, enable);
1439         case PRCMU_TIMCLK:
1440                 return request_timclk(enable);
1441         case PRCMU_SYSCLK:
1442                 return request_sysclk(enable);
1443         case PRCMU_PLLSOC1:
1444                 return request_pll(clock, enable);
1445         default:
1446                 break;
1447         }
1448         if (clock < PRCMU_NUM_REG_CLOCKS)
1449                 return request_reg_clock(clock, enable);
1450         return -EINVAL;
1451 }
1452
1453 int db8500_prcmu_config_esram0_deep_sleep(u8 state)
1454 {
1455         if ((state > ESRAM0_DEEP_SLEEP_STATE_RET) ||
1456             (state < ESRAM0_DEEP_SLEEP_STATE_OFF))
1457                 return -EINVAL;
1458
1459         mutex_lock(&mb4_transfer.lock);
1460
1461         while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
1462                 cpu_relax();
1463
1464         writeb(MB4H_MEM_ST, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
1465         writeb(((DDR_PWR_STATE_OFFHIGHLAT << 4) | DDR_PWR_STATE_ON),
1466                (tcdm_base + PRCM_REQ_MB4_DDR_ST_AP_SLEEP_IDLE));
1467         writeb(DDR_PWR_STATE_ON,
1468                (tcdm_base + PRCM_REQ_MB4_DDR_ST_AP_DEEP_IDLE));
1469         writeb(state, (tcdm_base + PRCM_REQ_MB4_ESRAM0_ST));
1470
1471         writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
1472         wait_for_completion(&mb4_transfer.work);
1473
1474         mutex_unlock(&mb4_transfer.lock);
1475
1476         return 0;
1477 }
1478
1479 int prcmu_config_hotdog(u8 threshold)
1480 {
1481         mutex_lock(&mb4_transfer.lock);
1482
1483         while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
1484                 cpu_relax();
1485
1486         writeb(threshold, (tcdm_base + PRCM_REQ_MB4_HOTDOG_THRESHOLD));
1487         writeb(MB4H_HOTDOG, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
1488
1489         writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
1490         wait_for_completion(&mb4_transfer.work);
1491
1492         mutex_unlock(&mb4_transfer.lock);
1493
1494         return 0;
1495 }
1496
1497 int prcmu_config_hotmon(u8 low, u8 high)
1498 {
1499         mutex_lock(&mb4_transfer.lock);
1500
1501         while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
1502                 cpu_relax();
1503
1504         writeb(low, (tcdm_base + PRCM_REQ_MB4_HOTMON_LOW));
1505         writeb(high, (tcdm_base + PRCM_REQ_MB4_HOTMON_HIGH));
1506         writeb((HOTMON_CONFIG_LOW | HOTMON_CONFIG_HIGH),
1507                 (tcdm_base + PRCM_REQ_MB4_HOTMON_CONFIG));
1508         writeb(MB4H_HOTMON, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
1509
1510         writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
1511         wait_for_completion(&mb4_transfer.work);
1512
1513         mutex_unlock(&mb4_transfer.lock);
1514
1515         return 0;
1516 }
1517
1518 static int config_hot_period(u16 val)
1519 {
1520         mutex_lock(&mb4_transfer.lock);
1521
1522         while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
1523                 cpu_relax();
1524
1525         writew(val, (tcdm_base + PRCM_REQ_MB4_HOT_PERIOD));
1526         writeb(MB4H_HOT_PERIOD, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
1527
1528         writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
1529         wait_for_completion(&mb4_transfer.work);
1530
1531         mutex_unlock(&mb4_transfer.lock);
1532
1533         return 0;
1534 }
1535
1536 int prcmu_start_temp_sense(u16 cycles32k)
1537 {
1538         if (cycles32k == 0xFFFF)
1539                 return -EINVAL;
1540
1541         return config_hot_period(cycles32k);
1542 }
1543
1544 int prcmu_stop_temp_sense(void)
1545 {
1546         return config_hot_period(0xFFFF);
1547 }
1548
1549 static int prcmu_a9wdog(u8 cmd, u8 d0, u8 d1, u8 d2, u8 d3)
1550 {
1551
1552         mutex_lock(&mb4_transfer.lock);
1553
1554         while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
1555                 cpu_relax();
1556
1557         writeb(d0, (tcdm_base + PRCM_REQ_MB4_A9WDOG_0));
1558         writeb(d1, (tcdm_base + PRCM_REQ_MB4_A9WDOG_1));
1559         writeb(d2, (tcdm_base + PRCM_REQ_MB4_A9WDOG_2));
1560         writeb(d3, (tcdm_base + PRCM_REQ_MB4_A9WDOG_3));
1561
1562         writeb(cmd, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
1563
1564         writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
1565         wait_for_completion(&mb4_transfer.work);
1566
1567         mutex_unlock(&mb4_transfer.lock);
1568
1569         return 0;
1570
1571 }
1572
1573 int prcmu_config_a9wdog(u8 num, bool sleep_auto_off)
1574 {
1575         BUG_ON(num == 0 || num > 0xf);
1576         return prcmu_a9wdog(MB4H_A9WDOG_CONF, num, 0, 0,
1577                             sleep_auto_off ? A9WDOG_AUTO_OFF_EN :
1578                             A9WDOG_AUTO_OFF_DIS);
1579 }
1580
1581 int prcmu_enable_a9wdog(u8 id)
1582 {
1583         return prcmu_a9wdog(MB4H_A9WDOG_EN, id, 0, 0, 0);
1584 }
1585
1586 int prcmu_disable_a9wdog(u8 id)
1587 {
1588         return prcmu_a9wdog(MB4H_A9WDOG_DIS, id, 0, 0, 0);
1589 }
1590
1591 int prcmu_kick_a9wdog(u8 id)
1592 {
1593         return prcmu_a9wdog(MB4H_A9WDOG_KICK, id, 0, 0, 0);
1594 }
1595
1596 /*
1597  * The timeout is 28 bits wide, given in milliseconds.
1598  */
1599 #define MAX_WATCHDOG_TIMEOUT 131000
1600 int prcmu_load_a9wdog(u8 id, u32 timeout)
1601 {
1602         if (timeout > MAX_WATCHDOG_TIMEOUT)
1603                 /*
1604                  * Due to a calculation bug in the PRCMU firmware,
1605                  * timeouts cannot exceed 131 seconds.
1606                  */
1607                 return -EINVAL;
1608
1609         return prcmu_a9wdog(MB4H_A9WDOG_LOAD,
1610                             (id & A9WDOG_ID_MASK) |
1611                             /*
1612                              * Put the lowest 28 bits of the timeout at
1613                              * bit offset 4. The first four bits hold the id.
1614                              */
1615                             (u8)((timeout << 4) & 0xf0),
1616                             (u8)((timeout >> 4) & 0xff),
1617                             (u8)((timeout >> 12) & 0xff),
1618                             (u8)((timeout >> 20) & 0xff));
1619 }
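
/*
 * Illustrative usage sketch (not part of the original driver, excluded from
 * the build with "#if 0"): the expected call sequence for the A9 watchdog
 * services above, as a hypothetical watchdog driver might issue it. The
 * watchdog id 0 and the 30 second timeout are example values only.
 */
#if 0
static int example_start_a9_watchdog(void)
{
	int err;

	/* One watchdog, automatically disabled while in sleep. */
	err = prcmu_config_a9wdog(1, true);
	if (err)
		return err;

	/* Load a 30 s timeout into watchdog 0, then start it. */
	err = prcmu_load_a9wdog(0, 30000);
	if (err)
		return err;

	return prcmu_enable_a9wdog(0);
}

/* Called periodically (e.g. from the watchdog core) to restart the count. */
static int example_ping_a9_watchdog(void)
{
	return prcmu_kick_a9wdog(0);
}
#endif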
1620
1621 /**
1622  * prcmu_set_clock_divider() - Configure the clock divider.
1623  * @clock:      The clock for which the request is made.
1624  * @divider:    The clock divider. (< 32)
1625  *
1626  * This function should only be used by the clock implementation.
1627  * Do not use it from any other place!
1628  */
1629 int prcmu_set_clock_divider(u8 clock, u8 divider)
1630 {
1631         u32 val;
1632         unsigned long flags;
1633
1634         if ((clock >= PRCMU_NUM_REG_CLOCKS) || (divider < 1) || (31 < divider))
1635                 return -EINVAL;
1636
1637         spin_lock_irqsave(&clk_mgt_lock, flags);
1638
1639         /* Grab the HW semaphore. */
1640         while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
1641                 cpu_relax();
1642
1643         val = readl(_PRCMU_BASE + clk_mgt[clock].offset);
1644         val &= ~(PRCM_CLK_MGT_CLKPLLDIV_MASK);
1645         val |= (u32)divider;
1646         writel(val, (_PRCMU_BASE + clk_mgt[clock].offset));
1647
1648         /* Release the HW semaphore. */
1649         writel(0, PRCM_SEM);
1650
1651         spin_unlock_irqrestore(&clk_mgt_lock, flags);
1652
1653         return 0;
1654 }
1655
1656 /**
1657  * prcmu_abb_read() - Read register value(s) from the ABB.
1658  * @slave:      The I2C slave address.
1659  * @reg:        The (start) register address.
1660  * @value:      The read out value(s).
1661  * @size:       The number of registers to read.
1662  *
1663  * Reads register value(s) from the ABB.
1664  * @size has to be 1 for the current firmware version.
1665  */
1666 int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
1667 {
1668         int r;
1669
1670         if (size != 1)
1671                 return -EINVAL;
1672
1673         mutex_lock(&mb5_transfer.lock);
1674
1675         while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
1676                 cpu_relax();
1677
1678         writeb(PRCMU_I2C_READ(slave), (tcdm_base + PRCM_REQ_MB5_I2C_SLAVE_OP));
1679         writeb(PRCMU_I2C_STOP_EN, (tcdm_base + PRCM_REQ_MB5_I2C_HW_BITS));
1680         writeb(reg, (tcdm_base + PRCM_REQ_MB5_I2C_REG));
1681         writeb(0, (tcdm_base + PRCM_REQ_MB5_I2C_VAL));
1682
1683         writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET);
1684
1685         if (!wait_for_completion_timeout(&mb5_transfer.work,
1686                                 msecs_to_jiffies(20000))) {
1687                 pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
1688                         __func__);
1689                 r = -EIO;
1690         } else {
1691                 r = ((mb5_transfer.ack.status == I2C_RD_OK) ? 0 : -EIO);
1692         }
1693
1694         if (!r)
1695                 *value = mb5_transfer.ack.value;
1696
1697         mutex_unlock(&mb5_transfer.lock);
1698
1699         return r;
1700 }
1701
1702 /**
1703  * prcmu_abb_write() - Write register value(s) to the ABB.
1704  * @slave:      The I2C slave address.
1705  * @reg:        The (start) register address.
1706  * @value:      The value(s) to write.
1707  * @size:       The number of registers to write.
1708  *
1709  * Writes register value(s) to the ABB.
1710  * @size has to be 1 for the current firmware version.
1711  */
1712 int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
1713 {
1714         int r;
1715
1716         if (size != 1)
1717                 return -EINVAL;
1718
1719         mutex_lock(&mb5_transfer.lock);
1720
1721         while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
1722                 cpu_relax();
1723
1724         writeb(PRCMU_I2C_WRITE(slave), (tcdm_base + PRCM_REQ_MB5_I2C_SLAVE_OP));
1725         writeb(PRCMU_I2C_STOP_EN, (tcdm_base + PRCM_REQ_MB5_I2C_HW_BITS));
1726         writeb(reg, (tcdm_base + PRCM_REQ_MB5_I2C_REG));
1727         writeb(*value, (tcdm_base + PRCM_REQ_MB5_I2C_VAL));
1728
1729         writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET);
1730
1731         if (!wait_for_completion_timeout(&mb5_transfer.work,
1732                                 msecs_to_jiffies(20000))) {
1733                 pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
1734                         __func__);
1735                 r = -EIO;
1736         } else {
1737                 r = ((mb5_transfer.ack.status == I2C_WR_OK) ? 0 : -EIO);
1738         }
1739
1740         mutex_unlock(&mb5_transfer.lock);
1741
1742         return r;
1743 }
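
/*
 * Illustrative usage sketch (not part of the original driver, excluded from
 * the build with "#if 0"): a read-modify-write of a single ABB register
 * through the MB5 I2C services above. The slave address, register offset and
 * bit position are made-up example values, not real AB8500 definitions.
 */
#if 0
static int example_abb_set_bit(void)
{
	u8 val;
	int err;

	/* Only single-register transfers are supported (size must be 1). */
	err = prcmu_abb_read(0x05, 0x40, &val, 1);
	if (err)
		return err;

	val |= BIT(0);

	return prcmu_abb_write(0x05, 0x40, &val, 1);
}
#endif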
1744
1745 /**
1746  * prcmu_ac_wake_req - should be called whenever the ARM wants to wake up the modem
1747  */
1748 void prcmu_ac_wake_req(void)
1749 {
1750         u32 val;
1751         u32 status;
1752
1753         mutex_lock(&mb0_transfer.ac_wake_lock);
1754
1755         val = readl(PRCM_HOSTACCESS_REQ);
1756         if (val & PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ)
1757                 goto unlock_and_return;
1758
1759         atomic_set(&ac_wake_req_state, 1);
1760
1761 retry:
1762         writel((val | PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ), PRCM_HOSTACCESS_REQ);
1763
1764         if (!wait_for_completion_timeout(&mb0_transfer.ac_wake_work,
1765                         msecs_to_jiffies(5000))) {
1766                 pr_crit("prcmu: %s timed out (5 s) waiting for a reply.\n",
1767                         __func__);
1768                 goto unlock_and_return;
1769         }
1770
1771         /*
1772          * The modem can generate an AC_WAKE_ACK, and then still go to sleep.
1773          * As a workaround, we wait, and then check that the modem is indeed
1774          * awake (in terms of the value of the PRCM_MOD_AWAKE_STATUS
1775          * register, which may not be the whole truth).
1776          */
1777         udelay(400);
1778         status = (readl(PRCM_MOD_AWAKE_STATUS) & BITS(0, 2));
1779         if (status != (PRCM_MOD_AWAKE_STATUS_PRCM_MOD_AAPD_AWAKE |
1780                         PRCM_MOD_AWAKE_STATUS_PRCM_MOD_COREPD_AWAKE)) {
1781                 pr_err("prcmu: %s received ack, but modem not awake (0x%X).\n",
1782                         __func__, status);
1783                 udelay(1200);
1784                 writel(val, PRCM_HOSTACCESS_REQ);
1785                 if (wait_for_completion_timeout(&mb0_transfer.ac_wake_work,
1786                                 msecs_to_jiffies(5000)))
1787                         goto retry;
1788                 pr_crit("prcmu: %s timed out (5 s) waiting for AC_SLEEP_ACK.\n",
1789                         __func__);
1790         }
1791
1792 unlock_and_return:
1793         mutex_unlock(&mb0_transfer.ac_wake_lock);
1794 }
1795
1796 /**
1797  * prcmu_ac_sleep_req - called when the ARM no longer needs to talk to the modem
1798  */
1799 void prcmu_ac_sleep_req(void)
1800 {
1801         u32 val;
1802
1803         mutex_lock(&mb0_transfer.ac_wake_lock);
1804
1805         val = readl(PRCM_HOSTACCESS_REQ);
1806         if (!(val & PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ))
1807                 goto unlock_and_return;
1808
1809         writel((val & ~PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ),
1810                 PRCM_HOSTACCESS_REQ);
1811
1812         if (!wait_for_completion_timeout(&mb0_transfer.ac_wake_work,
1813                         msecs_to_jiffies(5000))) {
1814                 pr_crit("prcmu: %s timed out (5 s) waiting for a reply.\n",
1815                         __func__);
1816         }
1817
1818         atomic_set(&ac_wake_req_state, 0);
1819
1820 unlock_and_return:
1821         mutex_unlock(&mb0_transfer.ac_wake_lock);
1822 }
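
/*
 * Illustrative usage sketch (not part of the original driver, excluded from
 * the build with "#if 0"): code that needs the modem awake is expected to
 * bracket its accesses with the two calls above. The "talk to the modem"
 * step is a placeholder for whatever shared-memory or IPC traffic the caller
 * performs.
 */
#if 0
static void example_modem_access(void)
{
	/* Request host access; blocks until the modem acks (or times out). */
	prcmu_ac_wake_req();

	/* ... talk to the modem here ... */

	/* Drop the host access request so the modem may go back to sleep. */
	prcmu_ac_sleep_req();
}
#endif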
1823
1824 bool db8500_prcmu_is_ac_wake_requested(void)
1825 {
1826         return (atomic_read(&ac_wake_req_state) != 0);
1827 }
1828
1829 /**
1830  * db8500_prcmu_system_reset - System reset
1831  *
1832  * Saves the reset reason code and then sets the APE_SOFTRST register, which
1833  * fires an interrupt to the firmware.
1834  */
1835 void db8500_prcmu_system_reset(u16 reset_code)
1836 {
1837         writew(reset_code, (tcdm_base + PRCM_SW_RST_REASON));
1838         writel(1, PRCM_APE_SOFTRST);
1839 }
1840
1841 /**
1842  * db8500_prcmu_get_reset_code - Retrieve SW reset reason code
1843  *
1844  * Retrieves the reset reason code stored by prcmu_system_reset() before
1845  * last restart.
1846  */
1847 u16 db8500_prcmu_get_reset_code(void)
1848 {
1849         return readw(tcdm_base + PRCM_SW_RST_REASON);
1850 }
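
/*
 * Illustrative usage sketch (not part of the original driver, excluded from
 * the build with "#if 0"): storing a reason code across a software reset.
 * The reset code value is a made-up example; the set of meaningful codes is
 * defined by the platform's restart handling, not by this driver.
 */
#if 0
static void example_reboot_with_reason(void)
{
	/* Record why we are restarting, then trigger the APE soft reset. */
	db8500_prcmu_system_reset(0xBEEF);
}

static void example_check_last_reset(void)
{
	u16 code = db8500_prcmu_get_reset_code();

	pr_info("prcmu: last SW reset reason code: 0x%04x\n", code);
}
#endif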
1851
1852 /**
1853  * prcmu_modem_reset - ask the PRCMU to reset the modem
1854  */
1855 void prcmu_modem_reset(void)
1856 {
1857         mutex_lock(&mb1_transfer.lock);
1858
1859         while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
1860                 cpu_relax();
1861
1862         writeb(MB1H_RESET_MODEM, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
1863         writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
1864         wait_for_completion(&mb1_transfer.work);
1865
1866         /*
1867          * No need to check the response from the PRCMU, as the modem should
1868          * go into the reset state. This state is already managed by the upper layer.
1869          */
1870
1871         mutex_unlock(&mb1_transfer.lock);
1872 }
1873
1874 static void ack_dbb_wakeup(void)
1875 {
1876         unsigned long flags;
1877
1878         spin_lock_irqsave(&mb0_transfer.lock, flags);
1879
1880         while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
1881                 cpu_relax();
1882
1883         writeb(MB0H_READ_WAKEUP_ACK, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0));
1884         writel(MBOX_BIT(0), PRCM_MBOX_CPU_SET);
1885
1886         spin_unlock_irqrestore(&mb0_transfer.lock, flags);
1887 }
1888
1889 static inline void print_unknown_header_warning(u8 n, u8 header)
1890 {
1891         pr_warning("prcmu: Unknown message header (%d) in mailbox %d.\n",
1892                 header, n);
1893 }
1894
1895 static bool read_mailbox_0(void)
1896 {
1897         bool r;
1898         u32 ev;
1899         unsigned int n;
1900         u8 header;
1901
1902         header = readb(tcdm_base + PRCM_MBOX_HEADER_ACK_MB0);
1903         switch (header) {
1904         case MB0H_WAKEUP_EXE:
1905         case MB0H_WAKEUP_SLEEP:
1906                 if (readb(tcdm_base + PRCM_ACK_MB0_READ_POINTER) & 1)
1907                         ev = readl(tcdm_base + PRCM_ACK_MB0_WAKEUP_1_8500);
1908                 else
1909                         ev = readl(tcdm_base + PRCM_ACK_MB0_WAKEUP_0_8500);
1910
1911                 if (ev & (WAKEUP_BIT_AC_WAKE_ACK | WAKEUP_BIT_AC_SLEEP_ACK))
1912                         complete(&mb0_transfer.ac_wake_work);
1913                 if (ev & WAKEUP_BIT_SYSCLK_OK)
1914                         complete(&mb3_transfer.sysclk_work);
1915
1916                 ev &= mb0_transfer.req.dbb_irqs;
1917
1918                 for (n = 0; n < NUM_PRCMU_WAKEUPS; n++) {
1919                         if (ev & prcmu_irq_bit[n])
1920                                 generic_handle_irq(IRQ_PRCMU_BASE + n);
1921                 }
1922                 r = true;
1923                 break;
1924         default:
1925                 print_unknown_header_warning(0, header);
1926                 r = false;
1927                 break;
1928         }
1929         writel(MBOX_BIT(0), PRCM_ARM_IT1_CLR);
1930         return r;
1931 }
1932
1933 static bool read_mailbox_1(void)
1934 {
1935         mb1_transfer.ack.header = readb(tcdm_base + PRCM_MBOX_HEADER_REQ_MB1);
1936         mb1_transfer.ack.arm_opp = readb(tcdm_base +
1937                 PRCM_ACK_MB1_CURRENT_ARM_OPP);
1938         mb1_transfer.ack.ape_opp = readb(tcdm_base +
1939                 PRCM_ACK_MB1_CURRENT_APE_OPP);
1940         mb1_transfer.ack.ape_voltage_status = readb(tcdm_base +
1941                 PRCM_ACK_MB1_APE_VOLTAGE_STATUS);
1942         writel(MBOX_BIT(1), PRCM_ARM_IT1_CLR);
1943         complete(&mb1_transfer.work);
1944         return false;
1945 }
1946
1947 static bool read_mailbox_2(void)
1948 {
1949         mb2_transfer.ack.status = readb(tcdm_base + PRCM_ACK_MB2_DPS_STATUS);
1950         writel(MBOX_BIT(2), PRCM_ARM_IT1_CLR);
1951         complete(&mb2_transfer.work);
1952         return false;
1953 }
1954
1955 static bool read_mailbox_3(void)
1956 {
1957         writel(MBOX_BIT(3), PRCM_ARM_IT1_CLR);
1958         return false;
1959 }
1960
1961 static bool read_mailbox_4(void)
1962 {
1963         u8 header;
1964         bool do_complete = true;
1965
1966         header = readb(tcdm_base + PRCM_MBOX_HEADER_REQ_MB4);
1967         switch (header) {
1968         case MB4H_MEM_ST:
1969         case MB4H_HOTDOG:
1970         case MB4H_HOTMON:
1971         case MB4H_HOT_PERIOD:
1972         case MB4H_A9WDOG_CONF:
1973         case MB4H_A9WDOG_EN:
1974         case MB4H_A9WDOG_DIS:
1975         case MB4H_A9WDOG_LOAD:
1976         case MB4H_A9WDOG_KICK:
1977                 break;
1978         default:
1979                 print_unknown_header_warning(4, header);
1980                 do_complete = false;
1981                 break;
1982         }
1983
1984         writel(MBOX_BIT(4), PRCM_ARM_IT1_CLR);
1985
1986         if (do_complete)
1987                 complete(&mb4_transfer.work);
1988
1989         return false;
1990 }
1991
1992 static bool read_mailbox_5(void)
1993 {
1994         mb5_transfer.ack.status = readb(tcdm_base + PRCM_ACK_MB5_I2C_STATUS);
1995         mb5_transfer.ack.value = readb(tcdm_base + PRCM_ACK_MB5_I2C_VAL);
1996         writel(MBOX_BIT(5), PRCM_ARM_IT1_CLR);
1997         complete(&mb5_transfer.work);
1998         return false;
1999 }
2000
2001 static bool read_mailbox_6(void)
2002 {
2003         writel(MBOX_BIT(6), PRCM_ARM_IT1_CLR);
2004         return false;
2005 }
2006
2007 static bool read_mailbox_7(void)
2008 {
2009         writel(MBOX_BIT(7), PRCM_ARM_IT1_CLR);
2010         return false;
2011 }
2012
2013 static bool (* const read_mailbox[NUM_MB])(void) = {
2014         read_mailbox_0,
2015         read_mailbox_1,
2016         read_mailbox_2,
2017         read_mailbox_3,
2018         read_mailbox_4,
2019         read_mailbox_5,
2020         read_mailbox_6,
2021         read_mailbox_7
2022 };
2023
2024 static irqreturn_t prcmu_irq_handler(int irq, void *data)
2025 {
2026         u32 bits;
2027         u8 n;
2028         irqreturn_t r;
2029
2030         bits = (readl(PRCM_ARM_IT1_VAL) & ALL_MBOX_BITS);
2031         if (unlikely(!bits))
2032                 return IRQ_NONE;
2033
2034         r = IRQ_HANDLED;
2035         for (n = 0; bits; n++) {
2036                 if (bits & MBOX_BIT(n)) {
2037                         bits -= MBOX_BIT(n);
2038                         if (read_mailbox[n]())
2039                                 r = IRQ_WAKE_THREAD;
2040                 }
2041         }
2042         return r;
2043 }
2044
2045 static irqreturn_t prcmu_irq_thread_fn(int irq, void *data)
2046 {
2047         ack_dbb_wakeup();
2048         return IRQ_HANDLED;
2049 }
2050
2051 static void prcmu_mask_work(struct work_struct *work)
2052 {
2053         unsigned long flags;
2054
2055         spin_lock_irqsave(&mb0_transfer.lock, flags);
2056
2057         config_wakeups();
2058
2059         spin_unlock_irqrestore(&mb0_transfer.lock, flags);
2060 }
2061
2062 static void prcmu_irq_mask(struct irq_data *d)
2063 {
2064         unsigned long flags;
2065
2066         spin_lock_irqsave(&mb0_transfer.dbb_irqs_lock, flags);
2067
2068         mb0_transfer.req.dbb_irqs &= ~prcmu_irq_bit[d->irq - IRQ_PRCMU_BASE];
2069
2070         spin_unlock_irqrestore(&mb0_transfer.dbb_irqs_lock, flags);
2071
2072         if (d->irq != IRQ_PRCMU_CA_SLEEP)
2073                 schedule_work(&mb0_transfer.mask_work);
2074 }
2075
2076 static void prcmu_irq_unmask(struct irq_data *d)
2077 {
2078         unsigned long flags;
2079
2080         spin_lock_irqsave(&mb0_transfer.dbb_irqs_lock, flags);
2081
2082         mb0_transfer.req.dbb_irqs |= prcmu_irq_bit[d->irq - IRQ_PRCMU_BASE];
2083
2084         spin_unlock_irqrestore(&mb0_transfer.dbb_irqs_lock, flags);
2085
2086         if (d->irq != IRQ_PRCMU_CA_SLEEP)
2087                 schedule_work(&mb0_transfer.mask_work);
2088 }
2089
2090 static void noop(struct irq_data *d)
2091 {
2092 }
2093
2094 static struct irq_chip prcmu_irq_chip = {
2095         .name           = "prcmu",
2096         .irq_disable    = prcmu_irq_mask,
2097         .irq_ack        = noop,
2098         .irq_mask       = prcmu_irq_mask,
2099         .irq_unmask     = prcmu_irq_unmask,
2100 };
2101
2102 void __init db8500_prcmu_early_init(void)
2103 {
2104         unsigned int i;
2105
2106         if (cpu_is_u8500v1()) {
2107                 tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE_V1);
2108         } else if (cpu_is_u8500v2()) {
2109                 void __iomem *tcpm_base = ioremap_nocache(U8500_PRCMU_TCPM_BASE, SZ_4K);
2110
2111                 if (tcpm_base != NULL) {
2112                         int version;
2113                         version = readl(tcpm_base + PRCMU_FW_VERSION_OFFSET);
2114                         prcmu_version.project_number = version & 0xFF;
2115                         prcmu_version.api_version = (version >> 8) & 0xFF;
2116                         prcmu_version.func_version = (version >> 16) & 0xFF;
2117                         prcmu_version.errata = (version >> 24) & 0xFF;
2118                         pr_info("PRCMU firmware version %d.%d.%d\n",
2119                                 (version >> 8) & 0xFF, (version >> 16) & 0xFF,
2120                                 (version >> 24) & 0xFF);
2121                         iounmap(tcpm_base);
2122                 }
2123
2124                 tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE);
2125         } else {
2126                 pr_err("prcmu: Unsupported chip version\n");
2127                 BUG();
2128         }
2129
2130         spin_lock_init(&mb0_transfer.lock);
2131         spin_lock_init(&mb0_transfer.dbb_irqs_lock);
2132         mutex_init(&mb0_transfer.ac_wake_lock);
2133         init_completion(&mb0_transfer.ac_wake_work);
2134         mutex_init(&mb1_transfer.lock);
2135         init_completion(&mb1_transfer.work);
2136         mutex_init(&mb2_transfer.lock);
2137         init_completion(&mb2_transfer.work);
2138         spin_lock_init(&mb2_transfer.auto_pm_lock);
2139         spin_lock_init(&mb3_transfer.lock);
2140         mutex_init(&mb3_transfer.sysclk_lock);
2141         init_completion(&mb3_transfer.sysclk_work);
2142         mutex_init(&mb4_transfer.lock);
2143         init_completion(&mb4_transfer.work);
2144         mutex_init(&mb5_transfer.lock);
2145         init_completion(&mb5_transfer.work);
2146
2147         INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work);
2148
2149         /* Initialize irqs. */
2150         for (i = 0; i < NUM_PRCMU_WAKEUPS; i++) {
2151                 unsigned int irq;
2152
2153                 irq = IRQ_PRCMU_BASE + i;
2154                 irq_set_chip_and_handler(irq, &prcmu_irq_chip,
2155                                          handle_simple_irq);
2156                 set_irq_flags(irq, IRQF_VALID);
2157         }
2158 }
2159
2160 static void __init db8500_prcmu_init_clkforce(void)
2161 {
2162         u32 val;
2163
2164         val = readl(PRCM_A9PL_FORCE_CLKEN);
2165         val &= ~(PRCM_A9PL_FORCE_CLKEN_PRCM_A9PL_FORCE_CLKEN |
2166                 PRCM_A9PL_FORCE_CLKEN_PRCM_A9AXI_FORCE_CLKEN);
2167         writel(val, (PRCM_A9PL_FORCE_CLKEN));
2168 }
2169
2170 /*
2171  * Power domain switches (ePODs) modeled as regulators for the DB8500 SoC
2172  */
2173 static struct regulator_consumer_supply db8500_vape_consumers[] = {
2174         REGULATOR_SUPPLY("v-ape", NULL),
2175         REGULATOR_SUPPLY("v-i2c", "nmk-i2c.0"),
2176         REGULATOR_SUPPLY("v-i2c", "nmk-i2c.1"),
2177         REGULATOR_SUPPLY("v-i2c", "nmk-i2c.2"),
2178         REGULATOR_SUPPLY("v-i2c", "nmk-i2c.3"),
2179         /* "v-mmc" changed to "vcore" in the mainline kernel */
2180         REGULATOR_SUPPLY("vcore", "sdi0"),
2181         REGULATOR_SUPPLY("vcore", "sdi1"),
2182         REGULATOR_SUPPLY("vcore", "sdi2"),
2183         REGULATOR_SUPPLY("vcore", "sdi3"),
2184         REGULATOR_SUPPLY("vcore", "sdi4"),
2185         REGULATOR_SUPPLY("v-dma", "dma40.0"),
2186         REGULATOR_SUPPLY("v-ape", "ab8500-usb.0"),
2187         /* "v-uart" changed to "vcore" in the mainline kernel */
2188         REGULATOR_SUPPLY("vcore", "uart0"),
2189         REGULATOR_SUPPLY("vcore", "uart1"),
2190         REGULATOR_SUPPLY("vcore", "uart2"),
2191         REGULATOR_SUPPLY("v-ape", "nmk-ske-keypad.0"),
2192 };
2193
2194 static struct regulator_consumer_supply db8500_vsmps2_consumers[] = {
2195         /* CG2900 and CW1200 power to off-chip peripherals */
2196         REGULATOR_SUPPLY("gbf_1v8", "cg2900-uart.0"),
2197         REGULATOR_SUPPLY("wlan_1v8", "cw1200.0"),
2198         REGULATOR_SUPPLY("musb_1v8", "ab8500-usb.0"),
2199         /* AV8100 regulator */
2200         REGULATOR_SUPPLY("hdmi_1v8", "0-0070"),
2201 };
2202
2203 static struct regulator_consumer_supply db8500_b2r2_mcde_consumers[] = {
2204         REGULATOR_SUPPLY("vsupply", "b2r2.0"),
2205         REGULATOR_SUPPLY("vsupply", "mcde"),
2206 };
2207
2208 /* SVA MMDSP regulator switch */
2209 static struct regulator_consumer_supply db8500_svammdsp_consumers[] = {
2210         REGULATOR_SUPPLY("sva-mmdsp", "cm_control"),
2211 };
2212
2213 /* SVA pipe regulator switch */
2214 static struct regulator_consumer_supply db8500_svapipe_consumers[] = {
2215         REGULATOR_SUPPLY("sva-pipe", "cm_control"),
2216 };
2217
2218 /* SIA MMDSP regulator switch */
2219 static struct regulator_consumer_supply db8500_siammdsp_consumers[] = {
2220         REGULATOR_SUPPLY("sia-mmdsp", "cm_control"),
2221 };
2222
2223 /* SIA pipe regulator switch */
2224 static struct regulator_consumer_supply db8500_siapipe_consumers[] = {
2225         REGULATOR_SUPPLY("sia-pipe", "cm_control"),
2226 };
2227
2228 static struct regulator_consumer_supply db8500_sga_consumers[] = {
2229         REGULATOR_SUPPLY("v-mali", NULL),
2230 };
2231
2232 /* ESRAM1 and 2 regulator switch */
2233 static struct regulator_consumer_supply db8500_esram12_consumers[] = {
2234         REGULATOR_SUPPLY("esram12", "cm_control"),
2235 };
2236
2237 /* ESRAM3 and 4 regulator switch */
2238 static struct regulator_consumer_supply db8500_esram34_consumers[] = {
2239         REGULATOR_SUPPLY("v-esram34", "mcde"),
2240         REGULATOR_SUPPLY("esram34", "cm_control"),
2241 };
2242
2243 static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
2244         [DB8500_REGULATOR_VAPE] = {
2245                 .constraints = {
2246                         .name = "db8500-vape",
2247                         .valid_ops_mask = REGULATOR_CHANGE_STATUS,
2248                 },
2249                 .consumer_supplies = db8500_vape_consumers,
2250                 .num_consumer_supplies = ARRAY_SIZE(db8500_vape_consumers),
2251         },
2252         [DB8500_REGULATOR_VARM] = {
2253                 .constraints = {
2254                         .name = "db8500-varm",
2255                         .valid_ops_mask = REGULATOR_CHANGE_STATUS,
2256                 },
2257         },
2258         [DB8500_REGULATOR_VMODEM] = {
2259                 .constraints = {
2260                         .name = "db8500-vmodem",
2261                         .valid_ops_mask = REGULATOR_CHANGE_STATUS,
2262                 },
2263         },
2264         [DB8500_REGULATOR_VPLL] = {
2265                 .constraints = {
2266                         .name = "db8500-vpll",
2267                         .valid_ops_mask = REGULATOR_CHANGE_STATUS,
2268                 },
2269         },
2270         [DB8500_REGULATOR_VSMPS1] = {
2271                 .constraints = {
2272                         .name = "db8500-vsmps1",
2273                         .valid_ops_mask = REGULATOR_CHANGE_STATUS,
2274                 },
2275         },
2276         [DB8500_REGULATOR_VSMPS2] = {
2277                 .constraints = {
2278                         .name = "db8500-vsmps2",
2279                         .valid_ops_mask = REGULATOR_CHANGE_STATUS,
2280                 },
2281                 .consumer_supplies = db8500_vsmps2_consumers,
2282                 .num_consumer_supplies = ARRAY_SIZE(db8500_vsmps2_consumers),
2283         },
2284         [DB8500_REGULATOR_VSMPS3] = {
2285                 .constraints = {
2286                         .name = "db8500-vsmps3",
2287                         .valid_ops_mask = REGULATOR_CHANGE_STATUS,
2288                 },
2289         },
2290         [DB8500_REGULATOR_VRF1] = {
2291                 .constraints = {
2292                         .name = "db8500-vrf1",
2293                         .valid_ops_mask = REGULATOR_CHANGE_STATUS,
2294                 },
2295         },
2296         [DB8500_REGULATOR_SWITCH_SVAMMDSP] = {
2297                 .supply_regulator = "db8500-vape",
2298                 .constraints = {
2299                         .name = "db8500-sva-mmdsp",
2300                         .valid_ops_mask = REGULATOR_CHANGE_STATUS,
2301                 },
2302                 .consumer_supplies = db8500_svammdsp_consumers,
2303                 .num_consumer_supplies = ARRAY_SIZE(db8500_svammdsp_consumers),
2304         },
2305         [DB8500_REGULATOR_SWITCH_SVAMMDSPRET] = {
2306                 .constraints = {
2307                         /* "ret" means "retention" */
2308                         .name = "db8500-sva-mmdsp-ret",
2309                         .valid_ops_mask = REGULATOR_CHANGE_STATUS,
2310                 },
2311         },
2312         [DB8500_REGULATOR_SWITCH_SVAPIPE] = {
2313                 .supply_regulator = "db8500-vape",
2314                 .constraints = {
2315                         .name = "db8500-sva-pipe",
2316                         .valid_ops_mask = REGULATOR_CHANGE_STATUS,
2317                 },
2318                 .consumer_supplies = db8500_svapipe_consumers,
2319                 .num_consumer_supplies = ARRAY_SIZE(db8500_svapipe_consumers),
2320         },
2321         [DB8500_REGULATOR_SWITCH_SIAMMDSP] = {
2322                 .supply_regulator = "db8500-vape",
2323                 .constraints = {
2324                         .name = "db8500-sia-mmdsp",
2325                         .valid_ops_mask = REGULATOR_CHANGE_STATUS,
2326                 },
2327                 .consumer_supplies = db8500_siammdsp_consumers,
2328                 .num_consumer_supplies = ARRAY_SIZE(db8500_siammdsp_consumers),
2329         },
2330         [DB8500_REGULATOR_SWITCH_SIAMMDSPRET] = {
2331                 .constraints = {
2332                         .name = "db8500-sia-mmdsp-ret",
2333                         .valid_ops_mask = REGULATOR_CHANGE_STATUS,
2334                 },
2335         },
2336         [DB8500_REGULATOR_SWITCH_SIAPIPE] = {
2337                 .supply_regulator = "db8500-vape",
2338                 .constraints = {
2339                         .name = "db8500-sia-pipe",
2340                         .valid_ops_mask = REGULATOR_CHANGE_STATUS,
2341                 },
2342                 .consumer_supplies = db8500_siapipe_consumers,
2343                 .num_consumer_supplies = ARRAY_SIZE(db8500_siapipe_consumers),
2344         },
2345         [DB8500_REGULATOR_SWITCH_SGA] = {
2346                 .supply_regulator = "db8500-vape",
2347                 .constraints = {
2348                         .name = "db8500-sga",
2349                         .valid_ops_mask = REGULATOR_CHANGE_STATUS,
2350                 },
2351                 .consumer_supplies = db8500_sga_consumers,
2352                 .num_consumer_supplies = ARRAY_SIZE(db8500_sga_consumers),
2353
2354         },
2355         [DB8500_REGULATOR_SWITCH_B2R2_MCDE] = {
2356                 .supply_regulator = "db8500-vape",
2357                 .constraints = {
2358                         .name = "db8500-b2r2-mcde",
2359                         .valid_ops_mask = REGULATOR_CHANGE_STATUS,
2360                 },
2361                 .consumer_supplies = db8500_b2r2_mcde_consumers,
2362                 .num_consumer_supplies = ARRAY_SIZE(db8500_b2r2_mcde_consumers),
2363         },
2364         [DB8500_REGULATOR_SWITCH_ESRAM12] = {
2365                 .supply_regulator = "db8500-vape",
2366                 .constraints = {
2367                         .name = "db8500-esram12",
2368                         .valid_ops_mask = REGULATOR_CHANGE_STATUS,
2369                 },
2370                 .consumer_supplies = db8500_esram12_consumers,
2371                 .num_consumer_supplies = ARRAY_SIZE(db8500_esram12_consumers),
2372         },
2373         [DB8500_REGULATOR_SWITCH_ESRAM12RET] = {
2374                 .constraints = {
2375                         .name = "db8500-esram12-ret",
2376                         .valid_ops_mask = REGULATOR_CHANGE_STATUS,
2377                 },
2378         },
2379         [DB8500_REGULATOR_SWITCH_ESRAM34] = {
2380                 .supply_regulator = "db8500-vape",
2381                 .constraints = {
2382                         .name = "db8500-esram34",
2383                         .valid_ops_mask = REGULATOR_CHANGE_STATUS,
2384                 },
2385                 .consumer_supplies = db8500_esram34_consumers,
2386                 .num_consumer_supplies = ARRAY_SIZE(db8500_esram34_consumers),
2387         },
2388         [DB8500_REGULATOR_SWITCH_ESRAM34RET] = {
2389                 .constraints = {
2390                         .name = "db8500-esram34-ret",
2391                         .valid_ops_mask = REGULATOR_CHANGE_STATUS,
2392                 },
2393         },
2394 };
2395
2396 static struct mfd_cell db8500_prcmu_devs[] = {
2397         {
2398                 .name = "db8500-prcmu-regulators",
2399                 .platform_data = &db8500_regulators,
2400                 .pdata_size = sizeof(db8500_regulators),
2401         },
2402         {
2403                 .name = "cpufreq-u8500",
2404         },
2405 };
2406
2407 /**
2408  * db8500_prcmu_probe - probe call for the Linux PRCMU FW init logic
2409  *
2410  */
2411 static int __init db8500_prcmu_probe(struct platform_device *pdev)
2412 {
2413         int err = 0;
2414
2415         if (ux500_is_svp())
2416                 return -ENODEV;
2417
2418         db8500_prcmu_init_clkforce();
2419
2420         /* Clean up the mailbox interrupts after pre-kernel code. */
2421         writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLR);
2422
2423         err = request_threaded_irq(IRQ_DB8500_PRCMU1, prcmu_irq_handler,
2424                 prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL);
2425         if (err < 0) {
2426                 pr_err("prcmu: Failed to allocate IRQ_DB8500_PRCMU1.\n");
2427                 err = -EBUSY;
2428                 goto no_irq_return;
2429         }
2430
2431         if (cpu_is_u8500v20_or_later())
2432                 prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);
2433
2434         err = mfd_add_devices(&pdev->dev, 0, db8500_prcmu_devs,
2435                               ARRAY_SIZE(db8500_prcmu_devs), NULL,
2436                               0);
2437
2438         if (err)
2439                 pr_err("prcmu: Failed to add subdevices\n");
2440         else
2441                 pr_info("DB8500 PRCMU initialized\n");
2442
2443 no_irq_return:
2444         return err;
2445 }
2446
2447 static struct platform_driver db8500_prcmu_driver = {
2448         .driver = {
2449                 .name = "db8500-prcmu",
2450                 .owner = THIS_MODULE,
2451         },
2452 };
2453
2454 static int __init db8500_prcmu_init(void)
2455 {
2456         return platform_driver_probe(&db8500_prcmu_driver, db8500_prcmu_probe);
2457 }
2458
2459 arch_initcall(db8500_prcmu_init);
2460
2461 MODULE_AUTHOR("Mattias Nilsson <mattias.i.nilsson@stericsson.com>");
2462 MODULE_DESCRIPTION("DB8500 PRCM Unit driver");
2463 MODULE_LICENSE("GPL v2");