[pandora-kernel.git] drivers/cpufreq/exynos4210-cpufreq.c

/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *              http://www.samsung.com
 *
 * EXYNOS4 - CPU frequency scaling support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <linux/cpufreq.h>
#include <linux/notifier.h>
#include <linux/suspend.h>

#include <mach/map.h>
#include <mach/regs-clock.h>
#include <mach/regs-mem.h>

#include <plat/clock.h>
#include <plat/pm.h>

static struct clk *cpu_clk;
static struct clk *moutcore;
static struct clk *mout_mpll;
static struct clk *mout_apll;

static struct regulator *arm_regulator;
static struct regulator *int_regulator;

static struct cpufreq_freqs freqs;
static unsigned int memtype;

static unsigned int locking_frequency;
static bool frequency_locked;
static DEFINE_MUTEX(cpufreq_lock);

enum exynos4_memory_type {
        DDR2 = 4,
        LPDDR2,
        DDR3,
};

enum cpufreq_level_index {
        L0, L1, L2, L3, CPUFREQ_LEVEL_END,
};

static struct cpufreq_frequency_table exynos4_freq_table[] = {
        {L0, 1000*1000},
        {L1, 800*1000},
        {L2, 400*1000},
        {L3, 100*1000},
        {0, CPUFREQ_TABLE_END},
};
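
/*
 * Editorial note: cpufreq frequencies are expressed in kHz, so the
 * 1000*1000 kHz entry above is 1 GHz.  Each initializer fills the
 * { .index, .frequency } pair of struct cpufreq_frequency_table, and
 * the table is terminated by a CPUFREQ_TABLE_END frequency.
 */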

static unsigned int clkdiv_cpu0[CPUFREQ_LEVEL_END][7] = {
        /*
         * Clock divider values for the following:
         * { DIVCORE, DIVCOREM0, DIVCOREM1, DIVPERIPH,
         *              DIVATB, DIVPCLK_DBG, DIVAPLL }
         */

        /* ARM L0: 1000MHz */
        { 0, 3, 7, 3, 3, 0, 1 },

        /* ARM L1: 800MHz */
        { 0, 3, 7, 3, 3, 0, 1 },

        /* ARM L2: 400MHz */
        { 0, 1, 3, 1, 3, 0, 1 },

        /* ARM L3: 100MHz */
        { 0, 0, 1, 0, 3, 1, 1 },
};

static unsigned int clkdiv_cpu1[CPUFREQ_LEVEL_END][2] = {
        /*
         * Clock divider values for the following:
         * { DIVCOPY, DIVHPM }
         */

        /* ARM L0: 1000MHz */
        { 3, 0 },

        /* ARM L1: 800MHz */
        { 3, 0 },

        /* ARM L2: 400MHz */
        { 3, 0 },

        /* ARM L3: 100MHz */
        { 3, 0 },
};

static unsigned int clkdiv_dmc0[CPUFREQ_LEVEL_END][8] = {
        /*
         * Clock divider values for the following:
         * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD,
         *              DIVDMCP, DIVCOPY2, DIVCORE_TIMERS }
         */

        /* DMC L0: 400MHz */
        { 3, 1, 1, 1, 1, 1, 3, 1 },

        /* DMC L1: 400MHz */
        { 3, 1, 1, 1, 1, 1, 3, 1 },

        /* DMC L2: 266.7MHz */
        { 7, 1, 1, 2, 1, 1, 3, 1 },

        /* DMC L3: 200MHz */
        { 7, 1, 1, 3, 1, 1, 3, 1 },
};
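
/*
 * Editorial note: Samsung CLKDIV register fields divide by (value + 1).
 * The DMC rates quoted above line up with an (assumed) 800 MHz
 * MPLL-derived source: DIVDMC = 1 -> 800/2 = 400 MHz,
 * 2 -> 800/3 = 266.7 MHz, 3 -> 800/4 = 200 MHz.
 */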

static unsigned int clkdiv_top[CPUFREQ_LEVEL_END][5] = {
        /*
         * Clock divider values for the following:
         * { DIVACLK200, DIVACLK100, DIVACLK160, DIVACLK133, DIVONENAND }
         */

        /* ACLK200 L0: 200MHz */
        { 3, 7, 4, 5, 1 },

        /* ACLK200 L1: 200MHz */
        { 3, 7, 4, 5, 1 },

        /* ACLK200 L2: 160MHz */
        { 4, 7, 5, 7, 1 },

        /* ACLK200 L3: 133.3MHz */
        { 5, 7, 7, 7, 1 },
};

static unsigned int clkdiv_lr_bus[CPUFREQ_LEVEL_END][2] = {
        /*
         * Clock divider values for the following:
         * { DIVGDL/R, DIVGPL/R }
         */

        /* ACLK_GDL/R L0: 200MHz */
        { 3, 1 },

        /* ACLK_GDL/R L1: 200MHz */
        { 3, 1 },

        /* ACLK_GDL/R L2: 160MHz */
        { 4, 1 },

        /* ACLK_GDL/R L3: 133.3MHz */
        { 5, 1 },
};

struct cpufreq_voltage_table {
        unsigned int    index;          /* any */
        unsigned int    arm_volt;       /* uV */
        unsigned int    int_volt;       /* uV */
};

static struct cpufreq_voltage_table exynos4_volt_table[CPUFREQ_LEVEL_END] = {
        {
                .index          = L0,
                .arm_volt       = 1200000,
                .int_volt       = 1100000,
        }, {
                .index          = L1,
                .arm_volt       = 1100000,
                .int_volt       = 1100000,
        }, {
                .index          = L2,
                .arm_volt       = 1000000,
                .int_volt       = 1000000,
        }, {
                .index          = L3,
                .arm_volt       = 900000,
                .int_volt       = 1000000,
        },
};

static unsigned int exynos4_apll_pms_table[CPUFREQ_LEVEL_END] = {
        /* APLL FOUT L0: 1000MHz */
        ((250 << 16) | (6 << 8) | 1),

        /* APLL FOUT L1: 800MHz */
        ((200 << 16) | (6 << 8) | 1),

        /* APLL FOUT L2: 400MHz */
        ((200 << 16) | (6 << 8) | 2),

        /* APLL FOUT L3: 100MHz */
        ((200 << 16) | (6 << 8) | 4),
};
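
/*
 * Editorial note: the PMS words above pack MDIV (bits 16+), PDIV
 * (bits 8+) and SDIV (bits 0-2) for S5P_APLL_CON0.  The values are
 * consistent with FOUT = MDIV * FIN / (PDIV * 2^(SDIV - 1)) and an
 * assumed 24 MHz crystal (FIN), e.g. L0: 250 * 24 / (6 * 2^0) = 1000 MHz
 * and L3: 200 * 24 / (6 * 2^3) = 100 MHz.  L1-L3 share MDIV/PDIV and
 * differ only in SDIV.
 */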

static int exynos4_verify_speed(struct cpufreq_policy *policy)
{
        return cpufreq_frequency_table_verify(policy, exynos4_freq_table);
}

static unsigned int exynos4_getspeed(unsigned int cpu)
{
        return clk_get_rate(cpu_clk) / 1000;
}

static void exynos4_set_clkdiv(unsigned int div_index)
{
        unsigned int tmp;

        /* Change Divider - CPU0 */

        tmp = __raw_readl(S5P_CLKDIV_CPU);

        tmp &= ~(S5P_CLKDIV_CPU0_CORE_MASK | S5P_CLKDIV_CPU0_COREM0_MASK |
                S5P_CLKDIV_CPU0_COREM1_MASK | S5P_CLKDIV_CPU0_PERIPH_MASK |
                S5P_CLKDIV_CPU0_ATB_MASK | S5P_CLKDIV_CPU0_PCLKDBG_MASK |
                S5P_CLKDIV_CPU0_APLL_MASK);

        tmp |= ((clkdiv_cpu0[div_index][0] << S5P_CLKDIV_CPU0_CORE_SHIFT) |
                (clkdiv_cpu0[div_index][1] << S5P_CLKDIV_CPU0_COREM0_SHIFT) |
                (clkdiv_cpu0[div_index][2] << S5P_CLKDIV_CPU0_COREM1_SHIFT) |
                (clkdiv_cpu0[div_index][3] << S5P_CLKDIV_CPU0_PERIPH_SHIFT) |
                (clkdiv_cpu0[div_index][4] << S5P_CLKDIV_CPU0_ATB_SHIFT) |
                (clkdiv_cpu0[div_index][5] << S5P_CLKDIV_CPU0_PCLKDBG_SHIFT) |
                (clkdiv_cpu0[div_index][6] << S5P_CLKDIV_CPU0_APLL_SHIFT));

        __raw_writel(tmp, S5P_CLKDIV_CPU);

        do {
                tmp = __raw_readl(S5P_CLKDIV_STATCPU);
        } while (tmp & 0x1111111);

        /* Change Divider - CPU1 */

        tmp = __raw_readl(S5P_CLKDIV_CPU1);

        tmp &= ~((0x7 << 4) | 0x7);

        tmp |= ((clkdiv_cpu1[div_index][0] << 4) |
                (clkdiv_cpu1[div_index][1] << 0));

        __raw_writel(tmp, S5P_CLKDIV_CPU1);

        do {
                tmp = __raw_readl(S5P_CLKDIV_STATCPU1);
        } while (tmp & 0x11);

        /* Change Divider - DMC0 */

        tmp = __raw_readl(S5P_CLKDIV_DMC0);

        tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK | S5P_CLKDIV_DMC0_ACPPCLK_MASK |
                S5P_CLKDIV_DMC0_DPHY_MASK | S5P_CLKDIV_DMC0_DMC_MASK |
                S5P_CLKDIV_DMC0_DMCD_MASK | S5P_CLKDIV_DMC0_DMCP_MASK |
                S5P_CLKDIV_DMC0_COPY2_MASK | S5P_CLKDIV_DMC0_CORETI_MASK);

        tmp |= ((clkdiv_dmc0[div_index][0] << S5P_CLKDIV_DMC0_ACP_SHIFT) |
                (clkdiv_dmc0[div_index][1] << S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) |
                (clkdiv_dmc0[div_index][2] << S5P_CLKDIV_DMC0_DPHY_SHIFT) |
                (clkdiv_dmc0[div_index][3] << S5P_CLKDIV_DMC0_DMC_SHIFT) |
                (clkdiv_dmc0[div_index][4] << S5P_CLKDIV_DMC0_DMCD_SHIFT) |
                (clkdiv_dmc0[div_index][5] << S5P_CLKDIV_DMC0_DMCP_SHIFT) |
                (clkdiv_dmc0[div_index][6] << S5P_CLKDIV_DMC0_COPY2_SHIFT) |
                (clkdiv_dmc0[div_index][7] << S5P_CLKDIV_DMC0_CORETI_SHIFT));

        __raw_writel(tmp, S5P_CLKDIV_DMC0);

        do {
                tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0);
        } while (tmp & 0x11111111);

        /* Change Divider - TOP */

        tmp = __raw_readl(S5P_CLKDIV_TOP);

        tmp &= ~(S5P_CLKDIV_TOP_ACLK200_MASK | S5P_CLKDIV_TOP_ACLK100_MASK |
                S5P_CLKDIV_TOP_ACLK160_MASK | S5P_CLKDIV_TOP_ACLK133_MASK |
                S5P_CLKDIV_TOP_ONENAND_MASK);

        tmp |= ((clkdiv_top[div_index][0] << S5P_CLKDIV_TOP_ACLK200_SHIFT) |
                (clkdiv_top[div_index][1] << S5P_CLKDIV_TOP_ACLK100_SHIFT) |
                (clkdiv_top[div_index][2] << S5P_CLKDIV_TOP_ACLK160_SHIFT) |
                (clkdiv_top[div_index][3] << S5P_CLKDIV_TOP_ACLK133_SHIFT) |
                (clkdiv_top[div_index][4] << S5P_CLKDIV_TOP_ONENAND_SHIFT));

        __raw_writel(tmp, S5P_CLKDIV_TOP);

        do {
                tmp = __raw_readl(S5P_CLKDIV_STAT_TOP);
        } while (tmp & 0x11111);

        /* Change Divider - LEFTBUS */

        tmp = __raw_readl(S5P_CLKDIV_LEFTBUS);

        tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);

        tmp |= ((clkdiv_lr_bus[div_index][0] << S5P_CLKDIV_BUS_GDLR_SHIFT) |
                (clkdiv_lr_bus[div_index][1] << S5P_CLKDIV_BUS_GPLR_SHIFT));

        __raw_writel(tmp, S5P_CLKDIV_LEFTBUS);

        do {
                tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS);
        } while (tmp & 0x11);

        /* Change Divider - RIGHTBUS */

        tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS);

        tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);

        tmp |= ((clkdiv_lr_bus[div_index][0] << S5P_CLKDIV_BUS_GDLR_SHIFT) |
                (clkdiv_lr_bus[div_index][1] << S5P_CLKDIV_BUS_GPLR_SHIFT));

        __raw_writel(tmp, S5P_CLKDIV_RIGHTBUS);

        do {
                tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS);
        } while (tmp & 0x11);
}

static void exynos4_set_apll(unsigned int index)
{
        unsigned int tmp;

        /* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
        clk_set_parent(moutcore, mout_mpll);

        do {
                tmp = (__raw_readl(S5P_CLKMUX_STATCPU)
                        >> S5P_CLKSRC_CPU_MUXCORE_SHIFT);
                tmp &= 0x7;
        } while (tmp != 0x2);

        /* 2. Set APLL Lock time */
        __raw_writel(S5P_APLL_LOCKTIME, S5P_APLL_LOCK);

        /* 3. Change PLL PMS values */
        tmp = __raw_readl(S5P_APLL_CON0);
        tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
        tmp |= exynos4_apll_pms_table[index];
        __raw_writel(tmp, S5P_APLL_CON0);

        /* 4. wait_lock_time */
        do {
                tmp = __raw_readl(S5P_APLL_CON0);
        } while (!(tmp & (0x1 << S5P_APLLCON0_LOCKED_SHIFT)));

        /* 5. MUX_CORE_SEL = APLL */
        clk_set_parent(moutcore, mout_apll);

        do {
                tmp = __raw_readl(S5P_CLKMUX_STATCPU);
                tmp &= S5P_CLKMUX_STATCPU_MUXCORE_MASK;
        } while (tmp != (0x1 << S5P_CLKSRC_CPU_MUXCORE_SHIFT));
}

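/*
 * Editorial note on the ordering in exynos4_set_frequency() below
 * (inferred from the code, not documented by the original author):
 * when switching to a faster level the dividers are reprogrammed before
 * the APLL is raised, and when switching to a slower level the APLL is
 * lowered before the dividers are relaxed, so derived clocks should not
 * overshoot their intended rates mid-transition.  Because L1-L3 share
 * MDIV/PDIV in exynos4_apll_pms_table, transitions that do not involve
 * L0 only rewrite the SDIV field instead of relocking the PLL.
 */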
static void exynos4_set_frequency(unsigned int old_index, unsigned int new_index)
{
        unsigned int tmp;

        if (old_index > new_index) {
                /* A transition up to L0 needs the full APLL reprogram */
                if (freqs.new == exynos4_freq_table[L0].frequency) {
                        /* 1. Change the system clock divider values */
                        exynos4_set_clkdiv(new_index);

                        /* 2. Change the apll m,p,s values */
                        exynos4_set_apll(new_index);
                } else {
                        /* 1. Change the system clock divider values */
                        exynos4_set_clkdiv(new_index);

                        /* 2. Change just the s value of the apll m,p,s values */
                        tmp = __raw_readl(S5P_APLL_CON0);
                        tmp &= ~(0x7 << 0);
                        tmp |= (exynos4_apll_pms_table[new_index] & 0x7);
                        __raw_writel(tmp, S5P_APLL_CON0);
                }
        } else if (old_index < new_index) {
                /* A transition down from L0 needs the full APLL reprogram */
                if (freqs.old == exynos4_freq_table[L0].frequency) {
                        /* 1. Change the apll m,p,s values */
                        exynos4_set_apll(new_index);

                        /* 2. Change the system clock divider values */
                        exynos4_set_clkdiv(new_index);
                } else {
                        /* 1. Change just the s value of the apll m,p,s values */
                        tmp = __raw_readl(S5P_APLL_CON0);
                        tmp &= ~(0x7 << 0);
                        tmp |= (exynos4_apll_pms_table[new_index] & 0x7);
                        __raw_writel(tmp, S5P_APLL_CON0);

                        /* 2. Change the system clock divider values */
                        exynos4_set_clkdiv(new_index);
                }
        }
}

static int exynos4_target(struct cpufreq_policy *policy,
                          unsigned int target_freq,
                          unsigned int relation)
{
        unsigned int index, old_index;
        unsigned int arm_volt, int_volt;
        int err = -EINVAL;

        freqs.old = exynos4_getspeed(policy->cpu);

        mutex_lock(&cpufreq_lock);

        if (frequency_locked && target_freq != locking_frequency) {
                err = -EAGAIN;
                goto out;
        }

        if (cpufreq_frequency_table_target(policy, exynos4_freq_table,
                                           freqs.old, relation, &old_index))
                goto out;

        if (cpufreq_frequency_table_target(policy, exynos4_freq_table,
                                           target_freq, relation, &index))
                goto out;

        err = 0;

        freqs.new = exynos4_freq_table[index].frequency;
        freqs.cpu = policy->cpu;

        if (freqs.new == freqs.old)
                goto out;

        /* get the voltage values for the target level */
        arm_volt = exynos4_volt_table[index].arm_volt;
        int_volt = exynos4_volt_table[index].int_volt;

        cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

        /* when scaling up, raise the voltages before changing the clocks */
        if (freqs.new > freqs.old) {
                regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
                regulator_set_voltage(int_regulator, int_volt, int_volt);
        }

        /* Clock Configuration Procedure */
        exynos4_set_frequency(old_index, index);

        /* when scaling down, lower the voltages after changing the clocks */
        if (freqs.new < freqs.old) {
                regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
                regulator_set_voltage(int_regulator, int_volt, int_volt);
        }

        cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

out:
        mutex_unlock(&cpufreq_lock);
        return err;
}

#ifdef CONFIG_PM
/*
 * These suspend/resume callbacks are used as syscore_ops; at that stage
 * it is already too late to set regulator voltages, so they do nothing.
 */
static int exynos4_cpufreq_suspend(struct cpufreq_policy *policy)
{
        return 0;
}

static int exynos4_cpufreq_resume(struct cpufreq_policy *policy)
{
        return 0;
}
#endif

/**
 * exynos4_cpufreq_pm_notifier - block CPUFREQ's activities in suspend-resume
 *                      context
 * @notifier: the registered notifier block
 * @pm_event: PM event (only PM_SUSPEND_PREPARE and PM_POST_SUSPEND are handled)
 * @v: unused
 *
 * While frequency_locked == true, target() ignores every frequency but
 * locking_frequency. The locking_frequency value is the initial frequency,
 * which is set by the bootloader. In order to eliminate possible
 * inconsistency in clock values, we save and restore frequencies during
 * suspend and resume and block CPUFREQ activities. Note that the standard
 * suspend/resume callbacks cannot be used for this because they run as
 * syscore_ops, too late for regulator actions.
 */
static int exynos4_cpufreq_pm_notifier(struct notifier_block *notifier,
                                       unsigned long pm_event, void *v)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */
        static unsigned int saved_frequency;
        unsigned int temp;

        mutex_lock(&cpufreq_lock);
        switch (pm_event) {
        case PM_SUSPEND_PREPARE:
                if (frequency_locked)
                        goto out;
                frequency_locked = true;

                if (locking_frequency) {
                        saved_frequency = exynos4_getspeed(0);

                        mutex_unlock(&cpufreq_lock);
                        exynos4_target(policy, locking_frequency,
                                       CPUFREQ_RELATION_H);
                        mutex_lock(&cpufreq_lock);
                }

                break;
        case PM_POST_SUSPEND:

                if (saved_frequency) {
                        /*
                         * While frequency_locked, only locking_frequency
                         * is valid for target(). In order to use
                         * saved_frequency while keeping frequency_locked,
                         * we temporarily overwrite locking_frequency.
                         */
                        temp = locking_frequency;
                        locking_frequency = saved_frequency;

                        mutex_unlock(&cpufreq_lock);
                        exynos4_target(policy, locking_frequency,
                                       CPUFREQ_RELATION_H);
                        mutex_lock(&cpufreq_lock);

                        locking_frequency = temp;
                }

                frequency_locked = false;
                break;
        }
out:
        mutex_unlock(&cpufreq_lock);

        return NOTIFY_OK;
}

static struct notifier_block exynos4_cpufreq_nb = {
        .notifier_call = exynos4_cpufreq_pm_notifier,
};

static int exynos4_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
        int ret;

        policy->cur = policy->min = policy->max = exynos4_getspeed(policy->cpu);

        /* set the transition latency value (in ns) */
        policy->cpuinfo.transition_latency = 100000;

        /*
         * The EXYNOS4 has two cores whose frequencies cannot be set
         * independently; both always run at the same speed, so every
         * CPU is affected by a transition.
         */
        cpumask_setall(policy->cpus);

        ret = cpufreq_frequency_table_cpuinfo(policy, exynos4_freq_table);
        if (ret)
                return ret;

        cpufreq_frequency_table_get_attr(exynos4_freq_table, policy->cpu);

        return 0;
}

static int exynos4_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
        cpufreq_frequency_table_put_attr(policy->cpu);
        return 0;
}

static struct freq_attr *exynos4_cpufreq_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
        NULL,
};

static struct cpufreq_driver exynos4_driver = {
        .flags          = CPUFREQ_STICKY,
        .verify         = exynos4_verify_speed,
        .target         = exynos4_target,
        .get            = exynos4_getspeed,
        .init           = exynos4_cpufreq_cpu_init,
        .exit           = exynos4_cpufreq_cpu_exit,
        .name           = "exynos4_cpufreq",
        .attr           = exynos4_cpufreq_attr,
#ifdef CONFIG_PM
        .suspend        = exynos4_cpufreq_suspend,
        .resume         = exynos4_cpufreq_resume,
#endif
};

static int __init exynos4_cpufreq_init(void)
{
        cpu_clk = clk_get(NULL, "armclk");
        if (IS_ERR(cpu_clk))
                return PTR_ERR(cpu_clk);

        /* remember the boot frequency; it is restored around suspend */
        locking_frequency = exynos4_getspeed(0);

        moutcore = clk_get(NULL, "moutcore");
        if (IS_ERR(moutcore))
                goto out;

        mout_mpll = clk_get(NULL, "mout_mpll");
        if (IS_ERR(mout_mpll))
                goto out;

        mout_apll = clk_get(NULL, "mout_apll");
        if (IS_ERR(mout_apll))
                goto out;

        arm_regulator = regulator_get(NULL, "vdd_arm");
        if (IS_ERR(arm_regulator)) {
                printk(KERN_ERR "failed to get resource %s\n", "vdd_arm");
                goto out;
        }

        int_regulator = regulator_get(NULL, "vdd_int");
        if (IS_ERR(int_regulator)) {
                printk(KERN_ERR "failed to get resource %s\n", "vdd_int");
                goto out;
        }

        /*
         * Check the DRAM type, because the available DVFS levels differ
         * according to the DRAM type.
         */
        memtype = __raw_readl(S5P_VA_DMC0 + S5P_DMC0_MEMCON_OFFSET);
        memtype = (memtype >> S5P_DMC0_MEMTYPE_SHIFT);
        memtype &= S5P_DMC0_MEMTYPE_MASK;

        if ((memtype < DDR2) || (memtype > DDR3)) {
                printk(KERN_ERR "%s: wrong memtype = 0x%x\n", __func__, memtype);
                goto out;
        } else {
                printk(KERN_DEBUG "%s: memtype = 0x%x\n", __func__, memtype);
        }

        register_pm_notifier(&exynos4_cpufreq_nb);

        return cpufreq_register_driver(&exynos4_driver);

out:
        if (!IS_ERR(cpu_clk))
                clk_put(cpu_clk);

        if (!IS_ERR(moutcore))
                clk_put(moutcore);

        if (!IS_ERR(mout_mpll))
                clk_put(mout_mpll);

        if (!IS_ERR(mout_apll))
                clk_put(mout_apll);

        if (!IS_ERR(arm_regulator))
                regulator_put(arm_regulator);

        if (!IS_ERR(int_regulator))
                regulator_put(int_regulator);

        printk(KERN_ERR "%s: failed initialization\n", __func__);

        return -EINVAL;
}
late_initcall(exynos4_cpufreq_init);