arch/arm64/kernel/cpuinfo.c
/*
 * Record and handle CPU attributes.
 *
 * Copyright (C) 2014 ARM Ltd.
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <asm/arch_timer.h>
#include <asm/cachetype.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/smp.h>

/*
 * In case the boot CPU is hotpluggable, we record its initial state and
 * current state separately. Certain system registers may contain different
 * values depending on configuration at or after reset.
 */
DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
static struct cpuinfo_arm64 boot_cpu_data;
static bool mixed_endian_el0 = true;

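/*
 * Indexed directly by the CTR_EL0.L1Ip field value, as extracted by
 * CTR_L1IP() in cpuinfo_detect_icache_policy() below.
 */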
static char *icache_policy_str[] = {
        [ICACHE_POLICY_RESERVED] = "RESERVED/UNKNOWN",
        [ICACHE_POLICY_AIVIVT] = "AIVIVT",
        [ICACHE_POLICY_VIPT] = "VIPT",
        [ICACHE_POLICY_PIPT] = "PIPT",
};

unsigned long __icache_flags;

static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
{
        unsigned int cpu = smp_processor_id();
        u32 l1ip = CTR_L1IP(info->reg_ctr);

        if (l1ip != ICACHE_POLICY_PIPT) {
                /*
                 * VIPT caches are non-aliasing if the VA always equals the PA
                 * in all bit positions that are covered by the index. This is
                 * the case if the size of a way (# of sets * line size) does
                 * not exceed PAGE_SIZE.
                 */
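                /*
                 * Worked example (illustrative numbers, not read from the
                 * hardware): a 32 KiB, 4-way cache with 64-byte lines has
                 * 128 sets, so one way spans 128 * 64 = 8 KiB. With 4 KiB
                 * pages that exceeds PAGE_SIZE, so such a VIPT cache must
                 * be treated as aliasing.
                 */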
                u32 waysize = icache_get_numsets() * icache_get_linesize();

                if (l1ip != ICACHE_POLICY_VIPT || waysize > PAGE_SIZE)
                        set_bit(ICACHEF_ALIASING, &__icache_flags);
        }
        if (l1ip == ICACHE_POLICY_AIVIVT)
                set_bit(ICACHEF_AIVIVT, &__icache_flags);

        pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
}

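/*
 * Mixed-endian support at EL0 is advertised by ID_AA64MMFR0_EL1; the
 * decoding is done by id_aa64mmfr0_mixed_endian_el0(), provided with the
 * other CPU-feature helpers by <asm/cpufeature.h> included above.
 */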
bool cpu_supports_mixed_endian_el0(void)
{
        return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
}

bool system_supports_mixed_endian_el0(void)
{
        return mixed_endian_el0;
}

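/*
 * The system-wide value is the AND across all CPUs seen so far: a single
 * CPU without mixed-endian EL0 support disables it for the whole system.
 */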
static void update_mixed_endian_el0_support(struct cpuinfo_arm64 *info)
{
        mixed_endian_el0 &= id_aa64mmfr0_mixed_endian_el0(info->reg_id_aa64mmfr0);
}

static void update_cpu_features(struct cpuinfo_arm64 *info)
{
        update_mixed_endian_el0_support(info);
}

static int check_reg_mask(char *name, u64 mask, u64 boot, u64 cur, int cpu)
{
        if ((boot & mask) == (cur & mask))
                return 0;

        pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016lx, CPU%d: %#016lx\n",
                name, (unsigned long)boot, cpu, (unsigned long)cur);

        return 1;
}

#define CHECK_MASK(field, mask, boot, cur, cpu) \
        check_reg_mask(#field, mask, (boot)->reg_ ## field, (cur)->reg_ ## field, cpu)

#define CHECK(field, boot, cur, cpu) \
        CHECK_MASK(field, ~0ULL, boot, cur, cpu)

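/*
 * For example, CHECK(dczid, boot, cur, cpu) expands to
 * check_reg_mask("dczid", ~0ULL, boot->reg_dczid, cur->reg_dczid, cpu),
 * i.e. every bit of the two DCZID_EL0 snapshots is compared.
 */
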
/*
 * Verify that CPUs don't have unexpected differences that will cause problems.
 */
static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
{
        unsigned int cpu = smp_processor_id();
        struct cpuinfo_arm64 *boot = &boot_cpu_data;
        unsigned int diff = 0;

        /*
         * The kernel can handle differing I-cache policies, but otherwise
         * caches should look identical. Userspace JITs will make use of
         * *minLine.
         */
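        /*
         * The 0xffff3fff mask clears bits [15:14], the CTR_EL0.L1Ip field,
         * since the I-cache policy may legitimately differ and is handled
         * by cpuinfo_detect_icache_policy().
         */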
        diff |= CHECK_MASK(ctr, 0xffff3fff, boot, cur, cpu);

        /*
         * Userspace may perform DC ZVA instructions. Mismatched block sizes
         * could result in too much or too little memory being zeroed if a
         * process is preempted and migrated between CPUs.
         */
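        /*
         * (DCZID_EL0.BS encodes the block size as log2 of a number of
         * 4-byte words, so even a one-step mismatch would double or halve
         * the region a migrated process zeroes.)
         */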
        diff |= CHECK(dczid, boot, cur, cpu);

        /* If different, timekeeping will be broken (especially with KVM) */
        diff |= CHECK(cntfrq, boot, cur, cpu);

        /*
         * The kernel uses self-hosted debug features and expects CPUs to
         * support identical debug features. We presently need CTX_CMPs, WRPs,
         * and BRPs to be identical.
         * ID_AA64DFR1 is currently RES0.
         */
        diff |= CHECK(id_aa64dfr0, boot, cur, cpu);
        diff |= CHECK(id_aa64dfr1, boot, cur, cpu);

        /*
         * Even in big.LITTLE, processors should have identical instruction
         * sets.
         */
        diff |= CHECK(id_aa64isar0, boot, cur, cpu);
        diff |= CHECK(id_aa64isar1, boot, cur, cpu);

        /*
         * Differing PARange support is fine as long as all peripherals and
         * memory are mapped within the minimum PARange of all CPUs.
         * Linux should not care about secure memory.
         * ID_AA64MMFR1 is currently RES0.
         */
        diff |= CHECK_MASK(id_aa64mmfr0, 0xffffffffffff0ff0, boot, cur, cpu);
        diff |= CHECK(id_aa64mmfr1, boot, cur, cpu);

        /*
         * EL3 is not our concern.
         * ID_AA64PFR1 is currently RES0.
         */
        diff |= CHECK_MASK(id_aa64pfr0, 0xffffffffffff0fff, boot, cur, cpu);
        diff |= CHECK(id_aa64pfr1, boot, cur, cpu);

        /*
         * If we have AArch32, we care about 32-bit features for compat. These
         * registers should be RES0 otherwise.
         */
        diff |= CHECK(id_dfr0, boot, cur, cpu);
        diff |= CHECK(id_isar0, boot, cur, cpu);
        diff |= CHECK(id_isar1, boot, cur, cpu);
        diff |= CHECK(id_isar2, boot, cur, cpu);
        diff |= CHECK(id_isar3, boot, cur, cpu);
        diff |= CHECK(id_isar4, boot, cur, cpu);
        diff |= CHECK(id_isar5, boot, cur, cpu);
        /*
         * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
         * ACTLR formats could differ across CPUs and therefore would have to
         * be trapped for virtualization anyway.
         */
        diff |= CHECK_MASK(id_mmfr0, 0xff0fffff, boot, cur, cpu);
        diff |= CHECK(id_mmfr1, boot, cur, cpu);
        diff |= CHECK(id_mmfr2, boot, cur, cpu);
        diff |= CHECK(id_mmfr3, boot, cur, cpu);
        diff |= CHECK(id_pfr0, boot, cur, cpu);
        diff |= CHECK(id_pfr1, boot, cur, cpu);

        diff |= CHECK(mvfr0, boot, cur, cpu);
        diff |= CHECK(mvfr1, boot, cur, cpu);
        diff |= CHECK(mvfr2, boot, cur, cpu);

        /*
         * Mismatched CPU features are a recipe for disaster. Don't even
         * pretend to support them.
         */
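        /*
         * WARN_TAINT_ONCE warns once and sets TAINT_CPU_OUT_OF_SPEC, so
         * the mismatch stays visible in subsequent oops/bug reports.
         */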
        WARN_TAINT_ONCE(diff, TAINT_CPU_OUT_OF_SPEC,
                        "Unsupported CPU feature variation.\n");
}

static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
{
        info->reg_cntfrq = arch_timer_get_cntfrq();
        info->reg_ctr = read_cpuid_cachetype();
        info->reg_dczid = read_cpuid(DCZID_EL0);
        info->reg_midr = read_cpuid_id();

        info->reg_id_aa64dfr0 = read_cpuid(ID_AA64DFR0_EL1);
        info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1);
        info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
        info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
        info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
        info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
        info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
        info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);

        info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
        info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
        info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
        info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
        info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
        info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
        info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
        info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
        info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
        info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
        info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
        info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
        info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);

        info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
        info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
        info->reg_mvfr2 = read_cpuid(MVFR2_EL1);

        cpuinfo_detect_icache_policy(info);

        check_local_cpu_errata();
        update_cpu_features(info);
}

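/*
 * Called on each CPU as it comes online (from the secondary boot path,
 * e.g. secondary_start_kernel()): record its state and sanity-check it
 * against the boot CPU's snapshot.
 */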
void cpuinfo_store_cpu(void)
{
        struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data);
        __cpuinfo_store_cpu(info);
        cpuinfo_sanity_check(info);
}

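/*
 * Record the boot CPU's state early during boot. No sanity check runs
 * here: the snapshot taken into boot_cpu_data is the baseline that all
 * later CPUs are compared against.
 */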
void __init cpuinfo_store_boot_cpu(void)
{
        struct cpuinfo_arm64 *info = &per_cpu(cpu_data, 0);
        __cpuinfo_store_cpu(info);

        boot_cpu_data = *info;
}