ACPI: un-export ACPI_ERROR() -- use printk(KERN_ERR...)
/*
 * processor_perflib.c - ACPI Processor P-States Library ($Revision: 71 $)
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                      - Added processor hotplug support
 *
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>

#ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>

#include <asm/uaccess.h>
#endif

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>

#define ACPI_PROCESSOR_COMPONENT        0x01000000
#define ACPI_PROCESSOR_CLASS            "processor"
#define ACPI_PROCESSOR_DRIVER_NAME      "ACPI Processor Driver"
#define ACPI_PROCESSOR_FILE_PERFORMANCE "performance"
#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("acpi_processor")

static DEFINE_MUTEX(performance_mutex);

/*
 * _PPC support is implemented as a CPUfreq policy notifier:
 * whenever a CPUfreq driver that is also registered with the
 * ACPI core is asked to change its policy, the policy's maximum
 * frequency is clipped to the current platform limit.
 *
 * Also, when a new platform limit is detected, the CPUfreq
 * policy is updated accordingly.
 */
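
/*
 * acpi_processor_ppc_status bits: PPC_REGISTERED means the CPUfreq policy
 * notifier below has been registered; PPC_IN_USE means the platform
 * actually provides a _PPC object.
 */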

#define PPC_REGISTERED   1
#define PPC_IN_USE       2

static int acpi_processor_ppc_status = 0;

static int acpi_processor_ppc_notifier(struct notifier_block *nb,
                                       unsigned long event, void *data)
{
        struct cpufreq_policy *policy = data;
        struct acpi_processor *pr;
        unsigned int ppc = 0;

        mutex_lock(&performance_mutex);

        if (event != CPUFREQ_INCOMPATIBLE)
                goto out;

        pr = processors[policy->cpu];
        if (!pr || !pr->performance)
                goto out;

        ppc = (unsigned int)pr->performance_platform_limit;
        if (!ppc)
                goto out;

        /* _PPC is an index into states[], which holds state_count entries */
        if (ppc >= pr->performance->state_count)
                goto out;

        cpufreq_verify_within_limits(policy, 0,
                                     pr->performance->states[ppc].
                                     core_frequency * 1000);

      out:
        mutex_unlock(&performance_mutex);

        return 0;
}

static struct notifier_block acpi_ppc_notifier_block = {
        .notifier_call = acpi_processor_ppc_notifier,
};

static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
        acpi_status status = 0;
        unsigned long ppc = 0;

        ACPI_FUNCTION_TRACE("acpi_processor_get_platform_limit");

        if (!pr)
                return_VALUE(-EINVAL);

        /*
         * _PPC indicates the maximum state currently supported by the platform
         * (e.g. 0 = states 0..n; 1 = states 1..n; etc.)
         */
        status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);

        if (status != AE_NOT_FOUND)
                acpi_processor_ppc_status |= PPC_IN_USE;

        if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
                ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PPC"));
                return_VALUE(-ENODEV);
        }

        pr->performance_platform_limit = (int)ppc;

        return_VALUE(0);
}

int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
{
        int ret = acpi_processor_get_platform_limit(pr);
        if (ret < 0)
                return (ret);
        else
                return cpufreq_update_policy(pr->id);
}

void acpi_processor_ppc_init(void)
{
        if (!cpufreq_register_notifier
            (&acpi_ppc_notifier_block, CPUFREQ_POLICY_NOTIFIER))
                acpi_processor_ppc_status |= PPC_REGISTERED;
        else
                printk(KERN_DEBUG
                       "Warning: Processor Platform Limit not supported.\n");
}

void acpi_processor_ppc_exit(void)
{
        if (acpi_processor_ppc_status & PPC_REGISTERED)
                cpufreq_unregister_notifier(&acpi_ppc_notifier_block,
                                            CPUFREQ_POLICY_NOTIFIER);

        acpi_processor_ppc_status &= ~PPC_REGISTERED;
}

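/*
 * _PCT (Performance Control) returns a two-element package describing the
 * control and status registers used to request and verify P-state
 * transitions; both entries are copied into pr->performance as
 * struct acpi_pct_register.
 */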
static int acpi_processor_get_performance_control(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = 0;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *pct = NULL;
        union acpi_object obj = { 0 };

        ACPI_FUNCTION_TRACE("acpi_processor_get_performance_control");

        status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PCT"));
                return_VALUE(-ENODEV);
        }

        pct = (union acpi_object *)buffer.pointer;
        if (!pct || (pct->type != ACPI_TYPE_PACKAGE)
            || (pct->package.count != 2)) {
                printk(KERN_ERR PREFIX "Invalid _PCT data\n");
                result = -EFAULT;
                goto end;
        }

        /*
         * control_register
         */

        obj = pct->package.elements[0];

        if ((obj.type != ACPI_TYPE_BUFFER)
            || (obj.buffer.length < sizeof(struct acpi_pct_register))
            || (obj.buffer.pointer == NULL)) {
                printk(KERN_ERR PREFIX "Invalid _PCT data (control_register)\n");
                result = -EFAULT;
                goto end;
        }
        memcpy(&pr->performance->control_register, obj.buffer.pointer,
               sizeof(struct acpi_pct_register));

        /*
         * status_register
         */

        obj = pct->package.elements[1];

        if ((obj.type != ACPI_TYPE_BUFFER)
            || (obj.buffer.length < sizeof(struct acpi_pct_register))
            || (obj.buffer.pointer == NULL)) {
                printk(KERN_ERR PREFIX "Invalid _PCT data (status_register)\n");
                result = -EFAULT;
                goto end;
        }

        memcpy(&pr->performance->status_register, obj.buffer.pointer,
               sizeof(struct acpi_pct_register));

      end:
        acpi_os_free(buffer.pointer);

        return_VALUE(result);
}

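/*
 * _PSS (Performance Supported States) returns one package per P-state
 * containing core frequency (MHz), power (mW), transition latency (us),
 * bus master latency (us) and the control/status values, matching the
 * "NNNNNN" format below.  The entries are unpacked into
 * pr->performance->states[].
 */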
static int acpi_processor_get_performance_states(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = AE_OK;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        struct acpi_buffer format = { sizeof("NNNNNN"), "NNNNNN" };
        struct acpi_buffer state = { 0, NULL };
        union acpi_object *pss = NULL;
        int i;

        ACPI_FUNCTION_TRACE("acpi_processor_get_performance_states");

        status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PSS"));
                return_VALUE(-ENODEV);
        }

        pss = (union acpi_object *)buffer.pointer;
        if (!pss || (pss->type != ACPI_TYPE_PACKAGE)) {
                printk(KERN_ERR PREFIX "Invalid _PSS data\n");
                result = -EFAULT;
                goto end;
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d performance states\n",
                          pss->package.count));

        pr->performance->state_count = pss->package.count;
        pr->performance->states =
            kmalloc(sizeof(struct acpi_processor_px) * pss->package.count,
                    GFP_KERNEL);
        if (!pr->performance->states) {
                result = -ENOMEM;
                goto end;
        }

        for (i = 0; i < pr->performance->state_count; i++) {

                struct acpi_processor_px *px = &(pr->performance->states[i]);

                state.length = sizeof(struct acpi_processor_px);
                state.pointer = px;

                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));

                status = acpi_extract_package(&(pss->package.elements[i]),
                                              &format, &state);
                if (ACPI_FAILURE(status)) {
                        ACPI_EXCEPTION((AE_INFO, status, "Invalid _PSS data"));
                        result = -EFAULT;
                        kfree(pr->performance->states);
                        goto end;
                }

                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
                                  i,
                                  (u32) px->core_frequency,
                                  (u32) px->power,
                                  (u32) px->transition_latency,
                                  (u32) px->bus_master_latency,
                                  (u32) px->control, (u32) px->status));

                if (!px->core_frequency) {
                        printk(KERN_ERR PREFIX
                                    "Invalid _PSS data: freq is zero\n");
                        result = -EFAULT;
                        kfree(pr->performance->states);
                        goto end;
                }
        }

      end:
        acpi_os_free(buffer.pointer);

        return_VALUE(result);
}

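/*
 * _PCT and _PSS are both required for OS-controlled P-states; _PPC (the
 * platform limit) is optional and defaults to 0, i.e. no limit.
 */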
static int acpi_processor_get_performance_info(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = AE_OK;
        acpi_handle handle = NULL;

        ACPI_FUNCTION_TRACE("acpi_processor_get_performance_info");

        if (!pr || !pr->performance || !pr->handle)
                return_VALUE(-EINVAL);

        status = acpi_get_handle(pr->handle, "_PCT", &handle);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "ACPI-based processor performance control unavailable\n"));
                return_VALUE(-ENODEV);
        }

        result = acpi_processor_get_performance_control(pr);
        if (result)
                return_VALUE(result);

        result = acpi_processor_get_performance_states(pr);
        if (result)
                return_VALUE(result);

        result = acpi_processor_get_platform_limit(pr);
        if (result)
                return_VALUE(result);

        return_VALUE(0);
}

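/*
 * Writing the FADT pstate_cnt value to the FADT SMI command port tells the
 * platform (SMM/BIOS) that the OS is assuming control of processor
 * performance states.  This only needs to happen once, hence the static
 * is_done bookkeeping below.
 */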
int acpi_processor_notify_smm(struct module *calling_module)
{
        acpi_status status;
        static int is_done = 0;

        ACPI_FUNCTION_TRACE("acpi_processor_notify_smm");

        if (!(acpi_processor_ppc_status & PPC_REGISTERED))
                return_VALUE(-EBUSY);

        if (!try_module_get(calling_module))
                return_VALUE(-EINVAL);

        /* is_done is set to a negative value if an error occurred,
         * and to a positive value if no error occurred but SMM
         * was already notified. This avoids double notification,
         * which might lead to unexpected results.
         */
        if (is_done > 0) {
                module_put(calling_module);
                return_VALUE(0);
        } else if (is_done < 0) {
                module_put(calling_module);
                return_VALUE(is_done);
        }

        is_done = -EIO;

        /* Can't write pstate_cnt to smi_cmd if either value is zero */
        if ((!acpi_fadt.smi_cmd) || (!acpi_fadt.pstate_cnt)) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_cnt\n"));
                module_put(calling_module);
                return_VALUE(0);
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "Writing pstate_cnt [0x%x] to smi_cmd [0x%x]\n",
                          acpi_fadt.pstate_cnt, acpi_fadt.smi_cmd));

        /* FADT v1 doesn't support pstate_cnt, many BIOS vendors use
         * it anyway, so we need to support it... */
        if (acpi_fadt_is_v1) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Using v1.0 FADT reserved value for pstate_cnt\n"));
        }

        status = acpi_os_write_port(acpi_fadt.smi_cmd,
                                    (u32) acpi_fadt.pstate_cnt, 8);
        if (ACPI_FAILURE(status)) {
                ACPI_EXCEPTION((AE_INFO, status,
                                "Failed to write pstate_cnt [0x%x] to "
                                "smi_cmd [0x%x]", acpi_fadt.pstate_cnt,
                                acpi_fadt.smi_cmd));
                module_put(calling_module);
                return_VALUE(status);
        }

        /* Success. If there's no _PPC, we need to fear nothing, so
         * we can allow the cpufreq driver to be rmmod'ed. */
        is_done = 1;

        if (!(acpi_processor_ppc_status & PPC_IN_USE))
                module_put(calling_module);

        return_VALUE(0);
}

EXPORT_SYMBOL(acpi_processor_notify_smm);

#ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
/* /proc/acpi/processor/../performance interface (DEPRECATED) */

static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file);
static struct file_operations acpi_processor_perf_fops = {
        .open = acpi_processor_perf_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int acpi_processor_perf_seq_show(struct seq_file *seq, void *offset)
{
        struct acpi_processor *pr = (struct acpi_processor *)seq->private;
        int i;

        ACPI_FUNCTION_TRACE("acpi_processor_perf_seq_show");

        if (!pr)
                goto end;

        if (!pr->performance) {
                seq_puts(seq, "<not supported>\n");
                goto end;
        }

        seq_printf(seq, "state count:             %d\n"
                   "active state:            P%d\n",
                   pr->performance->state_count, pr->performance->state);

        seq_puts(seq, "states:\n");
        for (i = 0; i < pr->performance->state_count; i++)
                seq_printf(seq,
                           "   %cP%d:                  %d MHz, %d mW, %d uS\n",
                           (i == pr->performance->state ? '*' : ' '), i,
                           (u32) pr->performance->states[i].core_frequency,
                           (u32) pr->performance->states[i].power,
                           (u32) pr->performance->states[i].transition_latency);

      end:
        return_VALUE(0);
}

static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file)
{
        return single_open(file, acpi_processor_perf_seq_show,
                           PDE(inode)->data);
}

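/*
 * Writing a P-state index to the 'performance' proc file pins the CPU's
 * cpufreq policy min and max to that state's frequency.
 */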
static ssize_t
acpi_processor_write_performance(struct file *file,
                                 const char __user * buffer,
                                 size_t count, loff_t * data)
{
        int result = 0;
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct acpi_processor *pr = (struct acpi_processor *)m->private;
        struct acpi_processor_performance *perf;
        char state_string[12] = { '\0' };
        unsigned int new_state = 0;
        struct cpufreq_policy policy;

        ACPI_FUNCTION_TRACE("acpi_processor_write_performance");

        if (!pr || (count > sizeof(state_string) - 1))
                return_VALUE(-EINVAL);

        perf = pr->performance;
        if (!perf)
                return_VALUE(-EINVAL);

        if (copy_from_user(state_string, buffer, count))
                return_VALUE(-EFAULT);

        state_string[count] = '\0';
        new_state = simple_strtoul(state_string, NULL, 0);

        if (new_state >= perf->state_count)
                return_VALUE(-EINVAL);

        cpufreq_get_policy(&policy, pr->id);

        policy.cpu = pr->id;
        policy.min = perf->states[new_state].core_frequency * 1000;
        policy.max = perf->states[new_state].core_frequency * 1000;

        result = cpufreq_set_policy(&policy);
        if (result)
                return_VALUE(result);

        return_VALUE(count);
}

static void acpi_cpufreq_add_file(struct acpi_processor *pr)
{
        struct proc_dir_entry *entry = NULL;
        struct acpi_device *device = NULL;

        ACPI_FUNCTION_TRACE("acpi_cpufreq_add_file");

        if (acpi_bus_get_device(pr->handle, &device))
                return_VOID;

        /* add file 'performance' [R/W] */
        entry = create_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE,
                                  S_IFREG | S_IRUGO | S_IWUSR,
                                  acpi_device_dir(device));
        if (entry) {
                acpi_processor_perf_fops.write = acpi_processor_write_performance;
                entry->proc_fops = &acpi_processor_perf_fops;
                entry->data = acpi_driver_data(device);
                entry->owner = THIS_MODULE;
        }
        return_VOID;
}

static void acpi_cpufreq_remove_file(struct acpi_processor *pr)
{
        struct acpi_device *device = NULL;

        ACPI_FUNCTION_TRACE("acpi_cpufreq_remove_file");

        if (acpi_bus_get_device(pr->handle, &device))
                return_VOID;

        /* remove file 'performance' */
        remove_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE,
                          acpi_device_dir(device));

        return_VOID;
}

#else
static void acpi_cpufreq_add_file(struct acpi_processor *pr)
{
        return;
}
static void acpi_cpufreq_remove_file(struct acpi_processor *pr)
{
        return;
}
#endif                          /* CONFIG_X86_ACPI_CPUFREQ_PROC_INTF */

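/*
 * _PSD (P-State Dependency) is a single five-integer package (num_entries,
 * revision, domain, coord_type, num_processors) describing which CPUs share
 * P-state control and how transitions are coordinated (SW_ALL, SW_ANY or
 * HW_ALL).  It is unpacked into pr->performance->domain_info.
 */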
static int acpi_processor_get_psd(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = AE_OK;
        struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
        struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
        struct acpi_buffer state = {0, NULL};
        union acpi_object  *psd = NULL;
        struct acpi_psd_package *pdomain;

        status = acpi_evaluate_object(pr->handle, "_PSD", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                return -ENODEV;
        }

        psd = (union acpi_object *) buffer.pointer;
        if (!psd || (psd->type != ACPI_TYPE_PACKAGE)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
                result = -EFAULT;
                goto end;
        }

        if (psd->package.count != 1) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
                result = -EFAULT;
                goto end;
        }

        pdomain = &(pr->performance->domain_info);

        state.length = sizeof(struct acpi_psd_package);
        state.pointer = pdomain;

        status = acpi_extract_package(&(psd->package.elements[0]),
                &format, &state);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
                result = -EFAULT;
                goto end;
        }

        if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _PSD:num_entries\n"));
                result = -EFAULT;
                goto end;
        }

        if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _PSD:revision\n"));
                result = -EFAULT;
                goto end;
        }

end:
        acpi_os_free(buffer.pointer);
        return result;
}

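/*
 * Called by the cpufreq driver before per-CPU registration: evaluate _PSD
 * for every possible CPU and translate the dependency domains into
 * shared_cpu_map/shared_type so the driver knows which CPUs must switch
 * frequency together.  On any parsing error we fall back to no
 * coordination, i.e. each CPU in a domain of its own.  pr->performance is
 * cleared again before returning; the real assignment happens in
 * acpi_processor_register_performance().
 */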
int acpi_processor_preregister_performance(
                struct acpi_processor_performance **performance)
{
        int count, count_target;
        int retval = 0;
        unsigned int i, j;
        cpumask_t covered_cpus;
        struct acpi_processor *pr;
        struct acpi_psd_package *pdomain;
        struct acpi_processor *match_pr;
        struct acpi_psd_package *match_pdomain;

        mutex_lock(&performance_mutex);

        retval = 0;

        /* Call _PSD for all CPUs */
        for_each_possible_cpu(i) {
                pr = processors[i];
                if (!pr) {
                        /* Look only at processors in ACPI namespace */
                        continue;
                }

                if (pr->performance) {
                        retval = -EBUSY;
                        continue;
                }

                if (!performance || !performance[i]) {
                        retval = -EINVAL;
                        continue;
                }

                pr->performance = performance[i];
                cpu_set(i, pr->performance->shared_cpu_map);
                if (acpi_processor_get_psd(pr)) {
                        retval = -EINVAL;
                        continue;
                }
        }
        if (retval)
                goto err_ret;

        /*
         * Now that we have _PSD data from all CPUs, let's set up the
         * P-state domain info.
         */
        for_each_possible_cpu(i) {
                pr = processors[i];
                if (!pr)
                        continue;

                /* Basic validity check for domain info */
                pdomain = &(pr->performance->domain_info);
                if ((pdomain->revision != ACPI_PSD_REV0_REVISION) ||
                    (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES)) {
                        retval = -EINVAL;
                        goto err_ret;
                }
                if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
                    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
                    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
                        retval = -EINVAL;
                        goto err_ret;
                }
        }

        cpus_clear(covered_cpus);
        for_each_possible_cpu(i) {
                pr = processors[i];
                if (!pr)
                        continue;

                if (cpu_isset(i, covered_cpus))
                        continue;

                pdomain = &(pr->performance->domain_info);
                cpu_set(i, pr->performance->shared_cpu_map);
                cpu_set(i, covered_cpus);
                if (pdomain->num_processors <= 1)
                        continue;

                /* Validate the Domain info */
                count_target = pdomain->num_processors;
                count = 1;
                if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL ||
                    pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL) {
                        pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
                } else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY) {
                        pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;
                }

                for_each_possible_cpu(j) {
                        if (i == j)
                                continue;

                        match_pr = processors[j];
                        if (!match_pr)
                                continue;

                        match_pdomain = &(match_pr->performance->domain_info);
                        if (match_pdomain->domain != pdomain->domain)
                                continue;

                        /* Here i and j are in the same domain */

                        if (match_pdomain->num_processors != count_target) {
                                retval = -EINVAL;
                                goto err_ret;
                        }

                        if (pdomain->coord_type != match_pdomain->coord_type) {
                                retval = -EINVAL;
                                goto err_ret;
                        }

                        cpu_set(j, covered_cpus);
                        cpu_set(j, pr->performance->shared_cpu_map);
                        count++;
                }

                for_each_possible_cpu(j) {
                        if (i == j)
                                continue;

                        match_pr = processors[j];
                        if (!match_pr)
                                continue;

                        match_pdomain = &(match_pr->performance->domain_info);
                        if (match_pdomain->domain != pdomain->domain)
                                continue;

                        match_pr->performance->shared_type =
                                        pr->performance->shared_type;
                        match_pr->performance->shared_cpu_map =
                                pr->performance->shared_cpu_map;
                }
        }

err_ret:
        if (retval) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error while parsing _PSD domain information. Assuming no coordination\n"));
        }

        for_each_possible_cpu(i) {
                pr = processors[i];
                if (!pr || !pr->performance)
                        continue;

                /* Assume no coordination on any error parsing domain info */
                if (retval) {
                        cpus_clear(pr->performance->shared_cpu_map);
                        cpu_set(i, pr->performance->shared_cpu_map);
                        pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
                }
                pr->performance = NULL; /* Will be set for real in register */
        }

        mutex_unlock(&performance_mutex);
        return retval;
}
EXPORT_SYMBOL(acpi_processor_preregister_performance);


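/*
 * Claim pr->performance for the calling cpufreq driver, populate it from
 * _PCT/_PSS/_PPC and, when the deprecated proc interface is configured,
 * create the per-CPU 'performance' proc file.
 */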
int
acpi_processor_register_performance(struct acpi_processor_performance
                                    *performance, unsigned int cpu)
{
        struct acpi_processor *pr;

        ACPI_FUNCTION_TRACE("acpi_processor_register_performance");

        if (!(acpi_processor_ppc_status & PPC_REGISTERED))
                return_VALUE(-EINVAL);

        mutex_lock(&performance_mutex);

        pr = processors[cpu];
        if (!pr) {
                mutex_unlock(&performance_mutex);
                return_VALUE(-ENODEV);
        }

        if (pr->performance) {
                mutex_unlock(&performance_mutex);
                return_VALUE(-EBUSY);
        }

        WARN_ON(!performance);

        pr->performance = performance;

        if (acpi_processor_get_performance_info(pr)) {
                pr->performance = NULL;
                mutex_unlock(&performance_mutex);
                return_VALUE(-EIO);
        }

        acpi_cpufreq_add_file(pr);

        mutex_unlock(&performance_mutex);
        return_VALUE(0);
}

EXPORT_SYMBOL(acpi_processor_register_performance);

void
acpi_processor_unregister_performance(struct acpi_processor_performance
                                      *performance, unsigned int cpu)
{
        struct acpi_processor *pr;

        ACPI_FUNCTION_TRACE("acpi_processor_unregister_performance");

        mutex_lock(&performance_mutex);

        pr = processors[cpu];
        if (!pr) {
                mutex_unlock(&performance_mutex);
                return_VOID;
        }

        if (pr->performance)
                kfree(pr->performance->states);
        pr->performance = NULL;

        acpi_cpufreq_remove_file(pr);

        mutex_unlock(&performance_mutex);

        return_VOID;
}

EXPORT_SYMBOL(acpi_processor_unregister_performance);