1 /*
2  *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
3  *
4  *  Copyright (C) 2000       Andrew Henroid
5  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
6  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7  *  Copyright (c) 2008 Intel Corporation
8  *   Author: Matthew Wilcox <willy@linux.intel.com>
9  *
10  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2 of the License, or
15  *  (at your option) any later version.
16  *
17  *  This program is distributed in the hope that it will be useful,
18  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  *  GNU General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License
23  *  along with this program; if not, write to the Free Software
24  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
25  *
26  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
27  *
28  */
29
30 #include <linux/module.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/mm.h>
34 #include <linux/pci.h>
35 #include <linux/interrupt.h>
36 #include <linux/kmod.h>
37 #include <linux/delay.h>
38 #include <linux/workqueue.h>
39 #include <linux/nmi.h>
40 #include <linux/acpi.h>
41 #include <linux/acpi_io.h>
42 #include <linux/efi.h>
43 #include <linux/ioport.h>
44 #include <linux/list.h>
45 #include <linux/jiffies.h>
46 #include <linux/semaphore.h>
47
48 #include <asm/io.h>
49 #include <asm/uaccess.h>
50
51 #include <acpi/acpi.h>
52 #include <acpi/acpi_bus.h>
53 #include <acpi/processor.h>
54
55 #define _COMPONENT              ACPI_OS_SERVICES
56 ACPI_MODULE_NAME("osl");
57 #define PREFIX          "ACPI: "
58 struct acpi_os_dpc {
59         acpi_osd_exec_callback function;
60         void *context;
61         struct work_struct work;
62         int wait;
63 };
64
65 #ifdef CONFIG_ACPI_CUSTOM_DSDT
66 #include CONFIG_ACPI_CUSTOM_DSDT_FILE
67 #endif
68
69 #ifdef ENABLE_DEBUGGER
70 #include <linux/kdb.h>
71
72 /* stuff for debugger support */
73 int acpi_in_debugger;
74 EXPORT_SYMBOL(acpi_in_debugger);
75
76 extern char line_buf[80];
77 #endif                          /*ENABLE_DEBUGGER */
78
79 static acpi_osd_handler acpi_irq_handler;
80 static void *acpi_irq_context;
81 static struct workqueue_struct *kacpid_wq;
82 static struct workqueue_struct *kacpi_notify_wq;
83 static struct workqueue_struct *kacpi_hotplug_wq;
84
85 struct acpi_res_list {
86         resource_size_t start;
87         resource_size_t end;
88         acpi_adr_space_type resource_type; /* IO port, System memory, ...*/
89         char name[5];   /* ACPI names are at most 4 chars long; store the name
90                            here instead of res->name, so no kmalloc is needed */
91         struct list_head resource_list;
92         int count;
93 };
94
95 static LIST_HEAD(resource_list_head);
96 static DEFINE_SPINLOCK(acpi_res_lock);
97
98 /*
99  * This list of permanent mappings is for memory that may be accessed from
100  * interrupt context, where we can't do the ioremap().
101  */
102 struct acpi_ioremap {
103         struct list_head list;
104         void __iomem *virt;
105         acpi_physical_address phys;
106         acpi_size size;
107         unsigned long refcount;
108 };
109
110 static LIST_HEAD(acpi_ioremaps);
111 static DEFINE_MUTEX(acpi_ioremap_lock);
112
113 static void __init acpi_osi_setup_late(void);
114
115 /*
116  * The story of _OSI(Linux)
117  *
118  * From pre-history through Linux-2.6.22,
119  * Linux responded TRUE upon a BIOS OSI(Linux) query.
120  *
121  * Unfortunately, reference BIOS writers got wind of this
122  * and put OSI(Linux) in their example code, quickly exposing
123  * this string as ill-conceived and opening the door to
124  * an unbounded number of BIOS incompatibilities.
125  *
126  * For example, OSI(Linux) was used on resume to re-POST a
127  * video card on one system, because Linux at that time
128  * could not do a speedy restore in its native driver.
129  * But even after Linux gained quick native restore capability,
130  * it had no way to tell the BIOS to skip the time-consuming
131  * POST -- putting Linux at a permanent performance disadvantage.
132  * On another system, the BIOS writer used OSI(Linux)
133  * to infer native OS support for IPMI!  On other systems,
134  * OSI(Linux) simply got in the way of Linux claiming to
135  * be compatible with other operating systems, exposing
136  * BIOS issues such as skipped device initialization.
137  *
138  * So "Linux" turned out to be a really poor choice of
139  * OSI string, and from Linux-2.6.23 onward we respond FALSE.
140  *
141  * BIOS writers should NOT query _OSI(Linux) on future systems.
142  * Linux will complain on the console when it sees it, and return FALSE.
143  * Getting Linux to return TRUE for your system requires
144  * a kernel source update to add a DMI entry,
145  * or booting with "acpi_osi=Linux"
146  */
147
148 static struct osi_linux {
149         unsigned int    enable:1;
150         unsigned int    dmi:1;
151         unsigned int    cmdline:1;
152 } osi_linux = {0, 0, 0};
153
154 static u32 acpi_osi_handler(acpi_string interface, u32 supported)
155 {
156         if (!strcmp("Linux", interface)) {
157
158                 printk(KERN_NOTICE FW_BUG PREFIX
159                         "BIOS _OSI(Linux) query %s%s\n",
160                         osi_linux.enable ? "honored" : "ignored",
161                         osi_linux.cmdline ? " via cmdline" :
162                         osi_linux.dmi ? " via DMI" : "");
163         }
164
165         return supported;
166 }
167
168 static void __init acpi_request_region (struct acpi_generic_address *addr,
169         unsigned int length, char *desc)
170 {
171         if (!addr->address || !length)
172                 return;
173
174         /* Resources are never freed */
175         if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
176                 request_region(addr->address, length, desc);
177         else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
178                 request_mem_region(addr->address, length, desc);
179 }
180
181 static int __init acpi_reserve_resources(void)
182 {
183         acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
184                 "ACPI PM1a_EVT_BLK");
185
186         acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
187                 "ACPI PM1b_EVT_BLK");
188
189         acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
190                 "ACPI PM1a_CNT_BLK");
191
192         acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
193                 "ACPI PM1b_CNT_BLK");
194
195         if (acpi_gbl_FADT.pm_timer_length == 4)
196                 acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");
197
198         acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
199                 "ACPI PM2_CNT_BLK");
200
201         /* The length of each GPE block must be a multiple of 2 */
202
203         if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
204                 acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
205                                acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");
206
207         if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
208                 acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
209                                acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
210
211         return 0;
212 }
213 device_initcall(acpi_reserve_resources);
214
215 void acpi_os_printf(const char *fmt, ...)
216 {
217         va_list args;
218         va_start(args, fmt);
219         acpi_os_vprintf(fmt, args);
220         va_end(args);
221 }
222
223 void acpi_os_vprintf(const char *fmt, va_list args)
224 {
225         static char buffer[512];
226
227         vsprintf(buffer, fmt, args);
228
229 #ifdef ENABLE_DEBUGGER
230         if (acpi_in_debugger) {
231                 kdb_printf("%s", buffer);
232         } else {
233                 printk(KERN_CONT "%s", buffer);
234         }
235 #else
236         printk(KERN_CONT "%s", buffer);
237 #endif
238 }
239
240 acpi_physical_address __init acpi_os_get_root_pointer(void)
241 {
242         if (efi_enabled) {
243                 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
244                         return efi.acpi20;
245                 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
246                         return efi.acpi;
247                 else {
248                         printk(KERN_ERR PREFIX
249                                "System description tables not found\n");
250                         return 0;
251                 }
252         } else {
253                 acpi_physical_address pa = 0;
254
255                 acpi_find_root_pointer(&pa);
256                 return pa;
257         }
258 }
259
260 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
261 static struct acpi_ioremap *
262 acpi_map_lookup(acpi_physical_address phys, acpi_size size)
263 {
264         struct acpi_ioremap *map;
265
266         list_for_each_entry_rcu(map, &acpi_ioremaps, list)
267                 if (map->phys <= phys &&
268                     phys + size <= map->phys + map->size)
269                         return map;
270
271         return NULL;
272 }
273
274 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
275 static void __iomem *
276 acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
277 {
278         struct acpi_ioremap *map;
279
280         map = acpi_map_lookup(phys, size);
281         if (map)
282                 return map->virt + (phys - map->phys);
283
284         return NULL;
285 }
286
287 void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
288 {
289         struct acpi_ioremap *map;
290         void __iomem *virt = NULL;
291
292         mutex_lock(&acpi_ioremap_lock);
293         map = acpi_map_lookup(phys, size);
294         if (map) {
295                 virt = map->virt + (phys - map->phys);
296                 map->refcount++;
297         }
298         mutex_unlock(&acpi_ioremap_lock);
299         return virt;
300 }
301 EXPORT_SYMBOL_GPL(acpi_os_get_iomem);
302
303 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
304 static struct acpi_ioremap *
305 acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
306 {
307         struct acpi_ioremap *map;
308
309         list_for_each_entry_rcu(map, &acpi_ioremaps, list)
310                 if (map->virt <= virt &&
311                     virt + size <= map->virt + map->size)
312                         return map;
313
314         return NULL;
315 }
316
317 void __iomem *__init_refok
318 acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
319 {
320         struct acpi_ioremap *map;
321         void __iomem *virt;
322         acpi_physical_address pg_off;
323         acpi_size pg_sz;
324
325         if (phys > ULONG_MAX) {
326                 printk(KERN_ERR PREFIX "Cannot map memory that high\n");
327                 return NULL;
328         }
329
330         if (!acpi_gbl_permanent_mmap)
331                 return __acpi_map_table((unsigned long)phys, size);
332
333         mutex_lock(&acpi_ioremap_lock);
334         /* Check if there's a suitable mapping already. */
335         map = acpi_map_lookup(phys, size);
336         if (map) {
337                 map->refcount++;
338                 goto out;
339         }
340
341         map = kzalloc(sizeof(*map), GFP_KERNEL);
342         if (!map) {
343                 mutex_unlock(&acpi_ioremap_lock);
344                 return NULL;
345         }
346
347         pg_off = round_down(phys, PAGE_SIZE);
348         pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
349         virt = acpi_os_ioremap(pg_off, pg_sz);
350         if (!virt) {
351                 mutex_unlock(&acpi_ioremap_lock);
352                 kfree(map);
353                 return NULL;
354         }
355
356         INIT_LIST_HEAD(&map->list);
357         map->virt = virt;
358         map->phys = pg_off;
359         map->size = pg_sz;
360         map->refcount = 1;
361
362         list_add_tail_rcu(&map->list, &acpi_ioremaps);
363
364  out:
365         mutex_unlock(&acpi_ioremap_lock);
366         return map->virt + (phys - map->phys);
367 }
368 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
369
370 static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
371 {
372         if (!--map->refcount)
373                 list_del_rcu(&map->list);
374 }
375
376 static void acpi_os_map_cleanup(struct acpi_ioremap *map)
377 {
378         if (!map->refcount) {
379                 synchronize_rcu();
380                 iounmap(map->virt);
381                 kfree(map);
382         }
383 }
384
385 void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
386 {
387         struct acpi_ioremap *map;
388
389         if (!acpi_gbl_permanent_mmap) {
390                 __acpi_unmap_table(virt, size);
391                 return;
392         }
393
394         mutex_lock(&acpi_ioremap_lock);
395         map = acpi_map_lookup_virt(virt, size);
396         if (!map) {
397                 mutex_unlock(&acpi_ioremap_lock);
398                 WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
399                 return;
400         }
401         acpi_os_drop_map_ref(map);
402         mutex_unlock(&acpi_ioremap_lock);
403
404         acpi_os_map_cleanup(map);
405 }
406 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
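
/*
 * Illustrative usage sketch only (not called from this file): a caller maps
 * a physical range, accesses it through the returned cookie, then drops the
 * mapping.  'table_pa' and 'table_len' are made-up names for this example.
 *
 *	void __iomem *va = acpi_os_map_memory(table_pa, table_len);
 *	if (va) {
 *		u8 b = readb(va);
 *		acpi_os_unmap_memory(va, table_len);
 *	}
 */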
407
408 void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
409 {
410         if (!acpi_gbl_permanent_mmap)
411                 __acpi_unmap_table(virt, size);
412 }
413
414 static int acpi_os_map_generic_address(struct acpi_generic_address *addr)
415 {
416         void __iomem *virt;
417
418         if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
419                 return 0;
420
421         if (!addr->address || !addr->bit_width)
422                 return -EINVAL;
423
424         virt = acpi_os_map_memory(addr->address, addr->bit_width / 8);
425         if (!virt)
426                 return -EIO;
427
428         return 0;
429 }
430
431 static void acpi_os_unmap_generic_address(struct acpi_generic_address *addr)
432 {
433         struct acpi_ioremap *map;
434
435         if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
436                 return;
437
438         if (!addr->address || !addr->bit_width)
439                 return;
440
441         mutex_lock(&acpi_ioremap_lock);
442         map = acpi_map_lookup(addr->address, addr->bit_width / 8);
443         if (!map) {
444                 mutex_unlock(&acpi_ioremap_lock);
445                 return;
446         }
447         acpi_os_drop_map_ref(map);
448         mutex_unlock(&acpi_ioremap_lock);
449
450         acpi_os_map_cleanup(map);
451 }
452
453 #ifdef ACPI_FUTURE_USAGE
454 acpi_status
455 acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
456 {
457         if (!phys || !virt)
458                 return AE_BAD_PARAMETER;
459
460         *phys = virt_to_phys(virt);
461
462         return AE_OK;
463 }
464 #endif
465
466 #define ACPI_MAX_OVERRIDE_LEN 100
467
468 static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
469
470 acpi_status
471 acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
472                             acpi_string * new_val)
473 {
474         if (!init_val || !new_val)
475                 return AE_BAD_PARAMETER;
476
477         *new_val = NULL;
478         if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
479                 printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
480                        acpi_os_name);
481                 *new_val = acpi_os_name;
482         }
483
484         return AE_OK;
485 }
486
487 acpi_status
488 acpi_os_table_override(struct acpi_table_header * existing_table,
489                        struct acpi_table_header ** new_table)
490 {
491         if (!existing_table || !new_table)
492                 return AE_BAD_PARAMETER;
493
494         *new_table = NULL;
495
496 #ifdef CONFIG_ACPI_CUSTOM_DSDT
497         if (strncmp(existing_table->signature, "DSDT", 4) == 0)
498                 *new_table = (struct acpi_table_header *)AmlCode;
499 #endif
500         if (*new_table != NULL) {
501                 printk(KERN_WARNING PREFIX "Override [%4.4s-%8.8s], "
502                            "this is unsafe: tainting kernel\n",
503                        existing_table->signature,
504                        existing_table->oem_table_id);
505                 add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
506         }
507         return AE_OK;
508 }
509
510 static irqreturn_t acpi_irq(int irq, void *dev_id)
511 {
512         u32 handled;
513
514         handled = (*acpi_irq_handler) (acpi_irq_context);
515
516         if (handled) {
517                 acpi_irq_handled++;
518                 return IRQ_HANDLED;
519         } else {
520                 acpi_irq_not_handled++;
521                 return IRQ_NONE;
522         }
523 }
524
525 acpi_status
526 acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
527                                   void *context)
528 {
529         unsigned int irq;
530
531         acpi_irq_stats_init();
532
533         /*
534          * ACPI interrupts different from the SCI in our copy of the FADT are
535          * not supported.
536          */
537         if (gsi != acpi_gbl_FADT.sci_interrupt)
538                 return AE_BAD_PARAMETER;
539
540         if (acpi_irq_handler)
541                 return AE_ALREADY_ACQUIRED;
542
543         if (acpi_gsi_to_irq(gsi, &irq) < 0) {
544                 printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
545                        gsi);
546                 return AE_OK;
547         }
548
549         acpi_irq_handler = handler;
550         acpi_irq_context = context;
551         if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
552                 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
553                 acpi_irq_handler = NULL;
554                 return AE_NOT_ACQUIRED;
555         }
556
557         return AE_OK;
558 }
559
560 acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
561 {
562         if (irq != acpi_gbl_FADT.sci_interrupt)
563                 return AE_BAD_PARAMETER;
564
565         free_irq(irq, acpi_irq);
566         acpi_irq_handler = NULL;
567
568         return AE_OK;
569 }
570
571 /*
572  * Running in interpreter thread context, safe to sleep
573  */
574
575 void acpi_os_sleep(u64 ms)
576 {
577         schedule_timeout_interruptible(msecs_to_jiffies(ms));
578 }
579
580 void acpi_os_stall(u32 us)
581 {
582         while (us) {
583                 u32 delay = 1000;
584
585                 if (delay > us)
586                         delay = us;
587                 udelay(delay);
588                 touch_nmi_watchdog();
589                 us -= delay;
590         }
591 }
592
593 /*
594  * Support ACPI 3.0 AML Timer operand
595  * Returns 64-bit free-running, monotonically increasing timer
596  * with 100ns granularity
597  */
598 u64 acpi_os_get_timer(void)
599 {
600         static u64 t;
601
602 #ifdef  CONFIG_HPET
603         /* TBD: use HPET if available */
604 #endif
605
606 #ifdef  CONFIG_X86_PM_TIMER
607         /* TBD: default to PM timer if HPET was not available */
608 #endif
609         if (!t)
610                 printk(KERN_ERR PREFIX "acpi_os_get_timer() TBD\n");
611
612         return ++t;
613 }
614
615 acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
616 {
617         u32 dummy;
618
619         if (!value)
620                 value = &dummy;
621
622         *value = 0;
623         if (width <= 8) {
624                 *(u8 *) value = inb(port);
625         } else if (width <= 16) {
626                 *(u16 *) value = inw(port);
627         } else if (width <= 32) {
628                 *(u32 *) value = inl(port);
629         } else {
630                 BUG();
631         }
632
633         return AE_OK;
634 }
635
636 EXPORT_SYMBOL(acpi_os_read_port);
637
638 acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
639 {
640         if (width <= 8) {
641                 outb(value, port);
642         } else if (width <= 16) {
643                 outw(value, port);
644         } else if (width <= 32) {
645                 outl(value, port);
646         } else {
647                 BUG();
648         }
649
650         return AE_OK;
651 }
652
653 EXPORT_SYMBOL(acpi_os_write_port);
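
/*
 * Illustration only ('val' is a hypothetical local and port 0x80 an arbitrary
 * example): reading a single byte through the OSL wrapper instead of calling
 * inb() directly.
 *
 *	u32 val;
 *	acpi_os_read_port(0x80, &val, 8);
 */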
654
655 acpi_status
656 acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
657 {
658         void __iomem *virt_addr;
659         unsigned int size = width / 8;
660         bool unmap = false;
661         u32 dummy;
662
663         rcu_read_lock();
664         virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
665         if (!virt_addr) {
666                 rcu_read_unlock();
667                 virt_addr = acpi_os_ioremap(phys_addr, size);
668                 if (!virt_addr)
669                         return AE_BAD_ADDRESS;
670                 unmap = true;
671         }
672
673         if (!value)
674                 value = &dummy;
675
676         switch (width) {
677         case 8:
678                 *(u8 *) value = readb(virt_addr);
679                 break;
680         case 16:
681                 *(u16 *) value = readw(virt_addr);
682                 break;
683         case 32:
684                 *(u32 *) value = readl(virt_addr);
685                 break;
686         default:
687                 BUG();
688         }
689
690         if (unmap)
691                 iounmap(virt_addr);
692         else
693                 rcu_read_unlock();
694
695         return AE_OK;
696 }
697
698 acpi_status
699 acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
700 {
701         void __iomem *virt_addr;
702         unsigned int size = width / 8;
703         bool unmap = false;
704
705         rcu_read_lock();
706         virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
707         if (!virt_addr) {
708                 rcu_read_unlock();
709                 virt_addr = acpi_os_ioremap(phys_addr, size);
710                 if (!virt_addr)
711                         return AE_BAD_ADDRESS;
712                 unmap = true;
713         }
714
715         switch (width) {
716         case 8:
717                 writeb(value, virt_addr);
718                 break;
719         case 16:
720                 writew(value, virt_addr);
721                 break;
722         case 32:
723                 writel(value, virt_addr);
724                 break;
725         default:
726                 BUG();
727         }
728
729         if (unmap)
730                 iounmap(virt_addr);
731         else
732                 rcu_read_unlock();
733
734         return AE_OK;
735 }
736
737 acpi_status
738 acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
739                                u64 *value, u32 width)
740 {
741         int result, size;
742         u32 value32;
743
744         if (!value)
745                 return AE_BAD_PARAMETER;
746
747         switch (width) {
748         case 8:
749                 size = 1;
750                 break;
751         case 16:
752                 size = 2;
753                 break;
754         case 32:
755                 size = 4;
756                 break;
757         default:
758                 return AE_ERROR;
759         }
760
761         result = raw_pci_read(pci_id->segment, pci_id->bus,
762                                 PCI_DEVFN(pci_id->device, pci_id->function),
763                                 reg, size, &value32);
764         *value = value32;
765
766         return (result ? AE_ERROR : AE_OK);
767 }
768
769 acpi_status
770 acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
771                                 u64 value, u32 width)
772 {
773         int result, size;
774
775         switch (width) {
776         case 8:
777                 size = 1;
778                 break;
779         case 16:
780                 size = 2;
781                 break;
782         case 32:
783                 size = 4;
784                 break;
785         default:
786                 return AE_ERROR;
787         }
788
789         result = raw_pci_write(pci_id->segment, pci_id->bus,
790                                 PCI_DEVFN(pci_id->device, pci_id->function),
791                                 reg, size, value);
792
793         return (result ? AE_ERROR : AE_OK);
794 }
795
796 static void acpi_os_execute_deferred(struct work_struct *work)
797 {
798         struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
799
800         if (dpc->wait)
801                 acpi_os_wait_events_complete(NULL);
802
803         dpc->function(dpc->context);
804         kfree(dpc);
805 }
806
807 /*******************************************************************************
808  *
809  * FUNCTION:    acpi_os_execute
810  *
811  * PARAMETERS:  Type               - Type of the callback
812  *              Function           - Function to be executed
813  *              Context            - Function parameters
814  *
815  * RETURN:      Status
816  *
817  * DESCRIPTION: Depending on type, either queues function for deferred execution or
818  *              immediately executes function on a separate thread.
819  *
820  ******************************************************************************/
821
822 static acpi_status __acpi_os_execute(acpi_execute_type type,
823         acpi_osd_exec_callback function, void *context, int hp)
824 {
825         acpi_status status = AE_OK;
826         struct acpi_os_dpc *dpc;
827         struct workqueue_struct *queue;
828         int ret;
829         ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
830                           "Scheduling function [%p(%p)] for deferred execution.\n",
831                           function, context));
832
833         /*
834          * Allocate/initialize DPC structure.  Note that this memory will be
835          * freed by the callee.  The kernel handles the work_struct list in a
836          * way that allows us to also free its memory inside the callee.
837          * Because we may want to schedule several tasks with different
838          * parameters we can't use the approach some kernel code uses of
839          * having a static work_struct.
840          */
841
842         dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
843         if (!dpc)
844                 return AE_NO_MEMORY;
845
846         dpc->function = function;
847         dpc->context = context;
848
849         /*
850          * We can't run hotplug code in keventd_wq/kacpid_wq/kacpid_notify_wq
851          * because the hotplug code may call driver .remove() functions,
852          * which invoke flush_scheduled_work/acpi_os_wait_events_complete
853          * to flush these workqueues.
854          */
855         queue = hp ? kacpi_hotplug_wq :
856                 (type == OSL_NOTIFY_HANDLER ? kacpi_notify_wq : kacpid_wq);
857         dpc->wait = hp ? 1 : 0;
858
859         if (queue == kacpi_hotplug_wq)
860                 INIT_WORK(&dpc->work, acpi_os_execute_deferred);
861         else if (queue == kacpi_notify_wq)
862                 INIT_WORK(&dpc->work, acpi_os_execute_deferred);
863         else
864                 INIT_WORK(&dpc->work, acpi_os_execute_deferred);
865
866         /*
867          * On some machines, a software-initiated SMI causes corruption unless
868          * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
869          * typically it's done in GPE-related methods that are run via
870          * workqueues, so we can avoid the known corruption cases by always
871          * queueing on CPU 0.
872          */
873         ret = queue_work_on(0, queue, &dpc->work);
874
875         if (!ret) {
876                 printk(KERN_ERR PREFIX
877                           "Call to queue_work_on() failed.\n");
878                 status = AE_ERROR;
879                 kfree(dpc);
880         }
881         return status;
882 }
883
884 acpi_status acpi_os_execute(acpi_execute_type type,
885                             acpi_osd_exec_callback function, void *context)
886 {
887         return __acpi_os_execute(type, function, context, 0);
888 }
889 EXPORT_SYMBOL(acpi_os_execute);
890
891 acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function,
892         void *context)
893 {
894         return __acpi_os_execute(0, function, context, 1);
895 }
896
897 void acpi_os_wait_events_complete(void *context)
898 {
899         flush_workqueue(kacpid_wq);
900         flush_workqueue(kacpi_notify_wq);
901 }
902
903 EXPORT_SYMBOL(acpi_os_wait_events_complete);
904
905 acpi_status
906 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
907 {
908         struct semaphore *sem = NULL;
909
910         sem = acpi_os_allocate(sizeof(struct semaphore));
911         if (!sem)
912                 return AE_NO_MEMORY;
913         memset(sem, 0, sizeof(struct semaphore));
914
915         sema_init(sem, initial_units);
916
917         *handle = (acpi_handle *) sem;
918
919         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
920                           *handle, initial_units));
921
922         return AE_OK;
923 }
924
925 /*
926  * TODO: A better way to delete semaphores?  Linux doesn't have a
927  * 'delete_semaphore()' function -- may result in an invalid
928  * pointer dereference for non-synchronized consumers.  Should
929  * we at least check for blocked threads and signal/cancel them?
930  */
931
932 acpi_status acpi_os_delete_semaphore(acpi_handle handle)
933 {
934         struct semaphore *sem = (struct semaphore *)handle;
935
936         if (!sem)
937                 return AE_BAD_PARAMETER;
938
939         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
940
941         BUG_ON(!list_empty(&sem->wait_list));
942         kfree(sem);
943         sem = NULL;
944
945         return AE_OK;
946 }
947
948 /*
949  * TODO: Support for units > 1?
950  */
951 acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
952 {
953         acpi_status status = AE_OK;
954         struct semaphore *sem = (struct semaphore *)handle;
955         long jiffies;
956         int ret = 0;
957
958         if (!sem || (units < 1))
959                 return AE_BAD_PARAMETER;
960
961         if (units > 1)
962                 return AE_SUPPORT;
963
964         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
965                           handle, units, timeout));
966
967         if (timeout == ACPI_WAIT_FOREVER)
968                 jiffies = MAX_SCHEDULE_TIMEOUT;
969         else
970                 jiffies = msecs_to_jiffies(timeout);
971         
972         ret = down_timeout(sem, jiffies);
973         if (ret)
974                 status = AE_TIME;
975
976         if (ACPI_FAILURE(status)) {
977                 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
978                                   "Failed to acquire semaphore[%p|%d|%d], %s",
979                                   handle, units, timeout,
980                                   acpi_format_exception(status)));
981         } else {
982                 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
983                                   "Acquired semaphore[%p|%d|%d]", handle,
984                                   units, timeout));
985         }
986
987         return status;
988 }
989
990 /*
991  * TODO: Support for units > 1?
992  */
993 acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
994 {
995         struct semaphore *sem = (struct semaphore *)handle;
996
997         if (!sem || (units < 1))
998                 return AE_BAD_PARAMETER;
999
1000         if (units > 1)
1001                 return AE_SUPPORT;
1002
1003         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
1004                           units));
1005
1006         up(sem);
1007
1008         return AE_OK;
1009 }
1010
1011 #ifdef ACPI_FUTURE_USAGE
1012 u32 acpi_os_get_line(char *buffer)
1013 {
1014
1015 #ifdef ENABLE_DEBUGGER
1016         if (acpi_in_debugger) {
1017                 u32 chars;
1018
1019                 kdb_read(buffer, sizeof(line_buf));
1020
1021                 /* remove the CR kdb includes */
1022                 chars = strlen(buffer) - 1;
1023                 buffer[chars] = '\0';
1024         }
1025 #endif
1026
1027         return 0;
1028 }
1029 #endif                          /*  ACPI_FUTURE_USAGE  */
1030
1031 acpi_status acpi_os_signal(u32 function, void *info)
1032 {
1033         switch (function) {
1034         case ACPI_SIGNAL_FATAL:
1035                 printk(KERN_ERR PREFIX "Fatal opcode executed\n");
1036                 break;
1037         case ACPI_SIGNAL_BREAKPOINT:
1038                 /*
1039                  * AML Breakpoint
1040                  * ACPI spec. says to treat it as a NOP unless
1041                  * you are debugging.  So if/when we integrate
1042                  * AML debugger into the kernel debugger its
1043                  * hook will go here.  But until then it is
1044                  * not useful to print anything on breakpoints.
1045                  */
1046                 break;
1047         default:
1048                 break;
1049         }
1050
1051         return AE_OK;
1052 }
1053
1054 static int __init acpi_os_name_setup(char *str)
1055 {
1056         char *p = acpi_os_name;
1057         int count = ACPI_MAX_OVERRIDE_LEN - 1;
1058
1059         if (!str || !*str)
1060                 return 0;
1061
1062         for (; count-- && str && *str; str++) {
1063                 if (isalnum(*str) || *str == ' ' || *str == ':')
1064                         *p++ = *str;
1065                 else if (*str == '\'' || *str == '"')
1066                         continue;
1067                 else
1068                         break;
1069         }
1070         *p = 0;
1071
1072         return 1;
1073
1074 }
1075
1076 __setup("acpi_os_name=", acpi_os_name_setup);
1077
1078 #define OSI_STRING_LENGTH_MAX 64        /* arbitrary */
1079 #define OSI_STRING_ENTRIES_MAX 16       /* arbitrary */
1080
1081 struct osi_setup_entry {
1082         char string[OSI_STRING_LENGTH_MAX];
1083         bool enable;
1084 };
1085
1086 static struct osi_setup_entry __initdata osi_setup_entries[OSI_STRING_ENTRIES_MAX];
1087
1088 void __init acpi_osi_setup(char *str)
1089 {
1090         struct osi_setup_entry *osi;
1091         bool enable = true;
1092         int i;
1093
1094         if (!acpi_gbl_create_osi_method)
1095                 return;
1096
1097         if (str == NULL || *str == '\0') {
1098                 printk(KERN_INFO PREFIX "_OSI method disabled\n");
1099                 acpi_gbl_create_osi_method = FALSE;
1100                 return;
1101         }
1102
1103         if (*str == '!') {
1104                 str++;
1105                 enable = false;
1106         }
1107
1108         for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
1109                 osi = &osi_setup_entries[i];
1110                 if (!strcmp(osi->string, str)) {
1111                         osi->enable = enable;
1112                         break;
1113                 } else if (osi->string[0] == '\0') {
1114                         osi->enable = enable;
1115                         strncpy(osi->string, str, OSI_STRING_LENGTH_MAX);
1116                         break;
1117                 }
1118         }
1119 }
1120
1121 static void __init set_osi_linux(unsigned int enable)
1122 {
1123         if (osi_linux.enable != enable)
1124                 osi_linux.enable = enable;
1125
1126         if (osi_linux.enable)
1127                 acpi_osi_setup("Linux");
1128         else
1129                 acpi_osi_setup("!Linux");
1130
1131         return;
1132 }
1133
1134 static void __init acpi_cmdline_osi_linux(unsigned int enable)
1135 {
1136         osi_linux.cmdline = 1;  /* cmdline sets the default and overrides DMI */
1137         osi_linux.dmi = 0;
1138         set_osi_linux(enable);
1139
1140         return;
1141 }
1142
1143 void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
1144 {
1145         printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
1146
1147         if (enable == -1)
1148                 return;
1149
1150         osi_linux.dmi = 1;      /* DMI knows that this box asks OSI(Linux) */
1151         set_osi_linux(enable);
1152
1153         return;
1154 }
1155
1156 /*
1157  * Modify the list of "OS Interfaces" reported to BIOS via _OSI
1158  *
1159  * empty string disables _OSI
1160  * string starting with '!' disables that string
1161  * otherwise string is added to list, augmenting built-in strings
1162  */
1163 static void __init acpi_osi_setup_late(void)
1164 {
1165         struct osi_setup_entry *osi;
1166         char *str;
1167         int i;
1168         acpi_status status;
1169
1170         for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
1171                 osi = &osi_setup_entries[i];
1172                 str = osi->string;
1173
1174                 if (*str == '\0')
1175                         break;
1176                 if (osi->enable) {
1177                         status = acpi_install_interface(str);
1178
1179                         if (ACPI_SUCCESS(status))
1180                                 printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
1181                 } else {
1182                         status = acpi_remove_interface(str);
1183
1184                         if (ACPI_SUCCESS(status))
1185                                 printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
1186                 }
1187         }
1188 }
1189
1190 static int __init osi_setup(char *str)
1191 {
1192         if (str && !strcmp("Linux", str))
1193                 acpi_cmdline_osi_linux(1);
1194         else if (str && !strcmp("!Linux", str))
1195                 acpi_cmdline_osi_linux(0);
1196         else
1197                 acpi_osi_setup(str);
1198
1199         return 1;
1200 }
1201
1202 __setup("acpi_osi=", osi_setup);
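
/*
 * Illustrative kernel command line examples, matching the semantics
 * documented above ("Windows 2006" is just an example interface string):
 *
 *	acpi_osi=			disable _OSI altogether
 *	acpi_osi="Windows 2006"		add an interface string
 *	acpi_osi="!Windows 2006"	disable that interface string
 *	acpi_osi=Linux			answer TRUE to _OSI(Linux)
 */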
1203
1204 /* enable serialization to combat AE_ALREADY_EXISTS errors */
1205 static int __init acpi_serialize_setup(char *str)
1206 {
1207         printk(KERN_INFO PREFIX "serialize enabled\n");
1208
1209         acpi_gbl_all_methods_serialized = TRUE;
1210
1211         return 1;
1212 }
1213
1214 __setup("acpi_serialize", acpi_serialize_setup);
1215
1216 /* Check for resource interference between native drivers and ACPI
1217  * OperationRegions (SystemIO and System Memory only).
1218  * IO ports and memory declared in ACPI might be used by the ACPI subsystem
1219  * in arbitrary AML code and can interfere with legacy drivers.
1220  * acpi_enforce_resources= can be set to:
1221  *
1222  *   - strict (default) (2)
1223  *     -> a driver trying to access the resources will not load
1224  *   - lax              (1)
1225  *     -> a driver trying to access the resources will load, but you
1226  *        get a system message that something might go wrong...
1227  *
1228  *   - no               (0)
1229  *     -> ACPI Operation Region resources will not be registered
1230  *
1231  */
1232 #define ENFORCE_RESOURCES_STRICT 2
1233 #define ENFORCE_RESOURCES_LAX    1
1234 #define ENFORCE_RESOURCES_NO     0
1235
1236 static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1237
1238 static int __init acpi_enforce_resources_setup(char *str)
1239 {
1240         if (str == NULL || *str == '\0')
1241                 return 0;
1242
1243         if (!strcmp("strict", str))
1244                 acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1245         else if (!strcmp("lax", str))
1246                 acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
1247         else if (!strcmp("no", str))
1248                 acpi_enforce_resources = ENFORCE_RESOURCES_NO;
1249
1250         return 1;
1251 }
1252
1253 __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
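
/*
 * Example (illustrative only): booting with "acpi_enforce_resources=lax"
 * lets a conflicting native driver load but logs a warning, while
 * "acpi_enforce_resources=no" skips the conflict checks entirely.
 */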
1254
1255 /* Check for resource conflicts between ACPI OperationRegions and native
1256  * drivers */
1257 int acpi_check_resource_conflict(const struct resource *res)
1258 {
1259         struct acpi_res_list *res_list_elem;
1260         int ioport = 0, clash = 0;
1261
1262         if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1263                 return 0;
1264         if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
1265                 return 0;
1266
1267         ioport = res->flags & IORESOURCE_IO;
1268
1269         spin_lock(&acpi_res_lock);
1270         list_for_each_entry(res_list_elem, &resource_list_head,
1271                             resource_list) {
1272                 if (ioport && (res_list_elem->resource_type
1273                                != ACPI_ADR_SPACE_SYSTEM_IO))
1274                         continue;
1275                 if (!ioport && (res_list_elem->resource_type
1276                                 != ACPI_ADR_SPACE_SYSTEM_MEMORY))
1277                         continue;
1278
1279                 if (res->end < res_list_elem->start
1280                     || res_list_elem->end < res->start)
1281                         continue;
1282                 clash = 1;
1283                 break;
1284         }
1285         spin_unlock(&acpi_res_lock);
1286
1287         if (clash) {
1288                 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
1289                         printk(KERN_WARNING "ACPI: resource %s %pR"
1290                                " conflicts with ACPI region %s "
1291                                "[%s 0x%zx-0x%zx]\n",
1292                                res->name, res, res_list_elem->name,
1293                                (res_list_elem->resource_type ==
1294                                 ACPI_ADR_SPACE_SYSTEM_IO) ? "io" : "mem",
1295                                (size_t) res_list_elem->start,
1296                                (size_t) res_list_elem->end);
1297                         if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
1298                                 printk(KERN_NOTICE "ACPI: This conflict may"
1299                                        " cause random problems and system"
1300                                        " instability\n");
1301                         printk(KERN_INFO "ACPI: If an ACPI driver is available"
1302                                " for this device, you should use it instead of"
1303                                " the native driver\n");
1304                 }
1305                 if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
1306                         return -EBUSY;
1307         }
1308         return 0;
1309 }
1310 EXPORT_SYMBOL(acpi_check_resource_conflict);
1311
1312 int acpi_check_region(resource_size_t start, resource_size_t n,
1313                       const char *name)
1314 {
1315         struct resource res = {
1316                 .start = start,
1317                 .end   = start + n - 1,
1318                 .name  = name,
1319                 .flags = IORESOURCE_IO,
1320         };
1321
1322         return acpi_check_resource_conflict(&res);
1323 }
1324 EXPORT_SYMBOL(acpi_check_region);
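
/*
 * Sketch of how a native driver might use the check above before claiming
 * an I/O range ('io_base', 'io_len' and "mydrv" are hypothetical names):
 *
 *	if (acpi_check_region(io_base, io_len, "mydrv"))
 *		return -EBUSY;
 *	if (!request_region(io_base, io_len, "mydrv"))
 *		return -EBUSY;
 */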
1325
1326 /*
1327  * Let drivers know whether the resource checks are effective
1328  */
1329 int acpi_resources_are_enforced(void)
1330 {
1331         return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
1332 }
1333 EXPORT_SYMBOL(acpi_resources_are_enforced);
1334
1335 /*
1336  * Deallocate the memory for a spinlock.
1337  */
1338 void acpi_os_delete_lock(acpi_spinlock handle)
1339 {
1340         ACPI_FREE(handle);
1341 }
1342
1343 /*
1344  * Acquire a spinlock.
1345  *
1346  * handle is a pointer to the spinlock_t.
1347  */
1348
1349 acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
1350 {
1351         acpi_cpu_flags flags;
1352         spin_lock_irqsave(lockp, flags);
1353         return flags;
1354 }
1355
1356 /*
1357  * Release a spinlock. See above.
1358  */
1359
1360 void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
1361 {
1362         spin_unlock_irqrestore(lockp, flags);
1363 }
1364
1365 #ifndef ACPI_USE_LOCAL_CACHE
1366
1367 /*******************************************************************************
1368  *
1369  * FUNCTION:    acpi_os_create_cache
1370  *
1371  * PARAMETERS:  name      - Ascii name for the cache
1372  *              size      - Size of each cached object
1373  *              depth     - Maximum depth of the cache (in objects) <ignored>
1374  *              cache     - Where the new cache object is returned
1375  *
1376  * RETURN:      status
1377  *
1378  * DESCRIPTION: Create a cache object
1379  *
1380  ******************************************************************************/
1381
1382 acpi_status
1383 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
1384 {
1385         *cache = kmem_cache_create(name, size, 0, 0, NULL);
1386         if (*cache == NULL)
1387                 return AE_ERROR;
1388         else
1389                 return AE_OK;
1390 }
1391
1392 /*******************************************************************************
1393  *
1394  * FUNCTION:    acpi_os_purge_cache
1395  *
1396  * PARAMETERS:  Cache           - Handle to cache object
1397  *
1398  * RETURN:      Status
1399  *
1400  * DESCRIPTION: Free all objects within the requested cache.
1401  *
1402  ******************************************************************************/
1403
1404 acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
1405 {
1406         kmem_cache_shrink(cache);
1407         return (AE_OK);
1408 }
1409
1410 /*******************************************************************************
1411  *
1412  * FUNCTION:    acpi_os_delete_cache
1413  *
1414  * PARAMETERS:  Cache           - Handle to cache object
1415  *
1416  * RETURN:      Status
1417  *
1418  * DESCRIPTION: Free all objects within the requested cache and delete the
1419  *              cache object.
1420  *
1421  ******************************************************************************/
1422
1423 acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
1424 {
1425         kmem_cache_destroy(cache);
1426         return (AE_OK);
1427 }
1428
1429 /*******************************************************************************
1430  *
1431  * FUNCTION:    acpi_os_release_object
1432  *
1433  * PARAMETERS:  Cache       - Handle to cache object
1434  *              Object      - The object to be released
1435  *
1436  * RETURN:      None
1437  *
1438  * DESCRIPTION: Release an object to the specified cache.  If cache is full,
1439  *              the object is deleted.
1440  *
1441  ******************************************************************************/
1442
1443 acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
1444 {
1445         kmem_cache_free(cache, object);
1446         return (AE_OK);
1447 }
1448
1449 static inline int acpi_res_list_add(struct acpi_res_list *res)
1450 {
1451         struct acpi_res_list *res_list_elem;
1452
1453         list_for_each_entry(res_list_elem, &resource_list_head,
1454                             resource_list) {
1455
1456                 if (res->resource_type == res_list_elem->resource_type &&
1457                     res->start == res_list_elem->start &&
1458                     res->end == res_list_elem->end) {
1459
1460                         /*
1461                          * The Region(addr,len) already exists in the list,
1462                          * just increase the count
1463                          */
1464
1465                         res_list_elem->count++;
1466                         return 0;
1467                 }
1468         }
1469
1470         res->count = 1;
1471         list_add(&res->resource_list, &resource_list_head);
1472         return 1;
1473 }
1474
1475 static inline void acpi_res_list_del(struct acpi_res_list *res)
1476 {
1477         struct acpi_res_list *res_list_elem;
1478
1479         list_for_each_entry(res_list_elem, &resource_list_head,
1480                             resource_list) {
1481
1482                 if (res->resource_type == res_list_elem->resource_type &&
1483                     res->start == res_list_elem->start &&
1484                     res->end == res_list_elem->end) {
1485
1486                         /*
1487                          * If the res count is decreased to 0,
1488                          * remove and free it
1489                          */
1490
1491                         if (--res_list_elem->count == 0) {
1492                                 list_del(&res_list_elem->resource_list);
1493                                 kfree(res_list_elem);
1494                         }
1495                         return;
1496                 }
1497         }
1498 }
1499
1500 acpi_status
1501 acpi_os_invalidate_address(
1502     u8                   space_id,
1503     acpi_physical_address   address,
1504     acpi_size               length)
1505 {
1506         struct acpi_res_list res;
1507
1508         switch (space_id) {
1509         case ACPI_ADR_SPACE_SYSTEM_IO:
1510         case ACPI_ADR_SPACE_SYSTEM_MEMORY:
1511                 /* Only interference checks against SystemIO and SystemMemory
1512                    are needed */
1513                 res.start = address;
1514                 res.end = address + length - 1;
1515                 res.resource_type = space_id;
1516                 spin_lock(&acpi_res_lock);
1517                 acpi_res_list_del(&res);
1518                 spin_unlock(&acpi_res_lock);
1519                 break;
1520         case ACPI_ADR_SPACE_PCI_CONFIG:
1521         case ACPI_ADR_SPACE_EC:
1522         case ACPI_ADR_SPACE_SMBUS:
1523         case ACPI_ADR_SPACE_CMOS:
1524         case ACPI_ADR_SPACE_PCI_BAR_TARGET:
1525         case ACPI_ADR_SPACE_DATA_TABLE:
1526         case ACPI_ADR_SPACE_FIXED_HARDWARE:
1527                 break;
1528         }
1529         return AE_OK;
1530 }
1531
1532 /******************************************************************************
1533  *
1534  * FUNCTION:    acpi_os_validate_address
1535  *
1536  * PARAMETERS:  space_id             - ACPI space ID
1537  *              address             - Physical address
1538  *              length              - Address length
1539  *
1540  * RETURN:      AE_OK if address/length is valid for the space_id. Otherwise,
1541  *              should return AE_AML_ILLEGAL_ADDRESS.
1542  *
1543  * DESCRIPTION: Validate a system address via the host OS. Used to validate
1544  *              the addresses accessed by AML operation regions.
1545  *
1546  *****************************************************************************/
1547
1548 acpi_status
1549 acpi_os_validate_address (
1550     u8                   space_id,
1551     acpi_physical_address   address,
1552     acpi_size               length,
1553     char *name)
1554 {
1555         struct acpi_res_list *res;
1556         int added;
1557         if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1558                 return AE_OK;
1559
1560         switch (space_id) {
1561         case ACPI_ADR_SPACE_SYSTEM_IO:
1562         case ACPI_ADR_SPACE_SYSTEM_MEMORY:
1563                 /* Only interference checks against SystemIO and SystemMemory
1564                    are needed */
1565                 res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL);
1566                 if (!res)
1567                         return AE_OK;
1568                 /* ACPI names are fixed at 4 bytes; use strlcpy to be safe */
1569                 strlcpy(res->name, name, 5);
1570                 res->start = address;
1571                 res->end = address + length - 1;
1572                 res->resource_type = space_id;
1573                 spin_lock(&acpi_res_lock);
1574                 added = acpi_res_list_add(res);
1575                 spin_unlock(&acpi_res_lock);
1576                 pr_debug("%s %s resource: start: 0x%llx, end: 0x%llx, "
1577                          "name: %s\n", added ? "Added" : "Already exists",
1578                          (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
1579                          ? "SystemIO" : "System Memory",
1580                          (unsigned long long)res->start,
1581                          (unsigned long long)res->end,
1582                          res->name);
1583                 if (!added)
1584                         kfree(res);
1585                 break;
1586         case ACPI_ADR_SPACE_PCI_CONFIG:
1587         case ACPI_ADR_SPACE_EC:
1588         case ACPI_ADR_SPACE_SMBUS:
1589         case ACPI_ADR_SPACE_CMOS:
1590         case ACPI_ADR_SPACE_PCI_BAR_TARGET:
1591         case ACPI_ADR_SPACE_DATA_TABLE:
1592         case ACPI_ADR_SPACE_FIXED_HARDWARE:
1593                 break;
1594         }
1595         return AE_OK;
1596 }
1597 #endif
1598
1599 acpi_status __init acpi_os_initialize(void)
1600 {
1601         acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
1602         acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
1603         acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
1604         acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
1605
1606         return AE_OK;
1607 }
1608
1609 acpi_status __init acpi_os_initialize1(void)
1610 {
1611         kacpid_wq = alloc_workqueue("kacpid", 0, 1);
1612         kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
1613         kacpi_hotplug_wq = alloc_workqueue("kacpi_hotplug", 0, 1);
1614         BUG_ON(!kacpid_wq);
1615         BUG_ON(!kacpi_notify_wq);
1616         BUG_ON(!kacpi_hotplug_wq);
1617         acpi_install_interface_handler(acpi_osi_handler);
1618         acpi_osi_setup_late();
1619         return AE_OK;
1620 }
1621
1622 acpi_status acpi_os_terminate(void)
1623 {
1624         if (acpi_irq_handler) {
1625                 acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
1626                                                  acpi_irq_handler);
1627         }
1628
1629         acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
1630         acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
1631         acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
1632         acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
1633
1634         destroy_workqueue(kacpid_wq);
1635         destroy_workqueue(kacpi_notify_wq);
1636         destroy_workqueue(kacpi_hotplug_wq);
1637
1638         return AE_OK;
1639 }