ACPI / init: Fix the ordering of acpi_reserve_resources()
[pandora-kernel.git] / drivers / acpi / osl.c
1 /*
2  *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
3  *
4  *  Copyright (C) 2000       Andrew Henroid
5  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
6  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7  *  Copyright (c) 2008 Intel Corporation
8  *   Author: Matthew Wilcox <willy@linux.intel.com>
9  *
10  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2 of the License, or
15  *  (at your option) any later version.
16  *
17  *  This program is distributed in the hope that it will be useful,
18  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  *  GNU General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License
23  *  along with this program; if not, write to the Free Software
24  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
25  *
26  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
27  *
28  */
29
30 #include <linux/module.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/mm.h>
34 #include <linux/pci.h>
35 #include <linux/interrupt.h>
36 #include <linux/kmod.h>
37 #include <linux/delay.h>
38 #include <linux/workqueue.h>
39 #include <linux/nmi.h>
40 #include <linux/acpi.h>
41 #include <linux/acpi_io.h>
42 #include <linux/efi.h>
43 #include <linux/ioport.h>
44 #include <linux/list.h>
45 #include <linux/jiffies.h>
46 #include <linux/semaphore.h>
47
48 #include <asm/io.h>
49 #include <asm/uaccess.h>
50
51 #include <acpi/acpi.h>
52 #include <acpi/acpi_bus.h>
53 #include <acpi/processor.h>
54
55 #define _COMPONENT              ACPI_OS_SERVICES
56 ACPI_MODULE_NAME("osl");
57 #define PREFIX          "ACPI: "
58 struct acpi_os_dpc {
59         acpi_osd_exec_callback function;
60         void *context;
61         struct work_struct work;
62         int wait;
63 };
64
65 #ifdef CONFIG_ACPI_CUSTOM_DSDT
66 #include CONFIG_ACPI_CUSTOM_DSDT_FILE
67 #endif
68
69 #ifdef ENABLE_DEBUGGER
70 #include <linux/kdb.h>
71
72 /* stuff for debugger support */
73 int acpi_in_debugger;
74 EXPORT_SYMBOL(acpi_in_debugger);
75
76 extern char line_buf[80];
77 #endif                          /*ENABLE_DEBUGGER */
78
79 static acpi_osd_handler acpi_irq_handler;
80 static void *acpi_irq_context;
81 static struct workqueue_struct *kacpid_wq;
82 static struct workqueue_struct *kacpi_notify_wq;
83 struct workqueue_struct *kacpi_hotplug_wq;
84 EXPORT_SYMBOL(kacpi_hotplug_wq);
85
86 struct acpi_res_list {
87         resource_size_t start;
88         resource_size_t end;
89         acpi_adr_space_type resource_type; /* IO port, System memory, ...*/
90         char name[5];   /* only can have a length of 4 chars, make use of this
91                            one instead of res->name, no need to kalloc then */
92         struct list_head resource_list;
93         int count;
94 };
95
96 static LIST_HEAD(resource_list_head);
97 static DEFINE_SPINLOCK(acpi_res_lock);
98
99 /*
100  * This list of permanent mappings is for memory that may be accessed from
101  * interrupt context, where we can't do the ioremap().
102  */
103 struct acpi_ioremap {
104         struct list_head list;
105         void __iomem *virt;
106         acpi_physical_address phys;
107         acpi_size size;
108         unsigned long refcount;
109 };
110
111 static LIST_HEAD(acpi_ioremaps);
112 static DEFINE_MUTEX(acpi_ioremap_lock);
113
114 static void __init acpi_osi_setup_late(void);
115
116 /*
117  * The story of _OSI(Linux)
118  *
119  * From pre-history through Linux-2.6.22,
120  * Linux responded TRUE upon a BIOS OSI(Linux) query.
121  *
122  * Unfortunately, reference BIOS writers got wind of this
123  * and put OSI(Linux) in their example code, quickly exposing
124  * this string as ill-conceived and opening the door to
125  * an un-bounded number of BIOS incompatibilities.
126  *
127  * For example, OSI(Linux) was used on resume to re-POST a
128  * video card on one system, because Linux at that time
129  * could not do a speedy restore in its native driver.
130  * But then upon gaining quick native restore capability,
131  * Linux has no way to tell the BIOS to skip the time-consuming
132  * POST -- putting Linux at a permanent performance disadvantage.
133  * On another system, the BIOS writer used OSI(Linux)
134  * to infer native OS support for IPMI!  On other systems,
135  * OSI(Linux) simply got in the way of Linux claiming to
136  * be compatible with other operating systems, exposing
137  * BIOS issues such as skipped device initialization.
138  *
139  * So "Linux" turned out to be a really poor chose of
140  * OSI string, and from Linux-2.6.23 onward we respond FALSE.
141  *
142  * BIOS writers should NOT query _OSI(Linux) on future systems.
143  * Linux will complain on the console when it sees it, and return FALSE.
144  * To get Linux to return TRUE for your system  will require
145  * a kernel source update to add a DMI entry,
146  * or boot with "acpi_osi=Linux"
147  */
148
149 static struct osi_linux {
150         unsigned int    enable:1;
151         unsigned int    dmi:1;
152         unsigned int    cmdline:1;
153 } osi_linux = {0, 0, 0};
154
155 static u32 acpi_osi_handler(acpi_string interface, u32 supported)
156 {
157         if (!strcmp("Linux", interface)) {
158
159                 printk_once(KERN_NOTICE FW_BUG PREFIX
160                         "BIOS _OSI(Linux) query %s%s\n",
161                         osi_linux.enable ? "honored" : "ignored",
162                         osi_linux.cmdline ? " via cmdline" :
163                         osi_linux.dmi ? " via DMI" : "");
164         }
165
166         return supported;
167 }
168
169 static void __init acpi_request_region (struct acpi_generic_address *addr,
170         unsigned int length, char *desc)
171 {
172         if (!addr->address || !length)
173                 return;
174
175         /* Resources are never freed */
176         if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
177                 request_region(addr->address, length, desc);
178         else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
179                 request_mem_region(addr->address, length, desc);
180 }
181
/*
 * Reserve the fixed hardware register blocks described by the FADT
 * (PM1a/PM1b event and control, PM timer, PM2 control, GPE0/GPE1) so that
 * other drivers cannot claim those port/memory ranges.  The reservations
 * are permanent; nothing ever releases them.
 */
static void __init acpi_reserve_resources(void)
{
	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1a_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1b_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1a_CNT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1b_CNT_BLK");

	/* Only reserve the PM timer when it has the standard 4-byte length. */
	if (acpi_gbl_FADT.pm_timer_length == 4)
		acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

	acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
		"ACPI PM2_CNT_BLK");

	/* Length of GPE blocks must be a non-negative multiple of 2 */

	if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
			       acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
			       acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
}
212
/* printf-style entry point used by ACPICA; forwards to acpi_os_vprintf(). */
void acpi_os_printf(const char *fmt, ...)
{
	va_list args;
	va_start(args, fmt);
	acpi_os_vprintf(fmt, args);
	va_end(args);
}
220
221 void acpi_os_vprintf(const char *fmt, va_list args)
222 {
223         static char buffer[512];
224
225         vsprintf(buffer, fmt, args);
226
227 #ifdef ENABLE_DEBUGGER
228         if (acpi_in_debugger) {
229                 kdb_printf("%s", buffer);
230         } else {
231                 printk(KERN_CONT "%s", buffer);
232         }
233 #else
234         printk(KERN_CONT "%s", buffer);
235 #endif
236 }
237
#ifdef CONFIG_KEXEC
/*
 * "acpi_rsdp=" early parameter: physical address of the RSDP, passed to a
 * kexec/kdump kernel so it can locate the ACPI tables without scanning.
 * Parsed as hexadecimal; a parse failure leaves acpi_rsdp at 0, which
 * disables the override in acpi_os_get_root_pointer().
 */
static unsigned long acpi_rsdp;
static int __init setup_acpi_rsdp(char *arg)
{
	acpi_rsdp = simple_strtoul(arg, NULL, 16);
	return 0;
}
early_param("acpi_rsdp", setup_acpi_rsdp);
#endif
247
248 acpi_physical_address __init acpi_os_get_root_pointer(void)
249 {
250 #ifdef CONFIG_KEXEC
251         if (acpi_rsdp)
252                 return acpi_rsdp;
253 #endif
254
255         if (efi_enabled(EFI_CONFIG_TABLES)) {
256                 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
257                         return efi.acpi20;
258                 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
259                         return efi.acpi;
260                 else {
261                         printk(KERN_ERR PREFIX
262                                "System description tables not found\n");
263                         return 0;
264                 }
265         } else {
266                 acpi_physical_address pa = 0;
267
268                 acpi_find_root_pointer(&pa);
269                 return pa;
270         }
271 }
272
273 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
274 static struct acpi_ioremap *
275 acpi_map_lookup(acpi_physical_address phys, acpi_size size)
276 {
277         struct acpi_ioremap *map;
278
279         list_for_each_entry_rcu(map, &acpi_ioremaps, list)
280                 if (map->phys <= phys &&
281                     phys + size <= map->phys + map->size)
282                         return map;
283
284         return NULL;
285 }
286
287 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
288 static void __iomem *
289 acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
290 {
291         struct acpi_ioremap *map;
292
293         map = acpi_map_lookup(phys, size);
294         if (map)
295                 return map->virt + (phys - map->phys);
296
297         return NULL;
298 }
299
300 void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
301 {
302         struct acpi_ioremap *map;
303         void __iomem *virt = NULL;
304
305         mutex_lock(&acpi_ioremap_lock);
306         map = acpi_map_lookup(phys, size);
307         if (map) {
308                 virt = map->virt + (phys - map->phys);
309                 map->refcount++;
310         }
311         mutex_unlock(&acpi_ioremap_lock);
312         return virt;
313 }
314 EXPORT_SYMBOL_GPL(acpi_os_get_iomem);
315
316 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
317 static struct acpi_ioremap *
318 acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
319 {
320         struct acpi_ioremap *map;
321
322         list_for_each_entry_rcu(map, &acpi_ioremaps, list)
323                 if (map->virt <= virt &&
324                     virt + size <= map->virt + map->size)
325                         return map;
326
327         return NULL;
328 }
329
/*
 * Map a physical range for ACPI use and track it for reuse.  Before the
 * permanent mmap is available this falls back to the early fixmap-based
 * __acpi_map_table().  Afterwards, an existing covering mapping is
 * refcounted and reused; otherwise a page-aligned ioremap is created and
 * added (under RCU) to the acpi_ioremaps list so interrupt-context lookups
 * can find it.  Returns NULL on failure or if 'phys' exceeds ULONG_MAX.
 */
void __iomem *__init_refok
acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;
	void __iomem *virt;
	acpi_physical_address pg_off;
	acpi_size pg_sz;

	/* The bookkeeping below casts phys to unsigned long. */
	if (phys > ULONG_MAX) {
		printk(KERN_ERR PREFIX "Cannot map memory that high\n");
		return NULL;
	}

	if (!acpi_gbl_permanent_mmap)
		return __acpi_map_table((unsigned long)phys, size);

	mutex_lock(&acpi_ioremap_lock);
	/* Check if there's a suitable mapping already. */
	map = acpi_map_lookup(phys, size);
	if (map) {
		map->refcount++;
		goto out;
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return NULL;
	}

	/* Map whole pages; the tracker records the page-aligned range. */
	pg_off = round_down(phys, PAGE_SIZE);
	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
	virt = acpi_os_ioremap(pg_off, pg_sz);
	if (!virt) {
		mutex_unlock(&acpi_ioremap_lock);
		kfree(map);
		return NULL;
	}

	INIT_LIST_HEAD(&map->list);
	map->virt = virt;
	map->phys = pg_off;
	map->size = pg_sz;
	map->refcount = 1;

	/* RCU insertion pairs with the RCU-protected lookups. */
	list_add_tail_rcu(&map->list, &acpi_ioremaps);

 out:
	mutex_unlock(&acpi_ioremap_lock);
	/* Return the caller's offset within the page-aligned mapping. */
	return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);
382
383 static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
384 {
385         if (!--map->refcount)
386                 list_del_rcu(&map->list);
387 }
388
389 static void acpi_os_map_cleanup(struct acpi_ioremap *map)
390 {
391         if (!map->refcount) {
392                 synchronize_rcu();
393                 iounmap(map->virt);
394                 kfree(map);
395         }
396 }
397
398 void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
399 {
400         struct acpi_ioremap *map;
401
402         if (!acpi_gbl_permanent_mmap) {
403                 __acpi_unmap_table(virt, size);
404                 return;
405         }
406
407         mutex_lock(&acpi_ioremap_lock);
408         map = acpi_map_lookup_virt(virt, size);
409         if (!map) {
410                 mutex_unlock(&acpi_ioremap_lock);
411                 WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
412                 return;
413         }
414         acpi_os_drop_map_ref(map);
415         mutex_unlock(&acpi_ioremap_lock);
416
417         acpi_os_map_cleanup(map);
418 }
419 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
420
421 void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
422 {
423         if (!acpi_gbl_permanent_mmap)
424                 __acpi_unmap_table(virt, size);
425 }
426
427 static int acpi_os_map_generic_address(struct acpi_generic_address *addr)
428 {
429         void __iomem *virt;
430
431         if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
432                 return 0;
433
434         if (!addr->address || !addr->bit_width)
435                 return -EINVAL;
436
437         virt = acpi_os_map_memory(addr->address, addr->bit_width / 8);
438         if (!virt)
439                 return -EIO;
440
441         return 0;
442 }
443
444 static void acpi_os_unmap_generic_address(struct acpi_generic_address *addr)
445 {
446         struct acpi_ioremap *map;
447
448         if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
449                 return;
450
451         if (!addr->address || !addr->bit_width)
452                 return;
453
454         mutex_lock(&acpi_ioremap_lock);
455         map = acpi_map_lookup(addr->address, addr->bit_width / 8);
456         if (!map) {
457                 mutex_unlock(&acpi_ioremap_lock);
458                 return;
459         }
460         acpi_os_drop_map_ref(map);
461         mutex_unlock(&acpi_ioremap_lock);
462
463         acpi_os_map_cleanup(map);
464 }
465
#ifdef ACPI_FUTURE_USAGE
/*
 * Translate a kernel virtual address to its physical address for ACPICA.
 * Only valid for directly-mapped (lowmem) addresses, per virt_to_phys().
 */
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
{
	if (!virt || !phys)
		return AE_BAD_PARAMETER;

	*phys = virt_to_phys(virt);
	return AE_OK;
}
#endif
478
479 #define ACPI_MAX_OVERRIDE_LEN 100
480
481 static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
482
483 acpi_status
484 acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
485                             acpi_string * new_val)
486 {
487         if (!init_val || !new_val)
488                 return AE_BAD_PARAMETER;
489
490         *new_val = NULL;
491         if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
492                 printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
493                        acpi_os_name);
494                 *new_val = acpi_os_name;
495         }
496
497         return AE_OK;
498 }
499
/*
 * Give the OS a chance to replace an ACPI table before ACPICA installs it.
 * Only the DSDT is ever overridden, and only when a replacement was
 * compiled in via CONFIG_ACPI_CUSTOM_DSDT; doing so taints the kernel
 * because a custom DSDT makes bug reports unreproducible.
 */
acpi_status
acpi_os_table_override(struct acpi_table_header * existing_table,
		       struct acpi_table_header ** new_table)
{
	if (!existing_table || !new_table)
		return AE_BAD_PARAMETER;

	*new_table = NULL;

#ifdef CONFIG_ACPI_CUSTOM_DSDT
	if (strncmp(existing_table->signature, "DSDT", 4) == 0)
		*new_table = (struct acpi_table_header *)AmlCode;
#endif
	if (*new_table != NULL) {
		printk(KERN_WARNING PREFIX "Override [%4.4s-%8.8s], "
			   "this is unsafe: tainting kernel\n",
		       existing_table->signature,
		       existing_table->oem_table_id);
		add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
	}
	return AE_OK;
}
522
523 static irqreturn_t acpi_irq(int irq, void *dev_id)
524 {
525         u32 handled;
526
527         handled = (*acpi_irq_handler) (acpi_irq_context);
528
529         if (handled) {
530                 acpi_irq_handled++;
531                 return IRQ_HANDLED;
532         } else {
533                 acpi_irq_not_handled++;
534                 return IRQ_NONE;
535         }
536 }
537
538 acpi_status
539 acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
540                                   void *context)
541 {
542         unsigned int irq;
543
544         acpi_irq_stats_init();
545
546         /*
547          * ACPI interrupts different from the SCI in our copy of the FADT are
548          * not supported.
549          */
550         if (gsi != acpi_gbl_FADT.sci_interrupt)
551                 return AE_BAD_PARAMETER;
552
553         if (acpi_irq_handler)
554                 return AE_ALREADY_ACQUIRED;
555
556         if (acpi_gsi_to_irq(gsi, &irq) < 0) {
557                 printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
558                        gsi);
559                 return AE_OK;
560         }
561
562         acpi_irq_handler = handler;
563         acpi_irq_context = context;
564         if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
565                 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
566                 acpi_irq_handler = NULL;
567                 return AE_NOT_ACQUIRED;
568         }
569
570         return AE_OK;
571 }
572
573 acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
574 {
575         if (irq != acpi_gbl_FADT.sci_interrupt)
576                 return AE_BAD_PARAMETER;
577
578         free_irq(irq, acpi_irq);
579         acpi_irq_handler = NULL;
580
581         return AE_OK;
582 }
583
584 /*
585  * Running in interpreter thread context, safe to sleep
586  */
587
/* Sleep for 'ms' milliseconds; interpreter thread context, sleeping is OK. */
void acpi_os_sleep(u64 ms)
{
	schedule_timeout_interruptible(msecs_to_jiffies(ms));
}
592
593 void acpi_os_stall(u32 us)
594 {
595         while (us) {
596                 u32 delay = 1000;
597
598                 if (delay > us)
599                         delay = us;
600                 udelay(delay);
601                 touch_nmi_watchdog();
602                 us -= delay;
603         }
604 }
605
606 /*
607  * Support ACPI 3.0 AML Timer operand
608  * Returns 64-bit free-running, monotonically increasing timer
609  * with 100ns granularity
610  */
u64 acpi_os_get_timer(void)
{
	/*
	 * Placeholder implementation: a bare monotonically increasing
	 * counter, NOT a real 100ns-granularity clock as the AML Timer
	 * operand requires.
	 */
	static u64 t;

#ifdef	CONFIG_HPET
	/* TBD: use HPET if available */
#endif

#ifdef	CONFIG_X86_PM_TIMER
	/* TBD: default to PM timer if HPET was not available */
#endif
	/* Warn on first use (while t is still 0) that this is a stub. */
	if (!t)
		printk(KERN_ERR PREFIX "acpi_os_get_timer() TBD\n");

	return ++t;
}
627
628 acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
629 {
630         u32 dummy;
631
632         if (!value)
633                 value = &dummy;
634
635         *value = 0;
636         if (width <= 8) {
637                 *(u8 *) value = inb(port);
638         } else if (width <= 16) {
639                 *(u16 *) value = inw(port);
640         } else if (width <= 32) {
641                 *(u32 *) value = inl(port);
642         } else {
643                 BUG();
644         }
645
646         return AE_OK;
647 }
648
649 EXPORT_SYMBOL(acpi_os_read_port);
650
651 acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
652 {
653         if (width <= 8) {
654                 outb(value, port);
655         } else if (width <= 16) {
656                 outw(value, port);
657         } else if (width <= 32) {
658                 outl(value, port);
659         } else {
660                 BUG();
661         }
662
663         return AE_OK;
664 }
665
666 EXPORT_SYMBOL(acpi_os_write_port);
667
/*
 * Read 'width' bits (8/16/32) of physical memory for ACPICA.  The fast
 * path reuses a tracked permanent mapping under the RCU read lock (safe
 * from interrupt context); otherwise a transient ioremap is created and
 * torn down around the access.  A NULL 'value' discards the result.
 */
acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;
	u32 dummy;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		/* No tracked mapping: drop RCU and map transiently. */
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	if (!value)
		value = &dummy;

	switch (width) {
	case 8:
		*(u8 *) value = readb(virt_addr);
		break;
	case 16:
		*(u16 *) value = readw(virt_addr);
		break;
	case 32:
		*(u32 *) value = readl(virt_addr);
		break;
	default:
		BUG();
	}

	/* Release whichever protection the chosen path acquired. */
	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}
710
/*
 * Write 'width' bits (8/16/32) of physical memory for ACPICA.  Mirrors
 * acpi_os_read_memory(): tracked mappings are used under the RCU read
 * lock, otherwise a transient ioremap wraps the single access.
 */
acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		/* No tracked mapping: drop RCU and map transiently. */
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	switch (width) {
	case 8:
		writeb(value, virt_addr);
		break;
	case 16:
		writew(value, virt_addr);
		break;
	case 32:
		writel(value, virt_addr);
		break;
	default:
		BUG();
	}

	/* Release whichever protection the chosen path acquired. */
	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}
749
750 acpi_status
751 acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
752                                u64 *value, u32 width)
753 {
754         int result, size;
755         u32 value32;
756
757         if (!value)
758                 return AE_BAD_PARAMETER;
759
760         switch (width) {
761         case 8:
762                 size = 1;
763                 break;
764         case 16:
765                 size = 2;
766                 break;
767         case 32:
768                 size = 4;
769                 break;
770         default:
771                 return AE_ERROR;
772         }
773
774         result = raw_pci_read(pci_id->segment, pci_id->bus,
775                                 PCI_DEVFN(pci_id->device, pci_id->function),
776                                 reg, size, &value32);
777         *value = value32;
778
779         return (result ? AE_ERROR : AE_OK);
780 }
781
782 acpi_status
783 acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
784                                 u64 value, u32 width)
785 {
786         int result, size;
787
788         switch (width) {
789         case 8:
790                 size = 1;
791                 break;
792         case 16:
793                 size = 2;
794                 break;
795         case 32:
796                 size = 4;
797                 break;
798         default:
799                 return AE_ERROR;
800         }
801
802         result = raw_pci_write(pci_id->segment, pci_id->bus,
803                                 PCI_DEVFN(pci_id->device, pci_id->function),
804                                 reg, size, value);
805
806         return (result ? AE_ERROR : AE_OK);
807 }
808
/*
 * Workqueue trampoline for __acpi_os_execute(): hotplug jobs (dpc->wait)
 * first wait for the regular ACPI workqueues to drain, then the deferred
 * callback runs and the DPC allocated by the scheduler is freed.
 */
static void acpi_os_execute_deferred(struct work_struct *work)
{
	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);

	if (dpc->wait)
		acpi_os_wait_events_complete(NULL);

	dpc->function(dpc->context);
	kfree(dpc);
}
819
820 /*******************************************************************************
821  *
822  * FUNCTION:    acpi_os_execute
823  *
824  * PARAMETERS:  Type               - Type of the callback
825  *              Function           - Function to be executed
826  *              Context            - Function parameters
827  *
828  * RETURN:      Status
829  *
830  * DESCRIPTION: Depending on type, either queues function for deferred execution or
831  *              immediately executes function on a separate thread.
832  *
833  ******************************************************************************/
834
835 static acpi_status __acpi_os_execute(acpi_execute_type type,
836         acpi_osd_exec_callback function, void *context, int hp)
837 {
838         acpi_status status = AE_OK;
839         struct acpi_os_dpc *dpc;
840         struct workqueue_struct *queue;
841         int ret;
842         ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
843                           "Scheduling function [%p(%p)] for deferred execution.\n",
844                           function, context));
845
846         /*
847          * Allocate/initialize DPC structure.  Note that this memory will be
848          * freed by the callee.  The kernel handles the work_struct list  in a
849          * way that allows us to also free its memory inside the callee.
850          * Because we may want to schedule several tasks with different
851          * parameters we can't use the approach some kernel code uses of
852          * having a static work_struct.
853          */
854
855         dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
856         if (!dpc)
857                 return AE_NO_MEMORY;
858
859         dpc->function = function;
860         dpc->context = context;
861
862         /*
863          * We can't run hotplug code in keventd_wq/kacpid_wq/kacpid_notify_wq
864          * because the hotplug code may call driver .remove() functions,
865          * which invoke flush_scheduled_work/acpi_os_wait_events_complete
866          * to flush these workqueues.
867          */
868         queue = hp ? kacpi_hotplug_wq :
869                 (type == OSL_NOTIFY_HANDLER ? kacpi_notify_wq : kacpid_wq);
870         dpc->wait = hp ? 1 : 0;
871
872         if (queue == kacpi_hotplug_wq)
873                 INIT_WORK(&dpc->work, acpi_os_execute_deferred);
874         else if (queue == kacpi_notify_wq)
875                 INIT_WORK(&dpc->work, acpi_os_execute_deferred);
876         else
877                 INIT_WORK(&dpc->work, acpi_os_execute_deferred);
878
879         /*
880          * On some machines, a software-initiated SMI causes corruption unless
881          * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
882          * typically it's done in GPE-related methods that are run via
883          * workqueues, so we can avoid the known corruption cases by always
884          * queueing on CPU 0.
885          */
886         ret = queue_work_on(0, queue, &dpc->work);
887
888         if (!ret) {
889                 printk(KERN_ERR PREFIX
890                           "Call to queue_work() failed.\n");
891                 status = AE_ERROR;
892                 kfree(dpc);
893         }
894         return status;
895 }
896
/* Public ACPICA entry point: deferred execution on the non-hotplug queues. */
acpi_status acpi_os_execute(acpi_execute_type type,
			    acpi_osd_exec_callback function, void *context)
{
	return __acpi_os_execute(type, function, context, 0);
}
EXPORT_SYMBOL(acpi_os_execute);
903
/* Queue hotplug work on the dedicated hotplug workqueue (hp = 1). */
acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function,
	void *context)
{
	return __acpi_os_execute(0, function, context, 1);
}
909
/*
 * Drain the general and notify ACPI workqueues.  Note the hotplug queue is
 * deliberately NOT flushed here -- hotplug work itself calls this function
 * (via acpi_os_execute_deferred), so flushing it would self-deadlock.
 */
void acpi_os_wait_events_complete(void *context)
{
	flush_workqueue(kacpid_wq);
	flush_workqueue(kacpi_notify_wq);
}

EXPORT_SYMBOL(acpi_os_wait_events_complete);
917
918 acpi_status
919 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
920 {
921         struct semaphore *sem = NULL;
922
923         sem = acpi_os_allocate(sizeof(struct semaphore));
924         if (!sem)
925                 return AE_NO_MEMORY;
926         memset(sem, 0, sizeof(struct semaphore));
927
928         sema_init(sem, initial_units);
929
930         *handle = (acpi_handle *) sem;
931
932         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
933                           *handle, initial_units));
934
935         return AE_OK;
936 }
937
938 /*
939  * TODO: A better way to delete semaphores?  Linux doesn't have a
940  * 'delete_semaphore()' function -- may result in an invalid
941  * pointer dereference for non-synchronized consumers.  Should
942  * we at least check for blocked threads and signal/cancel them?
943  */
944
945 acpi_status acpi_os_delete_semaphore(acpi_handle handle)
946 {
947         struct semaphore *sem = (struct semaphore *)handle;
948
949         if (!sem)
950                 return AE_BAD_PARAMETER;
951
952         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
953
954         BUG_ON(!list_empty(&sem->wait_list));
955         kfree(sem);
956         sem = NULL;
957
958         return AE_OK;
959 }
960
961 /*
962  * TODO: Support for units > 1?
963  */
964 acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
965 {
966         acpi_status status = AE_OK;
967         struct semaphore *sem = (struct semaphore *)handle;
968         long jiffies;
969         int ret = 0;
970
971         if (!sem || (units < 1))
972                 return AE_BAD_PARAMETER;
973
974         if (units > 1)
975                 return AE_SUPPORT;
976
977         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
978                           handle, units, timeout));
979
980         if (timeout == ACPI_WAIT_FOREVER)
981                 jiffies = MAX_SCHEDULE_TIMEOUT;
982         else
983                 jiffies = msecs_to_jiffies(timeout);
984         
985         ret = down_timeout(sem, jiffies);
986         if (ret)
987                 status = AE_TIME;
988
989         if (ACPI_FAILURE(status)) {
990                 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
991                                   "Failed to acquire semaphore[%p|%d|%d], %s",
992                                   handle, units, timeout,
993                                   acpi_format_exception(status)));
994         } else {
995                 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
996                                   "Acquired semaphore[%p|%d|%d]", handle,
997                                   units, timeout));
998         }
999
1000         return status;
1001 }
1002
1003 /*
1004  * TODO: Support for units > 1?
1005  */
1006 acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
1007 {
1008         struct semaphore *sem = (struct semaphore *)handle;
1009
1010         if (!sem || (units < 1))
1011                 return AE_BAD_PARAMETER;
1012
1013         if (units > 1)
1014                 return AE_SUPPORT;
1015
1016         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
1017                           units));
1018
1019         up(sem);
1020
1021         return AE_OK;
1022 }
1023
1024 #ifdef ACPI_FUTURE_USAGE
/*
 * Read a line of debugger input into 'buffer'.
 *
 * Only does real work when the kernel debugger hook is compiled in
 * (ENABLE_DEBUGGER) and active; otherwise 'buffer' is left untouched.
 * Always returns 0.
 */
u32 acpi_os_get_line(char *buffer)
{

#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		u32 chars;

		/*
		 * NOTE(review): the bound is sizeof(line_buf), not the size
		 * of 'buffer' -- assumes callers always pass line_buf (or a
		 * buffer at least that large); confirm against callers.
		 */
		kdb_read(buffer, sizeof(line_buf));

		/* remove the CR kdb includes */
		/*
		 * NOTE(review): if kdb_read() ever yields an empty string,
		 * 'chars' underflows and buffer[-1] is written -- verify
		 * kdb_read() always returns at least one character.
		 */
		chars = strlen(buffer) - 1;
		buffer[chars] = '\0';
	}
#endif

	return 0;
}
1042 #endif                          /*  ACPI_FUTURE_USAGE  */
1043
1044 acpi_status acpi_os_signal(u32 function, void *info)
1045 {
1046         switch (function) {
1047         case ACPI_SIGNAL_FATAL:
1048                 printk(KERN_ERR PREFIX "Fatal opcode executed\n");
1049                 break;
1050         case ACPI_SIGNAL_BREAKPOINT:
1051                 /*
1052                  * AML Breakpoint
1053                  * ACPI spec. says to treat it as a NOP unless
1054                  * you are debugging.  So if/when we integrate
1055                  * AML debugger into the kernel debugger its
1056                  * hook will go here.  But until then it is
1057                  * not useful to print anything on breakpoints.
1058                  */
1059                 break;
1060         default:
1061                 break;
1062         }
1063
1064         return AE_OK;
1065 }
1066
1067 static int __init acpi_os_name_setup(char *str)
1068 {
1069         char *p = acpi_os_name;
1070         int count = ACPI_MAX_OVERRIDE_LEN - 1;
1071
1072         if (!str || !*str)
1073                 return 0;
1074
1075         for (; count-- && str && *str; str++) {
1076                 if (isalnum(*str) || *str == ' ' || *str == ':')
1077                         *p++ = *str;
1078                 else if (*str == '\'' || *str == '"')
1079                         continue;
1080                 else
1081                         break;
1082         }
1083         *p = 0;
1084
1085         return 1;
1086
1087 }
1088
1089 __setup("acpi_os_name=", acpi_os_name_setup);
1090
#define OSI_STRING_LENGTH_MAX 64        /* arbitrary */
#define OSI_STRING_ENTRIES_MAX 16       /* arbitrary */

/* One _OSI string override: the interface name and whether to install it. */
struct osi_setup_entry {
	char string[OSI_STRING_LENGTH_MAX];	/* interface name */
	bool enable;				/* install (true) or remove (false) */
};

/*
 * Table of _OSI interface overrides, filled in by acpi_osi_setup() from
 * the command line and DMI quirks, and applied by acpi_osi_setup_late().
 * The entries below are installed by default.
 */
static struct osi_setup_entry __initdata
		osi_setup_entries[OSI_STRING_ENTRIES_MAX] = {
	{"Module Device", true},
	{"Processor Device", true},
	{"3.0 _SCP Extensions", true},
	{"Processor Aggregator Device", true},
};
1106
1107 void __init acpi_osi_setup(char *str)
1108 {
1109         struct osi_setup_entry *osi;
1110         bool enable = true;
1111         int i;
1112
1113         if (!acpi_gbl_create_osi_method)
1114                 return;
1115
1116         if (str == NULL || *str == '\0') {
1117                 printk(KERN_INFO PREFIX "_OSI method disabled\n");
1118                 acpi_gbl_create_osi_method = FALSE;
1119                 return;
1120         }
1121
1122         if (*str == '!') {
1123                 str++;
1124                 enable = false;
1125         }
1126
1127         for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
1128                 osi = &osi_setup_entries[i];
1129                 if (!strcmp(osi->string, str)) {
1130                         osi->enable = enable;
1131                         break;
1132                 } else if (osi->string[0] == '\0') {
1133                         osi->enable = enable;
1134                         strncpy(osi->string, str, OSI_STRING_LENGTH_MAX);
1135                         break;
1136                 }
1137         }
1138 }
1139
1140 static void __init set_osi_linux(unsigned int enable)
1141 {
1142         if (osi_linux.enable != enable)
1143                 osi_linux.enable = enable;
1144
1145         if (osi_linux.enable)
1146                 acpi_osi_setup("Linux");
1147         else
1148                 acpi_osi_setup("!Linux");
1149
1150         return;
1151 }
1152
1153 static void __init acpi_cmdline_osi_linux(unsigned int enable)
1154 {
1155         osi_linux.cmdline = 1;  /* cmdline set the default and override DMI */
1156         osi_linux.dmi = 0;
1157         set_osi_linux(enable);
1158
1159         return;
1160 }
1161
1162 void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
1163 {
1164         printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
1165
1166         if (enable == -1)
1167                 return;
1168
1169         osi_linux.dmi = 1;      /* DMI knows that this box asks OSI(Linux) */
1170         set_osi_linux(enable);
1171
1172         return;
1173 }
1174
1175 /*
1176  * Modify the list of "OS Interfaces" reported to BIOS via _OSI
1177  *
1178  * empty string disables _OSI
1179  * string starting with '!' disables that string
1180  * otherwise string is added to list, augmenting built-in strings
1181  */
1182 static void __init acpi_osi_setup_late(void)
1183 {
1184         struct osi_setup_entry *osi;
1185         char *str;
1186         int i;
1187         acpi_status status;
1188
1189         for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
1190                 osi = &osi_setup_entries[i];
1191                 str = osi->string;
1192
1193                 if (*str == '\0')
1194                         break;
1195                 if (osi->enable) {
1196                         status = acpi_install_interface(str);
1197
1198                         if (ACPI_SUCCESS(status))
1199                                 printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
1200                 } else {
1201                         status = acpi_remove_interface(str);
1202
1203                         if (ACPI_SUCCESS(status))
1204                                 printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
1205                 }
1206         }
1207 }
1208
1209 static int __init osi_setup(char *str)
1210 {
1211         if (str && !strcmp("Linux", str))
1212                 acpi_cmdline_osi_linux(1);
1213         else if (str && !strcmp("!Linux", str))
1214                 acpi_cmdline_osi_linux(0);
1215         else
1216                 acpi_osi_setup(str);
1217
1218         return 1;
1219 }
1220
1221 __setup("acpi_osi=", osi_setup);
1222
1223 /* enable serialization to combat AE_ALREADY_EXISTS errors */
1224 static int __init acpi_serialize_setup(char *str)
1225 {
1226         printk(KERN_INFO PREFIX "serialize enabled\n");
1227
1228         acpi_gbl_all_methods_serialized = TRUE;
1229
1230         return 1;
1231 }
1232
1233 __setup("acpi_serialize", acpi_serialize_setup);
1234
/* Check for resource interference between native drivers and ACPI
1236  * OperationRegions (SystemIO and System Memory only).
1237  * IO ports and memory declared in ACPI might be used by the ACPI subsystem
1238  * in arbitrary AML code and can interfere with legacy drivers.
1239  * acpi_enforce_resources= can be set to:
1240  *
1241  *   - strict (default) (2)
1242  *     -> further driver trying to access the resources will not load
1243  *   - lax              (1)
1244  *     -> further driver trying to access the resources will load, but you
1245  *     get a system message that something might go wrong...
1246  *
1247  *   - no               (0)
1248  *     -> ACPI Operation Region resources will not be registered
1249  *
1250  */
/* Enforcement levels for acpi_enforce_resources (see comment above). */
#define ENFORCE_RESOURCES_STRICT 2
#define ENFORCE_RESOURCES_LAX    1
#define ENFORCE_RESOURCES_NO     0

/* Current policy; overridable via "acpi_enforce_resources=" below. */
static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1256
1257 static int __init acpi_enforce_resources_setup(char *str)
1258 {
1259         if (str == NULL || *str == '\0')
1260                 return 0;
1261
1262         if (!strcmp("strict", str))
1263                 acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1264         else if (!strcmp("lax", str))
1265                 acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
1266         else if (!strcmp("no", str))
1267                 acpi_enforce_resources = ENFORCE_RESOURCES_NO;
1268
1269         return 1;
1270 }
1271
1272 __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
1273
/* Check for resource conflicts between ACPI OperationRegions and native
 * drivers */
/*
 * acpi_check_resource_conflict - check a driver resource against the
 * SystemIO/SystemMemory regions declared in AML.
 *
 * Returns -EBUSY when a conflict exists and the policy is "strict";
 * 0 otherwise (including "lax", which only warns, and "no", which skips
 * the check entirely).  Only IORESOURCE_IO/IORESOURCE_MEM resources are
 * checked.
 */
int acpi_check_resource_conflict(const struct resource *res)
{
	struct acpi_res_list *res_list_elem;
	int ioport = 0, clash = 0;

	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
		return 0;
	if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
		return 0;

	ioport = res->flags & IORESOURCE_IO;

	/* scan the tracked AML regions for an overlapping range of the
	   same address-space type (ports vs. memory) */
	spin_lock(&acpi_res_lock);
	list_for_each_entry(res_list_elem, &resource_list_head,
			    resource_list) {
		if (ioport && (res_list_elem->resource_type
			       != ACPI_ADR_SPACE_SYSTEM_IO))
			continue;
		if (!ioport && (res_list_elem->resource_type
				!= ACPI_ADR_SPACE_SYSTEM_MEMORY))
			continue;

		/* overlap test on inclusive [start, end] ranges */
		if (res->end < res_list_elem->start
		    || res_list_elem->end < res->start)
			continue;
		clash = 1;
		break;
	}
	spin_unlock(&acpi_res_lock);

	if (clash) {
		/* (this condition is always true here -- the "no" policy
		   already returned early above) */
		if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
			/*
			 * NOTE(review): res_list_elem is dereferenced after
			 * acpi_res_lock was dropped; if the entry can be
			 * freed concurrently via acpi_res_list_del() this is
			 * a potential use-after-free -- verify callers'
			 * serialization.
			 */
			printk(KERN_WARNING "ACPI: resource %s %pR"
			       " conflicts with ACPI region %s "
			       "[%s 0x%zx-0x%zx]\n",
			       res->name, res, res_list_elem->name,
			       (res_list_elem->resource_type ==
				ACPI_ADR_SPACE_SYSTEM_IO) ? "io" : "mem",
			       (size_t) res_list_elem->start,
			       (size_t) res_list_elem->end);
			if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
				printk(KERN_NOTICE "ACPI: This conflict may"
				       " cause random problems and system"
				       " instability\n");
			printk(KERN_INFO "ACPI: If an ACPI driver is available"
			       " for this device, you should use it instead of"
			       " the native driver\n");
		}
		if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
			return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL(acpi_check_resource_conflict);
1330
1331 int acpi_check_region(resource_size_t start, resource_size_t n,
1332                       const char *name)
1333 {
1334         struct resource res = {
1335                 .start = start,
1336                 .end   = start + n - 1,
1337                 .name  = name,
1338                 .flags = IORESOURCE_IO,
1339         };
1340
1341         return acpi_check_resource_conflict(&res);
1342 }
1343 EXPORT_SYMBOL(acpi_check_region);
1344
1345 /*
1346  * Let drivers know whether the resource checks are effective
1347  */
1348 int acpi_resources_are_enforced(void)
1349 {
1350         return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
1351 }
1352 EXPORT_SYMBOL(acpi_resources_are_enforced);
1353
/*
 * Deallocate the memory for a spinlock.
 */
void acpi_os_delete_lock(acpi_spinlock handle)
{
	/* presumably pairs with an ACPI_ALLOCATE-based create routine
	   elsewhere in this file -- confirm the allocation site */
	ACPI_FREE(handle);
}
1361
/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 */

acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
{
	acpi_cpu_flags flags;
	/* spin_lock_irqsave() is a macro writing 'flags' in place; the
	   saved IRQ state is returned for the matching release call */
	spin_lock_irqsave(lockp, flags);
	return flags;
}
1374
/*
 * Release a spinlock. See above.
 */

void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
{
	/* 'flags' must be the value returned by acpi_os_acquire_lock() */
	spin_unlock_irqrestore(lockp, flags);
}
1383
1384 #ifndef ACPI_USE_LOCAL_CACHE
1385
1386 /*******************************************************************************
1387  *
1388  * FUNCTION:    acpi_os_create_cache
1389  *
1390  * PARAMETERS:  name      - Ascii name for the cache
1391  *              size      - Size of each cached object
1392  *              depth     - Maximum depth of the cache (in objects) <ignored>
1393  *              cache     - Where the new cache object is returned
1394  *
1395  * RETURN:      status
1396  *
1397  * DESCRIPTION: Create a cache object
1398  *
1399  ******************************************************************************/
1400
1401 acpi_status
1402 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
1403 {
1404         *cache = kmem_cache_create(name, size, 0, 0, NULL);
1405         if (*cache == NULL)
1406                 return AE_ERROR;
1407         else
1408                 return AE_OK;
1409 }
1410
1411 /*******************************************************************************
1412  *
1413  * FUNCTION:    acpi_os_purge_cache
1414  *
1415  * PARAMETERS:  Cache           - Handle to cache object
1416  *
1417  * RETURN:      Status
1418  *
1419  * DESCRIPTION: Free all objects within the requested cache.
1420  *
1421  ******************************************************************************/
1422
1423 acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
1424 {
1425         kmem_cache_shrink(cache);
1426         return (AE_OK);
1427 }
1428
1429 /*******************************************************************************
1430  *
1431  * FUNCTION:    acpi_os_delete_cache
1432  *
1433  * PARAMETERS:  Cache           - Handle to cache object
1434  *
1435  * RETURN:      Status
1436  *
1437  * DESCRIPTION: Free all objects within the requested cache and delete the
1438  *              cache object.
1439  *
1440  ******************************************************************************/
1441
1442 acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
1443 {
1444         kmem_cache_destroy(cache);
1445         return (AE_OK);
1446 }
1447
1448 /*******************************************************************************
1449  *
1450  * FUNCTION:    acpi_os_release_object
1451  *
1452  * PARAMETERS:  Cache       - Handle to cache object
1453  *              Object      - The object to be released
1454  *
1455  * RETURN:      None
1456  *
1457  * DESCRIPTION: Release an object to the specified cache.  If cache is full,
1458  *              the object is deleted.
1459  *
1460  ******************************************************************************/
1461
1462 acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
1463 {
1464         kmem_cache_free(cache, object);
1465         return (AE_OK);
1466 }
1467
1468 static inline int acpi_res_list_add(struct acpi_res_list *res)
1469 {
1470         struct acpi_res_list *res_list_elem;
1471
1472         list_for_each_entry(res_list_elem, &resource_list_head,
1473                             resource_list) {
1474
1475                 if (res->resource_type == res_list_elem->resource_type &&
1476                     res->start == res_list_elem->start &&
1477                     res->end == res_list_elem->end) {
1478
1479                         /*
1480                          * The Region(addr,len) already exist in the list,
1481                          * just increase the count
1482                          */
1483
1484                         res_list_elem->count++;
1485                         return 0;
1486                 }
1487         }
1488
1489         res->count = 1;
1490         list_add(&res->resource_list, &resource_list_head);
1491         return 1;
1492 }
1493
1494 static inline void acpi_res_list_del(struct acpi_res_list *res)
1495 {
1496         struct acpi_res_list *res_list_elem;
1497
1498         list_for_each_entry(res_list_elem, &resource_list_head,
1499                             resource_list) {
1500
1501                 if (res->resource_type == res_list_elem->resource_type &&
1502                     res->start == res_list_elem->start &&
1503                     res->end == res_list_elem->end) {
1504
1505                         /*
1506                          * If the res count is decreased to 0,
1507                          * remove and free it
1508                          */
1509
1510                         if (--res_list_elem->count == 0) {
1511                                 list_del(&res_list_elem->resource_list);
1512                                 kfree(res_list_elem);
1513                         }
1514                         return;
1515                 }
1516         }
1517 }
1518
/*
 * Stop tracking an AML OperationRegion address range: drop one reference
 * to the matching [address, address + length - 1] entry in the
 * interference-check list (counterpart of acpi_os_validate_address()).
 * Always returns AE_OK.
 */
acpi_status
acpi_os_invalidate_address(
    u8                   space_id,
    acpi_physical_address   address,
    acpi_size               length)
{
	struct acpi_res_list res;

	switch (space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		/* Only interference checks against SystemIO and SystemMemory
		   are needed */
		res.start = address;
		res.end = address + length - 1;
		res.resource_type = space_id;
		spin_lock(&acpi_res_lock);
		acpi_res_list_del(&res);
		spin_unlock(&acpi_res_lock);
		break;
	case ACPI_ADR_SPACE_PCI_CONFIG:
	case ACPI_ADR_SPACE_EC:
	case ACPI_ADR_SPACE_SMBUS:
	case ACPI_ADR_SPACE_CMOS:
	case ACPI_ADR_SPACE_PCI_BAR_TARGET:
	case ACPI_ADR_SPACE_DATA_TABLE:
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		/* nothing is tracked for these address spaces */
		break;
	}
	return AE_OK;
}
1550
1551 /******************************************************************************
1552  *
1553  * FUNCTION:    acpi_os_validate_address
1554  *
1555  * PARAMETERS:  space_id             - ACPI space ID
1556  *              address             - Physical address
1557  *              length              - Address length
1558  *
1559  * RETURN:      AE_OK if address/length is valid for the space_id. Otherwise,
1560  *              should return AE_AML_ILLEGAL_ADDRESS.
1561  *
1562  * DESCRIPTION: Validate a system address via the host OS. Used to validate
1563  *              the addresses accessed by AML operation regions.
1564  *
1565  *****************************************************************************/
1566
/*
 * Record a SystemIO/SystemMemory OperationRegion range in the
 * interference-check list used by acpi_check_resource_conflict().
 * Tracking is best-effort: allocation failure and the "no" enforcement
 * policy both return AE_OK without recording anything, and the address
 * itself is never rejected here.
 */
acpi_status
acpi_os_validate_address (
    u8                   space_id,
    acpi_physical_address   address,
    acpi_size               length,
    char *name)
{
	struct acpi_res_list *res;
	int added;
	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
		return AE_OK;

	switch (space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		/* Only interference checks against SystemIO and SystemMemory
		   are needed */
		res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL);
		if (!res)
			/* best effort: no memory just means no tracking */
			return AE_OK;
		/* ACPI names are fixed to 4 bytes, still better use strlcpy */
		strlcpy(res->name, name, 5);
		res->start = address;
		res->end = address + length - 1;
		res->resource_type = space_id;
		spin_lock(&acpi_res_lock);
		added = acpi_res_list_add(res);
		spin_unlock(&acpi_res_lock);
		pr_debug("%s %s resource: start: 0x%llx, end: 0x%llx, "
			 "name: %s\n", added ? "Added" : "Already exist",
			 (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
			 ? "SystemIO" : "System Memory",
			 (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 res->name);
		/* duplicate range: list kept its own entry, free ours */
		if (!added)
			kfree(res);
		break;
	case ACPI_ADR_SPACE_PCI_CONFIG:
	case ACPI_ADR_SPACE_EC:
	case ACPI_ADR_SPACE_SMBUS:
	case ACPI_ADR_SPACE_CMOS:
	case ACPI_ADR_SPACE_PCI_BAR_TARGET:
	case ACPI_ADR_SPACE_DATA_TABLE:
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		/* nothing to track for these address spaces */
		break;
	}
	return AE_OK;
}
1616 #endif
1617
/*
 * First-stage OSL initialization: map the fixed PM1/GPE register blocks
 * described by the FADT.
 */
acpi_status __init acpi_os_initialize(void)
{
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);

	/* NOTE(review): mapping results are ignored -- presumably later
	   accessors tolerate unmapped/absent blocks; confirm */
	return AE_OK;
}
1627
/*
 * Second-stage OSL initialization, run once the ACPI namespace is
 * available: reserve AML-declared resources, create the ACPI work
 * queues and register the _OSI handler/overrides.
 */
acpi_status __init acpi_os_initialize1(void)
{
	/*
	 * NOTE(review): reserving the AML operation-region resources at
	 * this point may be mis-ordered relative to PCI resource
	 * assignment later in boot; consider deferring this call (e.g.
	 * to an fs_initcall) -- confirm against platform init ordering.
	 */
	acpi_reserve_resources();
	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
	kacpi_hotplug_wq = alloc_workqueue("kacpi_hotplug", 0, 1);
	/* the workqueues are mandatory; a failure here is unrecoverable */
	BUG_ON(!kacpid_wq);
	BUG_ON(!kacpi_notify_wq);
	BUG_ON(!kacpi_hotplug_wq);
	acpi_install_interface_handler(acpi_osi_handler);
	acpi_osi_setup_late();
	return AE_OK;
}
1641
/*
 * OSL teardown: release the SCI interrupt handler (if installed), unmap
 * the FADT register blocks and destroy the ACPI work queues.
 */
acpi_status acpi_os_terminate(void)
{
	if (acpi_irq_handler) {
		acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
						 acpi_irq_handler);
	}

	/* unmap in reverse order of acpi_os_initialize() */
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);

	destroy_workqueue(kacpid_wq);
	destroy_workqueue(kacpi_notify_wq);
	destroy_workqueue(kacpi_hotplug_wq);

	return AE_OK;
}