1 /*
2  *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
3  *
4  *  Copyright (C) 2000       Andrew Henroid
5  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
6  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7  *
8  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9  *
10  *  This program is free software; you can redistribute it and/or modify
11  *  it under the terms of the GNU General Public License as published by
12  *  the Free Software Foundation; either version 2 of the License, or
13  *  (at your option) any later version.
14  *
15  *  This program is distributed in the hope that it will be useful,
16  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
17  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  *  GNU General Public License for more details.
19  *
20  *  You should have received a copy of the GNU General Public License
21  *  along with this program; if not, write to the Free Software
22  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
23  *
24  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
25  *
26  */
27
28 #include <linux/module.h>
29 #include <linux/kernel.h>
30 #include <linux/slab.h>
31 #include <linux/mm.h>
32 #include <linux/pci.h>
33 #include <linux/interrupt.h>
34 #include <linux/kmod.h>
35 #include <linux/delay.h>
36 #include <linux/dmi.h>
37 #include <linux/workqueue.h>
38 #include <linux/nmi.h>
39 #include <linux/acpi.h>
40 #include <acpi/acpi.h>
41 #include <asm/io.h>
42 #include <acpi/acpi_bus.h>
43 #include <acpi/processor.h>
44 #include <asm/uaccess.h>
45
46 #include <linux/efi.h>
47
48 #define _COMPONENT              ACPI_OS_SERVICES
49 ACPI_MODULE_NAME("osl");
50 #define PREFIX          "ACPI: "
51 struct acpi_os_dpc {
52         acpi_osd_exec_callback function;
53         void *context;
54         struct work_struct work;
55 };
56
57 #ifdef CONFIG_ACPI_CUSTOM_DSDT
58 #include CONFIG_ACPI_CUSTOM_DSDT_FILE
59 #endif
60
61 #ifdef ENABLE_DEBUGGER
62 #include <linux/kdb.h>
63
64 /* stuff for debugger support */
65 int acpi_in_debugger;
66 EXPORT_SYMBOL(acpi_in_debugger);
67
68 extern char line_buf[80];
69 #endif                          /*ENABLE_DEBUGGER */
70
71 static unsigned int acpi_irq_irq;
72 static acpi_osd_handler acpi_irq_handler;
73 static void *acpi_irq_context;
74 static struct workqueue_struct *kacpid_wq;
75 static struct workqueue_struct *kacpi_notify_wq;
76
77 #define OSI_STRING_LENGTH_MAX 64        /* arbitrary */
78 static char osi_additional_string[OSI_STRING_LENGTH_MAX];
79
80 /*
81  * "Ode to _OSI(Linux)"
82  *
83  * osi_linux -- Control response to BIOS _OSI(Linux) query.
84  *
85  * As Linux evolves, the features that it supports change.
86  * So an OSI string such as "Linux" is not specific enough
87  * to be useful across multiple versions of Linux.  It
88  * doesn't identify any particular feature, interface,
89  * or even any particular version of Linux...
90  *
91  * Unfortunately, Linux-2.6.22 and earlier responded "yes"
92  * to a BIOS _OSI(Linux) query.  When
93  * a reference mobile BIOS started using it, its use
94  * started to spread to many vendor platforms.
95  * As it is not supportable, we need to halt that spread.
96  *
97  * Today, most BIOS references to _OSI(Linux) are noise --
98  * they have no functional effect and are just dead code
99  * carried over from the reference BIOS.
100  *
101  * The next most common case is that _OSI(Linux) harms Linux,
102  * usually by causing the BIOS to follow paths that are
103  * not tested during Windows validation.
104  *
105  * Finally, there is a short list of platforms
106  * where OSI(Linux) benefits Linux.
107  *
108  * Starting with Linux-2.6.23, OSI(Linux) is disabled by default.
109  * DMI is used to disable the dmesg warning about OSI(Linux)
110  * on platforms where it is known to have no effect.
111  * But a dmesg warning remains for systems where
112  * we do not know if OSI(Linux) is good or bad for the system.
113  * DMI is also used to enable OSI(Linux) for the machines
114  * that are known to need it.
115  *
116  * BIOS writers should NOT query _OSI(Linux) on future systems.
117  * It will be ignored by default, and to get Linux to
118  * not ignore it will require a kernel source update to
119  * add a DMI entry, or a boot-time "acpi_osi=Linux" invocation.
120  */
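
/*
 * Purely illustrative sketch (not part of this file): the "DMI entry"
 * mentioned above would normally live in a board-specific quirk table
 * (e.g. drivers/acpi/blacklist.c in kernels of this era) and call
 * acpi_dmi_osi_linux() from its callback.  All identifiers and match
 * strings below are assumed, for illustration only:
 *
 *	static int __init dmi_enable_osi_linux(const struct dmi_system_id *d)
 *	{
 *		acpi_dmi_osi_linux(1, d);	(enable OSI(Linux) for this box)
 *		return 0;
 *	}
 *
 *	static struct dmi_system_id osi_linux_dmi_table[] __initdata = {
 *		{
 *		 .callback = dmi_enable_osi_linux,
 *		 .ident = "Example Vendor Example Laptop",
 *		 .matches = {
 *			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
 *			DMI_MATCH(DMI_PRODUCT_NAME, "Example Laptop"),
 *		 },
 *		},
 *		{}
 *	};
 */
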
121 #define OSI_LINUX_ENABLE 0
122
123 struct osi_linux {
124         unsigned int    enable:1;
125         unsigned int    dmi:1;
126         unsigned int    cmdline:1;
127         unsigned int    known:1;
128 } osi_linux = { OSI_LINUX_ENABLE, 0, 0, 0};
129
130 static void __init acpi_request_region (struct acpi_generic_address *addr,
131         unsigned int length, char *desc)
132 {
133         struct resource *res;
134
135         if (!addr->address || !length)
136                 return;
137
138         if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
139                 res = request_region(addr->address, length, desc);
140         else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
141                 res = request_mem_region(addr->address, length, desc);
142 }
143
144 static int __init acpi_reserve_resources(void)
145 {
146         acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
147                 "ACPI PM1a_EVT_BLK");
148
149         acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
150                 "ACPI PM1b_EVT_BLK");
151
152         acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
153                 "ACPI PM1a_CNT_BLK");
154
155         acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
156                 "ACPI PM1b_CNT_BLK");
157
158         if (acpi_gbl_FADT.pm_timer_length == 4)
159                 acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");
160
161         acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
162                 "ACPI PM2_CNT_BLK");
163
164         /* Length of GPE blocks must be a non-negative multiple of 2 */
165
166         if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
167                 acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
168                                acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");
169
170         if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
171                 acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
172                                acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
173
174         return 0;
175 }
176 device_initcall(acpi_reserve_resources);
177
178 acpi_status __init acpi_os_initialize(void)
179 {
180         return AE_OK;
181 }
182
183 acpi_status acpi_os_initialize1(void)
184 {
185         /*
186          * Initialize PCI configuration space access, as we'll need to access
187          * it while walking the namespace (bus 0 and root bridges w/ _BBNs).
188          */
189         if (!raw_pci_ops) {
190                 printk(KERN_ERR PREFIX
191                        "Access to PCI configuration space unavailable\n");
192                 return AE_NULL_ENTRY;
193         }
194         kacpid_wq = create_singlethread_workqueue("kacpid");
195         kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
196         BUG_ON(!kacpid_wq);
197         BUG_ON(!kacpi_notify_wq);
198         return AE_OK;
199 }
200
201 acpi_status acpi_os_terminate(void)
202 {
203         if (acpi_irq_handler) {
204                 acpi_os_remove_interrupt_handler(acpi_irq_irq,
205                                                  acpi_irq_handler);
206         }
207
208         destroy_workqueue(kacpid_wq);
209         destroy_workqueue(kacpi_notify_wq);
210
211         return AE_OK;
212 }
213
214 void acpi_os_printf(const char *fmt, ...)
215 {
216         va_list args;
217         va_start(args, fmt);
218         acpi_os_vprintf(fmt, args);
219         va_end(args);
220 }
221
222 EXPORT_SYMBOL(acpi_os_printf);
223
224 void acpi_os_vprintf(const char *fmt, va_list args)
225 {
226         static char buffer[512];
227
228         vsprintf(buffer, fmt, args);
229
230 #ifdef ENABLE_DEBUGGER
231         if (acpi_in_debugger) {
232                 kdb_printf("%s", buffer);
233         } else {
234                 printk("%s", buffer);
235         }
236 #else
237         printk("%s", buffer);
238 #endif
239 }
240
241 acpi_physical_address __init acpi_os_get_root_pointer(void)
242 {
243         if (efi_enabled) {
244                 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
245                         return efi.acpi20;
246                 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
247                         return efi.acpi;
248                 else {
249                         printk(KERN_ERR PREFIX
250                                "System description tables not found\n");
251                         return 0;
252                 }
253         } else {
254                 acpi_physical_address pa = 0;
255
256                 acpi_find_root_pointer(&pa);
257                 return pa;
258         }
259 }
260
261 void __iomem *acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
262 {
263         if (phys > ULONG_MAX) {
264                 printk(KERN_ERR PREFIX "Cannot map memory that high\n");
265                 return NULL;
266         }
267         if (acpi_gbl_permanent_mmap)
 268                 /*
 269                  * ioremap checks to ensure this is in reserved space
 270                  */
271                 return ioremap((unsigned long)phys, size);
272         else
273                 return __acpi_map_table((unsigned long)phys, size);
274 }
275 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
276
277 void acpi_os_unmap_memory(void __iomem * virt, acpi_size size)
278 {
279         if (acpi_gbl_permanent_mmap) {
280                 iounmap(virt);
281         }
282 }
283 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
284
285 #ifdef ACPI_FUTURE_USAGE
286 acpi_status
287 acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
288 {
289         if (!phys || !virt)
290                 return AE_BAD_PARAMETER;
291
292         *phys = virt_to_phys(virt);
293
294         return AE_OK;
295 }
296 #endif
297
298 #define ACPI_MAX_OVERRIDE_LEN 100
299
300 static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
301
302 acpi_status
303 acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
304                             acpi_string * new_val)
305 {
306         if (!init_val || !new_val)
307                 return AE_BAD_PARAMETER;
308
309         *new_val = NULL;
310         if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
311                 printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
312                        acpi_os_name);
313                 *new_val = acpi_os_name;
314         }
315
316         return AE_OK;
317 }
318
319 acpi_status
320 acpi_os_table_override(struct acpi_table_header * existing_table,
321                        struct acpi_table_header ** new_table)
322 {
323         if (!existing_table || !new_table)
324                 return AE_BAD_PARAMETER;
325
326 #ifdef CONFIG_ACPI_CUSTOM_DSDT
327         if (strncmp(existing_table->signature, "DSDT", 4) == 0)
328                 *new_table = (struct acpi_table_header *)AmlCode;
329         else
330                 *new_table = NULL;
331 #else
332         *new_table = NULL;
333 #endif
334         return AE_OK;
335 }
336
337 static irqreturn_t acpi_irq(int irq, void *dev_id)
338 {
339         return (*acpi_irq_handler) (acpi_irq_context) ? IRQ_HANDLED : IRQ_NONE;
340 }
341
342 acpi_status
343 acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
344                                   void *context)
345 {
346         unsigned int irq;
347
348         /*
349          * Ignore the GSI from the core, and use the value in our copy of the
350          * FADT. It may not be the same if an interrupt source override exists
351          * for the SCI.
352          */
353         gsi = acpi_gbl_FADT.sci_interrupt;
354         if (acpi_gsi_to_irq(gsi, &irq) < 0) {
355                 printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
356                        gsi);
357                 return AE_OK;
358         }
359
360         acpi_irq_handler = handler;
361         acpi_irq_context = context;
362         if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
363                 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
364                 return AE_NOT_ACQUIRED;
365         }
366         acpi_irq_irq = irq;
367
368         return AE_OK;
369 }
370
371 acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
372 {
373         if (irq) {
374                 free_irq(irq, acpi_irq);
375                 acpi_irq_handler = NULL;
376                 acpi_irq_irq = 0;
377         }
378
379         return AE_OK;
380 }
381
382 /*
383  * Running in interpreter thread context, safe to sleep
384  */
385
386 void acpi_os_sleep(acpi_integer ms)
387 {
388         schedule_timeout_interruptible(msecs_to_jiffies(ms));
389 }
390
391 EXPORT_SYMBOL(acpi_os_sleep);
392
393 void acpi_os_stall(u32 us)
394 {
395         while (us) {
396                 u32 delay = 1000;
397
398                 if (delay > us)
399                         delay = us;
400                 udelay(delay);
401                 touch_nmi_watchdog();
402                 us -= delay;
403         }
404 }
405
406 EXPORT_SYMBOL(acpi_os_stall);
407
408 /*
409  * Support ACPI 3.0 AML Timer operand
410  * Returns 64-bit free-running, monotonically increasing timer
411  * with 100ns granularity
412  */
413 u64 acpi_os_get_timer(void)
414 {
415         static u64 t;
416
417 #ifdef  CONFIG_HPET
418         /* TBD: use HPET if available */
419 #endif
420
421 #ifdef  CONFIG_X86_PM_TIMER
422         /* TBD: default to PM timer if HPET was not available */
423 #endif
424         if (!t)
425                 printk(KERN_ERR PREFIX "acpi_os_get_timer() TBD\n");
426
427         return ++t;
428 }
429
430 acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
431 {
432         u32 dummy;
433
434         if (!value)
435                 value = &dummy;
436
437         *value = 0;
438         if (width <= 8) {
439                 *(u8 *) value = inb(port);
440         } else if (width <= 16) {
441                 *(u16 *) value = inw(port);
442         } else if (width <= 32) {
443                 *(u32 *) value = inl(port);
444         } else {
445                 BUG();
446         }
447
448         return AE_OK;
449 }
450
451 EXPORT_SYMBOL(acpi_os_read_port);
452
453 acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
454 {
455         if (width <= 8) {
456                 outb(value, port);
457         } else if (width <= 16) {
458                 outw(value, port);
459         } else if (width <= 32) {
460                 outl(value, port);
461         } else {
462                 BUG();
463         }
464
465         return AE_OK;
466 }
467
468 EXPORT_SYMBOL(acpi_os_write_port);
469
470 acpi_status
471 acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
472 {
473         u32 dummy;
474         void __iomem *virt_addr;
475
476         virt_addr = ioremap(phys_addr, width);
477         if (!value)
478                 value = &dummy;
479
480         switch (width) {
481         case 8:
482                 *(u8 *) value = readb(virt_addr);
483                 break;
484         case 16:
485                 *(u16 *) value = readw(virt_addr);
486                 break;
487         case 32:
488                 *(u32 *) value = readl(virt_addr);
489                 break;
490         default:
491                 BUG();
492         }
493
494         iounmap(virt_addr);
495
496         return AE_OK;
497 }
498
499 acpi_status
500 acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
501 {
502         void __iomem *virt_addr;
503
504         virt_addr = ioremap(phys_addr, width);
505
506         switch (width) {
507         case 8:
508                 writeb(value, virt_addr);
509                 break;
510         case 16:
511                 writew(value, virt_addr);
512                 break;
513         case 32:
514                 writel(value, virt_addr);
515                 break;
516         default:
517                 BUG();
518         }
519
520         iounmap(virt_addr);
521
522         return AE_OK;
523 }
524
525 acpi_status
526 acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
527                                void *value, u32 width)
528 {
529         int result, size;
530
531         if (!value)
532                 return AE_BAD_PARAMETER;
533
534         switch (width) {
535         case 8:
536                 size = 1;
537                 break;
538         case 16:
539                 size = 2;
540                 break;
541         case 32:
542                 size = 4;
543                 break;
544         default:
545                 return AE_ERROR;
546         }
547
548         BUG_ON(!raw_pci_ops);
549
550         result = raw_pci_ops->read(pci_id->segment, pci_id->bus,
551                                    PCI_DEVFN(pci_id->device, pci_id->function),
552                                    reg, size, value);
553
554         return (result ? AE_ERROR : AE_OK);
555 }
556
557 EXPORT_SYMBOL(acpi_os_read_pci_configuration);
558
559 acpi_status
560 acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
561                                 acpi_integer value, u32 width)
562 {
563         int result, size;
564
565         switch (width) {
566         case 8:
567                 size = 1;
568                 break;
569         case 16:
570                 size = 2;
571                 break;
572         case 32:
573                 size = 4;
574                 break;
575         default:
576                 return AE_ERROR;
577         }
578
579         BUG_ON(!raw_pci_ops);
580
581         result = raw_pci_ops->write(pci_id->segment, pci_id->bus,
582                                     PCI_DEVFN(pci_id->device, pci_id->function),
583                                     reg, size, value);
584
585         return (result ? AE_ERROR : AE_OK);
586 }
587
588 /* TODO: Change code to take advantage of driver model more */
589 static void acpi_os_derive_pci_id_2(acpi_handle rhandle,        /* upper bound  */
590                                     acpi_handle chandle,        /* current node */
591                                     struct acpi_pci_id **id,
592                                     int *is_bridge, u8 * bus_number)
593 {
594         acpi_handle handle;
595         struct acpi_pci_id *pci_id = *id;
596         acpi_status status;
597         unsigned long temp;
598         acpi_object_type type;
599         u8 tu8;
600
601         acpi_get_parent(chandle, &handle);
602         if (handle != rhandle) {
603                 acpi_os_derive_pci_id_2(rhandle, handle, &pci_id, is_bridge,
604                                         bus_number);
605
606                 status = acpi_get_type(handle, &type);
607                 if ((ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE))
608                         return;
609
610                 status =
611                     acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
612                                           &temp);
613                 if (ACPI_SUCCESS(status)) {
614                         pci_id->device = ACPI_HIWORD(ACPI_LODWORD(temp));
615                         pci_id->function = ACPI_LOWORD(ACPI_LODWORD(temp));
616
617                         if (*is_bridge)
618                                 pci_id->bus = *bus_number;
619
620                         /* any nicer way to get bus number of bridge ? */
621                         status =
622                             acpi_os_read_pci_configuration(pci_id, 0x0e, &tu8,
623                                                            8);
624                         if (ACPI_SUCCESS(status)
625                             && ((tu8 & 0x7f) == 1 || (tu8 & 0x7f) == 2)) {
626                                 status =
627                                     acpi_os_read_pci_configuration(pci_id, 0x18,
628                                                                    &tu8, 8);
629                                 if (!ACPI_SUCCESS(status)) {
630                                         /* Certainly broken...  FIX ME */
631                                         return;
632                                 }
633                                 *is_bridge = 1;
634                                 pci_id->bus = tu8;
635                                 status =
636                                     acpi_os_read_pci_configuration(pci_id, 0x19,
637                                                                    &tu8, 8);
638                                 if (ACPI_SUCCESS(status)) {
639                                         *bus_number = tu8;
640                                 }
641                         } else
642                                 *is_bridge = 0;
643                 }
644         }
645 }
646
647 void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound  */
648                            acpi_handle chandle, /* current node */
649                            struct acpi_pci_id **id)
650 {
651         int is_bridge = 1;
652         u8 bus_number = (*id)->bus;
653
654         acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
655 }
656
657 static void acpi_os_execute_deferred(struct work_struct *work)
658 {
659         struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
660         if (!dpc) {
661                 printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
662                 return;
663         }
664
665         dpc->function(dpc->context);
666         kfree(dpc);
667
668         /* Yield cpu to notify thread */
669         cond_resched();
670
671         return;
672 }
673
674 static void acpi_os_execute_notify(struct work_struct *work)
675 {
676         struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
677
678         if (!dpc) {
679                 printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
680                 return;
681         }
682
683         dpc->function(dpc->context);
684
685         kfree(dpc);
686
687         return;
688 }
689
690 /*******************************************************************************
691  *
692  * FUNCTION:    acpi_os_execute
693  *
694  * PARAMETERS:  Type               - Type of the callback
695  *              Function           - Function to be executed
696  *              Context            - Function parameters
697  *
698  * RETURN:      Status
699  *
700  * DESCRIPTION: Depending on type, either queues function for deferred execution or
701  *              immediately executes function on a separate thread.
702  *
703  ******************************************************************************/
704
705 acpi_status acpi_os_execute(acpi_execute_type type,
706                             acpi_osd_exec_callback function, void *context)
707 {
708         acpi_status status = AE_OK;
709         struct acpi_os_dpc *dpc;
710
711         ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
712                           "Scheduling function [%p(%p)] for deferred execution.\n",
713                           function, context));
714
715         if (!function)
716                 return AE_BAD_PARAMETER;
717
718         /*
719          * Allocate/initialize DPC structure.  Note that this memory will be
720          * freed by the callee.  The kernel handles the work_struct list  in a
721          * way that allows us to also free its memory inside the callee.
722          * Because we may want to schedule several tasks with different
723          * parameters we can't use the approach some kernel code uses of
724          * having a static work_struct.
725          */
726
727         dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
728         if (!dpc)
729                 return_ACPI_STATUS(AE_NO_MEMORY);
730
731         dpc->function = function;
732         dpc->context = context;
733
734         if (type == OSL_NOTIFY_HANDLER) {
735                 INIT_WORK(&dpc->work, acpi_os_execute_notify);
736                 if (!queue_work(kacpi_notify_wq, &dpc->work)) {
737                         status = AE_ERROR;
738                         kfree(dpc);
739                 }
740         } else {
741                 INIT_WORK(&dpc->work, acpi_os_execute_deferred);
742                 if (!queue_work(kacpid_wq, &dpc->work)) {
743                         ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
744                                   "Call to queue_work() failed.\n"));
745                         status = AE_ERROR;
746                         kfree(dpc);
747                 }
748         }
749         return_ACPI_STATUS(status);
750 }
751
752 EXPORT_SYMBOL(acpi_os_execute);
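
/*
 * Hypothetical caller sketch, only to illustrate the contract described in
 * the function header above (names are assumed, not taken from this file):
 *
 *	static void example_callback(void *context)
 *	{
 *		struct acpi_device *device = context;
 *		(handle the event in process context)
 *	}
 *
 *	status = acpi_os_execute(OSL_NOTIFY_HANDLER, example_callback, device);
 *	if (ACPI_FAILURE(status))
 *		(nothing was queued; "device" is still owned by the caller)
 *
 * On success, the acpi_os_dpc wrapper allocated above is freed by
 * acpi_os_execute_deferred()/acpi_os_execute_notify() after the callback runs.
 */
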
753
754 void acpi_os_wait_events_complete(void *context)
755 {
756         flush_workqueue(kacpid_wq);
757 }
758
759 EXPORT_SYMBOL(acpi_os_wait_events_complete);
760
761 /*
762  * Allocate the memory for a spinlock and initialize it.
763  */
764 acpi_status acpi_os_create_lock(acpi_spinlock * handle)
765 {
766         spin_lock_init(*handle);
767
768         return AE_OK;
769 }
770
771 /*
772  * Deallocate the memory for a spinlock.
773  */
774 void acpi_os_delete_lock(acpi_spinlock handle)
775 {
776         return;
777 }
778
779 acpi_status
780 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
781 {
782         struct semaphore *sem = NULL;
783
784
785         sem = acpi_os_allocate(sizeof(struct semaphore));
786         if (!sem)
787                 return AE_NO_MEMORY;
788         memset(sem, 0, sizeof(struct semaphore));
789
790         sema_init(sem, initial_units);
791
792         *handle = (acpi_handle *) sem;
793
794         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
795                           *handle, initial_units));
796
797         return AE_OK;
798 }
799
800 EXPORT_SYMBOL(acpi_os_create_semaphore);
801
802 /*
803  * TODO: A better way to delete semaphores?  Linux doesn't have a
804  * 'delete_semaphore()' function -- may result in an invalid
805  * pointer dereference for non-synchronized consumers.  Should
806  * we at least check for blocked threads and signal/cancel them?
807  */
808
809 acpi_status acpi_os_delete_semaphore(acpi_handle handle)
810 {
811         struct semaphore *sem = (struct semaphore *)handle;
812
813
814         if (!sem)
815                 return AE_BAD_PARAMETER;
816
817         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
818
819         kfree(sem);
820         sem = NULL;
821
822         return AE_OK;
823 }
824
825 EXPORT_SYMBOL(acpi_os_delete_semaphore);
826
827 /*
828  * TODO: The kernel doesn't have a 'down_timeout' function -- had to
829  * improvise.  The process is to sleep for one scheduler quantum
830  * until the semaphore becomes available.  Downside is that this
831  * may result in starvation for timeout-based waits when there's
832  * lots of semaphore activity.
833  *
834  * TODO: Support for units > 1?
835  */
836 acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
837 {
838         acpi_status status = AE_OK;
839         struct semaphore *sem = (struct semaphore *)handle;
840         int ret = 0;
841
842
843         if (!sem || (units < 1))
844                 return AE_BAD_PARAMETER;
845
846         if (units > 1)
847                 return AE_SUPPORT;
848
849         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
850                           handle, units, timeout));
851
852         /*
853          * This can be called during resume with interrupts off.
854          * Like boot-time, we should be single threaded and will
855          * always get the lock if we try -- timeout or not.
856          * If this doesn't succeed, then we will oops courtesy of
857          * might_sleep() in down().
858          */
859         if (!down_trylock(sem))
860                 return AE_OK;
861
862         switch (timeout) {
863                 /*
864                  * No Wait:
865                  * --------
866                  * A zero timeout value indicates that we shouldn't wait - just
867                  * acquire the semaphore if available otherwise return AE_TIME
868                  * (a.k.a. 'would block').
869                  */
870         case 0:
871                 if (down_trylock(sem))
872                         status = AE_TIME;
873                 break;
874
875                 /*
876                  * Wait Indefinitely:
877                  * ------------------
878                  */
879         case ACPI_WAIT_FOREVER:
880                 down(sem);
881                 break;
882
883                 /*
884                  * Wait w/ Timeout:
885                  * ----------------
886                  */
887         default:
888                 // TODO: A better timeout algorithm?
889                 {
890                         int i = 0;
891                         static const int quantum_ms = 1000 / HZ;
892
893                         ret = down_trylock(sem);
894                         for (i = timeout; (i > 0 && ret != 0); i -= quantum_ms) {
895                                 schedule_timeout_interruptible(1);
896                                 ret = down_trylock(sem);
897                         }
898
899                         if (ret != 0)
900                                 status = AE_TIME;
901                 }
902                 break;
903         }
904
905         if (ACPI_FAILURE(status)) {
906                 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
907                                   "Failed to acquire semaphore[%p|%d|%d], %s",
908                                   handle, units, timeout,
909                                   acpi_format_exception(status)));
910         } else {
911                 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
912                                   "Acquired semaphore[%p|%d|%d]", handle,
913                                   units, timeout));
914         }
915
916         return status;
917 }
918
919 EXPORT_SYMBOL(acpi_os_wait_semaphore);
920
921 /*
922  * TODO: Support for units > 1?
923  */
924 acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
925 {
926         struct semaphore *sem = (struct semaphore *)handle;
927
928
929         if (!sem || (units < 1))
930                 return AE_BAD_PARAMETER;
931
932         if (units > 1)
933                 return AE_SUPPORT;
934
935         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
936                           units));
937
938         up(sem);
939
940         return AE_OK;
941 }
942
943 EXPORT_SYMBOL(acpi_os_signal_semaphore);
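
/*
 * Hypothetical usage sketch for the semaphore OSL primitives above (names
 * assumed; in practice ACPICA drives these calls internally):
 *
 *	acpi_handle sem;
 *
 *	if (ACPI_SUCCESS(acpi_os_create_semaphore(1, 1, &sem))) {
 *		if (ACPI_SUCCESS(acpi_os_wait_semaphore(sem, 1, 100))) {
 *			(critical section; waits at most ~100 ms above)
 *			acpi_os_signal_semaphore(sem, 1);
 *		}
 *		acpi_os_delete_semaphore(sem);
 *	}
 */
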
944
945 #ifdef ACPI_FUTURE_USAGE
946 u32 acpi_os_get_line(char *buffer)
947 {
948
949 #ifdef ENABLE_DEBUGGER
950         if (acpi_in_debugger) {
951                 u32 chars;
952
953                 kdb_read(buffer, sizeof(line_buf));
954
955                 /* remove the CR kdb includes */
956                 chars = strlen(buffer) - 1;
957                 buffer[chars] = '\0';
958         }
959 #endif
960
961         return 0;
962 }
963 #endif                          /*  ACPI_FUTURE_USAGE  */
964
965 acpi_status acpi_os_signal(u32 function, void *info)
966 {
967         switch (function) {
968         case ACPI_SIGNAL_FATAL:
969                 printk(KERN_ERR PREFIX "Fatal opcode executed\n");
970                 break;
971         case ACPI_SIGNAL_BREAKPOINT:
972                 /*
973                  * AML Breakpoint
974                  * ACPI spec. says to treat it as a NOP unless
975                  * you are debugging.  So if/when we integrate
976                  * AML debugger into the kernel debugger its
977                  * hook will go here.  But until then it is
978                  * not useful to print anything on breakpoints.
979                  */
980                 break;
981         default:
982                 break;
983         }
984
985         return AE_OK;
986 }
987
988 EXPORT_SYMBOL(acpi_os_signal);
989
990 static int __init acpi_os_name_setup(char *str)
991 {
992         char *p = acpi_os_name;
993         int count = ACPI_MAX_OVERRIDE_LEN - 1;
994
995         if (!str || !*str)
996                 return 0;
997
998         for (; count-- && str && *str; str++) {
999                 if (isalnum(*str) || *str == ' ' || *str == ':')
1000                         *p++ = *str;
1001                 else if (*str == '\'' || *str == '"')
1002                         continue;
1003                 else
1004                         break;
1005         }
1006         *p = 0;
1007
1008         return 1;
1009
1010 }
1011
1012 __setup("acpi_os_name=", acpi_os_name_setup);
1013
1014 static void __init set_osi_linux(unsigned int enable)
1015 {
1016         if (osi_linux.enable != enable) {
1017                 osi_linux.enable = enable;
1018                 printk(KERN_NOTICE PREFIX "%sed _OSI(Linux)\n",
1019                         enable ? "Add": "Delet");
1020         }
1021         return;
1022 }
1023
1024 static void __init acpi_cmdline_osi_linux(unsigned int enable)
1025 {
1026         osi_linux.cmdline = 1;  /* cmdline set the default */
1027         set_osi_linux(enable);
1028
1029         return;
1030 }
1031
1032 void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
1033 {
1034         osi_linux.dmi = 1;      /* DMI knows that this box asks OSI(Linux) */
1035
1036         printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
1037
1038         if (enable == -1)
1039                 return;
1040
1041         osi_linux.known = 1;    /* DMI knows which OSI(Linux) default needed */
1042
1043         set_osi_linux(enable);
1044
1045         return;
1046 }
1047
1048 /*
1049  * Modify the list of "OS Interfaces" reported to BIOS via _OSI
1050  *
1051  * empty string disables _OSI
1052  * string starting with '!' disables that string
1053  * otherwise string is added to list, augmenting built-in strings
1054  */
1055 static int __init acpi_osi_setup(char *str)
1056 {
1057         if (str == NULL || *str == '\0') {
1058                 printk(KERN_INFO PREFIX "_OSI method disabled\n");
1059                 acpi_gbl_create_osi_method = FALSE;
1060         } else if (!strcmp("!Linux", str)) {
1061                 acpi_cmdline_osi_linux(0);      /* !enable */
1062         } else if (*str == '!') {
1063                 if (acpi_osi_invalidate(++str) == AE_OK)
1064                         printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
1065         } else if (!strcmp("Linux", str)) {
1066                 acpi_cmdline_osi_linux(1);      /* enable */
1067         } else if (*osi_additional_string == '\0') {
1068                 strncpy(osi_additional_string, str, OSI_STRING_LENGTH_MAX);
1069                 printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
1070         }
1071
1072         return 1;
1073 }
1074
1075 __setup("acpi_osi=", acpi_osi_setup);
1076
1077 /* enable serialization to combat AE_ALREADY_EXISTS errors */
1078 static int __init acpi_serialize_setup(char *str)
1079 {
1080         printk(KERN_INFO PREFIX "serialize enabled\n");
1081
1082         acpi_gbl_all_methods_serialized = TRUE;
1083
1084         return 1;
1085 }
1086
1087 __setup("acpi_serialize", acpi_serialize_setup);
1088
1089 /*
1090  * Wake and Run-Time GPES are expected to be separate.
1091  * We disable wake-GPEs at run-time to prevent spurious
1092  * interrupts.
1093  *
1094  * However, if a system exists that shares Wake and
1095  * Run-time events on the same GPE this flag is available
1096  * to tell Linux to keep the wake-time GPEs enabled at run-time.
1097  */
1098 static int __init acpi_wake_gpes_always_on_setup(char *str)
1099 {
1100         printk(KERN_INFO PREFIX "wake GPEs not disabled\n");
1101
1102         acpi_gbl_leave_wake_gpes_disabled = FALSE;
1103
1104         return 1;
1105 }
1106
1107 __setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup);
1108
1109 /*
1110  * Acquire a spinlock.
1111  *
1112  * handle is a pointer to the spinlock_t.
1113  */
1114
1115 acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
1116 {
1117         acpi_cpu_flags flags;
1118         spin_lock_irqsave(lockp, flags);
1119         return flags;
1120 }
1121
1122 /*
1123  * Release a spinlock. See above.
1124  */
1125
1126 void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
1127 {
1128         spin_unlock_irqrestore(lockp, flags);
1129 }
1130
1131 #ifndef ACPI_USE_LOCAL_CACHE
1132
1133 /*******************************************************************************
1134  *
1135  * FUNCTION:    acpi_os_create_cache
1136  *
1137  * PARAMETERS:  name      - Ascii name for the cache
1138  *              size      - Size of each cached object
1139  *              depth     - Maximum depth of the cache (in objects) <ignored>
1140  *              cache     - Where the new cache object is returned
1141  *
1142  * RETURN:      status
1143  *
1144  * DESCRIPTION: Create a cache object
1145  *
1146  ******************************************************************************/
1147
1148 acpi_status
1149 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
1150 {
1151         *cache = kmem_cache_create(name, size, 0, 0, NULL);
1152         if (*cache == NULL)
1153                 return AE_ERROR;
1154         else
1155                 return AE_OK;
1156 }
1157
1158 /*******************************************************************************
1159  *
1160  * FUNCTION:    acpi_os_purge_cache
1161  *
1162  * PARAMETERS:  Cache           - Handle to cache object
1163  *
1164  * RETURN:      Status
1165  *
1166  * DESCRIPTION: Free all objects within the requested cache.
1167  *
1168  ******************************************************************************/
1169
1170 acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
1171 {
1172         kmem_cache_shrink(cache);
1173         return (AE_OK);
1174 }
1175
1176 /*******************************************************************************
1177  *
1178  * FUNCTION:    acpi_os_delete_cache
1179  *
1180  * PARAMETERS:  Cache           - Handle to cache object
1181  *
1182  * RETURN:      Status
1183  *
1184  * DESCRIPTION: Free all objects within the requested cache and delete the
1185  *              cache object.
1186  *
1187  ******************************************************************************/
1188
1189 acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
1190 {
1191         kmem_cache_destroy(cache);
1192         return (AE_OK);
1193 }
1194
1195 /*******************************************************************************
1196  *
1197  * FUNCTION:    acpi_os_release_object
1198  *
1199  * PARAMETERS:  Cache       - Handle to cache object
1200  *              Object      - The object to be released
1201  *
1202  * RETURN:      None
1203  *
1204  * DESCRIPTION: Release an object to the specified cache.  If cache is full,
1205  *              the object is deleted.
1206  *
1207  ******************************************************************************/
1208
1209 acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
1210 {
1211         kmem_cache_free(cache, object);
1212         return (AE_OK);
1213 }
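
/*
 * Hypothetical lifetime sketch for the cache wrappers above (names assumed;
 * ACPICA calls these internally when ACPI_USE_LOCAL_CACHE is not defined):
 *
 *	acpi_cache_t *cache;
 *
 *	if (ACPI_SUCCESS(acpi_os_create_cache("Acpi-Example", 64, 16, &cache))) {
 *		(objects are handed out and returned by ACPICA)
 *		acpi_os_purge_cache(cache);	(free all cached objects)
 *		acpi_os_delete_cache(cache);	(then drop the cache itself)
 *	}
 */
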
1214
1215 /**
1216  *      acpi_dmi_dump - dump DMI slots needed for blacklist entry
1217  *
1218  *      Returns 0 on success
1219  */
1220 int acpi_dmi_dump(void)
1221 {
1222
1223         if (!dmi_available)
1224                 return -1;
1225
1226         printk(KERN_NOTICE PREFIX "DMI System Vendor: %s\n",
1227                 dmi_get_slot(DMI_SYS_VENDOR));
1228         printk(KERN_NOTICE PREFIX "DMI Product Name: %s\n",
1229                 dmi_get_slot(DMI_PRODUCT_NAME));
1230         printk(KERN_NOTICE PREFIX "DMI Product Version: %s\n",
1231                 dmi_get_slot(DMI_PRODUCT_VERSION));
1232         printk(KERN_NOTICE PREFIX "DMI Board Name: %s\n",
1233                 dmi_get_slot(DMI_BOARD_NAME));
1234         printk(KERN_NOTICE PREFIX "DMI BIOS Vendor: %s\n",
1235                 dmi_get_slot(DMI_BIOS_VENDOR));
1236         printk(KERN_NOTICE PREFIX "DMI BIOS Date: %s\n",
1237                 dmi_get_slot(DMI_BIOS_DATE));
1238
1239         return 0;
1240 }
1241
1242
1243 /******************************************************************************
1244  *
1245  * FUNCTION:    acpi_os_validate_interface
1246  *
1247  * PARAMETERS:  interface           - Requested interface to be validated
1248  *
1249  * RETURN:      AE_OK if interface is supported, AE_SUPPORT otherwise
1250  *
1251  * DESCRIPTION: Match an interface string to the interfaces supported by the
1252  *              host. Strings originate from an AML call to the _OSI method.
1253  *
1254  *****************************************************************************/
1255
1256 acpi_status
1257 acpi_os_validate_interface (char *interface)
1258 {
1259         if (!strncmp(osi_additional_string, interface, OSI_STRING_LENGTH_MAX))
1260                 return AE_OK;
1261         if (!strcmp("Linux", interface)) {
1262
1263                 printk(KERN_NOTICE PREFIX
1264                         "BIOS _OSI(Linux) query %s%s\n",
1265                         osi_linux.enable ? "honored" : "ignored",
1266                         osi_linux.cmdline ? " via cmdline" :
1267                         osi_linux.dmi ? " via DMI" : "");
1268
1269                 if (!osi_linux.dmi) {
1270                         if (acpi_dmi_dump())
1271                                 printk(KERN_NOTICE PREFIX
1272                                         "[please extract dmidecode output]\n");
1273                         printk(KERN_NOTICE PREFIX
1274                                 "Please send DMI info above to "
1275                                 "linux-acpi@vger.kernel.org\n");
1276                 }
1277                 if (!osi_linux.known && !osi_linux.cmdline) {
1278                         printk(KERN_NOTICE PREFIX
1279                                 "If \"acpi_osi=%sLinux\" works better, "
1280                                 "please notify linux-acpi@vger.kernel.org\n",
1281                                 osi_linux.enable ? "!" : "");
1282                 }
1283
1284                 if (osi_linux.enable)
1285                         return AE_OK;
1286         }
1287         return AE_SUPPORT;
1288 }
1289
1290 /******************************************************************************
1291  *
1292  * FUNCTION:    acpi_os_validate_address
1293  *
1294  * PARAMETERS:  space_id             - ACPI space ID
1295  *              address             - Physical address
1296  *              length              - Address length
1297  *
1298  * RETURN:      AE_OK if address/length is valid for the space_id. Otherwise,
1299  *              should return AE_AML_ILLEGAL_ADDRESS.
1300  *
1301  * DESCRIPTION: Validate a system address via the host OS. Used to validate
1302  *              the addresses accessed by AML operation regions.
1303  *
1304  *****************************************************************************/
1305
1306 acpi_status
1307 acpi_os_validate_address (
1308     u8                   space_id,
1309     acpi_physical_address   address,
1310     acpi_size               length)
1311 {
1312
1313     return AE_OK;
1314 }
1315
1316 #endif