ACPICA: Add GPE support for dynamically loaded ACPI tables
[pandora-kernel.git] / drivers / acpi / acpica / evgpeblk.c
1 /******************************************************************************
2  *
3  * Module Name: evgpeblk - GPE block creation and initialization.
4  *
5  *****************************************************************************/
6
7 /*
8  * Copyright (C) 2000 - 2010, Intel Corp.
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions, and the following disclaimer,
16  *    without modification.
17  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18  *    substantially similar to the "NO WARRANTY" disclaimer below
19  *    ("Disclaimer") and any redistribution must be conditioned upon
20  *    including a substantially similar Disclaimer requirement for further
21  *    binary redistribution.
22  * 3. Neither the names of the above-listed copyright holders nor the names
23  *    of any contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * Alternatively, this software may be distributed under the terms of the
27  * GNU General Public License ("GPL") version 2 as published by the Free
28  * Software Foundation.
29  *
30  * NO WARRANTY
31  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41  * POSSIBILITY OF SUCH DAMAGES.
42  */
43
44 #include <acpi/acpi.h>
45 #include "accommon.h"
46 #include "acevents.h"
47 #include "acnamesp.h"
48 #include "acinterp.h"
49
50 #define _COMPONENT          ACPI_EVENTS
51 ACPI_MODULE_NAME("evgpeblk")
52
53 /* Local prototypes */
54 static acpi_status
55 acpi_ev_match_gpe_method(acpi_handle obj_handle,
56                          u32 level, void *obj_desc, void **return_value);
57
58 static acpi_status
59 acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
60                           u32 level, void *info, void **return_value);
61
62 static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
63                                                                interrupt_number);
64
65 static acpi_status
66 acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);
67
68 static acpi_status
69 acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
70                           u32 interrupt_number);
71
72 static acpi_status
73 acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block);
74
75 /*******************************************************************************
76  *
77  * FUNCTION:    acpi_ev_valid_gpe_event
78  *
79  * PARAMETERS:  gpe_event_info              - Info for this GPE
80  *
81  * RETURN:      TRUE if the gpe_event is valid
82  *
83  * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
84  *              Should be called only when the GPE lists are semaphore locked
85  *              and not subject to change.
86  *
87  ******************************************************************************/
88
89 u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
90 {
91         struct acpi_gpe_xrupt_info *gpe_xrupt_block;
92         struct acpi_gpe_block_info *gpe_block;
93
94         ACPI_FUNCTION_ENTRY();
95
96         /* No need for spin lock since we are not changing any list elements */
97
98         /* Walk the GPE interrupt levels */
99
100         gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
101         while (gpe_xrupt_block) {
102                 gpe_block = gpe_xrupt_block->gpe_block_list_head;
103
104                 /* Walk the GPE blocks on this interrupt level */
105
106                 while (gpe_block) {
107                         if ((&gpe_block->event_info[0] <= gpe_event_info) &&
108                             (&gpe_block->event_info[gpe_block->gpe_count] >
109                              gpe_event_info)) {
110                                 return (TRUE);
111                         }
112
113                         gpe_block = gpe_block->next;
114                 }
115
116                 gpe_xrupt_block = gpe_xrupt_block->next;
117         }
118
119         return (FALSE);
120 }
121
122 /*******************************************************************************
123  *
124  * FUNCTION:    acpi_ev_walk_gpe_list
125  *
126  * PARAMETERS:  gpe_walk_callback   - Routine called for each GPE block
127  *              Context             - Value passed to callback
128  *
129  * RETURN:      Status
130  *
131  * DESCRIPTION: Walk the GPE lists.
132  *
133  ******************************************************************************/
134
135 acpi_status
136 acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
137 {
138         struct acpi_gpe_block_info *gpe_block;
139         struct acpi_gpe_xrupt_info *gpe_xrupt_info;
140         acpi_status status = AE_OK;
141         acpi_cpu_flags flags;
142
143         ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
144
145         flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
146
147         /* Walk the interrupt level descriptor list */
148
149         gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
150         while (gpe_xrupt_info) {
151
152                 /* Walk all Gpe Blocks attached to this interrupt level */
153
154                 gpe_block = gpe_xrupt_info->gpe_block_list_head;
155                 while (gpe_block) {
156
157                         /* One callback per GPE block */
158
159                         status =
160                             gpe_walk_callback(gpe_xrupt_info, gpe_block,
161                                               context);
162                         if (ACPI_FAILURE(status)) {
163                                 if (status == AE_CTRL_END) {    /* Callback abort */
164                                         status = AE_OK;
165                                 }
166                                 goto unlock_and_exit;
167                         }
168
169                         gpe_block = gpe_block->next;
170                 }
171
172                 gpe_xrupt_info = gpe_xrupt_info->next;
173         }
174
175       unlock_and_exit:
176         acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
177         return_ACPI_STATUS(status);
178 }
179
180 /*******************************************************************************
181  *
182  * FUNCTION:    acpi_ev_delete_gpe_handlers
183  *
184  * PARAMETERS:  gpe_xrupt_info      - GPE Interrupt info
185  *              gpe_block           - Gpe Block info
186  *
187  * RETURN:      Status
188  *
189  * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
190  *              Used only prior to termination.
191  *
192  ******************************************************************************/
193
194 acpi_status
195 acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
196                             struct acpi_gpe_block_info *gpe_block,
197                             void *context)
198 {
199         struct acpi_gpe_event_info *gpe_event_info;
200         u32 i;
201         u32 j;
202
203         ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);
204
205         /* Examine each GPE Register within the block */
206
207         for (i = 0; i < gpe_block->register_count; i++) {
208
209                 /* Now look at the individual GPEs in this byte register */
210
211                 for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
212                         gpe_event_info = &gpe_block->event_info[((acpi_size) i *
213                                                                  ACPI_GPE_REGISTER_WIDTH)
214                                                                 + j];
215
216                         if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
217                             ACPI_GPE_DISPATCH_HANDLER) {
218                                 ACPI_FREE(gpe_event_info->dispatch.handler);
219                                 gpe_event_info->dispatch.handler = NULL;
220                                 gpe_event_info->flags &=
221                                     ~ACPI_GPE_DISPATCH_MASK;
222                         }
223                 }
224         }
225
226         return_ACPI_STATUS(AE_OK);
227 }
228
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_match_gpe_method
 *
 * PARAMETERS:  Callback from walk_namespace
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
 *              control method under the _GPE portion of the namespace.
 *              Extract the name and GPE type from the object, saving this
 *              information for quick lookup during GPE dispatch. Allows a
 *              per-owner_id evaluation if execute_by_owner_id is TRUE in the
 *              walk_info parameter block.
 *
 *              The name of each GPE control method is of the form:
 *              "_Lxx" or "_Exx", where:
 *                  L      - means that the GPE is level triggered
 *                  E      - means that the GPE is edge triggered
 *                  xx     - is the GPE number [in HEX]
 *
 * If walk_info->execute_by_owner_id is TRUE, we only examine GPE methods
 *    with that owner.
 * If walk_info->enable_this_gpe is TRUE, the GPE that is referred to by a GPE
 *    method is immediately enabled (Used for Load/load_table operators)
 *
 ******************************************************************************/

static acpi_status
acpi_ev_match_gpe_method(acpi_handle obj_handle,
			 u32 level, void *context, void **return_value)
{
	struct acpi_namespace_node *method_node =
	    ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle);
	struct acpi_gpe_walk_info *walk_info =
	    ACPI_CAST_PTR(struct acpi_gpe_walk_info, context);
	struct acpi_gpe_event_info *gpe_event_info;
	struct acpi_namespace_node *gpe_device;
	acpi_status status;
	u32 gpe_number;
	char name[ACPI_NAME_SIZE + 1];	/* 4-char ACPI name plus NUL terminator */
	u8 type;

	ACPI_FUNCTION_TRACE(ev_match_gpe_method);

	/* Check if requested owner_id matches this owner_id */

	if ((walk_info->execute_by_owner_id) &&
	    (method_node->owner_id != walk_info->owner_id)) {
		return_ACPI_STATUS(AE_OK);
	}

	/*
	 * Match and decode the _Lxx and _Exx GPE method names
	 *
	 * 1) Extract the method name and null terminate it
	 */
	ACPI_MOVE_32_TO_32(name, &method_node->name.integer);
	name[ACPI_NAME_SIZE] = 0;

	/* 2) Name must begin with an underscore */

	if (name[0] != '_') {
		return_ACPI_STATUS(AE_OK);	/* Ignore this method */
	}

	/*
	 * 3) Edge/Level determination is based on the 2nd character
	 *    of the method name
	 *
	 * NOTE: Default GPE type is RUNTIME only. Later, if a _PRW object is
	 * found that points to this GPE, the ACPI_GPE_CAN_WAKE flag is set.
	 */
	switch (name[1]) {
	case 'L':
		type = ACPI_GPE_LEVEL_TRIGGERED;
		break;

	case 'E':
		type = ACPI_GPE_EDGE_TRIGGERED;
		break;

	default:
		/* Unknown method type, just ignore it */

		ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
				  "Ignoring unknown GPE method type: %s "
				  "(name not of form _Lxx or _Exx)", name));
		return_ACPI_STATUS(AE_OK);
	}

	/* 4) The last two characters of the name are the hex GPE Number */

	gpe_number = ACPI_STRTOUL(&name[2], NULL, 16);
	if (gpe_number == ACPI_UINT32_MAX) {

		/* Conversion failed; invalid method, just ignore it */

		ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
				  "Could not extract GPE number from name: %s "
				  "(name is not of form _Lxx or _Exx)", name));
		return_ACPI_STATUS(AE_OK);
	}

	/* Ensure that we have a valid GPE number for this GPE block */

	gpe_event_info =
	    acpi_ev_low_get_gpe_info(gpe_number, walk_info->gpe_block);
	if (!gpe_event_info) {
		/*
		 * This gpe_number is not valid for this GPE block, just ignore it.
		 * However, it may be valid for a different GPE block, since GPE0
		 * and GPE1 methods both appear under \_GPE.
		 */
		return_ACPI_STATUS(AE_OK);
	}

	if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
	    ACPI_GPE_DISPATCH_HANDLER) {

		/* If there is already a handler, ignore this GPE method */

		return_ACPI_STATUS(AE_OK);
	}

	if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
	    ACPI_GPE_DISPATCH_METHOD) {
		/*
		 * If there is already a method, ignore this method. But check
		 * for a type mismatch (if both the _Lxx AND _Exx exist)
		 */
		if (type != (gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK)) {
			ACPI_ERROR((AE_INFO,
				    "For GPE 0x%.2X, found both _L%2.2X and _E%2.2X methods",
				    gpe_number, gpe_number, gpe_number));
		}
		return_ACPI_STATUS(AE_OK);
	}

	/*
	 * Add the GPE information from above to the gpe_event_info block for
	 * use during dispatch of this GPE.
	 */
	gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD);
	gpe_event_info->dispatch.method_node = method_node;

	/*
	 * Enable this GPE if requested. This only happens when during the
	 * execution of a Load or load_table operator. We have found a new
	 * GPE method and want to immediately enable the GPE if it is a
	 * runtime GPE.
	 */
	if (walk_info->enable_this_gpe) {

		/* Ignore GPEs that can wake the system */

		if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE) ||
		    !acpi_gbl_leave_wake_gpes_disabled) {
			walk_info->count++;
			gpe_device = walk_info->gpe_device;

			/* NULL gpe_device means "use the FADT-defined GPE block" */

			if (gpe_device == acpi_gbl_fadt_gpe_device) {
				gpe_device = NULL;
			}

			status = acpi_enable_gpe(gpe_device, gpe_number,
						 ACPI_GPE_TYPE_RUNTIME);
			if (ACPI_FAILURE(status)) {
				ACPI_EXCEPTION((AE_INFO, status,
						"Could not enable GPE 0x%02X",
						gpe_number));
			}
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
			  "Registered GPE method %s as GPE number 0x%.2X\n",
			  name, gpe_number));
	return_ACPI_STATUS(AE_OK);
}
409
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_match_prw_and_gpe
 *
 * PARAMETERS:  Callback from walk_namespace
 *
 * RETURN:      Status. NOTE: We ignore errors so that the _PRW walk is
 *              not aborted on a single _PRW failure.
 *
 * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
 *              Device. Run the _PRW method. If present, extract the GPE
 *              number and mark the GPE as a CAN_WAKE GPE. Allows a
 *              per-owner_id execution if execute_by_owner_id is TRUE in the
 *              walk_info parameter block.
 *
 * If walk_info->execute_by_owner_id is TRUE, we only execute _PRWs with that
 *    owner.
 * If walk_info->gpe_device is NULL, we execute every _PRW found. Otherwise,
 *    we only execute _PRWs that refer to the input gpe_device.
 *
 ******************************************************************************/

static acpi_status
acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
			  u32 level, void *context, void **return_value)
{
	struct acpi_gpe_walk_info *walk_info =
	    ACPI_CAST_PTR(struct acpi_gpe_walk_info, context);
	struct acpi_namespace_node *gpe_device;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_namespace_node *target_gpe_device;
	struct acpi_namespace_node *prw_node;
	struct acpi_gpe_event_info *gpe_event_info;
	union acpi_operand_object *pkg_desc;
	union acpi_operand_object *obj_desc;
	u32 gpe_number;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_match_prw_and_gpe);

	/* Check for a _PRW method under this device */

	status = acpi_ns_get_node(obj_handle, METHOD_NAME__PRW,
				  ACPI_NS_NO_UPSEARCH, &prw_node);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(AE_OK);	/* No _PRW, not a wake device */
	}

	/* Check if requested owner_id matches this owner_id */

	if ((walk_info->execute_by_owner_id) &&
	    (prw_node->owner_id != walk_info->owner_id)) {
		return_ACPI_STATUS(AE_OK);
	}

	/* Execute the _PRW */

	status = acpi_ut_evaluate_object(prw_node, NULL,
					 ACPI_BTYPE_PACKAGE, &pkg_desc);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(AE_OK);	/* Errors are deliberately ignored */
	}

	/* The returned _PRW package must have at least two elements */

	if (pkg_desc->package.count < 2) {
		goto cleanup;
	}

	/* Extract pointers from the input context */

	gpe_device = walk_info->gpe_device;
	gpe_block = walk_info->gpe_block;

	/*
	 * The _PRW object must return a package, we are only interested
	 * in the first element
	 */
	obj_desc = pkg_desc->package.elements[0];

	if (obj_desc->common.type == ACPI_TYPE_INTEGER) {

		/* Use FADT-defined GPE device (from definition of _PRW) */

		target_gpe_device = NULL;
		if (gpe_device) {
			target_gpe_device = acpi_gbl_fadt_gpe_device;
		}

		/* Integer is the GPE number in the FADT described GPE blocks */

		gpe_number = (u32) obj_desc->integer.value;
	} else if (obj_desc->common.type == ACPI_TYPE_PACKAGE) {

		/* Package contains a GPE reference and GPE number within a GPE block */

		if ((obj_desc->package.count < 2) ||
		    ((obj_desc->package.elements[0])->common.type !=
		     ACPI_TYPE_LOCAL_REFERENCE) ||
		    ((obj_desc->package.elements[1])->common.type !=
		     ACPI_TYPE_INTEGER)) {
			goto cleanup;	/* Malformed sub-package, ignore */
		}

		/* Get GPE block reference and decode */

		target_gpe_device =
		    obj_desc->package.elements[0]->reference.node;
		gpe_number = (u32) obj_desc->package.elements[1]->integer.value;
	} else {
		/* Unknown type, just ignore it */

		goto cleanup;
	}

	/* Get the gpe_event_info for this GPE */

	if (gpe_device) {
		/*
		 * Is this GPE within this block?
		 *
		 * TRUE if and only if these conditions are true:
		 *     1) The GPE devices match.
		 *     2) The GPE index(number) is within the range of the Gpe Block
		 *          associated with the GPE device.
		 */
		if (gpe_device != target_gpe_device) {
			goto cleanup;
		}

		gpe_event_info =
		    acpi_ev_low_get_gpe_info(gpe_number, gpe_block);
	} else {
		/* gpe_device is NULL, just match the target_device and gpe_number */

		gpe_event_info =
		    acpi_ev_get_gpe_event_info(target_gpe_device, gpe_number);
	}

	if (gpe_event_info) {
		if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {

			/* This GPE can wake the system */

			gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
			walk_info->count++;
		}
	}

      cleanup:
	acpi_ut_remove_reference(pkg_desc);
	return_ACPI_STATUS(AE_OK);
}
563
564 /*******************************************************************************
565  *
566  * FUNCTION:    acpi_ev_get_gpe_xrupt_block
567  *
568  * PARAMETERS:  interrupt_number     - Interrupt for a GPE block
569  *
570  * RETURN:      A GPE interrupt block
571  *
572  * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
573  *              block per unique interrupt level used for GPEs. Should be
574  *              called only when the GPE lists are semaphore locked and not
575  *              subject to change.
576  *
577  ******************************************************************************/
578
579 static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
580                                                                interrupt_number)
581 {
582         struct acpi_gpe_xrupt_info *next_gpe_xrupt;
583         struct acpi_gpe_xrupt_info *gpe_xrupt;
584         acpi_status status;
585         acpi_cpu_flags flags;
586
587         ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);
588
589         /* No need for lock since we are not changing any list elements here */
590
591         next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
592         while (next_gpe_xrupt) {
593                 if (next_gpe_xrupt->interrupt_number == interrupt_number) {
594                         return_PTR(next_gpe_xrupt);
595                 }
596
597                 next_gpe_xrupt = next_gpe_xrupt->next;
598         }
599
600         /* Not found, must allocate a new xrupt descriptor */
601
602         gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
603         if (!gpe_xrupt) {
604                 return_PTR(NULL);
605         }
606
607         gpe_xrupt->interrupt_number = interrupt_number;
608
609         /* Install new interrupt descriptor with spin lock */
610
611         flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
612         if (acpi_gbl_gpe_xrupt_list_head) {
613                 next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
614                 while (next_gpe_xrupt->next) {
615                         next_gpe_xrupt = next_gpe_xrupt->next;
616                 }
617
618                 next_gpe_xrupt->next = gpe_xrupt;
619                 gpe_xrupt->previous = next_gpe_xrupt;
620         } else {
621                 acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
622         }
623         acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
624
625         /* Install new interrupt handler if not SCI_INT */
626
627         if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
628                 status = acpi_os_install_interrupt_handler(interrupt_number,
629                                                            acpi_ev_gpe_xrupt_handler,
630                                                            gpe_xrupt);
631                 if (ACPI_FAILURE(status)) {
632                         ACPI_ERROR((AE_INFO,
633                                     "Could not install GPE interrupt handler at level 0x%X",
634                                     interrupt_number));
635                         return_PTR(NULL);
636                 }
637         }
638
639         return_PTR(gpe_xrupt);
640 }
641
642 /*******************************************************************************
643  *
644  * FUNCTION:    acpi_ev_delete_gpe_xrupt
645  *
646  * PARAMETERS:  gpe_xrupt       - A GPE interrupt info block
647  *
648  * RETURN:      Status
649  *
650  * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
651  *              interrupt handler if not the SCI interrupt.
652  *
653  ******************************************************************************/
654
655 static acpi_status
656 acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
657 {
658         acpi_status status;
659         acpi_cpu_flags flags;
660
661         ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);
662
663         /* We never want to remove the SCI interrupt handler */
664
665         if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
666                 gpe_xrupt->gpe_block_list_head = NULL;
667                 return_ACPI_STATUS(AE_OK);
668         }
669
670         /* Disable this interrupt */
671
672         status =
673             acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
674                                              acpi_ev_gpe_xrupt_handler);
675         if (ACPI_FAILURE(status)) {
676                 return_ACPI_STATUS(status);
677         }
678
679         /* Unlink the interrupt block with lock */
680
681         flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
682         if (gpe_xrupt->previous) {
683                 gpe_xrupt->previous->next = gpe_xrupt->next;
684         } else {
685                 /* No previous, update list head */
686
687                 acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
688         }
689
690         if (gpe_xrupt->next) {
691                 gpe_xrupt->next->previous = gpe_xrupt->previous;
692         }
693         acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
694
695         /* Free the block */
696
697         ACPI_FREE(gpe_xrupt);
698         return_ACPI_STATUS(AE_OK);
699 }
700
701 /*******************************************************************************
702  *
703  * FUNCTION:    acpi_ev_install_gpe_block
704  *
705  * PARAMETERS:  gpe_block               - New GPE block
706  *              interrupt_number        - Xrupt to be associated with this
707  *                                        GPE block
708  *
709  * RETURN:      Status
710  *
711  * DESCRIPTION: Install new GPE block with mutex support
712  *
713  ******************************************************************************/
714
715 static acpi_status
716 acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
717                           u32 interrupt_number)
718 {
719         struct acpi_gpe_block_info *next_gpe_block;
720         struct acpi_gpe_xrupt_info *gpe_xrupt_block;
721         acpi_status status;
722         acpi_cpu_flags flags;
723
724         ACPI_FUNCTION_TRACE(ev_install_gpe_block);
725
726         status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
727         if (ACPI_FAILURE(status)) {
728                 return_ACPI_STATUS(status);
729         }
730
731         gpe_xrupt_block = acpi_ev_get_gpe_xrupt_block(interrupt_number);
732         if (!gpe_xrupt_block) {
733                 status = AE_NO_MEMORY;
734                 goto unlock_and_exit;
735         }
736
737         /* Install the new block at the end of the list with lock */
738
739         flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
740         if (gpe_xrupt_block->gpe_block_list_head) {
741                 next_gpe_block = gpe_xrupt_block->gpe_block_list_head;
742                 while (next_gpe_block->next) {
743                         next_gpe_block = next_gpe_block->next;
744                 }
745
746                 next_gpe_block->next = gpe_block;
747                 gpe_block->previous = next_gpe_block;
748         } else {
749                 gpe_xrupt_block->gpe_block_list_head = gpe_block;
750         }
751
752         gpe_block->xrupt_block = gpe_xrupt_block;
753         acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
754
755       unlock_and_exit:
756         status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
757         return_ACPI_STATUS(status);
758 }
759
760 /*******************************************************************************
761  *
762  * FUNCTION:    acpi_ev_delete_gpe_block
763  *
764  * PARAMETERS:  gpe_block           - Existing GPE block
765  *
766  * RETURN:      Status
767  *
768  * DESCRIPTION: Remove a GPE block
769  *
770  ******************************************************************************/
771
772 acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
773 {
774         acpi_status status;
775         acpi_cpu_flags flags;
776
777         ACPI_FUNCTION_TRACE(ev_install_gpe_block);
778
779         status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
780         if (ACPI_FAILURE(status)) {
781                 return_ACPI_STATUS(status);
782         }
783
784         /* Disable all GPEs in this block */
785
786         status =
787             acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block, NULL);
788
789         if (!gpe_block->previous && !gpe_block->next) {
790
791                 /* This is the last gpe_block on this interrupt */
792
793                 status = acpi_ev_delete_gpe_xrupt(gpe_block->xrupt_block);
794                 if (ACPI_FAILURE(status)) {
795                         goto unlock_and_exit;
796                 }
797         } else {
798                 /* Remove the block on this interrupt with lock */
799
800                 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
801                 if (gpe_block->previous) {
802                         gpe_block->previous->next = gpe_block->next;
803                 } else {
804                         gpe_block->xrupt_block->gpe_block_list_head =
805                             gpe_block->next;
806                 }
807
808                 if (gpe_block->next) {
809                         gpe_block->next->previous = gpe_block->previous;
810                 }
811                 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
812         }
813
814         acpi_current_gpe_count -= gpe_block->gpe_count;
815
816         /* Free the gpe_block */
817
818         ACPI_FREE(gpe_block->register_info);
819         ACPI_FREE(gpe_block->event_info);
820         ACPI_FREE(gpe_block);
821
822       unlock_and_exit:
823         status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
824         return_ACPI_STATUS(status);
825 }
826
827 /*******************************************************************************
828  *
829  * FUNCTION:    acpi_ev_create_gpe_info_blocks
830  *
831  * PARAMETERS:  gpe_block   - New GPE block
832  *
833  * RETURN:      Status
834  *
835  * DESCRIPTION: Create the register_info and event_info blocks for this GPE block
836  *
837  ******************************************************************************/
838
839 static acpi_status
840 acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
841 {
842         struct acpi_gpe_register_info *gpe_register_info = NULL;
843         struct acpi_gpe_event_info *gpe_event_info = NULL;
844         struct acpi_gpe_event_info *this_event;
845         struct acpi_gpe_register_info *this_register;
846         u32 i;
847         u32 j;
848         acpi_status status;
849
850         ACPI_FUNCTION_TRACE(ev_create_gpe_info_blocks);
851
852         /* Allocate the GPE register information block */
853
854         gpe_register_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->
855                                                  register_count *
856                                                  sizeof(struct
857                                                         acpi_gpe_register_info));
858         if (!gpe_register_info) {
859                 ACPI_ERROR((AE_INFO,
860                             "Could not allocate the GpeRegisterInfo table"));
861                 return_ACPI_STATUS(AE_NO_MEMORY);
862         }
863
864         /*
865          * Allocate the GPE event_info block. There are eight distinct GPEs
866          * per register. Initialization to zeros is sufficient.
867          */
868         gpe_event_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->gpe_count *
869                                               sizeof(struct
870                                                      acpi_gpe_event_info));
871         if (!gpe_event_info) {
872                 ACPI_ERROR((AE_INFO,
873                             "Could not allocate the GpeEventInfo table"));
874                 status = AE_NO_MEMORY;
875                 goto error_exit;
876         }
877
878         /* Save the new Info arrays in the GPE block */
879
880         gpe_block->register_info = gpe_register_info;
881         gpe_block->event_info = gpe_event_info;
882
883         /*
884          * Initialize the GPE Register and Event structures. A goal of these
885          * tables is to hide the fact that there are two separate GPE register
886          * sets in a given GPE hardware block, the status registers occupy the
887          * first half, and the enable registers occupy the second half.
888          */
889         this_register = gpe_register_info;
890         this_event = gpe_event_info;
891
892         for (i = 0; i < gpe_block->register_count; i++) {
893
894                 /* Init the register_info for this GPE register (8 GPEs) */
895
896                 this_register->base_gpe_number =
897                     (u8) (gpe_block->block_base_number +
898                           (i * ACPI_GPE_REGISTER_WIDTH));
899
900                 this_register->status_address.address =
901                     gpe_block->block_address.address + i;
902
903                 this_register->enable_address.address =
904                     gpe_block->block_address.address + i +
905                     gpe_block->register_count;
906
907                 this_register->status_address.space_id =
908                     gpe_block->block_address.space_id;
909                 this_register->enable_address.space_id =
910                     gpe_block->block_address.space_id;
911                 this_register->status_address.bit_width =
912                     ACPI_GPE_REGISTER_WIDTH;
913                 this_register->enable_address.bit_width =
914                     ACPI_GPE_REGISTER_WIDTH;
915                 this_register->status_address.bit_offset = 0;
916                 this_register->enable_address.bit_offset = 0;
917
918                 /* Init the event_info for each GPE within this register */
919
920                 for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
921                         this_event->gpe_number =
922                             (u8) (this_register->base_gpe_number + j);
923                         this_event->register_info = this_register;
924                         this_event++;
925                 }
926
927                 /* Disable all GPEs within this register */
928
929                 status = acpi_hw_write(0x00, &this_register->enable_address);
930                 if (ACPI_FAILURE(status)) {
931                         goto error_exit;
932                 }
933
934                 /* Clear any pending GPE events within this register */
935
936                 status = acpi_hw_write(0xFF, &this_register->status_address);
937                 if (ACPI_FAILURE(status)) {
938                         goto error_exit;
939                 }
940
941                 this_register++;
942         }
943
944         return_ACPI_STATUS(AE_OK);
945
946       error_exit:
947         if (gpe_register_info) {
948                 ACPI_FREE(gpe_register_info);
949         }
950         if (gpe_event_info) {
951                 ACPI_FREE(gpe_event_info);
952         }
953
954         return_ACPI_STATUS(status);
955 }
956
957 /*******************************************************************************
958  *
959  * FUNCTION:    acpi_ev_create_gpe_block
960  *
961  * PARAMETERS:  gpe_device          - Handle to the parent GPE block
 *              gpe_block_address   - Address and space_id
963  *              register_count      - Number of GPE register pairs in the block
964  *              gpe_block_base_number - Starting GPE number for the block
965  *              interrupt_number    - H/W interrupt for the block
966  *              return_gpe_block    - Where the new block descriptor is returned
967  *
968  * RETURN:      Status
969  *
970  * DESCRIPTION: Create and Install a block of GPE registers. All GPEs within
971  *              the block are disabled at exit.
972  *              Note: Assumes namespace is locked.
973  *
974  ******************************************************************************/
975
976 acpi_status
977 acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
978                          struct acpi_generic_address *gpe_block_address,
979                          u32 register_count,
980                          u8 gpe_block_base_number,
981                          u32 interrupt_number,
982                          struct acpi_gpe_block_info **return_gpe_block)
983 {
984         acpi_status status;
985         struct acpi_gpe_block_info *gpe_block;
986         struct acpi_gpe_walk_info walk_info;
987
988         ACPI_FUNCTION_TRACE(ev_create_gpe_block);
989
990         if (!register_count) {
991                 return_ACPI_STATUS(AE_OK);
992         }
993
994         /* Allocate a new GPE block */
995
996         gpe_block = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_block_info));
997         if (!gpe_block) {
998                 return_ACPI_STATUS(AE_NO_MEMORY);
999         }
1000
1001         /* Initialize the new GPE block */
1002
1003         gpe_block->node = gpe_device;
1004         gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH);
1005         gpe_block->register_count = register_count;
1006         gpe_block->block_base_number = gpe_block_base_number;
1007
1008         ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address,
1009                     sizeof(struct acpi_generic_address));
1010
1011         /*
1012          * Create the register_info and event_info sub-structures
1013          * Note: disables and clears all GPEs in the block
1014          */
1015         status = acpi_ev_create_gpe_info_blocks(gpe_block);
1016         if (ACPI_FAILURE(status)) {
1017                 ACPI_FREE(gpe_block);
1018                 return_ACPI_STATUS(status);
1019         }
1020
1021         /* Install the new block in the global lists */
1022
1023         status = acpi_ev_install_gpe_block(gpe_block, interrupt_number);
1024         if (ACPI_FAILURE(status)) {
1025                 ACPI_FREE(gpe_block);
1026                 return_ACPI_STATUS(status);
1027         }
1028
1029         /* Find all GPE methods (_Lxx or_Exx) for this block */
1030
1031         walk_info.gpe_block = gpe_block;
1032         walk_info.gpe_device = gpe_device;
1033         walk_info.enable_this_gpe = FALSE;
1034         walk_info.execute_by_owner_id = FALSE;
1035
1036         status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
1037                                         ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
1038                                         acpi_ev_match_gpe_method, NULL,
1039                                         &walk_info, NULL);
1040
1041         /* Return the new block */
1042
1043         if (return_gpe_block) {
1044                 (*return_gpe_block) = gpe_block;
1045         }
1046
1047         ACPI_DEBUG_PRINT((ACPI_DB_INIT,
1048                           "GPE %02X to %02X [%4.4s] %u regs on int 0x%X\n",
1049                           (u32) gpe_block->block_base_number,
1050                           (u32) (gpe_block->block_base_number +
1051                                 (gpe_block->gpe_count - 1)),
1052                           gpe_device->name.ascii, gpe_block->register_count,
1053                           interrupt_number));
1054
1055         /* Update global count of currently available GPEs */
1056
1057         acpi_current_gpe_count += gpe_block->gpe_count;
1058         return_ACPI_STATUS(AE_OK);
1059 }
1060
1061 /*******************************************************************************
1062  *
1063  * FUNCTION:    acpi_ev_update_gpes
1064  *
1065  * PARAMETERS:  table_owner_id      - ID of the newly-loaded ACPI table
1066  *
1067  * RETURN:      None
1068  *
1069  * DESCRIPTION: Check for new GPE methods (_Lxx/_Exx) made available as a
1070  *              result of a Load() or load_table() operation. If new GPE
1071  *              methods have been installed, register the new methods and
 *              enable any runtime GPEs that are associated with them. Also,
1073  *              run any newly loaded _PRW methods in order to discover any
1074  *              new CAN_WAKE GPEs.
1075  *
1076  ******************************************************************************/
1077
1078 void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
1079 {
1080         struct acpi_gpe_xrupt_info *gpe_xrupt_info;
1081         struct acpi_gpe_block_info *gpe_block;
1082         struct acpi_gpe_walk_info walk_info;
1083         acpi_status status = AE_OK;
1084         u32 new_wake_gpe_count = 0;
1085
1086         /* We will examine only _PRW/_Lxx/_Exx methods owned by this table */
1087
1088         walk_info.owner_id = table_owner_id;
1089         walk_info.execute_by_owner_id = TRUE;
1090         walk_info.count = 0;
1091
1092         if (acpi_gbl_leave_wake_gpes_disabled) {
1093                 /*
1094                  * 1) Run any newly-loaded _PRW methods to find any GPEs that
1095                  * can now be marked as CAN_WAKE GPEs. Note: We must run the
1096                  * _PRW methods before we process the _Lxx/_Exx methods because
1097                  * we will enable all runtime GPEs associated with the new
1098                  * _Lxx/_Exx methods at the time we process those methods.
1099                  *
1100                  * Unlock interpreter so that we can run the _PRW methods.
1101                  */
1102                 walk_info.gpe_block = NULL;
1103                 walk_info.gpe_device = NULL;
1104
1105                 acpi_ex_exit_interpreter();
1106
1107                 status =
1108                     acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
1109                                            ACPI_UINT32_MAX,
1110                                            ACPI_NS_WALK_NO_UNLOCK,
1111                                            acpi_ev_match_prw_and_gpe, NULL,
1112                                            &walk_info, NULL);
1113                 if (ACPI_FAILURE(status)) {
1114                         ACPI_EXCEPTION((AE_INFO, status,
1115                                         "While executing _PRW methods"));
1116                 }
1117
1118                 acpi_ex_enter_interpreter();
1119                 new_wake_gpe_count = walk_info.count;
1120         }
1121
1122         /*
1123          * 2) Find any _Lxx/_Exx GPE methods that have just been loaded.
1124          *
1125          * Any GPEs that correspond to new _Lxx/_Exx methods and are not
1126          * marked as CAN_WAKE are immediately enabled.
1127          *
1128          * Examine the namespace underneath each gpe_device within the
1129          * gpe_block lists.
1130          */
1131         status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
1132         if (ACPI_FAILURE(status)) {
1133                 return;
1134         }
1135
1136         walk_info.count = 0;
1137         walk_info.enable_this_gpe = TRUE;
1138
1139         /* Walk the interrupt level descriptor list */
1140
1141         gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
1142         while (gpe_xrupt_info) {
1143
1144                 /* Walk all Gpe Blocks attached to this interrupt level */
1145
1146                 gpe_block = gpe_xrupt_info->gpe_block_list_head;
1147                 while (gpe_block) {
1148                         walk_info.gpe_block = gpe_block;
1149                         walk_info.gpe_device = gpe_block->node;
1150
1151                         status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD,
1152                                                         walk_info.gpe_device,
1153                                                         ACPI_UINT32_MAX,
1154                                                         ACPI_NS_WALK_NO_UNLOCK,
1155                                                         acpi_ev_match_gpe_method,
1156                                                         NULL, &walk_info, NULL);
1157                         if (ACPI_FAILURE(status)) {
1158                                 ACPI_EXCEPTION((AE_INFO, status,
1159                                                 "While decoding _Lxx/_Exx methods"));
1160                         }
1161
1162                         gpe_block = gpe_block->next;
1163                 }
1164
1165                 gpe_xrupt_info = gpe_xrupt_info->next;
1166         }
1167
1168         if (walk_info.count || new_wake_gpe_count) {
1169                 ACPI_INFO((AE_INFO,
1170                            "Enabled %u new runtime GPEs, added %u new wakeup GPEs",
1171                            walk_info.count, new_wake_gpe_count));
1172         }
1173
1174         (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
1175         return;
1176 }
1177
1178 /*******************************************************************************
1179  *
1180  * FUNCTION:    acpi_ev_initialize_gpe_block
1181  *
1182  * PARAMETERS:  gpe_device          - Handle to the parent GPE block
1183  *              gpe_block           - Gpe Block info
1184  *
1185  * RETURN:      Status
1186  *
1187  * DESCRIPTION: Initialize and enable a GPE block. First find and run any
 *              _PRW methods associated with the block, then enable the
1189  *              appropriate GPEs.
1190  *              Note: Assumes namespace is locked.
1191  *
1192  ******************************************************************************/
1193
1194 acpi_status
1195 acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
1196                              struct acpi_gpe_block_info *gpe_block)
1197 {
1198         acpi_status status;
1199         struct acpi_gpe_event_info *gpe_event_info;
1200         struct acpi_gpe_walk_info walk_info;
1201         u32 wake_gpe_count;
1202         u32 gpe_enabled_count;
1203         u32 gpe_index;
1204         u32 gpe_number;
1205         u32 i;
1206         u32 j;
1207
1208         ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);
1209
1210         /* Ignore a null GPE block (e.g., if no GPE block 1 exists) */
1211
1212         if (!gpe_block) {
1213                 return_ACPI_STATUS(AE_OK);
1214         }
1215
1216         /*
1217          * Runtime option: Should wake GPEs be enabled at runtime?  The default
1218          * is no, they should only be enabled just as the machine goes to sleep.
1219          */
1220         if (acpi_gbl_leave_wake_gpes_disabled) {
1221                 /*
1222                  * Differentiate runtime vs wake GPEs, via the _PRW control methods.
1223                  * Each GPE that has one or more _PRWs that reference it is by
1224                  * definition a wake GPE and will not be enabled while the machine
1225                  * is running.
1226                  */
1227                 walk_info.gpe_block = gpe_block;
1228                 walk_info.gpe_device = gpe_device;
1229                 walk_info.execute_by_owner_id = FALSE;
1230
1231                 status =
1232                     acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
1233                                            ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
1234                                            acpi_ev_match_prw_and_gpe, NULL,
1235                                            &walk_info, NULL);
1236                 if (ACPI_FAILURE(status)) {
1237                         ACPI_EXCEPTION((AE_INFO, status,
1238                                         "While executing _PRW methods"));
1239                 }
1240         }
1241
1242         /*
1243          * Enable all GPEs that have a corresponding method and are not
1244          * capable of generating wakeups. Any other GPEs within this block
1245          * must be enabled via the acpi_enable_gpe interface.
1246          */
1247         wake_gpe_count = 0;
1248         gpe_enabled_count = 0;
1249
1250         if (gpe_device == acpi_gbl_fadt_gpe_device) {
1251                 gpe_device = NULL;
1252         }
1253
1254         for (i = 0; i < gpe_block->register_count; i++) {
1255                 for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
1256
1257                         /* Get the info block for this particular GPE */
1258
1259                         gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
1260                         gpe_event_info = &gpe_block->event_info[gpe_index];
1261
1262                         if (gpe_event_info->flags & ACPI_GPE_CAN_WAKE) {
1263                                 wake_gpe_count++;
1264                                 if (acpi_gbl_leave_wake_gpes_disabled) {
1265                                         continue;
1266                                 }
1267                         }
1268
1269                         /* Ignore GPEs that have no corresponding _Lxx/_Exx method */
1270
1271                         if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD)) {
1272                                 continue;
1273                         }
1274
1275                         /* Enable this GPE */
1276
1277                         gpe_number = gpe_index + gpe_block->block_base_number;
1278                         status = acpi_enable_gpe(gpe_device, gpe_number,
1279                                                  ACPI_GPE_TYPE_RUNTIME);
1280                         if (ACPI_FAILURE(status)) {
1281                                 ACPI_EXCEPTION((AE_INFO, status,
1282                                                 "Could not enable GPE 0x%02X",
1283                                                 gpe_number));
1284                                 continue;
1285                         }
1286
1287                         gpe_enabled_count++;
1288                 }
1289         }
1290
1291         if (gpe_enabled_count || wake_gpe_count) {
1292                 ACPI_DEBUG_PRINT((ACPI_DB_INIT,
1293                                   "Enabled %u Runtime GPEs, added %u Wake GPEs in this block\n",
1294                                   gpe_enabled_count, wake_gpe_count));
1295         }
1296
1297         return_ACPI_STATUS(AE_OK);
1298 }
1299
1300 /*******************************************************************************
1301  *
1302  * FUNCTION:    acpi_ev_gpe_initialize
1303  *
1304  * PARAMETERS:  None
1305  *
1306  * RETURN:      Status
1307  *
1308  * DESCRIPTION: Initialize the GPE data structures
1309  *
1310  ******************************************************************************/
1311
acpi_status acpi_ev_gpe_initialize(void)
{
	u32 register_count0 = 0;
	u32 register_count1 = 0;
	u32 gpe_number_max = 0;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_gpe_initialize);

	/* Namespace must be locked while the FADT GPE blocks are created */

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/*
	 * Initialize the GPE Block(s) defined in the FADT
	 *
	 * Why the GPE register block lengths are divided by 2:  From the ACPI
	 * Spec, section "General-Purpose Event Registers", we have:
	 *
	 * "Each register block contains two registers of equal length
	 *  GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the
	 *  GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN
	 *  The length of the GPE1_STS and GPE1_EN registers is equal to
	 *  half the GPE1_LEN. If a generic register block is not supported
	 *  then its respective block pointer and block length values in the
	 *  FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need
	 *  to be the same size."
	 */

	/*
	 * Determine the maximum GPE number for this machine.
	 *
	 * Note: both GPE0 and GPE1 are optional, and either can exist without
	 * the other.
	 *
	 * If EITHER the register length OR the block address are zero, then that
	 * particular block is not supported.
	 */
	if (acpi_gbl_FADT.gpe0_block_length &&
	    acpi_gbl_FADT.xgpe0_block.address) {

		/* GPE block 0 exists (has both length and address > 0) */

		/* Length is in bytes; each STS/EN register pair covers 8 GPEs */

		register_count0 = (u16) (acpi_gbl_FADT.gpe0_block_length / 2);

		gpe_number_max =
		    (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;

		/* Install GPE Block 0 (always starts at GPE number zero) */

		status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
						  &acpi_gbl_FADT.xgpe0_block,
						  register_count0, 0,
						  acpi_gbl_FADT.sci_interrupt,
						  &acpi_gbl_gpe_fadt_blocks[0]);

		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Could not create GPE Block 0"));
		}
	}

	if (acpi_gbl_FADT.gpe1_block_length &&
	    acpi_gbl_FADT.xgpe1_block.address) {

		/* GPE block 1 exists (has both length and address > 0) */

		register_count1 = (u16) (acpi_gbl_FADT.gpe1_block_length / 2);

		/* Check for GPE0/GPE1 overlap (if both banks exist) */

		if ((register_count0) &&
		    (gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
			ACPI_ERROR((AE_INFO,
				    "GPE0 block (GPE 0 to %u) overlaps the GPE1 block "
				    "(GPE %u to %u) - Ignoring GPE1",
				    gpe_number_max, acpi_gbl_FADT.gpe1_base,
				    acpi_gbl_FADT.gpe1_base +
				    ((register_count1 *
				      ACPI_GPE_REGISTER_WIDTH) - 1)));

			/* Ignore GPE1 block by setting the register count to zero */

			register_count1 = 0;
		} else {
			/* Install GPE Block 1 */

			status =
			    acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
						     &acpi_gbl_FADT.xgpe1_block,
						     register_count1,
						     acpi_gbl_FADT.gpe1_base,
						     acpi_gbl_FADT.
						     sci_interrupt,
						     &acpi_gbl_gpe_fadt_blocks
						     [1]);

			if (ACPI_FAILURE(status)) {
				ACPI_EXCEPTION((AE_INFO, status,
						"Could not create GPE Block 1"));
			}

			/*
			 * GPE0 and GPE1 do not have to be contiguous in the GPE number
			 * space. However, GPE0 always starts at GPE number zero.
			 */
			gpe_number_max = acpi_gbl_FADT.gpe1_base +
			    ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
		}
	}

	/* Exit if there are no GPE registers */

	if ((register_count0 + register_count1) == 0) {

		/* GPEs are not required by ACPI, this is OK */

		ACPI_DEBUG_PRINT((ACPI_DB_INIT,
				  "There are no GPE blocks defined in the FADT\n"));
		status = AE_OK;
		goto cleanup;
	}

	/* Check for Max GPE number out-of-range */

	if (gpe_number_max > ACPI_GPE_MAX) {
		ACPI_ERROR((AE_INFO,
			    "Maximum GPE number from FADT is too large: 0x%X",
			    gpe_number_max));
		status = AE_BAD_VALUE;
		goto cleanup;
	}

      cleanup:
	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	/*
	 * NOTE(review): "status" values set on the error paths above (e.g.
	 * AE_BAD_VALUE) are discarded here and AE_OK is always returned --
	 * GPE setup problems appear to be treated as non-fatal to system
	 * init. Confirm this is intentional before changing.
	 */
	return_ACPI_STATUS(AE_OK);
}