ACPICA: Reformat comments, no functional changes
drivers/acpi/events/evgpeblk.c
1 /******************************************************************************
2 *
3 * Module Name: evgpeblk - GPE block creation and initialization.
4 *
5 *****************************************************************************/
6
7 /*
8 * Copyright (C) 2000 - 2008, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44 #include <acpi/acpi.h>
45 #include <acpi/acevents.h>
46 #include <acpi/acnamesp.h>
47
48 #define _COMPONENT ACPI_EVENTS
49 ACPI_MODULE_NAME("evgpeblk")
50
51 /* Local prototypes */
52 static acpi_status
53 acpi_ev_save_method_info(acpi_handle obj_handle,
54 u32 level, void *obj_desc, void **return_value);
55
56 static acpi_status
57 acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
58 u32 level, void *info, void **return_value);
59
60 static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
61 interrupt_number);
62
63 static acpi_status
64 acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);
65
66 static acpi_status
67 acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
68 u32 interrupt_number);
69
70 static acpi_status
71 acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block);
72
73 /*******************************************************************************
74 *
75 * FUNCTION: acpi_ev_valid_gpe_event
76 *
77 * PARAMETERS: gpe_event_info - Info for this GPE
78 *
79 * RETURN: TRUE if the gpe_event is valid
80 *
81 * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
82 * Should be called only when the GPE lists are semaphore locked
83 * and not subject to change.
84 *
85 ******************************************************************************/
86
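/*
 * Added explanatory note (not in the original source): each GPE block owns
 * register_count * 8 event_info entries, so a block with register_count == 2
 * owns &event_info[0] through &event_info[15]. The check below simply tests
 * whether the given gpe_event_info pointer falls inside one such range.
 */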
87 u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
88 {
89 struct acpi_gpe_xrupt_info *gpe_xrupt_block;
90 struct acpi_gpe_block_info *gpe_block;
91
92 ACPI_FUNCTION_ENTRY();
93
94 /* No need for spin lock since we are not changing any list elements */
95
96 /* Walk the GPE interrupt levels */
97
98 gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
99 while (gpe_xrupt_block) {
100 gpe_block = gpe_xrupt_block->gpe_block_list_head;
101
102 /* Walk the GPE blocks on this interrupt level */
103
104 while (gpe_block) {
105 if ((&gpe_block->event_info[0] <= gpe_event_info) &&
106 (&gpe_block->
107 event_info[((acpi_size) gpe_block->
108 register_count) * 8] >
109 gpe_event_info)) {
110 return (TRUE);
111 }
112
113 gpe_block = gpe_block->next;
114 }
115
116 gpe_xrupt_block = gpe_xrupt_block->next;
117 }
118
119 return (FALSE);
120 }
121
122 /*******************************************************************************
123 *
124 * FUNCTION: acpi_ev_walk_gpe_list
125 *
126 * PARAMETERS: gpe_walk_callback - Routine called for each GPE block
127 *
128 * RETURN: Status
129 *
130  * DESCRIPTION: Walk the GPE lists, invoking the callback once for each GPE block.
131 *
132 ******************************************************************************/
133
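/*
 * Added illustration (not in the original source): a minimal sketch of a
 * walk callback, assuming the acpi_gpe_callback signature implied by the
 * call below (one interrupt descriptor plus one GPE block per invocation).
 * Returning a failure status stops the walk early.
 *
 *   static acpi_status my_gpe_block_callback(
 *           struct acpi_gpe_xrupt_info *gpe_xrupt_info,
 *           struct acpi_gpe_block_info *gpe_block)
 *   {
 *           return (AE_OK);       (AE_OK continues to the next block)
 *   }
 *
 *   status = acpi_ev_walk_gpe_list(my_gpe_block_callback);
 */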
134 acpi_status acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback)
135 {
136 struct acpi_gpe_block_info *gpe_block;
137 struct acpi_gpe_xrupt_info *gpe_xrupt_info;
138 acpi_status status = AE_OK;
139 acpi_cpu_flags flags;
140
141 ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
142
143 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
144
145 /* Walk the interrupt level descriptor list */
146
147 gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
148 while (gpe_xrupt_info) {
149
150 /* Walk all Gpe Blocks attached to this interrupt level */
151
152 gpe_block = gpe_xrupt_info->gpe_block_list_head;
153 while (gpe_block) {
154
155 /* One callback per GPE block */
156
157 status = gpe_walk_callback(gpe_xrupt_info, gpe_block);
158 if (ACPI_FAILURE(status)) {
159 goto unlock_and_exit;
160 }
161
162 gpe_block = gpe_block->next;
163 }
164
165 gpe_xrupt_info = gpe_xrupt_info->next;
166 }
167
168 unlock_and_exit:
169 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
170 return_ACPI_STATUS(status);
171 }
172
173 /*******************************************************************************
174 *
175 * FUNCTION: acpi_ev_delete_gpe_handlers
176 *
177 * PARAMETERS: gpe_xrupt_info - GPE Interrupt info
178 * gpe_block - Gpe Block info
179 *
180 * RETURN: Status
181 *
182 * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
183 * Used only prior to termination.
184 *
185 ******************************************************************************/
186
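/*
 * Added note (not in the original source): the event for bit j of register i
 * lives at event_info[(i * ACPI_GPE_REGISTER_WIDTH) + j]; for example,
 * register 2, bit 5 maps to event_info[21] when the register width is 8.
 */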
187 acpi_status
188 acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
189 struct acpi_gpe_block_info *gpe_block)
190 {
191 struct acpi_gpe_event_info *gpe_event_info;
192 u32 i;
193 u32 j;
194
195 ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);
196
197 /* Examine each GPE Register within the block */
198
199 for (i = 0; i < gpe_block->register_count; i++) {
200
201 /* Now look at the individual GPEs in this byte register */
202
203 for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
204 gpe_event_info =
205 &gpe_block->
206 event_info[((acpi_size) i *
207 ACPI_GPE_REGISTER_WIDTH) + j];
208
209 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
210 ACPI_GPE_DISPATCH_HANDLER) {
211 ACPI_FREE(gpe_event_info->dispatch.handler);
212 gpe_event_info->dispatch.handler = NULL;
213 gpe_event_info->flags &=
214 ~ACPI_GPE_DISPATCH_MASK;
215 }
216 }
217 }
218
219 return_ACPI_STATUS(AE_OK);
220 }
221
222 /*******************************************************************************
223 *
224 * FUNCTION: acpi_ev_save_method_info
225 *
226 * PARAMETERS: Callback from walk_namespace
227 *
228 * RETURN: Status
229 *
230 * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
231 * control method under the _GPE portion of the namespace.
232 * Extract the name and GPE type from the object, saving this
233 * information for quick lookup during GPE dispatch
234 *
235 * The name of each GPE control method is of the form:
236 * "_Lxx" or "_Exx"
237 * Where:
238 * L - means that the GPE is level triggered
239 * E - means that the GPE is edge triggered
240 * xx - is the GPE number [in HEX]
241 *
242 ******************************************************************************/
243
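/*
 * Added illustration (not in the original source): decoding a GPE method
 * name. For a hypothetical method "_L1A", name[1] == 'L' selects
 * ACPI_GPE_LEVEL_TRIGGERED and the trailing hex digits give the GPE number:
 *
 *   gpe_number = ACPI_STRTOUL("1A", NULL, 16);      yields 0x1A
 *
 * A name such as "_E02" would instead select ACPI_GPE_EDGE_TRIGGERED for
 * GPE number 0x02.
 */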
244 static acpi_status
245 acpi_ev_save_method_info(acpi_handle obj_handle,
246 u32 level, void *obj_desc, void **return_value)
247 {
248 struct acpi_gpe_block_info *gpe_block = (void *)obj_desc;
249 struct acpi_gpe_event_info *gpe_event_info;
250 u32 gpe_number;
251 char name[ACPI_NAME_SIZE + 1];
252 u8 type;
253 acpi_status status;
254
255 ACPI_FUNCTION_TRACE(ev_save_method_info);
256
257 /*
258 * _Lxx and _Exx GPE method support
259 *
260 * 1) Extract the name from the object and convert to a string
261 */
262 ACPI_MOVE_32_TO_32(name,
263 &((struct acpi_namespace_node *)obj_handle)->name.
264 integer);
265 name[ACPI_NAME_SIZE] = 0;
266
267 /*
268 * 2) Edge/Level determination is based on the 2nd character
269 * of the method name
270 *
271 * NOTE: Default GPE type is RUNTIME. May be changed later to WAKE
272 * if a _PRW object is found that points to this GPE.
273 */
274 switch (name[1]) {
275 case 'L':
276 type = ACPI_GPE_LEVEL_TRIGGERED;
277 break;
278
279 case 'E':
280 type = ACPI_GPE_EDGE_TRIGGERED;
281 break;
282
283 default:
284 /* Unknown method type, just ignore it! */
285
286 ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
287 "Ignoring unknown GPE method type: %s (name not of form _Lxx or _Exx)",
288 name));
289 return_ACPI_STATUS(AE_OK);
290 }
291
292 /* Convert the last two characters of the name to the GPE Number */
293
294 gpe_number = ACPI_STRTOUL(&name[2], NULL, 16);
295 if (gpe_number == ACPI_UINT32_MAX) {
296
297 /* Conversion failed; invalid method, just ignore it */
298
299 ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
300 "Could not extract GPE number from name: %s (name is not of form _Lxx or _Exx)",
301 name));
302 return_ACPI_STATUS(AE_OK);
303 }
304
305 /* Ensure that we have a valid GPE number for this GPE block */
306
307 if ((gpe_number < gpe_block->block_base_number) ||
308 (gpe_number >=
309 (gpe_block->block_base_number +
310 (gpe_block->register_count * 8)))) {
311 /*
312 * Not valid for this GPE block, just ignore it. However, it may be
313 * valid for a different GPE block, since GPE0 and GPE1 methods both
314 * appear under \_GPE.
315 */
316 return_ACPI_STATUS(AE_OK);
317 }
318
319 /*
320 * Now we can add this information to the gpe_event_info block for use
321 * during dispatch of this GPE. Default type is RUNTIME, although this may
322 * change when the _PRW methods are executed later.
323 */
324 gpe_event_info =
325 &gpe_block->event_info[gpe_number - gpe_block->block_base_number];
326
327 gpe_event_info->flags = (u8)
328 (type | ACPI_GPE_DISPATCH_METHOD | ACPI_GPE_TYPE_RUNTIME);
329
330 gpe_event_info->dispatch.method_node =
331 (struct acpi_namespace_node *)obj_handle;
332
333 	/* Update enable mask, but do not enable the HW GPE yet */
334
335 status = acpi_ev_enable_gpe(gpe_event_info, FALSE);
336
337 ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
338 "Registered GPE method %s as GPE number 0x%.2X\n",
339 name, gpe_number));
340 return_ACPI_STATUS(status);
341 }
342
343 /*******************************************************************************
344 *
345 * FUNCTION: acpi_ev_match_prw_and_gpe
346 *
347 * PARAMETERS: Callback from walk_namespace
348 *
349 * RETURN: Status. NOTE: We ignore errors so that the _PRW walk is
350 * not aborted on a single _PRW failure.
351 *
352 * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
353 * Device. Run the _PRW method. If present, extract the GPE
354 * number and mark the GPE as a WAKE GPE.
355 *
356 ******************************************************************************/
357
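/*
 * Added illustration (not in the original source): the two _PRW forms this
 * routine accepts for the first package element. A bare integer, as in a
 * hypothetical Package () {0x0B, 0x03}, selects GPE number 0x0B within the
 * FADT-defined GPE blocks. Alternatively, a sub-package such as
 * Package () {Package () {\_SB.GPE2, 0x04}, 0x03} names an explicit GPE
 * block device plus a GPE index (0x04) within that block. The second outer
 * element (0x03, the deepest wake-capable sleep state) is not examined here.
 * The device path and numbers above are hypothetical examples.
 */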
358 static acpi_status
359 acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
360 u32 level, void *info, void **return_value)
361 {
362 struct acpi_gpe_walk_info *gpe_info = (void *)info;
363 struct acpi_namespace_node *gpe_device;
364 struct acpi_gpe_block_info *gpe_block;
365 struct acpi_namespace_node *target_gpe_device;
366 struct acpi_gpe_event_info *gpe_event_info;
367 union acpi_operand_object *pkg_desc;
368 union acpi_operand_object *obj_desc;
369 u32 gpe_number;
370 acpi_status status;
371
372 ACPI_FUNCTION_TRACE(ev_match_prw_and_gpe);
373
374 /* Check for a _PRW method under this device */
375
376 status = acpi_ut_evaluate_object(obj_handle, METHOD_NAME__PRW,
377 ACPI_BTYPE_PACKAGE, &pkg_desc);
378 if (ACPI_FAILURE(status)) {
379
380 /* Ignore all errors from _PRW, we don't want to abort the subsystem */
381
382 return_ACPI_STATUS(AE_OK);
383 }
384
385 /* The returned _PRW package must have at least two elements */
386
387 if (pkg_desc->package.count < 2) {
388 goto cleanup;
389 }
390
391 /* Extract pointers from the input context */
392
393 gpe_device = gpe_info->gpe_device;
394 gpe_block = gpe_info->gpe_block;
395
396 /*
397 * The _PRW object must return a package, we are only interested in the
398 * first element
399 */
400 obj_desc = pkg_desc->package.elements[0];
401
402 if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {
403
404 /* Use FADT-defined GPE device (from definition of _PRW) */
405
406 target_gpe_device = acpi_gbl_fadt_gpe_device;
407
408 /* Integer is the GPE number in the FADT described GPE blocks */
409
410 gpe_number = (u32) obj_desc->integer.value;
411 } else if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_PACKAGE) {
412
413 /* Package contains a GPE reference and GPE number within a GPE block */
414
415 if ((obj_desc->package.count < 2) ||
416 (ACPI_GET_OBJECT_TYPE(obj_desc->package.elements[0]) !=
417 ACPI_TYPE_LOCAL_REFERENCE)
418 || (ACPI_GET_OBJECT_TYPE(obj_desc->package.elements[1]) !=
419 ACPI_TYPE_INTEGER)) {
420 goto cleanup;
421 }
422
423 /* Get GPE block reference and decode */
424
425 target_gpe_device =
426 obj_desc->package.elements[0]->reference.node;
427 gpe_number = (u32) obj_desc->package.elements[1]->integer.value;
428 } else {
429 /* Unknown type, just ignore it */
430
431 goto cleanup;
432 }
433
434 /*
435 * Is this GPE within this block?
436 *
437 * TRUE if and only if these conditions are true:
438 * 1) The GPE devices match.
439 * 2) The GPE index(number) is within the range of the Gpe Block
440 * associated with the GPE device.
441 */
442 if ((gpe_device == target_gpe_device) &&
443 (gpe_number >= gpe_block->block_base_number) &&
444 (gpe_number <
445 gpe_block->block_base_number + (gpe_block->register_count * 8))) {
446 gpe_event_info =
447 &gpe_block->event_info[gpe_number -
448 gpe_block->block_base_number];
449
450 /* Mark GPE for WAKE-ONLY but WAKE_DISABLED */
451
452 gpe_event_info->flags &=
453 ~(ACPI_GPE_WAKE_ENABLED | ACPI_GPE_RUN_ENABLED);
454
455 status =
456 acpi_ev_set_gpe_type(gpe_event_info, ACPI_GPE_TYPE_WAKE);
457 if (ACPI_FAILURE(status)) {
458 goto cleanup;
459 }
460
461 status =
462 acpi_ev_update_gpe_enable_masks(gpe_event_info,
463 ACPI_GPE_DISABLE);
464 }
465
466 cleanup:
467 acpi_ut_remove_reference(pkg_desc);
468 return_ACPI_STATUS(AE_OK);
469 }
470
471 /*******************************************************************************
472 *
473 * FUNCTION: acpi_ev_get_gpe_xrupt_block
474 *
475 * PARAMETERS: interrupt_number - Interrupt for a GPE block
476 *
477 * RETURN: A GPE interrupt block
478 *
479 * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
480 * block per unique interrupt level used for GPEs. Should be
481 * called only when the GPE lists are semaphore locked and not
482 * subject to change.
483 *
484 ******************************************************************************/
485
486 static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
487 interrupt_number)
488 {
489 struct acpi_gpe_xrupt_info *next_gpe_xrupt;
490 struct acpi_gpe_xrupt_info *gpe_xrupt;
491 acpi_status status;
492 acpi_cpu_flags flags;
493
494 ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);
495
496 /* No need for lock since we are not changing any list elements here */
497
498 next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
499 while (next_gpe_xrupt) {
500 if (next_gpe_xrupt->interrupt_number == interrupt_number) {
501 return_PTR(next_gpe_xrupt);
502 }
503
504 next_gpe_xrupt = next_gpe_xrupt->next;
505 }
506
507 /* Not found, must allocate a new xrupt descriptor */
508
509 gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
510 if (!gpe_xrupt) {
511 return_PTR(NULL);
512 }
513
514 gpe_xrupt->interrupt_number = interrupt_number;
515
516 /* Install new interrupt descriptor with spin lock */
517
518 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
519 if (acpi_gbl_gpe_xrupt_list_head) {
520 next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
521 while (next_gpe_xrupt->next) {
522 next_gpe_xrupt = next_gpe_xrupt->next;
523 }
524
525 next_gpe_xrupt->next = gpe_xrupt;
526 gpe_xrupt->previous = next_gpe_xrupt;
527 } else {
528 acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
529 }
530 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
531
532 /* Install new interrupt handler if not SCI_INT */
533
534 if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
535 status = acpi_os_install_interrupt_handler(interrupt_number,
536 acpi_ev_gpe_xrupt_handler,
537 gpe_xrupt);
538 if (ACPI_FAILURE(status)) {
539 ACPI_ERROR((AE_INFO,
540 "Could not install GPE interrupt handler at level 0x%X",
541 interrupt_number));
542 return_PTR(NULL);
543 }
544 }
545
546 return_PTR(gpe_xrupt);
547 }
548
549 /*******************************************************************************
550 *
551 * FUNCTION: acpi_ev_delete_gpe_xrupt
552 *
553 * PARAMETERS: gpe_xrupt - A GPE interrupt info block
554 *
555 * RETURN: Status
556 *
557 * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
558 * interrupt handler if not the SCI interrupt.
559 *
560 ******************************************************************************/
561
562 static acpi_status
563 acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
564 {
565 acpi_status status;
566 acpi_cpu_flags flags;
567
568 ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);
569
570 /* We never want to remove the SCI interrupt handler */
571
572 if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
573 gpe_xrupt->gpe_block_list_head = NULL;
574 return_ACPI_STATUS(AE_OK);
575 }
576
577 /* Disable this interrupt */
578
579 status =
580 acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
581 acpi_ev_gpe_xrupt_handler);
582 if (ACPI_FAILURE(status)) {
583 return_ACPI_STATUS(status);
584 }
585
586 /* Unlink the interrupt block with lock */
587
588 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
589 if (gpe_xrupt->previous) {
590 gpe_xrupt->previous->next = gpe_xrupt->next;
591 } else {
592 /* No previous, update list head */
593
594 acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
595 }
596
597 if (gpe_xrupt->next) {
598 gpe_xrupt->next->previous = gpe_xrupt->previous;
599 }
600 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
601
602 /* Free the block */
603
604 ACPI_FREE(gpe_xrupt);
605 return_ACPI_STATUS(AE_OK);
606 }
607
608 /*******************************************************************************
609 *
610 * FUNCTION: acpi_ev_install_gpe_block
611 *
612 * PARAMETERS: gpe_block - New GPE block
613 * interrupt_number - Xrupt to be associated with this
614 * GPE block
615 *
616 * RETURN: Status
617 *
618 * DESCRIPTION: Install new GPE block with mutex support
619 *
620 ******************************************************************************/
621
622 static acpi_status
623 acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
624 u32 interrupt_number)
625 {
626 struct acpi_gpe_block_info *next_gpe_block;
627 struct acpi_gpe_xrupt_info *gpe_xrupt_block;
628 acpi_status status;
629 acpi_cpu_flags flags;
630
631 ACPI_FUNCTION_TRACE(ev_install_gpe_block);
632
633 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
634 if (ACPI_FAILURE(status)) {
635 return_ACPI_STATUS(status);
636 }
637
638 gpe_xrupt_block = acpi_ev_get_gpe_xrupt_block(interrupt_number);
639 if (!gpe_xrupt_block) {
640 status = AE_NO_MEMORY;
641 goto unlock_and_exit;
642 }
643
644 /* Install the new block at the end of the list with lock */
645
646 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
647 if (gpe_xrupt_block->gpe_block_list_head) {
648 next_gpe_block = gpe_xrupt_block->gpe_block_list_head;
649 while (next_gpe_block->next) {
650 next_gpe_block = next_gpe_block->next;
651 }
652
653 next_gpe_block->next = gpe_block;
654 gpe_block->previous = next_gpe_block;
655 } else {
656 gpe_xrupt_block->gpe_block_list_head = gpe_block;
657 }
658
659 gpe_block->xrupt_block = gpe_xrupt_block;
660 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
661
662 unlock_and_exit:
663 status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
664 return_ACPI_STATUS(status);
665 }
666
667 /*******************************************************************************
668 *
669 * FUNCTION: acpi_ev_delete_gpe_block
670 *
671 * PARAMETERS: gpe_block - Existing GPE block
672 *
673 * RETURN: Status
674 *
675 * DESCRIPTION: Remove a GPE block
676 *
677 ******************************************************************************/
678
679 acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
680 {
681 acpi_status status;
682 acpi_cpu_flags flags;
683
684 	ACPI_FUNCTION_TRACE(ev_delete_gpe_block);
685
686 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
687 if (ACPI_FAILURE(status)) {
688 return_ACPI_STATUS(status);
689 }
690
691 /* Disable all GPEs in this block */
692
693 status = acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block);
694
695 if (!gpe_block->previous && !gpe_block->next) {
696
697 /* This is the last gpe_block on this interrupt */
698
699 status = acpi_ev_delete_gpe_xrupt(gpe_block->xrupt_block);
700 if (ACPI_FAILURE(status)) {
701 goto unlock_and_exit;
702 }
703 } else {
704 /* Remove the block on this interrupt with lock */
705
706 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
707 if (gpe_block->previous) {
708 gpe_block->previous->next = gpe_block->next;
709 } else {
710 gpe_block->xrupt_block->gpe_block_list_head =
711 gpe_block->next;
712 }
713
714 if (gpe_block->next) {
715 gpe_block->next->previous = gpe_block->previous;
716 }
717 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
718 }
719
720 /* Free the gpe_block */
721
722 ACPI_FREE(gpe_block->register_info);
723 ACPI_FREE(gpe_block->event_info);
724 ACPI_FREE(gpe_block);
725
726 unlock_and_exit:
727 status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
728 return_ACPI_STATUS(status);
729 }
730
731 /*******************************************************************************
732 *
733 * FUNCTION: acpi_ev_create_gpe_info_blocks
734 *
735 * PARAMETERS: gpe_block - New GPE block
736 *
737 * RETURN: Status
738 *
739 * DESCRIPTION: Create the register_info and event_info blocks for this GPE block
740 *
741 ******************************************************************************/
742
743 static acpi_status
744 acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
745 {
746 struct acpi_gpe_register_info *gpe_register_info = NULL;
747 struct acpi_gpe_event_info *gpe_event_info = NULL;
748 struct acpi_gpe_event_info *this_event;
749 struct acpi_gpe_register_info *this_register;
750 u32 i;
751 u32 j;
752 acpi_status status;
753
754 ACPI_FUNCTION_TRACE(ev_create_gpe_info_blocks);
755
756 /* Allocate the GPE register information block */
757
758 gpe_register_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->
759 register_count *
760 sizeof(struct
761 acpi_gpe_register_info));
762 if (!gpe_register_info) {
763 ACPI_ERROR((AE_INFO,
764 "Could not allocate the GpeRegisterInfo table"));
765 return_ACPI_STATUS(AE_NO_MEMORY);
766 }
767
768 /*
769 * Allocate the GPE event_info block. There are eight distinct GPEs
770 * per register. Initialization to zeros is sufficient.
771 */
772 gpe_event_info = ACPI_ALLOCATE_ZEROED(((acpi_size) gpe_block->
773 register_count *
774 ACPI_GPE_REGISTER_WIDTH) *
775 sizeof(struct
776 acpi_gpe_event_info));
777 if (!gpe_event_info) {
778 ACPI_ERROR((AE_INFO,
779 "Could not allocate the GpeEventInfo table"));
780 status = AE_NO_MEMORY;
781 goto error_exit;
782 }
783
784 /* Save the new Info arrays in the GPE block */
785
786 gpe_block->register_info = gpe_register_info;
787 gpe_block->event_info = gpe_event_info;
788
789 /*
790 * Initialize the GPE Register and Event structures. A goal of these
791 * tables is to hide the fact that there are two separate GPE register
792 * sets in a given GPE hardware block, the status registers occupy the
793 * first half, and the enable registers occupy the second half.
794 */
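	/*
	 * Added worked example (not in the original source): assuming a
	 * hypothetical GPE0 block at address 0x1020 with register_count == 4,
	 * the loop below produces:
	 *
	 *   status_address = 0x1020 + i          (0x1020 .. 0x1023)
	 *   enable_address = 0x1020 + i + 4      (0x1024 .. 0x1027)
	 *
	 * i.e. the four status registers occupy the first half of the block
	 * and the four enable registers occupy the second half.
	 */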
795 this_register = gpe_register_info;
796 this_event = gpe_event_info;
797
798 for (i = 0; i < gpe_block->register_count; i++) {
799
800 /* Init the register_info for this GPE register (8 GPEs) */
801
802 this_register->base_gpe_number =
803 (u8) (gpe_block->block_base_number +
804 (i * ACPI_GPE_REGISTER_WIDTH));
805
806 this_register->status_address.address =
807 gpe_block->block_address.address + i;
808
809 this_register->enable_address.address =
810 gpe_block->block_address.address + i +
811 gpe_block->register_count;
812
813 this_register->status_address.space_id =
814 gpe_block->block_address.space_id;
815 this_register->enable_address.space_id =
816 gpe_block->block_address.space_id;
817 this_register->status_address.bit_width =
818 ACPI_GPE_REGISTER_WIDTH;
819 this_register->enable_address.bit_width =
820 ACPI_GPE_REGISTER_WIDTH;
821 this_register->status_address.bit_offset =
822 ACPI_GPE_REGISTER_WIDTH;
823 this_register->enable_address.bit_offset =
824 ACPI_GPE_REGISTER_WIDTH;
825
826 /* Init the event_info for each GPE within this register */
827
828 for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
829 this_event->gpe_number =
830 (u8) (this_register->base_gpe_number + j);
831 this_event->register_info = this_register;
832 this_event++;
833 }
834
835 /* Disable all GPEs within this register */
836
837 status = acpi_hw_low_level_write(ACPI_GPE_REGISTER_WIDTH, 0x00,
838 &this_register->
839 enable_address);
840 if (ACPI_FAILURE(status)) {
841 goto error_exit;
842 }
843
844 /* Clear any pending GPE events within this register */
845
846 status = acpi_hw_low_level_write(ACPI_GPE_REGISTER_WIDTH, 0xFF,
847 &this_register->
848 status_address);
849 if (ACPI_FAILURE(status)) {
850 goto error_exit;
851 }
852
853 this_register++;
854 }
855
856 return_ACPI_STATUS(AE_OK);
857
858 error_exit:
859 if (gpe_register_info) {
860 ACPI_FREE(gpe_register_info);
861 }
862 if (gpe_event_info) {
863 ACPI_FREE(gpe_event_info);
864 }
865
866 return_ACPI_STATUS(status);
867 }
868
869 /*******************************************************************************
870 *
871 * FUNCTION: acpi_ev_create_gpe_block
872 *
873 * PARAMETERS: gpe_device - Handle to the parent GPE block
874  *              gpe_block_address   - Address and space_id
875 * register_count - Number of GPE register pairs in the block
876 * gpe_block_base_number - Starting GPE number for the block
877 * interrupt_number - H/W interrupt for the block
878 * return_gpe_block - Where the new block descriptor is returned
879 *
880 * RETURN: Status
881 *
882 * DESCRIPTION: Create and Install a block of GPE registers. All GPEs within
883 * the block are disabled at exit.
884 * Note: Assumes namespace is locked.
885 *
886 ******************************************************************************/
887
888 acpi_status
889 acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
890 struct acpi_generic_address *gpe_block_address,
891 u32 register_count,
892 u8 gpe_block_base_number,
893 u32 interrupt_number,
894 struct acpi_gpe_block_info **return_gpe_block)
895 {
896 acpi_status status;
897 struct acpi_gpe_block_info *gpe_block;
898
899 ACPI_FUNCTION_TRACE(ev_create_gpe_block);
900
901 if (!register_count) {
902 return_ACPI_STATUS(AE_OK);
903 }
904
905 /* Allocate a new GPE block */
906
907 gpe_block = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_block_info));
908 if (!gpe_block) {
909 return_ACPI_STATUS(AE_NO_MEMORY);
910 }
911
912 /* Initialize the new GPE block */
913
914 gpe_block->node = gpe_device;
915 gpe_block->register_count = register_count;
916 gpe_block->block_base_number = gpe_block_base_number;
917
918 ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address,
919 sizeof(struct acpi_generic_address));
920
921 /*
922 * Create the register_info and event_info sub-structures
923 * Note: disables and clears all GPEs in the block
924 */
925 status = acpi_ev_create_gpe_info_blocks(gpe_block);
926 if (ACPI_FAILURE(status)) {
927 ACPI_FREE(gpe_block);
928 return_ACPI_STATUS(status);
929 }
930
931 /* Install the new block in the global lists */
932
933 status = acpi_ev_install_gpe_block(gpe_block, interrupt_number);
934 if (ACPI_FAILURE(status)) {
935 ACPI_FREE(gpe_block);
936 return_ACPI_STATUS(status);
937 }
938
939 /* Find all GPE methods (_Lxx, _Exx) for this block */
940
941 status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
942 ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
943 acpi_ev_save_method_info, gpe_block,
944 NULL);
945
946 /* Return the new block */
947
948 if (return_gpe_block) {
949 (*return_gpe_block) = gpe_block;
950 }
951
952 ACPI_DEBUG_PRINT((ACPI_DB_INIT,
953 "GPE %02X to %02X [%4.4s] %u regs on int 0x%X\n",
954 (u32) gpe_block->block_base_number,
955 (u32) (gpe_block->block_base_number +
956 ((gpe_block->register_count *
957 ACPI_GPE_REGISTER_WIDTH) - 1)),
958 gpe_device->name.ascii, gpe_block->register_count,
959 interrupt_number));
960
961 return_ACPI_STATUS(AE_OK);
962 }
963
964 /*******************************************************************************
965 *
966 * FUNCTION: acpi_ev_initialize_gpe_block
967 *
968 * PARAMETERS: gpe_device - Handle to the parent GPE block
969 * gpe_block - Gpe Block info
970 *
971 * RETURN: Status
972 *
973 * DESCRIPTION: Initialize and enable a GPE block. First find and run any
974  *              _PRW methods associated with the block, then enable the
975 * appropriate GPEs.
976 * Note: Assumes namespace is locked.
977 *
978 ******************************************************************************/
979
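/*
 * Added note (not in the original source): GPEs that have an _Lxx or _Exx
 * method but are not referenced by any _PRW remain ACPI_GPE_TYPE_RUNTIME and
 * are enabled by this routine. GPEs named by a _PRW are retyped to
 * ACPI_GPE_TYPE_WAKE by acpi_ev_match_prw_and_gpe() and are left disabled
 * here when acpi_gbl_leave_wake_gpes_disabled is set (the default).
 */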
980 acpi_status
981 acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
982 struct acpi_gpe_block_info *gpe_block)
983 {
984 acpi_status status;
985 struct acpi_gpe_event_info *gpe_event_info;
986 struct acpi_gpe_walk_info gpe_info;
987 u32 wake_gpe_count;
988 u32 gpe_enabled_count;
989 u32 i;
990 u32 j;
991
992 ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);
993
994 /* Ignore a null GPE block (e.g., if no GPE block 1 exists) */
995
996 if (!gpe_block) {
997 return_ACPI_STATUS(AE_OK);
998 }
999
1000 /*
1001 * Runtime option: Should wake GPEs be enabled at runtime? The default
1002 * is no, they should only be enabled just as the machine goes to sleep.
1003 */
1004 if (acpi_gbl_leave_wake_gpes_disabled) {
1005 /*
1006 * Differentiate runtime vs wake GPEs, via the _PRW control methods.
1007 * Each GPE that has one or more _PRWs that reference it is by
1008 * definition a wake GPE and will not be enabled while the machine
1009 * is running.
1010 */
1011 gpe_info.gpe_block = gpe_block;
1012 gpe_info.gpe_device = gpe_device;
1013
1014 status =
1015 acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
1016 ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
1017 acpi_ev_match_prw_and_gpe, &gpe_info,
1018 NULL);
1019 }
1020
1021 /*
1022 * Enable all GPEs in this block that have these attributes:
1023 * 1) are "runtime" or "run/wake" GPEs, and
1024 * 2) have a corresponding _Lxx or _Exx method
1025 *
1026 * Any other GPEs within this block must be enabled via the acpi_enable_gpe()
1027 * external interface.
1028 */
1029 wake_gpe_count = 0;
1030 gpe_enabled_count = 0;
1031
1032 for (i = 0; i < gpe_block->register_count; i++) {
1033 for (j = 0; j < 8; j++) {
1034
1035 /* Get the info block for this particular GPE */
1036
1037 gpe_event_info =
1038 &gpe_block->
1039 event_info[((acpi_size) i *
1040 ACPI_GPE_REGISTER_WIDTH) + j];
1041
1042 if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
1043 ACPI_GPE_DISPATCH_METHOD)
1044 && (gpe_event_info->flags & ACPI_GPE_TYPE_RUNTIME)) {
1045 gpe_enabled_count++;
1046 }
1047
1048 if (gpe_event_info->flags & ACPI_GPE_TYPE_WAKE) {
1049 wake_gpe_count++;
1050 }
1051 }
1052 }
1053
1054 ACPI_DEBUG_PRINT((ACPI_DB_INIT,
1055 "Found %u Wake, Enabled %u Runtime GPEs in this block\n",
1056 wake_gpe_count, gpe_enabled_count));
1057
1058 /* Enable all valid runtime GPEs found above */
1059
1060 status = acpi_hw_enable_runtime_gpe_block(NULL, gpe_block);
1061 if (ACPI_FAILURE(status)) {
1062 ACPI_ERROR((AE_INFO, "Could not enable GPEs in GpeBlock %p",
1063 gpe_block));
1064 }
1065
1066 return_ACPI_STATUS(status);
1067 }
1068
1069 /*******************************************************************************
1070 *
1071 * FUNCTION: acpi_ev_gpe_initialize
1072 *
1073 * PARAMETERS: None
1074 *
1075 * RETURN: Status
1076 *
1077 * DESCRIPTION: Initialize the GPE data structures
1078 *
1079 ******************************************************************************/
1080
1081 acpi_status acpi_ev_gpe_initialize(void)
1082 {
1083 u32 register_count0 = 0;
1084 u32 register_count1 = 0;
1085 u32 gpe_number_max = 0;
1086 acpi_status status;
1087
1088 ACPI_FUNCTION_TRACE(ev_gpe_initialize);
1089
1090 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
1091 if (ACPI_FAILURE(status)) {
1092 return_ACPI_STATUS(status);
1093 }
1094
1095 /*
1096 * Initialize the GPE Block(s) defined in the FADT
1097 *
1098 * Why the GPE register block lengths are divided by 2: From the ACPI Spec,
1099 * section "General-Purpose Event Registers", we have:
1100 *
1101 * "Each register block contains two registers of equal length
1102 * GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the
1103  *      GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN.
1104 * The length of the GPE1_STS and GPE1_EN registers is equal to
1105 * half the GPE1_LEN. If a generic register block is not supported
1106 * then its respective block pointer and block length values in the
1107 * FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need
1108 * to be the same size."
1109 */
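	/*
	 * Added worked example (not in the original source): a hypothetical
	 * FADT reporting GPE0_BLK_LEN == 8 bytes describes 4 status bytes plus
	 * 4 enable bytes, so register_count0 == 8 / 2 == 4 below, covering
	 * GPE numbers 0x00 through 0x1F (gpe_number_max == 4 * 8 - 1 == 31).
	 */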
1110
1111 /*
1112 * Determine the maximum GPE number for this machine.
1113 *
1114 * Note: both GPE0 and GPE1 are optional, and either can exist without
1115 * the other.
1116 *
1117 * If EITHER the register length OR the block address are zero, then that
1118 * particular block is not supported.
1119 */
1120 if (acpi_gbl_FADT.gpe0_block_length &&
1121 acpi_gbl_FADT.xgpe0_block.address) {
1122
1123 /* GPE block 0 exists (has both length and address > 0) */
1124
1125 register_count0 = (u16) (acpi_gbl_FADT.gpe0_block_length / 2);
1126
1127 gpe_number_max =
1128 (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;
1129
1130 /* Install GPE Block 0 */
1131
1132 status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
1133 &acpi_gbl_FADT.xgpe0_block,
1134 register_count0, 0,
1135 acpi_gbl_FADT.sci_interrupt,
1136 &acpi_gbl_gpe_fadt_blocks[0]);
1137
1138 if (ACPI_FAILURE(status)) {
1139 ACPI_EXCEPTION((AE_INFO, status,
1140 "Could not create GPE Block 0"));
1141 }
1142 }
1143
1144 if (acpi_gbl_FADT.gpe1_block_length &&
1145 acpi_gbl_FADT.xgpe1_block.address) {
1146
1147 /* GPE block 1 exists (has both length and address > 0) */
1148
1149 register_count1 = (u16) (acpi_gbl_FADT.gpe1_block_length / 2);
1150
1151 /* Check for GPE0/GPE1 overlap (if both banks exist) */
1152
1153 if ((register_count0) &&
1154 (gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
1155 ACPI_ERROR((AE_INFO,
1156 "GPE0 block (GPE 0 to %d) overlaps the GPE1 block (GPE %d to %d) - Ignoring GPE1",
1157 gpe_number_max, acpi_gbl_FADT.gpe1_base,
1158 acpi_gbl_FADT.gpe1_base +
1159 ((register_count1 *
1160 ACPI_GPE_REGISTER_WIDTH) - 1)));
1161
1162 /* Ignore GPE1 block by setting the register count to zero */
1163
1164 register_count1 = 0;
1165 } else {
1166 /* Install GPE Block 1 */
1167
1168 status =
1169 acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
1170 &acpi_gbl_FADT.xgpe1_block,
1171 register_count1,
1172 acpi_gbl_FADT.gpe1_base,
1173 acpi_gbl_FADT.
1174 sci_interrupt,
1175 &acpi_gbl_gpe_fadt_blocks
1176 [1]);
1177
1178 if (ACPI_FAILURE(status)) {
1179 ACPI_EXCEPTION((AE_INFO, status,
1180 "Could not create GPE Block 1"));
1181 }
1182
1183 /*
1184 * GPE0 and GPE1 do not have to be contiguous in the GPE number
1185 * space. However, GPE0 always starts at GPE number zero.
1186 */
1187 gpe_number_max = acpi_gbl_FADT.gpe1_base +
1188 ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
1189 }
1190 }
1191
1192 /* Exit if there are no GPE registers */
1193
1194 if ((register_count0 + register_count1) == 0) {
1195
1196 /* GPEs are not required by ACPI, this is OK */
1197
1198 ACPI_DEBUG_PRINT((ACPI_DB_INIT,
1199 "There are no GPE blocks defined in the FADT\n"));
1200 status = AE_OK;
1201 goto cleanup;
1202 }
1203
1204 /* Check for Max GPE number out-of-range */
1205
1206 if (gpe_number_max > ACPI_GPE_MAX) {
1207 ACPI_ERROR((AE_INFO,
1208 "Maximum GPE number from FADT is too large: 0x%X",
1209 gpe_number_max));
1210 status = AE_BAD_VALUE;
1211 goto cleanup;
1212 }
1213
1214 cleanup:
1215 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
1216 return_ACPI_STATUS(AE_OK);
1217 }