ReactOS 0.4.16-dev-1948-gd260c1d
event.c
Go to the documentation of this file.
4#include <uacpi/internal/io.h>
12#include <uacpi/acpi.h>
13
14#define UACPI_EVENT_DISABLED 0
15#define UACPI_EVENT_ENABLED 1
16
17#if !defined(UACPI_REDUCED_HARDWARE) && !defined(UACPI_BAREBONES_MODE)
18
22
28};
29
33};
34
37 .status_field = UACPI_REGISTER_FIELD_GBL_STS,
38 .enable_field = UACPI_REGISTER_FIELD_GBL_EN,
39 .enable_mask = ACPI_PM1_EN_GBL_EN_MASK,
40 .status_mask = ACPI_PM1_STS_GBL_STS_MASK,
41 },
43 .status_field = UACPI_REGISTER_FIELD_TMR_STS,
44 .enable_field = UACPI_REGISTER_FIELD_TMR_EN,
45 .enable_mask = ACPI_PM1_EN_TMR_EN_MASK,
46 .status_mask = ACPI_PM1_STS_TMR_STS_MASK,
47 },
49 .status_field = UACPI_REGISTER_FIELD_PWRBTN_STS,
50 .enable_field = UACPI_REGISTER_FIELD_PWRBTN_EN,
51 .enable_mask = ACPI_PM1_EN_PWRBTN_EN_MASK,
52 .status_mask = ACPI_PM1_STS_PWRBTN_STS_MASK,
53 },
55 .status_field = UACPI_REGISTER_FIELD_SLPBTN_STS,
56 .enable_field = UACPI_REGISTER_FIELD_SLPBTN_EN,
57 .enable_mask = ACPI_PM1_EN_SLPBTN_EN_MASK,
58 .status_mask = ACPI_PM1_STS_SLPBTN_STS_MASK,
59 },
61 .status_field = UACPI_REGISTER_FIELD_RTC_STS,
62 .enable_field = UACPI_REGISTER_FIELD_RTC_EN,
63 .enable_mask = ACPI_PM1_EN_RTC_EN_MASK,
64 .status_mask = ACPI_PM1_STS_RTC_STS_MASK,
65 },
66};
67
68static struct fixed_event_handler
70
72{
74
75 for (i = 0; i < UACPI_FIXED_EVENT_MAX; ++i) {
78 );
79 }
80
81 return UACPI_STATUS_OK;
82}
83
85{
87 uacpi_u64 raw_value;
88 const struct fixed_event *ev = &fixed_events[event];
89
92 return ret;
93
96 return ret;
97
98 if (raw_value != value) {
99 uacpi_error("failed to %sable fixed event %d\n",
100 value ? "en" : "dis", event);
102 }
103
104 uacpi_trace("fixed event %d %sabled successfully\n",
105 event, value ? "en" : "dis");
106 return UACPI_STATUS_OK;
107}
108
110{
112
114
115 if (uacpi_unlikely(event < 0 || event > UACPI_FIXED_EVENT_MAX))
118 return UACPI_STATUS_OK;
119
122 return ret;
123
124 /*
125 * Attempting to enable an event that doesn't have a handler is most likely
126 * an error, don't allow it.
127 */
130 goto out;
131 }
132
134
135out:
137 return ret;
138}
139
141{
143
145
146 if (uacpi_unlikely(event < 0 || event > UACPI_FIXED_EVENT_MAX))
149 return UACPI_STATUS_OK;
150
153 return ret;
154
156
158 return ret;
159}
160
162{
164
165 if (uacpi_unlikely(event < 0 || event > UACPI_FIXED_EVENT_MAX))
168 return UACPI_STATUS_OK;
169
172 );
173}
174
176 const struct fixed_event *ev, uacpi_fixed_event event
177)
178{
181
185
186 if (uacpi_unlikely(evh->handler == UACPI_NULL)) {
188 "fixed event %d fired but no handler installed, disabling...\n",
189 event
190 );
193 }
194
195 return evh->handler(evh->ctx);
196}
197
199{
202 uacpi_u64 enable_mask, status_mask;
204
207 return int_ret;
208
211 return int_ret;
212
213 for (i = 0; i < UACPI_FIXED_EVENT_MAX; ++i)
214 {
215 const struct fixed_event *ev = &fixed_events[i];
216
217 if (!(status_mask & ev->status_mask) ||
218 !(enable_mask & ev->enable_mask))
219 continue;
220
221 int_ret |= dispatch_fixed_event(ev, i);
222 }
223
224 return int_ret;
225}
226
230
231 /*
232 * Preserved values to be used for state restoration if this handler is
233 * removed at any point.
234 */
239};
240
244};
245
246#define EVENTS_PER_GPE_REGISTER 8
247
248/*
249 * NOTE:
250 * This API and handler types are inspired by ACPICA, let's not reinvent the
251 * wheel and follow a similar path that people ended up finding useful after
252 * years of dealing with ACPI. Obviously credit goes to them for inventing
253 * "implicit notify" and other neat API.
254 */
261};
262
263struct gp_event {
264 union {
269 };
270
273
274 // "reference count" of the number of times this event has been enabled
276
281};
282
286
291
293};
294
295struct gpe_block {
296 struct gpe_block *prev, *next;
297
298 /*
299 * Technically this can only refer to \_GPE, but there's also apparently a
300 * "GPE Block Device" with id "ACPI0006", which is not used by anyone. We
301 * still keep it as a possibility that someone might eventually use it, so
302 * it is supported here.
303 */
305
309
313};
314
317
321};
323
325{
326 return 1 << (event->idx - event->reg->base_idx);
327}
328
333};
334
336{
338 struct gpe_register *reg = event->reg;
339 uacpi_u64 enable_mask;
340 uacpi_u8 event_bit;
342
343 event_bit = gpe_get_mask(event);
344 if (state != GPE_STATE_DISABLED && (reg->masked_mask & event_bit))
345 return UACPI_STATUS_OK;
346
348 if (!(reg->current_mask & event_bit))
349 return UACPI_STATUS_OK;
350
352 }
353
355
356 ret = uacpi_gas_read_mapped(&reg->enable, &enable_mask);
358 goto out;
359
360 switch (state) {
362 enable_mask |= event_bit;
363 break;
365 enable_mask &= ~event_bit;
366 break;
367 default:
369 goto out;
370 }
371
372 ret = uacpi_gas_write_mapped(&reg->enable, enable_mask);
373out:
375 return ret;
376}
377
379{
380 struct gpe_register *reg = event->reg;
381
382 return uacpi_gas_write_mapped(&reg->status, gpe_get_mask(event));
383}
384
386{
388
389 if (event->triggering == UACPI_GPE_TRIGGERING_LEVEL) {
392 return ret;
393 }
394
396 event->block_interrupts = UACPI_FALSE;
397
398 return ret;
399}
400
402{
404 struct gp_event *event = opaque;
405
408 uacpi_error("unable to restore GPE(%02X): %s\n",
410 }
411}
412
414{
416 struct gp_event *event = opaque;
417
420 goto out_no_unlock;
421
422 switch (event->handler_type) {
424 uacpi_object *method_obj;
425
427 event->aml_handler, UACPI_OBJECT_METHOD_BIT
428 );
429 if (uacpi_unlikely(method_obj == UACPI_NULL)) {
430 uacpi_error("GPE(%02X) AML handler gone\n", event->idx);
431 break;
432 }
433
435 "executing GPE(%02X) handler %.4s\n",
436 event->idx, uacpi_namespace_node_name(event->aml_handler).text
437 );
438
440 event->aml_handler, method_obj->method, UACPI_NULL, UACPI_NULL
441 );
444 "error while executing GPE(%02X) handler %.4s: %s\n",
445 event->idx, event->aml_handler->name.text,
447 );
448 }
449 break;
450 }
451
454
455 handler = event->implicit_handler;
456 while (handler) {
457 /*
458 * 2 - Device Wake. Used to notify OSPM that the device has signaled
459 * its wake event, and that OSPM needs to notify OSPM native device
460 * driver for the device.
461 */
462 uacpi_notify_all(handler->device, 2);
463 handler = handler->next;
464 }
465 break;
466 }
467
468 default:
469 break;
470 }
471
473
474out_no_unlock:
475 /*
476 * We schedule the work as NOTIFICATION to make sure all other notifications
477 * finish before this GPE is re-enabled.
478 */
481 );
483 uacpi_error("unable to schedule GPE(%02X) restore: %s\n",
486 }
487}
488
490 uacpi_namespace_node *device_node, struct gp_event *event
491)
492{
495
496 /*
497 * For raw handlers we don't do any management whatsoever, we just let the
498 * handler know a GPE has triggered and let it handle disable/enable as
499 * well as clearing.
500 */
501 if (event->handler_type == GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW) {
502 return event->native_handler->cb(
503 event->native_handler->ctx, device_node, event->idx
504 );
505 }
506
509 uacpi_error("failed to disable GPE(%02X): %s\n",
511 return int_ret;
512 }
513
514 event->block_interrupts = UACPI_TRUE;
515
516 if (event->triggering == UACPI_GPE_TRIGGERING_EDGE) {
519 uacpi_error("unable to clear GPE(%02X): %s\n",
522 return int_ret;
523 }
524 }
525
526 switch (event->handler_type) {
528 int_ret = event->native_handler->cb(
529 event->native_handler->ctx, device_node, event->idx
530 );
531 if (!(int_ret & UACPI_GPE_REENABLE))
532 break;
533
536 uacpi_error("unable to restore GPE(%02X): %s\n",
538 }
539 break;
540
545 );
548 "unable to schedule GPE(%02X) for execution: %s\n",
550 );
551 }
552 break;
553
554 default:
555 uacpi_warn("GPE(%02X) fired but no handler, keeping disabled\n",
556 event->idx);
557 break;
558 }
559
561}
562
564{
567 struct gpe_register *reg;
568 struct gp_event *event;
570 uacpi_size i, j;
571
572 while (block) {
573 for (i = 0; i < block->num_registers; ++i) {
574 reg = &block->registers[i];
575
576 if (!reg->runtime_mask && !reg->wake_mask)
577 continue;
578
579 ret = uacpi_gas_read_mapped(&reg->status, &status);
581 return int_ret;
582
583 ret = uacpi_gas_read_mapped(&reg->enable, &enable);
585 return int_ret;
586
587 if (status == 0)
588 continue;
589
590 for (j = 0; j < EVENTS_PER_GPE_REGISTER; ++j) {
591 if (!((status & enable) & (1ull << j)))
592 continue;
593
594 event = &block->events[j + i * EVENTS_PER_GPE_REGISTER];
595 int_ret |= dispatch_gpe(block->device_node, event);
596 }
597 }
598
599 block = block->next;
600 }
601
602 return int_ret;
603}
604
606 uacpi_namespace_node *gpe_device, struct gp_event *event
607)
608{
610 struct gpe_register *reg = event->reg;
612
613 ret = uacpi_gas_read_mapped(&reg->status, &status);
615 return ret;
616
617 if (!(status & gpe_get_mask(event)))
618 return ret;
619
620 dispatch_gpe(gpe_device, event);
621 return ret;
622}
623
625{
626 struct gpe_interrupt_ctx *ctx = opaque;
627
630
631 return detect_gpes(ctx->gpe_head);
632}
633
635 uacpi_u32 irq, struct gpe_interrupt_ctx **out_ctx
636)
637{
640
641 while (entry) {
642 if (entry->irq == irq) {
643 *out_ctx = entry;
644 return UACPI_STATUS_OK;
645 }
646
647 entry = entry->next;
648 }
649
653
654 /*
655 * SCI interrupt is installed by other code and is responsible for more
656 * things than just the GPE handling. Don't install it here.
657 */
658 if (irq != g_uacpi_rt_ctx.fadt.sci_int) {
660 irq, handle_gpes, entry, &entry->irq_handle
661 );
663 uacpi_free(entry, sizeof(*entry));
664 return ret;
665 }
666 }
667
668 entry->irq = irq;
671
672 *out_ctx = entry;
673 return UACPI_STATUS_OK;
674}
675
677{
678 struct gpe_implicit_notify_handler *handler, *next_handler;
679
680 handler = event->implicit_handler;
681 while (handler) {
682 next_handler = handler->next;
683 uacpi_free(handler, sizeof(*handler));
684 handler = next_handler;
685 }
686
687 event->implicit_handler = UACPI_NULL;
688}
689
691{
696};
697
700)
701{
705 struct gpe_register *reg;
706
707 for (i = 0; i < block->num_registers; ++i) {
708 reg = &block->registers[i];
709
710 switch (action) {
712 value = 0;
713 break;
715 value = reg->runtime_mask & ~reg->masked_mask;
716 break;
718 value = reg->wake_mask;
719 break;
721 ret = uacpi_gas_write_mapped(&reg->status, 0xFF);
723 return ret;
724 continue;
725 default:
727 }
728
729 reg->current_mask = value;
730 ret = uacpi_gas_write_mapped(&reg->enable, value);
732 return ret;
733 }
734
735 return UACPI_STATUS_OK;
736}
737
739{
741 struct gpe_register *reg;
742
743 for (i = 0; i < block->num_registers; ++i) {
744 reg = &block->registers[i];
745
746 // No need to flush or do anything if it's not currently enabled
747 if (!reg->current_mask)
748 continue;
749
750 // 1. Mask the GPEs, this makes sure their state is no longer modifyable
751 reg->masked_mask = 0xFF;
752
753 /*
754 * 2. Wait for in-flight work & IRQs to finish, these might already
755 * be past the respective "if (masked)" check and therefore may
756 * try to re-enable a masked GPE.
757 */
759
760 /*
761 * 3. Now that this GPE's state is unmodifyable and we know that
762 * currently in-flight IRQs will see the masked state, we can
763 * safely disable all events knowing they won't be re-enabled by
764 * a racing IRQ.
765 */
766 uacpi_gas_write_mapped(&reg->enable, 0x00);
767
768 /*
769 * 4. Wait for the last possible IRQ to finish, now that this event is
770 * disabled.
771 */
773 }
774}
775
777{
778 if (block->registers != UACPI_NULL) {
779 struct gpe_register *reg;
781
783
784 for (i = 0; i < block->num_registers; ++i) {
785 reg = &block->registers[i];
786
787 if (reg->enable.total_bit_width)
788 uacpi_unmap_gas_nofree(&reg->enable);
789 if (reg->status.total_bit_width)
790 uacpi_unmap_gas_nofree(&reg->status);
791 }
792 }
793
794 if (block->prev)
795 block->prev->next = block->next;
796
797 if (block->irq_ctx) {
798 struct gpe_interrupt_ctx *ctx = block->irq_ctx;
799
800 // Are we the first GPE block?
801 if (block == ctx->gpe_head) {
802 ctx->gpe_head = ctx->gpe_head->next;
803 } else {
804 struct gpe_block *prev_block = ctx->gpe_head;
805
806 // We're not, do a search
807 while (prev_block) {
808 if (prev_block->next == block) {
809 prev_block->next = block->next;
810 break;
811 }
812
813 prev_block = prev_block->next;
814 }
815 }
816
817 // This GPE block was the last user of this interrupt context, remove it
818 if (ctx->gpe_head == UACPI_NULL) {
819 if (ctx->prev)
820 ctx->prev->next = ctx->next;
821
822 if (ctx->irq != g_uacpi_rt_ctx.fadt.sci_int) {
824 handle_gpes, ctx->irq_handle
825 );
826 }
827
828 uacpi_free(block->irq_ctx, sizeof(*block->irq_ctx));
829 }
830 }
831
832 if (block->events != UACPI_NULL) {
834 struct gp_event *event;
835
836 for (i = 0; i < block->num_events; ++i) {
837 event = &block->events[i];
838
839 switch (event->handler_type) {
842 break;
843
846 uacpi_free(event->native_handler,
847 sizeof(*event->native_handler));
848 break;
849
852 break;
853 }
854
855 default:
856 break;
857 }
858 }
859
860 }
861
862 uacpi_free(block->registers,
863 sizeof(*block->registers) * block->num_registers);
864 uacpi_free(block->events,
865 sizeof(*block->events) * block->num_events);
866 uacpi_free(block, sizeof(*block));
867}
868
870{
872
873 if (idx < block->base_idx)
874 return UACPI_NULL;
875
876 offset = idx - block->base_idx;
877 if (offset > block->num_events)
878 return UACPI_NULL;
879
880 return &block->events[offset];
881}
882
887};
888
891)
892{
894 struct gpe_match_ctx *ctx = opaque;
895 struct gp_event *event;
898
900
901 if (node->name.text[0] != '_')
903
904 switch (node->name.text[1]) {
905 case 'L':
907 break;
908 case 'E':
910 break;
911 default:
913 }
914
915 ret = uacpi_string_to_integer(&node->name.text[2], 2, UACPI_BASE_HEX, &idx);
917 uacpi_trace("invalid GPE method name %.4s, ignored\n", node->name.text);
919 }
920
921 event = gpe_from_block(ctx->block, idx);
922 if (event == UACPI_NULL)
924
925 switch (event->handler_type) {
926 /*
927 * This had implicit notify configured but this is no longer needed as we
928 * now have an actual AML handler. Free the implicit notify list and switch
929 * this handler to AML mode.
930 */
935 event->aml_handler = node;
936 event->handler_type = GPE_HANDLER_TYPE_AML_HANDLER;
937 break;
938
940 // This is okay, since we're re-running the detection code
941 if (!ctx->post_dynamic_table_load) {
943 "GPE(%02X) already matched %.4s, skipping %.4s\n",
944 (uacpi_u32)idx, event->aml_handler->name.text, node->name.text
945 );
946 }
948
952 "not assigning GPE(%02X) to %.4s, override "
953 "installed by user\n", (uacpi_u32)idx, node->name.text
954 );
956 default:
958 }
959
960 uacpi_trace("assigned GPE(%02X) -> %.4s\n",
961 (uacpi_u32)idx, node->name.text);
962 event->triggering = triggering;
963 ctx->matched_count++;
964
966}
967
969{
970 struct gpe_match_ctx match_ctx = {
972 };
973
975
977 goto out;
978
979 struct gpe_interrupt_ctx *irq_ctx = g_gpe_interrupt_head;
980
981 while (irq_ctx) {
982 match_ctx.block = irq_ctx->gpe_head;
983
984 while (match_ctx.block) {
986 match_ctx.block->device_node, do_match_gpe_methods, UACPI_NULL,
989 );
990 match_ctx.block = match_ctx.block->next;
991 }
992
993 irq_ctx = irq_ctx->next;
994 }
995
996 if (match_ctx.matched_count) {
997 uacpi_info("matched %u additional GPEs post dynamic table load\n",
998 match_ctx.matched_count);
999 }
1000
1001out:
1004}
1005
1007 uacpi_namespace_node *device_node, uacpi_u32 irq, uacpi_u16 base_idx,
1009)
1010{
1012 struct gpe_match_ctx match_ctx = { 0 };
1013 struct gpe_block *block;
1014 struct gpe_register *reg;
1015 struct gp_event *event;
1016 struct acpi_gas tmp_gas = {
1017 .address_space_id = address_space_id,
1018 .register_bit_width = 8,
1019 };
1020 uacpi_size i, j;
1021
1024 return ret;
1025
1026 block->device_node = device_node;
1027 block->base_idx = base_idx;
1028
1029 block->num_registers = num_registers;
1030 block->registers = uacpi_kernel_alloc_zeroed(
1031 num_registers * sizeof(*block->registers)
1032 );
1033 if (uacpi_unlikely(block->registers == UACPI_NULL))
1034 goto error_out;
1035
1038 block->num_events * sizeof(*block->events)
1039 );
1040 if (uacpi_unlikely(block->events == UACPI_NULL))
1041 goto error_out;
1042
1043 for (reg = block->registers, event = block->events, i = 0;
1044 i < num_registers; ++i, ++reg) {
1045
1046 /*
1047 * Initialize this register pair as well as all the events within it.
1048 *
1049 * Each register has two sub registers: status & enable, 8 bits each.
1050 * Each bit corresponds to one event that we initialize below.
1051 */
1052 reg->base_idx = base_idx + (i * EVENTS_PER_GPE_REGISTER);
1053
1054
1055 tmp_gas.address = address + i;
1056 ret = uacpi_map_gas_noalloc(&tmp_gas, &reg->status);
1058 goto error_out;
1059
1060 tmp_gas.address += num_registers;
1061 ret = uacpi_map_gas_noalloc(&tmp_gas, &reg->enable);
1063 goto error_out;
1064
1065 for (j = 0; j < EVENTS_PER_GPE_REGISTER; ++j, ++event) {
1066 event->idx = reg->base_idx + j;
1067 event->reg = reg;
1068 }
1069
1070 /*
1071 * Disable all GPEs in this register & clear anything that might be
1072 * pending from earlier.
1073 */
1074 ret = uacpi_gas_write_mapped(&reg->enable, 0x00);
1076 goto error_out;
1077
1078 ret = uacpi_gas_write_mapped(&reg->status, 0xFF);
1080 goto error_out;
1081 }
1082
1085 goto error_out;
1086
1087 block->next = block->irq_ctx->gpe_head;
1088 block->irq_ctx->gpe_head = block;
1089 match_ctx.block = block;
1090
1092 device_node, do_match_gpe_methods, UACPI_NULL,
1095 );
1096
1097 uacpi_trace("initialized GPE block %.4s[%d->%d], %d AML handlers (IRQ %d)\n",
1098 device_node->name.text, base_idx, base_idx + block->num_events,
1099 match_ctx.matched_count, irq);
1100 return UACPI_STATUS_OK;
1101
1102error_out:
1104 return ret;
1105}
1106
1108 (struct gpe_block*, uacpi_handle);
1109
1112)
1113{
1114 uacpi_iteration_decision decision;
1115 struct gpe_interrupt_ctx *irq_ctx = g_gpe_interrupt_head;
1116 struct gpe_block *block;
1117
1118 while (irq_ctx) {
1119 block = irq_ctx->gpe_head;
1120
1121 while (block) {
1122 decision = cb(block, handle);
1123 if (decision == UACPI_ITERATION_DECISION_BREAK)
1124 return;
1125
1126 block = block->next;
1127 }
1128
1129 irq_ctx = irq_ctx->next;
1130 }
1131}
1132
1138};
1139
1141 struct gpe_block *block, uacpi_handle opaque
1142)
1143{
1144 struct gpe_search_ctx *ctx = opaque;
1145
1146 if (block->device_node != ctx->gpe_device)
1148
1149 ctx->out_block = block;
1150 ctx->out_event = gpe_from_block(block, ctx->idx);
1151 if (ctx->out_event == UACPI_NULL)
1153
1155}
1156
1157static struct gp_event *get_gpe(
1159)
1160{
1161 struct gpe_search_ctx ctx = {
1162 .gpe_device = gpe_device,
1163 .idx = idx,
1164 };
1165
1167 return ctx.out_event;
1168}
1169
1171{
1172 uacpi_u8 this_mask;
1173 struct gpe_register *reg = event->reg;
1174
1175 this_mask = gpe_get_mask(event);
1176
1177 if (set_on) {
1178 reg->runtime_mask |= this_mask;
1179 reg->current_mask = reg->runtime_mask;
1180 return;
1181 }
1182
1183 reg->runtime_mask &= ~this_mask;
1184 reg->current_mask = reg->runtime_mask;
1185}
1186
1188{
1190
1191 if (uacpi_unlikely(event->num_users == 0))
1193
1194 if (--event->num_users == 0) {
1196
1200 event->num_users++;
1201 }
1202 }
1203
1204 return ret;
1205}
1206
1210};
1211
1213 struct gp_event *event, enum event_clear_if_first clear_if_first
1214)
1215{
1217
1218 if (uacpi_unlikely(event->num_users == 0xFF))
1220
1221 if (++event->num_users == 1) {
1222 if (clear_if_first == EVENT_CLEAR_IF_FIRST_YES)
1224
1226
1230 event->num_users--;
1231 }
1232 }
1233
1234 return ret;
1235}
1236
1239)
1240{
1241 switch (triggering) {
1243 return "edge";
1245 return "level";
1246 default:
1247 return "invalid";
1248 }
1249}
1250
1252{
1253 return event->num_users && event->triggering == UACPI_GPE_TRIGGERING_EDGE;
1254}
1255
1257 struct gp_event *event, uacpi_bool should_mask
1258)
1259{
1260 struct gpe_register *reg;
1261 uacpi_u8 mask;
1262
1263 reg = event->reg;
1265
1266 if (should_mask) {
1267 if (reg->masked_mask & mask)
1269
1270 // 1. Mask the GPE, this makes sure its state is no longer modifyable
1271 reg->masked_mask |= mask;
1272
1273 /*
1274 * 2. Wait for in-flight work & IRQs to finish, these might already
1275 * be past the respective "if (masked)" check and therefore may
1276 * try to re-enable a masked GPE.
1277 */
1279
1280 /*
1281 * 3. Now that this GPE's state is unmodifyable and we know that currently
1282 * in-flight IRQs will see the masked state, we can safely disable this
1283 * event knowing it won't be re-enabled by a racing IRQ.
1284 */
1286
1287 /*
1288 * 4. Wait for the last possible IRQ to finish, now that this event is
1289 * disabled.
1290 */
1292
1293 return UACPI_STATUS_OK;
1294 }
1295
1296 if (!(reg->masked_mask & mask))
1298
1299 reg->masked_mask &= ~mask;
1300 if (!event->block_interrupts && event->num_users)
1302
1303 return UACPI_STATUS_OK;
1304}
1305
1306/*
1307 * Safely mask the event before we modify its handlers.
1308 *
1309 * This makes sure we can't get an IRQ in the middle of modifying this
1310 * event's structures.
1311 */
1313{
1314 // No need to flush or do anything if it's not currently enabled
1315 if (!(event->reg->current_mask & gpe_get_mask(event)))
1316 return UACPI_FALSE;
1317
1319 return UACPI_TRUE;
1320}
1321
1323 struct gpe_block *block, uacpi_handle opaque
1324)
1325{
1327 uacpi_bool *poll_blocks = opaque;
1328 uacpi_size i, j, count_enabled = 0;
1329 struct gp_event *event;
1330
1331 for (i = 0; i < block->num_registers; ++i) {
1332 for (j = 0; j < EVENTS_PER_GPE_REGISTER; ++j) {
1333 event = &block->events[j + i * EVENTS_PER_GPE_REGISTER];
1334
1335 if (event->wake ||
1336 event->handler_type != GPE_HANDLER_TYPE_AML_HANDLER)
1337 continue;
1338
1341 uacpi_warn("failed to enable GPE(%02X): %s\n",
1343 continue;
1344 }
1345
1346 *poll_blocks |= gpe_needs_polling(event);
1347 count_enabled++;
1348 }
1349 }
1350
1351 if (count_enabled) {
1352 uacpi_info(
1353 "enabled %zu GPEs in block %.4s@[%d->%d]\n",
1354 count_enabled, block->device_node->name.text,
1355 block->base_idx, block->base_idx + block->num_events
1356 );
1357 }
1359}
1360
1362{
1364 uacpi_bool poll_blocks = UACPI_FALSE;
1365
1367
1370 return ret;
1371
1372 if (g_gpes_finalized)
1373 goto out;
1374
1376
1378 if (poll_blocks)
1380
1381out:
1383 return ret;
1384}
1385
1387 uacpi_namespace_node **gpe_device, uacpi_u16 idx,
1388 struct gp_event **out_event
1389)
1390{
1391 if (*gpe_device == UACPI_NULL) {
1392 *gpe_device = uacpi_namespace_get_predefined(
1394 );
1395 }
1396
1397 *out_event = get_gpe(*gpe_device, idx);
1398 if (*out_event == UACPI_NULL)
1400
1401 return UACPI_STATUS_OK;
1402}
1403
1405 uacpi_namespace_node *gpe_device, uacpi_u16 idx,
1408)
1409{
1411 struct gp_event *event;
1412 struct gpe_native_handler *native_handler;
1413 uacpi_bool did_mask;
1414
1416
1419
1422 return ret;
1423
1424 ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
1426 goto out;
1427
1428 if (event->handler_type == GPE_HANDLER_TYPE_NATIVE_HANDLER ||
1429 event->handler_type == GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW) {
1431 goto out;
1432 }
1433
1434 native_handler = uacpi_kernel_alloc(sizeof(*native_handler));
1435 if (uacpi_unlikely(native_handler == UACPI_NULL)) {
1437 goto out;
1438 }
1439
1440 native_handler->cb = handler;
1441 native_handler->ctx = ctx;
1442 native_handler->previous_handler = event->any_handler;
1443 native_handler->previous_handler_type = event->handler_type;
1444 native_handler->previous_triggering = event->triggering;
1445 native_handler->previously_enabled = UACPI_FALSE;
1446
1447 did_mask = gpe_mask_safe(event);
1448
1449 if ((event->handler_type == GPE_HANDLER_TYPE_AML_HANDLER ||
1450 event->handler_type == GPE_HANDLER_TYPE_IMPLICIT_NOTIFY) &&
1451 event->num_users != 0) {
1452 native_handler->previously_enabled = UACPI_TRUE;
1454
1455 if (uacpi_unlikely(event->triggering != triggering)) {
1456 uacpi_warn(
1457 "GPE(%02X) user handler claims %s triggering, originally "
1458 "configured as %s\n", idx,
1461 );
1462 }
1463 }
1464
1465 event->native_handler = native_handler;
1466 event->handler_type = type;
1467 event->triggering = triggering;
1468
1469 if (did_mask)
1471out:
1473 return ret;
1474}
1475
1477 uacpi_namespace_node *gpe_device, uacpi_u16 idx,
1480)
1481{
1484 handler, ctx
1485 );
1486}
1487
1489 uacpi_namespace_node *gpe_device, uacpi_u16 idx,
1492)
1493{
1496 handler, ctx
1497 );
1498}
1499
1501 uacpi_namespace_node *gpe_device, uacpi_u16 idx,
1503)
1504{
1506 struct gp_event *event;
1507 struct gpe_native_handler *native_handler;
1508 uacpi_bool did_mask;
1509
1511
1514 return ret;
1515
1516 ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
1518 goto out;
1519
1520 if (event->handler_type != GPE_HANDLER_TYPE_NATIVE_HANDLER &&
1521 event->handler_type != GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW) {
1523 goto out;
1524 }
1525
1526 native_handler = event->native_handler;
1527 if (uacpi_unlikely(native_handler->cb != handler)) {
1529 goto out;
1530 }
1531
1532 did_mask = gpe_mask_safe(event);
1533
1534 event->aml_handler = native_handler->previous_handler;
1535 event->triggering = native_handler->previous_triggering;
1536 event->handler_type = native_handler->previous_handler_type;
1537
1538 if ((event->handler_type == GPE_HANDLER_TYPE_AML_HANDLER ||
1539 event->handler_type == GPE_HANDLER_TYPE_IMPLICIT_NOTIFY) &&
1540 native_handler->previously_enabled) {
1542 }
1543
1544 uacpi_free(native_handler, sizeof(*native_handler));
1545
1546 if (did_mask)
1548
1550 maybe_dispatch_gpe(gpe_device, event);
1551out:
1553 return ret;
1554}
1555
1558)
1559{
1561 struct gp_event *event;
1562
1564
1567 return ret;
1568
1569 ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
1571 goto out;
1572
1573 if (uacpi_unlikely(event->handler_type == GPE_HANDLER_TYPE_NONE)) {
1575 goto out;
1576 }
1577
1580 goto out;
1581
1583 maybe_dispatch_gpe(gpe_device, event);
1584
1585out:
1587 return ret;
1588}
1589
1592)
1593{
1595 struct gp_event *event;
1596
1598
1601 return ret;
1602
1603 ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
1605 goto out;
1606
1608out:
1610 return ret;
1611}
1612
1615)
1616{
1618 struct gp_event *event;
1619
1621
1624 return ret;
1625
1626 ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
1628 goto out;
1629
1630 ret = clear_gpe(event);
1631out:
1633 return ret;
1634}
1635
1638)
1639{
1641 struct gp_event *event;
1642
1644
1647 return ret;
1648
1649 ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
1651 goto out;
1652
1653 event->block_interrupts = state == GPE_STATE_DISABLED;
1655out:
1657 return ret;
1658}
1659
1662)
1663{
1664 return gpe_suspend_resume(gpe_device, idx, GPE_STATE_DISABLED);
1665}
1666
1669)
1670{
1671 return gpe_suspend_resume(gpe_device, idx, GPE_STATE_ENABLED);
1672}
1673
1676)
1677{
1679 struct gp_event *event;
1680
1682
1685 return ret;
1686
1687 ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
1689 goto out;
1690
1691 event = get_gpe(gpe_device, idx);
1694 goto out;
1695 }
1696
1698out:
1700 return ret;
1701
1702}
1703
1705 uacpi_namespace_node *gpe_device, uacpi_u16 idx, uacpi_bool should_mask
1706)
1707{
1709 struct gp_event *event;
1710
1712
1715 return ret;
1716
1717 ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
1719 goto out;
1720
1721 ret = gpe_mask_unmask(event, should_mask);
1722
1723out:
1725 return ret;
1726}
1727
1730)
1731{
1732 return gpe_get_mask_unmask(gpe_device, idx, UACPI_TRUE);
1733}
1734
1737)
1738{
1739 return gpe_get_mask_unmask(gpe_device, idx, UACPI_FALSE);
1740}
1741
1743 uacpi_namespace_node *gpe_device, uacpi_u16 idx,
1744 uacpi_namespace_node *wake_device
1745)
1746{
1748 struct gp_event *event;
1749 uacpi_bool did_mask;
1750
1752
1753 if (wake_device != UACPI_NULL) {
1754 uacpi_bool is_dev = wake_device == uacpi_namespace_root();
1755
1756 if (!is_dev) {
1757 ret = uacpi_namespace_node_is(wake_device, UACPI_OBJECT_DEVICE, &is_dev);
1759 return ret;
1760 }
1761
1762 if (!is_dev)
1764 }
1765
1768 return ret;
1769
1770 ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
1772 goto out;
1773
1774 did_mask = gpe_mask_safe(event);
1775
1776 if (wake_device != UACPI_NULL) {
1777 switch (event->handler_type) {
1779 event->handler_type = GPE_HANDLER_TYPE_IMPLICIT_NOTIFY;
1780 event->triggering = UACPI_GPE_TRIGGERING_LEVEL;
1781 break;
1782
1784 /*
1785 * An AML handler already exists, we expect it to call Notify() as
1786 * it sees fit. For now just make sure this event is disabled if it
1787 * had been enabled automatically previously during initialization.
1788 */
1790 break;
1791
1794 uacpi_warn(
1795 "not configuring implicit notify for GPE(%02X) -> %.4s: "
1796 " a user handler already installed\n", event->idx,
1797 wake_device->name.text
1798 );
1799 break;
1800
1801 // We will re-check this below
1803 break;
1804
1805 default:
1806 uacpi_warn("invalid GPE(%02X) handler type: %d\n",
1807 event->idx, event->handler_type);
1809 goto out_unmask;
1810 }
1811
1812 /*
1813 * This GPE has no known AML handler, so we configure it to receive
1814 * implicit notifications for wake devices when we get a corresponding
1815 * GPE triggered. Usually it's the job of a matching AML handler, but
1816 * we didn't find any.
1817 */
1818 if (event->handler_type == GPE_HANDLER_TYPE_IMPLICIT_NOTIFY) {
1819 struct gpe_implicit_notify_handler *implicit_handler;
1820
1821 implicit_handler = event->implicit_handler;
1822 while (implicit_handler) {
1823 if (implicit_handler->device == wake_device) {
1825 goto out_unmask;
1826 }
1827
1828 implicit_handler = implicit_handler->next;
1829 }
1830
1831 implicit_handler = uacpi_kernel_alloc(sizeof(*implicit_handler));
1832 if (uacpi_likely(implicit_handler != UACPI_NULL)) {
1833 implicit_handler->device = wake_device;
1834 implicit_handler->next = event->implicit_handler;
1835 event->implicit_handler = implicit_handler;
1836 } else {
1837 uacpi_warn(
1838 "unable to configure implicit wake for GPE(%02X) -> %.4s: "
1839 "out of memory\n", event->idx, wake_device->name.text
1840 );
1841 }
1842 }
1843 }
1844
1845 event->wake = UACPI_TRUE;
1846
1847out_unmask:
1848 if (did_mask)
1850out:
1852 return ret;
1853}
1854
1857)
1858{
1860 struct gp_event *event;
1861 struct gpe_register *reg;
1862 uacpi_u8 mask;
1863
1865
1868 return ret;
1869
1870 ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
1872 goto out;
1873
1874 if (!event->wake) {
1876 goto out;
1877 }
1878
1879 reg = event->reg;
1881
1882 if (enabled)
1883 reg->wake_mask |= mask;
1884 else
1885 reg->wake_mask &= mask;
1886
1887out:
1889 return ret;
1890}
1891
1894)
1895{
1896 return gpe_enable_disable_for_wake(gpe_device, idx, UACPI_TRUE);
1897}
1898
1901)
1902{
1903 return gpe_enable_disable_for_wake(gpe_device, idx, UACPI_FALSE);
1904}
1905
1909};
1910
1912 struct gpe_block *block, uacpi_handle opaque
1913)
1914{
1915 struct do_for_all_gpes_ctx *ctx = opaque;
1916
1917 ctx->ret = gpe_block_apply_action(block, ctx->action);
1918 if (uacpi_unlikely_error(ctx->ret))
1920
1922}
1923
1925{
1927
1929
1932 return ret;
1933
1935
1937 return ctx->ret;
1938}
1939
1941{
1942 struct do_for_all_gpes_ctx ctx = {
1944 };
1945 return for_all_gpes_locked(&ctx);
1946}
1947
1949{
1950 struct do_for_all_gpes_ctx ctx = {
1952 };
1953 return for_all_gpes_locked(&ctx);
1954}
1955
1957{
1958 struct do_for_all_gpes_ctx ctx = {
1960 };
1961 return for_all_gpes_locked(&ctx);
1962}
1963
/*
 * initialize_gpes(): creates the FADT-described GPE blocks 0 and 1.
 * Each block's register count is gpeN_blk_len / 2 (half status, half
 * enable, per ACPI). Block 1 is skipped if its base index would overlap
 * block 0's event range. Failure to create a block is logged but not
 * fatal — the function always returns UACPI_STATUS_OK.
 * NOTE(review): several lines (gpe_node lookup, create_gpe_block calls'
 * heads, error checks) are missing from this extracted listing.
 */
1965{
1967    uacpi_namespace_node *gpe_node;
1968    struct acpi_fadt *fadt = &g_uacpi_rt_ctx.fadt;
1969    uacpi_u8 gpe0_regs = 0, gpe1_regs = 0;
1970
1972
1973    if (fadt->x_gpe0_blk.address && fadt->gpe0_blk_len) {
        /* Half the block length is status registers, half enable. */
1974        gpe0_regs = fadt->gpe0_blk_len / 2;
1975
1977            gpe_node, fadt->sci_int, 0, fadt->x_gpe0_blk.address,
1978            fadt->x_gpe0_blk.address_space_id, gpe0_regs
1979        );
1981            uacpi_error("unable to create FADT GPE block 0: %s\n",
1983        }
1984    }
1985
1986    if (fadt->x_gpe1_blk.address && fadt->gpe1_blk_len) {
1987        gpe1_regs = fadt->gpe1_blk_len / 2;
1988
        /* Block 1's base must lie past the end of block 0's event range. */
1989        if (uacpi_unlikely((gpe0_regs * EVENTS_PER_GPE_REGISTER) >
1990                           fadt->gpe1_base)) {
1992                "FADT GPE block 1 [%d->%d] collides with GPE block 0 "
1993                "[%d->%d], ignoring\n",
            /* NOTE(review): the argument order below prints block 0's
             * range under the "block 1" label — possibly an extraction
             * artifact of a wrapped argument list; verify upstream. */
1994                0, gpe0_regs * EVENTS_PER_GPE_REGISTER, fadt->gpe1_base,
1995                gpe1_regs * EVENTS_PER_GPE_REGISTER
1996            );
1997            gpe1_regs = 0;
1998            goto out;
1999        }
2000
2002            gpe_node, fadt->sci_int, fadt->gpe1_base, fadt->x_gpe1_blk.address,
2003            fadt->x_gpe1_blk.address_space_id, gpe1_regs
2004        );
2006            uacpi_error("unable to create FADT GPE block 1: %s\n",
2008        }
2009    }
2010
2011    if (gpe0_regs == 0 && gpe1_regs == 0)
2012        uacpi_trace("platform has no FADT GPE events\n");
2013
2014out:
2015    return UACPI_STATUS_OK;
2016}
2017
/*
 * uacpi_install_gpe_block(): public API to register a non-FADT GPE
 * block on a device node. Validates that gpe_device is a Device object,
 * rejects a duplicate block (a GPE already resolvable on this device),
 * then creates the block starting at index 0.
 * NOTE(review): lines for the declaration of ret, lock handling and the
 * create_gpe_block() call head are missing from this listing.
 */
2021)
2022{
2024    uacpi_bool is_dev;
2025
2027
2028    ret = uacpi_namespace_node_is(gpe_device, UACPI_OBJECT_DEVICE, &is_dev);
2030        return ret;
2031    if (!is_dev)
2033
2036        return ret;
2037
    /* Refuse to install twice on the same device. */
2038    if (uacpi_unlikely(get_gpe(gpe_device, 0) != UACPI_NULL)) {
2040        goto out;
2041    }
2042
2044        gpe_device, irq, 0, address, address_space, num_registers
2045    );
2046
2047out:
2049    return ret;
2050}
2051
/*
 * uacpi_uninstall_gpe_block(): public API that removes the GPE block
 * previously installed on gpe_device. Validates the node is a Device,
 * searches all registered blocks for one matching the device, and
 * uninstalls it; returns an error (line 2077, missing) if none found.
 */
2053    uacpi_namespace_node *gpe_device
2054)
2055{
2057    uacpi_bool is_dev;
2058    struct gpe_search_ctx search_ctx = {
2059        .idx = 0,
2060        .gpe_device = gpe_device,
2061    };
2062
2064
2067        return ret;
2068    if (!is_dev)
2070
2073        return ret;
2074
2075    for_each_gpe_block(do_find_gpe, &search_ctx);
2076    if (search_ctx.out_block == UACPI_NULL) {
2078        goto out;
2079    }
2080
2081    uninstall_gpe_block(search_ctx.out_block);
2082
2083out:
2085    return ret;
2086}
2087
/*
 * handle_global_lock(): fixed-event handler for the ACPI global lock
 * release notification. Under the global-lock spinlock, it signals the
 * waiting thread's event only if a release was actually pending;
 * otherwise the notification is logged as spurious and ignored.
 */
2089{
2092
    /* Firmware fired the event on a platform without a global lock. */
2093    if (uacpi_unlikely(!g_uacpi_rt_ctx.has_global_lock)) {
2094        uacpi_warn("platform has no global lock but a release event "
2095                   "was fired anyway?\n");
2097    }
2098
2099    flags = uacpi_kernel_lock_spinlock(g_uacpi_rt_ctx.global_lock_spinlock);
2100    if (!g_uacpi_rt_ctx.global_lock_pending) {
2101        uacpi_trace("spurious firmware global lock release notification\n");
2102        goto out;
2103    }
2104
2105    uacpi_trace("received a firmware global lock release notification\n");
2106
    /* Wake the thread blocked waiting to acquire the global lock. */
2107    uacpi_kernel_signal_event(g_uacpi_rt_ctx.global_lock_event);
2108    g_uacpi_rt_ctx.global_lock_pending = UACPI_FALSE;
2109
2110out:
2111    uacpi_kernel_unlock_spinlock(g_uacpi_rt_ctx.global_lock_spinlock, flags);
2113}
2114
/*
 * handle_sci(): top-level SCI interrupt handler. Polls both fixed
 * events and GPEs, OR-ing their interrupt-handled flags into the
 * combined return value.
 */
2116{
2118
2119    int_ret |= handle_fixed_events();
2120    int_ret |= handle_gpes(ctx);
2121
2122    return int_ret;
2123}
2124
/*
 * uacpi_initialize_events_early(): early event-subsystem setup run
 * before the namespace is loaded. Most of the body is missing from
 * this extracted listing; based on the visible early-return at 2130
 * it presumably no-ops on hardware-reduced platforms, then performs
 * two checked initialization steps (lines 2135-2142) — TODO confirm
 * against upstream (likely fixed-event init and lock creation).
 */
2126{
2128
2130        return UACPI_STATUS_OK;
2131
2135
2138        return ret;
2139
2142        return ret;
2143
2144    return UACPI_STATUS_OK;
2145}
2146
/*
 * uacpi_initialize_events(): full event-subsystem bring-up after
 * namespace load. Initializes GPEs, installs the SCI interrupt handler,
 * creates the global-lock event and spinlock, then attempts to acquire
 * the FACS global lock once to probe for its presence:
 *  - success        -> has_global_lock = TRUE
 *  - HARDWARE_TIMEOUT -> platform has no global lock (not an error)
 * No-ops on hardware-reduced platforms (early return at 2152).
 */
2148{
2150
2152        return UACPI_STATUS_OK;
2153
2154    ret = initialize_gpes();
2156        return ret;
2157
2160        &g_uacpi_rt_ctx.sci_handle
2161    );
2164            "unable to install SCI interrupt handler: %s\n",
2166        );
2167        return ret;
2168    }
    /* Remember the handler is live so deinit knows to remove it. */
2169    g_uacpi_rt_ctx.sci_handle_valid = UACPI_TRUE;
2170
2171    g_uacpi_rt_ctx.global_lock_event = uacpi_kernel_create_event();
2172    if (uacpi_unlikely(g_uacpi_rt_ctx.global_lock_event == UACPI_NULL))
2174
2175    g_uacpi_rt_ctx.global_lock_spinlock = uacpi_kernel_create_spinlock();
2176    if (uacpi_unlikely(g_uacpi_rt_ctx.global_lock_spinlock == UACPI_NULL))
2178
2181    );
2185            uacpi_warn("platform has global lock but no FACS was provided\n");
2186            return ret;
2187        }
2188        g_uacpi_rt_ctx.has_global_lock = UACPI_TRUE;
2189    } else if (ret == UACPI_STATUS_HARDWARE_TIMEOUT) {
2190        // has_global_lock remains set to false
2191        uacpi_trace("platform has no global lock\n");
2193    }
2194
2195    return ret;
2196}
2197
/*
 * uacpi_deinitialize_events(): tears the event subsystem down in
 * reverse order — uninstalls the SCI handler if installed, walks every
 * GPE interrupt context freeing each of its GPE blocks (the per-node
 * free calls at 2220 are missing from this listing), then clears the
 * fixed-event handler table. Trailing cleanup (2228-2235, presumably
 * lock/event destruction) is also missing — TODO confirm upstream.
 */
2199{
2200    struct gpe_interrupt_ctx *ctx, *next_ctx = g_gpe_interrupt_head;
2201    uacpi_size i;
2202
2204
2205    if (g_uacpi_rt_ctx.sci_handle_valid) {
2207            handle_sci, g_uacpi_rt_ctx.sci_handle
2208        );
2209        g_uacpi_rt_ctx.sci_handle_valid = UACPI_FALSE;
2210    }
2211
    /* Free every GPE block hanging off every interrupt context. */
2212    while (next_ctx) {
2213        ctx = next_ctx;
2214        next_ctx = ctx->next;
2215
2216        struct gpe_block *block, *next_block = ctx->gpe_head;
2217        while (next_block) {
2218            block = next_block;
2219            next_block = block->next;
2221        }
2222    }
2223
2224    for (i = 0; i < UACPI_FIXED_EVENT_MAX; ++i) {
2227    }
2228
2232    }
2233
2235
2237}
2238
/*
 * uacpi_install_fixed_event_handler(): registers a handler + context
 * for one fixed event. Rejects an out-of-range event, refuses to
 * overwrite an existing handler, and rolls the registration back (the
 * condition at 2268-2269 is missing — presumably a failed hardware
 * enable of the event) if the follow-up step errors out.
 */
2242)
2243{
2245    struct fixed_event_handler *ev;
2246
2248
2249    if (uacpi_unlikely(event < 0 || event > UACPI_FIXED_EVENT_MAX))
2252        return UACPI_STATUS_OK;
2253
2256        return ret;
2257
2259
    /* Only one handler per fixed event is allowed. */
2260    if (ev->handler != UACPI_NULL) {
2262        goto out;
2263    }
2264
2265    ev->handler = handler;
2266    ev->ctx = user;
2267
    /* Roll back the registration if the (missing) enable step failed. */
2270        ev->handler = UACPI_NULL;
2271        ev->ctx = UACPI_NULL;
2272    }
2273
2274out:
2276    return ret;
2277}
2278
/*
 * uacpi_uninstall_fixed_event_handler(): removes the handler installed
 * for one fixed event. Validates the event index, then (after a missing
 * step at 2299-2300, presumably disabling the event in hardware) clears
 * the handler slot and its context.
 */
2281)
2282{
2284    struct fixed_event_handler *ev;
2285
2287
2288    if (uacpi_unlikely(event < 0 || event > UACPI_FIXED_EVENT_MAX))
2291        return UACPI_STATUS_OK;
2292
2295        return ret;
2296
2298
2301        goto out;
2302
2304
2305    ev->handler = UACPI_NULL;
2306    ev->ctx = UACPI_NULL;
2307
2308out:
2310    return ret;
2311}
2312
/*
 * uacpi_fixed_event_info(): reports status flags for one fixed event:
 *  - HAS_HANDLER             if a handler is registered (check at 2333 missing)
 *  - ENABLED | HW_ENABLED    if the event's enable register field is set
 *  - HW_STATUS               if the event's status register field is set
 * Writes the combined mask to *out_info on success.
 */
2315)
2316{
2318    const struct fixed_event *ev;
2319    uacpi_u64 raw_value;
2321
2323
2324    if (uacpi_unlikely(event < 0 || event > UACPI_FIXED_EVENT_MAX))
2328
2331        return ret;
2332
2334        info |= UACPI_EVENT_INFO_HAS_HANDLER;
2335
2336    ev = &fixed_events[event];
2337
2338    ret = uacpi_read_register_field(ev->enable_field, &raw_value);
2340        goto out;
2341    if (raw_value)
2342        info |= UACPI_EVENT_INFO_ENABLED | UACPI_EVENT_INFO_HW_ENABLED;
2343
2344    ret = uacpi_read_register_field(ev->status_field, &raw_value);
2346        goto out;
2347    if (raw_value)
2348        info |= UACPI_EVENT_INFO_HW_STATUS;
2349
2350    *out_info = info;
2351out:
2353    return ret;
2354}
2355
/*
 * uacpi_gpe_info(): reports status flags for one GPE:
 *  - HAS_HANDLER        if any handler type is attached
 *  - ENABLED            if its bit is in the register's runtime_mask
 *  - MASKED             if its bit is in masked_mask
 *  - ENABLED_FOR_WAKE   if its bit is in wake_mask
 *  - HW_ENABLED         if the bit reads back set from the enable register
 *  - HW_STATUS          if the bit reads back set from the status register
 * Writes the combined mask to *out_info on success. (The mask
 * computation at 2380 is missing from this listing — presumably
 * gpe_get_mask(event).)
 */
2357    uacpi_namespace_node *gpe_device, uacpi_u16 idx, uacpi_event_info *out_info
2358)
2359{
2361    struct gp_event *event;
2362    struct gpe_register *reg;
2363    uacpi_u8 mask;
2364    uacpi_u64 raw_value;
2366
2368
2371        return ret;
2372
2373    ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
2375        goto out;
2376
2377    if (event->handler_type != GPE_HANDLER_TYPE_NONE)
2378        info |= UACPI_EVENT_INFO_HAS_HANDLER;
2379
2381    reg = event->reg;
2382
2383    if (reg->runtime_mask & mask)
2384        info |= UACPI_EVENT_INFO_ENABLED;
2385    if (reg->masked_mask & mask)
2386        info |= UACPI_EVENT_INFO_MASKED;
2387    if (reg->wake_mask & mask)
2388        info |= UACPI_EVENT_INFO_ENABLED_FOR_WAKE;
2389
    /* Cross-check the cached masks against the actual hardware state. */
2390    ret = uacpi_gas_read_mapped(&reg->enable, &raw_value);
2392        goto out;
2393    if (raw_value & mask)
2394        info |= UACPI_EVENT_INFO_HW_ENABLED;
2395
2396    ret = uacpi_gas_read_mapped(&reg->status, &raw_value);
2398        goto out;
2399    if (raw_value & mask)
2400        info |= UACPI_EVENT_INFO_HW_STATUS;
2401
2402    *out_info = info;
2403out:
2405    return ret;
2406}
2407
/*
 * Union of every PM1 status bit (timer, bus-master, global lock,
 * power/sleep button, RTC, PCIe wake, wake) — writing this mask to the
 * PM1 status register acknowledges all fixed events at once.
 */
2408#define PM1_STATUS_BITS ( \
2409    ACPI_PM1_STS_TMR_STS_MASK | \
2410    ACPI_PM1_STS_BM_STS_MASK | \
2411    ACPI_PM1_STS_GBL_STS_MASK | \
2412    ACPI_PM1_STS_PWRBTN_STS_MASK | \
2413    ACPI_PM1_STS_SLPBTN_STS_MASK | \
2414    ACPI_PM1_STS_RTC_STS_MASK | \
2415    ACPI_PM1_STS_PCIEXP_WAKE_STS_MASK | \
2416    ACPI_PM1_STS_WAKE_STS_MASK \
2417)
2418
/*
 * uacpi_clear_all_events(): clears all pending event state. Based on
 * the visible structure it runs a do_for_all_gpes_ctx pass over every
 * GPE block (ctx.action initializer at 2423 is missing — presumably
 * GPE_BLOCK_ACTION_CLEAR_ALL) and, per the PM1_STATUS_BITS macro
 * defined immediately above, presumably also writes that mask to the
 * PM1 status register (lines 2432-2433 missing) — TODO confirm.
 */
2420{
2422    struct do_for_all_gpes_ctx ctx = {
2424    };
2425
2427
2430        return ret;
2431
2434        goto out;
2435
2437        ret = ctx.ret;
2438
2439out:
2441    return ret;
2442}
2443
2444#endif // !UACPI_REDUCED_HARDWARE && !UACPI_BAREBONES_MODE
static int state
Definition: maze.c:121
unsigned long uacpi_cpu_flags
Definition: arch_helpers.h:13
void user(int argc, const char *argv[])
Definition: cmds.c:1350
uacpi_status uacpi_recursive_lock_deinit(struct uacpi_recursive_lock *lock)
Definition: mutex.c:267
uacpi_status uacpi_recursive_lock_init(struct uacpi_recursive_lock *lock)
Definition: mutex.c:255
uacpi_status uacpi_recursive_lock_acquire(struct uacpi_recursive_lock *lock)
Definition: mutex.c:287
uacpi_status uacpi_recursive_lock_release(struct uacpi_recursive_lock *lock)
Definition: mutex.c:307
unsigned int idx
Definition: utils.c:41
UINT(* handler)(MSIPACKAGE *)
Definition: action.c:7512
#define ACPI_PM1_EN_PWRBTN_EN_MASK
Definition: acpi.h:886
#define ACPI_PM1_STS_SLPBTN_STS_MASK
Definition: acpi.h:868
#define ACPI_PM1_EN_RTC_EN_MASK
Definition: acpi.h:888
#define ACPI_PM1_EN_SLPBTN_EN_MASK
Definition: acpi.h:887
#define ACPI_PM1_EN_TMR_EN_MASK
Definition: acpi.h:884
#define ACPI_PM1_STS_CLEAR
Definition: acpi.h:874
#define ACPI_PM1_STS_RTC_STS_MASK
Definition: acpi.h:869
#define ACPI_PM1_STS_TMR_STS_MASK
Definition: acpi.h:864
#define ACPI_PM1_STS_PWRBTN_STS_MASK
Definition: acpi.h:867
#define ACPI_PM1_EN_GBL_EN_MASK
Definition: acpi.h:885
#define ACPI_PM1_STS_GBL_STS_MASK
Definition: acpi.h:866
uacpi_interrupt_ret(* uacpi_gpe_handler)(uacpi_handle ctx, uacpi_namespace_node *gpe_device, uacpi_u16 idx)
Definition: event.h:80
#define UACPI_GPE_REENABLE
Definition: event.h:78
uacpi_u16 uacpi_gpe_triggering uacpi_gpe_handler uacpi_handle ctx uacpi_u16 uacpi_namespace_node *wake_device uacpi_u16 idx uacpi_u16 idx uacpi_u16 idx uacpi_u16 idx uacpi_u16 idx uacpi_u64 uacpi_address_space uacpi_u16 num_registers
Definition: event.h:274
uacpi_gpe_triggering
Definition: event.h:84
@ UACPI_GPE_TRIGGERING_MAX
Definition: event.h:87
@ UACPI_GPE_TRIGGERING_EDGE
Definition: event.h:86
@ UACPI_GPE_TRIGGERING_LEVEL
Definition: event.h:85
uacpi_fixed_event
Definition: event.h:13
@ UACPI_FIXED_EVENT_TIMER_STATUS
Definition: event.h:14
@ UACPI_FIXED_EVENT_RTC
Definition: event.h:17
@ UACPI_FIXED_EVENT_SLEEP_BUTTON
Definition: event.h:16
@ UACPI_FIXED_EVENT_POWER_BUTTON
Definition: event.h:15
@ UACPI_FIXED_EVENT_MAX
Definition: event.h:18
uacpi_event_info
Definition: event.h:64
uacpi_u16 uacpi_gpe_triggering triggering
Definition: event.h:118
uacpi_u16 uacpi_gpe_triggering uacpi_gpe_handler uacpi_handle ctx uacpi_u16 uacpi_namespace_node *wake_device uacpi_u16 idx uacpi_u16 idx uacpi_u16 idx uacpi_u16 idx uacpi_u16 idx uacpi_u64 uacpi_address_space address_space
Definition: event.h:274
#define UACPI_ENSURE_INIT_LEVEL_AT_LEAST(lvl)
Definition: context.h:127
static uacpi_bool uacpi_is_hardware_reduced(void)
Definition: context.h:100
struct uacpi_runtime_context g_uacpi_rt_ctx
Definition: uacpi.c:17
#define UACPI_FIXED_EVENT_GLOBAL_LOCK
Definition: event.h:6
#define UACPI_UNUSED(x)
Definition: helpers.h:7
void uacpi_unmap_gas_nofree(uacpi_mapped_gas *gas)
Definition: io.c:889
uacpi_status uacpi_map_gas_noalloc(const struct acpi_gas *gas, uacpi_mapped_gas *out_mapped)
Definition: io.c:832
#define uacpi_trace(...)
Definition: log.h:18
#define uacpi_info(...)
Definition: log.h:19
#define uacpi_error(...)
Definition: log.h:21
#define uacpi_warn(...)
Definition: log.h:20
uacpi_status uacpi_namespace_do_for_each_child(uacpi_namespace_node *parent, uacpi_iteration_callback descending_callback, uacpi_iteration_callback ascending_callback, uacpi_object_type_bits, uacpi_u32 max_depth, enum uacpi_should_lock, enum uacpi_permanent_only, void *user)
Definition: namespace.c:834
uacpi_status uacpi_namespace_write_unlock(void)
Definition: namespace.c:54
uacpi_object * uacpi_namespace_node_get_object_typed(const uacpi_namespace_node *node, uacpi_object_type_bits type_mask)
Definition: namespace.c:654
@ UACPI_PERMANENT_ONLY_YES
Definition: namespace.h:92
@ UACPI_SHOULD_LOCK_YES
Definition: namespace.h:97
uacpi_status uacpi_namespace_write_lock(void)
Definition: namespace.c:49
#define uacpi_kernel_alloc_zeroed
Definition: stdlib.h:127
#define uacpi_free(mem, _)
Definition: stdlib.h:96
uacpi_status uacpi_gas_read_mapped(const uacpi_mapped_gas *gas, uacpi_u64 *value)
Definition: io.c:751
uacpi_status uacpi_gas_write_mapped(const uacpi_mapped_gas *gas, uacpi_u64 value)
Definition: io.c:790
uacpi_status uacpi_namespace_node_is(const uacpi_namespace_node *node, uacpi_object_type type, uacpi_bool *out)
Definition: namespace.c:825
uacpi_namespace_node * uacpi_namespace_get_predefined(uacpi_predefined_namespace)
Definition: namespace.c:272
#define UACPI_MAX_DEPTH_ANY
Definition: namespace.h:102
@ UACPI_PREDEFINED_NAMESPACE_GPE
Definition: namespace.h:18
uacpi_namespace_node * uacpi_namespace_root(void)
Definition: namespace.c:267
uacpi_object_name uacpi_namespace_node_name(const uacpi_namespace_node *node)
Definition: namespace.c:752
#define UACPI_FALLTHROUGH
Definition: compiler.h:70
#define uacpi_likely(expr)
Definition: compiler.h:59
#define uacpi_unlikely(expr)
Definition: compiler.h:58
size_t uacpi_size
Definition: types.h:37
uint32_t uacpi_u32
Definition: types.h:21
bool uacpi_bool
Definition: types.h:31
#define UACPI_FALSE
Definition: types.h:30
uint64_t uacpi_u64
Definition: types.h:22
char uacpi_char
Definition: types.h:44
uint16_t uacpi_u16
Definition: types.h:20
#define UACPI_NULL
Definition: types.h:33
uint8_t uacpi_u8
Definition: types.h:19
#define UACPI_TRUE
Definition: types.h:29
@ UACPI_REGISTER_FIELD_PWRBTN_EN
Definition: registers.h:68
@ UACPI_REGISTER_FIELD_PWRBTN_STS
Definition: registers.h:60
@ UACPI_REGISTER_FIELD_RTC_STS
Definition: registers.h:62
@ UACPI_REGISTER_FIELD_RTC_EN
Definition: registers.h:70
@ UACPI_REGISTER_FIELD_GBL_EN
Definition: registers.h:67
@ UACPI_REGISTER_FIELD_SLPBTN_STS
Definition: registers.h:61
@ UACPI_REGISTER_FIELD_SLPBTN_EN
Definition: registers.h:69
@ UACPI_REGISTER_FIELD_GBL_STS
Definition: registers.h:59
@ UACPI_REGISTER_FIELD_TMR_STS
Definition: registers.h:57
@ UACPI_REGISTER_FIELD_TMR_EN
Definition: registers.h:66
uacpi_status uacpi_read_register_field(uacpi_register_field, uacpi_u64 *)
Definition: registers.c:487
uacpi_status uacpi_read_register(uacpi_register, uacpi_u64 *)
Definition: registers.c:227
uacpi_status uacpi_write_register(uacpi_register, uacpi_u64)
Definition: registers.c:286
uacpi_status uacpi_write_register_field(uacpi_register_field, uacpi_u64)
Definition: registers.c:518
@ UACPI_REGISTER_PM1_EN
Definition: registers.h:18
@ UACPI_REGISTER_PM1_STS
Definition: registers.h:17
#define uacpi_likely_success(expr)
Definition: status.h:53
#define uacpi_unlikely_error(expr)
Definition: status.h:49
uacpi_status
Definition: status.h:10
@ UACPI_STATUS_INVALID_ARGUMENT
Definition: status.h:18
@ UACPI_STATUS_INTERNAL_ERROR
Definition: status.h:21
@ UACPI_STATUS_NOT_FOUND
Definition: status.h:17
@ UACPI_STATUS_OUT_OF_MEMORY
Definition: status.h:13
@ UACPI_STATUS_ALREADY_EXISTS
Definition: status.h:20
@ UACPI_STATUS_OK
Definition: status.h:11
@ UACPI_STATUS_HARDWARE_TIMEOUT
Definition: status.h:28
@ UACPI_STATUS_NO_HANDLER
Definition: status.h:25
const uacpi_char * uacpi_status_to_string(uacpi_status)
Definition: uacpi.c:50
#define UACPI_INTERRUPT_HANDLED
Definition: types.h:535
void * uacpi_handle
Definition: types.h:21
uacpi_iteration_decision
Definition: types.h:28
@ UACPI_ITERATION_DECISION_BREAK
Definition: types.h:30
@ UACPI_ITERATION_DECISION_CONTINUE
Definition: types.h:29
@ UACPI_OBJECT_METHOD_BIT
Definition: types.h:136
@ UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED
Definition: types.h:66
@ UACPI_INIT_LEVEL_NAMESPACE_LOADED
Definition: types.h:72
uacpi_interrupt_ret(* uacpi_interrupt_handler)(uacpi_handle)
Definition: types.h:538
@ UACPI_OBJECT_DEVICE
Definition: types.h:111
uacpi_u32 uacpi_interrupt_ret
Definition: types.h:536
#define UACPI_INTERRUPT_NOT_HANDLED
Definition: types.h:534
uacpi_address_space
Definition: types.h:36
static struct gp_event * get_gpe(uacpi_namespace_node *gpe_device, uacpi_u16 idx)
Definition: event.c:1157
gpe_handler_type
Definition: event.c:255
@ GPE_HANDLER_TYPE_NATIVE_HANDLER
Definition: event.c:258
@ GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW
Definition: event.c:259
@ GPE_HANDLER_TYPE_IMPLICIT_NOTIFY
Definition: event.c:260
@ GPE_HANDLER_TYPE_NONE
Definition: event.c:256
@ GPE_HANDLER_TYPE_AML_HANDLER
Definition: event.c:257
const uacpi_char * uacpi_gpe_triggering_to_string(uacpi_gpe_triggering triggering)
Definition: event.c:1237
static uacpi_status gpe_enable_disable_for_wake(uacpi_namespace_node *gpe_device, uacpi_u16 idx, uacpi_bool enabled)
Definition: event.c:1855
uacpi_status uacpi_install_gpe_handler_raw(uacpi_namespace_node *gpe_device, uacpi_u16 idx, uacpi_gpe_triggering triggering, uacpi_gpe_handler handler, uacpi_handle ctx)
Definition: event.c:1488
static uacpi_handle g_gpe_state_slock
Definition: event.c:19
static uacpi_status gpe_add_user(struct gp_event *event, enum event_clear_if_first clear_if_first)
Definition: event.c:1212
static struct uacpi_recursive_lock g_event_lock
Definition: event.c:20
uacpi_status uacpi_initialize_events_early(void)
Definition: event.c:2125
static uacpi_status set_gpe_state(struct gp_event *event, enum gpe_state state)
Definition: event.c:335
uacpi_status uacpi_disable_gpe_for_wake(uacpi_namespace_node *gpe_device, uacpi_u16 idx)
Definition: event.c:1899
event_clear_if_first
Definition: event.c:1207
@ EVENT_CLEAR_IF_FIRST_YES
Definition: event.c:1208
@ EVENT_CLEAR_IF_FIRST_NO
Definition: event.c:1209
uacpi_status uacpi_suspend_gpe(uacpi_namespace_node *gpe_device, uacpi_u16 idx)
Definition: event.c:1660
#define PM1_STATUS_BITS
Definition: event.c:2408
uacpi_status uacpi_enable_fixed_event(uacpi_fixed_event event)
Definition: event.c:109
static const struct fixed_event fixed_events[UACPI_FIXED_EVENT_MAX+1]
Definition: event.c:35
static uacpi_status sanitize_device_and_find_gpe(uacpi_namespace_node **gpe_device, uacpi_u16 idx, struct gp_event **out_event)
Definition: event.c:1386
uacpi_status uacpi_initialize_events(void)
Definition: event.c:2147
static void async_run_gpe_handler(uacpi_handle opaque)
Definition: event.c:413
uacpi_status uacpi_resume_gpe(uacpi_namespace_node *gpe_device, uacpi_u16 idx)
Definition: event.c:1667
uacpi_status uacpi_enable_all_wake_gpes(void)
Definition: event.c:1956
static uacpi_interrupt_ret handle_global_lock(uacpi_handle ctx)
Definition: event.c:2088
static uacpi_status find_or_create_gpe_interrupt_ctx(uacpi_u32 irq, struct gpe_interrupt_ctx **out_ctx)
Definition: event.c:634
uacpi_status uacpi_enable_gpe_for_wake(uacpi_namespace_node *gpe_device, uacpi_u16 idx)
Definition: event.c:1892
uacpi_status uacpi_disable_all_gpes(void)
Definition: event.c:1940
static struct fixed_event_handler fixed_event_handlers[UACPI_FIXED_EVENT_MAX+1]
Definition: event.c:68
static uacpi_bool gpe_mask_safe(struct gp_event *event)
Definition: event.c:1312
static uacpi_interrupt_ret handle_sci(uacpi_handle ctx)
Definition: event.c:2115
static struct gpe_interrupt_ctx * g_gpe_interrupt_head
Definition: event.c:322
uacpi_status uacpi_install_gpe_block(uacpi_namespace_node *gpe_device, uacpi_u64 address, uacpi_address_space address_space, uacpi_u16 num_registers, uacpi_u32 irq)
Definition: event.c:2018
static uacpi_status set_event(uacpi_u8 event, uacpi_u8 value)
Definition: event.c:84
static uacpi_status gpe_mask_unmask(struct gp_event *event, uacpi_bool should_mask)
Definition: event.c:1256
static void gpe_block_mask_safe(struct gpe_block *block)
Definition: event.c:738
uacpi_status uacpi_uninstall_gpe_handler(uacpi_namespace_node *gpe_device, uacpi_u16 idx, uacpi_gpe_handler handler)
Definition: event.c:1500
static uacpi_status maybe_dispatch_gpe(uacpi_namespace_node *gpe_device, struct gp_event *event)
Definition: event.c:605
static uacpi_status create_gpe_block(uacpi_namespace_node *device_node, uacpi_u32 irq, uacpi_u16 base_idx, uacpi_u64 address, uacpi_u8 address_space_id, uacpi_u16 num_registers)
Definition: event.c:1006
static uacpi_status restore_gpe(struct gp_event *event)
Definition: event.c:385
uacpi_status uacpi_enable_all_runtime_gpes(void)
Definition: event.c:1948
static uacpi_iteration_decision do_match_gpe_methods(uacpi_handle opaque, uacpi_namespace_node *node, uacpi_u32 depth)
Definition: event.c:889
static uacpi_bool gpe_needs_polling(struct gp_event *event)
Definition: event.c:1251
static void uninstall_gpe_block(struct gpe_block *block)
Definition: event.c:776
static uacpi_iteration_decision do_initialize_gpe_block(struct gpe_block *block, uacpi_handle opaque)
Definition: event.c:1322
#define EVENTS_PER_GPE_REGISTER
Definition: event.c:246
uacpi_iteration_decision(* gpe_block_iteration_callback)(struct gpe_block *, uacpi_handle)
Definition: event.c:1108
uacpi_status uacpi_disable_fixed_event(uacpi_fixed_event event)
Definition: event.c:140
static uacpi_iteration_decision do_find_gpe(struct gpe_block *block, uacpi_handle opaque)
Definition: event.c:1140
uacpi_status uacpi_mask_gpe(uacpi_namespace_node *gpe_device, uacpi_u16 idx)
Definition: event.c:1728
uacpi_status uacpi_gpe_info(uacpi_namespace_node *gpe_device, uacpi_u16 idx, uacpi_event_info *out_info)
Definition: event.c:2356
uacpi_status uacpi_uninstall_fixed_event_handler(uacpi_fixed_event event)
Definition: event.c:2279
static void gpe_release_implicit_notify_handlers(struct gp_event *event)
Definition: event.c:676
uacpi_status uacpi_uninstall_gpe_block(uacpi_namespace_node *gpe_device)
Definition: event.c:2052
uacpi_status uacpi_finalize_gpe_initialization(void)
Definition: event.c:1361
static uacpi_interrupt_ret handle_gpes(uacpi_handle opaque)
Definition: event.c:624
static uacpi_u8 gpe_get_mask(struct gp_event *event)
Definition: event.c:324
uacpi_status uacpi_fixed_event_info(uacpi_fixed_event event, uacpi_event_info *out_info)
Definition: event.c:2313
static uacpi_status gpe_suspend_resume(uacpi_namespace_node *gpe_device, uacpi_u16 idx, enum gpe_state state)
Definition: event.c:1636
static uacpi_interrupt_ret dispatch_fixed_event(const struct fixed_event *ev, uacpi_fixed_event event)
Definition: event.c:175
uacpi_status uacpi_install_fixed_event_handler(uacpi_fixed_event event, uacpi_interrupt_handler handler, uacpi_handle user)
Definition: event.c:2239
static uacpi_interrupt_ret dispatch_gpe(uacpi_namespace_node *device_node, struct gp_event *event)
Definition: event.c:489
static uacpi_bool g_gpes_finalized
Definition: event.c:21
static void for_each_gpe_block(gpe_block_iteration_callback cb, uacpi_handle handle)
Definition: event.c:1110
uacpi_status uacpi_finish_handling_gpe(uacpi_namespace_node *gpe_device, uacpi_u16 idx)
Definition: event.c:1674
void uacpi_deinitialize_events(void)
Definition: event.c:2198
static uacpi_status do_install_gpe_handler(uacpi_namespace_node *gpe_device, uacpi_u16 idx, uacpi_gpe_triggering triggering, enum gpe_handler_type type, uacpi_gpe_handler handler, uacpi_handle ctx)
Definition: event.c:1404
#define UACPI_EVENT_DISABLED
Definition: event.c:14
static uacpi_interrupt_ret detect_gpes(struct gpe_block *block)
Definition: event.c:563
uacpi_status uacpi_setup_gpe_for_wake(uacpi_namespace_node *gpe_device, uacpi_u16 idx, uacpi_namespace_node *wake_device)
Definition: event.c:1742
static uacpi_iteration_decision do_for_all_gpes(struct gpe_block *block, uacpi_handle opaque)
Definition: event.c:1911
gpe_state
Definition: event.c:329
@ GPE_STATE_ENABLED
Definition: event.c:330
@ GPE_STATE_ENABLED_CONDITIONALLY
Definition: event.c:331
@ GPE_STATE_DISABLED
Definition: event.c:332
uacpi_status uacpi_enable_gpe(uacpi_namespace_node *gpe_device, uacpi_u16 idx)
Definition: event.c:1556
uacpi_status uacpi_clear_fixed_event(uacpi_fixed_event event)
Definition: event.c:161
static uacpi_interrupt_ret handle_fixed_events(void)
Definition: event.c:198
#define UACPI_EVENT_ENABLED
Definition: event.c:15
static uacpi_status gpe_block_apply_action(struct gpe_block *block, enum gpe_block_action action)
Definition: event.c:698
static void gp_event_toggle_masks(struct gp_event *event, uacpi_bool set_on)
Definition: event.c:1170
gpe_block_action
Definition: event.c:691
@ GPE_BLOCK_ACTION_ENABLE_ALL_FOR_WAKE
Definition: event.c:694
@ GPE_BLOCK_ACTION_ENABLE_ALL_FOR_RUNTIME
Definition: event.c:693
@ GPE_BLOCK_ACTION_DISABLE_ALL
Definition: event.c:692
@ GPE_BLOCK_ACTION_CLEAR_ALL
Definition: event.c:695
uacpi_status uacpi_disable_gpe(uacpi_namespace_node *gpe_device, uacpi_u16 idx)
Definition: event.c:1590
static uacpi_status initialize_fixed_events(void)
Definition: event.c:71
static uacpi_status for_all_gpes_locked(struct do_for_all_gpes_ctx *ctx)
Definition: event.c:1924
uacpi_status uacpi_clear_gpe(uacpi_namespace_node *gpe_device, uacpi_u16 idx)
Definition: event.c:1613
static uacpi_status initialize_gpes(void)
Definition: event.c:1964
static void async_restore_gpe(uacpi_handle opaque)
Definition: event.c:401
uacpi_status uacpi_install_gpe_handler(uacpi_namespace_node *gpe_device, uacpi_u16 idx, uacpi_gpe_triggering triggering, uacpi_gpe_handler handler, uacpi_handle ctx)
Definition: event.c:1476
static uacpi_status gpe_remove_user(struct gp_event *event)
Definition: event.c:1187
uacpi_status uacpi_unmask_gpe(uacpi_namespace_node *gpe_device, uacpi_u16 idx)
Definition: event.c:1735
static uacpi_status clear_gpe(struct gp_event *event)
Definition: event.c:378
static uacpi_status gpe_get_mask_unmask(uacpi_namespace_node *gpe_device, uacpi_u16 idx, uacpi_bool should_mask)
Definition: event.c:1704
void uacpi_events_match_post_dynamic_table_load(void)
Definition: event.c:968
static struct gp_event * gpe_from_block(struct gpe_block *block, uacpi_u16 idx)
Definition: event.c:869
uacpi_status uacpi_clear_all_events(void)
Definition: event.c:2419
return ret
Definition: mutex.c:146
action
Definition: namespace.c:707
unsigned char irq
Definition: dsp.h:13
GLint GLint GLsizei GLsizei GLsizei depth
Definition: gl.h:1546
GLuint GLuint GLsizei GLenum type
Definition: gl.h:1545
struct _cl_event * event
Definition: glext.h:7739
GLenum GLenum GLsizei const GLuint GLboolean enabled
Definition: glext.h:7750
GLuint address
Definition: glext.h:9393
GLintptr offset
Definition: glext.h:5920
GLenum GLint GLuint mask
Definition: glext.h:6028
GLbitfield flags
Definition: glext.h:7161
GLboolean enable
Definition: glext.h:11120
GLsizei GLenum const GLvoid GLsizei GLenum GLbyte GLbyte GLbyte GLdouble GLdouble GLdouble GLfloat GLfloat GLfloat GLint GLint GLint GLshort GLshort GLshort GLubyte GLubyte GLubyte GLuint GLuint GLuint GLushort GLushort GLushort GLbyte GLbyte GLbyte GLbyte GLdouble GLdouble GLdouble GLdouble GLfloat GLfloat GLfloat GLfloat GLint GLint GLint GLint GLshort GLshort GLshort GLshort GLubyte GLubyte GLubyte GLubyte GLuint GLuint GLuint GLuint GLushort GLushort GLushort GLushort GLboolean const GLdouble const GLfloat const GLint const GLshort const GLbyte const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLdouble const GLfloat const GLfloat const GLint const GLint const GLshort const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort GLenum GLenum GLenum GLfloat GLenum GLint GLenum GLenum GLenum GLfloat GLenum GLenum GLint GLenum GLfloat GLenum GLint GLint GLushort GLenum GLenum GLfloat GLenum GLenum GLint GLfloat const GLubyte GLenum GLenum GLenum const GLfloat GLenum GLenum const GLint GLenum GLint GLint GLsizei GLsizei GLint GLenum GLenum const GLvoid GLenum GLenum const GLfloat GLenum GLenum const GLint GLenum GLenum const GLdouble GLenum GLenum const GLfloat GLenum GLenum const GLint GLsizei GLuint GLfloat GLuint GLbitfield GLfloat GLint GLuint GLboolean GLenum GLfloat GLenum GLbitfield GLenum GLfloat GLfloat GLint GLint const GLfloat GLenum GLfloat GLfloat GLint GLint GLfloat GLfloat GLint GLint const GLfloat GLint GLfloat GLfloat GLint 
GLfloat GLfloat GLint GLfloat GLfloat const GLdouble const GLfloat const GLdouble const GLfloat GLint i
Definition: glfuncs.h:248
GLsizei GLenum const GLvoid GLsizei GLenum GLbyte GLbyte GLbyte GLdouble GLdouble GLdouble GLfloat GLfloat GLfloat GLint GLint GLint GLshort GLshort GLshort GLubyte GLubyte GLubyte GLuint GLuint GLuint GLushort GLushort GLushort GLbyte GLbyte GLbyte GLbyte GLdouble GLdouble GLdouble GLdouble GLfloat GLfloat GLfloat GLfloat GLint GLint GLint GLint GLshort GLshort GLshort GLshort GLubyte GLubyte GLubyte GLubyte GLuint GLuint GLuint GLuint GLushort GLushort GLushort GLushort GLboolean const GLdouble const GLfloat const GLint const GLshort const GLbyte const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLdouble const GLfloat const GLfloat const GLint const GLint const GLshort const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort GLenum GLenum GLenum GLfloat GLenum GLint GLenum GLenum GLenum GLfloat GLenum GLenum GLint GLenum GLfloat GLenum GLint GLint GLushort GLenum GLenum GLfloat GLenum GLenum GLint GLfloat const GLubyte GLenum GLenum GLenum const GLfloat GLenum GLenum const GLint GLenum GLint GLint GLsizei GLsizei GLint GLenum GLenum const GLvoid GLenum GLenum const GLfloat GLenum GLenum const GLint GLenum GLenum const GLdouble GLenum GLenum const GLfloat GLenum GLenum const GLint GLsizei GLuint GLfloat GLuint GLbitfield GLfloat GLint GLuint GLboolean GLenum GLfloat GLenum GLbitfield GLenum GLfloat GLfloat GLint GLint const GLfloat GLenum GLfloat GLfloat GLint GLint GLfloat GLfloat GLint GLint const GLfloat GLint GLfloat GLfloat GLint 
GLfloat GLfloat GLint GLfloat GLfloat const GLdouble const GLfloat const GLdouble const GLfloat GLint GLint GLint j
Definition: glfuncs.h:250
static int reg
Definition: i386-dis.c:1290
uacpi_status uacpi_notify_all(uacpi_namespace_node *node, uacpi_u64 value)
Definition: notify.c:69
uacpi_status uacpi_string_to_integer(const uacpi_char *str, uacpi_size max_chars, enum uacpi_base base, uacpi_u64 *out_value)
Definition: utilities.c:233
@ UACPI_BASE_HEX
Definition: utilities.h:34
uacpi_status uacpi_execute_control_method(uacpi_namespace_node *scope, uacpi_control_method *method, const uacpi_object_array *args, uacpi_object **ret)
Definition: interpreter.c:5934
uint32_t entry
Definition: isohybrid.c:63
void * uacpi_kernel_alloc(uacpi_size size)
Definition: uacpiosl.c:111
uacpi_status uacpi_kernel_wait_for_work_completion(void)
Definition: uacpiosl.c:235
uacpi_status uacpi_kernel_schedule_work(uacpi_work_type, uacpi_work_handler, uacpi_handle ctx)
Definition: uacpiosl.c:228
uacpi_status uacpi_kernel_uninstall_interrupt_handler(uacpi_interrupt_handler, uacpi_handle irq_handle)
Definition: uacpiosl.c:221
uacpi_status uacpi_kernel_install_interrupt_handler(uacpi_u32 irq, uacpi_interrupt_handler, uacpi_handle ctx, uacpi_handle *out_irq_handle)
Definition: uacpiosl.c:212
uacpi_handle uacpi_kernel_create_event(void)
Definition: uacpiosl.c:53
@ UACPI_WORK_GPE_EXECUTION
Definition: kernel_api.h:343
@ UACPI_WORK_NOTIFICATION
Definition: kernel_api.h:349
uacpi_handle uacpi_kernel_create_spinlock(void)
Definition: uacpiosl.c:85
void uacpi_kernel_signal_event(uacpi_handle)
Definition: uacpiosl.c:73
void uacpi_kernel_free_spinlock(uacpi_handle)
Definition: uacpiosl.c:92
uacpi_cpu_flags uacpi_kernel_lock_spinlock(uacpi_handle)
Definition: uacpiosl.c:98
void uacpi_kernel_unlock_spinlock(uacpi_handle, uacpi_cpu_flags)
Definition: uacpiosl.c:105
static HMODULE MODULEINFO DWORD cb
Definition: module.c:33
enum gpe_block_action action
Definition: event.c:1907
uacpi_status ret
Definition: event.c:1908
uacpi_handle ctx
Definition: event.c:32
uacpi_interrupt_handler handler
Definition: event.c:31
uacpi_u16 status_mask
Definition: event.c:27
uacpi_u8 enable_field
Definition: event.c:24
uacpi_u8 status_field
Definition: event.c:25
uacpi_u16 enable_mask
Definition: event.c:26
uacpi_u8 num_users
Definition: event.c:275
uacpi_u8 handler_type
Definition: event.c:277
uacpi_u8 block_interrupts
Definition: event.c:280
struct gpe_implicit_notify_handler * implicit_handler
Definition: event.c:266
uacpi_u8 triggering
Definition: event.c:278
struct gpe_register * reg
Definition: event.c:271
uacpi_u16 idx
Definition: event.c:272
uacpi_namespace_node * aml_handler
Definition: event.c:267
uacpi_u8 wake
Definition: event.c:279
struct gpe_native_handler * native_handler
Definition: event.c:265
uacpi_handle * any_handler
Definition: event.c:268
uacpi_u16 base_idx
Definition: event.c:312
uacpi_namespace_node * device_node
Definition: event.c:304
uacpi_u16 num_events
Definition: event.c:311
struct gpe_block * next
Definition: event.c:296
struct gp_event * events
Definition: event.c:307
struct gpe_register * registers
Definition: event.c:306
uacpi_u16 num_registers
Definition: event.c:310
struct gpe_interrupt_ctx * irq_ctx
Definition: event.c:308
struct gpe_block * prev
Definition: event.c:296
uacpi_namespace_node * device
Definition: event.c:243
struct gpe_implicit_notify_handler * next
Definition: event.c:242
uacpi_handle irq_handle
Definition: event.c:319
struct gpe_interrupt_ctx * prev
Definition: event.c:316
uacpi_u32 irq
Definition: event.c:320
struct gpe_interrupt_ctx * next
Definition: event.c:316
struct gpe_block * gpe_head
Definition: event.c:318
uacpi_u32 matched_count
Definition: event.c:885
struct gpe_block * block
Definition: event.c:884
uacpi_bool post_dynamic_table_load
Definition: event.c:886
uacpi_gpe_handler cb
Definition: event.c:228
uacpi_u8 previously_enabled
Definition: event.c:238
uacpi_u8 previous_triggering
Definition: event.c:236
uacpi_handle ctx
Definition: event.c:229
uacpi_handle previous_handler
Definition: event.c:235
uacpi_u8 previous_handler_type
Definition: event.c:237
uacpi_u16 base_idx
Definition: event.c:292
uacpi_u8 masked_mask
Definition: event.c:289
uacpi_mapped_gas enable
Definition: event.c:285
uacpi_u8 runtime_mask
Definition: event.c:287
uacpi_u8 current_mask
Definition: event.c:290
uacpi_mapped_gas status
Definition: event.c:284
uacpi_u8 wake_mask
Definition: event.c:288
uacpi_u16 idx
Definition: event.c:1135
uacpi_namespace_node * gpe_device
Definition: event.c:1134
struct gp_event * out_event
Definition: event.c:1137
struct gpe_block * out_block
Definition: event.c:1136
Definition: ps.c:97
uacpi_object_name name
Definition: namespace.h:31
uacpi_control_method * method
Definition: types.h:257
Definition: dlist.c:348
uacpi_char text[4]
Definition: types.h:24
Definition: pdh_main.c:96
wchar_t tm const _CrtWcstime_Writes_and_advances_ptr_ count wchar_t ** out
Definition: wcsftime.cpp:383
static unsigned int block
Definition: xmlmemory.c:101