Merge branch 'acpica' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6

* 'acpica' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6: (27 commits)
  ACPI / ACPICA: Simplify acpi_ev_initialize_gpe_block()
  ACPI / ACPICA: Fail acpi_gpe_wakeup() if ACPI_GPE_CAN_WAKE is unset
  ACPI / ACPICA: Do not execute _PRW methods during initialization
  ACPI: Fix bogus GPE test in acpi_bus_set_run_wake_flags()
  ACPICA: Update version to 20100702
  ACPICA: Fix for Alias references within Package objects
  ACPICA: Fix lint warning for 64-bit constant
  ACPICA: Remove obsolete GPE function
  ACPICA: Update debug output components
  ACPICA: Add support for WDDT - Watchdog Descriptor Table
  ACPICA: Drop acpi_set_gpe
  ACPICA: Use low-level GPE enable during GPE block initialization
  ACPI / EC: Do not use acpi_set_gpe
  ACPI / EC: Drop suspend and resume routines
  ACPICA: Remove wakeup GPE reference counting which is not used
  ACPICA: Introduce acpi_gpe_wakeup()
  ACPICA: Rename acpi_hw_gpe_register_bit
  ACPICA: Update version to 20100528
  ACPICA: Add signatures for undefined tables: ATKG, GSCI, IEIT
  ACPICA: Optimization: Reduce the number of namespace walks
  ...
Linus Torvalds 2010-08-07 17:08:30 -07:00
commit 9e50ab91d0
53 changed files with 471 additions and 896 deletions

View file

@ -78,7 +78,9 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node *node,
u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list);
acpi_status
acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info);
acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info);
acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info);
struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
u32 gpe_number);

View file

@ -99,13 +99,6 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_all_methods_serialized, FALSE);
*/
u8 ACPI_INIT_GLOBAL(acpi_gbl_create_osi_method, TRUE);
/*
* Disable wakeup GPEs during runtime? Default is TRUE because WAKE and
* RUNTIME GPEs should never be shared, and WAKE GPEs should typically only
* be enabled just before going to sleep.
*/
u8 ACPI_INIT_GLOBAL(acpi_gbl_leave_wake_gpes_disabled, TRUE);
/*
* Optionally use default values for the ACPI register widths. Set this to
* TRUE to use the defaults, if an FADT contains incorrect widths/lengths.

View file

@ -90,15 +90,12 @@ acpi_status acpi_hw_write_port(acpi_io_address address, u32 value, u32 width);
/*
* hwgpe - GPE support
*/
u32 acpi_hw_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info,
u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info,
struct acpi_gpe_register_info *gpe_register_info);
acpi_status
acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action);
acpi_status
acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info *gpe_event_info);
acpi_status
acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block, void *context);

View file

@ -184,8 +184,9 @@ struct acpi_namespace_node {
u8 flags; /* Miscellaneous flags */
acpi_owner_id owner_id; /* Node creator */
union acpi_name_union name; /* ACPI Name, always 4 chars per ACPI spec */
struct acpi_namespace_node *parent; /* Parent node */
struct acpi_namespace_node *child; /* First child */
struct acpi_namespace_node *peer; /* Peer. Parent if ANOBJ_END_OF_PEER_LIST set */
struct acpi_namespace_node *peer; /* First peer */
/*
* The following fields are used by the ASL compiler and disassembler only
@ -199,7 +200,7 @@ struct acpi_namespace_node {
/* Namespace Node flags */
#define ANOBJ_END_OF_PEER_LIST 0x01 /* End-of-list, Peer field points to parent */
#define ANOBJ_RESERVED 0x01 /* Available for use */
#define ANOBJ_TEMPORARY 0x02 /* Node is created by a method and is temporary */
#define ANOBJ_METHOD_ARG 0x04 /* Node is a method argument */
#define ANOBJ_METHOD_LOCAL 0x08 /* Node is a method local */
@ -428,7 +429,6 @@ struct acpi_gpe_event_info {
u8 flags; /* Misc info about this GPE */
u8 gpe_number; /* This GPE */
u8 runtime_count; /* References to a run GPE */
u8 wakeup_count; /* References to a wake GPE */
};
/* Information about a GPE register pair, one per each status/enable pair in an array */
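Editorial note (not part of this diff): the struct change above drops the old sentinel-terminated peer list, where the last peer pointed back at the parent and was flagged with ANOBJ_END_OF_PEER_LIST, in favor of an explicit parent pointer and a NULL-terminated peer list. A minimal sketch of child traversal under the new layout, assuming only the fields shown above; the helper name is illustrative:

static void ns_visit_children(struct acpi_namespace_node *parent_node)
{
	struct acpi_namespace_node *child;

	/* The peer list now ends at NULL; no end-of-list flag test is needed */
	for (child = parent_node->child; child; child = child->peer) {
		/* Every child carries an explicit back-pointer to its parent */
		ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "Child %p of parent %p\n",
				  child, child->parent));
	}
}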

View file

@ -369,11 +369,4 @@ struct acpi_namespace_node *acpi_ns_validate_handle(acpi_handle handle);
void acpi_ns_terminate(void);
struct acpi_namespace_node *acpi_ns_get_parent_node(struct acpi_namespace_node
*node);
struct acpi_namespace_node *acpi_ns_get_next_valid_node(struct
acpi_namespace_node
*node);
#endif /* __ACNAMESP_H__ */

View file

@ -91,14 +91,14 @@
/* Values for Flag byte above */
#define AOPOBJ_AML_CONSTANT 0x01
#define AOPOBJ_STATIC_POINTER 0x02
#define AOPOBJ_DATA_VALID 0x04
#define AOPOBJ_OBJECT_INITIALIZED 0x08
#define AOPOBJ_SETUP_COMPLETE 0x10
#define AOPOBJ_SINGLE_DATUM 0x20
#define AOPOBJ_INVALID 0x40 /* Used if host OS won't allow an op_region address */
#define AOPOBJ_MODULE_LEVEL 0x80
#define AOPOBJ_AML_CONSTANT 0x01 /* Integer is an AML constant */
#define AOPOBJ_STATIC_POINTER 0x02 /* Data is part of an ACPI table, don't delete */
#define AOPOBJ_DATA_VALID 0x04 /* Object is initialized and data is valid */
#define AOPOBJ_OBJECT_INITIALIZED 0x08 /* Region is initialized, _REG was run */
#define AOPOBJ_SETUP_COMPLETE 0x10 /* Region setup is complete */
#define AOPOBJ_INVALID 0x20 /* Host OS won't allow a Region address */
#define AOPOBJ_MODULE_LEVEL 0x40 /* Method is actually module-level code */
#define AOPOBJ_MODIFIED_NAMESPACE 0x80 /* Method modified the namespace */
/******************************************************************************
*

View file

@ -503,15 +503,16 @@ static const union acpi_predefined_info predefined_names[] =
{{"_WAK", 1, ACPI_RTYPE_NONE | ACPI_RTYPE_INTEGER | ACPI_RTYPE_PACKAGE}},
{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 2,0}, 0,0}}, /* Fixed-length (2 Int), but is optional */
{{{0,0,0,0}, 0,0}} /* Table terminator */
/* _WDG/_WED are MS extensions defined by "Windows Instrumentation" */
{{"_WDG", 0, ACPI_RTYPE_BUFFER}},
{{"_WED", 1,
ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_BUFFER}},
{{{0, 0, 0, 0}, 0, 0}} /* Table terminator */
};
#if 0
/* Not implemented */
{{"_WDG", 0, ACPI_RTYPE_BUFFER}}, /* MS Extension */
{{"_WED", 1, ACPI_RTYPE_PACKAGE}}, /* MS Extension */
/* This is an internally implemented control method, no need to check */
{{"_OSI", 1, ACPI_RTYPE_INTEGER}},

View file

@ -127,22 +127,22 @@ struct acpi_walk_state {
acpi_parse_upwards ascending_callback;
};
/* Info used by acpi_ps_init_objects */
/* Info used by acpi_ns_initialize_objects and acpi_ds_initialize_objects */
struct acpi_init_walk_info {
u16 method_count;
u16 device_count;
u16 op_region_count;
u16 field_count;
u16 buffer_count;
u16 package_count;
u16 op_region_init;
u16 field_init;
u16 buffer_init;
u16 package_init;
u16 object_count;
acpi_owner_id owner_id;
u32 table_index;
u32 object_count;
u32 method_count;
u32 device_count;
u32 op_region_count;
u32 field_count;
u32 buffer_count;
u32 package_count;
u32 op_region_init;
u32 field_init;
u32 buffer_init;
u32 package_init;
acpi_owner_id owner_id;
};
struct acpi_get_devices_info {
@ -201,11 +201,11 @@ struct acpi_evaluate_info {
/* Info used by acpi_ns_initialize_devices */
struct acpi_device_walk_info {
u16 device_count;
u16 num_STA;
u16 num_INI;
struct acpi_table_desc *table_desc;
struct acpi_evaluate_info *evaluate_info;
u32 device_count;
u32 num_STA;
u32 num_INI;
};
/* TBD: [Restructure] Merge with struct above */

View file

@ -171,12 +171,12 @@ acpi_ds_initialize_objects(u32 table_index,
"**** Starting initialization of namespace objects ****\n"));
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "Parsing all Control Methods:"));
info.method_count = 0;
info.op_region_count = 0;
info.object_count = 0;
info.device_count = 0;
info.table_index = table_index;
/* Set all init info to zero */
ACPI_MEMSET(&info, 0, sizeof(struct acpi_init_walk_info));
info.owner_id = owner_id;
info.table_index = table_index;
/* Walk entire namespace from the supplied root */
@ -204,13 +204,13 @@ acpi_ds_initialize_objects(u32 table_index,
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
"\nTable [%4.4s](id %4.4X) - %hd Objects with %hd Devices %hd Methods %hd Regions\n",
"\nTable [%4.4s](id %4.4X) - %u Objects with %u Devices %u Methods %u Regions\n",
table->signature, owner_id, info.object_count,
info.device_count, info.method_count,
info.op_region_count));
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"%hd Methods, %hd Regions\n", info.method_count,
"%u Methods, %u Regions\n", info.method_count,
info.op_region_count));
return_ACPI_STATUS(AE_OK);

View file

@ -584,8 +584,22 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
* want to make the objects permanent.
*/
if (!(method_desc->method.flags & AOPOBJ_MODULE_LEVEL)) {
acpi_ns_delete_namespace_by_owner(method_desc->method.
owner_id);
/* Delete any direct children of (created by) this method */
acpi_ns_delete_namespace_subtree(walk_state->
method_node);
/*
* Delete any objects that were created by this method
* elsewhere in the namespace (if any were created).
*/
if (method_desc->method.
flags & AOPOBJ_MODIFIED_NAMESPACE) {
acpi_ns_delete_namespace_by_owner(method_desc->
method.
owner_id);
}
}
}
@ -605,7 +619,7 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
* we immediately reuse it for the next thread executing this method
*/
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"*** Completed execution of one thread, %d threads remaining\n",
"*** Completed execution of one thread, %u threads remaining\n",
method_desc->method.thread_count));
} else {
/* This is the only executing thread for this method */

View file

@ -102,8 +102,7 @@ void acpi_ds_method_data_init(struct acpi_walk_state *walk_state)
walk_state->arguments[i].name.integer |= (i << 24);
walk_state->arguments[i].descriptor_type = ACPI_DESC_TYPE_NAMED;
walk_state->arguments[i].type = ACPI_TYPE_ANY;
walk_state->arguments[i].flags =
ANOBJ_END_OF_PEER_LIST | ANOBJ_METHOD_ARG;
walk_state->arguments[i].flags = ANOBJ_METHOD_ARG;
}
/* Init the method locals */
@ -116,8 +115,7 @@ void acpi_ds_method_data_init(struct acpi_walk_state *walk_state)
walk_state->local_variables[i].descriptor_type =
ACPI_DESC_TYPE_NAMED;
walk_state->local_variables[i].type = ACPI_TYPE_ANY;
walk_state->local_variables[i].flags =
ANOBJ_END_OF_PEER_LIST | ANOBJ_METHOD_LOCAL;
walk_state->local_variables[i].flags = ANOBJ_METHOD_LOCAL;
}
return_VOID;
@ -146,7 +144,7 @@ void acpi_ds_method_data_delete_all(struct acpi_walk_state *walk_state)
for (index = 0; index < ACPI_METHOD_NUM_LOCALS; index++) {
if (walk_state->local_variables[index].object) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Deleting Local%d=%p\n",
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Deleting Local%u=%p\n",
index,
walk_state->local_variables[index].
object));
@ -162,7 +160,7 @@ void acpi_ds_method_data_delete_all(struct acpi_walk_state *walk_state)
for (index = 0; index < ACPI_METHOD_NUM_ARGS; index++) {
if (walk_state->arguments[index].object) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Deleting Arg%d=%p\n",
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Deleting Arg%u=%p\n",
index,
walk_state->arguments[index].object));
@ -226,7 +224,7 @@ acpi_ds_method_data_init_args(union acpi_operand_object **params,
index++;
}
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%d args passed to method\n", index));
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%u args passed to method\n", index));
return_ACPI_STATUS(AE_OK);
}
@ -323,7 +321,7 @@ acpi_ds_method_data_set_value(u8 type,
ACPI_FUNCTION_TRACE(ds_method_data_set_value);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"NewObj %p Type %2.2X, Refs=%d [%s]\n", object,
"NewObj %p Type %2.2X, Refs=%u [%s]\n", object,
type, object->common.reference_count,
acpi_ut_get_type_name(object->common.type)));
@ -543,7 +541,7 @@ acpi_ds_store_object_to_local(u8 type,
union acpi_operand_object *new_obj_desc;
ACPI_FUNCTION_TRACE(ds_store_object_to_local);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Type=%2.2X Index=%d Obj=%p\n",
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Type=%2.2X Index=%u Obj=%p\n",
type, index, obj_desc));
/* Parameter validation */

View file

@ -81,6 +81,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
{
union acpi_operand_object *obj_desc;
acpi_status status;
acpi_object_type type;
ACPI_FUNCTION_TRACE(ds_build_internal_object);
@ -172,7 +173,20 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
return_ACPI_STATUS(status);
}
switch (op->common.node->type) {
/*
* Special handling for Alias objects. We need to setup the type
* and the Op->Common.Node to point to the Alias target. Note,
* Alias has at most one level of indirection internally.
*/
type = op->common.node->type;
if (type == ACPI_TYPE_LOCAL_ALIAS) {
type = obj_desc->common.type;
op->common.node =
ACPI_CAST_PTR(struct acpi_namespace_node,
op->common.node->object);
}
switch (type) {
/*
* For these types, we need the actual node, not the subobject.
* However, the subobject did not get an extra reference count above.

View file

@ -213,7 +213,7 @@ acpi_ds_get_buffer_field_arguments(union acpi_operand_object *obj_desc)
/* Execute the AML code for the term_arg arguments */
status = acpi_ds_execute_arguments(node, acpi_ns_get_parent_node(node),
status = acpi_ds_execute_arguments(node, node->parent,
extra_desc->extra.aml_length,
extra_desc->extra.aml_start);
return_ACPI_STATUS(status);
@ -257,7 +257,7 @@ acpi_ds_get_bank_field_arguments(union acpi_operand_object *obj_desc)
/* Execute the AML code for the term_arg arguments */
status = acpi_ds_execute_arguments(node, acpi_ns_get_parent_node(node),
status = acpi_ds_execute_arguments(node, node->parent,
extra_desc->extra.aml_length,
extra_desc->extra.aml_start);
return_ACPI_STATUS(status);
@ -394,7 +394,7 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
/* Execute the argument AML */
status = acpi_ds_execute_arguments(node, acpi_ns_get_parent_node(node),
status = acpi_ds_execute_arguments(node, node->parent,
extra_desc->extra.aml_length,
extra_desc->extra.aml_start);
if (ACPI_FAILURE(status)) {

View file

@ -746,7 +746,7 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
index--;
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"Arg #%d (%p) done, Arg1=%p\n", index, arg,
"Arg #%u (%p) done, Arg1=%p\n", index, arg,
first_arg));
}
@ -760,7 +760,7 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
*/
acpi_ds_obj_stack_pop_and_delete(arg_count, walk_state);
ACPI_EXCEPTION((AE_INFO, status, "While creating Arg %d", index));
ACPI_EXCEPTION((AE_INFO, status, "While creating Arg %u", index));
return_ACPI_STATUS(status);
}

View file

@ -102,9 +102,8 @@ acpi_status acpi_ev_initialize_events(void)
* RETURN: Status
*
* DESCRIPTION: Completes initialization of the FADT-defined GPE blocks
* (0 and 1). This causes the _PRW methods to be run, so the HW
* must be fully initialized at this point, including global lock
* support.
* (0 and 1). The HW must be fully initialized at this point,
* including global lock support.
*
******************************************************************************/

View file

@ -54,51 +54,86 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
/*******************************************************************************
*
* FUNCTION: acpi_ev_update_gpe_enable_masks
* FUNCTION: acpi_ev_update_gpe_enable_mask
*
* PARAMETERS: gpe_event_info - GPE to update
*
* RETURN: Status
*
* DESCRIPTION: Updates GPE register enable masks based upon whether there are
* references (either wake or run) to this GPE
* DESCRIPTION: Updates GPE register enable mask based upon whether there are
* runtime references to this GPE
*
******************************************************************************/
acpi_status
acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info)
acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
{
struct acpi_gpe_register_info *gpe_register_info;
u32 register_bit;
ACPI_FUNCTION_TRACE(ev_update_gpe_enable_masks);
ACPI_FUNCTION_TRACE(ev_update_gpe_enable_mask);
gpe_register_info = gpe_event_info->register_info;
if (!gpe_register_info) {
return_ACPI_STATUS(AE_NOT_EXIST);
}
register_bit = acpi_hw_gpe_register_bit(gpe_event_info,
register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info,
gpe_register_info);
/* Clear the wake/run bits up front */
/* Clear the run bit up front */
ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, register_bit);
ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
/* Set the mask bits only if there are references to this GPE */
/* Set the mask bit only if there are references to this GPE */
if (gpe_event_info->runtime_count) {
ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit);
}
if (gpe_event_info->wakeup_count) {
ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit);
ACPI_SET_BIT(gpe_register_info->enable_for_run, (u8)register_bit);
}
return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_enable_gpe
*
* PARAMETERS: gpe_event_info - GPE to enable
*
* RETURN: Status
*
* DESCRIPTION: Clear the given GPE from stale events and enable it.
*
******************************************************************************/
acpi_status
acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
acpi_status status;
ACPI_FUNCTION_TRACE(ev_enable_gpe);
/*
* We will only allow a GPE to be enabled if it has either an
* associated method (_Lxx/_Exx) or a handler. Otherwise, the
* GPE will be immediately disabled by acpi_ev_gpe_dispatch the
* first time it fires.
*/
if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) {
return_ACPI_STATUS(AE_NO_HANDLER);
}
/* Clear the GPE (of stale events) */
status = acpi_hw_clear_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Enable the requested GPE */
status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
@ -417,8 +452,12 @@ static void acpi_ev_asynch_enable_gpe(void *context)
}
}
/* Enable this GPE */
(void)acpi_hw_write_gpe_enable_reg(gpe_event_info);
/*
* Enable this GPE, conditionally. This means that the GPE will only be
* physically enabled if the enable_for_run bit is set in the event_info
*/
(void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_COND_ENABLE);
return_VOID;
}

View file

@ -439,8 +439,6 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
{
acpi_status status;
struct acpi_gpe_event_info *gpe_event_info;
struct acpi_gpe_walk_info walk_info;
u32 wake_gpe_count;
u32 gpe_enabled_count;
u32 gpe_index;
u32 gpe_number;
@ -456,37 +454,9 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
}
/*
* Runtime option: Should wake GPEs be enabled at runtime? The default
* is no, they should only be enabled just as the machine goes to sleep.
* Enable all GPEs that have a corresponding method. Any other GPEs
* within this block must be enabled via the acpi_enable_gpe interface.
*/
if (acpi_gbl_leave_wake_gpes_disabled) {
/*
* Differentiate runtime vs wake GPEs, via the _PRW control methods.
* Each GPE that has one or more _PRWs that reference it is by
* definition a wake GPE and will not be enabled while the machine
* is running.
*/
walk_info.gpe_block = gpe_block;
walk_info.gpe_device = gpe_device;
walk_info.execute_by_owner_id = FALSE;
status =
acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
acpi_ev_match_prw_and_gpe, NULL,
&walk_info, NULL);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"While executing _PRW methods"));
}
}
/*
* Enable all GPEs that have a corresponding method and are not
* capable of generating wakeups. Any other GPEs within this block
* must be enabled via the acpi_enable_gpe interface.
*/
wake_gpe_count = 0;
gpe_enabled_count = 0;
if (gpe_device == acpi_gbl_fadt_gpe_device) {
@ -502,35 +472,21 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
gpe_event_info = &gpe_block->event_info[gpe_index];
gpe_number = gpe_index + gpe_block->block_base_number;
/*
* If the GPE has already been enabled for runtime
* signaling, make sure it remains enabled, but do not
* increment its reference counter.
*/
if (gpe_event_info->runtime_count) {
acpi_set_gpe(gpe_device, gpe_number,
ACPI_GPE_ENABLE);
gpe_enabled_count++;
continue;
}
if (gpe_event_info->flags & ACPI_GPE_CAN_WAKE) {
wake_gpe_count++;
if (acpi_gbl_leave_wake_gpes_disabled) {
continue;
}
}
/* Ignore GPEs that have no corresponding _Lxx/_Exx method */
if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD)) {
continue;
}
/* Enable this GPE */
/*
* If the GPE has already been enabled for runtime
* signaling, make sure it remains enabled, but do not
* increment its reference counter.
*/
status = gpe_event_info->runtime_count ?
acpi_ev_enable_gpe(gpe_event_info) :
acpi_enable_gpe(gpe_device, gpe_number);
status = acpi_enable_gpe(gpe_device, gpe_number,
ACPI_GPE_TYPE_RUNTIME);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Could not enable GPE 0x%02X",
@ -542,10 +498,10 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
}
}
if (gpe_enabled_count || wake_gpe_count) {
if (gpe_enabled_count) {
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
"Enabled %u Runtime GPEs, added %u Wake GPEs in this block\n",
gpe_enabled_count, wake_gpe_count));
"Enabled %u GPEs in this block\n",
gpe_enabled_count));
}
return_ACPI_STATUS(AE_OK);

View file

@ -211,9 +211,7 @@ acpi_status acpi_ev_gpe_initialize(void)
* DESCRIPTION: Check for new GPE methods (_Lxx/_Exx) made available as a
* result of a Load() or load_table() operation. If new GPE
* methods have been installed, register the new methods and
* enable any runtime GPEs that are associated with them. Also,
* run any newly loaded _PRW methods in order to discover any
* new CAN_WAKE GPEs.
* enable any runtime GPEs that are associated with them.
*
******************************************************************************/
@ -223,49 +221,12 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
struct acpi_gpe_block_info *gpe_block;
struct acpi_gpe_walk_info walk_info;
acpi_status status = AE_OK;
u32 new_wake_gpe_count = 0;
/* We will examine only _PRW/_Lxx/_Exx methods owned by this table */
walk_info.owner_id = table_owner_id;
walk_info.execute_by_owner_id = TRUE;
walk_info.count = 0;
if (acpi_gbl_leave_wake_gpes_disabled) {
/*
* 1) Run any newly-loaded _PRW methods to find any GPEs that
* can now be marked as CAN_WAKE GPEs. Note: We must run the
* _PRW methods before we process the _Lxx/_Exx methods because
* we will enable all runtime GPEs associated with the new
* _Lxx/_Exx methods at the time we process those methods.
*
* Unlock interpreter so that we can run the _PRW methods.
*/
walk_info.gpe_block = NULL;
walk_info.gpe_device = NULL;
acpi_ex_exit_interpreter();
status =
acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX,
ACPI_NS_WALK_NO_UNLOCK,
acpi_ev_match_prw_and_gpe, NULL,
&walk_info, NULL);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"While executing _PRW methods"));
}
acpi_ex_enter_interpreter();
new_wake_gpe_count = walk_info.count;
}
/*
* 2) Find any _Lxx/_Exx GPE methods that have just been loaded.
*
* Any GPEs that correspond to new _Lxx/_Exx methods and are not
* marked as CAN_WAKE are immediately enabled.
* Any GPEs that correspond to new _Lxx/_Exx methods are immediately
* enabled.
*
* Examine the namespace underneath each gpe_device within the
* gpe_block lists.
@ -275,6 +236,8 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
return;
}
walk_info.owner_id = table_owner_id;
walk_info.execute_by_owner_id = TRUE;
walk_info.count = 0;
walk_info.enable_this_gpe = TRUE;
@ -307,10 +270,8 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
gpe_xrupt_info = gpe_xrupt_info->next;
}
if (walk_info.count || new_wake_gpe_count) {
ACPI_INFO((AE_INFO,
"Enabled %u new runtime GPEs, added %u new wakeup GPEs",
walk_info.count, new_wake_gpe_count));
if (walk_info.count) {
ACPI_INFO((AE_INFO, "Enabled %u new GPEs", walk_info.count));
}
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
@ -386,9 +347,6 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
/*
* 3) Edge/Level determination is based on the 2nd character
* of the method name
*
* NOTE: Default GPE type is RUNTIME only. Later, if a _PRW object is
* found that points to this GPE, the ACPI_GPE_CAN_WAKE flag is set.
*/
switch (name[1]) {
case 'L':
@ -471,24 +429,18 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
*/
if (walk_info->enable_this_gpe) {
/* Ignore GPEs that can wake the system */
walk_info->count++;
gpe_device = walk_info->gpe_device;
if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE) ||
!acpi_gbl_leave_wake_gpes_disabled) {
walk_info->count++;
gpe_device = walk_info->gpe_device;
if (gpe_device == acpi_gbl_fadt_gpe_device) {
gpe_device = NULL;
}
if (gpe_device == acpi_gbl_fadt_gpe_device) {
gpe_device = NULL;
}
status = acpi_enable_gpe(gpe_device, gpe_number,
ACPI_GPE_TYPE_RUNTIME);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Could not enable GPE 0x%02X",
gpe_number));
}
status = acpi_enable_gpe(gpe_device, gpe_number);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Could not enable GPE 0x%02X",
gpe_number));
}
}
@ -497,157 +449,3 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
name, gpe_number));
return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_match_prw_and_gpe
*
* PARAMETERS: Callback from walk_namespace
*
* RETURN: Status. NOTE: We ignore errors so that the _PRW walk is
* not aborted on a single _PRW failure.
*
* DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
* Device. Run the _PRW method. If present, extract the GPE
* number and mark the GPE as a CAN_WAKE GPE. Allows a
* per-owner_id execution if execute_by_owner_id is TRUE in the
* walk_info parameter block.
*
* If walk_info->execute_by_owner_id is TRUE, we only execute _PRWs with that
* owner.
* If walk_info->gpe_device is NULL, we execute every _PRW found. Otherwise,
* we only execute _PRWs that refer to the input gpe_device.
*
******************************************************************************/
acpi_status
acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
u32 level, void *context, void **return_value)
{
struct acpi_gpe_walk_info *walk_info =
ACPI_CAST_PTR(struct acpi_gpe_walk_info, context);
struct acpi_namespace_node *gpe_device;
struct acpi_gpe_block_info *gpe_block;
struct acpi_namespace_node *target_gpe_device;
struct acpi_namespace_node *prw_node;
struct acpi_gpe_event_info *gpe_event_info;
union acpi_operand_object *pkg_desc;
union acpi_operand_object *obj_desc;
u32 gpe_number;
acpi_status status;
ACPI_FUNCTION_TRACE(ev_match_prw_and_gpe);
/* Check for a _PRW method under this device */
status = acpi_ns_get_node(obj_handle, METHOD_NAME__PRW,
ACPI_NS_NO_UPSEARCH, &prw_node);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(AE_OK);
}
/* Check if requested owner_id matches this owner_id */
if ((walk_info->execute_by_owner_id) &&
(prw_node->owner_id != walk_info->owner_id)) {
return_ACPI_STATUS(AE_OK);
}
/* Execute the _PRW */
status = acpi_ut_evaluate_object(prw_node, NULL,
ACPI_BTYPE_PACKAGE, &pkg_desc);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(AE_OK);
}
/* The returned _PRW package must have at least two elements */
if (pkg_desc->package.count < 2) {
goto cleanup;
}
/* Extract pointers from the input context */
gpe_device = walk_info->gpe_device;
gpe_block = walk_info->gpe_block;
/*
* The _PRW object must return a package, we are only interested
* in the first element
*/
obj_desc = pkg_desc->package.elements[0];
if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
/* Use FADT-defined GPE device (from definition of _PRW) */
target_gpe_device = NULL;
if (gpe_device) {
target_gpe_device = acpi_gbl_fadt_gpe_device;
}
/* Integer is the GPE number in the FADT described GPE blocks */
gpe_number = (u32)obj_desc->integer.value;
} else if (obj_desc->common.type == ACPI_TYPE_PACKAGE) {
/* Package contains a GPE reference and GPE number within a GPE block */
if ((obj_desc->package.count < 2) ||
((obj_desc->package.elements[0])->common.type !=
ACPI_TYPE_LOCAL_REFERENCE) ||
((obj_desc->package.elements[1])->common.type !=
ACPI_TYPE_INTEGER)) {
goto cleanup;
}
/* Get GPE block reference and decode */
target_gpe_device =
obj_desc->package.elements[0]->reference.node;
gpe_number = (u32)obj_desc->package.elements[1]->integer.value;
} else {
/* Unknown type, just ignore it */
goto cleanup;
}
/* Get the gpe_event_info for this GPE */
if (gpe_device) {
/*
* Is this GPE within this block?
*
* TRUE if and only if these conditions are true:
* 1) The GPE devices match.
* 2) The GPE index(number) is within the range of the Gpe Block
* associated with the GPE device.
*/
if (gpe_device != target_gpe_device) {
goto cleanup;
}
gpe_event_info =
acpi_ev_low_get_gpe_info(gpe_number, gpe_block);
} else {
/* gpe_device is NULL, just match the target_device and gpe_number */
gpe_event_info =
acpi_ev_get_gpe_event_info(target_gpe_device, gpe_number);
}
if (gpe_event_info) {
if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
/* This GPE can wake the system */
gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
walk_info->count++;
}
}
cleanup:
acpi_ut_remove_reference(pkg_desc);
return_ACPI_STATUS(AE_OK);
}

View file

@ -199,7 +199,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
return_ACPI_STATUS(status);
}
parent_node = acpi_ns_get_parent_node(region_obj->region.node);
parent_node = region_obj->region.node->parent;
/*
* Get the _SEG and _BBN values from the device upon which the handler
@ -248,7 +248,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
break;
}
pci_root_node = acpi_ns_get_parent_node(pci_root_node);
pci_root_node = pci_root_node->parent;
}
/* PCI root bridge not found, use namespace root node */
@ -280,7 +280,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
*/
pci_device_node = region_obj->region.node;
while (pci_device_node && (pci_device_node->type != ACPI_TYPE_DEVICE)) {
pci_device_node = acpi_ns_get_parent_node(pci_device_node);
pci_device_node = pci_device_node->parent;
}
if (!pci_device_node) {
@ -521,7 +521,7 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
return_ACPI_STATUS(AE_NOT_EXIST);
}
node = acpi_ns_get_parent_node(region_obj->region.node);
node = region_obj->region.node->parent;
space_id = region_obj->region.space_id;
/* Setup defaults */
@ -654,7 +654,7 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
/* This node does not have the handler we need; Pop up one level */
node = acpi_ns_get_parent_node(node);
node = node->parent;
}
/* If we get here, there is no handler for this region */

View file

@ -213,101 +213,71 @@ ACPI_EXPORT_SYMBOL(acpi_enable_event)
/*******************************************************************************
*
* FUNCTION: acpi_clear_and_enable_gpe
*
* PARAMETERS: gpe_event_info - GPE to enable
*
* RETURN: Status
*
* DESCRIPTION: Clear the given GPE from stale events and enable it.
*
******************************************************************************/
static acpi_status
acpi_clear_and_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
acpi_status status;
/*
* We will only allow a GPE to be enabled if it has either an
* associated method (_Lxx/_Exx) or a handler. Otherwise, the
* GPE will be immediately disabled by acpi_ev_gpe_dispatch the
* first time it fires.
*/
if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) {
return_ACPI_STATUS(AE_NO_HANDLER);
}
/* Clear the GPE (of stale events) */
status = acpi_hw_clear_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Enable the requested GPE */
status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_set_gpe
* FUNCTION: acpi_gpe_wakeup
*
* PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
* gpe_number - GPE level within the GPE block
* action - ACPI_GPE_ENABLE or ACPI_GPE_DISABLE
* Action - Enable or Disable
*
* RETURN: Status
*
* DESCRIPTION: Enable or disable an individual GPE. This function bypasses
* the reference count mechanism used in the acpi_enable_gpe and
* acpi_disable_gpe interfaces -- and should be used with care.
*
* Note: Typically used to disable a runtime GPE for a short period of time,
* then re-enable it, without disturbing the existing reference counts. This
* is useful, for example, in the Embedded Controller (EC) driver.
* DESCRIPTION: Set or clear the GPE's wakeup enable mask bit.
*
******************************************************************************/
acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action)
acpi_status acpi_gpe_wakeup(acpi_handle gpe_device, u32 gpe_number, u8 action)
{
acpi_status status = AE_OK;
struct acpi_gpe_event_info *gpe_event_info;
acpi_status status;
struct acpi_gpe_register_info *gpe_register_info;
acpi_cpu_flags flags;
u32 register_bit;
ACPI_FUNCTION_TRACE(acpi_set_gpe);
ACPI_FUNCTION_TRACE(acpi_gpe_wakeup);
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
/* Ensure that we have a valid GPE number */
gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
if (!gpe_event_info) {
if (!gpe_event_info || !(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
}
gpe_register_info = gpe_event_info->register_info;
if (!gpe_register_info) {
status = AE_NOT_EXIST;
goto unlock_and_exit;
}
register_bit =
acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info);
/* Perform the action */
switch (action) {
case ACPI_GPE_ENABLE:
status = acpi_clear_and_enable_gpe(gpe_event_info);
ACPI_SET_BIT(gpe_register_info->enable_for_wake,
(u8)register_bit);
break;
case ACPI_GPE_DISABLE:
status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
(u8)register_bit);
break;
default:
ACPI_ERROR((AE_INFO, "%u, Invalid action", action));
status = AE_BAD_PARAMETER;
break;
}
unlock_and_exit:
unlock_and_exit:
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_set_gpe)
ACPI_EXPORT_SYMBOL(acpi_gpe_wakeup)
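Editorial sketch (not part of this diff): assumed usage of the new acpi_gpe_wakeup() interface that replaces acpi_set_gpe() for wake handling. The GPE number and function names below are placeholders; the prototype and the ACPI_GPE_ENABLE/ACPI_GPE_DISABLE actions are taken from the code above, and acpi_gpe_wakeup() fails with AE_BAD_PARAMETER unless ACPI_GPE_CAN_WAKE has been set for the GPE.

#include <acpi/acpi.h>		/* assumed include for the acpi_gpe_wakeup() prototype */

static u32 my_wake_gpe = 0x10;	/* hypothetical GPE number, e.g. taken from _PRW */

/* Arm the wake enable mask bit for our GPE just before a sleep transition */
static acpi_status my_prepare_to_sleep(void)
{
	return acpi_gpe_wakeup(NULL, my_wake_gpe, ACPI_GPE_ENABLE);
}

/* Disarm it again on resume; neither call touches the runtime reference count */
static acpi_status my_resume(void)
{
	return acpi_gpe_wakeup(NULL, my_wake_gpe, ACPI_GPE_DISABLE);
}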
/*******************************************************************************
*
@ -315,17 +285,14 @@ ACPI_EXPORT_SYMBOL(acpi_set_gpe)
*
* PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
* gpe_number - GPE level within the GPE block
* gpe_type - ACPI_GPE_TYPE_RUNTIME or ACPI_GPE_TYPE_WAKE
* or both
*
* RETURN: Status
*
* DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is
* hardware-enabled (for runtime GPEs), or the GPE register mask
* is updated (for wake GPEs).
* hardware-enabled.
*
******************************************************************************/
acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type)
acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
{
acpi_status status = AE_OK;
struct acpi_gpe_event_info *gpe_event_info;
@ -333,12 +300,6 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type)
ACPI_FUNCTION_TRACE(acpi_enable_gpe);
/* Parameter validation */
if (!gpe_type || (gpe_type & ~ACPI_GPE_TYPE_WAKE_RUN)) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
/* Ensure that we have a valid GPE number */
@ -349,46 +310,19 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type)
goto unlock_and_exit;
}
if (gpe_type & ACPI_GPE_TYPE_RUNTIME) {
if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) {
status = AE_LIMIT; /* Too many references */
goto unlock_and_exit;
}
gpe_event_info->runtime_count++;
if (gpe_event_info->runtime_count == 1) {
status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
if (ACPI_SUCCESS(status)) {
status = acpi_clear_and_enable_gpe(gpe_event_info);
}
if (ACPI_FAILURE(status)) {
gpe_event_info->runtime_count--;
goto unlock_and_exit;
}
}
if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) {
status = AE_LIMIT; /* Too many references */
goto unlock_and_exit;
}
if (gpe_type & ACPI_GPE_TYPE_WAKE) {
/* The GPE must have the ability to wake the system */
if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
status = AE_TYPE;
goto unlock_and_exit;
gpe_event_info->runtime_count++;
if (gpe_event_info->runtime_count == 1) {
status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
if (ACPI_SUCCESS(status)) {
status = acpi_ev_enable_gpe(gpe_event_info);
}
if (gpe_event_info->wakeup_count == ACPI_UINT8_MAX) {
status = AE_LIMIT; /* Too many references */
goto unlock_and_exit;
}
/*
* Update the enable mask on the first wakeup reference. Wake GPEs
* are only hardware-enabled just before sleeping.
*/
gpe_event_info->wakeup_count++;
if (gpe_event_info->wakeup_count == 1) {
status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
if (ACPI_FAILURE(status)) {
gpe_event_info->runtime_count--;
}
}
@ -404,8 +338,6 @@ ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
*
* PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
* gpe_number - GPE level within the GPE block
* gpe_type - ACPI_GPE_TYPE_RUNTIME or ACPI_GPE_TYPE_WAKE
* or both
*
* RETURN: Status
*
@ -414,7 +346,7 @@ ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
* the GPE mask bit disabled (for wake GPEs)
*
******************************************************************************/
acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type)
acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
{
acpi_status status = AE_OK;
struct acpi_gpe_event_info *gpe_event_info;
@ -422,12 +354,6 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type
ACPI_FUNCTION_TRACE(acpi_disable_gpe);
/* Parameter validation */
if (!gpe_type || (gpe_type & ~ACPI_GPE_TYPE_WAKE_RUN)) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
/* Ensure that we have a valid GPE number */
@ -440,41 +366,21 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type
/* Hardware-disable a runtime GPE on removal of the last reference */
if (gpe_type & ACPI_GPE_TYPE_RUNTIME) {
if (!gpe_event_info->runtime_count) {
status = AE_LIMIT; /* There are no references to remove */
goto unlock_and_exit;
}
gpe_event_info->runtime_count--;
if (!gpe_event_info->runtime_count) {
status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
if (ACPI_SUCCESS(status)) {
status = acpi_hw_low_set_gpe(gpe_event_info,
ACPI_GPE_DISABLE);
}
if (ACPI_FAILURE(status)) {
gpe_event_info->runtime_count++;
goto unlock_and_exit;
}
}
if (!gpe_event_info->runtime_count) {
status = AE_LIMIT; /* There are no references to remove */
goto unlock_and_exit;
}
/*
* Update masks for wake GPE on removal of the last reference.
* No need to hardware-disable wake GPEs here, they are not currently
* enabled.
*/
if (gpe_type & ACPI_GPE_TYPE_WAKE) {
if (!gpe_event_info->wakeup_count) {
status = AE_LIMIT; /* There are no references to remove */
goto unlock_and_exit;
gpe_event_info->runtime_count--;
if (!gpe_event_info->runtime_count) {
status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
if (ACPI_SUCCESS(status)) {
status =
acpi_hw_low_set_gpe(gpe_event_info,
ACPI_GPE_DISABLE);
}
gpe_event_info->wakeup_count--;
if (!gpe_event_info->wakeup_count) {
status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
if (ACPI_FAILURE(status)) {
gpe_event_info->runtime_count++;
}
}
@ -484,6 +390,59 @@ unlock_and_exit:
}
ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
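Editorial sketch (not part of this diff): with the gpe_type argument gone from both interfaces above, callers simply pair acpi_enable_gpe() with acpi_disable_gpe(); the GPE is hardware-enabled only when its runtime_count goes from 0 to 1 and hardware-disabled only when it drops back to 0. The helper names below are placeholders.

/* Take a runtime reference; the first reference clears and enables the GPE */
static acpi_status my_claim_gpe(acpi_handle gpe_device, u32 gpe_number)
{
	return acpi_enable_gpe(gpe_device, gpe_number);
}

/* Drop the reference; removing the last one hardware-disables the GPE again */
static void my_release_gpe(acpi_handle gpe_device, u32 gpe_number)
{
	acpi_status status = acpi_disable_gpe(gpe_device, gpe_number);

	if (ACPI_FAILURE(status))
		ACPI_EXCEPTION((AE_INFO, status,
				"Could not release GPE 0x%02X", gpe_number));
}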
/*******************************************************************************
*
* FUNCTION: acpi_gpe_can_wake
*
* PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
* gpe_number - GPE level within the GPE block
*
* RETURN: Status
*
* DESCRIPTION: Set the ACPI_GPE_CAN_WAKE flag for the given GPE. If the GPE
* has a corresponding method and is currently enabled, disable it
* (GPEs with corresponding methods are enabled unconditionally
* during initialization, but GPEs that can wake up are expected
* to be initially disabled).
*
******************************************************************************/
acpi_status acpi_gpe_can_wake(acpi_handle gpe_device, u32 gpe_number)
{
acpi_status status = AE_OK;
struct acpi_gpe_event_info *gpe_event_info;
acpi_cpu_flags flags;
u8 disable = 0;
ACPI_FUNCTION_TRACE(acpi_gpe_can_wake);
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
/* Ensure that we have a valid GPE number */
gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
if (!gpe_event_info) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
}
if (gpe_event_info->flags & ACPI_GPE_CAN_WAKE) {
goto unlock_and_exit;
}
gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
disable = (gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD)
&& gpe_event_info->runtime_count;
unlock_and_exit:
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
if (disable)
status = acpi_disable_gpe(gpe_device, gpe_number);
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_gpe_can_wake)
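Editorial sketch (not part of this diff): assumed placement of the new acpi_gpe_can_wake() call, made once after a _PRW object is found to reference a GPE and before any acpi_gpe_wakeup() manipulation of its wake mask. The helper name is illustrative.

/* Mark a _PRW-referenced GPE as wake-capable; if it was auto-enabled only
 * because it has an _Lxx/_Exx method, this disables it again, per the
 * description above.
 */
static void my_mark_wake_gpe(acpi_handle gpe_device, u32 gpe_number)
{
	acpi_status status = acpi_gpe_can_wake(gpe_device, gpe_number);

	if (ACPI_FAILURE(status))
		ACPI_EXCEPTION((AE_INFO, status,
				"Could not mark GPE 0x%02X wake-capable",
				gpe_number));
}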
/*******************************************************************************
*
* FUNCTION: acpi_disable_event
@ -800,7 +759,7 @@ acpi_install_gpe_block(acpi_handle gpe_device,
obj_desc->device.gpe_block = gpe_block;
/* Run the _PRW methods and enable the runtime GPEs in the new block */
/* Enable the runtime GPEs in the new block */
status = acpi_ev_initialize_gpe_block(node, gpe_block);

View file

@ -120,7 +120,7 @@ acpi_ex_add_table(u32 table_index,
acpi_ns_exec_module_code_list();
acpi_ex_enter_interpreter();
/* Update GPEs for any new _PRW or _Lxx/_Exx methods. Ignore errors */
/* Update GPEs for any new _Lxx/_Exx methods. Ignore errors */
status = acpi_tb_get_owner_id(table_index, &owner_id);
if (ACPI_SUCCESS(status)) {

View file

@ -742,7 +742,7 @@ acpi_ex_dump_operands(union acpi_operand_object **operands,
}
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"**** Start operand dump for opcode [%s], %d operands\n",
"**** Start operand dump for opcode [%s], %u operands\n",
opcode_name, num_operands));
if (num_operands == 0) {
@ -812,7 +812,7 @@ void acpi_ex_dump_namespace_node(struct acpi_namespace_node *node, u32 flags)
acpi_ex_out_string("Type", acpi_ut_get_type_name(node->type));
acpi_ex_out_pointer("Attached Object",
acpi_ns_get_attached_object(node));
acpi_ex_out_pointer("Parent", acpi_ns_get_parent_node(node));
acpi_ex_out_pointer("Parent", node->parent);
acpi_ex_dump_object(ACPI_CAST_PTR(union acpi_operand_object, node),
acpi_ex_dump_node);
@ -945,7 +945,7 @@ acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc,
case ACPI_TYPE_PACKAGE:
acpi_os_printf("[Package] Contains %d Elements:\n",
acpi_os_printf("[Package] Contains %u Elements:\n",
obj_desc->package.count);
for (i = 0; i < obj_desc->package.count; i++) {

View file

@ -534,13 +534,13 @@ acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
if (ACPI_SUCCESS(status)) {
if (read_write == ACPI_READ) {
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"Value Read %8.8X%8.8X, Width %d\n",
"Value Read %8.8X%8.8X, Width %u\n",
ACPI_FORMAT_UINT64(*value),
obj_desc->common_field.
access_byte_width));
} else {
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"Value Written %8.8X%8.8X, Width %d\n",
"Value Written %8.8X%8.8X, Width %u\n",
ACPI_FORMAT_UINT64(*value),
obj_desc->common_field.
access_byte_width));

View file

@ -108,11 +108,11 @@ acpi_ex_generate_access(u32 field_bit_offset,
field_byte_length = field_byte_end_offset - field_byte_offset;
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"Bit length %d, Bit offset %d\n",
"Bit length %u, Bit offset %u\n",
field_bit_length, field_bit_offset));
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"Byte Length %d, Byte Offset %d, End Offset %d\n",
"Byte Length %u, Byte Offset %u, End Offset %u\n",
field_byte_length, field_byte_offset,
field_byte_end_offset));
@ -147,11 +147,11 @@ acpi_ex_generate_access(u32 field_bit_offset,
accesses = field_end_offset - field_start_offset;
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"AccessWidth %d end is within region\n",
"AccessWidth %u end is within region\n",
access_byte_width));
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"Field Start %d, Field End %d -- requires %d accesses\n",
"Field Start %u, Field End %u -- requires %u accesses\n",
field_start_offset, field_end_offset,
accesses));
@ -159,7 +159,7 @@ acpi_ex_generate_access(u32 field_bit_offset,
if (accesses <= 1) {
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"Entire field can be accessed with one operation of size %d\n",
"Entire field can be accessed with one operation of size %u\n",
access_byte_width));
return_VALUE(access_byte_width);
}
@ -174,7 +174,7 @@ acpi_ex_generate_access(u32 field_bit_offset,
}
} else {
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"AccessWidth %d end is NOT within region\n",
"AccessWidth %u end is NOT within region\n",
access_byte_width));
if (access_byte_width == 1) {
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
@ -190,7 +190,7 @@ acpi_ex_generate_access(u32 field_bit_offset,
* previous access
*/
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"Backing off to previous optimal access width of %d\n",
"Backing off to previous optimal access width of %u\n",
minimum_access_width));
return_VALUE(minimum_access_width);
}
@ -385,15 +385,6 @@ acpi_ex_prep_common_field_object(union acpi_operand_object *obj_desc,
(field_bit_position -
ACPI_MUL_8(obj_desc->common_field.base_byte_offset));
/*
* Does the entire field fit within a single field access element? (datum)
* (i.e., without crossing a datum boundary)
*/
if ((obj_desc->common_field.start_field_bit_offset +
field_bit_length) <= (u16) access_bit_width) {
obj_desc->common.flags |= AOPOBJ_SINGLE_DATUM;
}
return_ACPI_STATUS(AE_OK);
}

View file

@ -194,7 +194,7 @@ acpi_ex_system_memory_space_handler(u32 function,
((u64) address - (u64) mem_info->mapped_physical_address);
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"System-Memory (width %d) R/W %d Address=%8.8X%8.8X\n",
"System-Memory (width %u) R/W %u Address=%8.8X%8.8X\n",
bit_width, function,
ACPI_FORMAT_NATIVE_UINT(address)));
@ -297,7 +297,7 @@ acpi_ex_system_io_space_handler(u32 function,
ACPI_FUNCTION_TRACE(ex_system_io_space_handler);
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"System-IO (width %d) R/W %d Address=%8.8X%8.8X\n",
"System-IO (width %u) R/W %u Address=%8.8X%8.8X\n",
bit_width, function,
ACPI_FORMAT_NATIVE_UINT(address)));
@ -373,7 +373,7 @@ acpi_ex_pci_config_space_handler(u32 function,
pci_register = (u16) (u32) address;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Pci-Config %d (%d) Seg(%04x) Bus(%04x) Dev(%04x) Func(%04x) Reg(%04x)\n",
"Pci-Config %u (%u) Seg(%04x) Bus(%04x) Dev(%04x) Func(%04x) Reg(%04x)\n",
function, bit_width, pci_id->segment, pci_id->bus,
pci_id->device, pci_id->function, pci_register));

View file

@ -57,7 +57,7 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
/******************************************************************************
*
* FUNCTION: acpi_hw_gpe_register_bit
* FUNCTION: acpi_hw_get_gpe_register_bit
*
* PARAMETERS: gpe_event_info - Info block for the GPE
* gpe_register_info - Info block for the GPE register
@ -69,7 +69,7 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
*
******************************************************************************/
u32 acpi_hw_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info,
u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info,
struct acpi_gpe_register_info *gpe_register_info)
{
return (u32)1 << (gpe_event_info->gpe_number -
@ -115,7 +115,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action)
/* Set or clear just the bit that corresponds to this GPE */
register_bit = acpi_hw_gpe_register_bit(gpe_event_info,
register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info,
gpe_register_info);
switch (action) {
case ACPI_GPE_COND_ENABLE:
@ -141,31 +141,6 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action)
return (status);
}
/******************************************************************************
*
* FUNCTION: acpi_hw_write_gpe_enable_reg
*
* PARAMETERS: gpe_event_info - Info block for the GPE to be enabled
*
* RETURN: Status
*
* DESCRIPTION: Write a GPE enable register. Note: The bit for this GPE must
* already be cleared or set in the parent register
* enable_for_run mask.
*
******************************************************************************/
acpi_status
acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info * gpe_event_info)
{
acpi_status status;
ACPI_FUNCTION_ENTRY();
status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_COND_ENABLE);
return (status);
}
/******************************************************************************
*
* FUNCTION: acpi_hw_clear_gpe
@ -193,7 +168,7 @@ acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info)
return (AE_NOT_EXIST);
}
register_bit = acpi_hw_gpe_register_bit(gpe_event_info,
register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info,
gpe_register_info);
/*
@ -241,7 +216,7 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
/* Get the register bitmask for this GPE */
register_bit = acpi_hw_gpe_register_bit(gpe_event_info,
register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info,
gpe_register_info);
/* GPE currently enabled? (enabled for runtime?) */
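Editorial worked example (not part of this diff): the renamed acpi_hw_get_gpe_register_bit() simply maps a GPE to its bit within the 8-bit status/enable register pair. The return statement above is truncated; the subtrahend is assumed to be the register's base GPE number, and the values below are illustrative only.

#include <stdio.h>

int main(void)
{
	unsigned int gpe_number = 0x16;		/* illustrative GPE */
	unsigned int base_gpe_number = 0x10;	/* assumed base of its register pair */
	unsigned int register_bit = 1u << (gpe_number - base_gpe_number);

	/* Prints 0x40, i.e. bit 6 of the 8-bit enable/status registers */
	printf("register_bit = 0x%02X\n", register_bit);
	return 0;
}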

View file

@ -307,7 +307,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
return_ACPI_STATUS(status);
}
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
"Entering sleep state [S%d]\n", sleep_state));
"Entering sleep state [S%u]\n", sleep_state));
/* Clear the SLP_EN and SLP_TYP fields */

View file

@ -338,8 +338,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
*/
while (!acpi_ns_opens_scope(prefix_node->type) &&
prefix_node->type != ACPI_TYPE_ANY) {
prefix_node =
acpi_ns_get_parent_node(prefix_node);
prefix_node = prefix_node->parent;
}
}
}
@ -419,7 +418,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
/* Backup to the parent node */
num_carats++;
this_node = acpi_ns_get_parent_node(this_node);
this_node = this_node->parent;
if (!this_node) {
/* Current scope has no parent scope */
@ -433,7 +432,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
if (search_parent_flag == ACPI_NS_NO_UPSEARCH) {
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
"Search scope is [%4.4s], path has %d carat(s)\n",
"Search scope is [%4.4s], path has %u carat(s)\n",
acpi_ut_get_node_name
(this_node), num_carats));
}
@ -495,7 +494,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
path++;
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
"Multi Pathname (%d Segments, Flags=%X)\n",
"Multi Pathname (%u Segments, Flags=%X)\n",
num_segments, flags));
break;

View file

@ -159,7 +159,7 @@ void acpi_ns_remove_node(struct acpi_namespace_node *node)
ACPI_FUNCTION_TRACE_PTR(ns_remove_node, node);
parent_node = acpi_ns_get_parent_node(node);
parent_node = node->parent;
prev_node = NULL;
next_node = parent_node->child;
@ -168,29 +168,20 @@ void acpi_ns_remove_node(struct acpi_namespace_node *node)
while (next_node != node) {
prev_node = next_node;
next_node = prev_node->peer;
next_node = next_node->peer;
}
if (prev_node) {
/* Node is not first child, unlink it */
prev_node->peer = next_node->peer;
if (next_node->flags & ANOBJ_END_OF_PEER_LIST) {
prev_node->flags |= ANOBJ_END_OF_PEER_LIST;
}
prev_node->peer = node->peer;
} else {
/* Node is first child (has no previous peer) */
if (next_node->flags & ANOBJ_END_OF_PEER_LIST) {
/* No peers at all */
parent_node->child = NULL;
} else { /* Link peer list to parent */
parent_node->child = next_node->peer;
}
/*
* Node is first child (has no previous peer).
* Link peer list to parent
*/
parent_node->child = node->peer;
}
/* Delete the node and any attached objects */
@ -228,33 +219,42 @@ void acpi_ns_install_node(struct acpi_walk_state *walk_state, struct acpi_namesp
ACPI_FUNCTION_TRACE(ns_install_node);
/*
* Get the owner ID from the Walk state. The owner ID is used to track
* table deletion and deletion of objects created by methods.
*/
if (walk_state) {
/*
* Get the owner ID from the Walk state. The owner ID is used to
* track table deletion and deletion of objects created by methods.
*/
owner_id = walk_state->owner_id;
if ((walk_state->method_desc) &&
(parent_node != walk_state->method_node)) {
/*
* A method is creating a new node that is not a child of the
* method (it is non-local). Mark the executing method as having
* modified the namespace. This is used for cleanup when the
* method exits.
*/
walk_state->method_desc->method.flags |=
AOPOBJ_MODIFIED_NAMESPACE;
}
}
/* Link the new entry into the parent and existing children */
node->peer = NULL;
node->parent = parent_node;
child_node = parent_node->child;
if (!child_node) {
parent_node->child = node;
node->flags |= ANOBJ_END_OF_PEER_LIST;
node->peer = parent_node;
} else {
while (!(child_node->flags & ANOBJ_END_OF_PEER_LIST)) {
/* Add node to the end of the peer list */
while (child_node->peer) {
child_node = child_node->peer;
}
child_node->peer = node;
/* Clear end-of-list flag */
child_node->flags &= ~ANOBJ_END_OF_PEER_LIST;
node->flags |= ANOBJ_END_OF_PEER_LIST;
node->peer = parent_node;
}
/* Init the new entry */
@ -288,9 +288,8 @@ void acpi_ns_install_node(struct acpi_walk_state *walk_state, struct acpi_namesp
void acpi_ns_delete_children(struct acpi_namespace_node *parent_node)
{
struct acpi_namespace_node *child_node;
struct acpi_namespace_node *next_node;
u8 flags;
struct acpi_namespace_node *node_to_delete;
ACPI_FUNCTION_TRACE_PTR(ns_delete_children, parent_node);
@ -298,37 +297,26 @@ void acpi_ns_delete_children(struct acpi_namespace_node *parent_node)
return_VOID;
}
/* If no children, all done! */
child_node = parent_node->child;
if (!child_node) {
return_VOID;
}
/* Deallocate all children at this level */
do {
/* Get the things we need */
next_node = child_node->peer;
flags = child_node->flags;
next_node = parent_node->child;
while (next_node) {
/* Grandchildren should have all been deleted already */
if (child_node->child) {
if (next_node->child) {
ACPI_ERROR((AE_INFO, "Found a grandchild! P=%p C=%p",
parent_node, child_node));
parent_node, next_node));
}
/*
* Delete this child node and move on to the next child in the list.
* No need to unlink the node since we are deleting the entire branch.
*/
acpi_ns_delete_node(child_node);
child_node = next_node;
} while (!(flags & ANOBJ_END_OF_PEER_LIST));
node_to_delete = next_node;
next_node = next_node->peer;
acpi_ns_delete_node(node_to_delete);
};
/* Clear the parent's child pointer */
@ -405,7 +393,7 @@ void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node)
/* Move up the tree to the grandparent */
parent_node = acpi_ns_get_parent_node(parent_node);
parent_node = parent_node->parent;
}
}
@ -510,7 +498,7 @@ void acpi_ns_delete_namespace_by_owner(acpi_owner_id owner_id)
/* Move up the tree to the grandparent */
parent_node = acpi_ns_get_parent_node(parent_node);
parent_node = parent_node->parent;
}
}

View file

@ -441,7 +441,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
return (AE_OK);
}
acpi_os_printf("(R%d)", obj_desc->common.reference_count);
acpi_os_printf("(R%u)", obj_desc->common.reference_count);
switch (type) {
case ACPI_TYPE_METHOD:

View file

@ -103,8 +103,8 @@ acpi_status acpi_ns_initialize_objects(void)
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
"\nInitialized %hd/%hd Regions %hd/%hd Fields %hd/%hd "
"Buffers %hd/%hd Packages (%hd nodes)\n",
"\nInitialized %u/%u Regions %u/%u Fields %u/%u "
"Buffers %u/%u Packages (%u nodes)\n",
info.op_region_init, info.op_region_count,
info.field_init, info.field_count,
info.buffer_init, info.buffer_count,
@ -112,9 +112,9 @@ acpi_status acpi_ns_initialize_objects(void)
info.object_count));
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"%hd Control Methods found\n", info.method_count));
"%u Control Methods found\n", info.method_count));
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"%hd Op Regions found\n", info.op_region_count));
"%u Op Regions found\n", info.op_region_count));
return_ACPI_STATUS(AE_OK);
}
@ -208,8 +208,8 @@ acpi_status acpi_ns_initialize_devices(void)
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
"\nExecuted %hd _INI methods requiring %hd _STA executions "
"(examined %hd objects)\n",
"\nExecuted %u _INI methods requiring %u _STA executions "
"(examined %u objects)\n",
info.num_INI, info.num_STA, info.device_count));
return_ACPI_STATUS(status);
@ -410,7 +410,7 @@ acpi_ns_find_ini_methods(acpi_handle obj_handle,
* The only _INI methods that we care about are those that are
* present under Device, Processor, and Thermal objects.
*/
parent_node = acpi_ns_get_parent_node(node);
parent_node = node->parent;
switch (parent_node->type) {
case ACPI_TYPE_DEVICE:
case ACPI_TYPE_PROCESSOR:
@@ -420,7 +420,7 @@ acpi_ns_find_ini_methods(acpi_handle obj_handle,
while (parent_node) {
parent_node->flags |= ANOBJ_SUBTREE_HAS_INI;
parent_node = acpi_ns_get_parent_node(parent_node);
parent_node = parent_node->parent;
}
break;

View file

@@ -93,7 +93,7 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
/* Put the name into the buffer */
ACPI_MOVE_32_TO_32((name_buffer + index), &parent_node->name);
parent_node = acpi_ns_get_parent_node(parent_node);
parent_node = parent_node->parent;
/* Prefix name with the path separator */
@@ -198,7 +198,7 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
return 0;
}
size += ACPI_PATH_SEGMENT_LENGTH;
next_node = acpi_ns_get_parent_node(next_node);
next_node = next_node->parent;
}
if (!size) {

View file

@@ -136,8 +136,8 @@ acpi_ns_one_complete_parse(u32 pass_number,
/* Parse the AML */
ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "*PARSE* pass %d parse\n",
(unsigned)pass_number));
ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "*PARSE* pass %u parse\n",
pass_number));
status = acpi_ps_parse_aml(walk_state);
cleanup:

View file

@@ -556,7 +556,7 @@ acpi_ns_repair_null_element(struct acpi_predefined_data *data,
/* Need an Integer - create a zero-value integer */
new_object = acpi_ut_create_integer_object(0);
new_object = acpi_ut_create_integer_object((u64)0);
} else if (expected_btypes & ACPI_RTYPE_STRING) {
/* Need a String - create a NULL string */

View file

@@ -112,6 +112,13 @@ acpi_ns_sort_list(union acpi_operand_object **elements,
* _GTM: Convert Buffer of BYTEs to a Buffer of DWORDs
* _PSS: Sort the list descending by Power
* _TSS: Sort the list descending by Power
*
* Names that must be packages, but cannot be sorted:
*
* _BCL: Values are tied to the Package index where they appear, and cannot
* be moved or sorted. These index values are used for _BQC and _BCM.
* However, we can fix the case where a buffer is returned, by converting
* it to a Package of integers.
*/
static const struct acpi_repair_info acpi_ns_repairable_names[] = {
{"_ALR", acpi_ns_repair_ALR},

View file

@@ -152,17 +152,6 @@ acpi_ns_search_one_scope(u32 target_name,
return_ACPI_STATUS(AE_OK);
}
/*
* The last entry in the list points back to the parent,
* so a flag is used to indicate the end-of-list
*/
if (node->flags & ANOBJ_END_OF_PEER_LIST) {
/* Searched entire list, we are done */
break;
}
/* Didn't match name, move on to the next peer object */
node = node->peer;
@@ -217,7 +206,7 @@ acpi_ns_search_parent_tree(u32 target_name,
ACPI_FUNCTION_TRACE(ns_search_parent_tree);
parent_node = acpi_ns_get_parent_node(node);
parent_node = node->parent;
/*
* If there is no parent (i.e., we are at the root) or type is "local",
@@ -261,7 +250,7 @@ acpi_ns_search_parent_tree(u32 target_name,
/* Not found here, go up another level (until we reach the root) */
parent_node = acpi_ns_get_parent_node(parent_node);
parent_node = parent_node->parent;
}
/* Not found in parent tree */

View file

@@ -847,116 +847,3 @@ acpi_ns_get_node(struct acpi_namespace_node *prefix_node,
ACPI_FREE(internal_path);
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_get_parent_node
*
* PARAMETERS: Node - Current table entry
*
* RETURN: Parent entry of the given entry
*
* DESCRIPTION: Obtain the parent entry for a given entry in the namespace.
*
******************************************************************************/
struct acpi_namespace_node *acpi_ns_get_parent_node(struct acpi_namespace_node
*node)
{
ACPI_FUNCTION_ENTRY();
if (!node) {
return (NULL);
}
/*
* Walk to the end of this peer list. The last entry is marked with a flag
* and the peer pointer is really a pointer back to the parent. This saves
* putting a parent back pointer in each and every named object!
*/
while (!(node->flags & ANOBJ_END_OF_PEER_LIST)) {
node = node->peer;
}
return (node->peer);
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_get_next_valid_node
*
* PARAMETERS: Node - Current table entry
*
* RETURN: Next valid Node in the linked node list. NULL if no more valid
* nodes.
*
* DESCRIPTION: Find the next valid node within a name table.
* Useful for implementing NULL-end-of-list loops.
*
******************************************************************************/
struct acpi_namespace_node *acpi_ns_get_next_valid_node(struct
acpi_namespace_node
*node)
{
/* If we are at the end of this peer list, return NULL */
if (node->flags & ANOBJ_END_OF_PEER_LIST) {
return NULL;
}
/* Otherwise just return the next peer */
return (node->peer);
}
#ifdef ACPI_OBSOLETE_FUNCTIONS
/*******************************************************************************
*
* FUNCTION: acpi_ns_find_parent_name
*
* PARAMETERS: *child_node - Named Obj whose name is to be found
*
* RETURN: The ACPI name
*
* DESCRIPTION: Search for the given obj in its parent scope and return the
* name segment, or "????" if the parent name can't be found
* (which "should not happen").
*
******************************************************************************/
acpi_name acpi_ns_find_parent_name(struct acpi_namespace_node * child_node)
{
struct acpi_namespace_node *parent_node;
ACPI_FUNCTION_TRACE(ns_find_parent_name);
if (child_node) {
/* Valid entry. Get the parent Node */
parent_node = acpi_ns_get_parent_node(child_node);
if (parent_node) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Parent of %p [%4.4s] is %p [%4.4s]\n",
child_node,
acpi_ut_get_node_name(child_node),
parent_node,
acpi_ut_get_node_name(parent_node)));
if (parent_node->name.integer) {
return_VALUE((acpi_name) parent_node->name.
integer);
}
}
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Unable to find parent of %p (%4.4s)\n",
child_node,
acpi_ut_get_node_name(child_node)));
}
return_VALUE(ACPI_UNKNOWN_NAME);
}
#endif
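The acpi_ns_get_parent_node() removed above had to walk to the end of a node's peer list, where the entry flagged ANOBJ_END_OF_PEER_LIST reused its peer pointer as a back link to the parent; with a dedicated parent field that walk disappears. A small stand-alone sketch contrasting the two lookups, again with a simplified stand-in node rather than the real ACPICA structure:

#include <stdio.h>

#define ANOBJ_END_OF_PEER_LIST 0x01	/* old-style end-of-list flag */

struct node {
	struct node *parent;	/* new: direct back pointer */
	struct node *peer;	/* old: last peer pointed back to the parent */
	unsigned char flags;
};

/* Old scheme: walk the peers until the flagged last entry, whose peer
 * pointer doubles as the parent link */
static struct node *get_parent_old(struct node *node)
{
	while (!(node->flags & ANOBJ_END_OF_PEER_LIST))
		node = node->peer;

	return node->peer;
}

/* New scheme: constant-time lookup through the dedicated field */
static struct node *get_parent_new(struct node *node)
{
	return node->parent;
}

int main(void)
{
	struct node parent = { 0 };
	struct node a = { .parent = &parent };
	struct node b = { .parent = &parent, .flags = ANOBJ_END_OF_PEER_LIST };

	a.peer = &b;
	b.peer = &parent;	/* old-style back link from the last peer */

	printf("old: %p  new: %p\n",
	       (void *)get_parent_old(&a), (void *)get_parent_new(&a));
	return 0;
}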

View file

@@ -79,15 +79,6 @@ struct acpi_namespace_node *acpi_ns_get_next_node(struct acpi_namespace_node
return parent_node->child;
}
/*
* Get the next node.
*
* If we are at the end of this peer list, return NULL
*/
if (child_node->flags & ANOBJ_END_OF_PEER_LIST) {
return NULL;
}
/* Otherwise just return the next peer */
return child_node->peer;
@@ -146,9 +137,9 @@ struct acpi_namespace_node *acpi_ns_get_next_node_typed(acpi_object_type type,
return (next_node);
}
/* Otherwise, move on to the next node */
/* Otherwise, move on to the next peer node */
next_node = acpi_ns_get_next_valid_node(next_node);
next_node = next_node->peer;
}
/* Not found */
@@ -355,7 +346,7 @@ acpi_ns_walk_namespace(acpi_object_type type,
*/
level--;
child_node = parent_node;
parent_node = acpi_ns_get_parent_node(parent_node);
parent_node = parent_node->parent;
node_previously_visited = TRUE;
}

View file

@@ -190,7 +190,7 @@ acpi_status acpi_get_parent(acpi_handle handle, acpi_handle * ret_handle)
/* Get the parent entry */
parent_node = acpi_ns_get_parent_node(node);
parent_node = node->parent;
*ret_handle = ACPI_CAST_PTR(acpi_handle, parent_node);
/* Return exception if parent is null */

View file

@@ -813,10 +813,10 @@ acpi_status acpi_ut_init_globals(void)
acpi_gbl_root_node_struct.name.integer = ACPI_ROOT_NAME;
acpi_gbl_root_node_struct.descriptor_type = ACPI_DESC_TYPE_NAMED;
acpi_gbl_root_node_struct.type = ACPI_TYPE_DEVICE;
acpi_gbl_root_node_struct.parent = NULL;
acpi_gbl_root_node_struct.child = NULL;
acpi_gbl_root_node_struct.peer = NULL;
acpi_gbl_root_node_struct.object = NULL;
acpi_gbl_root_node_struct.flags = ANOBJ_END_OF_PEER_LIST;
#ifdef ACPI_DEBUG_OUTPUT
acpi_gbl_lowest_stack_pointer = ACPI_CAST_PTR(acpi_size, ACPI_SIZE_MAX);

View file

@@ -293,12 +293,8 @@ acpi_status acpi_initialize_objects(u32 flags)
* Complete the GPE initialization for the GPE blocks defined in the FADT
* (GPE block 0 and 1).
*
* Note1: This is where the _PRW methods are executed for the GPEs. These
* methods can only be executed after the SCI and Global Lock handlers are
* installed and initialized.
*
* Note2: Currently, there seems to be no need to run the _REG methods
* before execution of the _PRW methods and enabling of the GPEs.
* NOTE: Currently, there seems to be no need to run the _REG methods
* before enabling the GPEs.
*/
if (!(flags & ACPI_NO_EVENT_INIT)) {
status = acpi_ev_install_fadt_gpes();

View file

@@ -424,8 +424,7 @@ static int acpi_button_add(struct acpi_device *device)
if (device->wakeup.flags.valid) {
/* Button's GPE is run-wake GPE */
acpi_enable_gpe(device->wakeup.gpe_device,
device->wakeup.gpe_number,
ACPI_GPE_TYPE_RUNTIME);
device->wakeup.gpe_number);
device->wakeup.run_wake_count++;
device->wakeup.state.enabled = 1;
}
@@ -448,8 +447,7 @@ static int acpi_button_remove(struct acpi_device *device, int type)
if (device->wakeup.flags.valid) {
acpi_disable_gpe(device->wakeup.gpe_device,
device->wakeup.gpe_number,
ACPI_GPE_TYPE_RUNTIME);
device->wakeup.gpe_number);
device->wakeup.run_wake_count--;
device->wakeup.state.enabled = 0;
}

View file

@@ -303,11 +303,8 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
pr_debug(PREFIX "transaction start\n");
/* disable GPE during transaction if storm is detected */
if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
/*
* It has to be disabled at the hardware level regardless of the
* GPE reference counting, so that it doesn't trigger.
*/
acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
/* It has to be disabled, so that it doesn't trigger. */
acpi_disable_gpe(NULL, ec->gpe);
}
status = acpi_ec_transaction_unlocked(ec, t);
@@ -316,12 +313,8 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
ec_check_sci_sync(ec, acpi_ec_read_status(ec));
if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
msleep(1);
/*
* It is safe to enable the GPE outside of the transaction. Use
* acpi_set_gpe() for that, since we used it to disable the GPE
* above.
*/
acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
/* It is safe to enable the GPE outside of the transaction. */
acpi_enable_gpe(NULL, ec->gpe);
} else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
pr_info(PREFIX "GPE storm detected, "
"transactions will use polling mode\n");
@@ -746,7 +739,7 @@ static int ec_install_handlers(struct acpi_ec *ec)
if (ACPI_FAILURE(status))
return -ENODEV;
acpi_enable_gpe(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME);
acpi_enable_gpe(NULL, ec->gpe);
status = acpi_install_address_space_handler(ec->handle,
ACPI_ADR_SPACE_EC,
&acpi_ec_space_handler,
@@ -763,7 +756,7 @@ static int ec_install_handlers(struct acpi_ec *ec)
} else {
acpi_remove_gpe_handler(NULL, ec->gpe,
&acpi_ec_gpe_handler);
acpi_disable_gpe(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME);
acpi_disable_gpe(NULL, ec->gpe);
return -ENODEV;
}
}
@@ -774,7 +767,7 @@ static void ec_remove_handlers(struct acpi_ec *ec)
static void ec_remove_handlers(struct acpi_ec *ec)
{
acpi_disable_gpe(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME);
acpi_disable_gpe(NULL, ec->gpe);
if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
pr_err(PREFIX "failed to remove space handler\n");
@@ -1018,22 +1011,6 @@ error:
return -ENODEV;
}
static int acpi_ec_suspend(struct acpi_device *device, pm_message_t state)
{
struct acpi_ec *ec = acpi_driver_data(device);
/* Stop using the GPE, but keep it reference counted. */
acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
return 0;
}
static int acpi_ec_resume(struct acpi_device *device)
{
struct acpi_ec *ec = acpi_driver_data(device);
/* Enable the GPE again, but don't reference count it once more. */
acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
return 0;
}
static struct acpi_driver acpi_ec_driver = {
.name = "ec",
.class = ACPI_EC_CLASS,
@@ -1041,8 +1018,6 @@ static struct acpi_driver acpi_ec_driver = {
.ops = {
.add = acpi_ec_add,
.remove = acpi_ec_remove,
.suspend = acpi_ec_suspend,
.resume = acpi_ec_resume,
},
};
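Put back together, the storm handling above now pairs a reference-counted acpi_disable_gpe() before the polled transaction with a matching acpi_enable_gpe() afterwards, in place of the dropped acpi_set_gpe() calls. A hedged sketch of that pairing; the function name is illustrative and the surrounding helpers are assumed from the ec.c context shown in the hunks:

/* Sketch only: condenses the storm path shown above. Assumes the ec.c
 * context: struct acpi_ec, struct transaction, EC_FLAGS_GPE_STORM,
 * acpi_ec_transaction_unlocked() and msleep(). */
static int ec_transaction_storm_sketch(struct acpi_ec *ec, struct transaction *t)
{
	int ret;

	/* Keep the EC GPE from firing while the transaction is polled */
	if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags))
		acpi_disable_gpe(NULL, ec->gpe);

	ret = acpi_ec_transaction_unlocked(ec, t);

	if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
		msleep(1);
		/* Safe to re-enable outside the transaction; the calls are
		 * reference counted, so the pair leaves the count unchanged. */
		acpi_enable_gpe(NULL, ec->gpe);
	}

	return ret;
}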

View file

@@ -1046,26 +1046,6 @@ static int __init acpi_serialize_setup(char *str)
__setup("acpi_serialize", acpi_serialize_setup);
/*
* Wake and Run-Time GPES are expected to be separate.
* We disable wake-GPEs at run-time to prevent spurious
* interrupts.
*
* However, if a system exists that shares Wake and
* Run-time events on the same GPE this flag is available
* to tell Linux to keep the wake-time GPEs enabled at run-time.
*/
static int __init acpi_wake_gpes_always_on_setup(char *str)
{
printk(KERN_INFO PREFIX "wake GPEs not disabled\n");
acpi_gbl_leave_wake_gpes_disabled = FALSE;
return 1;
}
__setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup);
/* Check of resource interference between native drivers and ACPI
* OperationRegions (SystemIO and System Memory only).
* IO ports and memory declared in ACPI might be used by the ACPI subsystem

View file

@@ -740,6 +740,8 @@ acpi_bus_extract_wakeup_device_power_package(struct acpi_device *device,
device->wakeup.resources.handles[i] = element->reference.handle;
}
acpi_gpe_can_wake(device->wakeup.gpe_device, device->wakeup.gpe_number);
return AE_OK;
}
@@ -764,8 +766,9 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
return;
}
status = acpi_get_gpe_status(NULL, device->wakeup.gpe_number,
&event_status);
status = acpi_get_gpe_status(device->wakeup.gpe_device,
device->wakeup.gpe_number,
&event_status);
if (status == AE_OK)
device->wakeup.flags.run_wake =
!!(event_status & ACPI_EVENT_FLAG_HANDLE);
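The fix above passes the GPE's own gpe_device, rather than NULL (which addresses the FADT GPE blocks), to acpi_get_gpe_status(); a GPE whose status carries ACPI_EVENT_FLAG_HANDLE has a handler installed and can therefore be treated as run-wake. A hedged sketch of that query in kernel context; the helper name is illustrative:

/* Sketch: true if the wakeup GPE is backed by a handler and can be used
 * while the system is running, as acpi_bus_set_run_wake_flags() above
 * decides. */
static bool gpe_is_run_wake(acpi_handle gpe_device, u32 gpe_number)
{
	acpi_event_status event_status;
	acpi_status status;

	status = acpi_get_gpe_status(gpe_device, gpe_number, &event_status);
	if (ACPI_FAILURE(status))
		return false;

	return !!(event_status & ACPI_EVENT_FLAG_HANDLE);
}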

View file

@@ -663,18 +663,9 @@ int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
return -ENODEV;
}
if (enable) {
error = acpi_enable_wakeup_device_power(adev,
acpi_target_sleep_state);
if (!error)
acpi_enable_gpe(adev->wakeup.gpe_device,
adev->wakeup.gpe_number,
ACPI_GPE_TYPE_WAKE);
} else {
acpi_disable_gpe(adev->wakeup.gpe_device, adev->wakeup.gpe_number,
ACPI_GPE_TYPE_WAKE);
error = acpi_disable_wakeup_device_power(adev);
}
error = enable ?
acpi_enable_wakeup_device_power(adev, acpi_target_sleep_state) :
acpi_disable_wakeup_device_power(adev);
if (!error)
dev_info(dev, "wake-up capability %s by ACPI\n",
enable ? "enabled" : "disabled");

View file

@@ -388,12 +388,10 @@ static ssize_t counter_set(struct kobject *kobj,
if (index < num_gpes) {
if (!strcmp(buf, "disable\n") &&
(status & ACPI_EVENT_FLAG_ENABLED))
result = acpi_disable_gpe(handle, index,
ACPI_GPE_TYPE_RUNTIME);
result = acpi_disable_gpe(handle, index);
else if (!strcmp(buf, "enable\n") &&
!(status & ACPI_EVENT_FLAG_ENABLED))
result = acpi_enable_gpe(handle, index,
ACPI_GPE_TYPE_RUNTIME);
result = acpi_enable_gpe(handle, index);
else if (!strcmp(buf, "clear\n") &&
(status & ACPI_EVENT_FLAG_SET))
result = acpi_clear_gpe(handle, index);

View file

@@ -64,13 +64,14 @@ void acpi_enable_wakeup_device(u8 sleep_state)
struct acpi_device *dev =
container_of(node, struct acpi_device, wakeup_list);
if (!dev->wakeup.flags.valid || !dev->wakeup.state.enabled
if (!dev->wakeup.flags.valid
|| !(dev->wakeup.state.enabled || dev->wakeup.prepare_count)
|| sleep_state > (u32) dev->wakeup.sleep_state)
continue;
/* The wake-up power should have been enabled already. */
acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
ACPI_GPE_TYPE_WAKE);
acpi_gpe_wakeup(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
ACPI_GPE_ENABLE);
}
}
@@ -89,13 +90,16 @@ void acpi_disable_wakeup_device(u8 sleep_state)
struct acpi_device *dev =
container_of(node, struct acpi_device, wakeup_list);
if (!dev->wakeup.flags.valid || !dev->wakeup.state.enabled
if (!dev->wakeup.flags.valid
|| !(dev->wakeup.state.enabled || dev->wakeup.prepare_count)
|| (sleep_state > (u32) dev->wakeup.sleep_state))
continue;
acpi_disable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
ACPI_GPE_TYPE_WAKE);
acpi_disable_wakeup_device_power(dev);
acpi_gpe_wakeup(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
ACPI_GPE_DISABLE);
if (dev->wakeup.state.enabled)
acpi_disable_wakeup_device_power(dev);
}
}
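acpi_gpe_wakeup() arms or disarms a wake GPE without touching the runtime reference count, which is why the paths above call it with ACPI_GPE_ENABLE just before entering a sleep state and with ACPI_GPE_DISABLE on the way back up. A condensed, hedged sketch of that per-device step; the helper name is illustrative and the wakeup fields are those used in the hunks above:

/* Sketch: arm or disarm one device's wake GPE around system sleep, as
 * acpi_enable_wakeup_device()/acpi_disable_wakeup_device() do above. */
static void arm_wake_gpe(struct acpi_device *adev, bool arm)
{
	acpi_gpe_wakeup(adev->wakeup.gpe_device, adev->wakeup.gpe_number,
			arm ? ACPI_GPE_ENABLE : ACPI_GPE_DISABLE);
}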

View file

@@ -296,14 +296,12 @@ static int acpi_dev_run_wake(struct device *phys_dev, bool enable)
if (!dev->wakeup.run_wake_count++) {
acpi_enable_wakeup_device_power(dev, ACPI_STATE_S0);
acpi_enable_gpe(dev->wakeup.gpe_device,
dev->wakeup.gpe_number,
ACPI_GPE_TYPE_RUNTIME);
dev->wakeup.gpe_number);
}
} else if (dev->wakeup.run_wake_count > 0) {
if (!--dev->wakeup.run_wake_count) {
acpi_disable_gpe(dev->wakeup.gpe_device,
dev->wakeup.gpe_number,
ACPI_GPE_TYPE_RUNTIME);
dev->wakeup.gpe_number);
acpi_disable_wakeup_device_power(dev);
}
} else {

View file

@@ -71,8 +71,9 @@
#define ACPI_TOOLS 0x00002000
#define ACPI_EXAMPLE 0x00004000
#define ACPI_DRIVER 0x00008000
#define DT_COMPILER 0x00010000
#define ACPI_ALL_COMPONENTS 0x0000FFFF
#define ACPI_ALL_COMPONENTS 0x0001FFFF
#define ACPI_COMPONENT_DEFAULT (ACPI_ALL_COMPONENTS)
/* Component IDs reserved for ACPI drivers */

View file

@@ -47,7 +47,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
#define ACPI_CA_VERSION 0x20100428
#define ACPI_CA_VERSION 0x20100702
#include "actypes.h"
#include "actbl.h"
@@ -63,7 +63,6 @@ extern u32 acpi_dbg_layer;
extern u8 acpi_gbl_enable_interpreter_slack;
extern u8 acpi_gbl_all_methods_serialized;
extern u8 acpi_gbl_create_osi_method;
extern u8 acpi_gbl_leave_wake_gpes_disabled;
extern u8 acpi_gbl_use_default_register_widths;
extern acpi_name acpi_gbl_trace_method_name;
extern u32 acpi_gbl_trace_flags;
@@ -282,16 +281,16 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status);
/*
* GPE Interfaces
*/
acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action);
acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number);
acpi_status
acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type);
acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number);
acpi_status
acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type);
acpi_status acpi_gpe_can_wake(acpi_handle gpe_device, u32 gpe_number);
acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number);
acpi_status acpi_gpe_wakeup(acpi_handle gpe_device, u32 gpe_number, u8 action);
acpi_status
acpi_get_gpe_status(acpi_handle gpe_device,
u32 gpe_number, acpi_event_status *event_status);
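Taken together, the reworked declarations split GPE control into a reference-counted runtime pair (acpi_enable_gpe()/acpi_disable_gpe()), a wake-capability marker (acpi_gpe_can_wake()), and a sleep-time switch (acpi_gpe_wakeup()). A hedged sketch of the call sequence a driver with a wake-capable GPE might follow; the handle and number are placeholders, and error handling is omitted:

/* Sketch of the new GPE call sequence; gpe_device and gpe_number stand
 * in for a real device's values. */
static void gpe_interface_sketch(acpi_handle gpe_device, u32 gpe_number)
{
	/* Setup: mark the GPE as wake-capable before it is ever armed */
	acpi_gpe_can_wake(gpe_device, gpe_number);

	/* Runtime: reference-counted enable/disable pairs */
	acpi_enable_gpe(gpe_device, gpe_number);
	/* ... GPE serviced while the system runs ... */
	acpi_disable_gpe(gpe_device, gpe_number);

	/* System sleep: arm the GPE for wakeup going down, disarm after resume */
	acpi_gpe_wakeup(gpe_device, gpe_number, ACPI_GPE_ENABLE);
	/* ... suspend/resume cycle ... */
	acpi_gpe_wakeup(gpe_device, gpe_number, ACPI_GPE_DISABLE);
}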

View file

@@ -77,8 +77,18 @@
#define ACPI_SIG_UEFI "UEFI" /* Uefi Boot Optimization Table */
#define ACPI_SIG_WAET "WAET" /* Windows ACPI Emulated devices Table */
#define ACPI_SIG_WDAT "WDAT" /* Watchdog Action Table */
#define ACPI_SIG_WDDT "WDDT" /* Watchdog Timer Description Table */
#define ACPI_SIG_WDRT "WDRT" /* Watchdog Resource Table */
#ifdef ACPI_UNDEFINED_TABLES
/*
* These tables have been seen in the field, but no definition has been found
*/
#define ACPI_SIG_ATKG "ATKG"
#define ACPI_SIG_GSCI "GSCI" /* GMCH SCI table */
#define ACPI_SIG_IEIT "IEIT"
#endif
/*
* All tables must be byte-packed to match the ACPI specification, since
* the tables are provided by the system BIOS.
@@ -907,6 +917,44 @@ enum acpi_wdat_instructions {
ACPI_WDAT_PRESERVE_REGISTER = 0x80 /* Except for this value */
};
/*******************************************************************************
*
* WDDT - Watchdog Descriptor Table
* Version 1
*
* Conforms to "Using the Intel ICH Family Watchdog Timer (WDT)",
* Version 001, September 2002
*
******************************************************************************/
struct acpi_table_wddt {
struct acpi_table_header header; /* Common ACPI table header */
u16 spec_version;
u16 table_version;
u16 pci_vendor_id;
struct acpi_generic_address address;
u16 max_count; /* Maximum counter value supported */
u16 min_count; /* Minimum counter value supported */
u16 period;
u16 status;
u16 capability;
};
/* Flags for Status field above */
#define ACPI_WDDT_AVAILABLE (1)
#define ACPI_WDDT_ACTIVE (1<<1)
#define ACPI_WDDT_TCO_OS_OWNED (1<<2)
#define ACPI_WDDT_USER_RESET (1<<11)
#define ACPI_WDDT_WDT_RESET (1<<12)
#define ACPI_WDDT_POWER_FAIL (1<<13)
#define ACPI_WDDT_UNKNOWN_RESET (1<<14)
/* Flags for Capability field above */
#define ACPI_WDDT_AUTO_RESET (1)
#define ACPI_WDDT_ALERT_SUPPORT (1<<1)
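The structure and flag definitions above are enough to locate and sanity-check a WDDT at runtime. A hedged sketch, in kernel context and not taken from this patch set, that maps the table with acpi_get_table() and inspects the Status field:

/* Sketch: fetch the WDDT and report whether the watchdog hardware is
 * available and currently active according to its Status flags. */
static int check_wddt(void)
{
	struct acpi_table_header *header;
	struct acpi_table_wddt *wddt;
	acpi_status status;

	status = acpi_get_table(ACPI_SIG_WDDT, 1, &header);
	if (ACPI_FAILURE(status))
		return -ENODEV;		/* no WDDT on this system */

	wddt = (struct acpi_table_wddt *)header;
	if (!(wddt->status & ACPI_WDDT_AVAILABLE))
		return -ENODEV;

	return (wddt->status & ACPI_WDDT_ACTIVE) ? 1 : 0;
}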
/*******************************************************************************
*
* WDRT - Watchdog Resource Table

View file

@@ -663,18 +663,12 @@ typedef u32 acpi_event_status;
#define ACPI_GPE_MAX 0xFF
#define ACPI_NUM_GPE 256
/* Actions for acpi_set_gpe and acpi_hw_low_set_gpe */
/* Actions for acpi_gpe_wakeup, acpi_hw_low_set_gpe */
#define ACPI_GPE_ENABLE 0
#define ACPI_GPE_DISABLE 1
#define ACPI_GPE_COND_ENABLE 2
/* gpe_types for acpi_enable_gpe and acpi_disable_gpe */
#define ACPI_GPE_TYPE_WAKE (u8) 0x01
#define ACPI_GPE_TYPE_RUNTIME (u8) 0x02
#define ACPI_GPE_TYPE_WAKE_RUN (u8) 0x03
/*
* GPE info flags - Per GPE
* +-------+---+-+-+