Diffstat (limited to 'drivers/acpi/dispatcher/dsmethod.c')
-rw-r--r--  drivers/acpi/dispatcher/dsmethod.c | 518
1 file changed, 258 insertions(+), 260 deletions(-)
diff --git a/drivers/acpi/dispatcher/dsmethod.c b/drivers/acpi/dispatcher/dsmethod.c
index c475546535b6..a39a33f4847a 100644
--- a/drivers/acpi/dispatcher/dsmethod.c
+++ b/drivers/acpi/dispatcher/dsmethod.c
@@ -52,6 +52,10 @@
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dsmethod")
+/* Local prototypes */
+static acpi_status
+acpi_ds_create_method_mutex(union acpi_operand_object *method_desc);
+
/*******************************************************************************
*
* FUNCTION: acpi_ds_method_error
@@ -67,6 +71,7 @@ ACPI_MODULE_NAME("dsmethod")
* Note: Allows the exception handler to change the status code
*
******************************************************************************/
+
acpi_status
acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
{
@@ -81,6 +86,7 @@ acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
/* Invoke the global exception handler */
if (acpi_gbl_exception_handler) {
+
/* Exit the interpreter, allow handler to execute methods */
acpi_ex_exit_interpreter();
@@ -100,6 +106,7 @@ acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
}
#ifdef ACPI_DISASSEMBLER
if (ACPI_FAILURE(status)) {
+
/* Display method locals/args if disassembler is present */
acpi_dm_dump_method_info(status, walk_state, walk_state->op);
@@ -111,11 +118,51 @@ acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
/*******************************************************************************
*
+ * FUNCTION: acpi_ds_create_method_mutex
+ *
+ * PARAMETERS: obj_desc - The method object
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Create a mutex object for a serialized control method
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ds_create_method_mutex(union acpi_operand_object *method_desc)
+{
+ union acpi_operand_object *mutex_desc;
+ acpi_status status;
+
+ ACPI_FUNCTION_NAME(ds_create_method_mutex);
+
+ /* Create the new mutex object */
+
+ mutex_desc = acpi_ut_create_internal_object(ACPI_TYPE_MUTEX);
+ if (!mutex_desc) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Create the actual OS Mutex */
+
+ status = acpi_os_create_mutex(&mutex_desc->mutex.os_mutex);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ mutex_desc->mutex.sync_level = method_desc->method.sync_level;
+ method_desc->method.mutex = mutex_desc;
+ return_ACPI_STATUS(AE_OK);
+}
+
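
The helper above defers OS mutex creation until a serialized method is first executed. A minimal standalone sketch of the same lazy-creation idea, assuming plain pthreads instead of the ACPICA OS services layer (struct method_state and method_mutex_create are hypothetical stand-ins, not ACPICA names):

#include <pthread.h>
#include <stdlib.h>

struct method_state {
        unsigned char sync_level;       /* SyncLevel from the Method() declaration */
        pthread_mutex_t *mutex;         /* created lazily, on first execution */
};

/* Create the per-method mutex only if it does not exist yet. */
int method_mutex_create(struct method_state *method)
{
        pthread_mutex_t *mutex;

        if (method->mutex)
                return 0;               /* an earlier execution already created it */

        mutex = malloc(sizeof(*mutex));
        if (!mutex)
                return -1;              /* out of memory (AE_NO_MEMORY equivalent) */

        if (pthread_mutex_init(mutex, NULL) != 0) {
                free(mutex);
                return -1;
        }

        method->mutex = mutex;          /* attach to the method object */
        return 0;
}
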
+/*******************************************************************************
+ *
* FUNCTION: acpi_ds_begin_method_execution
*
* PARAMETERS: method_node - Node of the method
* obj_desc - The method object
- * calling_method_node - Caller of this method (if non-null)
+ * walk_state - current state, NULL if not yet executing
+ * a method.
*
* RETURN: Status
*
@@ -126,13 +173,13 @@ acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
******************************************************************************/
acpi_status
-acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node,
- union acpi_operand_object * obj_desc,
- struct acpi_namespace_node * calling_method_node)
+acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
+ union acpi_operand_object *obj_desc,
+ struct acpi_walk_state *walk_state)
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE_PTR("ds_begin_method_execution", method_node);
+ ACPI_FUNCTION_TRACE_PTR(ds_begin_method_execution, method_node);
if (!method_node) {
return_ACPI_STATUS(AE_NULL_ENTRY);
@@ -147,32 +194,80 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node,
}
/*
- * If there is a concurrency limit on this method, we need to
- * obtain a unit from the method semaphore.
+ * If this method is serialized, we need to acquire the method mutex.
*/
- if (obj_desc->method.semaphore) {
+ if (obj_desc->method.method_flags & AML_METHOD_SERIALIZED) {
/*
- * Allow recursive method calls, up to the reentrancy/concurrency
- * limit imposed by the SERIALIZED rule and the sync_level method
- * parameter.
- *
- * The point of this code is to avoid permanently blocking a
- * thread that is making recursive method calls.
+ * Create a mutex for the method if it is defined to be Serialized
+ * and a mutex has not already been created. We defer the mutex creation
+ * until a method is actually executed, to minimize the object count
*/
- if (method_node == calling_method_node) {
- if (obj_desc->method.thread_count >=
- obj_desc->method.concurrency) {
- return_ACPI_STATUS(AE_AML_METHOD_LIMIT);
+ if (!obj_desc->method.mutex) {
+ status = acpi_ds_create_method_mutex(obj_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
}
}
/*
- * Get a unit from the method semaphore. This releases the
- * interpreter if we block
+ * The current_sync_level (per-thread) must be less than or equal to
+ * the sync level of the method. This mechanism provides some
+ * deadlock prevention
+ *
+ * Top-level method invocation has no walk state at this point
*/
- status =
- acpi_ex_system_wait_semaphore(obj_desc->method.semaphore,
- ACPI_WAIT_FOREVER);
+ if (walk_state &&
+ (walk_state->thread->current_sync_level >
+ obj_desc->method.mutex->mutex.sync_level)) {
+ ACPI_ERROR((AE_INFO,
+ "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%d)",
+ acpi_ut_get_node_name(method_node),
+ walk_state->thread->current_sync_level));
+
+ return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
+ }
+
+ /*
+ * Obtain the method mutex if necessary. Do not acquire mutex for a
+ * recursive call.
+ */
+ if (!walk_state ||
+ !obj_desc->method.mutex->mutex.owner_thread ||
+ (walk_state->thread !=
+ obj_desc->method.mutex->mutex.owner_thread)) {
+ /*
+ * Acquire the method mutex. This releases the interpreter if we
+ * block (and reacquires it before it returns)
+ */
+ status =
+ acpi_ex_system_wait_mutex(obj_desc->method.mutex->
+ mutex.os_mutex,
+ ACPI_WAIT_FOREVER);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Update the mutex and walk info and save the original sync_level */
+
+ if (walk_state) {
+ obj_desc->method.mutex->mutex.
+ original_sync_level =
+ walk_state->thread->current_sync_level;
+
+ obj_desc->method.mutex->mutex.owner_thread =
+ walk_state->thread;
+ walk_state->thread->current_sync_level =
+ obj_desc->method.sync_level;
+ } else {
+ obj_desc->method.mutex->mutex.
+ original_sync_level =
+ obj_desc->method.mutex->mutex.sync_level;
+ }
+ }
+
+ /* Always increase acquisition depth */
+
+ obj_desc->method.mutex->mutex.acquisition_depth++;
}
/*
@@ -183,7 +278,7 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node,
if (!obj_desc->method.owner_id) {
status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
+ goto cleanup;
}
}
@@ -193,6 +288,14 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node,
*/
obj_desc->method.thread_count++;
return_ACPI_STATUS(status);
+
+ cleanup:
+ /* On error, must release the method mutex (if present) */
+
+ if (obj_desc->method.mutex) {
+ acpi_os_release_mutex(obj_desc->method.mutex->mutex.os_mutex);
+ }
+ return_ACPI_STATUS(status);
}
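
The serialized-method path above enforces a SyncLevel ordering rule for deadlock prevention and uses an acquisition depth so the owning thread may call the method recursively. A simplified sketch of that logic, assuming a thread context is always available (the real code also handles top-level invocations with no walk state); all names below are illustrative, not the ACPICA API:

struct thread_ctx {
        unsigned char current_sync_level;
};

struct method_mutex {
        unsigned char sync_level;          /* level declared by the method */
        unsigned char original_sync_level; /* caller's level, restored on release */
        unsigned int acquisition_depth;    /* absorbs recursive calls */
        struct thread_ctx *owner;          /* NULL when unowned */
};

int method_mutex_acquire(struct method_mutex *m, struct thread_ctx *t)
{
        if (t->current_sync_level > m->sync_level)
                return -1;                 /* would violate the SyncLevel ordering rule */

        if (m->owner != t) {
                /* a real implementation blocks on the OS mutex here */
                m->original_sync_level = t->current_sync_level;
                m->owner = t;
                t->current_sync_level = m->sync_level;
        }

        m->acquisition_depth++;            /* counted even for recursive acquisition */
        return 0;
}
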
/*******************************************************************************
@@ -218,10 +321,10 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
struct acpi_namespace_node *method_node;
struct acpi_walk_state *next_walk_state = NULL;
union acpi_operand_object *obj_desc;
- struct acpi_parameter_info info;
+ struct acpi_evaluate_info *info;
u32 i;
- ACPI_FUNCTION_TRACE_PTR("ds_call_control_method", this_walk_state);
+ ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"Execute method %p, currentstate=%p\n",
@@ -240,25 +343,31 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
return_ACPI_STATUS(AE_NULL_OBJECT);
}
- /* Init for new method, wait on concurrency semaphore */
+ /* Init for new method, possibly wait on method mutex */
status = acpi_ds_begin_method_execution(method_node, obj_desc,
- this_walk_state->method_node);
+ this_walk_state);
if (ACPI_FAILURE(status)) {
- goto cleanup;
+ return_ACPI_STATUS(status);
}
+ /*
+ * 1) Parse the method. All "normal" methods are parsed for each execution.
+ * Internal methods (_OSI, etc.) do not require parsing.
+ */
if (!(obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY)) {
- /* 1) Parse: Create a new walk state for the preempting walk */
+
+ /* Create a new walk state for the parse */
next_walk_state =
acpi_ds_create_walk_state(obj_desc->method.owner_id, op,
obj_desc, NULL);
if (!next_walk_state) {
- return_ACPI_STATUS(AE_NO_MEMORY);
+ status = AE_NO_MEMORY;
+ goto cleanup;
}
- /* Create and init a Root Node */
+ /* Create and init a parse tree root */
op = acpi_ps_create_scope_op();
if (!op) {
@@ -271,17 +380,20 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
obj_desc->method.aml_length,
NULL, 1);
if (ACPI_FAILURE(status)) {
- acpi_ds_delete_walk_state(next_walk_state);
+ acpi_ps_delete_parse_tree(op);
goto cleanup;
}
- /* Begin AML parse */
+ /* Begin AML parse (deletes next_walk_state) */
status = acpi_ps_parse_aml(next_walk_state);
acpi_ps_delete_parse_tree(op);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
}
- /* 2) Execute: Create a new state for the preempting walk */
+ /* 2) Begin method execution. Create a new walk state */
next_walk_state = acpi_ds_create_walk_state(obj_desc->method.owner_id,
NULL, obj_desc, thread);
@@ -289,6 +401,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
status = AE_NO_MEMORY;
goto cleanup;
}
+
/*
* The resolved arguments were put on the previous walk state's operand
* stack. Operands on the previous walk state stack always
@@ -296,12 +409,24 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
*/
this_walk_state->operands[this_walk_state->num_operands] = NULL;
- info.parameters = &this_walk_state->operands[0];
- info.parameter_type = ACPI_PARAM_ARGS;
+ /*
+ * Allocate and initialize the evaluation information block
+ * TBD: this is somewhat inefficient, should change interface to
+ * ds_init_aml_walk. For now, keeps this struct off the CPU stack
+ */
+ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ info->parameters = &this_walk_state->operands[0];
+ info->parameter_type = ACPI_PARAM_ARGS;
status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node,
obj_desc->method.aml_start,
- obj_desc->method.aml_length, &info, 3);
+ obj_desc->method.aml_length, info, 3);
+
+ ACPI_FREE(info);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
@@ -323,6 +448,8 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
"Starting nested execution, newstate=%p\n",
next_walk_state));
+ /* Invoke an internal method if necessary */
+
if (obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) {
status = obj_desc->method.implementation(next_walk_state);
}
@@ -330,16 +457,14 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
return_ACPI_STATUS(status);
cleanup:
- /* Decrement the thread count on the method parse tree */
- if (next_walk_state && (next_walk_state->method_desc)) {
- next_walk_state->method_desc->method.thread_count--;
- }
+ /* On error, we must terminate the method properly */
- /* On error, we must delete the new walk state */
+ acpi_ds_terminate_control_method(obj_desc, next_walk_state);
+ if (next_walk_state) {
+ acpi_ds_delete_walk_state(next_walk_state);
+ }
- acpi_ds_terminate_control_method(next_walk_state);
- acpi_ds_delete_walk_state(next_walk_state);
return_ACPI_STATUS(status);
}
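
The evaluation information block above is heap-allocated with ACPI_ALLOCATE_ZEROED and freed as soon as acpi_ds_init_aml_walk has consumed it, keeping a large struct off the kernel stack. A rough sketch of that allocate/use/free pattern with hypothetical types (struct eval_info stands in for struct acpi_evaluate_info):

#include <stdlib.h>

struct eval_info {
        void **parameters;      /* caller-resolved arguments */
        int parameter_type;
        char scratch[200];      /* large enough to matter on a kernel stack */
};

static int init_walk(const struct eval_info *info)
{
        /* a real implementation copies what it needs out of *info */
        return info->parameters ? 0 : -1;
}

int start_method(void **operands)
{
        struct eval_info *info;
        int status;

        info = calloc(1, sizeof(*info));   /* ACPI_ALLOCATE_ZEROED equivalent */
        if (!info)
                return -1;                 /* AE_NO_MEMORY */

        info->parameters = operands;
        status = init_walk(info);          /* callee does not keep a pointer to info */

        free(info);                        /* safe to free immediately */
        return status;
}
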
@@ -362,25 +487,33 @@ acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
union acpi_operand_object *return_desc)
{
acpi_status status;
+ int same_as_implicit_return;
- ACPI_FUNCTION_TRACE_PTR("ds_restart_control_method", walk_state);
+ ACPI_FUNCTION_TRACE_PTR(ds_restart_control_method, walk_state);
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "****Restart [%4.4s] Op %p return_value_from_callee %p\n",
+ "****Restart [%4.4s] Op %p ReturnValueFromCallee %p\n",
(char *)&walk_state->method_node->name,
walk_state->method_call_op, return_desc));
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- " return_from_this_method_used?=%X res_stack %p Walk %p\n",
+ " ReturnFromThisMethodUsed?=%X ResStack %p Walk %p\n",
walk_state->return_used,
walk_state->results, walk_state));
/* Did the called method return a value? */
if (return_desc) {
+
+ /* Is the implicit return object the same as the return desc? */
+
+ same_as_implicit_return =
+ (walk_state->implicit_return_obj == return_desc);
+
/* Are we actually going to use the return value? */
if (walk_state->return_used) {
+
/* Save the return value from the previous method */
status = acpi_ds_result_push(return_desc, walk_state);
@@ -397,18 +530,23 @@ acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
}
/*
- * The following code is the
- * optional support for a so-called "implicit return". Some AML code
- * assumes that the last value of the method is "implicitly" returned
- * to the caller. Just save the last result as the return value.
+ * The following code is the optional support for the so-called
+ * "implicit return". Some AML code assumes that the last value of the
+ * method is "implicitly" returned to the caller, in the absence of an
+ * explicit return value.
+ *
+ * Just save the last result of the method as the return value.
+ *
* NOTE: this is optional because the ASL language does not actually
* support this behavior.
*/
else if (!acpi_ds_do_implicit_return
- (return_desc, walk_state, FALSE)) {
+ (return_desc, walk_state, FALSE)
+ || same_as_implicit_return) {
/*
* Delete the return value if it will not be used by the
- * calling method
+ * calling method or remove one reference if the explicit return
+ * is the same as the implicit return value.
*/
acpi_ut_remove_reference(return_desc);
}
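
The return-value handling above hinges on reference counting: when the callee's explicit return object is the very object already held as the implicit return, dropping exactly one reference avoids both a leak and a premature free. A simplified, hypothetical sketch of that balancing logic (not the ACPICA object manager):

struct obj {
        int ref_count;
};

static struct obj *implicit_return_slot;   /* would live in the walk state */

static void obj_remove_reference(struct obj *o)
{
        if (o && --o->ref_count == 0) {
                /* a real object manager would free o here */
        }
}

/* Keep the object as the implicit return value; 0 means it was not kept. */
static int save_as_implicit_return(struct obj *o)
{
        if (implicit_return_slot == o)
                return 0;                  /* already stored there */
        implicit_return_slot = o;          /* takes over the reference */
        return 1;
}

void handle_callee_return(struct obj *return_desc, int return_used)
{
        int same_as_implicit;

        if (!return_desc)
                return;

        same_as_implicit = (implicit_return_slot == return_desc);

        if (return_used) {
                /* the result stack takes over the callee's reference */
                return;
        }

        if (!save_as_implicit_return(return_desc) || same_as_implicit) {
                /*
                 * Either the value is not kept as the implicit return, or it
                 * is the object already stored there; in both cases exactly
                 * one reference is dropped so the count stays balanced.
                 */
                obj_remove_reference(return_desc);
        }
}
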
@@ -421,7 +559,8 @@ acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
*
* FUNCTION: acpi_ds_terminate_control_method
*
- * PARAMETERS: walk_state - State of the method
+ * PARAMETERS: method_desc - Method object
+ * walk_state - State associated with the method
*
* RETURN: None
*
@@ -429,262 +568,121 @@ acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
* created, delete all locals and arguments, and delete the parse
* tree if requested.
*
+ * MUTEX: Interpreter is locked
+ *
******************************************************************************/
-void acpi_ds_terminate_control_method(struct acpi_walk_state *walk_state)
+void
+acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
+ struct acpi_walk_state *walk_state)
{
- union acpi_operand_object *obj_desc;
struct acpi_namespace_node *method_node;
acpi_status status;
- ACPI_FUNCTION_TRACE_PTR("ds_terminate_control_method", walk_state);
-
- if (!walk_state) {
- return_VOID;
- }
+ ACPI_FUNCTION_TRACE_PTR(ds_terminate_control_method, walk_state);
- /* The current method object was saved in the walk state */
+ /* method_desc is required, walk_state is optional */
- obj_desc = walk_state->method_desc;
- if (!obj_desc) {
+ if (!method_desc) {
return_VOID;
}
- /* Delete all arguments and locals */
+ if (walk_state) {
- acpi_ds_method_data_delete_all(walk_state);
+ /* Delete all arguments and locals */
+
+ acpi_ds_method_data_delete_all(walk_state);
+ }
/*
- * Lock the parser while we terminate this method.
- * If this is the last thread executing the method,
- * we have additional cleanup to perform
+ * If method is serialized, release the mutex and restore the
+ * current sync level for this thread
*/
- status = acpi_ut_acquire_mutex(ACPI_MTX_PARSER);
- if (ACPI_FAILURE(status)) {
- return_VOID;
- }
+ if (method_desc->method.mutex) {
- /* Signal completion of the execution of this method if necessary */
+ /* Acquisition Depth handles recursive calls */
- if (walk_state->method_desc->method.semaphore) {
- status =
- acpi_os_signal_semaphore(walk_state->method_desc->method.
- semaphore, 1);
- if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO,
- "Could not signal method semaphore"));
+ method_desc->method.mutex->mutex.acquisition_depth--;
+ if (!method_desc->method.mutex->mutex.acquisition_depth) {
+ walk_state->thread->current_sync_level =
+ method_desc->method.mutex->mutex.
+ original_sync_level;
- /* Ignore error and continue cleanup */
+ acpi_os_release_mutex(method_desc->method.mutex->mutex.
+ os_mutex);
}
}
- /*
- * There are no more threads executing this method. Perform
- * additional cleanup.
- *
- * The method Node is stored in the walk state
- */
- method_node = walk_state->method_node;
+ if (walk_state) {
+ /*
+ * Delete any objects created by this method during execution.
+ * The method Node is stored in the walk state
+ */
+ method_node = walk_state->method_node;
- /* Lock namespace for possible update */
+ /* Lock namespace for possible update */
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- goto exit;
- }
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_VOID;
+ }
- /*
- * Delete any namespace entries created immediately underneath
- * the method
- */
- if (method_node->child) {
- acpi_ns_delete_namespace_subtree(method_node);
+ /*
+ * Delete any namespace entries created immediately underneath
+ * the method
+ */
+ if (method_node && method_node->child) {
+ acpi_ns_delete_namespace_subtree(method_node);
+ }
+
+ /*
+ * Delete any namespace entries created anywhere else within
+ * the namespace by the execution of this method
+ */
+ acpi_ns_delete_namespace_by_owner(method_desc->method.owner_id);
+ status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
}
- /*
- * Delete any namespace entries created anywhere else within
- * the namespace by the execution of this method
- */
- acpi_ns_delete_namespace_by_owner(walk_state->method_desc->method.
- owner_id);
- status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ /* Decrement the thread count on the method */
+
+ if (method_desc->method.thread_count) {
+ method_desc->method.thread_count--;
+ } else {
+ ACPI_ERROR((AE_INFO, "Invalid zero thread count in method"));
+ }
/* Are there any other threads currently executing this method? */
- if (walk_state->method_desc->method.thread_count) {
+ if (method_desc->method.thread_count) {
/*
* Additional threads. Do not release the owner_id in this case,
* we immediately reuse it for the next thread executing this method
*/
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"*** Completed execution of one thread, %d threads remaining\n",
- walk_state->method_desc->method.
- thread_count));
+ method_desc->method.thread_count));
} else {
/* This is the only executing thread for this method */
/*
* Support to dynamically change a method from not_serialized to
* Serialized if it appears that the method is incorrectly written and
- * does not support multiple thread execution. The best example of this
- * is if such a method creates namespace objects and blocks. A second
+ * does not support multiple thread execution. The best example of this
+ * is if such a method creates namespace objects and blocks. A second
* thread will fail with an AE_ALREADY_EXISTS exception
*
* This code is here because we must wait until the last thread exits
* before creating the synchronization semaphore.
*/
- if ((walk_state->method_desc->method.concurrency == 1) &&
- (!walk_state->method_desc->method.semaphore)) {
- status = acpi_os_create_semaphore(1, 1,
- &walk_state->
- method_desc->method.
- semaphore);
+ if ((method_desc->method.method_flags & AML_METHOD_SERIALIZED)
+ && (!method_desc->method.mutex)) {
+ status = acpi_ds_create_method_mutex(method_desc);
}
/* No more threads, we can free the owner_id */
- acpi_ut_release_owner_id(&walk_state->method_desc->method.
- owner_id);
+ acpi_ut_release_owner_id(&method_desc->method.owner_id);
}
- exit:
- (void)acpi_ut_release_mutex(ACPI_MTX_PARSER);
return_VOID;
}
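
Method termination above decrements the per-method thread count defensively and performs the one-time cleanup (namespace deletion, owner-id release) only when the last thread exits. A compact sketch of that bookkeeping with hypothetical types, not the ACPICA interfaces:

#include <stdio.h>

struct method {
        unsigned int thread_count;
        unsigned int owner_id;   /* nonzero while method-created objects may reference it */
};

void method_terminate(struct method *m)
{
        if (m->thread_count)
                m->thread_count--;
        else
                fprintf(stderr, "Invalid zero thread count in method\n");

        if (m->thread_count)
                return;          /* other threads are still executing this method */

        /* last thread out: namespace cleanup would happen here */
        m->owner_id = 0;         /* release the owner id for reuse */
}
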
-
-#ifdef ACPI_INIT_PARSE_METHODS
- /*
- * Note 11/2005: Removed this code to parse all methods during table
- * load because it causes problems if there are any errors during the
- * parse. Also, it seems like overkill and we probably don't want to
- * abort a table load because of an issue with a single method.
- */
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ds_parse_method
- *
- * PARAMETERS: Node - Method node
- *
- * RETURN: Status
- *
- * DESCRIPTION: Parse the AML that is associated with the method.
- *
- * MUTEX: Assumes parser is locked
- *
- ******************************************************************************/
-
-acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node)
-{
- acpi_status status;
- union acpi_operand_object *obj_desc;
- union acpi_parse_object *op;
- struct acpi_walk_state *walk_state;
-
- ACPI_FUNCTION_TRACE_PTR("ds_parse_method", node);
-
- /* Parameter Validation */
-
- if (!node) {
- return_ACPI_STATUS(AE_NULL_ENTRY);
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
- "**** Parsing [%4.4s] **** named_obj=%p\n",
- acpi_ut_get_node_name(node), node));
-
- /* Extract the method object from the method Node */
-
- obj_desc = acpi_ns_get_attached_object(node);
- if (!obj_desc) {
- return_ACPI_STATUS(AE_NULL_OBJECT);
- }
-
- /* Create a mutex for the method if there is a concurrency limit */
-
- if ((obj_desc->method.concurrency != ACPI_INFINITE_CONCURRENCY) &&
- (!obj_desc->method.semaphore)) {
- status = acpi_os_create_semaphore(obj_desc->method.concurrency,
- obj_desc->method.concurrency,
- &obj_desc->method.semaphore);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
-
- /*
- * Allocate a new parser op to be the root of the parsed
- * method tree
- */
- op = acpi_ps_alloc_op(AML_METHOD_OP);
- if (!op) {
- return_ACPI_STATUS(AE_NO_MEMORY);
- }
-
- /* Init new op with the method name and pointer back to the Node */
-
- acpi_ps_set_name(op, node->name.integer);
- op->common.node = node;
-
- /*
- * Get a new owner_id for objects created by this method. Namespace
- * objects (such as Operation Regions) can be created during the
- * first pass parse.
- */
- status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
- if (ACPI_FAILURE(status)) {
- goto cleanup;
- }
-
- /* Create and initialize a new walk state */
-
- walk_state =
- acpi_ds_create_walk_state(obj_desc->method.owner_id, NULL, NULL,
- NULL);
- if (!walk_state) {
- status = AE_NO_MEMORY;
- goto cleanup2;
- }
-
- status = acpi_ds_init_aml_walk(walk_state, op, node,
- obj_desc->method.aml_start,
- obj_desc->method.aml_length, NULL, 1);
- if (ACPI_FAILURE(status)) {
- acpi_ds_delete_walk_state(walk_state);
- goto cleanup2;
- }
-
- /*
- * Parse the method, first pass
- *
- * The first pass load is where newly declared named objects are added into
- * the namespace. Actual evaluation of the named objects (what would be
- * called a "second pass") happens during the actual execution of the
- * method so that operands to the named objects can take on dynamic
- * run-time values.
- */
- status = acpi_ps_parse_aml(walk_state);
- if (ACPI_FAILURE(status)) {
- goto cleanup2;
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
- "**** [%4.4s] Parsed **** named_obj=%p Op=%p\n",
- acpi_ut_get_node_name(node), node, op));
-
- /*
- * Delete the parse tree. We simply re-parse the method for every
- * execution since there isn't much overhead (compared to keeping lots
- * of parse trees around)
- */
- acpi_ns_delete_namespace_subtree(node);
- acpi_ns_delete_namespace_by_owner(obj_desc->method.owner_id);
-
- cleanup2:
- acpi_ut_release_owner_id(&obj_desc->method.owner_id);
-
- cleanup:
- acpi_ps_delete_parse_tree(op);
- return_ACPI_STATUS(status);
-}
-#endif