From 4d2acd9ea539e0f59178b126f6750ccc41eefcdd Mon Sep 17 00:00:00 2001 From: Len Brown Date: Wed, 9 May 2007 22:56:38 -0400 Subject: Revert "ACPICA: revert "acpi_serialize" changes" This reverts commit a8f4af6dc6600980885c594f52eecd60edd62013. Thus restoring ACPICA's new acpi_serialize code. This commit by itself may cause a regression, but it is reverted in this order so that subsequent reverts under this one can be made without conflict. Signed-off-by: Len Brown diff --git a/drivers/acpi/events/evmisc.c b/drivers/acpi/events/evmisc.c index cae786c..3a799b9 100644 --- a/drivers/acpi/events/evmisc.c +++ b/drivers/acpi/events/evmisc.c @@ -196,15 +196,11 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node, notify_info->notify.value = (u16) notify_value; notify_info->notify.handler_obj = handler_obj; - acpi_ex_exit_interpreter(); + acpi_ex_relinquish_interpreter(); acpi_ev_notify_dispatch(notify_info); - status = acpi_ex_enter_interpreter(); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - + acpi_ex_reacquire_interpreter(); } if (!handler_obj) { diff --git a/drivers/acpi/events/evregion.c b/drivers/acpi/events/evregion.c index 96b0e84..e99f0c4 100644 --- a/drivers/acpi/events/evregion.c +++ b/drivers/acpi/events/evregion.c @@ -291,7 +291,6 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, u32 bit_width, acpi_integer * value) { acpi_status status; - acpi_status status2; acpi_adr_space_handler handler; acpi_adr_space_setup region_setup; union acpi_operand_object *handler_desc; @@ -345,7 +344,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, * setup will potentially execute control methods * (e.g., _REG method for this region) */ - acpi_ex_exit_interpreter(); + acpi_ex_relinquish_interpreter(); status = region_setup(region_obj, ACPI_REGION_ACTIVATE, handler_desc->address_space.context, @@ -353,10 +352,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, /* Re-enter the interpreter */ - status2 = acpi_ex_enter_interpreter(); - if (ACPI_FAILURE(status2)) { - return_ACPI_STATUS(status2); - } + acpi_ex_reacquire_interpreter(); /* Check for failure of the Region Setup */ @@ -409,7 +405,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, * exit the interpreter because the handler *might* block -- we don't * know what it will do, so we can't hold the lock on the intepreter. 
*/ - acpi_ex_exit_interpreter(); + acpi_ex_relinquish_interpreter(); } /* Call the handler */ @@ -430,10 +426,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, * We just returned from a non-default handler, we must re-enter the * interpreter */ - status2 = acpi_ex_enter_interpreter(); - if (ACPI_FAILURE(status2)) { - return_ACPI_STATUS(status2); - } + acpi_ex_reacquire_interpreter(); } return_ACPI_STATUS(status); diff --git a/drivers/acpi/events/evxface.c b/drivers/acpi/events/evxface.c index a3379ba..685a103 100644 --- a/drivers/acpi/events/evxface.c +++ b/drivers/acpi/events/evxface.c @@ -768,11 +768,9 @@ acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle) return (AE_BAD_PARAMETER); } - status = acpi_ex_enter_interpreter(); - if (ACPI_FAILURE(status)) { - return (status); - } + /* Must lock interpreter to prevent race conditions */ + acpi_ex_enter_interpreter(); status = acpi_ev_acquire_global_lock(timeout); acpi_ex_exit_interpreter(); diff --git a/drivers/acpi/executer/excreate.c b/drivers/acpi/executer/excreate.c index ae97812..7c38528 100644 --- a/drivers/acpi/executer/excreate.c +++ b/drivers/acpi/executer/excreate.c @@ -583,10 +583,7 @@ acpi_ex_create_method(u8 * aml_start, * Get the sync_level. If method is serialized, a mutex will be * created for this method when it is parsed. */ - if (acpi_gbl_all_methods_serialized) { - obj_desc->method.sync_level = 0; - obj_desc->method.method_flags |= AML_METHOD_SERIALIZED; - } else if (method_flags & AML_METHOD_SERIALIZED) { + if (method_flags & AML_METHOD_SERIALIZED) { /* * ACPI 1.0: sync_level = 0 * ACPI 2.0: sync_level = sync_level in method declaration diff --git a/drivers/acpi/executer/exsystem.c b/drivers/acpi/executer/exsystem.c index b2edf62..9460baf 100644 --- a/drivers/acpi/executer/exsystem.c +++ b/drivers/acpi/executer/exsystem.c @@ -66,7 +66,6 @@ ACPI_MODULE_NAME("exsystem") acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout) { acpi_status status; - acpi_status status2; ACPI_FUNCTION_TRACE(ex_system_wait_semaphore); @@ -79,7 +78,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout) /* We must wait, so unlock the interpreter */ - acpi_ex_exit_interpreter(); + acpi_ex_relinquish_interpreter(); status = acpi_os_wait_semaphore(semaphore, 1, timeout); @@ -89,13 +88,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout) /* Reacquire the interpreter */ - status2 = acpi_ex_enter_interpreter(); - if (ACPI_FAILURE(status2)) { - - /* Report fatal error, could not acquire interpreter */ - - return_ACPI_STATUS(status2); - } + acpi_ex_reacquire_interpreter(); } return_ACPI_STATUS(status); @@ -119,7 +112,6 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout) acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout) { acpi_status status; - acpi_status status2; ACPI_FUNCTION_TRACE(ex_system_wait_mutex); @@ -132,7 +124,7 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout) /* We must wait, so unlock the interpreter */ - acpi_ex_exit_interpreter(); + acpi_ex_relinquish_interpreter(); status = acpi_os_acquire_mutex(mutex, timeout); @@ -142,13 +134,7 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout) /* Reacquire the interpreter */ - status2 = acpi_ex_enter_interpreter(); - if (ACPI_FAILURE(status2)) { - - /* Report fatal error, could not acquire interpreter */ - - return_ACPI_STATUS(status2); - } + acpi_ex_reacquire_interpreter(); } 
return_ACPI_STATUS(status); @@ -209,20 +195,18 @@ acpi_status acpi_ex_system_do_stall(u32 how_long) acpi_status acpi_ex_system_do_suspend(acpi_integer how_long) { - acpi_status status; - ACPI_FUNCTION_ENTRY(); /* Since this thread will sleep, we must release the interpreter */ - acpi_ex_exit_interpreter(); + acpi_ex_relinquish_interpreter(); acpi_os_sleep(how_long); /* And now we must get the interpreter again */ - status = acpi_ex_enter_interpreter(); - return (status); + acpi_ex_reacquire_interpreter(); + return (AE_OK); } /******************************************************************************* diff --git a/drivers/acpi/executer/exutils.c b/drivers/acpi/executer/exutils.c index aea461f..6b0aecc 100644 --- a/drivers/acpi/executer/exutils.c +++ b/drivers/acpi/executer/exutils.c @@ -76,14 +76,15 @@ static u32 acpi_ex_digits_needed(acpi_integer value, u32 base); * * PARAMETERS: None * - * RETURN: Status + * RETURN: None * - * DESCRIPTION: Enter the interpreter execution region. Failure to enter - * the interpreter region is a fatal system error + * DESCRIPTION: Enter the interpreter execution region. Failure to enter + * the interpreter region is a fatal system error. Used in + * conjunction with exit_interpreter. * ******************************************************************************/ -acpi_status acpi_ex_enter_interpreter(void) +void acpi_ex_enter_interpreter(void) { acpi_status status; @@ -91,31 +92,55 @@ acpi_status acpi_ex_enter_interpreter(void) status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER); if (ACPI_FAILURE(status)) { - ACPI_ERROR((AE_INFO, "Could not acquire interpreter mutex")); + ACPI_ERROR((AE_INFO, + "Could not acquire AML Interpreter mutex")); } - return_ACPI_STATUS(status); + return_VOID; } /******************************************************************************* * - * FUNCTION: acpi_ex_exit_interpreter + * FUNCTION: acpi_ex_reacquire_interpreter * * PARAMETERS: None * * RETURN: None * - * DESCRIPTION: Exit the interpreter execution region + * DESCRIPTION: Reacquire the interpreter execution region from within the + * interpreter code. Failure to enter the interpreter region is a + * fatal system error. Used in conjuction with + * relinquish_interpreter + * + ******************************************************************************/ + +void acpi_ex_reacquire_interpreter(void) +{ + ACPI_FUNCTION_TRACE(ex_reacquire_interpreter); + + /* + * If the global serialized flag is set, do not release the interpreter, + * since it was not actually released by acpi_ex_relinquish_interpreter. + * This forces the interpreter to be single threaded. + */ + if (!acpi_gbl_all_methods_serialized) { + acpi_ex_enter_interpreter(); + } + + return_VOID; +} + +/******************************************************************************* + * + * FUNCTION: acpi_ex_exit_interpreter + * + * PARAMETERS: None + * + * RETURN: None * - * Cases where the interpreter is unlocked: - * 1) Completion of the execution of a control method - * 2) Method blocked on a Sleep() AML opcode - * 3) Method blocked on an Acquire() AML opcode - * 4) Method blocked on a Wait() AML opcode - * 5) Method blocked to acquire the global lock - * 6) Method blocked to execute a serialized control method that is - * already executing - * 7) About to invoke a user-installed opregion handler + * DESCRIPTION: Exit the interpreter execution region. This is the top level + * routine used to exit the interpreter when all processing has + * been completed. 
* ******************************************************************************/ @@ -127,7 +152,46 @@ void acpi_ex_exit_interpreter(void) status = acpi_ut_release_mutex(ACPI_MTX_INTERPRETER); if (ACPI_FAILURE(status)) { - ACPI_ERROR((AE_INFO, "Could not release interpreter mutex")); + ACPI_ERROR((AE_INFO, + "Could not release AML Interpreter mutex")); + } + + return_VOID; +} + +/******************************************************************************* + * + * FUNCTION: acpi_ex_relinquish_interpreter + * + * PARAMETERS: None + * + * RETURN: None + * + * DESCRIPTION: Exit the interpreter execution region, from within the + * interpreter - before attempting an operation that will possibly + * block the running thread. + * + * Cases where the interpreter is unlocked internally + * 1) Method to be blocked on a Sleep() AML opcode + * 2) Method to be blocked on an Acquire() AML opcode + * 3) Method to be blocked on a Wait() AML opcode + * 4) Method to be blocked to acquire the global lock + * 5) Method to be blocked waiting to execute a serialized control method + * that is currently executing + * 6) About to invoke a user-installed opregion handler + * + ******************************************************************************/ + +void acpi_ex_relinquish_interpreter(void) +{ + ACPI_FUNCTION_TRACE(ex_relinquish_interpreter); + + /* + * If the global serialized flag is set, do not release the interpreter. + * This forces the interpreter to be single threaded. + */ + if (!acpi_gbl_all_methods_serialized) { + acpi_ex_exit_interpreter(); } return_VOID; @@ -141,8 +205,8 @@ void acpi_ex_exit_interpreter(void) * * RETURN: none * - * DESCRIPTION: Truncate a number to 32-bits if the currently executing method - * belongs to a 32-bit ACPI table. + * DESCRIPTION: Truncate an ACPI Integer to 32 bits if the execution mode is + * 32-bit, as determined by the revision of the DSDT. * ******************************************************************************/ diff --git a/drivers/acpi/namespace/nseval.c b/drivers/acpi/namespace/nseval.c index 26fd0dd..aa6370c 100644 --- a/drivers/acpi/namespace/nseval.c +++ b/drivers/acpi/namespace/nseval.c @@ -154,11 +154,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info) * Execute the method via the interpreter. The interpreter is locked * here before calling into the AML parser */ - status = acpi_ex_enter_interpreter(); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - + acpi_ex_enter_interpreter(); status = acpi_ps_execute_method(info); acpi_ex_exit_interpreter(); } else { @@ -182,10 +178,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info) * resolution, we must lock it because we could access an opregion. * The opregion access code assumes that the interpreter is locked. 
*/ - status = acpi_ex_enter_interpreter(); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } + acpi_ex_enter_interpreter(); /* Function has a strange interface */ diff --git a/drivers/acpi/namespace/nsinit.c b/drivers/acpi/namespace/nsinit.c index c4ab615..33db224 100644 --- a/drivers/acpi/namespace/nsinit.c +++ b/drivers/acpi/namespace/nsinit.c @@ -214,7 +214,7 @@ acpi_ns_init_one_object(acpi_handle obj_handle, u32 level, void *context, void **return_value) { acpi_object_type type; - acpi_status status; + acpi_status status = AE_OK; struct acpi_init_walk_info *info = (struct acpi_init_walk_info *)context; struct acpi_namespace_node *node = @@ -268,10 +268,7 @@ acpi_ns_init_one_object(acpi_handle obj_handle, /* * Must lock the interpreter before executing AML code */ - status = acpi_ex_enter_interpreter(); - if (ACPI_FAILURE(status)) { - return (status); - } + acpi_ex_enter_interpreter(); /* * Each of these types can contain executable AML code within the diff --git a/drivers/acpi/namespace/nsxfeval.c b/drivers/acpi/namespace/nsxfeval.c index 8904d0f..7ac6ace 100644 --- a/drivers/acpi/namespace/nsxfeval.c +++ b/drivers/acpi/namespace/nsxfeval.c @@ -170,7 +170,6 @@ acpi_evaluate_object(acpi_handle handle, struct acpi_buffer *return_buffer) { acpi_status status; - acpi_status status2; struct acpi_evaluate_info *info; acpi_size buffer_space_needed; u32 i; @@ -329,14 +328,12 @@ acpi_evaluate_object(acpi_handle handle, * Delete the internal return object. NOTE: Interpreter must be * locked to avoid race condition. */ - status2 = acpi_ex_enter_interpreter(); - if (ACPI_SUCCESS(status2)) { + acpi_ex_enter_interpreter(); - /* Remove one reference on the return object (should delete it) */ + /* Remove one reference on the return object (should delete it) */ - acpi_ut_remove_reference(info->return_object); - acpi_ex_exit_interpreter(); - } + acpi_ut_remove_reference(info->return_object); + acpi_ex_exit_interpreter(); } cleanup: diff --git a/include/acpi/acinterp.h b/include/acpi/acinterp.h index 4409830..73967c8 100644 --- a/include/acpi/acinterp.h +++ b/include/acpi/acinterp.h @@ -446,10 +446,14 @@ acpi_ex_copy_integer_to_buffer_field(union acpi_operand_object *source_desc, /* * exutils - interpreter/scanner utilities */ -acpi_status acpi_ex_enter_interpreter(void); +void acpi_ex_enter_interpreter(void); void acpi_ex_exit_interpreter(void); +void acpi_ex_reacquire_interpreter(void); + +void acpi_ex_relinquish_interpreter(void); + void acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc); u8 acpi_ex_acquire_global_lock(u32 rule); -- cgit v0.10.2 From 40d07080e585396dc58bc64befa1de0695318b3b Mon Sep 17 00:00:00 2001 From: Len Brown Date: Wed, 9 May 2007 22:59:38 -0400 Subject: Revert "Execute AML Notify() requests on stack." This reverts commit 5f7748cf91558a5026ded5be93c5bf6c1ac34edf. While that change fixed the HP http://bugzilla.kernel.org/show_bug.cgi?id=5534, it broke the ACER: http://bugzilla.kernel.org/show_bug.cgi?id=8385 which has AML that caused Linux to go recursive and stack fault. So this commit by itself will restore the ACER and again break the HP, which we'll fix another way. 
Signed-off-by: Len Brown diff --git a/drivers/acpi/events/evmisc.c b/drivers/acpi/events/evmisc.c index 3a799b9..ad447b1 100644 --- a/drivers/acpi/events/evmisc.c +++ b/drivers/acpi/events/evmisc.c @@ -196,11 +196,12 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node, notify_info->notify.value = (u16) notify_value; notify_info->notify.handler_obj = handler_obj; - acpi_ex_relinquish_interpreter(); - - acpi_ev_notify_dispatch(notify_info); - - acpi_ex_reacquire_interpreter(); + status = + acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_notify_dispatch, + notify_info); + if (ACPI_FAILURE(status)) { + acpi_ut_delete_generic_state(notify_info); + } } if (!handler_obj) { -- cgit v0.10.2 From 262a7a28de060f3a63cae20035876d6f22fd7670 Mon Sep 17 00:00:00 2001 From: Len Brown Date: Wed, 9 May 2007 23:01:59 -0400 Subject: Revert "ACPICA: fix AML mutex re-entrancy" This reverts commit c0d127b56937c3e72c2b1819161d2f6718eee877. These changes to AML locking were made to allow Notify handlers to be called on the stack and not deadlock. However, that scheme turns out to be flawed and was reverted by the previous commit, so this commit restores the locking to its previous design. Signed-off-by: Len Brown diff --git a/drivers/acpi/dispatcher/dsmethod.c b/drivers/acpi/dispatcher/dsmethod.c index 1683e5c..1cbe619 100644 --- a/drivers/acpi/dispatcher/dsmethod.c +++ b/drivers/acpi/dispatcher/dsmethod.c @@ -231,8 +231,10 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node, * Obtain the method mutex if necessary. Do not acquire mutex for a * recursive call. */ - if (acpi_os_get_thread_id() != - obj_desc->method.mutex->mutex.owner_thread_id) { + if (!walk_state || + !obj_desc->method.mutex->mutex.owner_thread || + (walk_state->thread != + obj_desc->method.mutex->mutex.owner_thread)) { /* * Acquire the method mutex. This releases the interpreter if we * block (and reacquires it before it returns) @@ -246,14 +248,14 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node, } /* Update the mutex and walk info and save the original sync_level */ - obj_desc->method.mutex->mutex.owner_thread_id = - acpi_os_get_thread_id(); if (walk_state) { obj_desc->method.mutex->mutex. original_sync_level = walk_state->thread->current_sync_level; + obj_desc->method.mutex->mutex.owner_thread = + walk_state->thread; walk_state->thread->current_sync_level = obj_desc->method.sync_level; } else { @@ -567,7 +569,7 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc, acpi_os_release_mutex(method_desc->method.mutex->mutex. 
os_mutex); - method_desc->method.mutex->mutex.owner_thread_id = ACPI_MUTEX_NOT_ACQUIRED; + method_desc->method.mutex->mutex.owner_thread = NULL; } } diff --git a/drivers/acpi/executer/exdump.c b/drivers/acpi/executer/exdump.c index 1a73c14..68d283f 100644 --- a/drivers/acpi/executer/exdump.c +++ b/drivers/acpi/executer/exdump.c @@ -134,7 +134,7 @@ static struct acpi_exdump_info acpi_ex_dump_method[8] = { static struct acpi_exdump_info acpi_ex_dump_mutex[5] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_mutex), NULL}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(mutex.sync_level), "Sync Level"}, - {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.owner_thread_id), "Owner Thread"}, + {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.owner_thread), "Owner Thread"}, {ACPI_EXD_UINT16, ACPI_EXD_OFFSET(mutex.acquisition_depth), "Acquire Depth"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.os_mutex), "OsMutex"} diff --git a/drivers/acpi/executer/exmutex.c b/drivers/acpi/executer/exmutex.c index 4eb883b..5101bad 100644 --- a/drivers/acpi/executer/exmutex.c +++ b/drivers/acpi/executer/exmutex.c @@ -66,9 +66,10 @@ acpi_ex_link_mutex(union acpi_operand_object *obj_desc, * ******************************************************************************/ -void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc, - struct acpi_thread_state *thread) +void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc) { + struct acpi_thread_state *thread = obj_desc->mutex.owner_thread; + if (!thread) { return; } @@ -173,13 +174,16 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc, /* Support for multiple acquires by the owning thread */ - if (obj_desc->mutex.owner_thread_id == acpi_os_get_thread_id()) { - /* - * The mutex is already owned by this thread, just increment the - * acquisition depth - */ - obj_desc->mutex.acquisition_depth++; - return_ACPI_STATUS(AE_OK); + if (obj_desc->mutex.owner_thread) { + if (obj_desc->mutex.owner_thread->thread_id == + walk_state->thread->thread_id) { + /* + * The mutex is already owned by this thread, just increment the + * acquisition depth + */ + obj_desc->mutex.acquisition_depth++; + return_ACPI_STATUS(AE_OK); + } } /* Acquire the mutex, wait if necessary. Special case for Global Lock */ @@ -202,7 +206,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc, /* Have the mutex: update mutex and walk info and save the sync_level */ - obj_desc->mutex.owner_thread_id = acpi_os_get_thread_id(); + obj_desc->mutex.owner_thread = walk_state->thread; obj_desc->mutex.acquisition_depth = 1; obj_desc->mutex.original_sync_level = walk_state->thread->current_sync_level; @@ -242,7 +246,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc, /* The mutex must have been previously acquired in order to release it */ - if (!obj_desc->mutex.owner_thread_id) { + if (!obj_desc->mutex.owner_thread) { ACPI_ERROR((AE_INFO, "Cannot release Mutex [%4.4s], not acquired", acpi_ut_get_node_name(obj_desc->mutex.node))); @@ -262,14 +266,14 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc, * The Mutex is owned, but this thread must be the owner. 
* Special case for Global Lock, any thread can release */ - if ((obj_desc->mutex.owner_thread_id != + if ((obj_desc->mutex.owner_thread->thread_id != walk_state->thread->thread_id) && (obj_desc->mutex.os_mutex != acpi_gbl_global_lock_mutex)) { ACPI_ERROR((AE_INFO, "Thread %lX cannot release Mutex [%4.4s] acquired by thread %lX", (unsigned long)walk_state->thread->thread_id, acpi_ut_get_node_name(obj_desc->mutex.node), - (unsigned long)obj_desc->mutex.owner_thread_id)); + (unsigned long)obj_desc->mutex.owner_thread->thread_id)); return_ACPI_STATUS(AE_AML_NOT_OWNER); } @@ -296,7 +300,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc, /* Unlink the mutex from the owner's list */ - acpi_ex_unlink_mutex(obj_desc, walk_state->thread); + acpi_ex_unlink_mutex(obj_desc); /* Release the mutex, special case for Global Lock */ @@ -308,7 +312,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc, /* Update the mutex and restore sync_level */ - obj_desc->mutex.owner_thread_id = ACPI_MUTEX_NOT_ACQUIRED; + obj_desc->mutex.owner_thread = NULL; walk_state->thread->current_sync_level = obj_desc->mutex.original_sync_level; @@ -363,7 +367,7 @@ void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread) /* Mark mutex unowned */ - obj_desc->mutex.owner_thread_id = ACPI_MUTEX_NOT_ACQUIRED; + obj_desc->mutex.owner_thread = NULL; /* Update Thread sync_level (Last mutex is the important one) */ diff --git a/drivers/acpi/utilities/utdelete.c b/drivers/acpi/utilities/utdelete.c index 673a0ca..f777ceb 100644 --- a/drivers/acpi/utilities/utdelete.c +++ b/drivers/acpi/utilities/utdelete.c @@ -170,6 +170,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object) acpi_os_delete_mutex(object->mutex.os_mutex); acpi_gbl_global_lock_mutex = NULL; } else { + acpi_ex_unlink_mutex(object); acpi_os_delete_mutex(object->mutex.os_mutex); } break; diff --git a/include/acpi/acinterp.h b/include/acpi/acinterp.h index 73967c8..ce7c9d6 100644 --- a/include/acpi/acinterp.h +++ b/include/acpi/acinterp.h @@ -253,8 +253,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc, void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread); -void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc, - struct acpi_thread_state *thread); +void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc); /* * exprep - ACPI AML execution - prep utilities diff --git a/include/acpi/acobject.h b/include/acpi/acobject.h index 5206d61..04e9735 100644 --- a/include/acpi/acobject.h +++ b/include/acpi/acobject.h @@ -155,7 +155,7 @@ struct acpi_object_event { struct acpi_object_mutex { ACPI_OBJECT_COMMON_HEADER u8 sync_level; /* 0-15, specified in Mutex() call */ u16 acquisition_depth; /* Allow multiple Acquires, same thread */ - acpi_thread_id owner_thread_id; /* Current owner of the mutex */ + struct acpi_thread_state *owner_thread; /* Current owner of the mutex */ acpi_mutex os_mutex; /* Actual OS synchronization object */ union acpi_operand_object *prev; /* Link for list of acquired mutexes */ union acpi_operand_object *next; /* Link for list of acquired mutexes */ -- cgit v0.10.2 From 88db5e1489f23876a226f5393fd978ddc09dc5f9 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Wed, 9 May 2007 23:31:03 -0400 Subject: ACPI: created a dedicated workqueue for notify() execution HP nx6125/nx6325/... machines have a _GPE handler with an infinite loop sending Notify() events to different ACPI subsystems. 
The Notify handler in the ACPI driver is a C routine, which may call the ACPI interpreter again to get access to some ACPI variables (acpi_evaluate_xxx). On these HP machines such an evaluation changes the state of some variable and lets the loop above break. In the current ACPI implementation, Notify requests are deferred to the same kacpid workqueue on which the above GPE handler with the infinite loop is executing. Thus we have a deadlock -- the loop will continue to spin, sending notify events, and at the same time preventing those notify events from being run on a workqueue. All notify events are deferred, thus we see the increase in memory consumption noticed by the author of the thread. Also, as GPE handling is blocked, the machines overheat. Eventually, by an external poll of the same acpi_evaluate, kacpid is released and all the queued notify events are free to run, thus 100% cpu utilization by kacpid for several seconds or more. To prevent all these horrors we need to keep notify events off the kacpid workqueue, by either executing them immediately or putting them on some other thread. It's dangerous to execute notify events in place, as it will put several ACPI interpreter stacks on top of each other (at least 4 in the case of the nx6125), thus causing kernel stack overflow. The first attempt to create a new thread was done by Peter Wainwright. He created a bunch of threads, which were stealing work from the kacpid workqueue. This patch appeared in the 2.6.15 kernel shipped with Ubuntu 6.06 LTS. The second attempt was done by me; I created a new thread for each Notify event. This worked OK on the HP nx machines, but broke Linus' Compaq n620c by producing threads at such a rate that they stopped the machine completely. Thus this patch was reverted from 18-rc2, as I remember. I re-made the patch to create a second workqueue just for notify events, hoping it would not break Linus' machine. The patch was tested on the same HP nx machines in #5534 and #7122, but I did not receive a reply from Linus on a test patch sent to him. The patch went to 19-rc and was rejected with much fanfare again. There was a 4th patch, which inserted schedule_timeout(1) into the deferred execution of kacpid if we had any notify requests pending, but Linus decided that it was too complex (it involved either changes to the workqueue to see if it's empty, or atomic inc/dec). Now you see the last variant, which adds yield() to every GPE execution. 
http://bugzilla.kernel.org/show_bug.cgi?id=5534 http://bugzilla.kernel.org/show_bug.cgi?id=8385 Signed-off-by: Alexey Starikovskiy Signed-off-by: Len Brown diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index c2bed56..b998340 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -71,6 +71,7 @@ static unsigned int acpi_irq_irq; static acpi_osd_handler acpi_irq_handler; static void *acpi_irq_context; static struct workqueue_struct *kacpid_wq; +static struct workqueue_struct *kacpi_notify_wq; static void __init acpi_request_region (struct acpi_generic_address *addr, unsigned int length, char *desc) @@ -137,8 +138,9 @@ acpi_status acpi_os_initialize1(void) return AE_NULL_ENTRY; } kacpid_wq = create_singlethread_workqueue("kacpid"); + kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify"); BUG_ON(!kacpid_wq); - + BUG_ON(!kacpi_notify_wq); return AE_OK; } @@ -150,6 +152,7 @@ acpi_status acpi_os_terminate(void) } destroy_workqueue(kacpid_wq); + destroy_workqueue(kacpi_notify_wq); return AE_OK; } @@ -603,6 +606,23 @@ void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound */ static void acpi_os_execute_deferred(struct work_struct *work) { struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work); + if (!dpc) { + printk(KERN_ERR PREFIX "Invalid (NULL) context\n"); + return; + } + + dpc->function(dpc->context); + kfree(dpc); + + /* Yield cpu to notify thread */ + cond_resched(); + + return; +} + +static void acpi_os_execute_notify(struct work_struct *work) +{ + struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work); if (!dpc) { printk(KERN_ERR PREFIX "Invalid (NULL) context\n"); @@ -637,14 +657,12 @@ acpi_status acpi_os_execute(acpi_execute_type type, acpi_status status = AE_OK; struct acpi_os_dpc *dpc; - ACPI_FUNCTION_TRACE("os_queue_for_execution"); - ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Scheduling function [%p(%p)] for deferred execution.\n", function, context)); if (!function) - return_ACPI_STATUS(AE_BAD_PARAMETER); + return AE_BAD_PARAMETER; /* * Allocate/initialize DPC structure. Note that this memory will be @@ -662,14 +680,21 @@ acpi_status acpi_os_execute(acpi_execute_type type, dpc->function = function; dpc->context = context; - INIT_WORK(&dpc->work, acpi_os_execute_deferred); - if (!queue_work(kacpid_wq, &dpc->work)) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + if (type == OSL_NOTIFY_HANDLER) { + INIT_WORK(&dpc->work, acpi_os_execute_notify); + if (!queue_work(kacpi_notify_wq, &dpc->work)) { + status = AE_ERROR; + kfree(dpc); + } + } else { + INIT_WORK(&dpc->work, acpi_os_execute_deferred); + if (!queue_work(kacpid_wq, &dpc->work)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Call to queue_work() failed.\n")); - kfree(dpc); - status = AE_ERROR; + status = AE_ERROR; + kfree(dpc); + } } - return_ACPI_STATUS(status); } -- cgit v0.10.2
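For reference, below is a minimal, self-contained sketch of the dedicated-notify-workqueue pattern that the last patch introduces in drivers/acpi/osl.c. It is not the kernel's actual code: the my_* names, error values, and init/exit wiring are illustrative assumptions; only the workqueue calls (create_singlethread_workqueue, INIT_WORK, queue_work, destroy_workqueue) mirror what the patch itself uses.

/*
 * Minimal sketch (illustrative, not the kernel's actual code) of deferring
 * notify callbacks to a dedicated single-threaded workqueue, separate from
 * the general deferred-work queue.
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_dpc {
	void (*function)(void *context);	/* deferred callback */
	void *context;				/* its argument */
	struct work_struct work;
};

static struct workqueue_struct *my_notify_wq;

static void my_execute_notify(struct work_struct *work)
{
	struct my_dpc *dpc = container_of(work, struct my_dpc, work);

	dpc->function(dpc->context);		/* run the notify handler */
	kfree(dpc);
}

/* Queue a notify callback on the dedicated workqueue */
static int my_queue_notify(void (*function)(void *context), void *context)
{
	struct my_dpc *dpc = kmalloc(sizeof(*dpc), GFP_ATOMIC);

	if (!dpc)
		return -ENOMEM;

	dpc->function = function;
	dpc->context = context;
	INIT_WORK(&dpc->work, my_execute_notify);

	/* Own queue: notify work cannot block behind GPE-driven work */
	if (!queue_work(my_notify_wq, &dpc->work)) {
		kfree(dpc);
		return -EBUSY;
	}
	return 0;
}

static int __init my_notify_init(void)
{
	my_notify_wq = create_singlethread_workqueue("my_notify");
	return my_notify_wq ? 0 : -ENOMEM;
}

static void my_notify_exit(void)
{
	destroy_workqueue(my_notify_wq);
}

Routing notify work to its own single-threaded workqueue keeps the handlers executing one at a time and in order, while ensuring they can no longer be starved by, or deadlock against, work items already running on the main kacpid queue.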