controller_t * const main = (controller_t *) argument;
f_time_spec_t delay = f_time_spec_t_initialize;
+ f_time_spec_t now = f_time_spec_t_initialize;
siginfo_t signal_data;
uint8_t reap_count = 0;
uint8_t enable = controller_thread_enable_none_e;
while (controller_thread_enable_get(&main->thread) == controller_thread_enable_e) {
- controller_time_now(
- main->setting.flag & controller_main_flag_simulate_d
- ? controller_thread_cleanup_interval_short_d
- : controller_thread_cleanup_interval_long_d,
- 0,
- &delay
- );
+ if (now.tv_sec > delay.tv_sec || now.tv_sec == delay.tv_sec && now.tv_nsec >= delay.tv_nsec) {
+ controller_time_now(
+ main->setting.flag & controller_main_flag_simulate_d
+ ? controller_thread_cleanup_interval_short_d
+ : controller_thread_cleanup_interval_long_d,
+ 0,
+ &delay
+ );
+ }
// Allow thread to be interrupted and auto-cancelled while sleeping.
f_thread_cancel_state_set(PTHREAD_CANCEL_ASYNCHRONOUS, 0);
reap_count = 0;
do {
- if (!waitid(P_ALL, 0, &signal_data, WEXITED | WNOHANG)) {
- if (signal_data.si_pid) {
- if (++reap_count < controller_lock_max_reap_d) continue;
- }
- }
-
- } while (F_false);
+ // Do nothing.
+ } while (!waitid(P_ALL, 0, &signal_data, WEXITED | WNOHANG) && signal_data.si_pid && ++reap_count < controller_lock_max_reap_d);
}
enable = controller_thread_enable_get(&main->thread);
break;
}
- status = f_thread_mutex_lock_try(&main->thread.lock.cancel.mutex);
- if (status != F_okay) continue;
+ // Avoid excess work on each wake-up from a reap condition signal by first checking whether the delay has expired.
+ controller_time_now(0, 0, &now);
+
+ if (now.tv_sec > delay.tv_sec || now.tv_sec == delay.tv_sec && now.tv_nsec >= delay.tv_nsec) {
+ status = f_thread_mutex_lock_try(&main->thread.lock.cancel.mutex);
+
+ if (status == F_okay) {
+ status = controller_thread_cleanup_perform(main);
+
+ f_thread_mutex_unlock(&main->thread.lock.cancel.mutex);
+ }
+ }
+ } // while
+
+ return 0;
+ }
+#endif // _di_controller_thread_cleanup_
- if (f_thread_lock_read_try(&main->thread.lock.instance) == F_okay) {
- if (main->thread.instances.used) {
- const f_number_unsigned_t total = main->thread.instances.used;
+#ifndef _di_controller_thread_cleanup_perform_
+ f_status_t controller_thread_cleanup_perform(controller_t * const main) {
- controller_instance_t *instances[total];
+ if (!main) return F_status_set_error(F_parameter);
- memcpy(instances, main->thread.instances.array, sizeof(controller_instance_t *) * total);
+ f_status_t status = f_thread_lock_read_try(&main->thread.lock.instance);
+ if (status != F_okay) return status;
- f_thread_unlock(&main->thread.lock.instance);
+ if (!main->thread.instances.used) {
+ f_thread_unlock(&main->thread.lock.instance);
- status = F_okay;
+ return F_data_not;
+ }
- for (f_number_unsigned_t i = 0; i < total && controller_thread_enable_get(&main->thread) == controller_thread_enable_e; ++i) {
+ const f_number_unsigned_t total = main->thread.instances.used;
- if (!instances[i]) continue;
+ controller_instance_t *instances[total];
- // If "active" has a read or write lock, then do not attempt to clean it.
- // Once a write lock is achieved on "active", then there should only ever be one process operating on it.
- // Therefore, the instance.use lock does not need to be used in any way.
- if (f_thread_lock_write_try(&instances[i]->active) != F_okay) continue;
+ memcpy(instances, main->thread.instances.array, sizeof(controller_instance_t *) * total);
- // If instance is active or busy, then do not attempt to clean it.
- if (instances[i]->state == controller_instance_state_active_e || instances[i]->state == controller_instance_state_busy_e || controller_thread_enable_get(&main->thread) != controller_thread_enable_e) {
- f_thread_unlock(&instances[i]->active);
+ f_thread_unlock(&main->thread.lock.instance);
- if (controller_thread_enable_get(&main->thread) == controller_thread_enable_e) continue;
+ status = F_okay;
- break;
- }
+ for (f_number_unsigned_t i = 0; i < total && controller_thread_enable_get(&main->thread) == controller_thread_enable_e; ++i) {
- // If instance has a PID file, then it is running in the background, only cleanup if the PID file no longer exists.
- if (instances[i]->path_pids.used) {
- f_number_unsigned_t j = 0;
+ if (!instances[i]) continue;
- for (; j < instances[i]->path_pids.used && controller_thread_enable_get(&main->thread) == controller_thread_enable_e; ++j) {
+ // If "active" has a read or write lock, then do not attempt to clean it.
+ // Once a write lock is achieved on "active", then there should only ever be one process operating on it (this cleanup process).
+ // Therefore, the instance.use lock does not need to be used in any way.
+ if (f_thread_lock_write_try(&instances[i]->active) != F_okay) continue;
- if (instances[i]->path_pids.array[j].used && f_file_exists(instances[i]->path_pids.array[j], F_true) == F_true) {
- break;
- }
- } // for
+ // If instance is active or busy, then do not attempt to clean it.
+ if (instances[i]->state == controller_instance_state_active_e || instances[i]->state == controller_instance_state_busy_e || controller_thread_enable_get(&main->thread) != controller_thread_enable_e) {
+ f_thread_unlock(&instances[i]->active);
- if (j < instances[i]->path_pids.used || controller_thread_enable_get(&main->thread) != controller_thread_enable_e) {
- f_thread_unlock(&instances[i]->active);
+ if (controller_thread_enable_get(&main->thread) == controller_thread_enable_e) continue;
- if (controller_thread_enable_get(&main->thread) == controller_thread_enable_e) continue;
+ break;
+ }
- break;
- }
- }
+ // If instance has a PID file, then it is running in the background, only cleanup if the PID file no longer exists.
+ if (instances[i]->path_pids.used) {
+ f_number_unsigned_t j = 0;
- // Close any still open thread.
- if (instances[i]->thread) {
- status = controller_thread_join(&instances[i]->thread);
+ for (; j < instances[i]->path_pids.used && controller_thread_enable_get(&main->thread) == controller_thread_enable_e; ++j) {
- if (F_status_is_error_not(status) || F_status_set_fine(status) == F_found_not) {
- instances[i]->state = controller_instance_state_idle_e;
+ if (instances[i]->path_pids.array[j].used && f_file_exists(instances[i]->path_pids.array[j], F_true) == F_true) {
+ break;
+ }
+ } // for
- f_thread_condition_signal_all(&instances[i]->wait_condition);
- }
- else {
- f_thread_unlock(&instances[i]->active);
+ if (j < instances[i]->path_pids.used || controller_thread_enable_get(&main->thread) != controller_thread_enable_e) {
+ f_thread_unlock(&instances[i]->active);
- continue;
- }
- }
+ if (controller_thread_enable_get(&main->thread) == controller_thread_enable_e) continue;
- // De-allocate dynamic portions of the structure that are only ever needed while the instance is running.
- controller_cache_delete(&instances[i]->cache);
+ break;
+ }
+ }
- f_memory_array_resize(0, sizeof(controller_rule_t *), (void **) &instances[i]->stack.array, &instances[i]->stack.used, &instances[i]->stack.size);
+ // Close any still open thread.
+ if (instances[i]->thread) {
+ status = controller_thread_join(&instances[i]->thread);
- // Shrink the childs array.
- if (instances[i]->childs.used && instances[i]->childs.used < instances[i]->childs.size) {
- for (; instances[i]->childs.used && instances[i]->childs.used < instances[i]->childs.size && controller_thread_enable_get(&main->thread) == controller_thread_enable_e; --instances[i]->childs.used) {
- if (instances[i]->childs.array[instances[i]->childs.used]) break;
- } // for
+ if (F_status_is_error_not(status) || F_status_set_fine(status) == F_found_not) {
+ instances[i]->state = controller_instance_state_idle_e;
- if (controller_thread_enable_get(&main->thread) != controller_thread_enable_e) {
- f_thread_unlock(&instances[i]->active);
+ f_thread_condition_signal_all(&instances[i]->wait_condition);
+ }
+ else {
+ f_thread_unlock(&instances[i]->active);
- break;
- }
+ continue;
+ }
+ }
- if (instances[i]->childs.used < instances[i]->childs.size) {
- f_memory_array_resize(instances[i]->childs.used, sizeof(pid_t), (void **) &instances[i]->childs.array, &instances[i]->childs.used, &instances[i]->childs.size);
- }
- }
+ // De-allocate dynamic portions of the structure that are only ever needed while the instance is running.
+ controller_cache_delete(&instances[i]->cache);
- // De-allocate the PID files.
- if (instances[i]->path_pids.used) {
- instances[i]->path_pids.used = 0;
+ f_memory_array_resize(0, sizeof(controller_rule_t *), (void **) &instances[i]->stack.array, &instances[i]->stack.used, &instances[i]->stack.size);
- f_memory_arrays_resize(0, sizeof(f_string_dynamic_t), (void **) &instances[i]->path_pids.array, &instances[i]->path_pids.used, &instances[i]->path_pids.size, &f_string_dynamics_delete_callback);
- }
+ // Shrink the childs array.
+ if (instances[i]->childs.used && instances[i]->childs.used < instances[i]->childs.size) {
+ for (; instances[i]->childs.used && instances[i]->childs.used < instances[i]->childs.size && controller_thread_enable_get(&main->thread) == controller_thread_enable_e; --instances[i]->childs.used) {
+ if (instances[i]->childs.array[instances[i]->childs.used]) break;
+ } // for
- // De-allocate any rules in the space that is declared to be unused.
- if (i >= main->thread.instances.used) {
- controller_rule_delete(&instances[i]->rule);
- }
+ if (controller_thread_enable_get(&main->thread) != controller_thread_enable_e) {
+ f_thread_unlock(&instances[i]->active);
- f_thread_unlock(&instances[i]->active);
- } // for
+ break;
}
- else {
- f_thread_unlock(&main->thread.lock.instance);
+
+ if (instances[i]->childs.used < instances[i]->childs.size) {
+ f_memory_array_resize(instances[i]->childs.used, sizeof(pid_t), (void **) &instances[i]->childs.array, &instances[i]->childs.used, &instances[i]->childs.size);
}
}
- f_thread_mutex_unlock(&main->thread.lock.cancel.mutex);
- } // while
+ // De-allocate the PID files.
+ if (instances[i]->path_pids.used) {
+ instances[i]->path_pids.used = 0;
- return 0;
+ f_memory_arrays_resize(0, sizeof(f_string_dynamic_t), (void **) &instances[i]->path_pids.array, &instances[i]->path_pids.used, &instances[i]->path_pids.size, &f_string_dynamics_delete_callback);
+ }
+
+ // De-allocate any rules in the space that is declared to be unused.
+ if (i >= main->thread.instances.used) {
+ controller_rule_delete(&instances[i]->rule);
+ }
+
+ f_thread_unlock(&instances[i]->active);
+ } // for
+
+ return F_status_is_error_not(status) ? F_okay : status;
}
-#endif // _di_controller_thread_cleanup_
+#endif // _di_controller_thread_cleanup_perform_
#ifdef __cplusplus
} // extern "C"