--- /dev/null
+
+--- a/src/scheduler.c Mon Aug 25 20:23:24 2008 +0200
++++ b/src/scheduler.c Mon Aug 25 23:02:29 2008 +0200
+@@ -63,7 +63,6 @@ clock_t now_times; /* current id
+ /* Hashed root of all timers */
+ struct list_node timer_wheel[TIMER_WHEEL_SLOTS];
+ clock_t timer_last_run; /* remember the last timeslot walk */
+-struct list_node *timer_walk_list_node = NULL; /* used for timeslot walk */
+
+ /* Pool of timers to avoid malloc() churn */
+ struct list_node free_timer_list;
+@@ -317,6 +316,35 @@ olsr_init_timers(void)
+ timers_running = 0;
+ }
+
++/*
++ * olsr_get_next_list_entry
++ *
++ * Get the next list node in a hash bucket.
++ * The list node of the timer currently being visited may get removed
++ * from this timer bucket by olsr_change_timer() or olsr_stop_timer(),
++ * which means that we can lose our walking context.
++ * By caching the previous node we can detect whether the current node
++ * has been removed from the hash bucket and compute the next node.
++ */
++static struct list_node *
++olsr_get_next_list_entry(struct list_node **prev_node,
++                         struct list_node *current_node)
++{
++ if ((*prev_node)->next == current_node) {
++
++ /*
++ * No change in the list, normal traversal, update the previous node.
++ */
++ *prev_node = current_node;
++ return (current_node->next);
++ } else {
++
++ /*
++ * List change. Recompute the walking context.
++ */
++ return ((*prev_node)->next);
++ }
++}
+
+ /**
+ * Walk through the timer list and check if any timer is ready to fire.
+@@ -326,7 +354,7 @@ olsr_walk_timers(clock_t * last_run)
+ olsr_walk_timers(clock_t * last_run)
+ {
+ static struct timer_entry *timer;
+- struct list_node *timer_head_node;
++ struct list_node *timer_head_node, *timer_walk_node, *timer_walk_prev_node;
+ unsigned int timers_walked, timers_fired;
+ unsigned int total_timers_walked, total_timers_fired;
+ unsigned int wheel_slot_walks = 0;
+@@ -347,12 +375,15 @@ olsr_walk_timers(clock_t * last_run)
+
+ /* Get the hash slot for this clocktick */
+ timer_head_node = &timer_wheel[*last_run & TIMER_WHEEL_MASK];
++ timer_walk_prev_node = timer_head_node;
+
+ /* Walk all entries hanging off this hash bucket */
+- for (timer_walk_list_node = timer_head_node->next; timer_walk_list_node != timer_head_node; /* circular list */
+- timer_walk_list_node = timer_walk_list_node->next) {
+-
+- timer = list2timer(timer_walk_list_node);
++ for (timer_walk_node = timer_head_node->next;
++ timer_walk_node != timer_head_node; /* circular list */
++ timer_walk_node = olsr_get_next_list_entry(&timer_walk_prev_node,
++ timer_walk_node)) {
++
++ timer = list2timer(timer_walk_node);
+
+ timers_walked++;
+
+@@ -403,11 +434,6 @@ olsr_walk_timers(clock_t * last_run)
+ /* Increment the time slot and wheel slot walk iteration */
+ (*last_run)++;
+ wheel_slot_walks++;
+-
+- /*
+- * Mark the timer walk context unused.
+- */
+- timer_walk_list_node = NULL;
+ }
+
+ #ifdef DEBUG
+@@ -581,21 +607,6 @@ olsr_start_timer(unsigned int rel_time,
+ return timer;
+ }
+
+-/*
+- * Check if there is a timer walk in progress and advance the
+- * walking context if so. Keep in mind we are about to delete
+- * the timer from a list and this will destroy the walking context.
+- */
+-
+-static inline void
+-olsr_update_timer_walk_ctx(struct timer_entry *timer)
+-{
+- if (timer_walk_list_node == &timer->timer_list) {
+- timer_walk_list_node = timer_walk_list_node->next;
+- }
+-}
+-
+-
+ /**
+ * Delete a timer.
+ *
+@@ -615,8 +626,6 @@ olsr_stop_timer(struct timer_entry *time
+ olsr_cookie_name(timer->timer_cookie),
+ timer, timer->timer_cb_context);
+ #endif
+-
+- olsr_update_timer_walk_ctx(timer);
+
+ /*
+ * Carve out of the existing wheel_slot and return to the pool
+@@ -658,8 +667,6 @@ olsr_change_timer(struct timer_entry *ti
+ timer->timer_clock = olsr_jitter(rel_time, jitter_pct, timer->timer_random);
+ timer->timer_jitter_pct = jitter_pct;
+
+- olsr_update_timer_walk_ctx(timer);
+-
+ /*
+ * Changes are easy: Remove timer from the exisiting timer_wheel slot
+ * and reinsert into the new slot.
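
Illustration (not part of the patch): a minimal, self-contained sketch of the
traversal pattern that olsr_get_next_list_entry() introduces. The list_node
layout mirrors the scheduler's circular doubly-linked list; everything else
here (demo_timer, stop_me, get_next_list_entry, main) is invented for the
example. The point it demonstrates is that by caching the previous node, the
walker can tell when a callback has unlinked the node currently being visited
and resume from the cached node's successor, instead of dereferencing a node
that may already have been returned to the free pool.

#include <stdio.h>

struct list_node {
  struct list_node *next;
  struct list_node *prev;
};

struct demo_timer {
  struct list_node node;               /* must stay the first member for the cast below */
  int id;
  int stop_me;                         /* this entry unlinks itself while being visited */
};

static void list_init(struct list_node *head)
{
  head->next = head->prev = head;
}

static void list_add_tail(struct list_node *head, struct list_node *n)
{
  n->prev = head->prev;
  n->next = head;
  head->prev->next = n;
  head->prev = n;
}

static void list_remove(struct list_node *n)
{
  n->prev->next = n->next;
  n->next->prev = n->prev;
  n->next = n->prev = NULL;
}

/* Same idea as olsr_get_next_list_entry() in the patch above. */
static struct list_node *
get_next_list_entry(struct list_node **prev_node, struct list_node *current_node)
{
  if ((*prev_node)->next == current_node) {
    /* Nothing changed: advance the cached previous node and step forward. */
    *prev_node = current_node;
    return current_node->next;
  }
  /* current_node was unlinked by the callback: resume after the cached node. */
  return (*prev_node)->next;
}

int main(void)
{
  struct list_node head, *walk, *prev;
  struct demo_timer timers[4];
  int i;

  list_init(&head);
  for (i = 0; i < 4; i++) {
    timers[i].id = i;
    timers[i].stop_me = (i == 2);      /* entry 2 removes itself mid-walk */
    list_add_tail(&head, &timers[i].node);
  }

  prev = &head;
  for (walk = head.next; walk != &head;
       walk = get_next_list_entry(&prev, walk)) {
    struct demo_timer *t = (struct demo_timer *)walk;  /* node is the first member */

    printf("visiting timer %d\n", t->id);
    if (t->stop_me) {
      list_remove(walk);               /* stands in for olsr_stop_timer() */
    }
  }
  return 0;
}

Compiled with any C compiler, this should visit all four timers exactly once,
with entry 2 unlinking itself during its own callback without disturbing the
rest of the walk.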