* HRTIMER_CB_IRQSAFE: Callback may run in hardirq context
* HRTIMER_CB_IRQSAFE_NO_RESTART: Callback may run in hardirq context and
* does not restart the timer
- * HRTIMER_CB_IRQSAFE_NO_SOFTIRQ: Callback must run in hardirq context
- * Special mode for tick emultation
+ * HRTIMER_CB_IRQSAFE_PERCPU: Callback must run in hardirq context
+ * Special mode for tick emulation and
+ * scheduler timer. Such timers are per
+ * cpu and not allowed to be migrated on
+ * cpu unplug.
+ * HRTIMER_CB_IRQSAFE_UNLOCKED: Callback should run in hardirq context
+ * with timer->base lock unlocked
+ * used for timers which call wakeup to
+ * avoid lock order problems with rq->lock
*/
enum hrtimer_cb_mode {
HRTIMER_CB_SOFTIRQ,
HRTIMER_CB_IRQSAFE,
HRTIMER_CB_IRQSAFE_NO_RESTART,
- HRTIMER_CB_IRQSAFE_NO_SOFTIRQ,
+ HRTIMER_CB_IRQSAFE_PERCPU,
+ HRTIMER_CB_IRQSAFE_UNLOCKED,
};
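
For reference, a minimal sketch of how a timer owner would pick the renamed per-cpu mode under this (pre-cb_mode-removal) hrtimer API; the demo_* names and the 1 ms interval are hypothetical, not taken from the patch:

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct hrtimer, demo_tick_timer);

/* Runs in hardirq context; restart is handled by the calling site. */
static enum hrtimer_restart demo_tick_fn(struct hrtimer *timer)
{
	/* per-cpu work goes here */
	return HRTIMER_NORESTART;
}

/* Called on the CPU that owns the timer, e.g. from the cpu-up path. */
static void demo_tick_setup(void)
{
	struct hrtimer *t = &__get_cpu_var(demo_tick_timer);

	hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	t->function = demo_tick_fn;
#ifdef CONFIG_HIGH_RES_TIMERS
	/* per cpu, must not be migrated on cpu unplug */
	t->cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
#endif
	hrtimer_start(t, ktime_set(0, NSEC_PER_MSEC), HRTIMER_MODE_REL);
}
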
/*
*/
BUG_ON(timer->function(timer) != HRTIMER_NORESTART);
return 1;
- case HRTIMER_CB_IRQSAFE_NO_SOFTIRQ:
+ case HRTIMER_CB_IRQSAFE_PERCPU:
+ case HRTIMER_CB_IRQSAFE_UNLOCKED:
/*
* This is solely for the sched tick emulation with
* dynamic tick support to ensure that we do not
* restart the tick right on the edge and end up with
* the tick timer in the softirq ! The calling site
- * takes care of this.
+ * takes care of this. Also used for hrtimer sleeper !
*/
debug_hrtimer_deactivate(timer);
return 1;
timer_stats_account_hrtimer(timer);
fn = timer->function;
- if (timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ) {
+ if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
+ timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED) {
/*
* Used for scheduler timers, avoid lock inversion with
* rq->lock and tasklist_lock.
sl->timer.function = hrtimer_wakeup;
sl->task = task;
#ifdef CONFIG_HIGH_RES_TIMERS
- sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
+ sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
#endif
}
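
The sleeper now uses HRTIMER_CB_IRQSAFE_UNLOCKED, which matters to callers that follow the usual sleep pattern. A minimal sketch of that pattern (modelled on do_nanosleep; names and return convention are illustrative) shows the wakeup path that motivates running the callback with the base lock dropped:

#include <linux/errno.h>
#include <linux/hrtimer.h>
#include <linux/sched.h>

static int demo_sleep_until(ktime_t expires)
{
	struct hrtimer_sleeper sl;

	hrtimer_init(&sl.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	sl.timer.expires = expires;
	hrtimer_init_sleeper(&sl, current);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_start(&sl.timer, sl.timer.expires, HRTIMER_MODE_ABS);
		if (!hrtimer_active(&sl.timer))
			sl.task = NULL;

		/* hrtimer_wakeup() clears sl.task and wakes the sleeper */
		if (likely(sl.task))
			schedule();

		hrtimer_cancel(&sl.timer);
	} while (sl.task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);

	return sl.task ? -EINTR : 0;
}
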
#ifdef CONFIG_HOTPLUG_CPU
static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
- struct hrtimer_clock_base *new_base)
+ struct hrtimer_clock_base *new_base, int dcpu)
{
struct hrtimer *timer;
struct rb_node *node;
BUG_ON(hrtimer_callback_running(timer));
debug_hrtimer_deactivate(timer);
+ /*
+ * Should not happen. Per CPU timers should be
+ * canceled _before_ the migration code is called
+ */
+ if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU) {
+ __remove_hrtimer(timer, old_base,
+ HRTIMER_STATE_INACTIVE, 0);
+ WARN(1, "hrtimer (%p %p) active but cpu %d dead\n",
+ timer, timer->function, dcpu);
+ continue;
+ }
+
/*
* Mark it as STATE_MIGRATE not INACTIVE otherwise the
* timer could be seen as !active and just vanish away
/*
* Happens with high res enabled when the timer was
* already expired and the callback mode is
- * HRTIMER_CB_IRQSAFE_NO_SOFTIRQ
- * (hrtimer_sleeper). The enqueue code does not move
- * them to the soft irq pending list for
- * performance/latency reasons, but in the migration
- * state, we need to do that otherwise we end up with
- * a stale timer.
+ * HRTIMER_CB_IRQSAFE_UNLOCKED (hrtimer_sleeper). The
+ * enqueue code does not move them to the soft irq
+ * pending list for performance/latency reasons, but
+ * in the migration state, we need to do that
+ * otherwise we end up with a stale timer.
*/
if (timer->state == HRTIMER_STATE_MIGRATE) {
timer->state = HRTIMER_STATE_PENDING;
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
if (migrate_hrtimer_list(&old_base->clock_base[i],
- &new_base->clock_base[i]))
+ &new_base->clock_base[i], cpu))
raise = 1;
}
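
Per the new check in migrate_hrtimer_list() above, an HRTIMER_CB_IRQSAFE_PERCPU timer has to be canceled before the migration code ever sees it. A hedged sketch of one way to arrange that with a CPU hotplug notifier, loosely modelled on the scheduler's hrtick hotplug handling (the demo_* names are hypothetical and reuse the per-cpu timer from the first sketch):

#include <linux/cpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct hrtimer, demo_tick_timer);

static int demo_tick_hotplug(struct notifier_block *nfb,
			     unsigned long action, void *hcpu)
{
	int cpu = (int)(long)hcpu;

	switch (action) {
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/* cancel the per-cpu timer so migration never finds it active */
		hrtimer_cancel(&per_cpu(demo_tick_timer, cpu));
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static struct notifier_block demo_tick_nb = {
	.notifier_call = demo_tick_hotplug,
};

/* registered once at init time: register_cpu_notifier(&demo_tick_nb); */
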
hrtimer_init(&rt_b->rt_period_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rt_b->rt_period_timer.function = sched_rt_period_timer;
- rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
+ rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
}
static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rq->hrtick_timer.function = hrtick;
- rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
+ rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
}
#else
static inline void hrtick_clear(struct rq *rq)