*/
struct hrtimer_clock_base {
struct hrtimer_cpu_base *cpu_base;
- clockid_t index;
+ int index;
+ clockid_t clockid;
struct timerqueue_head active;
ktime_t resolution;
ktime_t (*get_time)(void);
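With the split, index is the position of the base in the per-cpu clock_base[] array (the HRTIMER_BASE_* values used in the initializer below), while clockid keeps the POSIX clock id. A minimal sketch of the lookup this separation enables, assuming a table keyed by clockid; the table name is illustrative:

/* Sketch only: map a POSIX clockid to its base index. Table name is
 * illustrative; the HRTIMER_BASE_* values come from the initializer
 * below. */
static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
	[CLOCK_REALTIME]	= HRTIMER_BASE_REALTIME,
	[CLOCK_MONOTONIC]	= HRTIMER_BASE_MONOTONIC,
	[CLOCK_BOOTTIME]	= HRTIMER_BASE_BOOTTIME,
};

static inline int hrtimer_clockid_to_base(clockid_t clock_id)
{
	return hrtimer_clock_to_base_table[clock_id];
}

With index holding the array position directly, the hot paths no longer need this translation; the switch_hrtimer_base hunk below drops exactly that call.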
* struct hrtimer_cpu_base - the per cpu clock bases
* @lock: lock protecting the base and associated clock bases
* and timers
- * @clock_base: array of clock bases for this cpu
+ * @active_bases: Bitfield to mark bases with active timers
* @expires_next: absolute time of the next event which was scheduled
* via clock_set_next_event()
* @hres_active: State of high resolution mode
* @nr_retries: Total number of hrtimer interrupt retries
* @nr_hangs: Total number of hrtimer interrupt hangs
* @max_hang_time: Maximum time spent in hrtimer_interrupt
+ * @clock_base: array of clock bases for this cpu
*/
struct hrtimer_cpu_base {
raw_spinlock_t lock;
+ unsigned long active_bases;
#ifdef CONFIG_HIGH_RES_TIMERS
ktime_t expires_next;
int hres_active;
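active_bases carries one bit per clock base: bit i is set while clock_base[i] has queued timers, which lets readers skip empty bases without touching their timerqueues. A hedged sketch of the test side (the helper name is hypothetical; the set and clear sides appear in the enqueue and remove hunks below):

static inline bool hrtimer_base_active(struct hrtimer_cpu_base *cpu_base,
				       int index)
{
	/* Hypothetical helper: bit 'index' mirrors whether
	 * clock_base[index] holds queued timers. */
	return cpu_base->active_bases & (1UL << index);
}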
.clock_base =
{
{
- .index = CLOCK_REALTIME,
+ .index = HRTIMER_BASE_REALTIME,
+ .clockid = CLOCK_REALTIME,
.get_time = &ktime_get_real,
.resolution = KTIME_LOW_RES,
},
{
- .index = CLOCK_MONOTONIC,
+ .index = HRTIMER_BASE_MONOTONIC,
+ .clockid = CLOCK_MONOTONIC,
.get_time = &ktime_get,
.resolution = KTIME_LOW_RES,
},
{
- .index = CLOCK_BOOTTIME,
+ .index = HRTIMER_BASE_BOOTTIME,
+ .clockid = CLOCK_BOOTTIME,
.get_time = &ktime_get_boottime,
.resolution = KTIME_LOW_RES,
},
struct hrtimer_cpu_base *new_cpu_base;
int this_cpu = smp_processor_id();
int cpu = hrtimer_get_target(this_cpu, pinned);
- int basenum = hrtimer_clockid_to_base(base->index);
+ int basenum = base->index;
again:
new_cpu_base = &per_cpu(hrtimer_bases, cpu);
debug_activate(timer);
timerqueue_add(&base->active, &timer->node);
+ base->cpu_base->active_bases |= 1 << base->index;
/*
* HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
#endif
}
timerqueue_del(&base->active, &timer->node);
+ if (!timerqueue_getnext(&base->active))
+ base->cpu_base->active_bases &= ~(1 << base->index);
out:
timer->state = newstate;
}
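The enqueue path sets the bit unconditionally; the remove path clears it only once timerqueue_getnext() reports the queue empty. A hypothetical debug helper, sketching the invariant the two paths maintain together:

static void debug_check_active_bases(struct hrtimer_cpu_base *cpu_base)
{
	int i;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		bool queued =
			timerqueue_getnext(&cpu_base->clock_base[i].active) != NULL;

		/* Bit i must be set iff base i still has queued timers */
		WARN_ON(queued != !!(cpu_base->active_bases & (1UL << i)));
	}
}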
void hrtimer_interrupt(struct clock_event_device *dev)
{
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
- struct hrtimer_clock_base *base;
ktime_t expires_next, now, entry_time, delta;
int i, retries = 0;
*/
cpu_base->expires_next.tv64 = KTIME_MAX;
- base = cpu_base->clock_base;
-
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
- ktime_t basenow;
+ struct hrtimer_clock_base *base;
struct timerqueue_node *node;
+ ktime_t basenow;
+
+ if (!(cpu_base->active_bases & (1 << i)))
+ continue;
+ base = cpu_base->clock_base + i;
basenow = ktime_add(now, base->offset);
while ((node = timerqueue_getnext(&base->active))) {
__run_hrtimer(timer, &basenow);
}
- base++;
}
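With the bitfield in place, the interrupt scan skips inactive bases at the cost of one test per iteration. Equivalently, the loop could walk only the set bits; a sketch of that alternative (not part of this patch), using the generic __ffs() bit helper:

unsigned long active = cpu_base->active_bases;

while (active) {
	int i = __ffs(active);	/* lowest set bit = next active base */
	struct hrtimer_clock_base *base = cpu_base->clock_base + i;

	active &= ~(1UL << i);
	/* ... expire timers queued on 'base', as in the loop above ... */
}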
/*
struct timespec __user *rmtp;
int ret = 0;
- hrtimer_init_on_stack(&t.timer, restart->nanosleep.index,
+ hrtimer_init_on_stack(&t.timer, restart->nanosleep.clockid,
HRTIMER_MODE_ABS);
hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
restart = &current_thread_info()->restart_block;
restart->fn = hrtimer_nanosleep_restart;
- restart->nanosleep.index = t.timer.base->index;
+ restart->nanosleep.clockid = t.timer.base->clockid;
restart->nanosleep.rmtp = rmtp;
restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);