push_rt_tasks(rq);
}
-/*
- * Load-balancing iterator. Note: while the runqueue stays locked
- * during the whole iteration, the current task might be
- * dequeued so the iterator has to be dequeue-safe. Here we
- * achieve that by always pre-iterating before returning
- * the current task:
- */
-static struct task_struct *load_balance_start_rt(void *arg)
-{
- struct rq *rq = arg;
- struct rt_prio_array *array = &rq->rt.active;
- struct list_head *head, *curr;
- struct task_struct *p;
- int idx;
-
- idx = sched_find_first_bit(array->bitmap);
- if (idx >= MAX_RT_PRIO)
- return NULL;
-
- head = array->queue + idx;
- curr = head->prev;
-
- p = list_entry(curr, struct task_struct, run_list);
-
- curr = curr->prev;
-
- rq->rt.rt_load_balance_idx = idx;
- rq->rt.rt_load_balance_head = head;
- rq->rt.rt_load_balance_curr = curr;
-
- return p;
-}
-
-static struct task_struct *load_balance_next_rt(void *arg)
-{
- struct rq *rq = arg;
- struct rt_prio_array *array = &rq->rt.active;
- struct list_head *head, *curr;
- struct task_struct *p;
- int idx;
-
- idx = rq->rt.rt_load_balance_idx;
- head = rq->rt.rt_load_balance_head;
- curr = rq->rt.rt_load_balance_curr;
-
- /*
- * If we arrived back to the head again then
- * iterate to the next queue (if any):
- */
- if (unlikely(head == curr)) {
- int next_idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
-
- if (next_idx >= MAX_RT_PRIO)
- return NULL;
-
- idx = next_idx;
- head = array->queue + idx;
- curr = head->prev;
-
- rq->rt.rt_load_balance_idx = idx;
- rq->rt.rt_load_balance_head = head;
- }
-
- p = list_entry(curr, struct task_struct, run_list);
-
- curr = curr->prev;
-
- rq->rt.rt_load_balance_curr = curr;
-
- return p;
-}
-
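/*
 * The comment removed above describes why the old iterators stepped the
 * cursor *before* returning the current task: the caller may dequeue the
 * task it was just given, so the saved cursor must never point at it.
 * Below is a minimal user-space model of that dequeue-safe pattern
 * (a sketch with illustrative names only, not kernel API):
 */
#include <stdio.h>

struct node {
        int val;
        struct node *prev, *next;
};

/* circular doubly-linked list with a dummy head, like struct list_head */
static void list_init(struct node *head)
{
        head->prev = head->next = head;
}

static void list_add_tail(struct node *head, struct node *n)
{
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
}

static void list_del(struct node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
}

struct iter {
        struct node *head;
        struct node *curr;      /* always one step past the node last returned */
};

static struct node *iter_start(struct iter *it, struct node *head)
{
        it->head = head;
        if (head->prev == head)
                return NULL;                    /* empty list */
        it->curr = head->prev->prev;            /* pre-step before returning the tail */
        return head->prev;
}

static struct node *iter_next(struct iter *it)
{
        struct node *p;

        if (it->curr == it->head)
                return NULL;                    /* wrapped back to the head: done */
        p = it->curr;
        it->curr = p->prev;                     /* pre-step again */
        return p;
}

int main(void)
{
        struct node head, n[4];
        struct iter it;
        struct node *p;
        int i;

        list_init(&head);
        for (i = 0; i < 4; i++) {
                n[i].val = i;
                list_add_tail(&head, &n[i]);
        }

        /* deleting the node we were just given does not break the walk */
        for (p = iter_start(&it, &head); p; p = iter_next(&it)) {
                printf("visiting %d\n", p->val);
                if (p->val == 2)
                        list_del(p);
        }
        return 0;
}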
static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
unsigned long max_load_move,
struct sched_domain *sd, enum cpu_idle_type idle,
int *all_pinned, int *this_best_prio)
{
- struct rq_iterator rt_rq_iterator;
-
- rt_rq_iterator.start = load_balance_start_rt;
- rt_rq_iterator.next = load_balance_next_rt;
- /* pass 'busiest' rq argument into
- * load_balance_[start|next]_rt iterators
- */
- rt_rq_iterator.arg = busiest;
-
- return balance_tasks(this_rq, this_cpu, busiest, max_load_move, sd,
- idle, all_pinned, this_best_prio, &rt_rq_iterator);
+ /* don't touch RT tasks: RT migration is handled by push_rt_tasks() */
+ return 0;
}
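/*
 * Why returning 0 is enough: roughly, the core balancer walks the
 * scheduling classes, asks each one how much load it managed to move,
 * and adds up the results, so a class that always reports 0 is simply
 * skipped over.  A toy user-space model of that dispatch follows
 * (a sketch with illustrative names, not the kernel's sched_class):
 */
#include <stdio.h>

struct toy_class {
        const char *name;
        unsigned long (*load_balance)(unsigned long max_load_move);
};

static unsigned long toy_balance_fair(unsigned long max_load_move)
{
        return max_load_move / 2;       /* pretend some fair-class load was migrated */
}

static unsigned long toy_balance_rt(unsigned long max_load_move)
{
        (void)max_load_move;
        return 0;                       /* RT tasks are left alone, as in the patch */
}

int main(void)
{
        const struct toy_class classes[] = {
                { "fair", toy_balance_fair },
                { "rt",   toy_balance_rt },
        };
        unsigned long moved = 0;
        unsigned int i;

        for (i = 0; i < sizeof(classes) / sizeof(classes[0]); i++) {
                unsigned long got = classes[i].load_balance(128);

                printf("%s moved %lu\n", classes[i].name, got);
                moved += got;
        }
        printf("total moved %lu\n", moved);
        return 0;
}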
static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
struct sched_domain *sd, enum cpu_idle_type idle)
{
- struct rq_iterator rt_rq_iterator;
-
- rt_rq_iterator.start = load_balance_start_rt;
- rt_rq_iterator.next = load_balance_next_rt;
- rt_rq_iterator.arg = busiest;
-
- return iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
- &rt_rq_iterator);
+ /* don't touch RT tasks: RT migration is handled by push_rt_tasks() */
+ return 0;
}
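/*
 * What takes over from the stubs above: RT migration is driven actively
 * instead, e.g. push_rt_tasks() at the top of this hunk runs after a
 * context switch (via schedule_tail_balance_rt) and tries to move a
 * queued-but-not-running RT task to a CPU whose current task has lower
 * priority.  A conceptual user-space sketch of that "push" decision
 * (an assumption-laden model, not the kernel implementation; the names
 * and the plain arrays are illustrative only):
 */
#include <stdio.h>

#define NR_TOY_CPUS     4

/* lower number = higher RT priority, as in the kernel's 0..99 range */
static int curr_prio[NR_TOY_CPUS] = { 10, 40, 95, 60 };

/* pick the CPU running the lowest-priority work that our task beats */
static int toy_find_lowest_cpu(int task_prio, int this_cpu)
{
        int cpu, best_cpu = -1, best_prio = task_prio;

        for (cpu = 0; cpu < NR_TOY_CPUS; cpu++) {
                if (cpu == this_cpu)
                        continue;
                if (curr_prio[cpu] > best_prio) {
                        best_prio = curr_prio[cpu];
                        best_cpu = cpu;
                }
        }
        return best_cpu;
}

int main(void)
{
        int this_cpu = 0;
        int waiting_prio = 50;  /* an RT task queued behind a prio-10 task on CPU 0 */
        int target = toy_find_lowest_cpu(waiting_prio, this_cpu);

        if (target >= 0)
                printf("push prio-%d task from CPU %d to CPU %d (running prio %d)\n",
                       waiting_prio, this_cpu, target, curr_prio[target]);
        else
                printf("no CPU is running lower-priority work; keep the task queued\n");
        return 0;
}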
#else /* CONFIG_SMP */
# define schedule_tail_balance_rt(rq) do { } while (0)