sched: Change rq->nr_running to unsigned int
author      Peter Zijlstra <a.p.zijlstra@chello.nl>
            Thu, 26 Apr 2012 11:12:27 +0000 (13:12 +0200)
committer   Ingo Molnar <mingo@kernel.org>
            Wed, 9 May 2012 13:00:49 +0000 (15:00 +0200)
Since there's a PID space limit of 30 bits (see
futex.h:FUTEX_TID_MASK), and allocating that many tasks (assuming a
lower bound of 2 pages per task) would still take 8TB of memory, it
seems reasonable to say that unsigned int is sufficient for
rq->nr_running.
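
As a quick sanity check of that arithmetic (assuming 4 KiB pages,
which the original figures don't state explicitly):

    2^30 tasks * 2 pages/task * 4 KiB/page = 2^30 * 8 KiB = 8 TiB

so merely instantiating enough tasks to overflow a 30-bit counter
already needs on the order of 8TB of RAM.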

When we do get anywhere near that many tasks I suspect other things
would go funny; load-balancer load computations, for instance, would
really need to be hoisted to 128 bits, etc.
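
To make the 128-bit point concrete (an illustration only; the exact
balancer expressions vary): the largest per-task load weight is 88761
(nice -20), so an aggregate runqueue load could reach about

    88761 * 2^30 ~= 2^46.4

and any computation that multiplies two such aggregate loads, or
scales one by another factor of similar size, lands near 2^93, well
past what fits in 64 bits.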

So save a few bytes and convert rq->nr_running and friends to
unsigned int.
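
As a rough illustration of where the bytes come from (a userspace
sketch, not kernel code; the field names mirror cfs_rq but the layout
is simplified), two adjacent counters that each occupied an 8-byte
slot on LP64 now share one:

    #include <stdio.h>

    /* Two adjacent unsigned long counters take 16 bytes on LP64; as
     * unsigned int they pack into 8, and the following 8-byte-aligned
     * field keeps the struct free of extra padding. */
    struct counters_before { unsigned long nr_running, h_nr_running; unsigned long long exec_clock; };
    struct counters_after  { unsigned int  nr_running, h_nr_running; unsigned long long exec_clock; };

    int main(void)
    {
            printf("before: %zu bytes\n", sizeof(struct counters_before)); /* 24 */
            printf("after:  %zu bytes\n", sizeof(struct counters_after));  /* 16 */
            return 0;
    }

A lone converted counter that is followed by an 8-byte-aligned field
may see the saving absorbed by padding instead, which is why the win
is small but real.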

Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-y3tvyszjdmbibade5bw8zl81@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/sched.h

index 09acaa15161d3c857cbf192a4162bcbfffd1acc9..31e4f61a1629f0af944acd463a4f73c415cb101b 100644
@@ -202,7 +202,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
                        SPLIT_NS(spread0));
        SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
                        cfs_rq->nr_spread_over);
-       SEQ_printf(m, "  .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
+       SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
        SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 #ifdef CONFIG_SMP
index e9553640c1c3b679b6ae80b6ff4446b05b486c4b..678966ca393b822eaa12fdd3b7803e3b38927a53 100644
@@ -4447,10 +4447,10 @@ redo:
                 * correctly treated as an imbalance.
                 */
                env.flags |= LBF_ALL_PINNED;
-               env.load_move   = imbalance;
-               env.src_cpu     = busiest->cpu;
-               env.src_rq      = busiest;
-               env.loop_max    = min_t(unsigned long, sysctl_sched_nr_migrate, busiest->nr_running);
+               env.load_move = imbalance;
+               env.src_cpu   = busiest->cpu;
+               env.src_rq    = busiest;
+               env.loop_max  = min(sysctl_sched_nr_migrate, busiest->nr_running);
 
 more_balance:
                local_irq_save(flags);
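
The min_t(unsigned long, ...) -> min() change above works because both
operands are now unsigned int. A minimal userspace sketch of the
kernel's classic type-checked min() (the exact in-kernel definition
varies by version):

    #include <stdio.h>

    /* The pointer comparison makes the compiler warn whenever the two
     * operand types differ, which is what forced the explicit min_t()
     * cast while nr_running was still unsigned long. */
    #define min(x, y) ({                               \
            __typeof__(x) _min1 = (x);                 \
            __typeof__(y) _min2 = (y);                 \
            (void)(&_min1 == &_min2); /* type check */ \
            _min1 < _min2 ? _min1 : _min2; })

    int main(void)
    {
            unsigned int nr_migrate = 32, nr_running = 7;
            printf("loop_max = %u\n", min(nr_migrate, nr_running));
            return 0;
    }

With matching types the check is silent; mixing unsigned int and
unsigned long here would draw a distinct-pointer-types warning.
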
index fb3acba4d52e052c8bf6a5da32979313a88e0478..7282e7b5f4c7415405898b3cbe4893a9452c28d9 100644
@@ -201,7 +201,7 @@ struct cfs_bandwidth { };
 /* CFS-related fields in a runqueue */
 struct cfs_rq {
        struct load_weight load;
-       unsigned long nr_running, h_nr_running;
+       unsigned int nr_running, h_nr_running;
 
        u64 exec_clock;
        u64 min_vruntime;
@@ -279,7 +279,7 @@ static inline int rt_bandwidth_enabled(void)
 /* Real-Time classes' related field in a runqueue: */
 struct rt_rq {
        struct rt_prio_array active;
-       unsigned long rt_nr_running;
+       unsigned int rt_nr_running;
 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
        struct {
                int curr; /* highest queued rt task prio */
@@ -353,7 +353,7 @@ struct rq {
         * nr_running and cpu_load should be in the same cacheline because
         * remote CPUs use both these fields when doing load calculation.
         */
-       unsigned long nr_running;
+       unsigned int nr_running;
        #define CPU_LOAD_IDX_MAX 5
        unsigned long cpu_load[CPU_LOAD_IDX_MAX];
        unsigned long last_load_update_tick;