From: Luke Browning
Date: Thu, 20 Dec 2007 07:39:59 +0000 (+0900)
Subject: [POWERPC] spufs: reorganize spu_run_init
X-Git-Url: http://git.lede-project.org./?a=commitdiff_plain;h=91569531d1297db42d68136ac0c85cd85223d0b9;p=openwrt%2Fstaging%2Fblogic.git

[POWERPC] spufs: reorganize spu_run_init

This cleans up spu_run_init so that it does all of the spu initialization
for spufs_run_spu.  It initializes the spu context as much as possible
before it activates the spu and writes the runcntl register.

Signed-off-by: Luke Browning
Signed-off-by: Jeremy Kerr
Signed-off-by: Paul Mackerras
---

diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 3b3de6c7ee5b..652ae1366dc8 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -152,23 +152,41 @@ out:
 static int spu_run_init(struct spu_context *ctx, u32 *npc)
 {
         unsigned long runcntl;
+        int ret;
 
         spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 
         if (ctx->flags & SPU_CREATE_ISOLATE) {
+                /*
+                 * Force activation of spu.  Isolated state assumes that
+                 * special loader context is loaded and running on spu.
+                 */
+                if (ctx->state == SPU_STATE_SAVED) {
+                        spu_set_timeslice(ctx);
+
+                        ret = spu_activate(ctx, 0);
+                        if (ret)
+                                return ret;
+                }
+
                 if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
-                        int ret = spu_setup_isolated(ctx);
+                        ret = spu_setup_isolated(ctx);
                         if (ret)
                                 return ret;
                 }
 
-                /* if userspace has set the runcntrl register (eg, to issue an
-                 * isolated exit), we need to re-set it here */
+                /*
+                 * If userspace has set the runcntrl register (eg, to
+                 * issue an isolated exit), we need to re-set it here
+                 */
                 runcntl = ctx->ops->runcntl_read(ctx) &
                         (SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
                 if (runcntl == 0)
                         runcntl = SPU_RUNCNTL_RUNNABLE;
+
+                spuctx_switch_state(ctx, SPU_UTIL_USER);
+                ctx->ops->runcntl_write(ctx, runcntl);
+
         } else {
                 unsigned long privcntl;
 
@@ -180,11 +198,17 @@ static int spu_run_init(struct spu_context *ctx, u32 *npc)
                 ctx->ops->npc_write(ctx, *npc);
                 ctx->ops->privcntl_write(ctx, privcntl);
-        }
 
-        ctx->ops->runcntl_write(ctx, runcntl);
+                if (ctx->state == SPU_STATE_SAVED) {
+                        spu_set_timeslice(ctx);
+                        ret = spu_activate(ctx, 0);
+                        if (ret)
+                                return ret;
+                }
 
-        spuctx_switch_state(ctx, SPU_UTIL_USER);
+                spuctx_switch_state(ctx, SPU_UTIL_USER);
+                ctx->ops->runcntl_write(ctx, runcntl);
+        }
 
         return 0;
 }
@@ -323,25 +347,8 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
         ctx->event_return = 0;
 
         spu_acquire(ctx);
 
-        if (ctx->state == SPU_STATE_SAVED) {
-                __spu_update_sched_info(ctx);
-                spu_set_timeslice(ctx);
-
-                ret = spu_activate(ctx, 0);
-                if (ret) {
-                        spu_release(ctx);
-                        goto out;
-                }
-        } else {
-                /*
-                 * We have to update the scheduling priority under active_mutex
-                 * to protect against find_victim().
-                 *
-                 * No need to update the timeslice ASAP, it will get updated
-                 * once the current one has expired.
-                 */
-                spu_update_sched_info(ctx);
-        }
+        spu_update_sched_info(ctx);
 
         ret = spu_run_init(ctx, npc);
         if (ret) {
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 82ea576c53a3..ef0e5e230fbb 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -104,6 +104,12 @@ void spu_set_timeslice(struct spu_context *ctx)
  */
 void __spu_update_sched_info(struct spu_context *ctx)
 {
+        /*
+         * assert that the context is not on the runqueue, so it is safe
+         * to change its scheduling parameters.
+         */
+        BUG_ON(!list_empty(&ctx->rq));
+
         /*
          * 32-Bit assignments are atomic on powerpc, and we don't care about
          * memory ordering here because retrieving the controlling thread is
@@ -124,23 +130,28 @@ void __spu_update_sched_info(struct spu_context *ctx)
         ctx->policy = current->policy;
 
         /*
-         * A lot of places that don't hold list_mutex poke into
-         * cpus_allowed, including grab_runnable_context which
-         * already holds the runq_lock.  So abuse runq_lock
-         * to protect this field as well.
+         * TO DO: the context may be loaded, so we may need to activate
+         * it again on a different node. But it shouldn't hurt anything
+         * to update its parameters, because we know that the scheduler
+         * is not actively looking at this field, since it is not on the
+         * runqueue. The context will be rescheduled on the proper node
+         * if it is timesliced or preempted.
          */
-        spin_lock(&spu_prio->runq_lock);
         ctx->cpus_allowed = current->cpus_allowed;
-        spin_unlock(&spu_prio->runq_lock);
 }
 
 void spu_update_sched_info(struct spu_context *ctx)
 {
-        int node = ctx->spu->node;
+        int node;
 
-        mutex_lock(&cbe_spu_info[node].list_mutex);
-        __spu_update_sched_info(ctx);
-        mutex_unlock(&cbe_spu_info[node].list_mutex);
+        if (ctx->state == SPU_STATE_RUNNABLE) {
+                node = ctx->spu->node;
+                mutex_lock(&cbe_spu_info[node].list_mutex);
+                __spu_update_sched_info(ctx);
+                mutex_unlock(&cbe_spu_info[node].list_mutex);
+        } else {
+                __spu_update_sched_info(ctx);
+        }
 }
 
 static int __node_allowed(struct spu_context *ctx, int node)
@@ -604,6 +615,10 @@ static struct spu *find_victim(struct spu_context *ctx)
                  * higher priority contexts before lower priority
                  * ones, so this is safe until we introduce
                  * priority inheritance schemes.
+                 *
+                 * XXX if the highest priority context is locked,
+                 * this can loop a long time.  Might be better to
+                 * look at another context or give up after X retries.
                  */
                 if (!mutex_trylock(&victim->state_mutex)) {
                         victim = NULL;
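
The net effect of the run.c hunks is easier to see as straight-line code than as a diff. Below is a minimal, compilable C sketch of the reorganized spu_run_init() flow; it is not the spufs source. struct spu_context is cut down to the few fields the ordering touches, the helpers (spu_set_timeslice, spu_activate, spu_setup_isolated) are stubs, the ops->status_read/runcntl_read/runcntl_write callbacks are replaced by plain fields, and the names spu_run_init_sketch and activate_if_saved are invented for illustration. The only point it demonstrates is the one in the commit message: every piece of context setup, including activation, happens before the runcntl write that lets the SPU run.

/*
 * Minimal sketch of the reorganized spu_run_init() control flow.
 * All types and helpers here are simplified stand-ins, not the
 * spufs definitions; only the ordering is the point.
 */
#include <stdio.h>

enum spu_state { SPU_STATE_SAVED, SPU_STATE_RUNNABLE };

#define SPU_CREATE_ISOLATE              0x1UL
#define SPU_RUNCNTL_RUNNABLE            0x1UL
#define SPU_RUNCNTL_ISOLATE             0x2UL
#define SPU_STATUS_ISOLATED_STATE       0x80UL

struct spu_context {
        enum spu_state state;
        unsigned long flags;
        unsigned long status;           /* stand-in for ops->status_read() */
        unsigned long runcntl;          /* stand-in for ops->runcntl_*() */
};

/* Stubs: in spufs these schedule the context onto a physical SPU. */
static void spu_set_timeslice(struct spu_context *ctx) { (void)ctx; }

static int spu_activate(struct spu_context *ctx, unsigned long flags)
{
        (void)flags;
        ctx->state = SPU_STATE_RUNNABLE;
        return 0;
}

static int spu_setup_isolated(struct spu_context *ctx)
{
        ctx->status |= SPU_STATUS_ISOLATED_STATE;
        return 0;
}

/* Saved contexts get a physical SPU before any further setup. */
static int activate_if_saved(struct spu_context *ctx)
{
        if (ctx->state != SPU_STATE_SAVED)
                return 0;
        spu_set_timeslice(ctx);
        return spu_activate(ctx, 0);
}

static int spu_run_init_sketch(struct spu_context *ctx)
{
        unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
        int ret;

        if (ctx->flags & SPU_CREATE_ISOLATE) {
                /* Activate first: the isolated loader runs on the SPU. */
                ret = activate_if_saved(ctx);
                if (ret)
                        return ret;

                /* Load the isolated-mode loader if it isn't up yet. */
                if (!(ctx->status & SPU_STATUS_ISOLATED_STATE)) {
                        ret = spu_setup_isolated(ctx);
                        if (ret)
                                return ret;
                }

                /* Keep a runcntl value userspace may already have set. */
                if (ctx->runcntl & (SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE))
                        runcntl = ctx->runcntl;
        } else {
                /* Non-isolated: npc/privcntl writes go here, then... */
                ret = activate_if_saved(ctx);
                if (ret)
                        return ret;
        }

        /* Only now is the SPU actually told to run. */
        ctx->runcntl = runcntl;
        return 0;
}

int main(void)
{
        struct spu_context ctx = { .state = SPU_STATE_SAVED };

        if (spu_run_init_sketch(&ctx) == 0)
                printf("state=%d runcntl=%#lx\n", ctx.state, ctx.runcntl);
        return 0;
}

Note the matching simplification in spufs_run_spu: since spu_run_init now activates saved contexts itself, the caller drops its SPU_STATE_SAVED branch and calls spu_update_sched_info unconditionally, and the new BUG_ON(!list_empty(&ctx->rq)) in __spu_update_sched_info documents why updating the scheduling parameters there is safe.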