save = nvkm_mask(device, 0x002520, 0x0000003f, 1 << engn);
nvkm_wr32(device, 0x0032fc, nv_gpuobj(base)->addr >> 12);
- done = nv_wait_ne(fifo, 0x0032fc, 0xffffffff, 0xffffffff);
+ done = nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
+ break;
+ ) >= 0;
nvkm_wr32(device, 0x002520, save);
if (!done) {
nv_error(fifo, "channel %d [%s] unload timeout\n",
	 chan->base.chid, nvkm_client_name(chan));
}
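The ">= 0" and "< 0" tests in these conversions rely on nvkm_msec(device, timeout_ms, body) evaluating to a non-negative value when the body breaks out of the poll loop, and to a negative error code once the timeout expires. Below is a minimal stand-alone sketch of that contract only, assuming that convention; poll_msec and rd32_fn are hypothetical names for illustration and are not part of the driver or its macro.

	#include <errno.h>
	#include <stdint.h>

	/* Hypothetical stand-in for nvkm_rd32(); illustration only. */
	typedef uint32_t (*rd32_fn)(void *priv, uint32_t addr);

	/*
	 * Illustrative timed poll: returns a non-negative value as soon as
	 * (read & mask) == want, or -ETIMEDOUT once the budget is spent,
	 * mirroring the ">= 0 on success, < 0 on timeout" convention the
	 * nvkm_msec() call sites in the hunks depend on.
	 */
	static int64_t poll_msec(void *priv, rd32_fn rd32, uint32_t addr,
				 uint32_t mask, uint32_t want, int64_t ms)
	{
		for (int64_t left = ms; left >= 0; left--) {
			if ((rd32(priv, addr) & mask) == want)
				return left;		/* success: >= 0 */
			/* a real loop would udelay()/usleep_range() here */
		}
		return -ETIMEDOUT;			/* timeout: < 0 */
	}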
nvkm_wr32(device, 0x002634, chan->base.chid);
- if (!nv_wait(fifo, 0x002634, 0xffffffff, chan->base.chid)) {
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x002634) == chan->base.chid)
+ break;
+ ) < 0) {
nv_error(fifo, "channel %d [%s] kick timeout\n",
chan->base.chid, nvkm_client_name(chan));
if (suspend)
struct nvkm_device *device = fifo->base.engine.subdev.device;
nvkm_wr32(device, 0x002634, chan->base.chid);
- if (!nv_wait(fifo, 0x002634, 0x100000, 0x000000)) {
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x002634) & 0x00100000))
+ break;
+ ) < 0) {
nv_error(fifo, "channel %d [%s] kick timeout\n",
chan->base.chid, nvkm_client_name(chan));
return -EBUSY;
* to avoid this, we invalidate the most recently calculated
* instance.
*/
- if (!nv_wait(fifo, NV04_PFIFO_CACHE1_PULL0,
- NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0x00000000))
- nv_warn(fifo, "timeout idling puller\n");
+ nvkm_msec(device, 2000,
+ u32 tmp = nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0);
+ if (!(tmp & NV04_PFIFO_CACHE1_PULL0_HASH_BUSY))
+ break;
+ );
if (nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0) &
NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
/* do the kickoff... */
nvkm_wr32(device, 0x0032fc, nv_gpuobj(base)->addr >> 12);
- if (!nv_wait_ne(fifo, 0x0032fc, 0xffffffff, 0xffffffff)) {
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
+ break;
+ ) < 0) {
nv_error(fifo, "channel %d [%s] unload timeout\n",
chan->base.chid, nvkm_client_name(chan));
if (suspend)