{
struct nvkm_device *device = clk->base.subdev.device;
nvkm_mask(device, 0x137100, (1 << idx), 0x00000000);
- nv_wait(clk, 0x137100, (1 << idx), 0x00000000);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x137100) & (1 << idx)))
+ break;
+ );
}
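/*
 * The nv_wait(obj, reg, mask, value) helper being removed throughout this
 * patch polled with a fixed implicit timeout; its replacement,
 * nvkm_msec(device, timeout_ms, cond...), takes raw statements and
 * evaluates to a negative value on expiry.  A minimal sketch of the idiom
 * under assumed names (poll_nsec/read_timer_ns are hypothetical stand-ins,
 * not the verbatim timer.h definitions):
 */
#include <linux/errno.h>	/* ETIMEDOUT */

#define poll_nsec(device, nsecs, cond...) ({                               \
	s64 _limit = (nsecs), _taken = 0;                                  \
	u64 _time0 = read_timer_ns(device);  /* hypothetical ns source */  \
	do {                                                               \
		cond	/* caller supplies e.g. if (...) break; */         \
	} while ((_taken = read_timer_ns(device) - _time0) < _limit);      \
	/* elapsed ns (>= 0) on early break, -ETIMEDOUT on expiry */       \
	_taken < _limit ? _taken : -ETIMEDOUT;                             \
})
#define poll_usec(device, usecs, cond...) \
	poll_nsec((device), (s64)(usecs) * 1000, ##cond)
#define poll_msec(device, msecs, cond...) \
	poll_usec((device), (s64)(msecs) * 1000, ##cond)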
static void
if (info->coef) {
nvkm_wr32(device, addr + 0x04, info->coef);
nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
- nv_wait(clk, addr + 0x00, 0x00020000, 0x00020000);
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
+ break;
+ );
nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004);
}
}
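/*
 * Sequence in the hunk above (interpretation hedged from the register
 * pattern): write the PLL coefficients, set the enable bit (0x00000001),
 * poll until bit 17 (0x00020000, presumably the lock flag) reads back set,
 * then the final 0x00020004 -> 0x00000004 mask clears that bit again while
 * raising bit 2.
 */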
struct nvkm_device *device = clk->base.subdev.device;
if (info->ssel) {
nvkm_mask(device, 0x137100, (1 << idx), info->ssel);
- nv_wait(clk, 0x137100, (1 << idx), info->ssel);
+ nvkm_msec(device, 2000,
+ u32 tmp = nvkm_rd32(device, 0x137100) & (1 << idx);
+ if (tmp == info->ssel)
+ break;
+ );
}
}
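/*
 * The three conversions above cover the three nv_wait() shapes:
 * wait-for-clear (the masked read drops to zero), wait-for-set (a bit
 * under the mask comes up), and wait-for-equal (the masked value matches
 * an expected pattern).  Because nvkm_msec() accepts raw statements, the
 * wait-for-equal form can declare a scratch local (u32 tmp) inside the
 * poll body.
 */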
nvkm_wr32(device, GPCPLL_CFG, val);
}
- if (!nvkm_timer_wait_eq(clk, 300000, GPCPLL_CFG, GPCPLL_CFG_LOCK,
- GPCPLL_CFG_LOCK)) {
- nv_error(clk, "%s: timeout waiting for pllg lock\n", __func__);
+ if (nvkm_usec(device, 300,
+ if (nvkm_rd32(device, GPCPLL_CFG) & GPCPLL_CFG_LOCK)
+ break;
+ ) < 0)
return -ETIMEDOUT;
- }
/* switch to VCO mode */
nvkm_mask(device, SEL_VCO, 0, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
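/*
 * Note the unit change above: the old nvkm_timer_wait_eq() timeout was in
 * nanoseconds (300000 ns), which the new code expresses as
 * nvkm_usec(device, 300, ...).  The helpers presumably nest by scaling,
 * along these lines (sketch, not the verbatim definitions):
 *
 *	#define nvkm_usec(d, u, cond...) nvkm_nsec((d), (u) * 1000, ##cond)
 *	#define nvkm_msec(d, m, cond...) nvkm_usec((d), (m) * 1000, ##cond)
 */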
gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags)
{
struct nvkm_device *device = clk->subdev.device;
- struct nvkm_fifo *fifo = nvkm_fifo(clk);
+ struct nvkm_fifo *fifo = device->fifo;
/* halt and idle execution engines */
nvkm_mask(device, 0x020060, 0x00070000, 0x00000000);
nvkm_mask(device, 0x002504, 0x00000001, 0x00000001);
/* Wait until the interrupt handler is finished */
- if (!nv_wait(clk, 0x000100, 0xffffffff, 0x00000000))
+ if (nvkm_msec(device, 2000,
+ if (!nvkm_rd32(device, 0x000100))
+ break;
+ ) < 0)
return -EBUSY;
if (fifo)
fifo->pause(fifo, flags);
- if (!nv_wait(clk, 0x002504, 0x00000010, 0x00000010))
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x002504) & 0x00000010)
+ break;
+ ) < 0)
return -EIO;
- if (!nv_wait(clk, 0x00251c, 0x0000003f, 0x0000003f))
+
+ if (nvkm_msec(device, 2000,
+ u32 tmp = nvkm_rd32(device, 0x00251c) & 0x0000003f;
+ if (tmp == 0x0000003f)
+ break;
+ ) < 0)
return -EIO;
return 0;
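/*
 * gt215_clk_pre() idles the execution engines and pauses the fifo before
 * the clocks are reprogrammed, and a matching post/resume step undoes it.
 * A hypothetical caller sketch (example_reclock and gt215_clk_post are
 * assumed here, not shown in this diff):
 */
static int
example_reclock(struct nvkm_clk *clk)
{
	unsigned long flags;
	int ret = gt215_clk_pre(clk, &flags);
	if (ret == 0) {
		/* ... program PLLs while the host is idle ... */
		gt215_clk_post(clk, &flags);
	}
	return ret;
}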
nvkm_wr32(device, coef, info->pll);
nvkm_mask(device, ctrl, 0x00000015, 0x00000015);
nvkm_mask(device, ctrl, 0x00000010, 0x00000000);
- if (!nv_wait(clk, ctrl, 0x00020000, 0x00020000)) {
+ if (nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, ctrl) & 0x00020000)
+ break;
+ ) < 0) {
nvkm_mask(device, ctrl, 0x00000010, 0x00000010);
nvkm_mask(device, src0, 0x00000101, 0x00000000);
return;
goto resume;
}
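/*
 * In the hunk below, 0x004080 presumably latches per-PLL ready bits: the
 * old code warned "Reclocking failed: unstable PLLs" when the bits under
 * pllmask failed to all come up within the timeout; the new code silently
 * jumps to the resume path instead.
 */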
- if (!nv_wait(clk, 0x004080, pllmask, pllmask)) {
- nv_warn(clk,"Reclocking failed: unstable PLLs\n");
+ if (nvkm_msec(device, 2000,
+ u32 tmp = nvkm_rd32(device, 0x004080) & pllmask;
+ if (tmp == pllmask)
+ break;
+ ) < 0)
goto resume;
- }
switch (clk->vsrc) {
case nv_clk_src_cclk: