func->alpha_beta_tables(gr);
if (func->max_ways_evict)
func->max_ways_evict(gr);
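+ /* chipsets with a distribution skip table program it here */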
+ if (func->dist_skip_table)
+ func->dist_skip_table(gr);
}
void
void (*rop_mapping)(struct gf100_gr *);
void (*alpha_beta_tables)(struct gf100_gr *);
void (*max_ways_evict)(struct gf100_gr *);
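+ /* optional: programs the dist skip table (0x4064d0+) */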
+ void (*dist_skip_table)(struct gf100_gr *);
};
extern const struct gf100_grctx_func gf100_grctx;
extern const struct gf100_grctx_func gf117_grctx;
void gf117_grctx_generate_attrib(struct gf100_grctx *);
void gf117_grctx_generate_rop_mapping(struct gf100_gr *);
+void gf117_grctx_generate_dist_skip_table(struct gf100_gr *);
extern const struct gf100_grctx_func gf119_grctx;
void gm107_grctx_generate_attrib(struct gf100_grctx *);
extern const struct gf100_grctx_func gm200_grctx;
+void gm200_grctx_generate_dist_skip_table(struct gf100_gr *);
void gm200_grctx_generate_405b60(struct gf100_gr *);
extern const struct gf100_grctx_func gm20b_grctx;
* PGRAPH context implementation
******************************************************************************/
+void
+gf117_grctx_generate_dist_skip_table(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ int i;
+
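+ /* no TPCs are skipped: clear all eight skip table registers */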
+ for (i = 0; i < 8; i++)
+ nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
+}
+
void
gf117_grctx_generate_rop_mapping(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
const struct gf100_grctx_func *grctx = gr->func->grctx;
u32 idle_timeout;
- int i;
nvkm_mc_unk260(device, 0);
gf100_grctx_generate_floorsweep(gr);
- for (i = 0; i < 8; i++)
- nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
-
gf100_gr_icmd(gr, grctx->icmd);
nvkm_wr32(device, 0x404154, idle_timeout);
gf100_gr_mthd(gr, grctx->mthd);
.rop_mapping = gf117_grctx_generate_rop_mapping,
.alpha_beta_tables = gf100_grctx_generate_alpha_beta_tables,
.max_ways_evict = gf100_grctx_generate_max_ways_evict,
+ .dist_skip_table = gf117_grctx_generate_dist_skip_table,
};
struct nvkm_device *device = gr->base.engine.subdev.device;
const struct gf100_grctx_func *grctx = gr->func->grctx;
u32 idle_timeout;
- int i;
nvkm_mc_unk260(device, 0);
gf100_grctx_generate_floorsweep(gr);
- for (i = 0; i < 8; i++)
- nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
-
nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
nvkm_mask(device, 0x419f78, 0x00000001, 0x00000000);
.tpc_nr = gf100_grctx_generate_tpc_nr,
.rop_mapping = gf117_grctx_generate_rop_mapping,
.alpha_beta_tables = gk104_grctx_generate_alpha_beta_tables,
+ .dist_skip_table = gf117_grctx_generate_dist_skip_table,
};
.tpc_nr = gf100_grctx_generate_tpc_nr,
.rop_mapping = gf117_grctx_generate_rop_mapping,
.alpha_beta_tables = gk104_grctx_generate_alpha_beta_tables,
+ .dist_skip_table = gf117_grctx_generate_dist_skip_table,
};
.tpc_nr = gf100_grctx_generate_tpc_nr,
.rop_mapping = gf117_grctx_generate_rop_mapping,
.alpha_beta_tables = gk104_grctx_generate_alpha_beta_tables,
+ .dist_skip_table = gf117_grctx_generate_dist_skip_table,
};
.tpc_nr = gf100_grctx_generate_tpc_nr,
.rop_mapping = gf117_grctx_generate_rop_mapping,
.alpha_beta_tables = gk104_grctx_generate_alpha_beta_tables,
+ .dist_skip_table = gf117_grctx_generate_dist_skip_table,
};
struct nvkm_device *device = gr->base.engine.subdev.device;
const struct gf100_grctx_func *grctx = gr->func->grctx;
u32 idle_timeout;
- int i;
gf100_gr_mmio(gr, grctx->hub);
gf100_gr_mmio(gr, grctx->gpc);
gf100_grctx_generate_floorsweep(gr);
- nvkm_wr32(device, 0x4064d0, 0x00000001);
- for (i = 1; i < 8; i++)
- nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
nvkm_wr32(device, 0x406500, 0x00000001);
nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
.tpc_nr = gf100_grctx_generate_tpc_nr,
.rop_mapping = gf117_grctx_generate_rop_mapping,
.alpha_beta_tables = gk104_grctx_generate_alpha_beta_tables,
+ .dist_skip_table = gf117_grctx_generate_dist_skip_table,
};
gf100_grctx_generate_floorsweep(gr);
- for (i = 0; i < 8; i++)
- nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
nvkm_wr32(device, 0x406500, 0x00000000);
nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
nvkm_mask(device, 0x418e4c, 0xffffffff, 0x70000000);
}
+void
+gm200_grctx_generate_dist_skip_table(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ u32 data[8] = {};
+ int gpc, ppc, i;
+
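+ /* build one skip-mask byte per GPC, packed into the eight
+ * 32-bit table registers
+ */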
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++) {
+ u8 ppc_tpcs = gr->ppc_tpc_nr[gpc][ppc];
+ u8 ppc_tpcm = gr->ppc_tpc_mask[gpc][ppc];
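+ /* clear the lowest set bit once for each TPC this PPC has
+ * above ppc_tpc_min, leaving only the highest ppc_tpc_min
+ * bits set, e.g. mask 0x0f with ppc_tpc_min == 2 -> 0x0c
+ */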
+ while (ppc_tpcs-- > gr->ppc_tpc_min)
+ ppc_tpcm &= ppc_tpcm - 1;
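+ /* recover the cleared bits: the TPCs to skip on this PPC */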
+ ppc_tpcm ^= gr->ppc_tpc_mask[gpc][ppc];
+ ((u8 *)data)[gpc] |= ppc_tpcm;
+ }
+ }
+
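+ /* write the packed masks to 0x4064d0..0x4064ec */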
+ for (i = 0; i < ARRAY_SIZE(data); i++)
+ nvkm_wr32(device, 0x4064d0 + (i * 0x04), data[i]);
+}
+
const struct gf100_grctx_func
gm200_grctx = {
.main = gm200_grctx_generate_main,
.alpha_nr = 0x1000,
.sm_id = gm107_grctx_generate_sm_id,
.rop_mapping = gf117_grctx_generate_rop_mapping,
+ .dist_skip_table = gm200_grctx_generate_dist_skip_table,
};
gf100_grctx_generate_floorsweep(gr);
- for (i = 0; i < 8; i++)
- nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
nvkm_wr32(device, 0x406500, 0x00000000);
nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
.alpha_nr = 0x800,
.sm_id = gm107_grctx_generate_sm_id,
.rop_mapping = gf117_grctx_generate_rop_mapping,
+ .dist_skip_table = gm200_grctx_generate_dist_skip_table,
};
.alpha_nr = 0x800,
.sm_id = gm107_grctx_generate_sm_id,
.rop_mapping = gf117_grctx_generate_rop_mapping,
+ .dist_skip_table = gm200_grctx_generate_dist_skip_table,
};
.alpha_nr = 0x800,
.sm_id = gm107_grctx_generate_sm_id,
.rop_mapping = gf117_grctx_generate_rop_mapping,
+ .dist_skip_table = gm200_grctx_generate_dist_skip_table,
};
continue;
gr->ppc_mask[i] |= (1 << j);
gr->ppc_tpc_nr[i][j] = hweight8(gr->ppc_tpc_mask[i][j]);
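+ /* track the smallest number of TPCs on any PPC */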
+ if (gr->ppc_tpc_min == 0 ||
+ gr->ppc_tpc_min > gr->ppc_tpc_nr[i][j])
+ gr->ppc_tpc_min = gr->ppc_tpc_nr[i][j];
}
}
u8 ppc_mask[GPC_MAX];
u8 ppc_tpc_mask[GPC_MAX][4];
u8 ppc_tpc_nr[GPC_MAX][4];
+ u8 ppc_tpc_min;
struct gf100_gr_data mmio_data[4];
struct gf100_gr_mmio mmio_list[4096/8];