trace_dpu_crtc_complete_commit(DRMID(crtc));
}
-static void _dpu_crtc_setup_mixer_for_encoder(
- struct drm_crtc *crtc,
- struct drm_encoder *enc)
-{
- struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
- struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
- struct dpu_rm *rm = &dpu_kms->rm;
- struct dpu_crtc_mixer *mixer;
- struct dpu_hw_ctl *last_valid_ctl = NULL;
- int i;
- struct dpu_rm_hw_iter lm_iter, ctl_iter;
-
- dpu_rm_init_hw_iter(&lm_iter, enc->base.id, DPU_HW_BLK_LM);
- dpu_rm_init_hw_iter(&ctl_iter, enc->base.id, DPU_HW_BLK_CTL);
-
- /* Set up all the mixers and ctls reserved by this encoder */
- for (i = cstate->num_mixers; i < ARRAY_SIZE(cstate->mixers); i++) {
- mixer = &cstate->mixers[i];
-
- if (!dpu_rm_get_hw(rm, &lm_iter))
- break;
- mixer->hw_lm = (struct dpu_hw_mixer *)lm_iter.hw;
-
- /* CTL may be <= LMs, if <, multiple LMs controlled by 1 CTL */
- if (!dpu_rm_get_hw(rm, &ctl_iter)) {
- DPU_DEBUG("no ctl assigned to lm %d, using previous\n",
- mixer->hw_lm->idx - LM_0);
- mixer->lm_ctl = last_valid_ctl;
- } else {
- mixer->lm_ctl = (struct dpu_hw_ctl *)ctl_iter.hw;
- last_valid_ctl = mixer->lm_ctl;
- }
-
- /* Shouldn't happen, mixers are always >= ctls */
- if (!mixer->lm_ctl) {
- DPU_ERROR("no valid ctls found for lm %d\n",
- mixer->hw_lm->idx - LM_0);
- return;
- }
-
- cstate->num_mixers++;
- DPU_DEBUG("setup mixer %d: lm %d\n",
- i, mixer->hw_lm->idx - LM_0);
- DPU_DEBUG("setup mixer %d: ctl %d\n",
- i, mixer->lm_ctl->idx - CTL_0);
- }
-}
-
-static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
-{
- struct drm_encoder *enc;
-
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
-
- /* Check for mixers on all encoders attached to this crtc */
- drm_for_each_encoder_mask(enc, crtc->dev, crtc->state->encoder_mask)
- _dpu_crtc_setup_mixer_for_encoder(crtc, enc);
-}
-
static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
dev = crtc->dev;
smmu_state = &dpu_crtc->smmu_state;
- if (!cstate->num_mixers) {
- _dpu_crtc_setup_mixers(crtc);
- _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
- }
+ _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
if (dpu_crtc->event) {
WARN_ON(dpu_crtc->event);
struct list_head *connector_list;
struct drm_connector *conn = NULL, *conn_iter;
struct drm_crtc *drm_crtc;
- struct dpu_rm_hw_iter pp_iter, ctl_iter;
+ struct dpu_crtc_state *cstate;
+ struct dpu_rm_hw_iter hw_iter;
struct msm_display_topology topology;
struct dpu_hw_ctl *hw_ctl[MAX_CHANNELS_PER_ENC] = { NULL };
+ struct dpu_hw_mixer *hw_lm[MAX_CHANNELS_PER_ENC] = { NULL };
+ int num_lm = 0, num_ctl = 0;
int i = 0, ret;
if (!drm_enc) {
return;
}
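+	/* query the pingpong blocks reserved for this encoder */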
- dpu_rm_init_hw_iter(&pp_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG);
+ dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG);
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
dpu_enc->hw_pp[i] = NULL;
- if (!dpu_rm_get_hw(&dpu_kms->rm, &pp_iter))
+ if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
break;
- dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) pp_iter.hw;
+ dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) hw_iter.hw;
}
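+	/* query the CTL blocks reserved for this encoder */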
- dpu_rm_init_hw_iter(&ctl_iter, drm_enc->base.id, DPU_HW_BLK_CTL);
+ dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_CTL);
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
- if (!dpu_rm_get_hw(&dpu_kms->rm, &ctl_iter))
+ if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
break;
- hw_ctl[i] = (struct dpu_hw_ctl *)ctl_iter.hw;
+ hw_ctl[i] = (struct dpu_hw_ctl *)hw_iter.hw;
+ num_ctl++;
}
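+	/* query the layer mixers reserved for this encoder */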
+ dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_LM);
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+ if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
+ break;
+ hw_lm[i] = (struct dpu_hw_mixer *)hw_iter.hw;
+ num_lm++;
+ }
+
+ cstate = to_dpu_crtc_state(drm_crtc->state);
+
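+	/*
+	 * Map each reserved LM to a CTL. Fewer CTLs than LMs may have been
+	 * reserved; in that case the last CTL drives the remaining mixers.
+	 */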
+ for (i = 0; i < num_lm; i++) {
+		int ctl_idx = (i < num_ctl) ? i : (num_ctl - 1);
+
+ cstate->mixers[i].hw_lm = hw_lm[i];
+ cstate->mixers[i].lm_ctl = hw_ctl[ctl_idx];
+ }
+
+ cstate->num_mixers = num_lm;
+
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];