Avoid a NULL access in case res_ctx is used to access res_pool before it is fully constructed. Also make it clear which functions have a dependency on resource_pool by passing the pool to them explicitly.
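
A minimal, self-contained sketch of the new calling convention, using simplified hypothetical types modeled on resource_reference_clock_source() in the diff below (not the real core_types.h definitions): the caller passes the pool it already owns (core_dc->res_pool in the driver) instead of the helper reading res_ctx->pool, so resource_context never holds a pool pointer that might not be constructed yet.

    #include <stdio.h>

    #define MAX_PIPES 6

    struct clock_source { int id; };

    struct resource_pool {                 /* owned by core_dc (dc->res_pool) */
        int clk_src_count;
        struct clock_source *clock_sources[MAX_PIPES];
    };

    struct resource_context {              /* no longer carries a pool pointer */
        int clock_source_ref_count[MAX_PIPES];
    };

    /* the resource_pool dependency is now visible in the signature instead of
     * being hidden behind res_ctx->pool */
    static void reference_clock_source(struct resource_context *res_ctx,
                                       const struct resource_pool *pool,
                                       struct clock_source *clock_source)
    {
        int i;

        for (i = 0; i < pool->clk_src_count; i++) {
            if (pool->clock_sources[i] != clock_source)
                continue;
            res_ctx->clock_source_ref_count[i]++;
            break;
        }
    }

    int main(void)
    {
        struct clock_source cs = { .id = 0 };
        struct resource_pool pool = { .clk_src_count = 1,
                                      .clock_sources = { &cs } };
        struct resource_context res_ctx = { { 0 } };

        /* caller supplies the pool it owns; nothing is read from res_ctx */
        reference_clock_source(&res_ctx, &pool, &cs);
        printf("ref count on clock source 0: %d\n",
               res_ctx.clock_source_ref_count[0]);
        return 0;
    }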
Signed-off-by: Tony Cheng <tony.cheng@amd.com>
Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
{
int i, j;
int group_index = 0;
- int pipe_count = ctx->res_ctx.pool->pipe_count;
+ int pipe_count = core_dc->res_pool->pipe_count;
struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
for (i = 0; i < pipe_count; i++) {
post_surface_trace(dc);
- for (i = 0; i < context->res_ctx.pool->pipe_count; i++)
+ for (i = 0; i < core_dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].stream == NULL) {
context->res_ctx.pipe_ctx[i].pipe_idx = i;
core_dc->hwss.power_down_front_end(
{
int j;
- for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
+ for (j = 0; j < MAX_PIPES; j++) {
const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
if (surface == &pipe_ctx->surface->public) {
/* add surface to context */
if (!resource_attach_surfaces_to_context(
- new_surfaces, surface_count, dc_stream, context)) {
+ new_surfaces, surface_count, dc_stream,
+ context, core_dc->res_pool)) {
BREAK_TO_DEBUGGER();
goto fail;
}
/* not sure if we still need this */
if (update_type == UPDATE_TYPE_FULL) {
- for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
+ for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
if (pipe_ctx->surface != surface)
for (i = 0; i < surface_count; i++) {
struct core_surface *surface = DC_SURFACE_TO_CORE(srf_updates[i].surface);
- for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
+ for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
if (pipe_ctx->surface != surface)
context_timing_trace(dc, &context->res_ctx);
}
- for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
+ for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
struct pipe_ctx *cur_pipe_ctx;
bool is_new_pipe_surface = true;
}
/* Unlock pipes */
- for (i = context->res_ctx.pool->pipe_count - 1; i >= 0; i--) {
+ for (i = core_dc->res_pool->pipe_count - 1; i >= 0; i--) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
for (j = 0; j < surface_count; j++) {
memset(core_dc->current_context, 0,
sizeof(*core_dc->current_context));
- core_dc->current_context->res_ctx.pool = core_dc->res_pool;
-
break;
}
case DP_TEST_PATTERN_VIDEO_MODE:
{
/* restore bitdepth reduction */
- link->dc->current_context->res_ctx.pool->funcs->
+ link->dc->res_pool->funcs->
build_bit_depth_reduction_params(pipe_ctx->stream,
¶ms);
pipe_ctx->stream->bit_depth_params = params;
void resource_unreference_clock_source(
struct resource_context *res_ctx,
+ const struct resource_pool *pool,
struct clock_source **clock_source)
{
int i;
- for (i = 0; i < res_ctx->pool->clk_src_count; i++) {
- if (res_ctx->pool->clock_sources[i] != *clock_source)
+ for (i = 0; i < pool->clk_src_count; i++) {
+ if (pool->clock_sources[i] != *clock_source)
continue;
res_ctx->clock_source_ref_count[i]--;
break;
}
- if (res_ctx->pool->dp_clock_source == *clock_source) {
+ if (pool->dp_clock_source == *clock_source) {
res_ctx->dp_clock_source_ref_count--;
if (res_ctx->dp_clock_source_ref_count == 0)
void resource_reference_clock_source(
struct resource_context *res_ctx,
+ const struct resource_pool *pool,
struct clock_source *clock_source)
{
int i;
- for (i = 0; i < res_ctx->pool->clk_src_count; i++) {
- if (res_ctx->pool->clock_sources[i] != clock_source)
+ for (i = 0; i < pool->clk_src_count; i++) {
+ if (pool->clock_sources[i] != clock_source)
continue;
res_ctx->clock_source_ref_count[i]++;
break;
}
- if (res_ctx->pool->dp_clock_source == clock_source)
+ if (pool->dp_clock_source == clock_source)
res_ctx->dp_clock_source_ref_count++;
}
static void detach_surfaces_for_stream(
struct validate_context *context,
+ const struct resource_pool *pool,
const struct dc_stream *dc_stream)
{
int i;
struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
- for (i = 0; i < context->res_ctx.pool->pipe_count; i++) {
+ for (i = 0; i < pool->pipe_count; i++) {
struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
if (cur_pipe->stream == stream) {
cur_pipe->surface = NULL;
}
}
-struct pipe_ctx *find_idle_secondary_pipe(struct resource_context *res_ctx)
+struct pipe_ctx *find_idle_secondary_pipe(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool)
{
int i;
struct pipe_ctx *secondary_pipe = NULL;
* assignment more consistent
*/
- for (i = res_ctx->pool->pipe_count - 1; i >= 0; i--) {
+ for (i = pool->pipe_count - 1; i >= 0; i--) {
if (res_ctx->pipe_ctx[i].stream == NULL) {
secondary_pipe = &res_ctx->pipe_ctx[i];
secondary_pipe->pipe_idx = i;
const struct core_stream *stream)
{
int i;
- for (i = 0; i < res_ctx->pool->pipe_count; i++) {
+ for (i = 0; i < MAX_PIPES; i++) {
if (res_ctx->pipe_ctx[i].stream == stream &&
res_ctx->pipe_ctx[i].stream_enc) {
return &res_ctx->pipe_ctx[i];
*/
static struct pipe_ctx *acquire_free_pipe_for_stream(
struct validate_context *context,
+ const struct resource_pool *pool,
const struct dc_stream *dc_stream)
{
int i;
return head_pipe;
/* Re-use pipe already acquired for this stream if available*/
- for (i = res_ctx->pool->pipe_count - 1; i >= 0; i--) {
+ for (i = pool->pipe_count - 1; i >= 0; i--) {
if (res_ctx->pipe_ctx[i].stream == stream &&
!res_ctx->pipe_ctx[i].surface) {
return &res_ctx->pipe_ctx[i];
* to acquire an idle one to satisfy the request
*/
- if(!res_ctx->pool->funcs->acquire_idle_pipe_for_layer)
+ if (!pool->funcs->acquire_idle_pipe_for_layer)
return NULL;
- return res_ctx->pool->funcs->acquire_idle_pipe_for_layer(context, stream);
+ return pool->funcs->acquire_idle_pipe_for_layer(context, pool, stream);
}
int i;
struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
- for (i = res_ctx->pool->pipe_count - 1; i >= 0; i--) {
+ for (i = MAX_PIPES - 1; i >= 0; i--) {
if (res_ctx->pipe_ctx[i].stream == stream &&
!res_ctx->pipe_ctx[i].surface) {
res_ctx->pipe_ctx[i].stream = NULL;
const struct dc_surface * const *surfaces,
int surface_count,
const struct dc_stream *dc_stream,
- struct validate_context *context)
+ struct validate_context *context,
+ const struct resource_pool *pool)
{
int i;
struct pipe_ctx *tail_pipe;
for (i = 0; i < surface_count; i++)
dc_surface_retain(surfaces[i]);
- detach_surfaces_for_stream(context, dc_stream);
+ detach_surfaces_for_stream(context, pool, dc_stream);
/* release existing surfaces*/
for (i = 0; i < stream_status->surface_count; i++)
tail_pipe = NULL;
for (i = 0; i < surface_count; i++) {
struct core_surface *surface = DC_SURFACE_TO_CORE(surfaces[i]);
- struct pipe_ctx *free_pipe = acquire_free_pipe_for_stream(context, dc_stream);
+ struct pipe_ctx *free_pipe = acquire_free_pipe_for_stream(
+ context, pool, dc_stream);
if (!free_pipe) {
stream_status->surfaces[i] = NULL;
const struct dc_validation_set set[],
int set_count,
const struct validate_context *old_context,
- struct validate_context *context)
+ struct validate_context *context,
+ const struct resource_pool *pool)
{
int i, j;
old_context->stream_status[j].surfaces,
old_context->stream_status[j].surface_count,
&context->streams[i]->public,
- context))
+ context, pool))
return false;
context->stream_status[i] = old_context->stream_status[j];
}
set[i].surfaces,
set[i].surface_count,
&context->streams[i]->public,
- context))
+ context, pool))
return false;
}
static void set_stream_engine_in_use(
struct resource_context *res_ctx,
+ const struct resource_pool *pool,
struct stream_encoder *stream_enc)
{
int i;
- for (i = 0; i < res_ctx->pool->stream_enc_count; i++) {
- if (res_ctx->pool->stream_enc[i] == stream_enc)
+ for (i = 0; i < pool->stream_enc_count; i++) {
+ if (pool->stream_enc[i] == stream_enc)
res_ctx->is_stream_enc_acquired[i] = true;
}
}
/* TODO: release audio object */
static void set_audio_in_use(
struct resource_context *res_ctx,
+ const struct resource_pool *pool,
struct audio *audio)
{
int i;
- for (i = 0; i < res_ctx->pool->audio_count; i++) {
- if (res_ctx->pool->audios[i] == audio) {
+ for (i = 0; i < pool->audio_count; i++) {
+ if (pool->audios[i] == audio)
res_ctx->is_audio_acquired[i] = true;
- }
}
}
static int acquire_first_free_pipe(
struct resource_context *res_ctx,
+ const struct resource_pool *pool,
struct core_stream *stream)
{
int i;
- for (i = 0; i < res_ctx->pool->pipe_count; i++) {
+ for (i = 0; i < pool->pipe_count; i++) {
if (!res_ctx->pipe_ctx[i].stream) {
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
- pipe_ctx->tg = res_ctx->pool->timing_generators[i];
- pipe_ctx->mi = res_ctx->pool->mis[i];
- pipe_ctx->ipp = res_ctx->pool->ipps[i];
- pipe_ctx->xfm = res_ctx->pool->transforms[i];
- pipe_ctx->opp = res_ctx->pool->opps[i];
- pipe_ctx->dis_clk = res_ctx->pool->display_clock;
+ pipe_ctx->tg = pool->timing_generators[i];
+ pipe_ctx->mi = pool->mis[i];
+ pipe_ctx->ipp = pool->ipps[i];
+ pipe_ctx->xfm = pool->transforms[i];
+ pipe_ctx->opp = pool->opps[i];
+ pipe_ctx->dis_clk = pool->display_clock;
pipe_ctx->pipe_idx = i;
pipe_ctx->stream = stream;
static struct stream_encoder *find_first_free_match_stream_enc_for_link(
struct resource_context *res_ctx,
+ const struct resource_pool *pool,
struct core_stream *stream)
{
int i;
int j = -1;
struct core_link *link = stream->sink->link;
- for (i = 0; i < res_ctx->pool->stream_enc_count; i++) {
+ for (i = 0; i < pool->stream_enc_count; i++) {
if (!res_ctx->is_stream_enc_acquired[i] &&
- res_ctx->pool->stream_enc[i]) {
+ pool->stream_enc[i]) {
/* Store first available for MST second display
* in daisy chain use case */
j = i;
- if (res_ctx->pool->stream_enc[i]->id ==
+ if (pool->stream_enc[i]->id ==
link->link_enc->preferred_engine)
- return res_ctx->pool->stream_enc[i];
+ return pool->stream_enc[i];
}
}
*/
if (j >= 0 && dc_is_dp_signal(stream->signal))
- return res_ctx->pool->stream_enc[j];
+ return pool->stream_enc[j];
return NULL;
}
-static struct audio *find_first_free_audio(struct resource_context *res_ctx)
+static struct audio *find_first_free_audio(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool)
{
int i;
- for (i = 0; i < res_ctx->pool->audio_count; i++) {
+ for (i = 0; i < pool->audio_count; i++) {
if (res_ctx->is_audio_acquired[i] == false) {
- return res_ctx->pool->audios[i];
+ return pool->audios[i];
}
}
const struct core_dc *dc,
struct validate_context *context)
{
+ const struct resource_pool *pool = dc->res_pool;
int i, j;
calculate_phy_pix_clks(dc, context);
stream->bit_depth_params =
dc->current_context->streams[i]->bit_depth_params;
stream->clamping = dc->current_context->streams[i]->clamping;
- continue;
+ continue;
+ }
}
- }
/* mark resources used for stream that is already active */
- for (j = 0; j < MAX_PIPES; j++) {
+ for (j = 0; j < pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx =
&context->res_ctx.pipe_ctx[j];
const struct pipe_ctx *old_pipe_ctx =
continue;
set_stream_engine_in_use(
- &context->res_ctx,
+ &context->res_ctx, pool,
pipe_ctx->stream_enc);
/* Switch to dp clock source only if there is
*/
if (dc_is_dp_signal(pipe_ctx->stream->signal) &&
!find_pll_sharable_stream(stream, context))
- pipe_ctx->clock_source =
- context->res_ctx.pool->dp_clock_source;
+ pipe_ctx->clock_source = pool->dp_clock_source;
resource_reference_clock_source(
- &context->res_ctx,
+ &context->res_ctx, pool,
pipe_ctx->clock_source);
- set_audio_in_use(&context->res_ctx,
- pipe_ctx->audio);
+ set_audio_in_use(&context->res_ctx, pool,
+ pipe_ctx->audio);
}
}
if (resource_is_stream_unchanged(dc->current_context, stream))
continue;
/* acquire new resources */
- pipe_idx = acquire_first_free_pipe(&context->res_ctx, stream);
+ pipe_idx = acquire_first_free_pipe(
+ &context->res_ctx, pool, stream);
if (pipe_idx < 0)
return DC_NO_CONTROLLER_RESOURCE;
-
pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
pipe_ctx->stream_enc =
find_first_free_match_stream_enc_for_link(
- &context->res_ctx, stream);
+ &context->res_ctx, pool, stream);
if (!pipe_ctx->stream_enc)
return DC_NO_STREAM_ENG_RESOURCE;
set_stream_engine_in_use(
- &context->res_ctx,
+ &context->res_ctx, pool,
pipe_ctx->stream_enc);
/* TODO: Add check if ASIC support and EDID audio */
dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
stream->public.audio_info.mode_count) {
pipe_ctx->audio = find_first_free_audio(
- &context->res_ctx);
+ &context->res_ctx, pool);
/*
* Audio assigned in order first come first get.
*/
if (pipe_ctx->audio)
set_audio_in_use(
- &context->res_ctx,
+ &context->res_ctx, pool,
pipe_ctx->audio);
}
*dst_ctx = *src_ctx;
- for (i = 0; i < dst_ctx->res_ctx.pool->pipe_count; i++) {
+ for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *cur_pipe = &dst_ctx->res_ctx.pipe_ctx[i];
if (cur_pipe->top_pipe)
}
struct clock_source *dc_resource_find_first_free_pll(
- struct resource_context *res_ctx)
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool)
{
int i;
- for (i = 0; i < res_ctx->pool->clk_src_count; ++i) {
+ for (i = 0; i < pool->clk_src_count; ++i) {
if (res_ctx->clock_source_ref_count[i] == 0)
- return res_ctx->pool->clock_sources[i];
+ return pool->clock_sources[i];
}
return NULL;
struct validate_context *context)
{
int i, j;
+ const struct resource_pool *pool = dc->res_pool;
/* acquire new resources */
for (i = 0; i < context->stream_count; i++) {
if (dc_is_dp_signal(pipe_ctx->stream->signal)
|| pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL)
- pipe_ctx->clock_source =
- context->res_ctx.pool->dp_clock_source;
+ pipe_ctx->clock_source = pool->dp_clock_source;
else {
pipe_ctx->clock_source = NULL;
if (pipe_ctx->clock_source == NULL)
pipe_ctx->clock_source =
- dc_resource_find_first_free_pll(&context->res_ctx);
+ dc_resource_find_first_free_pll(
+ &context->res_ctx,
+ pool);
}
if (pipe_ctx->clock_source == NULL)
return DC_NO_CLOCK_SOURCE_RESOURCE;
resource_reference_clock_source(
- &context->res_ctx,
+ &context->res_ctx, pool,
pipe_ctx->clock_source);
/* only one cs per stream regardless of mpo */
struct dc_cursor_position pos_cpy = *position;
struct dc_cursor_mi_param param = {
.pixel_clk_khz = dc_stream->timing.pix_clk_khz,
- .ref_clk_khz = res_ctx->pool->ref_clock_inKhz,
+ .ref_clk_khz = core_dc->res_pool->ref_clock_inKhz,
.viewport_x_start = pipe_ctx->scl_data.viewport.x,
.viewport_width = pipe_ctx->scl_data.viewport.width,
.h_scale_ratio = pipe_ctx->scl_data.ratios.horz
if (core_dc->current_context == NULL)
return NULL;
- for (i = 0; i < core_dc->current_context->res_ctx.pool->pipe_count;
- i++) {
+ for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx =
&core_dc->current_context->res_ctx.pipe_ctx[i];
bool decrease_allowed)
{
if (decrease_allowed || context->dispclk_khz > dc->current_context->dispclk_khz) {
- context->res_ctx.pool->display_clock->funcs->set_clock(
- context->res_ctx.pool->display_clock,
+ dc->res_pool->display_clock->funcs->set_clock(
+ dc->res_pool->display_clock,
context->dispclk_khz * 115 / 100);
dc->current_context->bw_results.dispclk_khz = context->dispclk_khz;
dc->current_context->dispclk_khz = context->dispclk_khz;
if (!dce100_validate_surface_sets(set, set_count))
return DC_FAIL_SURFACE_VALIDATE;
- context->res_ctx.pool = dc->res_pool;
-
for (i = 0; i < set_count; i++) {
context->streams[i] = DC_STREAM_TO_CORE(set[i].stream);
dc_stream_retain(&context->streams[i]->public);
if (result == DC_OK)
result = resource_map_clock_resources(dc, context);
- if (!resource_validate_attach_surfaces(
- set, set_count, dc->current_context, context)) {
+ if (!resource_validate_attach_surfaces(set, set_count,
+ dc->current_context, context, dc->res_pool)) {
DC_ERROR("Failed to attach surface to stream!\n");
return DC_FAIL_ATTACH_SURFACES;
}
{
enum dc_status result = DC_ERROR_UNEXPECTED;
- context->res_ctx.pool = dc->res_pool;
-
context->streams[0] = DC_STREAM_TO_CORE(dc_stream);
dc_stream_retain(&context->streams[0]->public);
context->stream_count++;
}
}
-static void set_safe_displaymarks(struct resource_context *res_ctx)
+static void set_safe_displaymarks(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool)
{
int i;
- int underlay_idx = res_ctx->pool->underlay_pipe_index;
+ int underlay_idx = pool->underlay_pipe_index;
struct bw_watermarks max_marks = {
MAX_WATERMARK, MAX_WATERMARK, MAX_WATERMARK, MAX_WATERMARK };
struct bw_watermarks nbp_marks = {
if (clk_src &&
clk_src != pipe_ctx->clock_source) {
resource_unreference_clock_source(
- res_ctx, &pipe_ctx->clock_source);
+ res_ctx, dc->res_pool,
+ &pipe_ctx->clock_source);
pipe_ctx->clock_source = clk_src;
- resource_reference_clock_source(res_ctx, clk_src);
+ resource_reference_clock_source(
+ res_ctx, dc->res_pool, clk_src);
dce_crtc_switch_to_clk_src(dc->hwseq, clk_src, i);
}
pipe_ctx->tg->funcs->disable_crtc(pipe_ctx->tg);
pipe_ctx->mi->funcs->free_mem_input(
pipe_ctx->mi, context->stream_count);
- resource_unreference_clock_source(
- &context->res_ctx, &pipe_ctx->clock_source);
+ resource_unreference_clock_source(&context->res_ctx, dc->res_pool,
+ &pipe_ctx->clock_source);
dc->hwss.power_down_front_end((struct core_dc *)dc, pipe_ctx);
enum dc_status status = DC_ERROR_UNEXPECTED;
int i;
- for (i = 0; i < context->res_ctx.pool->pipe_count; i++) {
+ for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe_ctx_old =
&dc->current_context->res_ctx.pipe_ctx[i];
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
/* Reset old context */
/* look up the targets that have been removed since last commit */
- for (i = 0; i < context->res_ctx.pool->pipe_count; i++) {
+ for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe_ctx_old =
&dc->current_context->res_ctx.pipe_ctx[i];
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
dcb->funcs->set_scratch_critical_state(dcb, true);
/* below is for real asic only */
- for (i = 0; i < context->res_ctx.pool->pipe_count; i++) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx_old =
&dc->current_context->res_ctx.pipe_ctx[i];
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
PIPE_GATING_CONTROL_DISABLE);
}
- set_safe_displaymarks(&context->res_ctx);
+ set_safe_displaymarks(&context->res_ctx, dc->res_pool);
/*TODO: when pplib works*/
apply_min_clocks(dc, context, &clocks_state, true);
if (context->dispclk_khz
> dc->current_context->dispclk_khz) {
- context->res_ctx.pool->display_clock->funcs->set_clock(
- context->res_ctx.pool->display_clock,
+ dc->res_pool->display_clock->funcs->set_clock(
+ dc->res_pool->display_clock,
context->dispclk_khz * 115 / 100);
}
/* program audio wall clock. use HDMI as clock source if HDMI
* find first available pipe with audio, setup audio wall DTO per topology
* instead of per pipe.
*/
- for (i = 0; i < context->res_ctx.pool->pipe_count; i++) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream == NULL)
}
/* no HDMI audio is found, try DP audio */
- if (i == context->res_ctx.pool->pipe_count) {
- for (i = 0; i < context->res_ctx.pool->pipe_count; i++) {
+ if (i == dc->res_pool->pipe_count) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream == NULL)
}
}
- for (i = 0; i < context->res_ctx.pool->pipe_count; i++) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx_old =
&dc->current_context->res_ctx.pipe_ctx[i];
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
dce110_set_displaymarks(dc, context);
if (decrease_allowed || context->dispclk_khz > dc->current_context->dispclk_khz) {
- context->res_ctx.pool->display_clock->funcs->set_clock(
- context->res_ctx.pool->display_clock,
+ dc->res_pool->display_clock->funcs->set_clock(
+ dc->res_pool->display_clock,
context->dispclk_khz * 115 / 100);
dc->current_context->bw_results.dispclk_khz = context->dispclk_khz;
dc->current_context->dispclk_khz = context->dispclk_khz;
if (!surface)
return;
- for (i = 0; i < context->res_ctx.pool->pipe_count; i++) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
if (pipe_ctx->surface != surface)
continue;
if (!is_surface_pixel_format_supported(pipe_ctx,
- context->res_ctx.pool->underlay_pipe_index))
+ dc->res_pool->underlay_pipe_index))
return DC_SURFACE_PIXEL_FORMAT_UNSUPPORTED;
if (!pipe_ctx->tg->funcs->validate_timing(
&dc->bw_dceip,
&dc->bw_vbios,
context->res_ctx.pipe_ctx,
- context->res_ctx.pool->pipe_count,
+ dc->res_pool->pipe_count,
&context->bw_results))
result = true;
context->dispclk_khz = context->bw_results.dispclk_khz;
if (!dce110_validate_surface_sets(set, set_count))
return DC_FAIL_SURFACE_VALIDATE;
- context->res_ctx.pool = dc->res_pool;
-
for (i = 0; i < set_count; i++) {
context->streams[i] = DC_STREAM_TO_CORE(set[i].stream);
dc_stream_retain(&context->streams[i]->public);
if (result == DC_OK)
result = resource_map_clock_resources(dc, context);
- if (!resource_validate_attach_surfaces(
- set, set_count, dc->current_context, context)) {
+ if (!resource_validate_attach_surfaces(set, set_count,
+ dc->current_context, context, dc->res_pool)) {
DC_ERROR("Failed to attach surface to stream!\n");
return DC_FAIL_ATTACH_SURFACES;
}
{
enum dc_status result = DC_ERROR_UNEXPECTED;
- context->res_ctx.pool = dc->res_pool;
-
context->streams[0] = DC_STREAM_TO_CORE(dc_stream);
dc_stream_retain(&context->streams[0]->public);
context->stream_count++;
static struct pipe_ctx *dce110_acquire_underlay(
struct validate_context *context,
+ const struct resource_pool *pool,
struct core_stream *stream)
{
struct core_dc *dc = DC_TO_CORE(stream->ctx->dc);
struct resource_context *res_ctx = &context->res_ctx;
- unsigned int underlay_idx = res_ctx->pool->underlay_pipe_index;
+ unsigned int underlay_idx = pool->underlay_pipe_index;
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[underlay_idx];
if (res_ctx->pipe_ctx[underlay_idx].stream)
return NULL;
- pipe_ctx->tg = res_ctx->pool->timing_generators[underlay_idx];
- pipe_ctx->mi = res_ctx->pool->mis[underlay_idx];
+ pipe_ctx->tg = pool->timing_generators[underlay_idx];
+ pipe_ctx->mi = pool->mis[underlay_idx];
/*pipe_ctx->ipp = res_ctx->pool->ipps[underlay_idx];*/
- pipe_ctx->xfm = res_ctx->pool->transforms[underlay_idx];
- pipe_ctx->opp = res_ctx->pool->opps[underlay_idx];
- pipe_ctx->dis_clk = res_ctx->pool->display_clock;
+ pipe_ctx->xfm = pool->transforms[underlay_idx];
+ pipe_ctx->opp = pool->opps[underlay_idx];
+ pipe_ctx->dis_clk = pool->display_clock;
pipe_ctx->pipe_idx = underlay_idx;
pipe_ctx->stream = stream;
}
}
-static struct clock_source *find_matching_pll(struct resource_context *res_ctx,
+static struct clock_source *find_matching_pll(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
const struct core_stream *const stream)
{
switch (stream->sink->link->link_enc->transmitter) {
case TRANSMITTER_UNIPHY_A:
- return res_ctx->pool->clock_sources[DCE112_CLK_SRC_PLL0];
+ return pool->clock_sources[DCE112_CLK_SRC_PLL0];
case TRANSMITTER_UNIPHY_B:
- return res_ctx->pool->clock_sources[DCE112_CLK_SRC_PLL1];
+ return pool->clock_sources[DCE112_CLK_SRC_PLL1];
case TRANSMITTER_UNIPHY_C:
- return res_ctx->pool->clock_sources[DCE112_CLK_SRC_PLL2];
+ return pool->clock_sources[DCE112_CLK_SRC_PLL2];
case TRANSMITTER_UNIPHY_D:
- return res_ctx->pool->clock_sources[DCE112_CLK_SRC_PLL3];
+ return pool->clock_sources[DCE112_CLK_SRC_PLL3];
case TRANSMITTER_UNIPHY_E:
- return res_ctx->pool->clock_sources[DCE112_CLK_SRC_PLL4];
+ return pool->clock_sources[DCE112_CLK_SRC_PLL4];
case TRANSMITTER_UNIPHY_F:
- return res_ctx->pool->clock_sources[DCE112_CLK_SRC_PLL5];
+ return pool->clock_sources[DCE112_CLK_SRC_PLL5];
default:
return NULL;
};
&dc->bw_dceip,
&dc->bw_vbios,
context->res_ctx.pipe_ctx,
- context->res_ctx.pool->pipe_count,
+ dc->res_pool->pipe_count,
&context->bw_results))
result = true;
context->dispclk_khz = context->bw_results.dispclk_khz;
if (dc_is_dp_signal(pipe_ctx->stream->signal)
|| pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL)
pipe_ctx->clock_source =
- context->res_ctx.pool->dp_clock_source;
+ dc->res_pool->dp_clock_source;
else
- pipe_ctx->clock_source =
- find_matching_pll(&context->res_ctx,
- stream);
+ pipe_ctx->clock_source = find_matching_pll(
+ &context->res_ctx, dc->res_pool,
+ stream);
if (pipe_ctx->clock_source == NULL)
return DC_NO_CLOCK_SOURCE_RESOURCE;
resource_reference_clock_source(
&context->res_ctx,
+ dc->res_pool,
pipe_ctx->clock_source);
/* only one cs per stream regardless of mpo */
if (!dce112_validate_surface_sets(set, set_count))
return DC_FAIL_SURFACE_VALIDATE;
- context->res_ctx.pool = dc->res_pool;
-
for (i = 0; i < set_count; i++) {
context->streams[i] = DC_STREAM_TO_CORE(set[i].stream);
dc_stream_retain(&context->streams[i]->public);
if (result == DC_OK)
result = resource_map_phy_clock_resources(dc, context);
- if (!resource_validate_attach_surfaces(
- set, set_count, dc->current_context, context)) {
+ if (!resource_validate_attach_surfaces(set, set_count,
+ dc->current_context, context, dc->res_pool)) {
DC_ERROR("Failed to attach surface to stream!\n");
return DC_FAIL_ATTACH_SURFACES;
}
{
enum dc_status result = DC_ERROR_UNEXPECTED;
- context->res_ctx.pool = dc->res_pool;
-
context->streams[0] = DC_STREAM_TO_CORE(dc_stream);
dc_stream_retain(&context->streams[0]->public);
context->stream_count++;
if (!dce80_validate_surface_sets(set, set_count))
return DC_FAIL_SURFACE_VALIDATE;
- context->res_ctx.pool = dc->res_pool;
-
for (i = 0; i < set_count; i++) {
context->streams[i] = DC_STREAM_TO_CORE(set[i].stream);
dc_stream_retain(&context->streams[i]->public);
if (result == DC_OK)
result = resource_map_clock_resources(dc, context);
- if (!resource_validate_attach_surfaces(
- set, set_count, dc->current_context, context)) {
+ if (!resource_validate_attach_surfaces(set, set_count,
+ dc->current_context, context, dc->res_pool)) {
DC_ERROR("Failed to attach surface to stream!\n");
return DC_FAIL_ATTACH_SURFACES;
}
{
enum dc_status result = DC_ERROR_UNEXPECTED;
- context->res_ctx.pool = dc->res_pool;
-
context->streams[0] = DC_STREAM_TO_CORE(dc_stream);
dc_stream_retain(&context->streams[0]->public);
context->stream_count++;
struct pipe_ctx *(*acquire_idle_pipe_for_layer)(
struct validate_context *context,
+ const struct resource_pool *pool,
struct core_stream *stream);
void (*build_bit_depth_reduction_params)(
};
struct resource_context {
- const struct resource_pool *pool;
struct pipe_ctx pipe_ctx[MAX_PIPES];
bool is_stream_enc_acquired[MAX_PIPES * 2];
bool is_audio_acquired[MAX_PIPES];
void resource_unreference_clock_source(
struct resource_context *res_ctx,
+ const struct resource_pool *pool,
struct clock_source **clock_source);
void resource_reference_clock_source(
struct resource_context *res_ctx,
+ const struct resource_pool *pool,
struct clock_source *clock_source);
bool resource_are_streams_timing_synchronizable(
struct pipe_ctx *pipe_ctx);
struct clock_source *dc_resource_find_first_free_pll(
- struct resource_context *res_ctx);
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool);
struct pipe_ctx *resource_get_head_pipe_for_stream(
struct resource_context *res_ctx,
const struct dc_surface *const *surfaces,
int surface_count,
const struct dc_stream *dc_stream,
- struct validate_context *context);
+ struct validate_context *context,
+ const struct resource_pool *pool);
-struct pipe_ctx *find_idle_secondary_pipe(struct resource_context *res_ctx);
+struct pipe_ctx *find_idle_secondary_pipe(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool);
bool resource_is_stream_unchanged(
const struct validate_context *old_context, const struct core_stream *stream);
const struct dc_validation_set set[],
int set_count,
const struct validate_context *old_context,
- struct validate_context *context);
+ struct validate_context *context,
+ const struct resource_pool *pool);
void validate_guaranteed_copy_streams(
struct validate_context *context,