Merge branch 'drm-next' into drm-next-5.3
author Alex Deucher <alexander.deucher@amd.com>
Tue, 25 Jun 2019 13:42:25 +0000 (08:42 -0500)
committer Alex Deucher <alexander.deucher@amd.com>
Tue, 25 Jun 2019 13:42:25 +0000 (08:42 -0500)
Backmerge drm-next and fix up conflicts due to drmP.h removal.

Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
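
The conflicts follow two recurring patterns from the drmP.h removal: each file now
pulls in the specific kernel headers it actually uses, and legacy drm wrappers are
replaced with their plain kernel counterparts. A representative fixup, shown as an
illustrative sketch only (the exact headers added vary per file; see the hunks below):

    -#include <drm/drmP.h>
    +#include <linux/pci.h>      /* include what the file really uses */
    +#include <linux/module.h>
    +#include <linux/slab.h>

    -	DRM_UDELAY(1);
    +	udelay(1);              /* use the kernel delay primitive directly */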
84 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
drivers/gpu/drm/amd/amdgpu/dce_virtual.c
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
drivers/gpu/drm/amd/display/dc/core/dc_surface.c
drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
drivers/gpu/drm/amd/display/modules/color/color_gamma.c
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
drivers/gpu/drm/amd/powerplay/smu_v11_0.c
drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
drivers/gpu/drm/radeon/radeon_ttm.c
include/drm/drm_dp_helper.h

Simple merge
index c604f1504d3e793d5f331cba812b5925f401709f,9f32bf862d949f6a000737ea0e096675ac705a94..2e12eeb314a755bc00d9e15eb9979187cec1c968
@@@ -339,10 -318,10 +340,10 @@@ int amdgpu_vcn_dec_ring_test_ring(struc
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);
        for (i = 0; i < adev->usec_timeout; i++) {
 -              tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
 +              tmp = RREG32(adev->vcn.external.scratch9);
                if (tmp == 0xDEADBEEF)
                        break;
-               DRM_UDELAY(1);
+               udelay(1);
        }
  
        if (i >= adev->usec_timeout)
@@@ -660,10 -641,10 +661,10 @@@ int amdgpu_vcn_jpeg_ring_test_ring(stru
        amdgpu_ring_commit(ring);
  
        for (i = 0; i < adev->usec_timeout; i++) {
 -              tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
 +              tmp = RREG32(adev->vcn.external.jpeg_pitch);
                if (tmp == 0xDEADBEEF)
                        break;
-               DRM_UDELAY(1);
+               udelay(1);
        }
  
        if (i >= adev->usec_timeout)
@@@ -734,10 -715,10 +735,10 @@@ int amdgpu_vcn_jpeg_ring_test_ib(struc
        }
  
        for (i = 0; i < adev->usec_timeout; i++) {
 -              tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
 +              tmp = RREG32(adev->vcn.external.jpeg_pitch);
                if (tmp == 0xDEADBEEF)
                        break;
-               DRM_UDELAY(1);
+               udelay(1);
        }
  
        if (i >= adev->usec_timeout)
Simple merge
Simple merge
Simple merge
Simple merge
index acdd1868d010e3abc323e7555de0b5fe314498e9,0000000000000000000000000000000000000000..cec7c1fb14bf4d2337a64f8c71bb56572893fb18
mode 100644,000000..100644
--- /dev/null
@@@ -1,916 -1,0 +1,917 @@@
 +/*
 + * Copyright 2019 Advanced Micro Devices, Inc.
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the "Software"),
 + * to deal in the Software without restriction, including without limitation
 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 + * and/or sell copies of the Software, and to permit persons to whom the
 + * Software is furnished to do so, subject to the following conditions:
 + *
 + * The above copyright notice and this permission notice shall be included in
 + * all copies or substantial portions of the Software.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 + * OTHER DEALINGS IN THE SOFTWARE.
 + *
 + */
 +#include <linux/firmware.h>
++#include <linux/pci.h>
 +#include "amdgpu.h"
 +#include "amdgpu_atomfirmware.h"
 +#include "gmc_v10_0.h"
 +
 +#include "hdp/hdp_5_0_0_offset.h"
 +#include "hdp/hdp_5_0_0_sh_mask.h"
 +#include "gc/gc_10_1_0_sh_mask.h"
 +#include "mmhub/mmhub_2_0_0_sh_mask.h"
 +#include "dcn/dcn_2_0_0_offset.h"
 +#include "dcn/dcn_2_0_0_sh_mask.h"
 +#include "oss/osssys_5_0_0_offset.h"
 +#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
 +#include "navi10_enum.h"
 +
 +#include "soc15.h"
 +#include "soc15_common.h"
 +
 +#include "nbio_v2_3.h"
 +
 +#include "gfxhub_v2_0.h"
 +#include "mmhub_v2_0.h"
 +#include "athub_v2_0.h"
 +/* XXX Move this macro to navi10 header file, which is like vid.h for VI. */
 +#define AMDGPU_NUM_OF_VMIDS                   8
 +
 +#if 0
 +static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
 +{
 +      /* TODO add golden setting for hdp */
 +};
 +#endif
 +
 +static int
 +gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
 +                                 struct amdgpu_irq_src *src, unsigned type,
 +                                 enum amdgpu_interrupt_state state)
 +{
 +      struct amdgpu_vmhub *hub;
 +      u32 tmp, reg, bits[AMDGPU_MAX_VMHUBS], i;
 +
 +      bits[AMDGPU_GFXHUB] = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 +              GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 +              GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 +              GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 +              GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 +              GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 +              GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
 +
 +      bits[AMDGPU_MMHUB] = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 +              MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 +              MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 +              MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 +              MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 +              MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 +              MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
 +
 +      switch (state) {
 +      case AMDGPU_IRQ_STATE_DISABLE:
 +              /* MM HUB */
 +              hub = &adev->vmhub[AMDGPU_MMHUB];
 +              for (i = 0; i < 16; i++) {
 +                      reg = hub->vm_context0_cntl + i;
 +                      tmp = RREG32(reg);
 +                      tmp &= ~bits[AMDGPU_MMHUB];
 +                      WREG32(reg, tmp);
 +              }
 +
 +              /* GFX HUB */
 +              hub = &adev->vmhub[AMDGPU_GFXHUB];
 +              for (i = 0; i < 16; i++) {
 +                      reg = hub->vm_context0_cntl + i;
 +                      tmp = RREG32(reg);
 +                      tmp &= ~bits[AMDGPU_GFXHUB];
 +                      WREG32(reg, tmp);
 +              }
 +              break;
 +      case AMDGPU_IRQ_STATE_ENABLE:
 +              /* MM HUB */
 +              hub = &adev->vmhub[AMDGPU_MMHUB];
 +              for (i = 0; i < 16; i++) {
 +                      reg = hub->vm_context0_cntl + i;
 +                      tmp = RREG32(reg);
 +                      tmp |= bits[AMDGPU_MMHUB];
 +                      WREG32(reg, tmp);
 +              }
 +
 +              /* GFX HUB */
 +              hub = &adev->vmhub[AMDGPU_GFXHUB];
 +              for (i = 0; i < 16; i++) {
 +                      reg = hub->vm_context0_cntl + i;
 +                      tmp = RREG32(reg);
 +                      tmp |= bits[AMDGPU_GFXHUB];
 +                      WREG32(reg, tmp);
 +              }
 +              break;
 +      default:
 +              break;
 +      }
 +
 +      return 0;
 +}
 +
 +static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
 +                                     struct amdgpu_irq_src *source,
 +                                     struct amdgpu_iv_entry *entry)
 +{
 +      struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
 +      uint32_t status = 0;
 +      u64 addr;
 +
 +      addr = (u64)entry->src_data[0] << 12;
 +      addr |= ((u64)entry->src_data[1] & 0xf) << 44;
 +
 +      if (!amdgpu_sriov_vf(adev)) {
 +              status = RREG32(hub->vm_l2_pro_fault_status);
 +              WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
 +      }
 +
 +      if (printk_ratelimit()) {
 +              dev_err(adev->dev,
 +                      "[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",
 +                      entry->vmid_src ? "mmhub" : "gfxhub",
 +                      entry->src_id, entry->ring_id, entry->vmid,
 +                      entry->pasid);
 +              dev_err(adev->dev, "  at page 0x%016llx from %d\n",
 +                      addr, entry->client_id);
 +              if (!amdgpu_sriov_vf(adev))
 +                      dev_err(adev->dev,
 +                              "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
 +                              status);
 +      }
 +
 +      return 0;
 +}
 +
 +static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = {
 +      .set = gmc_v10_0_vm_fault_interrupt_state,
 +      .process = gmc_v10_0_process_interrupt,
 +};
 +
 +static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
 +{
 +      adev->gmc.vm_fault.num_types = 1;
 +      adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;
 +}
 +
 +static uint32_t gmc_v10_0_get_invalidate_req(unsigned int vmid,
 +                                           uint32_t flush_type)
 +{
 +      u32 req = 0;
 +
 +      /* invalidate using legacy mode on vmid */
 +      req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
 +                          PER_VMID_INVALIDATE_REQ, 1 << vmid);
 +      req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
 +      req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
 +      req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
 +      req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
 +      req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
 +      req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
 +      req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
 +                          CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
 +
 +      return req;
 +}
 +
 +/*
 + * GART
 + * VMID 0 is the physical GPU addresses as used by the kernel.
 + * VMIDs 1-15 are used for userspace clients and are handled
 + * by the amdgpu vm/hsa code.
 + */
 +
 +static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
 +                                 unsigned int vmhub, uint32_t flush_type)
 +{
 +      struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
 +      u32 tmp = gmc_v10_0_get_invalidate_req(vmid, flush_type);
 +      /* Use register 17 for GART */
 +      const unsigned eng = 17;
 +      unsigned int i;
 +
 +      WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
 +
 +      /* Wait for ACK with a delay. */
 +      for (i = 0; i < adev->usec_timeout; i++) {
 +              tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
 +              tmp &= 1 << vmid;
 +              if (tmp)
 +                      break;
 +
 +              udelay(1);
 +      }
 +
 +      if (i < adev->usec_timeout)
 +              return;
 +
 +      DRM_ERROR("Timeout waiting for VM flush ACK!\n");
 +}
 +
 +/**
 + * gmc_v10_0_flush_gpu_tlb - gart tlb flush callback
 + *
 + * @adev: amdgpu_device pointer
 + * @vmid: vm instance to flush
 + *
 + * Flush the TLB for the requested page table.
 + */
 +static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev,
 +                                  uint32_t vmid, uint32_t flush_type)
 +{
 +      struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 +      struct dma_fence *fence;
 +      struct amdgpu_job *job;
 +
 +      int r;
 +
 +      /* flush hdp cache */
 +      adev->nbio_funcs->hdp_flush(adev, NULL);
 +
 +      mutex_lock(&adev->mman.gtt_window_lock);
 +
 +      gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB, 0);
 +      if (!adev->mman.buffer_funcs_enabled || !adev->ib_pool_ready ||
 +          adev->asic_type != CHIP_NAVI10) {
 +              gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB, 0);
 +              mutex_unlock(&adev->mman.gtt_window_lock);
 +              return;
 +      }
 +
 +      /* The SDMA on Navi has a bug which can theoretically result in memory
 +       * corruption if an invalidation happens at the same time as a VA
 +       * translation. Avoid this by doing the invalidation from the SDMA
 +       * itself.
 +       */
 +      r = amdgpu_job_alloc_with_ib(adev, 16 * 4, &job);
 +      if (r)
 +              goto error_alloc;
 +
 +      job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
 +      job->vm_needs_flush = true;
 +      amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 +      r = amdgpu_job_submit(job, &adev->mman.entity,
 +                            AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
 +      if (r)
 +              goto error_submit;
 +
 +      mutex_unlock(&adev->mman.gtt_window_lock);
 +
 +      dma_fence_wait(fence, false);
 +      dma_fence_put(fence);
 +
 +      return;
 +
 +error_submit:
 +      amdgpu_job_free(job);
 +
 +error_alloc:
 +      mutex_unlock(&adev->mman.gtt_window_lock);
 +      DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
 +}
 +
 +static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 +                                           unsigned vmid, uint64_t pd_addr)
 +{
 +      struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
 +      uint32_t req = gmc_v10_0_get_invalidate_req(vmid, 0);
 +      unsigned eng = ring->vm_inv_eng;
 +
 +      amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
 +                            lower_32_bits(pd_addr));
 +
 +      amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
 +                            upper_32_bits(pd_addr));
 +
 +      amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_req + eng, req);
 +
 +      /* wait for the invalidate to complete */
 +      amdgpu_ring_emit_reg_wait(ring, hub->vm_inv_eng0_ack + eng,
 +                                1 << vmid, 1 << vmid);
 +
 +      return pd_addr;
 +}
 +
 +static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
 +                                       unsigned pasid)
 +{
 +      struct amdgpu_device *adev = ring->adev;
 +      uint32_t reg;
 +
 +      if (ring->funcs->vmhub == AMDGPU_GFXHUB)
 +              reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
 +      else
 +              reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
 +
 +      amdgpu_ring_emit_wreg(ring, reg, pasid);
 +}
 +
 +/*
 + * PTE format on NAVI 10:
 + * 63:59 reserved
 + * 58:57 reserved
 + * 56 F
 + * 55 L
 + * 54 reserved
 + * 53:52 SW
 + * 51 T
 + * 50:48 mtype
 + * 47:12 4k physical page base address
 + * 11:7 fragment
 + * 6 write
 + * 5 read
 + * 4 exe
 + * 3 Z
 + * 2 snooped
 + * 1 system
 + * 0 valid
 + *
 + * PDE format on NAVI 10:
 + * 63:59 block fragment size
 + * 58:55 reserved
 + * 54 P
 + * 53:48 reserved
 + * 47:6 physical base address of PD or PTE
 + * 5:3 reserved
 + * 2 C
 + * 1 system
 + * 0 valid
 + */
 +static uint64_t gmc_v10_0_get_vm_pte_flags(struct amdgpu_device *adev,
 +                                         uint32_t flags)
 +{
 +      uint64_t pte_flag = 0;
 +
 +      if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
 +              pte_flag |= AMDGPU_PTE_EXECUTABLE;
 +      if (flags & AMDGPU_VM_PAGE_READABLE)
 +              pte_flag |= AMDGPU_PTE_READABLE;
 +      if (flags & AMDGPU_VM_PAGE_WRITEABLE)
 +              pte_flag |= AMDGPU_PTE_WRITEABLE;
 +
 +      switch (flags & AMDGPU_VM_MTYPE_MASK) {
 +      case AMDGPU_VM_MTYPE_DEFAULT:
 +              pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
 +              break;
 +      case AMDGPU_VM_MTYPE_NC:
 +              pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
 +              break;
 +      case AMDGPU_VM_MTYPE_WC:
 +              pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
 +              break;
 +      case AMDGPU_VM_MTYPE_CC:
 +              pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
 +              break;
 +      case AMDGPU_VM_MTYPE_UC:
 +              pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
 +              break;
 +      default:
 +              pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
 +              break;
 +      }
 +
 +      if (flags & AMDGPU_VM_PAGE_PRT)
 +              pte_flag |= AMDGPU_PTE_PRT;
 +
 +      return pte_flag;
 +}
 +
 +static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
 +                               uint64_t *addr, uint64_t *flags)
 +{
 +      if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
 +              *addr = adev->vm_manager.vram_base_offset + *addr -
 +                      adev->gmc.vram_start;
 +      BUG_ON(*addr & 0xFFFF00000000003FULL);
 +
 +      if (!adev->gmc.translate_further)
 +              return;
 +
 +      if (level == AMDGPU_VM_PDB1) {
 +              /* Set the block fragment size */
 +              if (!(*flags & AMDGPU_PDE_PTE))
 +                      *flags |= AMDGPU_PDE_BFS(0x9);
 +
 +      } else if (level == AMDGPU_VM_PDB0) {
 +              if (*flags & AMDGPU_PDE_PTE)
 +                      *flags &= ~AMDGPU_PDE_PTE;
 +              else
 +                      *flags |= AMDGPU_PTE_TF;
 +      }
 +}
 +
 +static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
 +      .flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
 +      .emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
 +      .emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
 +      .get_vm_pte_flags = gmc_v10_0_get_vm_pte_flags,
 +      .get_vm_pde = gmc_v10_0_get_vm_pde
 +};
 +
 +static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
 +{
 +      if (adev->gmc.gmc_funcs == NULL)
 +              adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs;
 +}
 +
 +static int gmc_v10_0_early_init(void *handle)
 +{
 +      struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 +
 +      gmc_v10_0_set_gmc_funcs(adev);
 +      gmc_v10_0_set_irq_funcs(adev);
 +
 +      adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
 +      adev->gmc.shared_aperture_end =
 +              adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
 +      adev->gmc.private_aperture_start = 0x1000000000000000ULL;
 +      adev->gmc.private_aperture_end =
 +              adev->gmc.private_aperture_start + (4ULL << 30) - 1;
 +
 +      return 0;
 +}
 +
 +static int gmc_v10_0_late_init(void *handle)
 +{
 +      struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 +      unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
 +      unsigned i;
 +
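 +      /* Invalidation engines are handed out from 4 upward on each hub
 +       * (0-3 are presumably reserved for other users); engine 17 is
 +       * claimed for GART flushes in gmc_v10_0_flush_vm_hub(), hence the
 +       * BUG_ON against 17 below.
 +       */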
 +      for (i = 0; i < adev->num_rings; ++i) {
 +              struct amdgpu_ring *ring = adev->rings[i];
 +              unsigned vmhub = ring->funcs->vmhub;
 +
 +              ring->vm_inv_eng = vm_inv_eng[vmhub]++;
 +              dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
 +                       ring->idx, ring->name, ring->vm_inv_eng,
 +                       ring->funcs->vmhub);
 +      }
 +
 +      /* Engine 17 is used for GART flushes */
 +      for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
 +              BUG_ON(vm_inv_eng[i] > 17);
 +
 +      return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
 +}
 +
 +static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
 +                                      struct amdgpu_gmc *mc)
 +{
 +      u64 base = 0;
 +
 +      if (!amdgpu_sriov_vf(adev))
 +              base = gfxhub_v2_0_get_fb_location(adev);
 +
 +      amdgpu_gmc_vram_location(adev, &adev->gmc, base);
 +      amdgpu_gmc_gart_location(adev, mc);
 +
 +      /* base offset of vram pages */
 +      adev->vm_manager.vram_base_offset = gfxhub_v2_0_get_mc_fb_offset(adev);
 +}
 +
 +/**
 + * gmc_v10_0_mc_init - initialize the memory controller driver params
 + *
 + * @adev: amdgpu_device pointer
 + *
 + * Look up the amount of vram, vram width, and decide how to place
 + * vram and gart within the GPU's physical address space.
 + * Returns 0 for success.
 + */
 +static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
 +{
 +      int chansize, numchan;
 +
 +      if (!amdgpu_emu_mode)
 +              adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
 +      else {
 +              /* hard code vram_width for emulation */
 +              chansize = 128;
 +              numchan = 1;
 +              adev->gmc.vram_width = numchan * chansize;
 +      }
 +
 +      /* Could aper size report 0? */
 +      adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
 +      adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
 +
 +      /* vram size is reported in MB */
 +      adev->gmc.mc_vram_size =
 +              adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
 +      adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
 +      adev->gmc.visible_vram_size = adev->gmc.aper_size;
 +
 +      /* In case the PCI BAR is larger than the actual amount of vram */
 +      if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
 +              adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
 +
 +      /* set the gart size */
 +      if (amdgpu_gart_size == -1) {
 +              switch (adev->asic_type) {
 +              case CHIP_NAVI10:
 +              default:
 +                      adev->gmc.gart_size = 512ULL << 20;
 +                      break;
 +              }
 +      } else
 +              adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
 +
 +      gmc_v10_0_vram_gtt_location(adev, &adev->gmc);
 +
 +      return 0;
 +}
 +
 +static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
 +{
 +      int r;
 +
 +      if (adev->gart.bo) {
 +              WARN(1, "NAVI10 PCIE GART already initialized\n");
 +              return 0;
 +      }
 +
 +      /* Initialize common gart structure */
 +      r = amdgpu_gart_init(adev);
 +      if (r)
 +              return r;
 +
 +      adev->gart.table_size = adev->gart.num_gpu_pages * 8;
 +      adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
 +                               AMDGPU_PTE_EXECUTABLE;
 +
 +      return amdgpu_gart_table_vram_alloc(adev);
 +}
 +
 +static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
 +{
 +      u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
 +      unsigned size;
 +
 +      if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
 +              size = 9 * 1024 * 1024; /* reserve 8 MB for vga emulator and 1 MB for FB */
 +      } else {
 +              u32 viewport;
 +              u32 pitch;
 +
 +              viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
 +              pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
 +              size = (REG_GET_FIELD(viewport,
 +                                      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
 +                              REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
 +                              4);
 +      }
 +      /* return 0 if the pre-OS buffer uses up most of vram */
 +      if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) {
 +              DRM_ERROR("Warning: pre-OS buffer uses most of vram, "
 +                        "be aware of gart table overwrite\n");
 +              return 0;
 +      }
 +
 +      return size;
 +}
 +
 +
 +
 +static int gmc_v10_0_sw_init(void *handle)
 +{
 +      int r;
 +      int dma_bits;
 +      struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 +
 +      gfxhub_v2_0_init(adev);
 +      mmhub_v2_0_init(adev);
 +
 +      spin_lock_init(&adev->gmc.invalidate_lock);
 +
 +      adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
 +      switch (adev->asic_type) {
 +      case CHIP_NAVI10:
 +              /*
 +               * To support 4-level page tables, the VM size is set to
 +               * 256 TB (48 bits), the maximum for Navi10, with a
 +               * 512-entry (9-bit) block size.
 +               */
 +              amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
 +              break;
 +      default:
 +              break;
 +      }
 +
 +      /* This interrupt is for VMC page faults. */
 +      r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
 +                            VMC_1_0__SRCID__VM_FAULT,
 +                            &adev->gmc.vm_fault);
 +      if (r)
 +              return r;
 +
 +      r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
 +                            UTCL2_1_0__SRCID__FAULT,
 +                            &adev->gmc.vm_fault);
 +      if (r)
 +              return r;
 +
 +      /*
 +       * Set the internal MC address mask. This is the max address of the GPU's
 +       * internal address space.
 +       */
 +      adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
 +
 +      /*
 +       * Reserve 8M stolen memory for navi10 like vega10
 +       * TODO: will check if it's really needed on asic.
 +       */
 +      if (amdgpu_emu_mode == 1)
 +              adev->gmc.stolen_size = 0;
 +      else
 +              adev->gmc.stolen_size = 9 * 1024 * 1024;
 +
 +      /*
 +       * Set DMA mask + need_dma32 flags.
 +       * PCIE - can handle 44 bits.
 +       * IGP - can handle 44 bits.
 +       * PCI - dma32 for legacy pci gart, 44 bits on navi10.
 +       */
 +      adev->need_dma32 = false;
 +      dma_bits = adev->need_dma32 ? 32 : 44;
 +
 +      r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
 +      if (r) {
 +              adev->need_dma32 = true;
 +              dma_bits = 32;
 +              printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
 +      }
 +
 +      r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
 +      if (r) {
 +              pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
 +              printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
 +      }
 +
 +      r = gmc_v10_0_mc_init(adev);
 +      if (r)
 +              return r;
 +
 +      adev->gmc.stolen_size = gmc_v10_0_get_vbios_fb_size(adev);
 +
 +      /* Memory manager */
 +      r = amdgpu_bo_init(adev);
 +      if (r)
 +              return r;
 +
 +      r = gmc_v10_0_gart_init(adev);
 +      if (r)
 +              return r;
 +
 +      /*
 +       * number of VMs
 +       * VMID 0 is reserved for System
 +       * amdgpu graphics/compute will use VMIDs 1-7
 +       * amdkfd will use VMIDs 8-15
 +       */
 +      adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
 +      adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
 +
 +      amdgpu_vm_manager_init(adev);
 +
 +      return 0;
 +}
 +
 +/**
 + * gmc_v10_0_gart_fini - vm fini callback
 + *
 + * @adev: amdgpu_device pointer
 + *
 + * Tears down the driver GART/VM setup.
 + */
 +static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
 +{
 +      amdgpu_gart_table_vram_free(adev);
 +      amdgpu_gart_fini(adev);
 +}
 +
 +static int gmc_v10_0_sw_fini(void *handle)
 +{
 +      struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 +
 +      amdgpu_vm_manager_fini(adev);
 +      gmc_v10_0_gart_fini(adev);
 +      amdgpu_gem_force_release(adev);
 +      amdgpu_bo_fini(adev);
 +
 +      return 0;
 +}
 +
 +static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
 +{
 +      switch (adev->asic_type) {
 +      case CHIP_NAVI10:
 +              break;
 +      default:
 +              break;
 +      }
 +}
 +
 +/**
 + * gmc_v10_0_gart_enable - gart enable
 + *
 + * @adev: amdgpu_device pointer
 + */
 +static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
 +{
 +      int r;
 +      bool value;
 +      u32 tmp;
 +
 +      if (adev->gart.bo == NULL) {
 +              dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
 +              return -EINVAL;
 +      }
 +
 +      r = amdgpu_gart_table_vram_pin(adev);
 +      if (r)
 +              return r;
 +
 +      r = gfxhub_v2_0_gart_enable(adev);
 +      if (r)
 +              return r;
 +
 +      r = mmhub_v2_0_gart_enable(adev);
 +      if (r)
 +              return r;
 +
 +      tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
 +      tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
 +      WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);
 +
 +      tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
 +      WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
 +
 +      /* Flush HDP after it is initialized */
 +      adev->nbio_funcs->hdp_flush(adev, NULL);
 +
 +      value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
 +
 +      gfxhub_v2_0_set_fault_enable_default(adev, value);
 +      mmhub_v2_0_set_fault_enable_default(adev, value);
 +      gmc_v10_0_flush_gpu_tlb(adev, 0, 0);
 +
 +      DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
 +               (unsigned)(adev->gmc.gart_size >> 20),
 +               (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
 +
 +      adev->gart.ready = true;
 +
 +      return 0;
 +}
 +
 +static int gmc_v10_0_hw_init(void *handle)
 +{
 +      int r;
 +      struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 +
 +      /* The sequence of these two function calls matters. */
 +      gmc_v10_0_init_golden_registers(adev);
 +
 +      r = gmc_v10_0_gart_enable(adev);
 +      if (r)
 +              return r;
 +
 +      return 0;
 +}
 +
 +/**
 + * gmc_v10_0_gart_disable - gart disable
 + *
 + * @adev: amdgpu_device pointer
 + *
 + * This disables all VM page tables.
 + */
 +static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
 +{
 +      gfxhub_v2_0_gart_disable(adev);
 +      mmhub_v2_0_gart_disable(adev);
 +      amdgpu_gart_table_vram_unpin(adev);
 +}
 +
 +static int gmc_v10_0_hw_fini(void *handle)
 +{
 +      struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 +
 +      if (amdgpu_sriov_vf(adev)) {
 +              /* full access mode, so don't touch any GMC register */
 +              DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
 +              return 0;
 +      }
 +
 +      amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
 +      gmc_v10_0_gart_disable(adev);
 +
 +      return 0;
 +}
 +
 +static int gmc_v10_0_suspend(void *handle)
 +{
 +      struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 +
 +      gmc_v10_0_hw_fini(adev);
 +
 +      return 0;
 +}
 +
 +static int gmc_v10_0_resume(void *handle)
 +{
 +      int r;
 +      struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 +
 +      r = gmc_v10_0_hw_init(adev);
 +      if (r)
 +              return r;
 +
 +      amdgpu_vmid_reset_all(adev);
 +
 +      return 0;
 +}
 +
 +static bool gmc_v10_0_is_idle(void *handle)
 +{
 +      /* MC is always ready in GMC v10. */
 +      return true;
 +}
 +
 +static int gmc_v10_0_wait_for_idle(void *handle)
 +{
 +      /* There is no need to wait for MC idle in GMC v10. */
 +      return 0;
 +}
 +
 +static int gmc_v10_0_soft_reset(void *handle)
 +{
 +      return 0;
 +}
 +
 +static int gmc_v10_0_set_clockgating_state(void *handle,
 +                                         enum amd_clockgating_state state)
 +{
 +      int r;
 +      struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 +
 +      r = mmhub_v2_0_set_clockgating(adev, state);
 +      if (r)
 +              return r;
 +
 +      return athub_v2_0_set_clockgating(adev, state);
 +}
 +
 +static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
 +{
 +      struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 +
 +      mmhub_v2_0_get_clockgating(adev, flags);
 +
 +      athub_v2_0_get_clockgating(adev, flags);
 +}
 +
 +static int gmc_v10_0_set_powergating_state(void *handle,
 +                                         enum amd_powergating_state state)
 +{
 +      return 0;
 +}
 +
 +const struct amd_ip_funcs gmc_v10_0_ip_funcs = {
 +      .name = "gmc_v10_0",
 +      .early_init = gmc_v10_0_early_init,
 +      .late_init = gmc_v10_0_late_init,
 +      .sw_init = gmc_v10_0_sw_init,
 +      .sw_fini = gmc_v10_0_sw_fini,
 +      .hw_init = gmc_v10_0_hw_init,
 +      .hw_fini = gmc_v10_0_hw_fini,
 +      .suspend = gmc_v10_0_suspend,
 +      .resume = gmc_v10_0_resume,
 +      .is_idle = gmc_v10_0_is_idle,
 +      .wait_for_idle = gmc_v10_0_wait_for_idle,
 +      .soft_reset = gmc_v10_0_soft_reset,
 +      .set_clockgating_state = gmc_v10_0_set_clockgating_state,
 +      .set_powergating_state = gmc_v10_0_set_powergating_state,
 +      .get_clockgating_state = gmc_v10_0_get_clockgating_state,
 +};
 +
 +const struct amdgpu_ip_block_version gmc_v10_0_ip_block =
 +{
 +      .type = AMD_IP_BLOCK_TYPE_GMC,
 +      .major = 10,
 +      .minor = 0,
 +      .rev = 0,
 +      .funcs = &gmc_v10_0_ip_funcs,
 +};
Simple merge
index 0d92b88a85b8431192675364b6f8202a4b40cfbd,0000000000000000000000000000000000000000..29fab7984855a6e7c0df27fd3b463f19104c96c5
mode 100644,000000..100644
--- /dev/null
@@@ -1,365 -1,0 +1,366 @@@
 +/*
 + * Copyright 2019 Advanced Micro Devices, Inc.
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the "Software"),
 + * to deal in the Software without restriction, including without limitation
 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 + * and/or sell copies of the Software, and to permit persons to whom the
 + * Software is furnished to do so, subject to the following conditions:
 + *
 + * The above copyright notice and this permission notice shall be included in
 + * all copies or substantial portions of the Software.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 + * OTHER DEALINGS IN THE SOFTWARE.
 + *
 + */
 +
 +#include <linux/firmware.h>
++#include <linux/module.h>
 +#include "amdgpu.h"
 +#include "soc15_common.h"
 +#include "nv.h"
 +#include "gc/gc_10_1_0_offset.h"
 +#include "gc/gc_10_1_0_sh_mask.h"
 +
 +MODULE_FIRMWARE("amdgpu/navi10_mes.bin");
 +
 +static int mes_v10_1_add_hw_queue(struct amdgpu_mes *mes,
 +                                struct mes_add_queue_input *input)
 +{
 +      return 0;
 +}
 +
 +static int mes_v10_1_remove_hw_queue(struct amdgpu_mes *mes,
 +                                   struct mes_remove_queue_input *input)
 +{
 +      return 0;
 +}
 +
 +static int mes_v10_1_suspend_gang(struct amdgpu_mes *mes,
 +                                struct mes_suspend_gang_input *input)
 +{
 +      return 0;
 +}
 +
 +static int mes_v10_1_resume_gang(struct amdgpu_mes *mes,
 +                               struct mes_resume_gang_input *input)
 +{
 +      return 0;
 +}
 +
 +static const struct amdgpu_mes_funcs mes_v10_1_funcs = {
 +      .add_hw_queue = mes_v10_1_add_hw_queue,
 +      .remove_hw_queue = mes_v10_1_remove_hw_queue,
 +      .suspend_gang = mes_v10_1_suspend_gang,
 +      .resume_gang = mes_v10_1_resume_gang,
 +};
 +
 +static int mes_v10_1_init_microcode(struct amdgpu_device *adev)
 +{
 +      const char *chip_name;
 +      char fw_name[30];
 +      int err;
 +      const struct mes_firmware_header_v1_0 *mes_hdr;
 +
 +      switch (adev->asic_type) {
 +      case CHIP_NAVI10:
 +              chip_name = "navi10";
 +              break;
 +      default:
 +              BUG();
 +      }
 +
 +      snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin", chip_name);
 +      err = request_firmware(&adev->mes.fw, fw_name, adev->dev);
 +      if (err)
 +              return err;
 +
 +      err = amdgpu_ucode_validate(adev->mes.fw);
 +      if (err) {
 +              release_firmware(adev->mes.fw);
 +              adev->mes.fw = NULL;
 +              return err;
 +      }
 +
 +      mes_hdr = (const struct mes_firmware_header_v1_0 *)adev->mes.fw->data;
 +      adev->mes.ucode_fw_version = le32_to_cpu(mes_hdr->mes_ucode_version);
 +      adev->mes.data_fw_version =
 +              le32_to_cpu(mes_hdr->mes_ucode_data_version);
 +      adev->mes.uc_start_addr =
 +              le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
 +              ((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
 +      adev->mes.data_start_addr =
 +              le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
 +              ((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
 +
 +      return 0;
 +}
 +
 +static void mes_v10_1_free_microcode(struct amdgpu_device *adev)
 +{
 +      release_firmware(adev->mes.fw);
 +      adev->mes.fw = NULL;
 +}
 +
 +static int mes_v10_1_allocate_ucode_buffer(struct amdgpu_device *adev)
 +{
 +      int r;
 +      const struct mes_firmware_header_v1_0 *mes_hdr;
 +      const __le32 *fw_data;
 +      unsigned fw_size;
 +
 +      mes_hdr = (const struct mes_firmware_header_v1_0 *)
 +              adev->mes.fw->data;
 +
 +      fw_data = (const __le32 *)(adev->mes.fw->data +
 +                 le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
 +      fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
 +
 +      r = amdgpu_bo_create_reserved(adev, fw_size,
 +                                    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
 +                                    &adev->mes.ucode_fw_obj,
 +                                    &adev->mes.ucode_fw_gpu_addr,
 +                                    (void **)&adev->mes.ucode_fw_ptr);
 +      if (r) {
 +              dev_err(adev->dev, "(%d) failed to create mes fw bo\n", r);
 +              return r;
 +      }
 +
 +      memcpy(adev->mes.ucode_fw_ptr, fw_data, fw_size);
 +
 +      amdgpu_bo_kunmap(adev->mes.ucode_fw_obj);
 +      amdgpu_bo_unreserve(adev->mes.ucode_fw_obj);
 +
 +      return 0;
 +}
 +
 +static int mes_v10_1_allocate_ucode_data_buffer(struct amdgpu_device *adev)
 +{
 +      int r;
 +      const struct mes_firmware_header_v1_0 *mes_hdr;
 +      const __le32 *fw_data;
 +      unsigned fw_size;
 +
 +      mes_hdr = (const struct mes_firmware_header_v1_0 *)
 +              adev->mes.fw->data;
 +
 +      fw_data = (const __le32 *)(adev->mes.fw->data +
 +                 le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
 +      fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);
 +
 +      r = amdgpu_bo_create_reserved(adev, fw_size,
 +                                    64 * 1024, AMDGPU_GEM_DOMAIN_GTT,
 +                                    &adev->mes.data_fw_obj,
 +                                    &adev->mes.data_fw_gpu_addr,
 +                                    (void **)&adev->mes.data_fw_ptr);
 +      if (r) {
 +              dev_err(adev->dev, "(%d) failed to create mes data fw bo\n", r);
 +              return r;
 +      }
 +
 +      memcpy(adev->mes.data_fw_ptr, fw_data, fw_size);
 +
 +      amdgpu_bo_kunmap(adev->mes.data_fw_obj);
 +      amdgpu_bo_unreserve(adev->mes.data_fw_obj);
 +
 +      return 0;
 +}
 +
 +static void mes_v10_1_free_ucode_buffers(struct amdgpu_device *adev)
 +{
 +      amdgpu_bo_free_kernel(&adev->mes.data_fw_obj,
 +                            &adev->mes.data_fw_gpu_addr,
 +                            (void **)&adev->mes.data_fw_ptr);
 +
 +      amdgpu_bo_free_kernel(&adev->mes.ucode_fw_obj,
 +                            &adev->mes.ucode_fw_gpu_addr,
 +                            (void **)&adev->mes.ucode_fw_ptr);
 +}
 +
 +static void mes_v10_1_enable(struct amdgpu_device *adev, bool enable)
 +{
 +      uint32_t data = 0;
 +
 +      if (enable) {
 +              data = RREG32_SOC15(GC, 0, mmCP_MES_CNTL);
 +              data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
 +              WREG32_SOC15(GC, 0, mmCP_MES_CNTL, data);
 +
 +              /* set ucode start address */
 +              WREG32_SOC15(GC, 0, mmCP_MES_PRGRM_CNTR_START,
 +                           (uint32_t)(adev->mes.uc_start_addr) >> 2);
 +
 +              /* clear BYPASS_UNCACHED to avoid hangs after interrupt. */
 +              data = RREG32_SOC15(GC, 0, mmCP_MES_DC_OP_CNTL);
 +              data = REG_SET_FIELD(data, CP_MES_DC_OP_CNTL,
 +                                   BYPASS_UNCACHED, 0);
 +              WREG32_SOC15(GC, 0, mmCP_MES_DC_OP_CNTL, data);
 +
 +              /* unhalt MES and activate pipe0 */
 +              data = REG_SET_FIELD(0, CP_MES_CNTL, MES_PIPE0_ACTIVE, 1);
 +              WREG32_SOC15(GC, 0, mmCP_MES_CNTL, data);
 +      } else {
 +              data = RREG32_SOC15(GC, 0, mmCP_MES_CNTL);
 +              data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_ACTIVE, 0);
 +              data = REG_SET_FIELD(data, CP_MES_CNTL,
 +                                   MES_INVALIDATE_ICACHE, 1);
 +              data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
 +              data = REG_SET_FIELD(data, CP_MES_CNTL, MES_HALT, 1);
 +              WREG32_SOC15(GC, 0, mmCP_MES_CNTL, data);
 +      }
 +}
 +
 +/* This function is for backdoor MES firmware */
 +static int mes_v10_1_load_microcode(struct amdgpu_device *adev)
 +{
 +      int r;
 +      uint32_t data;
 +
 +      if (!adev->mes.fw)
 +              return -EINVAL;
 +
 +      r = mes_v10_1_allocate_ucode_buffer(adev);
 +      if (r)
 +              return r;
 +
 +      r = mes_v10_1_allocate_ucode_data_buffer(adev);
 +      if (r) {
 +              mes_v10_1_free_ucode_buffers(adev);
 +              return r;
 +      }
 +
 +      mes_v10_1_enable(adev, false);
 +
 +      WREG32_SOC15(GC, 0, mmCP_MES_IC_BASE_CNTL, 0);
 +
 +      mutex_lock(&adev->srbm_mutex);
 +      /* me=3, pipe=0, queue=0 */
 +      nv_grbm_select(adev, 3, 0, 0, 0);
 +
 +      /* set ucode start address */
 +      WREG32_SOC15(GC, 0, mmCP_MES_PRGRM_CNTR_START,
 +                   (uint32_t)(adev->mes.uc_start_addr) >> 2);
 +
 +      /* set ucode firmware address */
 +      WREG32_SOC15(GC, 0, mmCP_MES_IC_BASE_LO,
 +                   lower_32_bits(adev->mes.ucode_fw_gpu_addr));
 +      WREG32_SOC15(GC, 0, mmCP_MES_IC_BASE_HI,
 +                   upper_32_bits(adev->mes.ucode_fw_gpu_addr));
 +
 +      /* set ucode instruction cache boundary to 2M-1 */
 +      WREG32_SOC15(GC, 0, mmCP_MES_MIBOUND_LO, 0x1FFFFF);
 +
 +      /* set ucode data firmware address */
 +      WREG32_SOC15(GC, 0, mmCP_MES_MDBASE_LO,
 +                   lower_32_bits(adev->mes.data_fw_gpu_addr));
 +      WREG32_SOC15(GC, 0, mmCP_MES_MDBASE_HI,
 +                   upper_32_bits(adev->mes.data_fw_gpu_addr));
 +
 +      /* set CP_MES_MDBOUND_LO to 0x3FFFF (256K-1) */
 +      WREG32_SOC15(GC, 0, mmCP_MES_MDBOUND_LO, 0x3FFFF);
 +
 +      /* invalidate ICACHE */
 +      data = RREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL);
 +      data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 0);
 +      data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1);
 +      WREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL, data);
 +
 +      /* prime the ICACHE. */
 +      data = RREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL);
 +      data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 1);
 +      WREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL, data);
 +
 +      nv_grbm_select(adev, 0, 0, 0, 0);
 +      mutex_unlock(&adev->srbm_mutex);
 +
 +      return 0;
 +}
 +
 +static int mes_v10_1_sw_init(void *handle)
 +{
 +      int r;
 +      struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 +
 +      r = mes_v10_1_init_microcode(adev);
 +      if (r)
 +              return r;
 +
 +      return 0;
 +}
 +
 +static int mes_v10_1_sw_fini(void *handle)
 +{
 +      struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 +
 +      mes_v10_1_free_microcode(adev);
 +
 +      return 0;
 +}
 +
 +static int mes_v10_1_hw_init(void *handle)
 +{
 +      int r;
 +      struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 +
 +      if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
 +              r = mes_v10_1_load_microcode(adev);
 +              if (r) {
 +                      DRM_ERROR("failed to load MES fw, r=%d\n", r);
 +                      return r;
 +              }
 +      } else {
 +              DRM_ERROR("only direct fw loading is supported on MES\n");
 +              return -EINVAL;
 +      }
 +
 +      mes_v10_1_enable(adev, true);
 +
 +      return 0;
 +}
 +
 +static int mes_v10_1_hw_fini(void *handle)
 +{
 +      struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 +
 +      mes_v10_1_enable(adev, false);
 +
 +      if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)
 +              mes_v10_1_free_ucode_buffers(adev);
 +
 +      return 0;
 +}
 +
 +static int mes_v10_1_suspend(void *handle)
 +{
 +      return 0;
 +}
 +
 +static int mes_v10_1_resume(void *handle)
 +{
 +      return 0;
 +}
 +
 +static const struct amd_ip_funcs mes_v10_1_ip_funcs = {
 +      .name = "mes_v10_1",
 +      .sw_init = mes_v10_1_sw_init,
 +      .sw_fini = mes_v10_1_sw_fini,
 +      .hw_init = mes_v10_1_hw_init,
 +      .hw_fini = mes_v10_1_hw_fini,
 +      .suspend = mes_v10_1_suspend,
 +      .resume = mes_v10_1_resume,
 +};
 +
 +const struct amdgpu_ip_block_version mes_v10_1_ip_block = {
 +      .type = AMD_IP_BLOCK_TYPE_MES,
 +      .major = 10,
 +      .minor = 1,
 +      .rev = 0,
 +      .funcs = &mes_v10_1_ip_funcs,
 +};
Simple merge
Simple merge
index 58d7bbc5ada74bc671468ed6e6056dbd284df717,b16c658074d2a2edfcd98cad8be5675f84ea0441..4c70a0803b851a61c95e295acf11bbbc093c2806
  #include <drm/drm_atomic_helper.h>
  #include <drm/drm_dp_mst_helper.h>
  #include <drm/drm_fb_helper.h>
+ #include <drm/drm_fourcc.h>
  #include <drm/drm_edid.h>
+ #include <drm/drm_vblank.h>
  
  #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
 -#include "ivsrcid/irqsrcs_dcn_1_0.h"
 +#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
  
  #include "dcn/dcn_1_0_offset.h"
  #include "dcn/dcn_1_0_sh_mask.h"
index 23362dd4b6d394f9157f5a403fc5cb965cd6a676,0000000000000000000000000000000000000000..51a3dfe97f0eaa9541ab7060eacefa018b98043f
mode 100644,000000..100644
--- /dev/null
@@@ -1,157 -1,0 +1,159 @@@
 +/*
 + * Copyright 2018 Advanced Micro Devices, Inc.
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the "Software"),
 + * to deal in the Software without restriction, including without limitation
 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 + * and/or sell copies of the Software, and to permit persons to whom the
 + * Software is furnished to do so, subject to the following conditions:
 + *
 + * The above copyright notice and this permission notice shall be included in
 + * all copies or substantial portions of the Software.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 + * OTHER DEALINGS IN THE SOFTWARE.
 + *
 + * Authors: AMD
 + *
 + */
 +
++#include <linux/slab.h>
++
 +#include "reg_helper.h"
 +#include "core_types.h"
 +#include "dcn20_dccg.h"
 +
 +#define TO_DCN_DCCG(dccg)\
 +      container_of(dccg, struct dcn_dccg, base)
 +
 +#define REG(reg) \
 +      (dccg_dcn->regs->reg)
 +
 +#undef FN
 +#define FN(reg_name, field_name) \
 +      dccg_dcn->dccg_shift->field_name, dccg_dcn->dccg_mask->field_name
 +
 +#define CTX \
 +      dccg_dcn->base.ctx
 +#define DC_LOGGER \
 +      dccg->ctx->logger
 +
 +void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
 +{
 +      struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
 +
 +      if (dccg->ref_dppclk && req_dppclk) {
 +              int ref_dppclk = dccg->ref_dppclk;
 +
 +              ASSERT(req_dppclk <= ref_dppclk);
 +              /* need to clamp to 8 bits */
 +              if (ref_dppclk > 0xff) {
 +                      int divider = (ref_dppclk + 0xfe) / 0xff;
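 +                      /* e.g. ref_dppclk = 600: divider = ceil(600 / 255) = 3;
 +                       * the modulo becomes 600 / 3 = 200 and the requested
 +                       * clock is divided by 3 as well (rounded up) */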
 +
 +                      ref_dppclk /= divider;
 +                      req_dppclk = (req_dppclk + divider - 1) / divider;
 +                      if (req_dppclk > ref_dppclk)
 +                              req_dppclk = ref_dppclk;
 +              }
 +              REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
 +                              DPPCLK0_DTO_PHASE, req_dppclk,
 +                              DPPCLK0_DTO_MODULO, ref_dppclk);
 +              REG_UPDATE(DPPCLK_DTO_CTRL,
 +                              DPPCLK_DTO_ENABLE[dpp_inst], 1);
 +      } else {
 +              REG_UPDATE(DPPCLK_DTO_CTRL,
 +                              DPPCLK_DTO_ENABLE[dpp_inst], 0);
 +      }
 +}
 +
 +void dccg2_get_dccg_ref_freq(struct dccg *dccg,
 +              unsigned int xtalin_freq_inKhz,
 +              unsigned int *dccg_ref_freq_inKhz)
 +{
 +      struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
 +      uint32_t clk_en = 0;
 +      uint32_t clk_sel = 0;
 +
 +      REG_GET_2(REFCLK_CNTL, REFCLK_CLOCK_EN, &clk_en, REFCLK_SRC_SEL, &clk_sel);
 +
 +      if (clk_en != 0) {
 +              // DCN20 has never been validated for non-xtalin as reference
 +              // frequency.  There's actually no way for DC to determine what
 +              // frequency a non-xtalin source is.
 +              ASSERT_CRITICAL(false);
 +      }
 +
 +      *dccg_ref_freq_inKhz = xtalin_freq_inKhz;
 +
 +      return;
 +}
 +
 +void dccg2_init(struct dccg *dccg)
 +{
 +      struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
 +
 +      // Fallthrough intentional to program all available dpp_dto's
 +      switch (dccg_dcn->base.ctx->dc->res_pool->pipe_count) {
 +      case 6:
 +              REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[5], 1);
 +      case 5:
 +              REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[4], 1);
 +      case 4:
 +              REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[3], 1);
 +      case 3:
 +              REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[2], 1);
 +      case 2:
 +              REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[1], 1);
 +      case 1:
 +              REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[0], 1);
 +              break;
 +      default:
 +              ASSERT(false);
 +              break;
 +      }
 +}
 +
 +static const struct dccg_funcs dccg2_funcs = {
 +      .update_dpp_dto = dccg2_update_dpp_dto,
 +      .get_dccg_ref_freq = dccg2_get_dccg_ref_freq,
 +      .dccg_init = dccg2_init
 +};
 +
 +struct dccg *dccg2_create(
 +      struct dc_context *ctx,
 +      const struct dccg_registers *regs,
 +      const struct dccg_shift *dccg_shift,
 +      const struct dccg_mask *dccg_mask)
 +{
 +      struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL);
 +      struct dccg *base;
 +
 +      if (dccg_dcn == NULL) {
 +              BREAK_TO_DEBUGGER();
 +              return NULL;
 +      }
 +
 +      base = &dccg_dcn->base;
 +      base->ctx = ctx;
 +      base->funcs = &dccg2_funcs;
 +
 +      dccg_dcn->regs = regs;
 +      dccg_dcn->dccg_shift = dccg_shift;
 +      dccg_dcn->dccg_mask = dccg_mask;
 +
 +      return &dccg_dcn->base;
 +}
 +
 +void dcn_dccg_destroy(struct dccg **dccg)
 +{
 +      struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(*dccg);
 +
 +      kfree(dccg_dcn);
 +      *dccg = NULL;
 +}
index c5ac25980f199efd45ef940d442b13a92645b55e,0000000000000000000000000000000000000000..a8ba7d15abbb0334e26ed85afcfae1558ef71cf9
mode 100644,000000..100644
--- /dev/null
@@@ -1,3175 -1,0 +1,3177 @@@
 +/*
 + * Copyright 2016 Advanced Micro Devices, Inc.
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the "Software"),
 + * to deal in the Software without restriction, including without limitation
 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 + * and/or sell copies of the Software, and to permit persons to whom the
 + * Software is furnished to do so, subject to the following conditions:
 + *
 + * The above copyright notice and this permission notice shall be included in
 + * all copies or substantial portions of the Software.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 + * OTHER DEALINGS IN THE SOFTWARE.
 + *
 + * Authors: AMD
 + *
 + */
 +
++#include <linux/slab.h>
++
 +#include "dm_services.h"
 +#include "dc.h"
 +
 +#include "resource.h"
 +#include "include/irq_service_interface.h"
 +#include "dcn20/dcn20_resource.h"
 +
 +#include "dcn10/dcn10_hubp.h"
 +#include "dcn10/dcn10_ipp.h"
 +#include "dcn20_hubbub.h"
 +#include "dcn20_mpc.h"
 +#include "dcn20_hubp.h"
 +#include "irq/dcn20/irq_service_dcn20.h"
 +#include "dcn20_dpp.h"
 +#include "dcn20_optc.h"
 +#include "dcn20_hwseq.h"
 +#include "dce110/dce110_hw_sequencer.h"
 +#include "dcn10/dcn10_resource.h"
 +#include "dcn20_opp.h"
 +
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +#include "dcn20_dsc.h"
 +#endif
 +
 +#include "dcn20_link_encoder.h"
 +#include "dcn20_stream_encoder.h"
 +#include "dce/dce_clock_source.h"
 +#include "dce/dce_audio.h"
 +#include "dce/dce_hwseq.h"
 +#include "virtual/virtual_stream_encoder.h"
 +#include "dce110/dce110_resource.h"
 +#include "dml/display_mode_vba.h"
 +#include "dcn20_dccg.h"
 +#include "dcn20_vmid.h"
 +
 +#include "navi10_ip_offset.h"
 +
 +#include "dcn/dcn_2_0_0_offset.h"
 +#include "dcn/dcn_2_0_0_sh_mask.h"
 +
 +#include "nbio/nbio_2_3_offset.h"
 +
 +#include "dcn20/dcn20_dwb.h"
 +#include "dcn20/dcn20_mmhubbub.h"
 +
 +#include "mmhub/mmhub_2_0_0_offset.h"
 +#include "mmhub/mmhub_2_0_0_sh_mask.h"
 +
 +#include "reg_helper.h"
 +#include "dce/dce_abm.h"
 +#include "dce/dce_dmcu.h"
 +#include "dce/dce_aux.h"
 +#include "dce/dce_i2c.h"
 +#include "vm_helper.h"
 +
 +#include "amdgpu_socbb.h"
 +
 +#define SOC_BOUNDING_BOX_VALID false
 +#define DC_LOGGER_INIT(logger)
 +
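 +/*
 + * DML IP parameters for DCN 2.0: pipe/DSC counts, buffer sizes and pipeline
 + * delays consumed by the display mode library for bandwidth and watermark
 + * calculations.
 + */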
 +struct _vcs_dpi_ip_params_st dcn2_0_ip = {
 +      .odm_capable = 1,
 +      .gpuvm_enable = 0,
 +      .hostvm_enable = 0,
 +      .gpuvm_max_page_table_levels = 4,
 +      .hostvm_max_page_table_levels = 4,
 +      .hostvm_cached_page_table_levels = 0,
 +      .pte_group_size_bytes = 2048,
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +      .num_dsc = 6,
 +#else
 +      .num_dsc = 0,
 +#endif
 +      .rob_buffer_size_kbytes = 168,
 +      .det_buffer_size_kbytes = 164,
 +      .dpte_buffer_size_in_pte_reqs_luma = 84,
 +      .pde_proc_buffer_size_64k_reqs = 48,
 +      .dpp_output_buffer_pixels = 2560,
 +      .opp_output_buffer_lines = 1,
 +      .pixel_chunk_size_kbytes = 8,
 +      .pte_chunk_size_kbytes = 2,
 +      .meta_chunk_size_kbytes = 2,
 +      .writeback_chunk_size_kbytes = 2,
 +      .line_buffer_size_bits = 789504,
 +      .is_line_buffer_bpp_fixed = 0,
 +      .line_buffer_fixed_bpp = 0,
 +      .dcc_supported = true,
 +      .max_line_buffer_lines = 12,
 +      .writeback_luma_buffer_size_kbytes = 12,
 +      .writeback_chroma_buffer_size_kbytes = 8,
 +      .writeback_chroma_line_buffer_width_pixels = 4,
 +      .writeback_max_hscl_ratio = 1,
 +      .writeback_max_vscl_ratio = 1,
 +      .writeback_min_hscl_ratio = 1,
 +      .writeback_min_vscl_ratio = 1,
 +      .writeback_max_hscl_taps = 12,
 +      .writeback_max_vscl_taps = 12,
 +      .writeback_line_buffer_luma_buffer_size = 0,
 +      .writeback_line_buffer_chroma_buffer_size = 14643,
 +      .cursor_buffer_size = 8,
 +      .cursor_chunk_size = 2,
 +      .max_num_otg = 6,
 +      .max_num_dpp = 6,
 +      .max_num_wb = 1,
 +      .max_dchub_pscl_bw_pix_per_clk = 4,
 +      .max_pscl_lb_bw_pix_per_clk = 2,
 +      .max_lb_vscl_bw_pix_per_clk = 4,
 +      .max_vscl_hscl_bw_pix_per_clk = 4,
 +      .max_hscl_ratio = 8,
 +      .max_vscl_ratio = 8,
 +      .hscl_mults = 4,
 +      .vscl_mults = 4,
 +      .max_hscl_taps = 8,
 +      .max_vscl_taps = 8,
 +      .dispclk_ramp_margin_percent = 1,
 +      .underscan_factor = 1.10,
 +      .min_vblank_lines = 32, //
 +      .dppclk_delay_subtotal = 77, //
 +      .dppclk_delay_scl_lb_only = 16,
 +      .dppclk_delay_scl = 50,
 +      .dppclk_delay_cnvc_formatter = 8,
 +      .dppclk_delay_cnvc_cursor = 6,
 +      .dispclk_delay_subtotal = 87, //
 +      .dcfclk_cstate_latency = 10, // SRExitTime
 +      .max_inter_dcn_tile_repeaters = 8,
 +
 +      .xfc_supported = true,
 +      .xfc_fill_bw_overhead_percent = 10.0,
 +      .xfc_fill_constant_bytes = 0,
 +};
 +
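 +/* Zero-initialized here; real SOC values are expected to be filled in at
 + * init time (SOC_BOUNDING_BOX_VALID above is still false).
 + */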
 +struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = { 0 };
 +
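 +/* Fallback offsets for the per-PHY DP_DPHY_INTERNAL_CTRL registers when the
 + * IP offset headers do not provide them.
 + */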
 +#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
 +      #define mmDP0_DP_DPHY_INTERNAL_CTRL             0x210f
 +      #define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX    2
 +      #define mmDP1_DP_DPHY_INTERNAL_CTRL             0x220f
 +      #define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX    2
 +      #define mmDP2_DP_DPHY_INTERNAL_CTRL             0x230f
 +      #define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX    2
 +      #define mmDP3_DP_DPHY_INTERNAL_CTRL             0x240f
 +      #define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX    2
 +      #define mmDP4_DP_DPHY_INTERNAL_CTRL             0x250f
 +      #define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX    2
 +      #define mmDP5_DP_DPHY_INTERNAL_CTRL             0x260f
 +      #define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX    2
 +      #define mmDP6_DP_DPHY_INTERNAL_CTRL             0x270f
 +      #define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX    2
 +#endif
 +
 +enum dcn20_clk_src_array_id {
 +      DCN20_CLK_SRC_PLL0,
 +      DCN20_CLK_SRC_PLL1,
 +      DCN20_CLK_SRC_PLL2,
 +      DCN20_CLK_SRC_PLL3,
 +      DCN20_CLK_SRC_PLL4,
 +      DCN20_CLK_SRC_PLL5,
 +      DCN20_CLK_SRC_TOTAL
 +};
 +
 +/* begin *********************
 + * macros to expand register list macros defined in HW object header files */
 +
 +/* DCN */
 +/* TODO awful hack. fixup dcn20_dwb.h */
 +#undef BASE_INNER
 +#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
 +
 +#define BASE(seg) BASE_INNER(seg)
 +
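 +/*
 + * SR/SRI/SRII build absolute register addresses by token-pasting the register
 + * name onto the mm<reg> offset and _BASE_IDX constants from the IP offset
 + * headers; SRII additionally indexes a register array by instance id.
 + */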
 +#define SR(reg_name)\
 +              .reg_name = BASE(mm ## reg_name ## _BASE_IDX) +  \
 +                                      mm ## reg_name
 +
 +#define SRI(reg_name, block, id)\
 +      .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
 +                                      mm ## block ## id ## _ ## reg_name
 +
 +#define SRIR(var_name, reg_name, block, id)\
 +      .var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
 +                                      mm ## block ## id ## _ ## reg_name
 +
 +#define SRII(reg_name, block, id)\
 +      .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
 +                                      mm ## block ## id ## _ ## reg_name
 +
 +#define DCCG_SRII(reg_name, block, id)\
 +      .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
 +                                      mm ## block ## id ## _ ## reg_name
 +
 +/* NBIO */
 +#define NBIO_BASE_INNER(seg) \
 +      NBIO_BASE__INST0_SEG ## seg
 +
 +#define NBIO_BASE(seg) \
 +      NBIO_BASE_INNER(seg)
 +
 +#define NBIO_SR(reg_name)\
 +              .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
 +                                      mm ## reg_name
 +
 +/* MMHUB */
 +#define MMHUB_BASE_INNER(seg) \
 +      MMHUB_BASE__INST0_SEG ## seg
 +
 +#define MMHUB_BASE(seg) \
 +      MMHUB_BASE_INNER(seg)
 +
 +#define MMHUB_SR(reg_name)\
 +              .reg_name = MMHUB_BASE(mmMM ## reg_name ## _BASE_IDX) + \
 +                                      mmMM ## reg_name
 +
 +static const struct bios_registers bios_regs = {
 +              NBIO_SR(BIOS_SCRATCH_3),
 +              NBIO_SR(BIOS_SCRATCH_6)
 +};
 +
 +#define clk_src_regs(index, pllid)\
 +[index] = {\
 +      CS_COMMON_REG_LIST_DCN2_0(index, pllid),\
 +}
 +
 +static const struct dce110_clk_src_regs clk_src_regs[] = {
 +      clk_src_regs(0, A),
 +      clk_src_regs(1, B),
 +      clk_src_regs(2, C),
 +      clk_src_regs(3, D),
 +      clk_src_regs(4, E),
 +      clk_src_regs(5, F)
 +};
 +
 +static const struct dce110_clk_src_shift cs_shift = {
 +              CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
 +};
 +
 +static const struct dce110_clk_src_mask cs_mask = {
 +              CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
 +};
 +
 +static const struct dce_dmcu_registers dmcu_regs = {
 +              DMCU_DCN10_REG_LIST()
 +};
 +
 +static const struct dce_dmcu_shift dmcu_shift = {
 +              DMCU_MASK_SH_LIST_DCN10(__SHIFT)
 +};
 +
 +static const struct dce_dmcu_mask dmcu_mask = {
 +              DMCU_MASK_SH_LIST_DCN10(_MASK)
 +};
 +
 +static const struct dce_abm_registers abm_regs = {
 +              ABM_DCN20_REG_LIST()
 +};
 +
 +static const struct dce_abm_shift abm_shift = {
 +              ABM_MASK_SH_LIST_DCN20(__SHIFT)
 +};
 +
 +static const struct dce_abm_mask abm_mask = {
 +              ABM_MASK_SH_LIST_DCN20(_MASK)
 +};
 +
 +#define audio_regs(id)\
 +[id] = {\
 +              AUD_COMMON_REG_LIST(id)\
 +}
 +
 +static const struct dce_audio_registers audio_regs[] = {
 +      audio_regs(0),
 +      audio_regs(1),
 +      audio_regs(2),
 +      audio_regs(3),
 +      audio_regs(4),
 +      audio_regs(5),
 +      audio_regs(6),
 +};
 +
 +#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
 +              SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
 +              SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
 +              AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
 +
 +static const struct dce_audio_shift audio_shift = {
 +              DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
 +};
 +
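 +/* ("aduio" follows the struct's spelling in dce_audio.h) */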
 +static const struct dce_aduio_mask audio_mask = {
 +              DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
 +};
 +
 +#define stream_enc_regs(id)\
 +[id] = {\
 +      SE_DCN2_REG_LIST(id)\
 +}
 +
 +static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
 +      stream_enc_regs(0),
 +      stream_enc_regs(1),
 +      stream_enc_regs(2),
 +      stream_enc_regs(3),
 +      stream_enc_regs(4),
 +      stream_enc_regs(5),
 +};
 +
 +static const struct dcn10_stream_encoder_shift se_shift = {
 +              SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT)
 +};
 +
 +static const struct dcn10_stream_encoder_mask se_mask = {
 +              SE_COMMON_MASK_SH_LIST_DCN20(_MASK)
 +};
 +
 +#define aux_regs(id)\
 +[id] = {\
 +      DCN2_AUX_REG_LIST(id)\
 +}
 +
 +static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
 +              aux_regs(0),
 +              aux_regs(1),
 +              aux_regs(2),
 +              aux_regs(3),
 +              aux_regs(4),
 +              aux_regs(5)
 +};
 +
 +#define hpd_regs(id)\
 +[id] = {\
 +      HPD_REG_LIST(id)\
 +}
 +
 +static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
 +              hpd_regs(0),
 +              hpd_regs(1),
 +              hpd_regs(2),
 +              hpd_regs(3),
 +              hpd_regs(4),
 +              hpd_regs(5)
 +};
 +
 +#define link_regs(id, phyid)\
 +[id] = {\
 +      LE_DCN10_REG_LIST(id), \
 +      UNIPHY_DCN2_REG_LIST(phyid), \
 +      SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
 +}
 +
 +static const struct dcn10_link_enc_registers link_enc_regs[] = {
 +      link_regs(0, A),
 +      link_regs(1, B),
 +      link_regs(2, C),
 +      link_regs(3, D),
 +      link_regs(4, E),
 +      link_regs(5, F)
 +};
 +
 +static const struct dcn10_link_enc_shift le_shift = {
 +      LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT)
 +};
 +
 +static const struct dcn10_link_enc_mask le_mask = {
 +      LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK)
 +};
 +
 +#define ipp_regs(id)\
 +[id] = {\
 +      IPP_REG_LIST_DCN20(id),\
 +}
 +
 +static const struct dcn10_ipp_registers ipp_regs[] = {
 +      ipp_regs(0),
 +      ipp_regs(1),
 +      ipp_regs(2),
 +      ipp_regs(3),
 +      ipp_regs(4),
 +      ipp_regs(5),
 +};
 +
 +static const struct dcn10_ipp_shift ipp_shift = {
 +              IPP_MASK_SH_LIST_DCN20(__SHIFT)
 +};
 +
 +static const struct dcn10_ipp_mask ipp_mask = {
 +              IPP_MASK_SH_LIST_DCN20(_MASK),
 +};
 +
 +#define opp_regs(id)\
 +[id] = {\
 +      OPP_REG_LIST_DCN20(id),\
 +}
 +
 +static const struct dcn20_opp_registers opp_regs[] = {
 +      opp_regs(0),
 +      opp_regs(1),
 +      opp_regs(2),
 +      opp_regs(3),
 +      opp_regs(4),
 +      opp_regs(5),
 +};
 +
 +static const struct dcn20_opp_shift opp_shift = {
 +              OPP_MASK_SH_LIST_DCN20(__SHIFT)
 +};
 +
 +static const struct dcn20_opp_mask opp_mask = {
 +              OPP_MASK_SH_LIST_DCN20(_MASK)
 +};
 +
 +#define aux_engine_regs(id)\
 +[id] = {\
 +      AUX_COMMON_REG_LIST0(id), \
 +      .AUXN_IMPCAL = 0, \
 +      .AUXP_IMPCAL = 0, \
 +      .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \
 +}
 +
 +static const struct dce110_aux_registers aux_engine_regs[] = {
 +              aux_engine_regs(0),
 +              aux_engine_regs(1),
 +              aux_engine_regs(2),
 +              aux_engine_regs(3),
 +              aux_engine_regs(4),
 +              aux_engine_regs(5)
 +};
 +
 +#define tf_regs(id)\
 +[id] = {\
 +      TF_REG_LIST_DCN20(id),\
 +}
 +
 +static const struct dcn2_dpp_registers tf_regs[] = {
 +      tf_regs(0),
 +      tf_regs(1),
 +      tf_regs(2),
 +      tf_regs(3),
 +      tf_regs(4),
 +      tf_regs(5),
 +};
 +
 +static const struct dcn2_dpp_shift tf_shift = {
 +              TF_REG_LIST_SH_MASK_DCN20(__SHIFT)
 +};
 +
 +static const struct dcn2_dpp_mask tf_mask = {
 +              TF_REG_LIST_SH_MASK_DCN20(_MASK)
 +};
 +
 +#define dwbc_regs_dcn2(id)\
 +[id] = {\
 +      DWBC_COMMON_REG_LIST_DCN2_0(id),\
 +}
 +
 +static const struct dcn20_dwbc_registers dwbc20_regs[] = {
 +      dwbc_regs_dcn2(0),
 +};
 +
 +static const struct dcn20_dwbc_shift dwbc20_shift = {
 +      DWBC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
 +};
 +
 +static const struct dcn20_dwbc_mask dwbc20_mask = {
 +      DWBC_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
 +};
 +
 +#define mcif_wb_regs_dcn2(id)\
 +[id] = {\
 +      MCIF_WB_COMMON_REG_LIST_DCN2_0(id),\
 +}
 +
 +static const struct dcn20_mmhubbub_registers mcif_wb20_regs[] = {
 +      mcif_wb_regs_dcn2(0),
 +};
 +
 +static const struct dcn20_mmhubbub_shift mcif_wb20_shift = {
 +      MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
 +};
 +
 +static const struct dcn20_mmhubbub_mask mcif_wb20_mask = {
 +      MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
 +};
 +
 +static const struct dcn20_mpc_registers mpc_regs = {
 +              MPC_REG_LIST_DCN2_0(0),
 +              MPC_REG_LIST_DCN2_0(1),
 +              MPC_REG_LIST_DCN2_0(2),
 +              MPC_REG_LIST_DCN2_0(3),
 +              MPC_REG_LIST_DCN2_0(4),
 +              MPC_REG_LIST_DCN2_0(5),
 +              MPC_OUT_MUX_REG_LIST_DCN2_0(0),
 +              MPC_OUT_MUX_REG_LIST_DCN2_0(1),
 +              MPC_OUT_MUX_REG_LIST_DCN2_0(2),
 +              MPC_OUT_MUX_REG_LIST_DCN2_0(3),
 +              MPC_OUT_MUX_REG_LIST_DCN2_0(4),
 +              MPC_OUT_MUX_REG_LIST_DCN2_0(5),
 +};
 +
 +static const struct dcn20_mpc_shift mpc_shift = {
 +      MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
 +};
 +
 +static const struct dcn20_mpc_mask mpc_mask = {
 +      MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
 +};
 +
 +#define tg_regs(id)\
 +[id] = {TG_COMMON_REG_LIST_DCN2_0(id)}
 +
 +static const struct dcn_optc_registers tg_regs[] = {
 +      tg_regs(0),
 +      tg_regs(1),
 +      tg_regs(2),
 +      tg_regs(3),
 +      tg_regs(4),
 +      tg_regs(5)
 +};
 +
 +static const struct dcn_optc_shift tg_shift = {
 +      TG_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
 +};
 +
 +static const struct dcn_optc_mask tg_mask = {
 +      TG_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
 +};
 +
 +#define hubp_regs(id)\
 +[id] = {\
 +      HUBP_REG_LIST_DCN20(id)\
 +}
 +
 +static const struct dcn_hubp2_registers hubp_regs[] = {
 +              hubp_regs(0),
 +              hubp_regs(1),
 +              hubp_regs(2),
 +              hubp_regs(3),
 +              hubp_regs(4),
 +              hubp_regs(5)
 +};
 +
 +static const struct dcn_hubp2_shift hubp_shift = {
 +              HUBP_MASK_SH_LIST_DCN20(__SHIFT)
 +};
 +
 +static const struct dcn_hubp2_mask hubp_mask = {
 +              HUBP_MASK_SH_LIST_DCN20(_MASK)
 +};
 +
 +static const struct dcn_hubbub_registers hubbub_reg = {
 +              HUBBUB_REG_LIST_DCN20(0)
 +};
 +
 +static const struct dcn_hubbub_shift hubbub_shift = {
 +              HUBBUB_MASK_SH_LIST_DCN20(__SHIFT)
 +};
 +
 +static const struct dcn_hubbub_mask hubbub_mask = {
 +              HUBBUB_MASK_SH_LIST_DCN20(_MASK)
 +};
 +
 +#define vmid_regs(id)\
 +[id] = {\
 +              DCN20_VMID_REG_LIST(id)\
 +}
 +
 +static const struct dcn_vmid_registers vmid_regs[] = {
 +      vmid_regs(0),
 +      vmid_regs(1),
 +      vmid_regs(2),
 +      vmid_regs(3),
 +      vmid_regs(4),
 +      vmid_regs(5),
 +      vmid_regs(6),
 +      vmid_regs(7),
 +      vmid_regs(8),
 +      vmid_regs(9),
 +      vmid_regs(10),
 +      vmid_regs(11),
 +      vmid_regs(12),
 +      vmid_regs(13),
 +      vmid_regs(14),
 +      vmid_regs(15)
 +};
 +
 +static const struct dcn20_vmid_shift vmid_shifts = {
 +              DCN20_VMID_MASK_SH_LIST(__SHIFT)
 +};
 +
 +static const struct dcn20_vmid_mask vmid_masks = {
 +              DCN20_VMID_MASK_SH_LIST(_MASK)
 +};
 +
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +#define dsc_regsDCN20(id)\
 +[id] = {\
 +      DSC_REG_LIST_DCN20(id)\
 +}
 +
 +static const struct dcn20_dsc_registers dsc_regs[] = {
 +      dsc_regsDCN20(0),
 +      dsc_regsDCN20(1),
 +      dsc_regsDCN20(2),
 +      dsc_regsDCN20(3),
 +      dsc_regsDCN20(4),
 +      dsc_regsDCN20(5)
 +};
 +
 +static const struct dcn20_dsc_shift dsc_shift = {
 +      DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
 +};
 +
 +static const struct dcn20_dsc_mask dsc_mask = {
 +      DSC_REG_LIST_SH_MASK_DCN20(_MASK)
 +};
 +#endif
 +
 +static const struct dccg_registers dccg_regs = {
 +              DCCG_REG_LIST_DCN2()
 +};
 +
 +static const struct dccg_shift dccg_shift = {
 +              DCCG_MASK_SH_LIST_DCN2(__SHIFT)
 +};
 +
 +static const struct dccg_mask dccg_mask = {
 +              DCCG_MASK_SH_LIST_DCN2(_MASK)
 +};
 +
 +static const struct resource_caps res_cap_nv10 = {
 +              .num_timing_generator = 6,
 +              .num_opp = 6,
 +              .num_video_plane = 6,
 +              .num_audio = 7,
 +              .num_stream_encoder = 6,
 +              .num_pll = 6,
 +              .num_dwb = 1,
 +              .num_ddc = 6,
 +              .num_vmid = 16,
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +              .num_dsc = 6,
 +#endif
 +};
 +
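 +/* Scale factor caps are in 1/1000 units (16000 = 16x upscale, 250 = 1/4
 + * downscale); fp16 is left at 1, i.e. effectively no scaling.
 + */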
 +static const struct dc_plane_cap plane_cap = {
 +      .type = DC_PLANE_TYPE_DCN_UNIVERSAL,
 +      .blends_with_above = true,
 +      .blends_with_below = true,
 +      .per_pixel_alpha = true,
 +
 +      .pixel_format_support = {
 +                      .argb8888 = true,
 +                      .nv12 = true,
 +                      .fp16 = true
 +      },
 +
 +      .max_upscale_factor = {
 +                      .argb8888 = 16000,
 +                      .nv12 = 16000,
 +                      .fp16 = 1
 +      },
 +
 +      .max_downscale_factor = {
 +                      .argb8888 = 250,
 +                      .nv12 = 250,
 +                      .fp16 = 1
 +      }
 +};
 +
 +static const struct dc_debug_options debug_defaults_drv = {
 +              .disable_dmcu = true,
 +              .force_abm_enable = false,
 +              .timing_trace = false,
 +              .clock_trace = true,
 +              .disable_pplib_clock_request = true,
 +              .pipe_split_policy = MPC_SPLIT_DYNAMIC,
 +              .force_single_disp_pipe_split = true,
 +              .disable_dcc = DCC_ENABLE,
 +              .vsr_support = true,
 +              .performance_trace = false,
 +              .max_downscale_src_width = 5120, /* up to 5K */
 +              .disable_pplib_wm_range = false,
 +              .scl_reset_length10 = true,
 +              .sanity_checks = false,
 +              .disable_tri_buf = true,
 +              .underflow_assert_delay_us = 0xFFFFFFFF,
 +};
 +
 +static const struct dc_debug_options debug_defaults_diags = {
 +              .disable_dmcu = true,
 +              .force_abm_enable = false,
 +              .timing_trace = true,
 +              .clock_trace = true,
 +              .disable_dpp_power_gate = true,
 +              .disable_hubp_power_gate = true,
 +              .disable_clock_gate = true,
 +              .disable_pplib_clock_request = true,
 +              .disable_pplib_wm_range = true,
 +              .disable_stutter = true,
 +              .scl_reset_length10 = true,
 +              .underflow_assert_delay_us = 0xFFFFFFFF,
 +};
 +
 +void dcn20_dpp_destroy(struct dpp **dpp)
 +{
 +      kfree(TO_DCN20_DPP(*dpp));
 +      *dpp = NULL;
 +}
 +
 +struct dpp *dcn20_dpp_create(
 +      struct dc_context *ctx,
 +      uint32_t inst)
 +{
 +      struct dcn20_dpp *dpp =
 +              kzalloc(sizeof(struct dcn20_dpp), GFP_KERNEL);
 +
 +      if (!dpp)
 +              return NULL;
 +
 +      if (dpp2_construct(dpp, ctx, inst,
 +                      &tf_regs[inst], &tf_shift, &tf_mask))
 +              return &dpp->base;
 +
 +      BREAK_TO_DEBUGGER();
 +      kfree(dpp);
 +      return NULL;
 +}
 +
 +struct input_pixel_processor *dcn20_ipp_create(
 +      struct dc_context *ctx, uint32_t inst)
 +{
 +      struct dcn10_ipp *ipp =
 +              kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);
 +
 +      if (!ipp) {
 +              BREAK_TO_DEBUGGER();
 +              return NULL;
 +      }
 +
 +      dcn20_ipp_construct(ipp, ctx, inst,
 +                      &ipp_regs[inst], &ipp_shift, &ipp_mask);
 +      return &ipp->base;
 +}
 +
 +struct output_pixel_processor *dcn20_opp_create(
 +      struct dc_context *ctx, uint32_t inst)
 +{
 +      struct dcn20_opp *opp =
 +              kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
 +
 +      if (!opp) {
 +              BREAK_TO_DEBUGGER();
 +              return NULL;
 +      }
 +
 +      dcn20_opp_construct(opp, ctx, inst,
 +                      &opp_regs[inst], &opp_shift, &opp_mask);
 +      return &opp->base;
 +}
 +
 +struct dce_aux *dcn20_aux_engine_create(
 +      struct dc_context *ctx,
 +      uint32_t inst)
 +{
 +      struct aux_engine_dce110 *aux_engine =
 +              kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
 +
 +      if (!aux_engine)
 +              return NULL;
 +
 +      dce110_aux_engine_construct(aux_engine, ctx, inst,
 +                                  SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
 +                                  &aux_engine_regs[inst]);
 +
 +      return &aux_engine->base;
 +}
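 +
 +/* HW I2C engines are numbered from 1, so entry [inst] of i2c_hw_regs holds
 + * the registers for engine inst + 1.
 + */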
 +#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) }
 +
 +static const struct dce_i2c_registers i2c_hw_regs[] = {
 +              i2c_inst_regs(1),
 +              i2c_inst_regs(2),
 +              i2c_inst_regs(3),
 +              i2c_inst_regs(4),
 +              i2c_inst_regs(5),
 +              i2c_inst_regs(6),
 +};
 +
 +static const struct dce_i2c_shift i2c_shifts = {
 +              I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT)
 +};
 +
 +static const struct dce_i2c_mask i2c_masks = {
 +              I2C_COMMON_MASK_SH_LIST_DCN2(_MASK)
 +};
 +
 +struct dce_i2c_hw *dcn20_i2c_hw_create(
 +      struct dc_context *ctx,
 +      uint32_t inst)
 +{
 +      struct dce_i2c_hw *dce_i2c_hw =
 +              kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
 +
 +      if (!dce_i2c_hw)
 +              return NULL;
 +
 +      dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
 +                                  &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
 +
 +      return dce_i2c_hw;
 +}
 +
 +struct mpc *dcn20_mpc_create(struct dc_context *ctx)
 +{
 +      struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc),
 +                                        GFP_KERNEL);
 +
 +      if (!mpc20)
 +              return NULL;
 +
 +      dcn20_mpc_construct(mpc20, ctx,
 +                      &mpc_regs,
 +                      &mpc_shift,
 +                      &mpc_mask,
 +                      6);
 +
 +      return &mpc20->base;
 +}
 +
 +struct hubbub *dcn20_hubbub_create(struct dc_context *ctx)
 +{
 +      int i;
 +      struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub),
 +                                        GFP_KERNEL);
 +
 +      if (!hubbub)
 +              return NULL;
 +
 +      hubbub2_construct(hubbub, ctx,
 +                      &hubbub_reg,
 +                      &hubbub_shift,
 +                      &hubbub_mask);
 +
 +      for (i = 0; i < res_cap_nv10.num_vmid; i++) {
 +              struct dcn20_vmid *vmid = &hubbub->vmid[i];
 +
 +              vmid->ctx = ctx;
 +
 +              vmid->regs = &vmid_regs[i];
 +              vmid->shifts = &vmid_shifts;
 +              vmid->masks = &vmid_masks;
 +      }
 +
 +      return &hubbub->base;
 +}
 +
 +struct timing_generator *dcn20_timing_generator_create(
 +              struct dc_context *ctx,
 +              uint32_t instance)
 +{
 +      struct optc *tgn10 =
 +              kzalloc(sizeof(struct optc), GFP_KERNEL);
 +
 +      if (!tgn10)
 +              return NULL;
 +
 +      tgn10->base.inst = instance;
 +      tgn10->base.ctx = ctx;
 +
 +      tgn10->tg_regs = &tg_regs[instance];
 +      tgn10->tg_shift = &tg_shift;
 +      tgn10->tg_mask = &tg_mask;
 +
 +      dcn20_timing_generator_init(tgn10);
 +
 +      return &tgn10->base;
 +}
 +
 +static const struct encoder_feature_support link_enc_feature = {
 +              .max_hdmi_deep_color = COLOR_DEPTH_121212,
 +              .max_hdmi_pixel_clock = 600000,
 +              .hdmi_ycbcr420_supported = true,
 +              .dp_ycbcr420_supported = true,
 +              .flags.bits.IS_HBR2_CAPABLE = true,
 +              .flags.bits.IS_HBR3_CAPABLE = true,
 +              .flags.bits.IS_TPS3_CAPABLE = true,
 +              .flags.bits.IS_TPS4_CAPABLE = true
 +};
 +
 +struct link_encoder *dcn20_link_encoder_create(
 +      const struct encoder_init_data *enc_init_data)
 +{
 +      struct dcn20_link_encoder *enc20 =
 +              kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
 +
 +      if (!enc20)
 +              return NULL;
 +
 +      dcn20_link_encoder_construct(enc20,
 +                                    enc_init_data,
 +                                    &link_enc_feature,
 +                                    &link_enc_regs[enc_init_data->transmitter],
 +                                    &link_enc_aux_regs[enc_init_data->channel - 1],
 +                                    &link_enc_hpd_regs[enc_init_data->hpd_source],
 +                                    &le_shift,
 +                                    &le_mask);
 +
 +      return &enc20->enc10.base;
 +}
 +
 +struct clock_source *dcn20_clock_source_create(
 +      struct dc_context *ctx,
 +      struct dc_bios *bios,
 +      enum clock_source_id id,
 +      const struct dce110_clk_src_regs *regs,
 +      bool dp_clk_src)
 +{
 +      struct dce110_clk_src *clk_src =
 +              kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
 +
 +      if (!clk_src)
 +              return NULL;
 +
 +      if (dcn20_clk_src_construct(clk_src, ctx, bios, id,
 +                      regs, &cs_shift, &cs_mask)) {
 +              clk_src->base.dp_clk_src = dp_clk_src;
 +              return &clk_src->base;
 +      }
 +
 +      BREAK_TO_DEBUGGER();
 +      return NULL;
 +}
 +
 +static void read_dce_straps(
 +      struct dc_context *ctx,
 +      struct resource_straps *straps)
 +{
 +      generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX),
 +              FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
 +}
 +
 +static struct audio *dcn20_create_audio(
 +              struct dc_context *ctx, unsigned int inst)
 +{
 +      return dce_audio_create(ctx, inst,
 +                      &audio_regs[inst], &audio_shift, &audio_mask);
 +}
 +
 +struct stream_encoder *dcn20_stream_encoder_create(
 +      enum engine_id eng_id,
 +      struct dc_context *ctx)
 +{
 +      struct dcn10_stream_encoder *enc1 =
 +              kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);
 +
 +      if (!enc1)
 +              return NULL;
 +
 +      dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id,
 +                                      &stream_enc_regs[eng_id],
 +                                      &se_shift, &se_mask);
 +
 +      return &enc1->base;
 +}
 +
 +static const struct dce_hwseq_registers hwseq_reg = {
 +              HWSEQ_DCN2_REG_LIST()
 +};
 +
 +static const struct dce_hwseq_shift hwseq_shift = {
 +              HWSEQ_DCN2_MASK_SH_LIST(__SHIFT)
 +};
 +
 +static const struct dce_hwseq_mask hwseq_mask = {
 +              HWSEQ_DCN2_MASK_SH_LIST(_MASK)
 +};
 +
 +struct dce_hwseq *dcn20_hwseq_create(
 +      struct dc_context *ctx)
 +{
 +      struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
 +
 +      if (hws) {
 +              hws->ctx = ctx;
 +              hws->regs = &hwseq_reg;
 +              hws->shifts = &hwseq_shift;
 +              hws->masks = &hwseq_mask;
 +      }
 +      return hws;
 +}
 +
 +static const struct resource_create_funcs res_create_funcs = {
 +      .read_dce_straps = read_dce_straps,
 +      .create_audio = dcn20_create_audio,
 +      .create_stream_encoder = dcn20_stream_encoder_create,
 +      .create_hwseq = dcn20_hwseq_create,
 +};
 +
 +static const struct resource_create_funcs res_create_maximus_funcs = {
 +      .read_dce_straps = NULL,
 +      .create_audio = NULL,
 +      .create_stream_encoder = NULL,
 +      .create_hwseq = dcn20_hwseq_create,
 +};
 +
 +void dcn20_clock_source_destroy(struct clock_source **clk_src)
 +{
 +      kfree(TO_DCE110_CLK_SRC(*clk_src));
 +      *clk_src = NULL;
 +}
 +
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +
 +struct display_stream_compressor *dcn20_dsc_create(
 +      struct dc_context *ctx, uint32_t inst)
 +{
 +      struct dcn20_dsc *dsc =
 +              kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
 +
 +      if (!dsc) {
 +              BREAK_TO_DEBUGGER();
 +              return NULL;
 +      }
 +
 +      dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask);
 +      return &dsc->base;
 +}
 +
 +void dcn20_dsc_destroy(struct display_stream_compressor **dsc)
 +{
 +      kfree(container_of(*dsc, struct dcn20_dsc, base));
 +      *dsc = NULL;
 +}
 +
 +#endif
 +
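 +/* Free every HW block object owned by the resource pool */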
 +static void destruct(struct dcn20_resource_pool *pool)
 +{
 +      unsigned int i;
 +
 +      for (i = 0; i < pool->base.stream_enc_count; i++) {
 +              if (pool->base.stream_enc[i] != NULL) {
 +                      kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
 +                      pool->base.stream_enc[i] = NULL;
 +              }
 +      }
 +
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +      for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
 +              if (pool->base.dscs[i] != NULL)
 +                      dcn20_dsc_destroy(&pool->base.dscs[i]);
 +      }
 +#endif
 +
 +      if (pool->base.mpc != NULL) {
 +              kfree(TO_DCN20_MPC(pool->base.mpc));
 +              pool->base.mpc = NULL;
 +      }
 +      if (pool->base.hubbub != NULL) {
 +              kfree(pool->base.hubbub);
 +              pool->base.hubbub = NULL;
 +      }
 +      for (i = 0; i < pool->base.pipe_count; i++) {
 +              if (pool->base.dpps[i] != NULL)
 +                      dcn20_dpp_destroy(&pool->base.dpps[i]);
 +
 +              if (pool->base.ipps[i] != NULL)
 +                      pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
 +
 +              if (pool->base.hubps[i] != NULL) {
 +                      kfree(TO_DCN20_HUBP(pool->base.hubps[i]));
 +                      pool->base.hubps[i] = NULL;
 +              }
 +      }
 +
 +      /* irqs are pool-wide, not per-pipe; destroy once after the pipe loop */
 +      if (pool->base.irqs != NULL)
 +              dal_irq_service_destroy(&pool->base.irqs);
 +
 +      for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
 +              if (pool->base.engines[i] != NULL)
 +                      dce110_engine_destroy(&pool->base.engines[i]);
 +              if (pool->base.hw_i2cs[i] != NULL) {
 +                      kfree(pool->base.hw_i2cs[i]);
 +                      pool->base.hw_i2cs[i] = NULL;
 +              }
 +              if (pool->base.sw_i2cs[i] != NULL) {
 +                      kfree(pool->base.sw_i2cs[i]);
 +                      pool->base.sw_i2cs[i] = NULL;
 +              }
 +      }
 +
 +      for (i = 0; i < pool->base.res_cap->num_opp; i++) {
 +              if (pool->base.opps[i] != NULL)
 +                      pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
 +      }
 +
 +      for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
 +              if (pool->base.timing_generators[i] != NULL)    {
 +                      kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
 +                      pool->base.timing_generators[i] = NULL;
 +              }
 +      }
 +
 +      for (i = 0; i < pool->base.res_cap->num_dwb; i++) {
 +              if (pool->base.dwbc[i] != NULL) {
 +                      kfree(TO_DCN20_DWBC(pool->base.dwbc[i]));
 +                      pool->base.dwbc[i] = NULL;
 +              }
 +              if (pool->base.mcif_wb[i] != NULL) {
 +                      kfree(TO_DCN20_MMHUBBUB(pool->base.mcif_wb[i]));
 +                      pool->base.mcif_wb[i] = NULL;
 +              }
 +      }
 +
 +      for (i = 0; i < pool->base.audio_count; i++) {
 +              if (pool->base.audios[i])
 +                      dce_aud_destroy(&pool->base.audios[i]);
 +      }
 +
 +      for (i = 0; i < pool->base.clk_src_count; i++) {
 +              if (pool->base.clock_sources[i] != NULL) {
 +                      dcn20_clock_source_destroy(&pool->base.clock_sources[i]);
 +                      pool->base.clock_sources[i] = NULL;
 +              }
 +      }
 +
 +      if (pool->base.dp_clock_source != NULL) {
 +              dcn20_clock_source_destroy(&pool->base.dp_clock_source);
 +              pool->base.dp_clock_source = NULL;
 +      }
 +
 +      if (pool->base.abm != NULL)
 +              dce_abm_destroy(&pool->base.abm);
 +
 +      if (pool->base.dmcu != NULL)
 +              dce_dmcu_destroy(&pool->base.dmcu);
 +
 +      if (pool->base.dccg != NULL)
 +              dcn_dccg_destroy(&pool->base.dccg);
 +
 +      if (pool->base.pp_smu != NULL)
 +              dcn20_pp_smu_destroy(&pool->base.pp_smu);
 +}
 +
 +struct hubp *dcn20_hubp_create(
 +      struct dc_context *ctx,
 +      uint32_t inst)
 +{
 +      struct dcn20_hubp *hubp2 =
 +              kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);
 +
 +      if (!hubp2)
 +              return NULL;
 +
 +      if (hubp2_construct(hubp2, ctx, inst,
 +                      &hubp_regs[inst], &hubp_shift, &hubp_mask))
 +              return &hubp2->base;
 +
 +      BREAK_TO_DEBUGGER();
 +      kfree(hubp2);
 +      return NULL;
 +}
 +
 +static void get_pixel_clock_parameters(
 +      struct pipe_ctx *pipe_ctx,
 +      struct pixel_clk_params *pixel_clk_params)
 +{
 +      const struct dc_stream_state *stream = pipe_ctx->stream;
 +      bool odm_combine = dc_res_get_odm_bottom_pipe(pipe_ctx) != NULL;
 +
 +      pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
 +      pixel_clk_params->encoder_object_id = stream->link->link_enc->id;
 +      pixel_clk_params->signal_type = pipe_ctx->stream->signal;
 +      pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1;
 +      /* TODO: un-hardcode*/
 +      pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
 +              LINK_RATE_REF_FREQ_IN_KHZ;
 +      pixel_clk_params->flags.ENABLE_SS = 0;
 +      pixel_clk_params->color_depth =
 +              stream->timing.display_color_depth;
 +      pixel_clk_params->flags.DISPLAY_BLANKED = 1;
 +      pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding;
 +
 +      if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
 +              pixel_clk_params->color_depth = COLOR_DEPTH_888;
 +
 +      if (optc1_is_two_pixels_per_containter(&stream->timing) || odm_combine)
 +              pixel_clk_params->requested_pix_clk_100hz /= 2;
 +
 +      if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
 +              pixel_clk_params->requested_pix_clk_100hz *= 2;
 +}
 +
 +static void build_clamping_params(struct dc_stream_state *stream)
 +{
 +      stream->clamping.clamping_level = CLAMPING_FULL_RANGE;
 +      stream->clamping.c_depth = stream->timing.display_color_depth;
 +      stream->clamping.pixel_encoding = stream->timing.pixel_encoding;
 +}
 +
 +static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
 +{
 +      get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params);
 +
 +      pipe_ctx->clock_source->funcs->get_pix_clk_dividers(
 +              pipe_ctx->clock_source,
 +              &pipe_ctx->stream_res.pix_clk_params,
 +              &pipe_ctx->pll_settings);
 +
 +      pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;
 +
 +      resource_build_bit_depth_reduction_params(pipe_ctx->stream,
 +                                      &pipe_ctx->stream->bit_depth_params);
 +      build_clamping_params(pipe_ctx->stream);
 +
 +      return DC_OK;
 +}
 +
 +enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream)
 +{
 +      enum dc_status status = DC_OK;
 +      struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
 +
 +      /* TODO: seems no longer needed */
 +      /*      if (old_context && resource_is_stream_unchanged(old_context, stream)) {
 +                      if (stream != NULL && old_context->streams[i] != NULL) {
 +                               todo: shouldn't have to copy missing parameter here
 +                              resource_build_bit_depth_reduction_params(stream,
 +                                              &stream->bit_depth_params);
 +                              stream->clamping.pixel_encoding =
 +                                              stream->timing.pixel_encoding;
 +
 +                              resource_build_bit_depth_reduction_params(stream,
 +                                                              &stream->bit_depth_params);
 +                              build_clamping_params(stream);
 +
 +                              continue;
 +                      }
 +              }
 +      */
 +
 +      if (!pipe_ctx)
 +              return DC_ERROR_UNEXPECTED;
 +
 +      status = build_pipe_hw_param(pipe_ctx);
 +
 +      return status;
 +}
 +
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +
 +static void acquire_dsc(struct resource_context *res_ctx,
 +                      const struct resource_pool *pool,
 +                      struct display_stream_compressor **dsc)
 +{
 +      int i;
 +
 +      ASSERT(*dsc == NULL);
 +      *dsc = NULL;
 +
 +      /* Find first free DSC */
 +      for (i = 0; i < pool->res_cap->num_dsc; i++)
 +              if (!res_ctx->is_dsc_acquired[i]) {
 +                      *dsc = pool->dscs[i];
 +                      res_ctx->is_dsc_acquired[i] = true;
 +                      break;
 +              }
 +}
 +
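 +/* Return a DSC to the pool: clear its acquired flag and the caller's pointer */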
 +static void release_dsc(struct resource_context *res_ctx,
 +                      const struct resource_pool *pool,
 +                      struct display_stream_compressor **dsc)
 +{
 +      int i;
 +
 +      for (i = 0; i < pool->res_cap->num_dsc; i++)
 +              if (pool->dscs[i] == *dsc) {
 +                      res_ctx->is_dsc_acquired[i] = false;
 +                      *dsc = NULL;
 +                      break;
 +              }
 +}
 +
 +#endif
 +
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +static enum dc_status add_dsc_to_stream_resource(struct dc *dc,
 +              struct dc_state *dc_ctx,
 +              struct dc_stream_state *dc_stream)
 +{
 +      enum dc_status result = DC_OK;
 +      int i;
 +      const struct resource_pool *pool = dc->res_pool;
 +
 +      /* Get a DSC if required and available */
 +      for (i = 0; i < dc->res_pool->pipe_count; i++) {
 +              struct pipe_ctx *pipe_ctx = &dc_ctx->res_ctx.pipe_ctx[i];
 +
 +              if (pipe_ctx->stream != dc_stream)
 +                      continue;
 +
 +              acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc);
 +
 +              /* The number of DSCs can be less than the number of pipes */
 +              if (!pipe_ctx->stream_res.dsc) {
 +                      dm_output_to_console("No DSCs available\n");
 +                      result = DC_NO_DSC_RESOURCE;
 +              }
 +
 +              break;
 +      }
 +
 +      return result;
 +}
 +
 +static enum dc_status remove_dsc_from_stream_resource(struct dc *dc,
 +              struct dc_state *new_ctx,
 +              struct dc_stream_state *dc_stream)
 +{
 +      struct pipe_ctx *pipe_ctx = NULL;
 +      int i;
 +
 +      for (i = 0; i < MAX_PIPES; i++) {
 +              if (new_ctx->res_ctx.pipe_ctx[i].stream == dc_stream && !new_ctx->res_ctx.pipe_ctx[i].top_pipe) {
 +                      pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
 +                      break;
 +              }
 +      }
 +
 +      if (!pipe_ctx)
 +              return DC_ERROR_UNEXPECTED;
 +
 +      if (pipe_ctx->stream_res.dsc) {
 +              struct pipe_ctx *odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
 +
 +              release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc);
 +              if (odm_pipe)
 +                      release_dsc(&new_ctx->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc);
 +      }
 +
 +      return DC_OK;
 +}
 +#endif
 +
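 +/* Map pool and PHY clock resources for a new stream; a DSC is acquired as
 + * well when the stream's timing requires one.
 + */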
 +enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream)
 +{
 +      enum dc_status result = DC_ERROR_UNEXPECTED;
 +
 +      result = resource_map_pool_resources(dc, new_ctx, dc_stream);
 +
 +      if (result == DC_OK)
 +              result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream);
 +
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +      /* Get a DSC if required and available */
 +      if (result == DC_OK && dc_stream->timing.flags.DSC)
 +              result = add_dsc_to_stream_resource(dc, new_ctx, dc_stream);
 +#endif
 +
 +      if (result == DC_OK)
 +              result = dcn20_build_mapped_resource(dc, new_ctx, dc_stream);
 +
 +      return result;
 +}
 +
 +enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream)
 +{
 +      enum dc_status result = DC_OK;
 +
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +      result = remove_dsc_from_stream_resource(dc, new_ctx, dc_stream);
 +#endif
 +
 +      return result;
 +}
 +
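 +/* Translate a DC surface swizzle mode into its DML equivalent */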
 +static void swizzle_to_dml_params(
 +              enum swizzle_mode_values swizzle,
 +              unsigned int *sw_mode)
 +{
 +      switch (swizzle) {
 +      case DC_SW_LINEAR:
 +              *sw_mode = dm_sw_linear;
 +              break;
 +      case DC_SW_4KB_S:
 +              *sw_mode = dm_sw_4kb_s;
 +              break;
 +      case DC_SW_4KB_S_X:
 +              *sw_mode = dm_sw_4kb_s_x;
 +              break;
 +      case DC_SW_4KB_D:
 +              *sw_mode = dm_sw_4kb_d;
 +              break;
 +      case DC_SW_4KB_D_X:
 +              *sw_mode = dm_sw_4kb_d_x;
 +              break;
 +      case DC_SW_64KB_S:
 +              *sw_mode = dm_sw_64kb_s;
 +              break;
 +      case DC_SW_64KB_S_X:
 +              *sw_mode = dm_sw_64kb_s_x;
 +              break;
 +      case DC_SW_64KB_S_T:
 +              *sw_mode = dm_sw_64kb_s_t;
 +              break;
 +      case DC_SW_64KB_D:
 +              *sw_mode = dm_sw_64kb_d;
 +              break;
 +      case DC_SW_64KB_D_X:
 +              *sw_mode = dm_sw_64kb_d_x;
 +              break;
 +      case DC_SW_64KB_D_T:
 +              *sw_mode = dm_sw_64kb_d_t;
 +              break;
 +      case DC_SW_64KB_R_X:
 +              *sw_mode = dm_sw_64kb_r_x;
 +              break;
 +      case DC_SW_VAR_S:
 +              *sw_mode = dm_sw_var_s;
 +              break;
 +      case DC_SW_VAR_S_X:
 +              *sw_mode = dm_sw_var_s_x;
 +              break;
 +      case DC_SW_VAR_D:
 +              *sw_mode = dm_sw_var_d;
 +              break;
 +      case DC_SW_VAR_D_X:
 +              *sw_mode = dm_sw_var_d_x;
 +              break;
 +      default:
 +              ASSERT(0); /* Not supported */
 +              break;
 +      }
 +}
 +
 +static bool dcn20_split_stream_for_combine(
 +              struct resource_context *res_ctx,
 +              const struct resource_pool *pool,
 +              struct pipe_ctx *primary_pipe,
 +              struct pipe_ctx *secondary_pipe,
 +              bool is_odm_combine)
 +{
 +      int pipe_idx = secondary_pipe->pipe_idx;
 +      struct scaler_data *sd = &primary_pipe->plane_res.scl_data;
 +      struct pipe_ctx *sec_bot_pipe = secondary_pipe->bottom_pipe;
 +      int new_width;
 +
 +      *secondary_pipe = *primary_pipe;
 +      secondary_pipe->bottom_pipe = sec_bot_pipe;
 +
 +      secondary_pipe->pipe_idx = pipe_idx;
 +      secondary_pipe->plane_res.mi = pool->mis[secondary_pipe->pipe_idx];
 +      secondary_pipe->plane_res.hubp = pool->hubps[secondary_pipe->pipe_idx];
 +      secondary_pipe->plane_res.ipp = pool->ipps[secondary_pipe->pipe_idx];
 +      secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx];
 +      secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx];
 +      secondary_pipe->plane_res.mpcc_inst = pool->dpps[secondary_pipe->pipe_idx]->inst;
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +      secondary_pipe->stream_res.dsc = NULL;
 +#endif
 +      if (primary_pipe->bottom_pipe && primary_pipe->bottom_pipe != secondary_pipe) {
 +              ASSERT(!secondary_pipe->bottom_pipe);
 +              secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe;
 +              secondary_pipe->bottom_pipe->top_pipe = secondary_pipe;
 +      }
 +      primary_pipe->bottom_pipe = secondary_pipe;
 +      secondary_pipe->top_pipe = primary_pipe;
 +
 +      if (is_odm_combine) {
 +              if (primary_pipe->plane_state) {
 +                      /* HACTIVE halved for odm combine */
 +                      sd->h_active /= 2;
 +                      /* Copy scl_data to secondary pipe */
 +                      secondary_pipe->plane_res.scl_data = *sd;
 +
 +                      /* Calculate new vp and recout for left pipe */
 +                      /* Need at least 16 pixels width per side */
 +                      if (sd->recout.x + 16 >= sd->h_active)
 +                              return false;
 +                      new_width = sd->h_active - sd->recout.x;
 +                      sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int(
 +                                      sd->ratios.horz, sd->recout.width - new_width));
 +                      sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int(
 +                                      sd->ratios.horz_c, sd->recout.width - new_width));
 +                      sd->recout.width = new_width;
 +
 +                      /* Calculate new vp and recout for right pipe */
 +                      sd = &secondary_pipe->plane_res.scl_data;
 +                      new_width = sd->recout.width + sd->recout.x - sd->h_active;
 +                      /* Need at least 16 pixels width per side */
 +                      if (new_width <= 16)
 +                              return false;
 +                      sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int(
 +                                      sd->ratios.horz, sd->recout.width - new_width));
 +                      sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int(
 +                                      sd->ratios.horz_c, sd->recout.width - new_width));
 +                      sd->recout.width = new_width;
 +                      sd->viewport.x += dc_fixpt_floor(dc_fixpt_mul_int(
 +                                      sd->ratios.horz, sd->h_active - sd->recout.x));
 +                      sd->viewport_c.x += dc_fixpt_floor(dc_fixpt_mul_int(
 +                                      sd->ratios.horz_c, sd->h_active - sd->recout.x));
 +                      sd->recout.x = 0;
 +              }
 +              secondary_pipe->stream_res.opp = pool->opps[secondary_pipe->pipe_idx];
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +              if (secondary_pipe->stream->timing.flags.DSC == 1) {
 +                      acquire_dsc(res_ctx, pool, &secondary_pipe->stream_res.dsc);
 +                      ASSERT(secondary_pipe->stream_res.dsc);
 +                      if (secondary_pipe->stream_res.dsc == NULL)
 +                              return false;
 +              }
 +#endif
 +      } else {
 +              ASSERT(primary_pipe->plane_state);
 +              resource_build_scaling_params(primary_pipe);
 +              resource_build_scaling_params(secondary_pipe);
 +      }
 +
 +      return true;
 +}
 +
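 +/* Mirror per-stream writeback (DWB) parameters into the DML pipe descriptors
 + * so the mode library accounts for writeback bandwidth.
 + */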
 +void dcn20_populate_dml_writeback_from_context(
 +              struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
 +{
 +      int pipe_cnt, i;
 +
 +      for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
 +              struct dc_writeback_info *wb_info;
 +
 +              if (!res_ctx->pipe_ctx[i].stream)
 +                      continue;
 +
 +              /* Only the stream's first writeback_info entry is considered */
 +              wb_info = &res_ctx->pipe_ctx[i].stream->writeback_info[0];
 +
 +              /* Set writeback information */
 +              pipes[pipe_cnt].dout.wb_enable = wb_info->wb_enabled ? 1 : 0;
 +              pipes[pipe_cnt].dout.num_active_wb++;
 +              pipes[pipe_cnt].dout.wb.wb_src_height = wb_info->dwb_params.cnv_params.crop_height;
 +              pipes[pipe_cnt].dout.wb.wb_src_width = wb_info->dwb_params.cnv_params.crop_width;
 +              pipes[pipe_cnt].dout.wb.wb_dst_width = wb_info->dwb_params.dest_width;
 +              pipes[pipe_cnt].dout.wb.wb_dst_height = wb_info->dwb_params.dest_height;
 +              pipes[pipe_cnt].dout.wb.wb_htaps_luma = 1;
 +              pipes[pipe_cnt].dout.wb.wb_vtaps_luma = 1;
 +              pipes[pipe_cnt].dout.wb.wb_htaps_chroma = wb_info->dwb_params.scaler_taps.h_taps_c;
 +              pipes[pipe_cnt].dout.wb.wb_vtaps_chroma = wb_info->dwb_params.scaler_taps.v_taps_c;
 +              pipes[pipe_cnt].dout.wb.wb_hratio = 1.0;
 +              pipes[pipe_cnt].dout.wb.wb_vratio = 1.0;
 +              if (wb_info->dwb_params.out_format == dwb_scaler_mode_yuv420) {
 +                      if (wb_info->dwb_params.output_depth == DWB_OUTPUT_PIXEL_DEPTH_8BPC)
 +                              pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_8;
 +                      else
 +                              pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_10;
 +              } else
 +                      pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_444_32;
 +
 +              pipe_cnt++;
 +      }
 +}
 +
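 +/*
 + * Fill DML pipe descriptors from the resource context: first determine
 + * whether all active streams have synchronizable vblanks, then translate
 + * each stream's timing, output type and source state for the mode library.
 + */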
 +int dcn20_populate_dml_pipes_from_context(
 +              struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
 +{
 +      int pipe_cnt, i;
 +      bool synchronized_vblank = true;
 +
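 +      /* First pass: pipe_cnt temporarily holds the index of the first active
 +       * pipe; vblanks are synchronized only if every stream pair is
 +       * timing-synchronizable.
 +       */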
 +      for (i = 0, pipe_cnt = -1; i < dc->res_pool->pipe_count; i++) {
 +              if (!res_ctx->pipe_ctx[i].stream)
 +                      continue;
 +
 +              if (pipe_cnt < 0) {
 +                      pipe_cnt = i;
 +                      continue;
 +              }
 +              if (!resource_are_streams_timing_synchronizable(
 +                              res_ctx->pipe_ctx[pipe_cnt].stream,
 +                              res_ctx->pipe_ctx[i].stream)) {
 +                      synchronized_vblank = false;
 +                      break;
 +              }
 +      }
 +
 +      for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
 +              struct dc_crtc_timing *timing = &res_ctx->pipe_ctx[i].stream->timing;
 +              int output_bpc;
 +
 +              if (!res_ctx->pipe_ctx[i].stream)
 +                      continue;
 +              /* todo:
 +              pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0;
 +              pipes[pipe_cnt].pipe.src.dcc = 0;
 +              pipes[pipe_cnt].pipe.src.vm = 0;*/
 +
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +              pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC;
 +              /* todo: rotation?*/
 +              pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h;
 +#endif
 +              if (res_ctx->pipe_ctx[i].stream->use_dynamic_meta) {
 +                      pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = true;
 +                      /* 1/2 vblank */
 +                      pipes[pipe_cnt].pipe.src.dynamic_metadata_lines_before_active =
 +                              (timing->v_total - timing->v_addressable
 +                                      - timing->v_border_top - timing->v_border_bottom) / 2;
 +                      /* 36 bytes dp, 32 hdmi */
 +                      pipes[pipe_cnt].pipe.src.dynamic_metadata_xmit_bytes =
 +                              dc_is_dp_signal(res_ctx->pipe_ctx[i].stream->signal) ? 36 : 32;
 +              }
 +              pipes[pipe_cnt].pipe.src.dcc = false;
 +              pipes[pipe_cnt].pipe.src.dcc_rate = 1;
 +              pipes[pipe_cnt].pipe.dest.synchronized_vblank_all_planes = synchronized_vblank;
 +              pipes[pipe_cnt].pipe.dest.hblank_start = timing->h_total - timing->h_front_porch;
 +              pipes[pipe_cnt].pipe.dest.hblank_end = pipes[pipe_cnt].pipe.dest.hblank_start
 +                              - timing->h_addressable
 +                              - timing->h_border_left
 +                              - timing->h_border_right;
 +              pipes[pipe_cnt].pipe.dest.vblank_start = timing->v_total - timing->v_front_porch;
 +              pipes[pipe_cnt].pipe.dest.vblank_end = pipes[pipe_cnt].pipe.dest.vblank_start
 +                              - timing->v_addressable
 +                              - timing->v_border_top
 +                              - timing->v_border_bottom;
 +              pipes[pipe_cnt].pipe.dest.htotal = timing->h_total;
 +              pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total;
 +              pipes[pipe_cnt].pipe.dest.hactive = timing->h_addressable;
 +              pipes[pipe_cnt].pipe.dest.vactive = timing->v_addressable;
 +              pipes[pipe_cnt].pipe.dest.interlaced = timing->flags.INTERLACE;
 +              pipes[pipe_cnt].pipe.dest.pixel_rate_mhz = timing->pix_clk_100hz/10000.0;
 +              if (timing->timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
 +                      pipes[pipe_cnt].pipe.dest.pixel_rate_mhz *= 2;
 +              pipes[pipe_cnt].pipe.dest.otg_inst = res_ctx->pipe_ctx[i].stream_res.tg->inst;
 +              pipes[pipe_cnt].dout.dp_lanes = 4;
 +              pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;
 +              pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max;
 +
 +              switch (res_ctx->pipe_ctx[i].stream->signal) {
 +              case SIGNAL_TYPE_DISPLAY_PORT_MST:
 +              case SIGNAL_TYPE_DISPLAY_PORT:
 +                      pipes[pipe_cnt].dout.output_type = dm_dp;
 +                      break;
 +              case SIGNAL_TYPE_EDP:
 +                      pipes[pipe_cnt].dout.output_type = dm_edp;
 +                      break;
 +              case SIGNAL_TYPE_HDMI_TYPE_A:
 +              case SIGNAL_TYPE_DVI_SINGLE_LINK:
 +              case SIGNAL_TYPE_DVI_DUAL_LINK:
 +                      pipes[pipe_cnt].dout.output_type = dm_hdmi;
 +                      break;
 +              default:
 +                      /* In case there is no signal, set dp with 4 lanes to allow max config */
 +                      pipes[pipe_cnt].dout.output_type = dm_dp;
 +                      pipes[pipe_cnt].dout.dp_lanes = 4;
 +              }
 +
 +              switch (res_ctx->pipe_ctx[i].stream->timing.display_color_depth) {
 +              case COLOR_DEPTH_666:
 +                      output_bpc = 6;
 +                      break;
 +              case COLOR_DEPTH_888:
 +                      output_bpc = 8;
 +                      break;
 +              case COLOR_DEPTH_101010:
 +                      output_bpc = 10;
 +                      break;
 +              case COLOR_DEPTH_121212:
 +                      output_bpc = 12;
 +                      break;
 +              case COLOR_DEPTH_141414:
 +                      output_bpc = 14;
 +                      break;
 +              case COLOR_DEPTH_161616:
 +                      output_bpc = 16;
 +                      break;
 +#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 +              case COLOR_DEPTH_999:
 +                      output_bpc = 9;
 +                      break;
 +              case COLOR_DEPTH_111111:
 +                      output_bpc = 11;
 +                      break;
 +#endif
 +              default:
 +                      output_bpc = 8;
 +                      break;
 +              }
 +
 +              switch (res_ctx->pipe_ctx[i].stream->timing.pixel_encoding) {
 +              case PIXEL_ENCODING_RGB:
 +              case PIXEL_ENCODING_YCBCR444:
 +                      pipes[pipe_cnt].dout.output_format = dm_444;
 +                      pipes[pipe_cnt].dout.output_bpp = output_bpc * 3;
 +                      break;
 +              case PIXEL_ENCODING_YCBCR420:
 +                      pipes[pipe_cnt].dout.output_format = dm_420;
 +                      pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3) / 2;
 +                      break;
 +              case PIXEL_ENCODING_YCBCR422:
 +                      if (true) /* todo */
 +                              pipes[pipe_cnt].dout.output_format = dm_s422;
 +                      else
 +                              pipes[pipe_cnt].dout.output_format = dm_n422;
 +                      pipes[pipe_cnt].dout.output_bpp = output_bpc * 2;
 +                      break;
 +              default:
 +                      pipes[pipe_cnt].dout.output_format = dm_444;
 +                      pipes[pipe_cnt].dout.output_bpp = output_bpc * 3;
 +              }
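+
+              /*
+               * Worked example of the bpp math above: at 10 bpc, dm_444
+               * carries 10 * 3 = 30 output_bpp, dm_420 carries
+               * (10 * 3) / 2 = 15, and the 4:2:2 formats carry 10 * 2 = 20.
+               */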
 +              pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx;
 +              if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state
 +                              == res_ctx->pipe_ctx[i].plane_state)
 +                      pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].top_pipe->pipe_idx;
 +
+              /* todo: default max for now, until there is logic reflecting this in dc */
 +              pipes[pipe_cnt].dout.output_bpc = 12;
 +              /*
 +               * Use max cursor settings for calculations to minimize
 +               * bw calculations due to cursor on/off
 +               */
 +              pipes[pipe_cnt].pipe.src.num_cursors = 2;
 +              pipes[pipe_cnt].pipe.src.cur0_src_width = 256;
 +              pipes[pipe_cnt].pipe.src.cur0_bpp = dm_cur_32bit;
 +              pipes[pipe_cnt].pipe.src.cur1_src_width = 256;
 +              pipes[pipe_cnt].pipe.src.cur1_bpp = dm_cur_32bit;
 +
 +              if (!res_ctx->pipe_ctx[i].plane_state) {
 +                      pipes[pipe_cnt].pipe.src.source_scan = dm_horz;
 +                      pipes[pipe_cnt].pipe.src.sw_mode = dm_sw_linear;
 +                      pipes[pipe_cnt].pipe.src.macro_tile_size = dm_64k_tile;
 +                      pipes[pipe_cnt].pipe.src.viewport_width = timing->h_addressable;
 +                      if (pipes[pipe_cnt].pipe.src.viewport_width > 1920)
 +                              pipes[pipe_cnt].pipe.src.viewport_width = 1920;
 +                      pipes[pipe_cnt].pipe.src.viewport_height = timing->v_addressable;
 +                      if (pipes[pipe_cnt].pipe.src.viewport_height > 1080)
 +                              pipes[pipe_cnt].pipe.src.viewport_height = 1080;
 +                      pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 63) / 64) * 64; /* linear sw only */
 +                      pipes[pipe_cnt].pipe.src.source_format = dm_444_32;
 +                      pipes[pipe_cnt].pipe.dest.recout_width = pipes[pipe_cnt].pipe.src.viewport_width; /*vp_width/hratio*/
 +                      pipes[pipe_cnt].pipe.dest.recout_height = pipes[pipe_cnt].pipe.src.viewport_height; /*vp_height/vratio*/
 +                      pipes[pipe_cnt].pipe.dest.full_recout_width = pipes[pipe_cnt].pipe.dest.recout_width;  /*when is_hsplit != 1*/
 +                      pipes[pipe_cnt].pipe.dest.full_recout_height = pipes[pipe_cnt].pipe.dest.recout_height; /*when is_hsplit != 1*/
 +                      pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_16;
 +                      pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio = 1.0;
 +                      pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio = 1.0;
 +                      pipes[pipe_cnt].pipe.scale_ratio_depth.scl_enable = 0; /*Lb only or Full scl*/
 +                      pipes[pipe_cnt].pipe.scale_taps.htaps = 1;
 +                      pipes[pipe_cnt].pipe.scale_taps.vtaps = 1;
 +                      pipes[pipe_cnt].pipe.src.is_hsplit = 0;
 +                      pipes[pipe_cnt].pipe.dest.odm_combine = 0;
 +                      pipes[pipe_cnt].pipe.dest.vtotal_min = timing->v_total;
 +                      pipes[pipe_cnt].pipe.dest.vtotal_max = timing->v_total;
 +              } else {
 +                      struct dc_plane_state *pln = res_ctx->pipe_ctx[i].plane_state;
 +                      struct scaler_data *scl = &res_ctx->pipe_ctx[i].plane_res.scl_data;
 +
 +                      pipes[pipe_cnt].pipe.src.immediate_flip = pln->flip_immediate;
 +                      pipes[pipe_cnt].pipe.src.is_hsplit = (res_ctx->pipe_ctx[i].bottom_pipe
 +                                      && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln)
 +                                      || (res_ctx->pipe_ctx[i].top_pipe
 +                                      && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln);
 +                      pipes[pipe_cnt].pipe.dest.odm_combine = (res_ctx->pipe_ctx[i].bottom_pipe
 +                                      && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln
 +                                      && res_ctx->pipe_ctx[i].bottom_pipe->stream_res.opp
 +                                              != res_ctx->pipe_ctx[i].stream_res.opp)
 +                              || (res_ctx->pipe_ctx[i].top_pipe
 +                                      && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln
 +                                      && res_ctx->pipe_ctx[i].top_pipe->stream_res.opp
 +                                              != res_ctx->pipe_ctx[i].stream_res.opp);
 +                      pipes[pipe_cnt].pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90
 +                                      || pln->rotation == ROTATION_ANGLE_270 ? dm_vert : dm_horz;
 +                      pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport.y;
 +                      pipes[pipe_cnt].pipe.src.viewport_y_c = scl->viewport_c.y;
 +                      pipes[pipe_cnt].pipe.src.viewport_width = scl->viewport.width;
 +                      pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c.width;
 +                      pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport.height;
 +                      pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c.height;
 +                      if (pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
 +                              pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.video.luma_pitch;
 +                              pipes[pipe_cnt].pipe.src.data_pitch_c = pln->plane_size.video.chroma_pitch;
 +                              pipes[pipe_cnt].pipe.src.meta_pitch = pln->dcc.video.meta_pitch_l;
 +                              pipes[pipe_cnt].pipe.src.meta_pitch_c = pln->dcc.video.meta_pitch_c;
 +                      } else {
 +                              pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.grph.surface_pitch;
 +                              pipes[pipe_cnt].pipe.src.meta_pitch = pln->dcc.grph.meta_pitch;
 +                      }
 +                      pipes[pipe_cnt].pipe.src.dcc = pln->dcc.enable;
 +                      pipes[pipe_cnt].pipe.dest.recout_width = scl->recout.width;
 +                      pipes[pipe_cnt].pipe.dest.recout_height = scl->recout.height;
 +                      pipes[pipe_cnt].pipe.dest.full_recout_width = scl->recout.width;
 +                      pipes[pipe_cnt].pipe.dest.full_recout_height = scl->recout.height;
 +                      if (res_ctx->pipe_ctx[i].bottom_pipe && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln) {
 +                              pipes[pipe_cnt].pipe.dest.full_recout_width +=
 +                                              res_ctx->pipe_ctx[i].bottom_pipe->plane_res.scl_data.recout.width;
 +                              pipes[pipe_cnt].pipe.dest.full_recout_height +=
 +                                              res_ctx->pipe_ctx[i].bottom_pipe->plane_res.scl_data.recout.height;
 +                      } else if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln) {
 +                              pipes[pipe_cnt].pipe.dest.full_recout_width +=
 +                                              res_ctx->pipe_ctx[i].top_pipe->plane_res.scl_data.recout.width;
 +                              pipes[pipe_cnt].pipe.dest.full_recout_height +=
 +                                              res_ctx->pipe_ctx[i].top_pipe->plane_res.scl_data.recout.height;
 +                      }
 +
 +                      pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_16;
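+                      /*
+                       * The dc fixed-point ratios below keep a 32-bit
+                       * fraction, so dividing the raw value by 2^32 recovers
+                       * the ratio as a double; e.g. a raw value of 3ULL << 31
+                       * decodes to a 1.5x scale ratio.
+                       */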
 +                      pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio = (double) scl->ratios.horz.value / (1ULL<<32);
 +                      pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio_c = (double) scl->ratios.horz_c.value / (1ULL<<32);
 +                      pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio = (double) scl->ratios.vert.value / (1ULL<<32);
 +                      pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio_c = (double) scl->ratios.vert_c.value / (1ULL<<32);
 +                      pipes[pipe_cnt].pipe.scale_ratio_depth.scl_enable =
 +                                      scl->ratios.vert.value != dc_fixpt_one.value
 +                                      || scl->ratios.horz.value != dc_fixpt_one.value
 +                                      || scl->ratios.vert_c.value != dc_fixpt_one.value
 +                                      || scl->ratios.horz_c.value != dc_fixpt_one.value /*Lb only or Full scl*/
 +                                      || dc->debug.always_scale; /*support always scale*/
 +                      pipes[pipe_cnt].pipe.scale_taps.htaps = scl->taps.h_taps;
 +                      pipes[pipe_cnt].pipe.scale_taps.htaps_c = scl->taps.h_taps_c;
 +                      pipes[pipe_cnt].pipe.scale_taps.vtaps = scl->taps.v_taps;
 +                      pipes[pipe_cnt].pipe.scale_taps.vtaps_c = scl->taps.v_taps_c;
 +
 +                      pipes[pipe_cnt].pipe.src.macro_tile_size =
 +                                      swizzle_mode_to_macro_tile_size(pln->tiling_info.gfx9.swizzle);
 +                      swizzle_to_dml_params(pln->tiling_info.gfx9.swizzle,
 +                                      &pipes[pipe_cnt].pipe.src.sw_mode);
 +
 +                      switch (pln->format) {
 +                      case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
 +                      case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
 +                              pipes[pipe_cnt].pipe.src.source_format = dm_420_8;
 +                              break;
 +                      case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
 +                      case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
 +                              pipes[pipe_cnt].pipe.src.source_format = dm_420_10;
 +                              break;
 +                      case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
 +                      case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
 +                      case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
 +                              pipes[pipe_cnt].pipe.src.source_format = dm_444_64;
 +                              break;
 +                      case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
 +                      case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
 +                              pipes[pipe_cnt].pipe.src.source_format = dm_444_16;
 +                              break;
 +                      case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
 +                              pipes[pipe_cnt].pipe.src.source_format = dm_444_8;
 +                              break;
 +                      default:
 +                              pipes[pipe_cnt].pipe.src.source_format = dm_444_32;
 +                              break;
 +                      }
 +              }
 +
 +              pipe_cnt++;
 +      }
 +
 +      /* populate writeback information */
 +      dc->res_pool->funcs->populate_dml_writeback_from_context(dc, res_ctx, pipes);
 +
 +      return pipe_cnt;
 +}
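+
+/*
+ * The pipe count returned above is what dcn20_validate_bandwidth() later
+ * feeds into dml_get_voltage_level(). Streams without a plane get the
+ * conservative defaults filled in above (linear swizzle, viewport capped
+ * at 1920x1080) so DML still has a plausible surface to size bandwidth
+ * against.
+ */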
 +
 +unsigned int dcn20_calc_max_scaled_time(
 +              unsigned int time_per_pixel,
 +              enum mmhubbub_wbif_mode mode,
 +              unsigned int urgent_watermark)
 +{
 +      unsigned int time_per_byte = 0;
+      unsigned int total_y_free_entry = 0x200; /* two memory pieces for luma */
+      unsigned int total_c_free_entry = 0x140; /* two memory pieces for chroma */
 +      unsigned int small_free_entry, max_free_entry;
 +      unsigned int buf_lh_capability;
 +      unsigned int max_scaled_time;
 +
 +      if (mode == PACKED_444) /* packed mode */
 +              time_per_byte = time_per_pixel/4;
 +      else if (mode == PLANAR_420_8BPC)
 +              time_per_byte  = time_per_pixel;
 +      else if (mode == PLANAR_420_10BPC) /* p010 */
 +              time_per_byte  = time_per_pixel * 819/1024;
 +
 +      if (time_per_byte == 0)
 +              time_per_byte = 1;
 +
 +      small_free_entry  = (total_y_free_entry > total_c_free_entry) ? total_c_free_entry : total_y_free_entry;
 +      max_free_entry    = (mode == PACKED_444) ? total_y_free_entry + total_c_free_entry : small_free_entry;
+      buf_lh_capability = max_free_entry*time_per_byte*32/16; /* there is a 4-bit fraction */
 +      max_scaled_time   = buf_lh_capability - urgent_watermark;
 +      return max_scaled_time;
 +}
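+
+/*
+ * Worked example for the math above (numbers only, no hardware claim):
+ * PACKED_444 with time_per_pixel = 16 gives time_per_byte = 16 / 4 = 4;
+ * max_free_entry = 0x200 + 0x140 = 832, so buf_lh_capability =
+ * 832 * 4 * 32 / 16 = 6656 and the function returns 6656 minus the
+ * urgent watermark.
+ */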
 +
 +void dcn20_set_mcif_arb_params(
 +              struct dc *dc,
 +              struct dc_state *context,
 +              display_e2e_pipe_params_st *pipes,
 +              int pipe_cnt)
 +{
 +      enum mmhubbub_wbif_mode wbif_mode;
 +      struct mcif_arb_params *wb_arb_params;
 +      int i, j, k, dwb_pipe;
 +
 +      /* Writeback MCIF_WB arbitration parameters */
 +      dwb_pipe = 0;
 +      for (i = 0; i < dc->res_pool->pipe_count; i++) {
 +
 +              if (!context->res_ctx.pipe_ctx[i].stream)
 +                      continue;
 +
 +              for (j = 0; j < MAX_DWB_PIPES; j++) {
+                      if (!context->res_ctx.pipe_ctx[i].stream->writeback_info[j].wb_enabled)
 +                              continue;
 +
 +                      wb_arb_params = &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[dwb_pipe];
 +
 +                      if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].dwb_params.out_format == dwb_scaler_mode_yuv420) {
 +                              if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].dwb_params.output_depth == DWB_OUTPUT_PIXEL_DEPTH_8BPC)
 +                                      wbif_mode = PLANAR_420_8BPC;
 +                              else
 +                                      wbif_mode = PLANAR_420_10BPC;
+                      } else {
+                              wbif_mode = PACKED_444;
+                      }
 +
+                      for (k = 0; k < ARRAY_SIZE(wb_arb_params->cli_watermark); k++) {
 +                              wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +                              wb_arb_params->pstate_watermark[k] = get_wm_writeback_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +                      }
+                      wb_arb_params->time_per_pixel = 16.0 / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk; /* 4-bit fraction, ms */
 +                      wb_arb_params->slice_lines = 32;
 +                      wb_arb_params->arbitration_slice = 2;
 +                      wb_arb_params->max_scaled_time = dcn20_calc_max_scaled_time(wb_arb_params->time_per_pixel,
 +                              wbif_mode,
 +                              wb_arb_params->cli_watermark[0]); /* assume 4 watermark sets have the same value */
 +
 +                      dwb_pipe++;
 +
 +                      if (dwb_pipe >= MAX_DWB_PIPES)
 +                              return;
 +              }
 +              if (dwb_pipe >= MAX_DWB_PIPES)
 +                      return;
 +      }
 +}
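+
+/*
+ * dcn20_set_mcif_arb_params() is exposed through res_pool->funcs (see
+ * dcn20_res_pool_funcs below) and is called from
+ * dcn20_validate_bandwidth() after the DML watermarks are computed, so
+ * the writeback arbitration always reflects the validated state.
+ */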
 +
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +static bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
 +{
 +      int i;
 +
 +      /* Validate DSC config, dsc count validation is already done */
 +      for (i = 0; i < dc->res_pool->pipe_count; i++) {
 +              struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
 +              struct dc_stream_state *stream = pipe_ctx->stream;
 +              struct dsc_config dsc_cfg;
 +
 +              /* Only need to validate top pipe */
 +              if (pipe_ctx->top_pipe || !stream || !stream->timing.flags.DSC)
 +                      continue;
 +
 +              dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left
 +                              + stream->timing.h_border_right;
 +              dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top
 +                              + stream->timing.v_border_bottom;
 +              if (dc_res_get_odm_bottom_pipe(pipe_ctx))
 +                      dsc_cfg.pic_width /= 2;
 +              dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
 +              dsc_cfg.color_depth = stream->timing.display_color_depth;
 +              dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
 +
 +              if (!pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg))
 +                      return false;
 +      }
 +      return true;
 +}
 +#endif
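+
+/*
+ * Note on the DSC validation above: when an ODM bottom pipe is attached,
+ * each DSC instance compresses only half of the addressable width, which
+ * is why pic_width is halved before dsc_validate_stream() is called.
+ */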
 +
 +bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
 +              bool fast_validate)
 +{
 +      bool out = false;
 +
 +      BW_VAL_TRACE_SETUP();
 +
 +      int pipe_cnt, i, pipe_idx, vlevel, vlevel_unsplit;
 +      int pipe_split_from[MAX_PIPES];
 +      bool odm_capable = context->bw_ctx.dml.ip.odm_capable;
 +      bool force_split = false;
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +      bool failed_non_odm_dsc = false;
 +#endif
 +      int split_threshold = dc->res_pool->pipe_count / 2;
 +      bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC;
 +      display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
 +      DC_LOGGER_INIT(dc->ctx->logger);
 +
 +      BW_VAL_TRACE_COUNT();
 +
 +      ASSERT(pipes);
 +      if (!pipes)
 +              return false;
 +
 +      for (i = 0; i < dc->res_pool->pipe_count; i++) {
 +              struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 +              struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;
 +
 +              if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state)
 +                      continue;
 +
 +              /* merge previously split pipe since mode support needs to make the decision */
 +              pipe->bottom_pipe = hsplit_pipe->bottom_pipe;
 +              if (hsplit_pipe->bottom_pipe)
 +                      hsplit_pipe->bottom_pipe->top_pipe = pipe;
 +              hsplit_pipe->plane_state = NULL;
 +              hsplit_pipe->stream = NULL;
 +              hsplit_pipe->top_pipe = NULL;
 +              hsplit_pipe->bottom_pipe = NULL;
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +              if (hsplit_pipe->stream_res.dsc && hsplit_pipe->stream_res.dsc != pipe->stream_res.dsc)
 +                      release_dsc(&context->res_ctx, dc->res_pool, &hsplit_pipe->stream_res.dsc);
 +#endif
 +              /* Clear plane_res and stream_res */
 +              memset(&hsplit_pipe->plane_res, 0, sizeof(hsplit_pipe->plane_res));
 +              memset(&hsplit_pipe->stream_res, 0, sizeof(hsplit_pipe->stream_res));
 +              if (pipe->plane_state)
 +                      resource_build_scaling_params(pipe);
 +      }
 +
 +      if (dc->res_pool->funcs->populate_dml_pipes)
 +              pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
 +                      &context->res_ctx, pipes);
 +      else
 +              pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
 +                      &context->res_ctx, pipes);
 +
 +      if (!pipe_cnt) {
 +              BW_VAL_TRACE_SKIP(pass);
 +              out = true;
 +              goto validate_out;
 +      }
 +
 +      context->bw_ctx.dml.ip.odm_capable = 0;
 +
 +      vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
 +
 +      context->bw_ctx.dml.ip.odm_capable = odm_capable;
 +
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +      /* 1 dsc per stream dsc validation */
 +      if (vlevel <= context->bw_ctx.dml.soc.num_states)
 +              if (!dcn20_validate_dsc(dc, context)) {
 +                      failed_non_odm_dsc = true;
 +                      vlevel = context->bw_ctx.dml.soc.num_states + 1;
 +              }
 +#endif
 +
 +      if (vlevel > context->bw_ctx.dml.soc.num_states && odm_capable)
 +              vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
 +
 +      if (vlevel > context->bw_ctx.dml.soc.num_states)
 +              goto validate_fail;
 +
 +      if ((context->stream_count > split_threshold && dc->current_state->stream_count <= split_threshold)
 +              || (context->stream_count <= split_threshold && dc->current_state->stream_count > split_threshold))
 +              context->commit_hints.full_update_needed = true;
 +
+      /* initialize pipe_split_from to invalid idx */
 +      for (i = 0; i < MAX_PIPES; i++)
 +              pipe_split_from[i] = -1;
 +
+      /* Conditionals that apply only to single-display configs are set here */
 +      for (i = 0; i < dc->res_pool->pipe_count; i++) {
 +              struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 +              bool exit_loop = false;
 +
 +              if (!pipe->stream || pipe->top_pipe)
 +                      continue;
 +
 +              if (dc->debug.force_single_disp_pipe_split) {
 +                      if (!force_split)
 +                              force_split = true;
 +                      else {
 +                              force_split = false;
 +                              exit_loop = true;
 +                      }
 +              }
 +              if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP) {
 +                      if (avoid_split)
 +                              avoid_split = false;
 +                      else {
 +                              avoid_split = true;
 +                              exit_loop = true;
 +                      }
 +              }
 +              if (exit_loop)
 +                      break;
 +      }
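+
+      /*
+       * Net effect of the toggles above: force_split/avoid_split only keep
+       * their single-display value when exactly one stream is present; a
+       * second eligible stream flips the flag back and breaks out of the
+       * loop.
+       */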
 +
 +      if (context->stream_count > split_threshold)
 +              avoid_split = true;
 +
 +      vlevel_unsplit = vlevel;
 +      for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
 +              if (!context->res_ctx.pipe_ctx[i].stream)
 +                      continue;
 +              for (; vlevel_unsplit <= context->bw_ctx.dml.soc.num_states; vlevel_unsplit++)
 +                      if (context->bw_ctx.dml.vba.NoOfDPP[vlevel_unsplit][0][pipe_idx] == 1)
 +                              break;
 +              pipe_idx++;
 +      }
 +
 +      for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
 +              struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 +              struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;
 +              bool need_split = true;
 +              bool need_split3d;
 +
 +              if (!pipe->stream || pipe_split_from[i] >= 0)
 +                      continue;
 +
 +              pipe_idx++;
 +
 +              if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
 +                      force_split = true;
 +                      context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] = true;
 +                      context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = true;
 +              }
 +              if (force_split && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1)
 +                      context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2;
+              if (dc->config.forced_clocks) {
 +                      context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] =
 +                                      context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
 +              }
 +              if (!pipe->top_pipe && !pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
 +                      hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, dc->res_pool, pipe);
 +                      ASSERT(hsplit_pipe);
 +                      if (!dcn20_split_stream_for_combine(
 +                                      &context->res_ctx, dc->res_pool,
 +                                      pipe, hsplit_pipe,
 +                                      true))
 +                              goto validate_fail;
 +                      pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
 +                      dcn20_build_mapped_resource(dc, context, pipe->stream);
 +              }
 +
 +              if (!pipe->plane_state)
 +                      continue;
 +              /* Skip 2nd half of already split pipe */
 +              if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state)
 +                      continue;
 +
 +              need_split3d = ((pipe->stream->view_format ==
 +                              VIEW_3D_FORMAT_SIDE_BY_SIDE ||
 +                              pipe->stream->view_format ==
 +                              VIEW_3D_FORMAT_TOP_AND_BOTTOM) &&
 +                              (pipe->stream->timing.timing_3d_format ==
 +                              TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
 +                               pipe->stream->timing.timing_3d_format ==
 +                              TIMING_3D_FORMAT_SIDE_BY_SIDE));
 +
 +              if (avoid_split && vlevel_unsplit <= context->bw_ctx.dml.soc.num_states && !force_split && !need_split3d) {
 +                      need_split = false;
 +                      vlevel = vlevel_unsplit;
 +                      context->bw_ctx.dml.vba.maxMpcComb = 0;
 +              } else
 +                      need_split = context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 2;
 +
 +              /* We do not support mpo + odm at the moment */
 +              if (hsplit_pipe && hsplit_pipe->plane_state != pipe->plane_state
 +                              && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx])
 +                      goto validate_fail;
 +
 +              if (need_split3d || need_split || force_split) {
 +                      if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) {
+                              /* a pipe that was not previously split now needs a split */
 +                              hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, dc->res_pool, pipe);
 +                              ASSERT(hsplit_pipe || force_split);
 +                              if (!hsplit_pipe)
 +                                      continue;
 +
 +                              if (!dcn20_split_stream_for_combine(
 +                                              &context->res_ctx, dc->res_pool,
 +                                              pipe, hsplit_pipe,
 +                                              context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]))
 +                                      goto validate_fail;
 +                              pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
 +                      }
 +              } else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
 +                      /* merge should already have been done */
 +                      ASSERT(0);
 +              }
 +      }
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+      /* Validate DSC against the actual per-stream DSC count */
 +      if (failed_non_odm_dsc && !dcn20_validate_dsc(dc, context)) {
 +              context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] =
 +                              DML_FAIL_DSC_VALIDATION_FAILURE;
 +              goto validate_fail;
 +      }
 +#endif
 +
 +      BW_VAL_TRACE_END_VOLTAGE_LEVEL();
 +
 +      if (fast_validate) {
 +              BW_VAL_TRACE_SKIP(fast);
 +              out = true;
 +              goto validate_out;
 +      }
 +
 +      for (i = 0, pipe_idx = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
 +              if (!context->res_ctx.pipe_ctx[i].stream)
 +                      continue;
 +
 +              pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
 +              pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.vba.RequiredDISPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
 +
 +              if (pipe_split_from[i] < 0) {
 +                      pipes[pipe_cnt].clks_cfg.dppclk_mhz =
 +                                      context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx];
 +                      if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_idx] == pipe_idx)
 +                              pipes[pipe_cnt].pipe.dest.odm_combine =
 +                                              context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx];
 +                      else
 +                              pipes[pipe_cnt].pipe.dest.odm_combine = 0;
 +                      pipe_idx++;
 +              } else {
 +                      pipes[pipe_cnt].clks_cfg.dppclk_mhz =
 +                                      context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_split_from[i]];
 +                      if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_split_from[i]] == pipe_split_from[i])
 +                              pipes[pipe_cnt].pipe.dest.odm_combine =
 +                                              context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_split_from[i]];
 +                      else
 +                              pipes[pipe_cnt].pipe.dest.odm_combine = 0;
 +              }
 +              if (dc->config.forced_clocks) {
 +                      pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
 +                      pipes[pipe_cnt].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
 +              }
 +              pipe_cnt++;
 +      }
 +
 +      if (pipe_cnt != pipe_idx) {
 +              if (dc->res_pool->funcs->populate_dml_pipes)
 +                      pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
 +                              &context->res_ctx, pipes);
 +              else
 +                      pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
 +                              &context->res_ctx, pipes);
 +      }
 +
 +      pipes[0].clks_cfg.voltage = vlevel;
 +      pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz;
 +      pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
 +
 +      /* only pipe 0 is read for voltage and dcf/soc clocks */
 +      if (vlevel < 1) {
 +              pipes[0].clks_cfg.voltage = 1;
 +              pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[1].dcfclk_mhz;
 +              pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[1].socclk_mhz;
 +      }
 +      context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +
 +      if (vlevel < 2) {
 +              pipes[0].clks_cfg.voltage = 2;
 +              pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz;
 +              pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].socclk_mhz;
 +      }
 +      context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +
 +      if (vlevel < 3) {
 +              pipes[0].clks_cfg.voltage = 3;
+              pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[3].dcfclk_mhz;
+              pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[3].socclk_mhz;
 +      }
 +      context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +
 +      pipes[0].clks_cfg.voltage = vlevel;
 +      pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz;
 +      pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
 +      context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      /* Writeback MCIF_WB arbitration parameters */
 +      dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt);
 +
 +      context->bw_ctx.bw.dcn.clk.dispclk_khz = context->bw_ctx.dml.vba.DISPCLK * 1000;
 +      context->bw_ctx.bw.dcn.clk.dcfclk_khz = context->bw_ctx.dml.vba.DCFCLK * 1000;
 +      context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000;
 +      context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16;
 +      context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000;
 +      context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000;
 +      context->bw_ctx.bw.dcn.clk.p_state_change_support =
 +              context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb]
 +                                                      != dm_dram_clock_change_unsupported;
 +      context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
 +
 +      BW_VAL_TRACE_END_WATERMARKS();
 +
 +      for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
 +              if (!context->res_ctx.pipe_ctx[i].stream)
 +                      continue;
 +              pipes[pipe_idx].pipe.dest.vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx];
 +              pipes[pipe_idx].pipe.dest.vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx];
 +              pipes[pipe_idx].pipe.dest.vupdate_width = context->bw_ctx.dml.vba.VUpdateWidthPix[pipe_idx];
 +              pipes[pipe_idx].pipe.dest.vready_offset = context->bw_ctx.dml.vba.VReadyOffsetPix[pipe_idx];
 +              if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
 +                      context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
 +              context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =
 +                                              pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +              context->res_ctx.pipe_ctx[i].stream_res.dscclk_khz =
 +                              context->bw_ctx.dml.vba.DSCCLK_calculated[pipe_idx] * 1000;
 +#endif
 +              context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
 +              pipe_idx++;
 +      }
 +
 +      for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
 +              bool cstate_en = context->bw_ctx.dml.vba.PrefetchMode[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != 2;
 +
 +              if (!context->res_ctx.pipe_ctx[i].stream)
 +                      continue;
 +
 +              context->bw_ctx.dml.funcs.rq_dlg_get_dlg_reg(&context->bw_ctx.dml,
 +                              &context->res_ctx.pipe_ctx[i].dlg_regs,
 +                              &context->res_ctx.pipe_ctx[i].ttu_regs,
 +                              pipes,
 +                              pipe_cnt,
 +                              pipe_idx,
 +                              cstate_en,
 +                              context->bw_ctx.bw.dcn.clk.p_state_change_support,
 +                              false, false, false);
 +
 +              context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml,
 +                              &context->res_ctx.pipe_ctx[i].rq_regs,
 +                              pipes[pipe_idx].pipe);
 +              pipe_idx++;
 +      }
 +
 +      out = true;
 +      goto validate_out;
 +
 +validate_fail:
 +      DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
 +              dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));
 +
 +      BW_VAL_TRACE_SKIP(fail);
 +      out = false;
 +
 +validate_out:
 +      kfree(pipes);
 +
 +      BW_VAL_TRACE_FINISH();
 +
 +      return out;
 +}
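+
+/*
+ * dcn20_validate_bandwidth() is the .validate_bandwidth hook in
+ * dcn20_res_pool_funcs below. With fast_validate set it returns right
+ * after the voltage-level check, keeping mode enumeration cheap; a full
+ * run additionally derives watermark sets A-D and the per-pipe
+ * DLG/TTU/RQ register values stored into the context.
+ */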
 +
 +struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer(
 +              struct dc_state *state,
 +              const struct resource_pool *pool,
 +              struct dc_stream_state *stream)
 +{
 +      struct resource_context *res_ctx = &state->res_ctx;
 +      struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
 +      struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe);
 +
+      if (!head_pipe)
+              ASSERT(0);
+
+      if (!idle_pipe)
+              return NULL;
 +
 +      idle_pipe->stream = head_pipe->stream;
 +      idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
 +      idle_pipe->stream_res.opp = head_pipe->stream_res.opp;
 +
 +      idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
 +      idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx];
 +      idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx];
 +      idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst;
 +
 +      return idle_pipe;
 +}
 +
 +bool dcn20_get_dcc_compression_cap(const struct dc *dc,
 +              const struct dc_dcc_surface_param *input,
 +              struct dc_surface_dcc_cap *output)
 +{
 +      return dc->res_pool->hubbub->funcs->get_dcc_compression_cap(
 +                      dc->res_pool->hubbub,
 +                      input,
 +                      output);
 +}
 +
 +static void dcn20_destroy_resource_pool(struct resource_pool **pool)
 +{
 +      struct dcn20_resource_pool *dcn20_pool = TO_DCN20_RES_POOL(*pool);
 +
 +      destruct(dcn20_pool);
 +      kfree(dcn20_pool);
 +      *pool = NULL;
 +}
 +
 +
 +static struct dc_cap_funcs cap_funcs = {
 +      .get_dcc_compression_cap = dcn20_get_dcc_compression_cap
 +};
 +
 +
 +enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state)
 +{
 +      enum dc_status result = DC_OK;
 +
 +      enum surface_pixel_format surf_pix_format = plane_state->format;
 +      unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format);
 +
 +      enum swizzle_mode_values swizzle = DC_SW_LINEAR;
 +
 +      if (bpp == 64)
 +              swizzle = DC_SW_64KB_D;
 +      else
 +              swizzle = DC_SW_64KB_S;
 +
 +      plane_state->tiling_info.gfx9.swizzle = swizzle;
 +      return result;
 +}
 +
 +static struct resource_funcs dcn20_res_pool_funcs = {
 +      .destroy = dcn20_destroy_resource_pool,
 +      .link_enc_create = dcn20_link_encoder_create,
 +      .validate_bandwidth = dcn20_validate_bandwidth,
 +      .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
 +      .add_stream_to_ctx = dcn20_add_stream_to_ctx,
 +      .remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
 +      .populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
 +      .get_default_swizzle_mode = dcn20_get_default_swizzle_mode,
 +      .set_mcif_arb_params = dcn20_set_mcif_arb_params,
 +      .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
 +};
 +
 +bool dcn20_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
 +{
 +      int i;
 +      uint32_t pipe_count = pool->res_cap->num_dwb;
 +
 +      ASSERT(pipe_count > 0);
 +
 +      for (i = 0; i < pipe_count; i++) {
 +              struct dcn20_dwbc *dwbc20 = kzalloc(sizeof(struct dcn20_dwbc),
 +                                                  GFP_KERNEL);
 +
 +              if (!dwbc20) {
 +                      dm_error("DC: failed to create dwbc20!\n");
 +                      return false;
 +              }
 +              dcn20_dwbc_construct(dwbc20, ctx,
 +                              &dwbc20_regs[i],
 +                              &dwbc20_shift,
 +                              &dwbc20_mask,
 +                              i);
 +              pool->dwbc[i] = &dwbc20->base;
 +      }
 +      return true;
 +}
 +
 +bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
 +{
 +      int i;
 +      uint32_t pipe_count = pool->res_cap->num_dwb;
 +
 +      ASSERT(pipe_count > 0);
 +
 +      for (i = 0; i < pipe_count; i++) {
 +              struct dcn20_mmhubbub *mcif_wb20 = kzalloc(sizeof(struct dcn20_mmhubbub),
 +                                                  GFP_KERNEL);
 +
 +              if (!mcif_wb20) {
 +                      dm_error("DC: failed to create mcif_wb20!\n");
 +                      return false;
 +              }
 +
 +              dcn20_mmhubbub_construct(mcif_wb20, ctx,
 +                              &mcif_wb20_regs[i],
 +                              &mcif_wb20_shift,
 +                              &mcif_wb20_mask,
 +                              i);
 +
 +              pool->mcif_wb[i] = &mcif_wb20->base;
 +      }
 +      return true;
 +}
 +
 +struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
 +{
 +      struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
 +
 +      if (!pp_smu)
 +              return pp_smu;
 +
 +      dm_pp_get_funcs(ctx, pp_smu);
 +
+      if (pp_smu->ctx.ver != PP_SMU_VER_NV)
+              memset(pp_smu, 0, sizeof(*pp_smu));
 +
 +      return pp_smu;
 +}
 +
 +void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
 +{
 +      if (pp_smu && *pp_smu) {
 +              kfree(*pp_smu);
 +              *pp_smu = NULL;
 +      }
 +}
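+
+/*
+ * Lifecycle note: dcn20_pp_smu_create() returns a zeroed funcs table when
+ * the provider version is not PP_SMU_VER_NV, so callers such as
+ * init_soc_bounding_box() below must check each nv_funcs pointer
+ * individually rather than only the struct pointer.
+ */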
 +
 +static void cap_soc_clocks(
 +              struct _vcs_dpi_soc_bounding_box_st *bb,
 +              struct pp_smu_nv_clock_table max_clocks)
 +{
 +      int i;
 +
 +      // First pass - cap all clocks higher than the reported max
 +      for (i = 0; i < bb->num_states; i++) {
 +              if ((bb->clock_limits[i].dcfclk_mhz > (max_clocks.dcfClockInKhz / 1000))
 +                              && max_clocks.dcfClockInKhz != 0)
 +                      bb->clock_limits[i].dcfclk_mhz = (max_clocks.dcfClockInKhz / 1000);
 +
 +              if ((bb->clock_limits[i].dram_speed_mts > (max_clocks.uClockInKhz / 1000) * 16)
 +                                              && max_clocks.uClockInKhz != 0)
 +                      bb->clock_limits[i].dram_speed_mts = (max_clocks.uClockInKhz / 1000) * 16;
 +
 +              if ((bb->clock_limits[i].fabricclk_mhz > (max_clocks.fabricClockInKhz / 1000))
 +                                              && max_clocks.fabricClockInKhz != 0)
 +                      bb->clock_limits[i].fabricclk_mhz = (max_clocks.fabricClockInKhz / 1000);
 +
 +              if ((bb->clock_limits[i].dispclk_mhz > (max_clocks.displayClockInKhz / 1000))
 +                                              && max_clocks.displayClockInKhz != 0)
 +                      bb->clock_limits[i].dispclk_mhz = (max_clocks.displayClockInKhz / 1000);
 +
 +              if ((bb->clock_limits[i].dppclk_mhz > (max_clocks.dppClockInKhz / 1000))
 +                                              && max_clocks.dppClockInKhz != 0)
 +                      bb->clock_limits[i].dppclk_mhz = (max_clocks.dppClockInKhz / 1000);
 +
 +              if ((bb->clock_limits[i].phyclk_mhz > (max_clocks.phyClockInKhz / 1000))
 +                                              && max_clocks.phyClockInKhz != 0)
 +                      bb->clock_limits[i].phyclk_mhz = (max_clocks.phyClockInKhz / 1000);
 +
 +              if ((bb->clock_limits[i].socclk_mhz > (max_clocks.socClockInKhz / 1000))
 +                                              && max_clocks.socClockInKhz != 0)
 +                      bb->clock_limits[i].socclk_mhz = (max_clocks.socClockInKhz / 1000);
 +
 +              if ((bb->clock_limits[i].dscclk_mhz > (max_clocks.dscClockInKhz / 1000))
 +                                              && max_clocks.dscClockInKhz != 0)
 +                      bb->clock_limits[i].dscclk_mhz = (max_clocks.dscClockInKhz / 1000);
 +      }
 +
 +      // Second pass - remove all duplicate clock states
 +      for (i = bb->num_states - 1; i > 1; i--) {
 +              bool duplicate = true;
 +
 +              if (bb->clock_limits[i-1].dcfclk_mhz != bb->clock_limits[i].dcfclk_mhz)
 +                      duplicate = false;
 +              if (bb->clock_limits[i-1].dispclk_mhz != bb->clock_limits[i].dispclk_mhz)
 +                      duplicate = false;
 +              if (bb->clock_limits[i-1].dppclk_mhz != bb->clock_limits[i].dppclk_mhz)
 +                      duplicate = false;
 +              if (bb->clock_limits[i-1].dram_speed_mts != bb->clock_limits[i].dram_speed_mts)
 +                      duplicate = false;
 +              if (bb->clock_limits[i-1].dscclk_mhz != bb->clock_limits[i].dscclk_mhz)
 +                      duplicate = false;
 +              if (bb->clock_limits[i-1].fabricclk_mhz != bb->clock_limits[i].fabricclk_mhz)
 +                      duplicate = false;
 +              if (bb->clock_limits[i-1].phyclk_mhz != bb->clock_limits[i].phyclk_mhz)
 +                      duplicate = false;
 +              if (bb->clock_limits[i-1].socclk_mhz != bb->clock_limits[i].socclk_mhz)
 +                      duplicate = false;
 +
 +              if (duplicate)
 +                      bb->num_states--;
 +      }
 +}
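+
+/*
+ * Example with hypothetical numbers: if max_clocks.dcfClockInKhz is
+ * 1000000, any state reporting dcfclk_mhz above 1000 is clamped to
+ * 1000 MHz in the first pass; states that become identical across all
+ * eight compared clocks are then trimmed by shrinking num_states.
+ */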
 +
 +static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb,
 +              struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states)
 +{
 +      struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES] = {0};
 +      int i;
 +      int num_calculated_states = 0;
 +      int min_dcfclk = 0;
 +
 +      if (num_states == 0)
 +              return;
 +
 +      if (dc->bb_overrides.min_dcfclk_mhz > 0)
 +              min_dcfclk = dc->bb_overrides.min_dcfclk_mhz;
 +
 +      for (i = 0; i < num_states; i++) {
 +              int min_fclk_required_by_uclk;
 +              calculated_states[i].state = i;
 +              calculated_states[i].dram_speed_mts = uclk_states[i] * 16 / 1000;
 +
 +              // FCLK:UCLK ratio is 1.08
 +              min_fclk_required_by_uclk = ((unsigned long long)uclk_states[i]) * 1080 / 1000000;
 +
 +              calculated_states[i].fabricclk_mhz = (min_fclk_required_by_uclk < min_dcfclk) ?
 +                              min_dcfclk : min_fclk_required_by_uclk;
 +
 +              calculated_states[i].socclk_mhz = (calculated_states[i].fabricclk_mhz > max_clocks->socClockInKhz / 1000) ?
 +                              max_clocks->socClockInKhz / 1000 : calculated_states[i].fabricclk_mhz;
 +
 +              calculated_states[i].dcfclk_mhz = (calculated_states[i].fabricclk_mhz > max_clocks->dcfClockInKhz / 1000) ?
 +                              max_clocks->dcfClockInKhz / 1000 : calculated_states[i].fabricclk_mhz;
 +
 +              calculated_states[i].dispclk_mhz = max_clocks->displayClockInKhz / 1000;
 +              calculated_states[i].dppclk_mhz = max_clocks->displayClockInKhz / 1000;
 +              calculated_states[i].dscclk_mhz = max_clocks->displayClockInKhz / (1000 * 3);
 +
 +              calculated_states[i].phyclk_mhz = max_clocks->phyClockInKhz / 1000;
 +
 +              num_calculated_states++;
 +      }
 +
 +      memcpy(bb->clock_limits, calculated_states, sizeof(bb->clock_limits));
 +      bb->num_states = num_calculated_states;
 +
+      // Duplicate the last state; DML always needs an extra state identical to the max state to work
 +      memcpy(&bb->clock_limits[num_calculated_states], &bb->clock_limits[num_calculated_states - 1], sizeof(struct _vcs_dpi_voltage_scaling_st));
 +      bb->clock_limits[num_calculated_states].state = bb->num_states;
 +}
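+
+/*
+ * Worked example of the 1.08 FCLK:UCLK ratio above, assuming
+ * uclk_states[] is reported in kHz: uclk_states[i] = 875000 yields
+ * dram_speed_mts = 875000 * 16 / 1000 = 14000 MT/s and
+ * min_fclk_required_by_uclk = 875000 * 1080 / 1000000 = 945 MHz, which
+ * any min_dcfclk override can still raise.
+ */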
 +
 +static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
 +{
 +      kernel_fpu_begin();
 +      if ((int)(bb->sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns
 +                      && dc->bb_overrides.sr_exit_time_ns) {
 +              bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
 +      }
 +
 +      if ((int)(bb->sr_enter_plus_exit_time_us * 1000)
 +                              != dc->bb_overrides.sr_enter_plus_exit_time_ns
 +                      && dc->bb_overrides.sr_enter_plus_exit_time_ns) {
 +              bb->sr_enter_plus_exit_time_us =
 +                              dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
 +      }
 +
 +      if ((int)(bb->urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns
 +                      && dc->bb_overrides.urgent_latency_ns) {
 +              bb->urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
 +      }
 +
 +      if ((int)(bb->dram_clock_change_latency_us * 1000)
 +                              != dc->bb_overrides.dram_clock_change_latency_ns
 +                      && dc->bb_overrides.dram_clock_change_latency_ns) {
 +              bb->dram_clock_change_latency_us =
 +                              dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
 +      }
 +      kernel_fpu_end();
 +}
 +
 +#define fixed16_to_double(x) (((double) x) / ((double) (1 << 16)))
 +#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x))
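+
+/*
+ * For reference, fixed16_to_double() decodes the firmware's 16.16
+ * fixed-point fields: fixed16_to_double(0x00018000) = 98304 / 65536 = 1.5.
+ */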
 +
 +static bool init_soc_bounding_box(struct dc *dc,
 +                                struct dcn20_resource_pool *pool)
 +{
 +      const struct gpu_info_soc_bounding_box_v1_0 *bb = dc->soc_bounding_box;
 +      DC_LOGGER_INIT(dc->ctx->logger);
 +
 +      if (!bb && !SOC_BOUNDING_BOX_VALID) {
+              DC_LOG_ERROR("%s: no valid SOC bounding box\n", __func__);
 +              return false;
 +      }
 +
 +      if (bb && !SOC_BOUNDING_BOX_VALID) {
 +              int i;
 +
 +              dcn2_0_soc.sr_exit_time_us =
 +                              fixed16_to_double_to_cpu(bb->sr_exit_time_us);
 +              dcn2_0_soc.sr_enter_plus_exit_time_us =
 +                              fixed16_to_double_to_cpu(bb->sr_enter_plus_exit_time_us);
 +              dcn2_0_soc.urgent_latency_us =
 +                              fixed16_to_double_to_cpu(bb->urgent_latency_us);
 +              dcn2_0_soc.urgent_latency_pixel_data_only_us =
 +                              fixed16_to_double_to_cpu(bb->urgent_latency_pixel_data_only_us);
 +              dcn2_0_soc.urgent_latency_pixel_mixed_with_vm_data_us =
 +                              fixed16_to_double_to_cpu(bb->urgent_latency_pixel_mixed_with_vm_data_us);
 +              dcn2_0_soc.urgent_latency_vm_data_only_us =
 +                              fixed16_to_double_to_cpu(bb->urgent_latency_vm_data_only_us);
 +              dcn2_0_soc.urgent_out_of_order_return_per_channel_pixel_only_bytes =
 +                              le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_only_bytes);
 +              dcn2_0_soc.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes =
 +                              le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_and_vm_bytes);
 +              dcn2_0_soc.urgent_out_of_order_return_per_channel_vm_only_bytes =
 +                              le32_to_cpu(bb->urgent_out_of_order_return_per_channel_vm_only_bytes);
 +              dcn2_0_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only =
 +                              fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_only);
 +              dcn2_0_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm =
 +                              fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm);
 +              dcn2_0_soc.pct_ideal_dram_sdp_bw_after_urgent_vm_only =
 +                              fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_vm_only);
 +              dcn2_0_soc.max_avg_sdp_bw_use_normal_percent =
 +                              fixed16_to_double_to_cpu(bb->max_avg_sdp_bw_use_normal_percent);
 +              dcn2_0_soc.max_avg_dram_bw_use_normal_percent =
 +                              fixed16_to_double_to_cpu(bb->max_avg_dram_bw_use_normal_percent);
 +              dcn2_0_soc.writeback_latency_us =
 +                              fixed16_to_double_to_cpu(bb->writeback_latency_us);
 +              dcn2_0_soc.ideal_dram_bw_after_urgent_percent =
 +                              fixed16_to_double_to_cpu(bb->ideal_dram_bw_after_urgent_percent);
 +              dcn2_0_soc.max_request_size_bytes =
 +                              le32_to_cpu(bb->max_request_size_bytes);
 +              dcn2_0_soc.dram_channel_width_bytes =
 +                              le32_to_cpu(bb->dram_channel_width_bytes);
 +              dcn2_0_soc.fabric_datapath_to_dcn_data_return_bytes =
 +                              le32_to_cpu(bb->fabric_datapath_to_dcn_data_return_bytes);
 +              dcn2_0_soc.dcn_downspread_percent =
 +                              fixed16_to_double_to_cpu(bb->dcn_downspread_percent);
 +              dcn2_0_soc.downspread_percent =
 +                              fixed16_to_double_to_cpu(bb->downspread_percent);
 +              dcn2_0_soc.dram_page_open_time_ns =
 +                              fixed16_to_double_to_cpu(bb->dram_page_open_time_ns);
 +              dcn2_0_soc.dram_rw_turnaround_time_ns =
 +                              fixed16_to_double_to_cpu(bb->dram_rw_turnaround_time_ns);
 +              dcn2_0_soc.dram_return_buffer_per_channel_bytes =
 +                              le32_to_cpu(bb->dram_return_buffer_per_channel_bytes);
 +              dcn2_0_soc.round_trip_ping_latency_dcfclk_cycles =
 +                              le32_to_cpu(bb->round_trip_ping_latency_dcfclk_cycles);
 +              dcn2_0_soc.urgent_out_of_order_return_per_channel_bytes =
 +                              le32_to_cpu(bb->urgent_out_of_order_return_per_channel_bytes);
 +              dcn2_0_soc.channel_interleave_bytes =
 +                              le32_to_cpu(bb->channel_interleave_bytes);
 +              dcn2_0_soc.num_banks =
 +                              le32_to_cpu(bb->num_banks);
 +              dcn2_0_soc.num_chans =
 +                              le32_to_cpu(bb->num_chans);
 +              dcn2_0_soc.vmm_page_size_bytes =
 +                              le32_to_cpu(bb->vmm_page_size_bytes);
 +              dcn2_0_soc.dram_clock_change_latency_us =
 +                              fixed16_to_double_to_cpu(bb->dram_clock_change_latency_us);
 +              dcn2_0_soc.writeback_dram_clock_change_latency_us =
 +                              fixed16_to_double_to_cpu(bb->writeback_dram_clock_change_latency_us);
 +              dcn2_0_soc.return_bus_width_bytes =
 +                              le32_to_cpu(bb->return_bus_width_bytes);
 +              dcn2_0_soc.dispclk_dppclk_vco_speed_mhz =
 +                              le32_to_cpu(bb->dispclk_dppclk_vco_speed_mhz);
 +              dcn2_0_soc.xfc_bus_transport_time_us =
 +                              le32_to_cpu(bb->xfc_bus_transport_time_us);
 +              dcn2_0_soc.xfc_xbuf_latency_tolerance_us =
 +                              le32_to_cpu(bb->xfc_xbuf_latency_tolerance_us);
 +              dcn2_0_soc.use_urgent_burst_bw =
 +                              le32_to_cpu(bb->use_urgent_burst_bw);
 +              dcn2_0_soc.num_states =
 +                              le32_to_cpu(bb->num_states);
 +
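 +              /* Convert each firmware clock state from its little-endian,
 +               * fixed-point encoding to host-order doubles for DML.
 +               */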
 +              for (i = 0; i < dcn2_0_soc.num_states; i++) {
 +                      dcn2_0_soc.clock_limits[i].state =
 +                                      le32_to_cpu(bb->clock_limits[i].state);
 +                      dcn2_0_soc.clock_limits[i].dcfclk_mhz =
 +                                      fixed16_to_double_to_cpu(bb->clock_limits[i].dcfclk_mhz);
 +                      dcn2_0_soc.clock_limits[i].fabricclk_mhz =
 +                                      fixed16_to_double_to_cpu(bb->clock_limits[i].fabricclk_mhz);
 +                      dcn2_0_soc.clock_limits[i].dispclk_mhz =
 +                                      fixed16_to_double_to_cpu(bb->clock_limits[i].dispclk_mhz);
 +                      dcn2_0_soc.clock_limits[i].dppclk_mhz =
 +                                      fixed16_to_double_to_cpu(bb->clock_limits[i].dppclk_mhz);
 +                      dcn2_0_soc.clock_limits[i].phyclk_mhz =
 +                                      fixed16_to_double_to_cpu(bb->clock_limits[i].phyclk_mhz);
 +                      dcn2_0_soc.clock_limits[i].socclk_mhz =
 +                                      fixed16_to_double_to_cpu(bb->clock_limits[i].socclk_mhz);
 +                      dcn2_0_soc.clock_limits[i].dscclk_mhz =
 +                                      fixed16_to_double_to_cpu(bb->clock_limits[i].dscclk_mhz);
 +                      dcn2_0_soc.clock_limits[i].dram_speed_mts =
 +                                      fixed16_to_double_to_cpu(bb->clock_limits[i].dram_speed_mts);
 +              }
 +      }
 +
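 +      /* When an SMU interface is available, refine the bounding box with
 +       * the UCLK DPM states and maximum sustainable clocks it reports.
 +       */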
 +      if (pool->base.pp_smu) {
 +              struct pp_smu_nv_clock_table max_clocks = {0};
 +              unsigned int uclk_states[8] = {0};
 +              unsigned int num_states = 0;
 +              enum pp_smu_status status;
 +              bool clock_limits_available = false;
 +              bool uclk_states_available = false;
 +
 +              if (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states) {
 +                      status = (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states)
 +                              (&pool->base.pp_smu->nv_funcs.pp_smu, uclk_states, &num_states);
 +
 +                      uclk_states_available = (status == PP_SMU_RESULT_OK);
 +              }
 +
 +              if (pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks) {
 +                      status = (*pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks)
 +                                      (&pool->base.pp_smu->nv_funcs.pp_smu, &max_clocks);
 +                      /* SMU cannot set the DCF clock equal to or higher
 +                       * than the SOC clock.
 +                       */
 +                      if (max_clocks.dcfClockInKhz >= max_clocks.socClockInKhz)
 +                              max_clocks.dcfClockInKhz = max_clocks.socClockInKhz - 1000;
 +                      clock_limits_available = (status == PP_SMU_RESULT_OK);
 +              }
 +
 +              if (clock_limits_available && uclk_states_available && num_states)
 +                      update_bounding_box(dc, &dcn2_0_soc, &max_clocks, uclk_states, num_states);
 +              else if (clock_limits_available)
 +                      cap_soc_clocks(&dcn2_0_soc, max_clocks);
 +      }
 +
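 +      /* Limit the DML IP parameters to what this pool actually instantiates. */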
 +      dcn2_0_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
 +      dcn2_0_ip.max_num_dpp = pool->base.pipe_count;
 +      patch_bounding_box(dc, &dcn2_0_soc);
 +
 +      return true;
 +}
 +
 +static bool construct(
 +      uint8_t num_virtual_links,
 +      struct dc *dc,
 +      struct dcn20_resource_pool *pool)
 +{
 +      int i;
 +      struct dc_context *ctx = dc->ctx;
 +      struct irq_service_init_data init_data;
 +
 +      ctx->dc_bios->regs = &bios_regs;
 +
 +      pool->base.res_cap = &res_cap_nv10;
 +      pool->base.funcs = &dcn20_res_pool_funcs;
 +
 +      /*************************************************
 +       *  Resource + asic cap hardcoding               *
 +       *************************************************/
 +      pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
 +
 +      pool->base.pipe_count = 6;
 +      pool->base.mpcc_count = 6;
 +      dc->caps.max_downscale_ratio = 200;
 +      dc->caps.i2c_speed_in_khz = 100;
 +      dc->caps.max_cursor_size = 256;
 +      dc->caps.dmdata_alloc_size = 2048;
 +
 +      dc->caps.max_slave_planes = 1;
 +      dc->caps.post_blend_color_processing = true;
 +      dc->caps.force_dp_tps4_for_cp2520 = true;
 +      dc->caps.hw_3d_lut = true;
 +
 +      if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
 +              dc->debug = debug_defaults_drv;
 +      else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
 +              pool->base.pipe_count = 4;
 +
 +              pool->base.mpcc_count = pool->base.pipe_count;
 +              dc->debug = debug_defaults_diags;
 +      } else
 +              dc->debug = debug_defaults_diags;
 +      //dcn2.0x
 +      dc->work_arounds.dedcn20_305_wa = true;
 +
 +      // Init the vm_helper
 +      if (dc->vm_helper)
 +              vm_helper_init(dc->vm_helper, 16);
 +
 +      /*************************************************
 +       *  Create resources                             *
 +       *************************************************/
 +
 +      pool->base.clock_sources[DCN20_CLK_SRC_PLL0] =
 +                      dcn20_clock_source_create(ctx, ctx->dc_bios,
 +                              CLOCK_SOURCE_COMBO_PHY_PLL0,
 +                              &clk_src_regs[0], false);
 +      pool->base.clock_sources[DCN20_CLK_SRC_PLL1] =
 +                      dcn20_clock_source_create(ctx, ctx->dc_bios,
 +                              CLOCK_SOURCE_COMBO_PHY_PLL1,
 +                              &clk_src_regs[1], false);
 +      pool->base.clock_sources[DCN20_CLK_SRC_PLL2] =
 +                      dcn20_clock_source_create(ctx, ctx->dc_bios,
 +                              CLOCK_SOURCE_COMBO_PHY_PLL2,
 +                              &clk_src_regs[2], false);
 +      pool->base.clock_sources[DCN20_CLK_SRC_PLL3] =
 +                      dcn20_clock_source_create(ctx, ctx->dc_bios,
 +                              CLOCK_SOURCE_COMBO_PHY_PLL3,
 +                              &clk_src_regs[3], false);
 +      pool->base.clock_sources[DCN20_CLK_SRC_PLL4] =
 +                      dcn20_clock_source_create(ctx, ctx->dc_bios,
 +                              CLOCK_SOURCE_COMBO_PHY_PLL4,
 +                              &clk_src_regs[4], false);
 +      pool->base.clock_sources[DCN20_CLK_SRC_PLL5] =
 +                      dcn20_clock_source_create(ctx, ctx->dc_bios,
 +                              CLOCK_SOURCE_COMBO_PHY_PLL5,
 +                              &clk_src_regs[5], false);
 +      pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL;
 +      /* todo: not reuse phy_pll registers */
 +      pool->base.dp_clock_source =
 +                      dcn20_clock_source_create(ctx, ctx->dc_bios,
 +                              CLOCK_SOURCE_ID_DP_DTO,
 +                              &clk_src_regs[0], true);
 +
 +      for (i = 0; i < pool->base.clk_src_count; i++) {
 +              if (pool->base.clock_sources[i] == NULL) {
 +                      dm_error("DC: failed to create clock sources!\n");
 +                      BREAK_TO_DEBUGGER();
 +                      goto create_fail;
 +              }
 +      }
 +
 +      pool->base.dccg = dccg2_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
 +      if (pool->base.dccg == NULL) {
 +              dm_error("DC: failed to create dccg!\n");
 +              BREAK_TO_DEBUGGER();
 +              goto create_fail;
 +      }
 +
 +      pool->base.dmcu = dcn20_dmcu_create(ctx,
 +                      &dmcu_regs,
 +                      &dmcu_shift,
 +                      &dmcu_mask);
 +      if (pool->base.dmcu == NULL) {
 +              dm_error("DC: failed to create dmcu!\n");
 +              BREAK_TO_DEBUGGER();
 +              goto create_fail;
 +      }
 +
 +      pool->base.abm = dce_abm_create(ctx,
 +                      &abm_regs,
 +                      &abm_shift,
 +                      &abm_mask);
 +      if (pool->base.abm == NULL) {
 +              dm_error("DC: failed to create abm!\n");
 +              BREAK_TO_DEBUGGER();
 +              goto create_fail;
 +      }
 +
 +      pool->base.pp_smu = dcn20_pp_smu_create(ctx);
 +
 +      if (!init_soc_bounding_box(dc, pool)) {
 +              dm_error("DC: failed to initialize soc bounding box!\n");
 +              BREAK_TO_DEBUGGER();
 +              goto create_fail;
 +      }
 +
 +      dml_init_instance(&dc->dml, &dcn2_0_soc, &dcn2_0_ip, DML_PROJECT_NAVI10);
 +
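 +      /* Build reader watermark range sets from the bounding box DRAM speed
 +       * states (up to four), plus a single unconstrained writer set, and
 +       * report them to pplib/SMU.
 +       */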
 +      if (!dc->debug.disable_pplib_wm_range) {
 +              struct pp_smu_wm_range_sets ranges = {0};
 +              int i = 0;
 +
 +              ranges.num_reader_wm_sets = 0;
 +
 +              if (dcn2_0_soc.num_states == 1) {
 +                      ranges.reader_wm_sets[0].wm_inst = i;
 +                      ranges.reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
 +                      ranges.reader_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
 +                      ranges.reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
 +                      ranges.reader_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
 +
 +                      ranges.num_reader_wm_sets = 1;
 +              } else if (dcn2_0_soc.num_states > 1) {
 +                      for (i = 0; i < 4 && i < dcn2_0_soc.num_states; i++) {
 +                              ranges.reader_wm_sets[i].wm_inst = i;
 +                              ranges.reader_wm_sets[i].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
 +                              ranges.reader_wm_sets[i].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
 +                              ranges.reader_wm_sets[i].min_fill_clk_mhz = (i > 0) ? (dcn2_0_soc.clock_limits[i - 1].dram_speed_mts / 16) + 1 : 0;
 +                              ranges.reader_wm_sets[i].max_fill_clk_mhz = dcn2_0_soc.clock_limits[i].dram_speed_mts / 16;
 +
 +                              ranges.num_reader_wm_sets = i + 1;
 +                      }
 +
 +                      ranges.reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
 +                      ranges.reader_wm_sets[ranges.num_reader_wm_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
 +              }
 +
 +              ranges.num_writer_wm_sets = 1;
 +
 +              ranges.writer_wm_sets[0].wm_inst = 0;
 +              ranges.writer_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
 +              ranges.writer_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
 +              ranges.writer_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
 +              ranges.writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
 +
 +              /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
 +              if (pool->base.pp_smu->nv_funcs.set_wm_ranges)
 +                      pool->base.pp_smu->nv_funcs.set_wm_ranges(&pool->base.pp_smu->nv_funcs.pp_smu, &ranges);
 +      }
 +
 +      init_data.ctx = dc->ctx;
 +      pool->base.irqs = dal_irq_service_dcn20_create(&init_data);
 +      if (!pool->base.irqs)
 +              goto create_fail;
 +
 +      /* mem input -> ipp -> dpp -> opp -> TG */
 +      for (i = 0; i < pool->base.pipe_count; i++) {
 +              pool->base.hubps[i] = dcn20_hubp_create(ctx, i);
 +              if (pool->base.hubps[i] == NULL) {
 +                      BREAK_TO_DEBUGGER();
 +                      dm_error(
 +                              "DC: failed to create memory input!\n");
 +                      goto create_fail;
 +              }
 +
 +              pool->base.ipps[i] = dcn20_ipp_create(ctx, i);
 +              if (pool->base.ipps[i] == NULL) {
 +                      BREAK_TO_DEBUGGER();
 +                      dm_error(
 +                              "DC: failed to create input pixel processor!\n");
 +                      goto create_fail;
 +              }
 +
 +              pool->base.dpps[i] = dcn20_dpp_create(ctx, i);
 +              if (pool->base.dpps[i] == NULL) {
 +                      BREAK_TO_DEBUGGER();
 +                      dm_error(
 +                              "DC: failed to create dpps!\n");
 +                      goto create_fail;
 +              }
 +      }
 +      for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
 +              pool->base.engines[i] = dcn20_aux_engine_create(ctx, i);
 +              if (pool->base.engines[i] == NULL) {
 +                      BREAK_TO_DEBUGGER();
 +                      dm_error(
 +                              "DC: failed to create aux engine!\n");
 +                      goto create_fail;
 +              }
 +              pool->base.hw_i2cs[i] = dcn20_i2c_hw_create(ctx, i);
 +              if (pool->base.hw_i2cs[i] == NULL) {
 +                      BREAK_TO_DEBUGGER();
 +                      dm_error(
 +                              "DC: failed to create hw i2c!\n");
 +                      goto create_fail;
 +              }
 +              pool->base.sw_i2cs[i] = NULL;
 +      }
 +
 +      for (i = 0; i < pool->base.res_cap->num_opp; i++) {
 +              pool->base.opps[i] = dcn20_opp_create(ctx, i);
 +              if (pool->base.opps[i] == NULL) {
 +                      BREAK_TO_DEBUGGER();
 +                      dm_error(
 +                              "DC: failed to create output pixel processor!\n");
 +                      goto create_fail;
 +              }
 +      }
 +
 +      for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
 +              pool->base.timing_generators[i] = dcn20_timing_generator_create(
 +                              ctx, i);
 +              if (pool->base.timing_generators[i] == NULL) {
 +                      BREAK_TO_DEBUGGER();
 +                      dm_error("DC: failed to create tg!\n");
 +                      goto create_fail;
 +              }
 +      }
 +
 +      pool->base.timing_generator_count = i;
 +
 +      pool->base.mpc = dcn20_mpc_create(ctx);
 +      if (pool->base.mpc == NULL) {
 +              BREAK_TO_DEBUGGER();
 +              dm_error("DC: failed to create mpc!\n");
 +              goto create_fail;
 +      }
 +
 +      pool->base.hubbub = dcn20_hubbub_create(ctx);
 +      if (pool->base.hubbub == NULL) {
 +              BREAK_TO_DEBUGGER();
 +              dm_error("DC: failed to create hubbub!\n");
 +              goto create_fail;
 +      }
 +
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +      for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
 +              pool->base.dscs[i] = dcn20_dsc_create(ctx, i);
 +              if (pool->base.dscs[i] == NULL) {
 +                      BREAK_TO_DEBUGGER();
 +                      dm_error("DC: failed to create display stream compressor %d!\n", i);
 +                      goto create_fail;
 +              }
 +      }
 +#endif
 +
 +      if (!dcn20_dwbc_create(ctx, &pool->base)) {
 +              BREAK_TO_DEBUGGER();
 +              dm_error("DC: failed to create dwbc!\n");
 +              goto create_fail;
 +      }
 +      if (!dcn20_mmhubbub_create(ctx, &pool->base)) {
 +              BREAK_TO_DEBUGGER();
 +              dm_error("DC: failed to create mcif_wb!\n");
 +              goto create_fail;
 +      }
 +
 +      if (!resource_construct(num_virtual_links, dc, &pool->base,
 +                      (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
 +                      &res_create_funcs : &res_create_maximus_funcs)))
 +              goto create_fail;
 +
 +      dcn20_hw_sequencer_construct(dc);
 +
 +      dc->caps.max_planes = pool->base.pipe_count;
 +
 +      for (i = 0; i < dc->caps.max_planes; ++i)
 +              dc->caps.planes[i] = plane_cap;
 +
 +      dc->cap_funcs = cap_funcs;
 +
 +      return true;
 +
 +create_fail:
 +
 +      destruct(pool);
 +
 +      return false;
 +}
 +
 +struct resource_pool *dcn20_create_resource_pool(
 +              const struct dc_init_data *init_data,
 +              struct dc *dc)
 +{
 +      struct dcn20_resource_pool *pool =
 +              kzalloc(sizeof(struct dcn20_resource_pool), GFP_KERNEL);
 +
 +      if (!pool)
 +              return NULL;
 +
 +      if (construct(init_data->num_virtual_links, dc, pool))
 +              return &pool->base;
 +
 +      BREAK_TO_DEBUGGER();
 +      kfree(pool);
 +      return NULL;
 +}
index 791aa745efd22354a254e7c7aed47b1259935a62,0000000000000000000000000000000000000000..f5bcffc426b84c3a8496794783d038a68100ac0f
mode 100644,000000..100644
--- /dev/null
@@@ -1,608 -1,0 +1,610 @@@
 +/*
 + * Copyright 2012-15 Advanced Micro Devices, Inc.
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the "Software"),
 + * to deal in the Software without restriction, including without limitation
 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 + *  and/or sell copies of the Software, and to permit persons to whom the
 + * Software is furnished to do so, subject to the following conditions:
 + *
 + * The above copyright notice and this permission notice shall be included in
 + * all copies or substantial portions of the Software.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 + * OTHER DEALINGS IN THE SOFTWARE.
 + *
 + * Authors: AMD
 + *
 + */
 +
++#include <linux/delay.h>
++
 +#include "dc_bios_types.h"
 +#include "dcn20_stream_encoder.h"
 +#include "reg_helper.h"
 +#include "hw_shared.h"
 +
 +#define DC_LOGGER \
 +              enc1->base.ctx->logger
 +
 +
 +#define REG(reg)\
 +      (enc1->regs->reg)
 +
 +#undef FN
 +#define FN(reg_name, field_name) \
 +      enc1->se_shift->field_name, enc1->se_mask->field_name
 +
 +
 +#define CTX \
 +      enc1->base.ctx
 +
 +
 +static void enc2_update_hdmi_info_packet(
 +      struct dcn10_stream_encoder *enc1,
 +      uint32_t packet_index,
 +      const struct dc_info_packet *info_packet)
 +{
 +      uint32_t cont, send, line;
 +
 +      if (info_packet->valid) {
 +              enc1_update_generic_info_packet(
 +                      enc1,
 +                      packet_index,
 +                      info_packet);
 +
 +              /* enable transmission of packet(s) -
 +               * packet transmission begins on the next frame */
 +              cont = 1;
 +              /* send packet(s) every frame */
 +              send = 1;
 +              /* select line number to send packets on */
 +              line = 2;
 +      } else {
 +              cont = 0;
 +              send = 0;
 +              line = 0;
 +      }
 +
 +      /* DP_SEC_GSP[x]_LINE_REFERENCE - keep default value REFER_TO_DP_SOF */
 +
 +      /* choose which generic packet control to use */
 +      switch (packet_index) {
 +      case 0:
 +              REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
 +                              HDMI_GENERIC0_CONT, cont,
 +                              HDMI_GENERIC0_SEND, send);
 +              REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL1,
 +                              HDMI_GENERIC0_LINE, line);
 +              break;
 +      case 1:
 +              REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
 +                              HDMI_GENERIC1_CONT, cont,
 +                              HDMI_GENERIC1_SEND, send);
 +              REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL1,
 +                              HDMI_GENERIC1_LINE, line);
 +              break;
 +      case 2:
 +              REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
 +                              HDMI_GENERIC2_CONT, cont,
 +                              HDMI_GENERIC2_SEND, send);
 +              REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL2,
 +                              HDMI_GENERIC2_LINE, line);
 +              break;
 +      case 3:
 +              REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
 +                              HDMI_GENERIC3_CONT, cont,
 +                              HDMI_GENERIC3_SEND, send);
 +              REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL2,
 +                              HDMI_GENERIC3_LINE, line);
 +              break;
 +      case 4:
 +              REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
 +                              HDMI_GENERIC4_CONT, cont,
 +                              HDMI_GENERIC4_SEND, send);
 +              REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL3,
 +                              HDMI_GENERIC4_LINE, line);
 +              break;
 +      case 5:
 +              REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
 +                              HDMI_GENERIC5_CONT, cont,
 +                              HDMI_GENERIC5_SEND, send);
 +              REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL3,
 +                              HDMI_GENERIC5_LINE, line);
 +              break;
 +      case 6:
 +              REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
 +                              HDMI_GENERIC6_CONT, cont,
 +                              HDMI_GENERIC6_SEND, send);
 +              REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL4,
 +                              HDMI_GENERIC6_LINE, line);
 +              break;
 +      case 7:
 +              REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
 +                              HDMI_GENERIC7_CONT, cont,
 +                              HDMI_GENERIC7_SEND, send);
 +              REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL4,
 +                              HDMI_GENERIC7_LINE, line);
 +              break;
 +      default:
 +              /* invalid HW packet index */
 +              DC_LOG_WARNING(
 +                      "Invalid HW packet index: %s()\n",
 +                      __func__);
 +              return;
 +      }
 +}
 +
 +static void enc2_stream_encoder_update_hdmi_info_packets(
 +      struct stream_encoder *enc,
 +      const struct encoder_info_frame *info_frame)
 +{
 +      struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
 +
 +      /* for bring up, disable HDMI double buffering - TODO */
 +      REG_UPDATE(HDMI_DB_CONTROL, HDMI_DB_DISABLE, 1);
 +
 +      /* Always add mandatory packets first, followed by optional ones */
 +      enc2_update_hdmi_info_packet(enc1, 0, &info_frame->avi);
 +      enc2_update_hdmi_info_packet(enc1, 5, &info_frame->hfvsif);
 +      enc2_update_hdmi_info_packet(enc1, 2, &info_frame->gamut);
 +      enc2_update_hdmi_info_packet(enc1, 1, &info_frame->vendor);
 +      enc2_update_hdmi_info_packet(enc1, 3, &info_frame->spd);
 +      enc2_update_hdmi_info_packet(enc1, 4, &info_frame->hdrsmd);
 +}
 +
 +static void enc2_stream_encoder_stop_hdmi_info_packets(
 +      struct stream_encoder *enc)
 +{
 +      struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
 +
 +      /* stop generic packets 0,1 on HDMI */
 +      REG_SET_4(HDMI_GENERIC_PACKET_CONTROL0, 0,
 +              HDMI_GENERIC0_CONT, 0,
 +              HDMI_GENERIC0_SEND, 0,
 +              HDMI_GENERIC1_CONT, 0,
 +              HDMI_GENERIC1_SEND, 0);
 +      REG_SET_2(HDMI_GENERIC_PACKET_CONTROL1, 0,
 +              HDMI_GENERIC0_LINE, 0,
 +              HDMI_GENERIC1_LINE, 0);
 +
 +      /* stop generic packets 2,3 on HDMI */
 +      REG_SET_4(HDMI_GENERIC_PACKET_CONTROL0, 0,
 +              HDMI_GENERIC2_CONT, 0,
 +              HDMI_GENERIC2_SEND, 0,
 +              HDMI_GENERIC3_CONT, 0,
 +              HDMI_GENERIC3_SEND, 0);
 +      REG_SET_2(HDMI_GENERIC_PACKET_CONTROL2, 0,
 +              HDMI_GENERIC2_LINE, 0,
 +              HDMI_GENERIC3_LINE, 0);
 +
 +      /* stop generic packets 4,5 on HDMI */
 +      REG_SET_4(HDMI_GENERIC_PACKET_CONTROL0, 0,
 +              HDMI_GENERIC4_CONT, 0,
 +              HDMI_GENERIC4_SEND, 0,
 +              HDMI_GENERIC5_CONT, 0,
 +              HDMI_GENERIC5_SEND, 0);
 +      REG_SET_2(HDMI_GENERIC_PACKET_CONTROL3, 0,
 +              HDMI_GENERIC4_LINE, 0,
 +              HDMI_GENERIC5_LINE, 0);
 +
 +      /* stop generic packets 6,7 on HDMI */
 +      REG_SET_4(HDMI_GENERIC_PACKET_CONTROL0, 0,
 +              HDMI_GENERIC6_CONT, 0,
 +              HDMI_GENERIC6_SEND, 0,
 +              HDMI_GENERIC7_CONT, 0,
 +              HDMI_GENERIC7_SEND, 0);
 +      REG_SET_2(HDMI_GENERIC_PACKET_CONTROL4, 0,
 +              HDMI_GENERIC6_LINE, 0,
 +              HDMI_GENERIC7_LINE, 0);
 +}
 +
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +
 +/* Update GSP7 SDP, 128 bytes long */
 +static void enc2_send_gsp7_128_info_packet(
 +      struct dcn10_stream_encoder *enc1,
 +      const struct dc_info_packet_128 *info_packet)
 +{
 +      uint32_t i;
 +
 +      /* TODOFPGA: figure out a proper number for max_retries when polling
 +       * for the lock; use 50 for now.
 +       */
 +      uint32_t max_retries = 50;
 +      const uint32_t *content = (const uint32_t *) &info_packet->sb[0];
 +
 +      ASSERT(info_packet->hb1 == DC_DP_INFOFRAME_TYPE_PPS);
 +
 +      /* Configure for PPS packet size (128 bytes) */
 +      REG_UPDATE(DP_SEC_CNTL2, DP_SEC_GSP7_PPS, 1);
 +
 +      /* We need to turn on the clock before programming the AFMT block */
 +      REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
 +
 +      /* Poll until dig_update_lock is not locked (an ASIC-internal signal);
 +       * assumes the OTG master lock will unlock it.
 +       */
 +      /*REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_LOCK_STATUS, 0, 10, max_retries);*/
 +
 +      /* Wait for HW/SW GSP memory access conflict to go away */
 +      REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT,
 +                      0, 10, max_retries);
 +
 +      /* Clear HW/SW memory access conflict flag */
 +      REG_UPDATE(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT_CLR, 1);
 +
 +      /* write generic packet header */
 +      REG_UPDATE(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_INDEX, 7);
 +      REG_SET_4(AFMT_GENERIC_HDR, 0,
 +                      AFMT_GENERIC_HB0, info_packet->hb0,
 +                      AFMT_GENERIC_HB1, info_packet->hb1,
 +                      AFMT_GENERIC_HB2, info_packet->hb2,
 +                      AFMT_GENERIC_HB3, info_packet->hb3);
 +
 +      /* Write generic packet content 128 bytes long. Four sets are used (indexes 7
 +       * through 10) to fit 128 bytes.
 +       */
 +      for (i = 0; i < 4; i++) {
 +              uint32_t packet_index = 7 + i;
 +
 +              REG_UPDATE(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_INDEX, packet_index);
 +
 +              REG_WRITE(AFMT_GENERIC_0, *content++);
 +              REG_WRITE(AFMT_GENERIC_1, *content++);
 +              REG_WRITE(AFMT_GENERIC_2, *content++);
 +              REG_WRITE(AFMT_GENERIC_3, *content++);
 +              REG_WRITE(AFMT_GENERIC_4, *content++);
 +              REG_WRITE(AFMT_GENERIC_5, *content++);
 +              REG_WRITE(AFMT_GENERIC_6, *content++);
 +              REG_WRITE(AFMT_GENERIC_7, *content++);
 +      }
 +
 +      REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE, 1);
 +}
 +
 +/* Set DSC-related configuration.
 + *   dsc_mode: 0 disables DSC, other values enable DSC in specified format
 + *   dsc_bytes_per_pixel: Bytes per pixel in u3.28 format
 + *   dsc_slice_width: Slice width in pixels
 + */
 +static void enc2_dp_set_dsc_config(struct stream_encoder *enc,
 +                                      enum optc_dsc_mode dsc_mode,
 +                                      uint32_t dsc_bytes_per_pixel,
 +                                      uint32_t dsc_slice_width,
 +                                      uint8_t *dsc_packed_pps)
 +{
 +      struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
 +      uint32_t dsc_value = 0;
 +
 +      dsc_value = REG_READ(DP_DSC_CNTL);
 +
 +      /* dsc disable skip: DSC already disabled in HW, nothing to do */
 +      if ((dsc_value & 0x3) == 0x0)
 +              return;
 +
 +      REG_UPDATE_2(DP_DSC_CNTL,
 +                      DP_DSC_MODE, dsc_mode,
 +                      DP_DSC_SLICE_WIDTH, dsc_slice_width);
 +
 +      REG_SET(DP_DSC_BYTES_PER_PIXEL, 0,
 +              DP_DSC_BYTES_PER_PIXEL, dsc_bytes_per_pixel);
 +
 +      if (dsc_mode != OPTC_DSC_DISABLED) {
 +              struct dc_info_packet_128 pps_sdp;
 +
 +              ASSERT(dsc_packed_pps);
 +
 +              /* Load PPS into infoframe (SDP) registers */
 +              pps_sdp.valid = true;
 +              pps_sdp.hb0 = 0;
 +              pps_sdp.hb1 = DC_DP_INFOFRAME_TYPE_PPS;
 +              pps_sdp.hb2 = 127;
 +              pps_sdp.hb3 = 0;
 +              memcpy(&pps_sdp.sb[0], dsc_packed_pps, sizeof(pps_sdp.sb));
 +              enc2_send_gsp7_128_info_packet(enc1, &pps_sdp);
 +
 +              /* Enable Generic Stream Packet 7 (GSP) transmission */
 +              //REG_UPDATE(DP_SEC_CNTL,
 +              //      DP_SEC_GSP7_ENABLE, 1);
 +
 +              /* SW should make sure VBID[6] update line number is bigger
 +               * than PPS transmit line number
 +               */
 +              REG_UPDATE(DP_SEC_CNTL6,
 +                              DP_SEC_GSP7_LINE_NUM, 2);
 +              REG_UPDATE_2(DP_MSA_VBID_MISC,
 +                              DP_VBID6_LINE_REFERENCE, 0,
 +                              DP_VBID6_LINE_NUM, 3);
 +
 +              /* Send PPS data at the line number specified above.
 +               * DP spec requires PPS to be sent only when it changes, however since
 +               * decoder has to be able to handle its change on every frame, we're
 +               * sending it always (i.e. on every frame) to reduce the chance it'd be
 +               * missed by decoder. If it turns out required to send PPS only when it
 +               * changes, we can use DP_SEC_GSP7_SEND register.
 +               */
 +              REG_UPDATE_2(DP_SEC_CNTL,
 +                      DP_SEC_GSP7_ENABLE, 1,
 +                      DP_SEC_STREAM_ENABLE, 1);
 +      } else {
 +              /* Disable Generic Stream Packet 7 (GSP) transmission */
 +              REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP7_ENABLE, 0);
 +              REG_UPDATE(DP_SEC_CNTL2, DP_SEC_GSP7_PPS, 0);
 +      }
 +}
 +#endif
 +
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +/* This function reads DSC-related register fields into a dcn_dsc_state
 + * struct, to be logged later by dcn10_log_hw_state.
 + */
 +static void enc2_read_state(struct stream_encoder *enc, struct enc_state *s)
 +{
 +      struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
 +
 +      //if dsc is enabled, continue to read
 +      REG_GET(DP_DSC_CNTL, DP_DSC_MODE, &s->dsc_mode);
 +      if (s->dsc_mode) {
 +              REG_GET(DP_DSC_CNTL, DP_DSC_SLICE_WIDTH, &s->dsc_slice_width);
 +              REG_GET(DP_SEC_CNTL6, DP_SEC_GSP7_LINE_NUM, &s->sec_gsp_pps_line_num);
 +
 +              REG_GET(DP_MSA_VBID_MISC, DP_VBID6_LINE_REFERENCE, &s->vbid6_line_reference);
 +              REG_GET(DP_MSA_VBID_MISC, DP_VBID6_LINE_NUM, &s->vbid6_line_num);
 +
 +              REG_GET(DP_SEC_CNTL, DP_SEC_GSP7_ENABLE, &s->sec_gsp_pps_enable);
 +              REG_GET(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, &s->sec_stream_enable);
 +      }
 +}
 +#endif
 +
 +/* Set dynamic metadata configuration.
 + *   enable_dme:         TRUE enables the Dynamic Metadata Engine; FALSE disables it
 + *   hubp_requestor_id:  HUBP physical instance that is the source of dynamic metadata;
 + *                       only needs to be set when enable_dme is TRUE
 + *   dmdata_mode:        dynamic metadata packet type: DP, HDMI, or Dolby Vision
 + *
 + *   Ensure the OTG master update lock is set when changing the DME configuration.
 + */
 +static void enc2_set_dynamic_metadata(struct stream_encoder *enc,
 +              bool enable_dme,
 +              uint32_t hubp_requestor_id,
 +              enum dynamic_metadata_mode dmdata_mode)
 +{
 +      struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
 +
 +      if (enable_dme) {
 +              REG_UPDATE_2(DME_CONTROL,
 +                              METADATA_HUBP_REQUESTOR_ID, hubp_requestor_id,
 +                              METADATA_STREAM_TYPE, (dmdata_mode == dmdata_dolby_vision) ? 1 : 0);
 +
 +              /* Use default line reference DP_SOF for bringup.
 +               * Should use OTG_SOF for DRR cases
 +               */
 +              if (dmdata_mode == dmdata_dp)
 +                      REG_UPDATE_3(DP_SEC_METADATA_TRANSMISSION,
 +                                      DP_SEC_METADATA_PACKET_ENABLE, 1,
 +                                      DP_SEC_METADATA_PACKET_LINE_REFERENCE, 0,
 +                                      DP_SEC_METADATA_PACKET_LINE, 20);
 +              else {
 +                      REG_UPDATE_3(HDMI_METADATA_PACKET_CONTROL,
 +                                      HDMI_METADATA_PACKET_ENABLE, 1,
 +                                      HDMI_METADATA_PACKET_LINE_REFERENCE, 0,
 +                                      HDMI_METADATA_PACKET_LINE, 2);
 +
 +                      if (dmdata_mode == dmdata_dolby_vision)
 +                              REG_UPDATE(DIG_FE_CNTL,
 +                                              DOLBY_VISION_EN, 1);
 +              }
 +
 +              REG_UPDATE(DME_CONTROL,
 +                              METADATA_ENGINE_EN, 1);
 +      } else {
 +              REG_UPDATE(DME_CONTROL,
 +                              METADATA_ENGINE_EN, 0);
 +
 +              if (dmdata_mode == dmdata_dp)
 +                      REG_UPDATE(DP_SEC_METADATA_TRANSMISSION,
 +                                      DP_SEC_METADATA_PACKET_ENABLE, 0);
 +              else {
 +                      REG_UPDATE(HDMI_METADATA_PACKET_CONTROL,
 +                                      HDMI_METADATA_PACKET_ENABLE, 0);
 +                      REG_UPDATE(DIG_FE_CNTL,
 +                                      DOLBY_VISION_EN, 0);
 +              }
 +      }
 +}
 +
 +static void enc2_stream_encoder_update_dp_info_packets(
 +      struct stream_encoder *enc,
 +      const struct encoder_info_frame *info_frame)
 +{
 +      struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
 +      uint32_t dmdata_packet_enabled = 0;
 +
 +      enc1_stream_encoder_update_dp_info_packets(enc, info_frame);
 +
 +      /* check if dynamic metadata packet transmission is enabled */
 +      REG_GET(DP_SEC_METADATA_TRANSMISSION,
 +                      DP_SEC_METADATA_PACKET_ENABLE, &dmdata_packet_enabled);
 +
 +      if (dmdata_packet_enabled)
 +              REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
 +}
 +
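 +/* Two pixels are packed per container for YCbCr 4:2:0, and for DSC with
 + * YCbCr 4:2:2 when simple 4:2:2 mode is not used.
 + */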
 +static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
 +{
 +      bool two_pix = timing->pixel_encoding == PIXEL_ENCODING_YCBCR420;
 +
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +      two_pix = two_pix || (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422
 +                      && !timing->dsc_cfg.ycbcr422_simple);
 +#endif
 +      return two_pix;
 +}
 +
 +void enc2_stream_encoder_dp_unblank(
 +              struct stream_encoder *enc,
 +              const struct encoder_unblank_param *param)
 +{
 +      struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
 +
 +      if (param->link_settings.link_rate != LINK_RATE_UNKNOWN) {
 +              uint32_t n_vid = 0x8000;
 +              uint32_t m_vid;
 +              uint32_t n_multiply = 0;
 +              uint64_t m_vid_l = n_vid;
 +
 +              /* YCbCr 4:2:0 : Computed VID_M will be 2X the input rate */
 +              if (is_two_pixels_per_containter(&param->timing) || param->odm) {
 +                      /* this logic should match get_pixel_clock_parameters() */
 +                      n_multiply = 1;
 +              }
 +              /* M / N = Fstream / Flink
 +               * m_vid / n_vid = pixel rate / link rate
 +               */
 +
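 +              /* pix_clk_100hz / 10 yields kHz, matching the kHz units of
 +               * the link rate reference frequency below.
 +               */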
 +              m_vid_l *= param->timing.pix_clk_100hz / 10;
 +              m_vid_l = div_u64(m_vid_l,
 +                      param->link_settings.link_rate
 +                              * LINK_RATE_REF_FREQ_IN_KHZ);
 +
 +              m_vid = (uint32_t) m_vid_l;
 +
 +              /* enable auto measurement */
 +
 +              REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 0);
 +
 +              /* Auto measurement needs one full 0x8000 symbol cycle to kick
 +               * in, therefore program initial values for Mvid and Nvid.
 +               */
 +
 +              REG_UPDATE(DP_VID_N, DP_VID_N, n_vid);
 +
 +              REG_UPDATE(DP_VID_M, DP_VID_M, m_vid);
 +
 +              REG_UPDATE_2(DP_VID_TIMING,
 +                              DP_VID_M_N_GEN_EN, 1,
 +                              DP_VID_N_MUL, n_multiply);
 +      }
 +
 +      /* set DIG_START to 0x1 to reset FIFO */
 +
 +      REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
 +
 +      /* write 0 to take the FIFO out of reset */
 +
 +      REG_UPDATE(DIG_FE_CNTL, DIG_START, 0);
 +
 +      /* switch DP encoder to CRTC data */
 +
 +      REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0);
 +
 +      /* wait 100us for DIG/DP logic to prime
 +       * (i.e. a few video lines)
 +       */
 +      udelay(100);
 +
 +      /* The hardware will start sending video at the start of the next DP
 +       * frame (i.e. the rising edge of vblank).
 +       * NOTE: We used to program DP_VID_STREAM_DIS_DEFER = 2 here, but this
 +       * register has no effect on the enable transition! HW always
 +       * guarantees VID_STREAM enable at the start of the next frame; this
 +       * is not programmable.
 +       */
 +
 +      REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
 +}
 +
 +static void enc2_dp_set_odm_combine(
 +      struct stream_encoder *enc,
 +      bool odm_combine)
 +{
 +      struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
 +
 +      REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_COMBINE, odm_combine);
 +}
 +
 +void enc2_stream_encoder_dp_set_stream_attribute(
 +      struct stream_encoder *enc,
 +      struct dc_crtc_timing *crtc_timing,
 +      enum dc_color_space output_color_space,
 +      uint32_t enable_sdp_splitting)
 +{
 +      struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
 +
 +      enc1_stream_encoder_dp_set_stream_attribute(enc, crtc_timing, output_color_space, enable_sdp_splitting);
 +
 +      REG_UPDATE(DP_SEC_FRAMING4,
 +              DP_SST_SDP_SPLITTING, enable_sdp_splitting);
 +}
 +
 +static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
 +      .dp_set_odm_combine =
 +              enc2_dp_set_odm_combine,
 +      .dp_set_stream_attribute =
 +              enc2_stream_encoder_dp_set_stream_attribute,
 +      .hdmi_set_stream_attribute =
 +              enc1_stream_encoder_hdmi_set_stream_attribute,
 +      .dvi_set_stream_attribute =
 +              enc1_stream_encoder_dvi_set_stream_attribute,
 +      .set_mst_bandwidth =
 +              enc1_stream_encoder_set_mst_bandwidth,
 +      .update_hdmi_info_packets =
 +              enc2_stream_encoder_update_hdmi_info_packets,
 +      .stop_hdmi_info_packets =
 +              enc2_stream_encoder_stop_hdmi_info_packets,
 +      .update_dp_info_packets =
 +              enc2_stream_encoder_update_dp_info_packets,
 +      .stop_dp_info_packets =
 +              enc1_stream_encoder_stop_dp_info_packets,
 +      .dp_blank =
 +              enc1_stream_encoder_dp_blank,
 +      .dp_unblank =
 +              enc2_stream_encoder_dp_unblank,
 +      .audio_mute_control = enc1_se_audio_mute_control,
 +
 +      .dp_audio_setup = enc1_se_dp_audio_setup,
 +      .dp_audio_enable = enc1_se_dp_audio_enable,
 +      .dp_audio_disable = enc1_se_dp_audio_disable,
 +
 +      .hdmi_audio_setup = enc1_se_hdmi_audio_setup,
 +      .hdmi_audio_disable = enc1_se_hdmi_audio_disable,
 +      .setup_stereo_sync  = enc1_setup_stereo_sync,
 +      .set_avmute = enc1_stream_encoder_set_avmute,
 +      .dig_connect_to_otg  = enc1_dig_connect_to_otg,
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +      .enc_read_state = enc2_read_state,
 +#endif
 +
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +      .dp_set_dsc_config = enc2_dp_set_dsc_config,
 +#endif
 +      .set_dynamic_metadata = enc2_set_dynamic_metadata,
 +};
 +
 +void dcn20_stream_encoder_construct(
 +      struct dcn10_stream_encoder *enc1,
 +      struct dc_context *ctx,
 +      struct dc_bios *bp,
 +      enum engine_id eng_id,
 +      const struct dcn10_stream_enc_registers *regs,
 +      const struct dcn10_stream_encoder_shift *se_shift,
 +      const struct dcn10_stream_encoder_mask *se_mask)
 +{
 +      enc1->base.funcs = &dcn20_str_enc_funcs;
 +      enc1->base.ctx = ctx;
 +      enc1->base.id = eng_id;
 +      enc1->base.bp = bp;
 +      enc1->regs = regs;
 +      enc1->se_shift = se_shift;
 +      enc1->se_mask = se_mask;
 +}
 +
index 65866d620759ecebf195b0e808f0d6e6ada218fb,0000000000000000000000000000000000000000..3cc0f2a1f77cc69b33188a6299dfceba60804a64
mode 100644,000000..100644
--- /dev/null
@@@ -1,373 -1,0 +1,375 @@@
 +/*
 + * Copyright 2018 Advanced Micro Devices, Inc.
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the "Software"),
 + * to deal in the Software without restriction, including without limitation
 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 + * and/or sell copies of the Software, and to permit persons to whom the
 + * Software is furnished to do so, subject to the following conditions:
 + *
 + * The above copyright notice and this permission notice shall be included in
 + * all copies or substantial portions of the Software.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 + * OTHER DEALINGS IN THE SOFTWARE.
 + *
 + * Authors: AMD
 + *
 + */
 +
++#include <linux/slab.h>
++
 +#include "dm_services.h"
 +
 +#include "include/logger_interface.h"
 +
 +#include "../dce110/irq_service_dce110.h"
 +
 +#include "dcn/dcn_2_0_0_offset.h"
 +#include "dcn/dcn_2_0_0_sh_mask.h"
 +#include "navi10_ip_offset.h"
 +
 +
 +#include "irq_service_dcn20.h"
 +
 +#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
 +
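 +/* Map an IH ring source id (plus context id for HPD) to a DAL IRQ source. */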
 +enum dc_irq_source to_dal_irq_source_dcn20(
 +              struct irq_service *irq_service,
 +              uint32_t src_id,
 +              uint32_t ext_id)
 +{
 +      switch (src_id) {
 +      case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
 +              return DC_IRQ_SOURCE_VBLANK1;
 +      case DCN_1_0__SRCID__DC_D2_OTG_VSTARTUP:
 +              return DC_IRQ_SOURCE_VBLANK2;
 +      case DCN_1_0__SRCID__DC_D3_OTG_VSTARTUP:
 +              return DC_IRQ_SOURCE_VBLANK3;
 +      case DCN_1_0__SRCID__DC_D4_OTG_VSTARTUP:
 +              return DC_IRQ_SOURCE_VBLANK4;
 +      case DCN_1_0__SRCID__DC_D5_OTG_VSTARTUP:
 +              return DC_IRQ_SOURCE_VBLANK5;
 +      case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
 +              return DC_IRQ_SOURCE_VBLANK6;
 +      case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT:
 +              return DC_IRQ_SOURCE_PFLIP1;
 +      case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT:
 +              return DC_IRQ_SOURCE_PFLIP2;
 +      case DCN_1_0__SRCID__HUBP2_FLIP_INTERRUPT:
 +              return DC_IRQ_SOURCE_PFLIP3;
 +      case DCN_1_0__SRCID__HUBP3_FLIP_INTERRUPT:
 +              return DC_IRQ_SOURCE_PFLIP4;
 +      case DCN_1_0__SRCID__HUBP4_FLIP_INTERRUPT:
 +              return DC_IRQ_SOURCE_PFLIP5;
 +      case DCN_1_0__SRCID__HUBP5_FLIP_INTERRUPT:
 +              return DC_IRQ_SOURCE_PFLIP6;
 +      case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
 +              return DC_IRQ_SOURCE_VUPDATE1;
 +      case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
 +              return DC_IRQ_SOURCE_VUPDATE2;
 +      case DCN_1_0__SRCID__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
 +              return DC_IRQ_SOURCE_VUPDATE3;
 +      case DCN_1_0__SRCID__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
 +              return DC_IRQ_SOURCE_VUPDATE4;
 +      case DCN_1_0__SRCID__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
 +              return DC_IRQ_SOURCE_VUPDATE5;
 +      case DCN_1_0__SRCID__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
 +              return DC_IRQ_SOURCE_VUPDATE6;
 +
 +      case DCN_1_0__SRCID__DC_HPD1_INT:
 +              /* generic src_id for all HPD and HPDRX interrupts */
 +              switch (ext_id) {
 +              case DCN_1_0__CTXID__DC_HPD1_INT:
 +                      return DC_IRQ_SOURCE_HPD1;
 +              case DCN_1_0__CTXID__DC_HPD2_INT:
 +                      return DC_IRQ_SOURCE_HPD2;
 +              case DCN_1_0__CTXID__DC_HPD3_INT:
 +                      return DC_IRQ_SOURCE_HPD3;
 +              case DCN_1_0__CTXID__DC_HPD4_INT:
 +                      return DC_IRQ_SOURCE_HPD4;
 +              case DCN_1_0__CTXID__DC_HPD5_INT:
 +                      return DC_IRQ_SOURCE_HPD5;
 +              case DCN_1_0__CTXID__DC_HPD6_INT:
 +                      return DC_IRQ_SOURCE_HPD6;
 +              case DCN_1_0__CTXID__DC_HPD1_RX_INT:
 +                      return DC_IRQ_SOURCE_HPD1RX;
 +              case DCN_1_0__CTXID__DC_HPD2_RX_INT:
 +                      return DC_IRQ_SOURCE_HPD2RX;
 +              case DCN_1_0__CTXID__DC_HPD3_RX_INT:
 +                      return DC_IRQ_SOURCE_HPD3RX;
 +              case DCN_1_0__CTXID__DC_HPD4_RX_INT:
 +                      return DC_IRQ_SOURCE_HPD4RX;
 +              case DCN_1_0__CTXID__DC_HPD5_RX_INT:
 +                      return DC_IRQ_SOURCE_HPD5RX;
 +              case DCN_1_0__CTXID__DC_HPD6_RX_INT:
 +                      return DC_IRQ_SOURCE_HPD6RX;
 +              default:
 +                      return DC_IRQ_SOURCE_INVALID;
 +              }
 +              break;
 +
 +      default:
 +              return DC_IRQ_SOURCE_INVALID;
 +      }
 +}
 +
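 +/* Ack an HPD interrupt, then flip the interrupt polarity based on the
 + * delayed sense status so the opposite transition raises the next interrupt.
 + */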
 +static bool hpd_ack(
 +      struct irq_service *irq_service,
 +      const struct irq_source_info *info)
 +{
 +      uint32_t addr = info->status_reg;
 +      uint32_t value = dm_read_reg(irq_service->ctx, addr);
 +      uint32_t current_status =
 +              get_reg_field_value(
 +                      value,
 +                      HPD0_DC_HPD_INT_STATUS,
 +                      DC_HPD_SENSE_DELAYED);
 +
 +      dal_irq_service_ack_generic(irq_service, info);
 +
 +      value = dm_read_reg(irq_service->ctx, info->enable_reg);
 +
 +      set_reg_field_value(
 +              value,
 +              current_status ? 0 : 1,
 +              HPD0_DC_HPD_INT_CONTROL,
 +              DC_HPD_INT_POLARITY);
 +
 +      dm_write_reg(irq_service->ctx, info->enable_reg, value);
 +
 +      return true;
 +}
 +
 +static const struct irq_source_info_funcs hpd_irq_info_funcs = {
 +      .set = NULL,
 +      .ack = hpd_ack
 +};
 +
 +static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
 +      .set = NULL,
 +      .ack = NULL
 +};
 +
 +static const struct irq_source_info_funcs pflip_irq_info_funcs = {
 +      .set = NULL,
 +      .ack = NULL
 +};
 +
 +static const struct irq_source_info_funcs vblank_irq_info_funcs = {
 +      .set = NULL,
 +      .ack = NULL
 +};
 +
 +#undef BASE_INNER
 +#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
 +
 +/* compile-time expansion of the base address */
 +#define BASE(seg) \
 +      BASE_INNER(seg)
 +
 +
 +#define SRI(reg_name, block, id)\
 +      BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
 +                      mm ## block ## id ## _ ## reg_name
 +
 +
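 +/* Expand to the enable/ack register addresses, masks and values for one
 + * interrupt source table entry.
 + */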
 +#define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
 +      .enable_reg = SRI(reg1, block, reg_num),\
 +      .enable_mask = \
 +              block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
 +      .enable_value = {\
 +              block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
 +              ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
 +      },\
 +      .ack_reg = SRI(reg2, block, reg_num),\
 +      .ack_mask = \
 +              block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
 +      .ack_value = \
 +              block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \
 +
 +
 +
 +#define hpd_int_entry(reg_num)\
 +      [DC_IRQ_SOURCE_HPD1 + reg_num] = {\
 +              IRQ_REG_ENTRY(HPD, reg_num,\
 +                      DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\
 +                      DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\
 +              .status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
 +              .funcs = &hpd_irq_info_funcs\
 +      }
 +
 +#define hpd_rx_int_entry(reg_num)\
 +      [DC_IRQ_SOURCE_HPD1RX + reg_num] = {\
 +              IRQ_REG_ENTRY(HPD, reg_num,\
 +                      DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\
 +                      DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\
 +              .status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
 +              .funcs = &hpd_rx_irq_info_funcs\
 +      }
 +#define pflip_int_entry(reg_num)\
 +      [DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
 +              IRQ_REG_ENTRY(HUBPREQ, reg_num,\
 +                      DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK,\
 +                      DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_CLEAR),\
 +              .funcs = &pflip_irq_info_funcs\
 +      }
 +
 +#define vupdate_int_entry(reg_num)\
 +      [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
 +              IRQ_REG_ENTRY(OTG, reg_num,\
 +                      OTG_GLOBAL_SYNC_STATUS, VUPDATE_INT_EN,\
 +                      OTG_GLOBAL_SYNC_STATUS, VUPDATE_EVENT_CLEAR),\
 +              .funcs = &vblank_irq_info_funcs\
 +      }
 +
 +#define vblank_int_entry(reg_num)\
 +      [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
 +              IRQ_REG_ENTRY(OTG, reg_num,\
 +                      OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
 +                      OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
 +              .funcs = &vblank_irq_info_funcs\
 +      }
 +
 +#define dummy_irq_entry() \
 +      {\
 +              .funcs = &dummy_irq_info_funcs\
 +      }
 +
 +#define i2c_int_entry(reg_num) \
 +      [DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
 +
 +#define dp_sink_int_entry(reg_num) \
 +      [DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
 +
 +#define gpio_pad_int_entry(reg_num) \
 +      [DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
 +
 +#define dc_underflow_int_entry(reg_num) \
 +      [DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
 +
 +static const struct irq_source_info_funcs dummy_irq_info_funcs = {
 +      .set = dal_irq_service_dummy_set,
 +      .ack = dal_irq_service_dummy_ack
 +};
 +
 +static const struct irq_source_info
 +irq_source_info_dcn20[DAL_IRQ_SOURCES_NUMBER] = {
 +      [DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
 +      hpd_int_entry(0),
 +      hpd_int_entry(1),
 +      hpd_int_entry(2),
 +      hpd_int_entry(3),
 +      hpd_int_entry(4),
 +      hpd_int_entry(5),
 +      hpd_rx_int_entry(0),
 +      hpd_rx_int_entry(1),
 +      hpd_rx_int_entry(2),
 +      hpd_rx_int_entry(3),
 +      hpd_rx_int_entry(4),
 +      hpd_rx_int_entry(5),
 +      i2c_int_entry(1),
 +      i2c_int_entry(2),
 +      i2c_int_entry(3),
 +      i2c_int_entry(4),
 +      i2c_int_entry(5),
 +      i2c_int_entry(6),
 +      dp_sink_int_entry(1),
 +      dp_sink_int_entry(2),
 +      dp_sink_int_entry(3),
 +      dp_sink_int_entry(4),
 +      dp_sink_int_entry(5),
 +      dp_sink_int_entry(6),
 +      [DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
 +      pflip_int_entry(0),
 +      pflip_int_entry(1),
 +      pflip_int_entry(2),
 +      pflip_int_entry(3),
 +      [DC_IRQ_SOURCE_PFLIP5] = dummy_irq_entry(),
 +      [DC_IRQ_SOURCE_PFLIP6] = dummy_irq_entry(),
 +      [DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
 +      gpio_pad_int_entry(0),
 +      gpio_pad_int_entry(1),
 +      gpio_pad_int_entry(2),
 +      gpio_pad_int_entry(3),
 +      gpio_pad_int_entry(4),
 +      gpio_pad_int_entry(5),
 +      gpio_pad_int_entry(6),
 +      gpio_pad_int_entry(7),
 +      gpio_pad_int_entry(8),
 +      gpio_pad_int_entry(9),
 +      gpio_pad_int_entry(10),
 +      gpio_pad_int_entry(11),
 +      gpio_pad_int_entry(12),
 +      gpio_pad_int_entry(13),
 +      gpio_pad_int_entry(14),
 +      gpio_pad_int_entry(15),
 +      gpio_pad_int_entry(16),
 +      gpio_pad_int_entry(17),
 +      gpio_pad_int_entry(18),
 +      gpio_pad_int_entry(19),
 +      gpio_pad_int_entry(20),
 +      gpio_pad_int_entry(21),
 +      gpio_pad_int_entry(22),
 +      gpio_pad_int_entry(23),
 +      gpio_pad_int_entry(24),
 +      gpio_pad_int_entry(25),
 +      gpio_pad_int_entry(26),
 +      gpio_pad_int_entry(27),
 +      gpio_pad_int_entry(28),
 +      gpio_pad_int_entry(29),
 +      gpio_pad_int_entry(30),
 +      dc_underflow_int_entry(1),
 +      dc_underflow_int_entry(2),
 +      dc_underflow_int_entry(3),
 +      dc_underflow_int_entry(4),
 +      dc_underflow_int_entry(5),
 +      dc_underflow_int_entry(6),
 +      [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
 +      [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
 +      vupdate_int_entry(0),
 +      vupdate_int_entry(1),
 +      vupdate_int_entry(2),
 +      vupdate_int_entry(3),
 +      vupdate_int_entry(4),
 +      vupdate_int_entry(5),
 +      vblank_int_entry(0),
 +      vblank_int_entry(1),
 +      vblank_int_entry(2),
 +      vblank_int_entry(3),
 +      vblank_int_entry(4),
 +      vblank_int_entry(5),
 +};
 +
 +static const struct irq_service_funcs irq_service_funcs_dcn20 = {
 +              .to_dal_irq_source = to_dal_irq_source_dcn20
 +};
 +
 +static void construct(
 +      struct irq_service *irq_service,
 +      struct irq_service_init_data *init_data)
 +{
 +      dal_irq_service_construct(irq_service, init_data);
 +
 +      irq_service->info = irq_source_info_dcn20;
 +      irq_service->funcs = &irq_service_funcs_dcn20;
 +}
 +
 +struct irq_service *dal_irq_service_dcn20_create(
 +      struct irq_service_init_data *init_data)
 +{
 +      struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
 +                                                GFP_KERNEL);
 +
 +      if (!irq_service)
 +              return NULL;
 +
 +      construct(irq_service, init_data);
 +      return irq_service;
 +}
index 5237246f89f757654e680bb143fd45a3cd5bd1ab,463275f88e89e443540969c74a45dd7979671f0b..f63a2becb7770a726c1868b6d873028bd7ecf8f7
   * OTHER DEALINGS IN THE SOFTWARE.
   */
  
- #include "pp_debug.h"
  #include <linux/firmware.h>
+ #include <linux/module.h>
++#include <linux/pci.h>
+ #include "pp_debug.h"
  #include "amdgpu.h"
  #include "amdgpu_smu.h"
  #include "atomfirmware.h"
Simple merge
Simple merge