}
}
+/**
+ * evergreen_get_allowed_info_register - fetch the register for the info ioctl
+ *
+ * @rdev: radeon_device pointer
+ * @reg: register offset in bytes
+ * @val: pointer used to return the register value
+ *
+ * Reads @reg into @val if the register is one of the status registers
+ * userspace is allowed to query through the info ioctl.
+ *
+ * Returns 0 for success or -EINVAL for an invalid register.
+ */
+int evergreen_get_allowed_info_register(struct radeon_device *rdev,
+                                        u32 reg, u32 *val)
+{
+        switch (reg) {
+        case GRBM_STATUS:
+        case GRBM_STATUS_SE0:
+        case GRBM_STATUS_SE1:
+        case SRBM_STATUS:
+        case SRBM_STATUS2:
+        case DMA_STATUS_REG:
+        case UVD_STATUS:
+                *val = RREG32(reg);
+                return 0;
+        default:
+                return -EINVAL;
+        }
+}
+
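For context, a minimal sketch of how the DRM info ioctl path might consume this callback. The helper name and the copy_from_user/copy_to_user framing are assumptions for illustration and are not part of these hunks; only the get_allowed_info_register member of the asic struct is taken from the patch.

/* Illustrative only: route a userspace register read through the new
 * per-ASIC whitelist callback (hypothetical helper, not from this patch). */
static int radeon_info_read_reg_sketch(struct radeon_device *rdev,
                                       u32 __user *value_ptr)
{
        u32 value;

        if (copy_from_user(&value, value_ptr, sizeof(u32)))
                return -EFAULT;
        /* The callback rejects anything outside the per-ASIC whitelist. */
        if (rdev->asic->get_allowed_info_register(rdev, value, &value))
                return -EINVAL;
        return copy_to_user(value_ptr, &value, sizeof(u32)) ? -EFAULT : 0;
}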
void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
unsigned *bankh, unsigned *mtaspect,
unsigned *tile_split)
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &rv770_get_xclk,
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+ .get_allowed_info_register = &evergreen_get_allowed_info_register,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+ .get_allowed_info_register = &evergreen_get_allowed_info_register,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &rv770_get_xclk,
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+ .get_allowed_info_register = &evergreen_get_allowed_info_register,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
unsigned num_gpu_pages,
struct reservation_object *resv);
int evergreen_get_temp(struct radeon_device *rdev);
+int evergreen_get_allowed_info_register(struct radeon_device *rdev,
+                                        u32 reg, u32 *val);
int sumo_get_temp(struct radeon_device *rdev);
int tn_get_temp(struct radeon_device *rdev);
int cypress_dpm_init(struct radeon_device *rdev);
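As a usage illustration from the other side of the interface, a hedged userspace sketch of querying one of the whitelisted registers. struct drm_radeon_info and DRM_IOCTL_RADEON_INFO come from the radeon UAPI header; the RADEON_INFO_READ_REG request name and the GRBM_STATUS offset (0x8010 on evergreen) are assumptions taken from those headers rather than from these hunks.

/* Hypothetical userspace sketch: read a whitelisted GPU register through
 * the radeon info ioctl.  The register offset is passed in the location
 * referenced by info.value and overwritten with the register contents. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/radeon_drm.h>     /* or <libdrm/radeon_drm.h>, depending on the header package */

static int read_gpu_reg(int fd, uint32_t offset, uint32_t *out)
{
        struct drm_radeon_info info;
        uint32_t data = offset;

        memset(&info, 0, sizeof(info));
        info.request = RADEON_INFO_READ_REG;    /* assumed request name */
        info.value = (uintptr_t)&data;

        if (ioctl(fd, DRM_IOCTL_RADEON_INFO, &info) != 0)
                return -1;      /* e.g. EINVAL for a non-whitelisted register */

        *out = data;
        return 0;
}

Usage would be something along the lines of read_gpu_reg(fd, 0x8010, &v) on an evergreen part, where 0x8010 is assumed to be the GRBM_STATUS offset from evergreend.h.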