From: David Cunado
Date: Mon, 2 Oct 2017 16:41:39 +0000 (+0100)
Subject: Init and save / restore of PMCR_EL0 / PMCR
X-Git-Url: http://git.lede-project.org./?a=commitdiff_plain;h=3e61b2b54336510475fbab83b7d97538f3ac5460;p=project%2Fbcm63xx%2Fatf.git

Init and save / restore of PMCR_EL0 / PMCR

Currently TF does not initialise the PMCR_EL0 register in the secure
context or save/restore the register.

In particular, the DP field may not be set to one to prohibit cycle
counting in the secure state, even though event counting is generally
prohibited via the default setting of MDCR_EL3.SPME to 0.

This patch initialises PMCR_EL0.DP to one in the secure state to
prohibit cycle counting and also initialises other fields that have an
architecturally UNKNOWN reset value.

Additionally, PMCR_EL0 is added to the list of registers that are saved
and restored during a world switch.

Similar changes are made for PMCR for the AArch32 execution state.

NOTE: secure world code at lower ELs that assumes other values in
PMCR_EL0 will be impacted.

Change-Id: Iae40e8c0a196d74053accf97063ebc257b4d2f3a
Signed-off-by: David Cunado
---

diff --git a/bl32/sp_min/aarch32/entrypoint.S b/bl32/sp_min/aarch32/entrypoint.S
index d868c53d..cd9fe5cb 100644
--- a/bl32/sp_min/aarch32/entrypoint.S
+++ b/bl32/sp_min/aarch32/entrypoint.S
@@ -162,6 +162,15 @@ func handle_smc
 	stcopr	r0, SCR
 	isb
 
+	/*
+	 * Set PMCR.DP to 1 to prohibit cycle counting whilst in Secure Mode.
+	 * Also, the PMCR.LC field has an architecturally UNKNOWN value on reset
+	 * and so set to 1 as ARM has deprecated use of PMCR.LC=0.
+	 */
+	ldcopr	r0, PMCR
+	orr	r0, r0, #(PMCR_LC_BIT | PMCR_DP_BIT)
+	stcopr	r0, PMCR
+
 	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
 	/* Check whether an SMC64 is issued */
 	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
@@ -210,6 +219,15 @@ func handle_fiq
 	stcopr	r0, SCR
 	isb
 
+	/*
+	 * Set PMCR.DP to 1 to prohibit cycle counting whilst in Secure Mode.
+	 * Also, the PMCR.LC field has an architecturally UNKNOWN value on reset
+	 * and so set to 1 as ARM has deprecated use of PMCR.LC=0.
+	 */
+	ldcopr	r0, PMCR
+	orr	r0, r0, #(PMCR_LC_BIT | PMCR_DP_BIT)
+	stcopr	r0, PMCR
+
 	push	{r2, r3}
 	bl	sp_min_fiq
 	pop	{r0, r3}
diff --git a/include/lib/aarch32/arch.h b/include/lib/aarch32/arch.h
index 5fbb83a6..6c6d6a1d 100644
--- a/include/lib/aarch32/arch.h
+++ b/include/lib/aarch32/arch.h
@@ -350,6 +350,8 @@
 #define PMCR_N_SHIFT		11
 #define PMCR_N_MASK		0x1f
 #define PMCR_N_BITS		(PMCR_N_MASK << PMCR_N_SHIFT)
+#define PMCR_LC_BIT		(1 << 6)
+#define PMCR_DP_BIT		(1 << 5)
 
 /*******************************************************************************
  * Definitions of register offsets, fields and macros for CPU system
diff --git a/include/lib/aarch32/smcc_helpers.h b/include/lib/aarch32/smcc_helpers.h
index 1bc84381..53f1aa4a 100644
--- a/include/lib/aarch32/smcc_helpers.h
+++ b/include/lib/aarch32/smcc_helpers.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -21,7 +21,8 @@
 #define SMC_CTX_SP_MON		0x7C
 #define SMC_CTX_LR_MON		0x80
 #define SMC_CTX_SCR		0x84
-#define SMC_CTX_SIZE		0x88
+#define SMC_CTX_PMCR		0x88
+#define SMC_CTX_SIZE		0x8C
 
 #ifndef __ASSEMBLY__
 #include
@@ -73,6 +74,7 @@ typedef struct smc_ctx {
 	u_register_t sp_mon;
 	u_register_t lr_mon;
 	u_register_t scr;
+	u_register_t pmcr;
 } smc_ctx_t;
 
 /*
diff --git a/include/lib/aarch32/smcc_macros.S b/include/lib/aarch32/smcc_macros.S
index 7edf4106..cf26175d 100644
--- a/include/lib/aarch32/smcc_macros.S
+++ b/include/lib/aarch32/smcc_macros.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -13,6 +13,8 @@
  * spsr, lr, sp registers and the `scr` register to the SMC context on entry
  * due a SMC call. The `lr` of the current mode (monitor) is expected to be
  * already saved. The `sp` must point to the `smc_ctx_t` to save to.
+ * Additionally, also save the 'pmcr' register as this is updated whilst
+ * executing in the secure world.
  */
 	.macro smcc_save_gp_mode_regs
 	/* Save r0 - r12 in the SMC context */
@@ -46,6 +48,8 @@
 	/* lr_mon is already saved by caller */
 	ldcopr	r4, SCR
 	str	r4, [sp, #SMC_CTX_SCR]
+	ldcopr	r4, PMCR
+	str	r4, [sp, #SMC_CTX_PMCR]
 	.endm
 
 /*
@@ -70,6 +74,12 @@
 	stcopr	r1, SCR
 	isb
 
+	/*
+	 * Restore the PMCR register.
+	 */
+	ldr	r1, [r0, #SMC_CTX_PMCR]
+	stcopr	r1, PMCR
+
 	/* Restore the banked registers including the current SPSR */
 	add	r1, r0, #SMC_CTX_SP_USR
 	ldm	r1!, {r4-r12}
diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h
index f85e7897..9cbe4058 100644
--- a/include/lib/aarch64/arch.h
+++ b/include/lib/aarch64/arch.h
@@ -502,9 +502,14 @@
 #define CNTACR_RWPT_SHIFT	U(0x5)
 
 /* PMCR_EL0 definitions */
+#define PMCR_EL0_RESET_VAL	U(0x0)
 #define PMCR_EL0_N_SHIFT	U(11)
 #define PMCR_EL0_N_MASK		U(0x1f)
 #define PMCR_EL0_N_BITS		(PMCR_EL0_N_MASK << PMCR_EL0_N_SHIFT)
+#define PMCR_EL0_LC_BIT		(U(1) << 6)
+#define PMCR_EL0_DP_BIT		(U(1) << 5)
+#define PMCR_EL0_X_BIT		(U(1) << 4)
+#define PMCR_EL0_D_BIT		(U(1) << 3)
 
 /*******************************************************************************
  * Definitions of MAIR encodings for device and normal memory
diff --git a/include/lib/aarch64/arch_helpers.h b/include/lib/aarch64/arch_helpers.h
index 0d0d7d33..684a0deb 100644
--- a/include/lib/aarch64/arch_helpers.h
+++ b/include/lib/aarch64/arch_helpers.h
@@ -307,7 +307,7 @@ DEFINE_SYSREG_READ_FUNC(ctr_el0)
 DEFINE_SYSREG_RW_FUNCS(mdcr_el2)
 DEFINE_SYSREG_RW_FUNCS(hstr_el2)
 DEFINE_SYSREG_RW_FUNCS(cnthp_ctl_el2)
-DEFINE_SYSREG_READ_FUNC(pmcr_el0)
+DEFINE_SYSREG_RW_FUNCS(pmcr_el0)
 
 DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el1, ICC_SRE_EL1)
 DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el2, ICC_SRE_EL2)
diff --git a/include/lib/el3_runtime/aarch64/context.h b/include/lib/el3_runtime/aarch64/context.h
index dcbf1c9d..a89468d4 100644
--- a/include/lib/el3_runtime/aarch64/context.h
+++ b/include/lib/el3_runtime/aarch64/context.h
@@ -87,22 +87,23 @@
 #define CTX_AFSR1_EL1		U(0x98)
 #define CTX_CONTEXTIDR_EL1	U(0xa0)
 #define CTX_VBAR_EL1		U(0xa8)
+#define CTX_PMCR_EL0		U(0xb0)
 
 /*
  * If the platform is AArch64-only, there is no need to save and restore these
  * AArch32 registers.
  */
 #if CTX_INCLUDE_AARCH32_REGS
-#define CTX_SPSR_ABT		U(0xb0)
-#define CTX_SPSR_UND		U(0xb8)
-#define CTX_SPSR_IRQ		U(0xc0)
-#define CTX_SPSR_FIQ		U(0xc8)
-#define CTX_DACR32_EL2		U(0xd0)
-#define CTX_IFSR32_EL2		U(0xd8)
-#define CTX_FP_FPEXC32_EL2	U(0xe0)
-#define CTX_TIMER_SYSREGS_OFF	U(0xf0) /* Align to the next 16 byte boundary */
+#define CTX_SPSR_ABT		U(0xc0) /* Align to the next 16 byte boundary */
+#define CTX_SPSR_UND		U(0xc8)
+#define CTX_SPSR_IRQ		U(0xd0)
+#define CTX_SPSR_FIQ		U(0xd8)
+#define CTX_DACR32_EL2		U(0xe0)
+#define CTX_IFSR32_EL2		U(0xe8)
+#define CTX_FP_FPEXC32_EL2	U(0xf0)
+#define CTX_TIMER_SYSREGS_OFF	U(0x100) /* Align to the next 16 byte boundary */
 #else
-#define CTX_TIMER_SYSREGS_OFF	U(0xb0)
+#define CTX_TIMER_SYSREGS_OFF	U(0xc0) /* Align to the next 16 byte boundary */
 #endif /* __CTX_INCLUDE_AARCH32_REGS__ */
 
 /*
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
index 8a6c11b7..db16a9f0 100644
--- a/lib/el3_runtime/aarch64/context.S
+++ b/lib/el3_runtime/aarch64/context.S
@@ -74,6 +74,9 @@ func el1_sysregs_context_save
 	mrs	x9, vbar_el1
 	stp	x17, x9, [x0, #CTX_CONTEXTIDR_EL1]
 
+	mrs	x10, pmcr_el0
+	str	x10, [x0, #CTX_PMCR_EL0]
+
 	/* Save AArch32 system registers if the build has instructed so */
 #if CTX_INCLUDE_AARCH32_REGS
 	mrs	x11, spsr_abt
@@ -193,6 +196,9 @@ func el1_sysregs_context_restore
 	msr	contextidr_el1, x17
 	msr	vbar_el1, x9
 
+	ldr	x10, [x0, #CTX_PMCR_EL0]
+	msr	pmcr_el0, x10
+
 	/* Restore AArch32 system registers if the build has instructed so */
#if CTX_INCLUDE_AARCH32_REGS
 	ldp	x11, x12, [x0, #CTX_SPSR_ABT]
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index 3d26056a..21e86de0 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -58,7 +58,7 @@ void cm_init(void)
 static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
 {
 	unsigned int security_state;
-	uint32_t scr_el3;
+	uint32_t scr_el3, pmcr_el0;
 	el3_state_t *state;
 	gp_regs_t *gp_regs;
 	unsigned long sctlr_elx;
@@ -164,11 +164,35 @@ static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t
 
 	/*
 	 * Store the initialised SCTLR_EL1 value in the cpu_context - SCTLR_EL2
-	 * and other EL2 resgisters are set up by cm_preapre_ns_entry() as they
+	 * and other EL2 registers are set up by cm_preapre_ns_entry() as they
 	 * are not part of the stored cpu_context.
 	 */
 	write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);
 
+	if (security_state == SECURE) {
+		/*
+		 * Initialise PMCR_EL0 for secure context only, setting all
+		 * fields rather than relying on hw. Some fields are
+		 * architecturally UNKNOWN on reset.
+		 *
+		 * PMCR_EL0.LC: Set to one so that cycle counter overflow, that
+		 * is recorded in PMOVSCLR_EL0[31], occurs on the increment
+		 * that changes PMCCNTR_EL0[63] from 1 to 0.
+		 *
+		 * PMCR_EL0.DP: Set to one so that the cycle counter,
+		 * PMCCNTR_EL0 does not count when event counting is prohibited.
+		 *
+		 * PMCR_EL0.X: Set to zero to disable export of events.
+		 *
+		 * PMCR_EL0.D: Set to zero so that, when enabled, PMCCNTR_EL0
+		 * counts on every clock cycle.
+		 */
+		pmcr_el0 = ((PMCR_EL0_RESET_VAL | PMCR_EL0_LC_BIT
+				| PMCR_EL0_DP_BIT)
+				& ~(PMCR_EL0_X_BIT | PMCR_EL0_D_BIT));
+		write_ctx_reg(get_sysregs_ctx(ctx), CTX_PMCR_EL0, pmcr_el0);
+	}
+
 	/* Populate EL3 state so that we've the right context before doing ERET */
 	state = get_el3state_ctx(ctx);
 	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
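
For reference, the value written into CTX_PMCR_EL0 for the secure context above works out to LC and DP set, X and D clear. The stand-alone sketch below is not part of the patch; it simply mirrors the PMCR_EL0_* definitions added to include/lib/aarch64/arch.h so the composed value can be checked on a host machine:

    /* Host-side illustration only: mirrors the patch's PMCR_EL0 bit definitions. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PMCR_EL0_RESET_VAL  0x0u
    #define PMCR_EL0_LC_BIT     (1u << 6)  /* long (64-bit) cycle counter overflow */
    #define PMCR_EL0_DP_BIT     (1u << 5)  /* cycle counter disabled when counting is prohibited */
    #define PMCR_EL0_X_BIT      (1u << 4)  /* export of events */
    #define PMCR_EL0_D_BIT      (1u << 3)  /* clock divider (count every 64th cycle) */

    int main(void)
    {
        /* Same expression as cm_init_context_common(): LC=1, DP=1, X=0, D=0. */
        uint32_t pmcr_el0 = (PMCR_EL0_RESET_VAL | PMCR_EL0_LC_BIT | PMCR_EL0_DP_BIT)
                            & ~(PMCR_EL0_X_BIT | PMCR_EL0_D_BIT);

        assert(pmcr_el0 == 0x60u);
        printf("Secure-context PMCR_EL0 initial value: 0x%x\n", pmcr_el0);
        return 0;
    }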