From 167a935733a6e3e412b8ed6a60034d0d84895f2e Mon Sep 17 00:00:00 2001
From: Andrew Thoelke
Date: Wed, 4 Jun 2014 21:10:52 +0100
Subject: [PATCH] Initialise CPU contexts from entry_point_info

Consolidate all BL3-1 CPU context initialization for cold boot, PSCI and
SPDs into two functions:

*  The first uses entry_point_info to initialize the relevant cpu_context
   for first entry into a lower exception level on a CPU
*  The second populates the EL1 and EL2 system registers as needed from
   the cpu_context to ensure correct entry into the lower EL

This patch alters the way that BL3-1 determines which exception level is
used when first entering EL1 or EL2 during cold boot - this is now fully
determined by the SPSR value in the entry_point_info for BL3-3, as set up
by the platform code in BL2 (or otherwise provided to BL3-1).

In the situation that EL1 (or svc mode) is selected for a processor that
supports EL2, the context management code will now configure all
essential EL2 register state to ensure correct execution of EL1. This
allows the platform code to run non-secure EL1 payloads directly without
requiring a small EL2 stub or OS loader.

Change-Id: If9fbb2417e82d2226e47568203d5a369f39d3b0f
---
 bl1/aarch64/bl1_arch_setup.c                |   7 +-
 bl31/aarch64/bl31_arch_setup.c              |  44 +---
 bl31/aarch64/context.S                      |   6 +-
 bl31/bl31_main.c                            |  41 +---
 bl31/context_mgmt.c                         | 217 ++++++++++++++++----
 include/bl31/context_mgmt.h                 |  11 +-
 include/common/bl_common.h                  |  14 +-
 include/lib/aarch64/arch.h                  |   1 +
 include/lib/aarch64/arch_helpers.h          |   3 +
 services/spd/tspd/tspd_common.c             |  60 ++----
 services/spd/tspd/tspd_main.c               |   8 +-
 services/std_svc/psci/psci_afflvl_off.c     |   8 +-
 services/std_svc/psci/psci_afflvl_on.c      |  17 +-
 services/std_svc/psci/psci_afflvl_suspend.c |  21 +-
 services/std_svc/psci/psci_common.c         | 138 +++----------
 services/std_svc/psci/psci_private.h        |  24 +--
 services/std_svc/psci/psci_setup.c          |   2 +-
 17 files changed, 293 insertions(+), 329 deletions(-)

diff --git a/bl1/aarch64/bl1_arch_setup.c b/bl1/aarch64/bl1_arch_setup.c
index 5725bac1..8ed45d9c 100644
--- a/bl1/aarch64/bl1_arch_setup.c
+++ b/bl1/aarch64/bl1_arch_setup.c
@@ -46,11 +46,10 @@ void bl1_arch_setup(void)
 	isb();
 
 	/*
-	 * Enable HVCs, route FIQs to EL3, set the next EL to be AArch64, route
-	 * external abort and SError interrupts to EL3
+	 * Set the next EL to be AArch64, route external abort and SError
+	 * interrupts to EL3
 	 */
-	tmp_reg = SCR_RES1_BITS | SCR_RW_BIT | SCR_HCE_BIT | SCR_EA_BIT |
-		  SCR_FIQ_BIT;
+	tmp_reg = SCR_RES1_BITS | SCR_RW_BIT | SCR_EA_BIT;
 	write_scr(tmp_reg);
 
 	/*
diff --git a/bl31/aarch64/bl31_arch_setup.c b/bl31/aarch64/bl31_arch_setup.c
index ad73de02..e0382b33 100644
--- a/bl31/aarch64/bl31_arch_setup.c
+++ b/bl31/aarch64/bl31_arch_setup.c
@@ -51,11 +51,11 @@ void bl31_arch_setup(void)
 	write_sctlr_el3(tmp_reg);
 
 	/*
-	 * Enable HVCs, route FIQs to EL3, set the next EL to be AArch64, route
-	 * external abort and SError interrupts to EL3
+	 * Route external abort and SError interrupts to EL3
+	 * other SCR bits will be configured before exiting to a lower exception
+	 * level
 	 */
-	tmp_reg = SCR_RES1_BITS | SCR_RW_BIT | SCR_HCE_BIT | SCR_EA_BIT |
-		  SCR_FIQ_BIT;
+	tmp_reg = SCR_RES1_BITS | SCR_EA_BIT;
 	write_scr(tmp_reg);
 
 	/*
@@ -68,39 +68,3 @@ void bl31_arch_setup(void)
 	counter_freq = plat_get_syscnt_freq();
 	write_cntfrq_el0(counter_freq);
 }
-
-/*******************************************************************************
- * Detect what the security state of the next EL is and setup the minimum
- * required architectural state: program SCTRL
to reflect the RES1 bits, and to - * have MMU and caches disabled - ******************************************************************************/ -void bl31_next_el_arch_setup(uint32_t security_state) -{ - unsigned long id_aa64pfr0 = read_id_aa64pfr0_el1(); - unsigned long next_sctlr; - unsigned long el_status; - unsigned long scr = read_scr(); - - /* Use the same endianness than the current BL */ - next_sctlr = (read_sctlr_el3() & SCTLR_EE_BIT); - - /* Find out which EL we are going to */ - el_status = (id_aa64pfr0 >> ID_AA64PFR0_EL2_SHIFT) & ID_AA64PFR0_ELX_MASK; - - if (security_state == NON_SECURE) { - /* Check if EL2 is supported */ - if (el_status && (scr & SCR_HCE_BIT)) { - /* Set SCTLR EL2 */ - next_sctlr |= SCTLR_EL2_RES1; - write_sctlr_el2(next_sctlr); - return; - } - } - - /* - * SCTLR_EL1 needs the same programming irrespective of the - * security state of EL1. - */ - next_sctlr |= SCTLR_EL1_RES1; - write_sctlr_el1(next_sctlr); -} diff --git a/bl31/aarch64/context.S b/bl31/aarch64/context.S index d0bca64f..6667419a 100644 --- a/bl31/aarch64/context.S +++ b/bl31/aarch64/context.S @@ -43,9 +43,8 @@ .global el3_sysregs_context_save func el3_sysregs_context_save - mrs x9, scr_el3 mrs x10, sctlr_el3 - stp x9, x10, [x0, #CTX_SCR_EL3] + str x10, [x0, #CTX_SCTLR_EL3] mrs x11, cptr_el3 stp x11, xzr, [x0, #CTX_CPTR_EL3] @@ -98,8 +97,7 @@ func el3_sysregs_context_restore /* Make sure all the above changes are observed */ isb - ldp x9, x10, [x0, #CTX_SCR_EL3] - msr scr_el3, x9 + ldr x10, [x0, #CTX_SCTLR_EL3] msr sctlr_el3, x10 isb diff --git a/bl31/bl31_main.c b/bl31/bl31_main.c index 6f88e656..8cc7e0d6 100644 --- a/bl31/bl31_main.c +++ b/bl31/bl31_main.c @@ -140,53 +140,18 @@ uint32_t bl31_get_next_image_type(void) void bl31_prepare_next_image_entry() { entry_point_info_t *next_image_info; - uint32_t scr, image_type; - cpu_context_t *ctx; - gp_regs_t *gp_regs; + uint32_t image_type; /* Determine which image to execute next */ image_type = bl31_get_next_image_type(); - /* - * Setup minimal architectural state of the next highest EL to - * allow execution in it immediately upon entering it. - */ - bl31_next_el_arch_setup(image_type); - /* Program EL3 registers to enable entry into the next EL */ next_image_info = bl31_plat_get_next_image_ep_info(image_type); assert(next_image_info); assert(image_type == GET_SECURITY_STATE(next_image_info->h.attr)); - scr = read_scr(); - scr &= ~SCR_NS_BIT; - if (image_type == NON_SECURE) - scr |= SCR_NS_BIT; - - scr &= ~SCR_RW_BIT; - if ((next_image_info->spsr & (1 << MODE_RW_SHIFT)) == - (MODE_RW_64 << MODE_RW_SHIFT)) - scr |= SCR_RW_BIT; - - /* - * Tell the context mgmt. library to ensure that SP_EL3 points to - * the right context to exit from EL3 correctly. 
- */ - cm_set_el3_eret_context(image_type, - next_image_info->pc, - next_image_info->spsr, - scr); - - /* - * Save the args generated in BL2 for the image in the right context - * used on its entry - */ - ctx = cm_get_context(image_type); - gp_regs = get_gpregs_ctx(ctx); - memcpy(gp_regs, (void *)&next_image_info->args, sizeof(aapcs64_params_t)); - - /* Finally set the next context */ - cm_set_next_eret_context(image_type); + cm_init_context(read_mpidr_el1(), next_image_info); + cm_prepare_el3_exit(image_type); } /******************************************************************************* diff --git a/bl31/context_mgmt.c b/bl31/context_mgmt.c index 67a6e037..81c7c568 100644 --- a/bl31/context_mgmt.c +++ b/bl31/context_mgmt.c @@ -40,6 +40,7 @@ #include #include #include +#include /******************************************************************************* @@ -86,6 +87,177 @@ void cm_set_context_by_mpidr(uint64_t mpidr, void *context, uint32_t security_st set_cpu_data_by_mpidr(mpidr, cpu_context[security_state], context); } +/******************************************************************************* + * This function is used to program the context that's used for exception + * return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for + * the required security state + ******************************************************************************/ +static inline void cm_set_next_context(void *context) +{ +#if DEBUG + uint64_t sp_mode; + + /* + * Check that this function is called with SP_EL0 as the stack + * pointer + */ + __asm__ volatile("mrs %0, SPSel\n" + : "=r" (sp_mode)); + + assert(sp_mode == MODE_SP_EL0); +#endif + + __asm__ volatile("msr spsel, #1\n" + "mov sp, %0\n" + "msr spsel, #0\n" + : : "r" (context)); +} + +/******************************************************************************* + * The following function initializes a cpu_context for the current CPU for + * first use, and sets the initial entrypoint state as specified by the + * entry_point_info structure. + * + * The security state to initialize is determined by the SECURE attribute + * of the entry_point_info. The function returns a pointer to the initialized + * context and sets this as the next context to return to. + * + * The EE and ST attributes are used to configure the endianess and secure + * timer availability for the new excution context. + * + * To prepare the register state for entry call cm_prepare_el3_exit() and + * el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to + * cm_e1_sysreg_context_restore(). + ******************************************************************************/ +void cm_init_context(uint64_t mpidr, const entry_point_info_t *ep) +{ + uint32_t security_state; + cpu_context_t *ctx; + uint32_t scr_el3; + el3_state_t *state; + gp_regs_t *gp_regs; + unsigned long sctlr_elx; + + security_state = GET_SECURITY_STATE(ep->h.attr); + ctx = cm_get_context_by_mpidr(mpidr, security_state); + assert(ctx); + + /* Clear any residual register values from the context */ + memset(ctx, 0, sizeof(*ctx)); + + /* + * Base the context SCR on the current value, adjust for entry point + * specific requirements and set trap bits from the IMF + * TODO: provide the base/global SCR bits using another mechanism? 
+ */ + scr_el3 = read_scr(); + scr_el3 &= ~(SCR_NS_BIT | SCR_RW_BIT | SCR_FIQ_BIT | SCR_IRQ_BIT | + SCR_ST_BIT | SCR_HCE_BIT); + + if (security_state != SECURE) + scr_el3 |= SCR_NS_BIT; + + if (GET_RW(ep->spsr) == MODE_RW_64) + scr_el3 |= SCR_RW_BIT; + + if (EP_GET_ST(ep->h.attr)) + scr_el3 |= SCR_ST_BIT; + + scr_el3 |= get_scr_el3_from_routing_model(security_state); + + /* + * Set up SCTLR_ELx for the target exception level: + * EE bit is taken from the entrpoint attributes + * M, C and I bits must be zero (as required by PSCI specification) + * + * The target exception level is based on the spsr mode requested. + * If execution is requested to EL2 or hyp mode, HVC is enabled + * via SCR_EL3.HCE. + * + * Always compute the SCTLR_EL1 value and save in the cpu_context + * - the EL2 registers are set up by cm_preapre_ns_entry() as they + * are not part of the stored cpu_context + * + * TODO: In debug builds the spsr should be validated and checked + * against the CPU support, security state, endianess and pc + */ + sctlr_elx = EP_GET_EE(ep->h.attr) ? SCTLR_EE_BIT : 0; + sctlr_elx |= SCTLR_EL1_RES1; + write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx); + + if ((GET_RW(ep->spsr) == MODE_RW_64 + && GET_EL(ep->spsr) == MODE_EL2) + || (GET_RW(ep->spsr) != MODE_RW_64 + && GET_M32(ep->spsr) == MODE32_hyp)) { + scr_el3 |= SCR_HCE_BIT; + } + + /* Populate EL3 state so that we've the right context before doing ERET */ + state = get_el3state_ctx(ctx); + write_ctx_reg(state, CTX_SCR_EL3, scr_el3); + write_ctx_reg(state, CTX_ELR_EL3, ep->pc); + write_ctx_reg(state, CTX_SPSR_EL3, ep->spsr); + + /* + * Store the X0-X7 value from the entrypoint into the context + * Use memcpy as we are in control of the layout of the structures + */ + gp_regs = get_gpregs_ctx(ctx); + memcpy(gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t)); +} + +/******************************************************************************* + * Prepare the CPU system registers for first entry into secure or normal world + * + * If execution is requested to EL2 or hyp mode, SCTLR_EL2 is initialized + * If execution is requested to non-secure EL1 or svc mode, and the CPU supports + * EL2 then EL2 is disabled by configuring all necessary EL2 registers. + * For all entries, the EL1 registers are initialized from the cpu_context + ******************************************************************************/ +void cm_prepare_el3_exit(uint32_t security_state) +{ + uint32_t sctlr_elx, scr_el3, cptr_el2; + cpu_context_t *ctx = cm_get_context(security_state); + + assert(ctx); + + if (security_state == NON_SECURE) { + scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3); + if (scr_el3 & SCR_HCE_BIT) { + /* Use SCTLR_EL1.EE value to initialise sctlr_el2 */ + sctlr_elx = read_ctx_reg(get_sysregs_ctx(ctx), + CTX_SCTLR_EL1); + sctlr_elx &= ~SCTLR_EE_BIT; + sctlr_elx |= SCTLR_EL2_RES1; + write_sctlr_el2(sctlr_elx); + } else if (read_id_aa64pfr0_el1() & + (ID_AA64PFR0_ELX_MASK << ID_AA64PFR0_EL2_SHIFT)) { + /* EL2 present but unused, need to disable safely */ + + /* HCR_EL2 = 0, except RW bit set to match SCR_EL3 */ + write_hcr_el2((scr_el3 & SCR_RW_BIT) ? 
HCR_RW_BIT : 0); + + /* SCTLR_EL2 : can be ignored when bypassing */ + + /* CPTR_EL2 : disable all traps TCPAC, TTA, TFP */ + cptr_el2 = read_cptr_el2(); + cptr_el2 &= ~(TCPAC_BIT | TTA_BIT | TFP_BIT); + write_cptr_el2(cptr_el2); + + /* Enable EL1 access to timer */ + write_cnthctl_el2(EL1PCEN_BIT | EL1PCTEN_BIT); + + /* Set VPIDR, VMPIDR to match MIDR, MPIDR */ + write_vpidr_el2(read_midr_el1()); + write_vmpidr_el2(read_mpidr_el1()); + } + } + + el1_sysregs_context_restore(get_sysregs_ctx(ctx)); + + cm_set_next_context(ctx); +} + /******************************************************************************* * The next four functions are used by runtime services to save and restore EL3 * and EL1 contexts on the 'cpu_context' structure for the specified security @@ -132,13 +304,10 @@ void cm_el1_sysregs_context_restore(uint32_t security_state) } /******************************************************************************* - * This function populates 'cpu_context' pertaining to the given security state - * with the entrypoint, SPSR and SCR values so that an ERET from this security - * state correctly restores corresponding values to drop the CPU to the next - * exception level + * This function populates ELR_EL3 member of 'cpu_context' pertaining to the + * given security state with the given entrypoint ******************************************************************************/ -void cm_set_el3_eret_context(uint32_t security_state, uint64_t entrypoint, - uint32_t spsr, uint32_t scr) +void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint) { cpu_context_t *ctx; el3_state_t *state; @@ -146,23 +315,17 @@ void cm_set_el3_eret_context(uint32_t security_state, uint64_t entrypoint, ctx = cm_get_context(security_state); assert(ctx); - /* Program the interrupt routing model for this security state */ - scr &= ~SCR_FIQ_BIT; - scr &= ~SCR_IRQ_BIT; - scr |= get_scr_el3_from_routing_model(security_state); - - /* Populate EL3 state so that we've the right context before doing ERET */ + /* Populate EL3 state so that ERET jumps to the correct entry */ state = get_el3state_ctx(ctx); - write_ctx_reg(state, CTX_SPSR_EL3, spsr); write_ctx_reg(state, CTX_ELR_EL3, entrypoint); - write_ctx_reg(state, CTX_SCR_EL3, scr); } /******************************************************************************* - * This function populates ELR_EL3 member of 'cpu_context' pertaining to the - * given security state with the given entrypoint + * This function populates ELR_EL3 and SPSR_EL3 members of 'cpu_context' + * pertaining to the given security state ******************************************************************************/ -void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint) +void cm_set_elr_spsr_el3(uint32_t security_state, + uint64_t entrypoint, uint32_t spsr) { cpu_context_t *ctx; el3_state_t *state; @@ -173,6 +336,7 @@ void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint) /* Populate EL3 state so that ERET jumps to the correct entry */ state = get_el3state_ctx(ctx); write_ctx_reg(state, CTX_ELR_EL3, entrypoint); + write_ctx_reg(state, CTX_SPSR_EL3, spsr); } /******************************************************************************* @@ -233,26 +397,9 @@ uint32_t cm_get_scr_el3(uint32_t security_state) void cm_set_next_eret_context(uint32_t security_state) { cpu_context_t *ctx; -#if DEBUG - uint64_t sp_mode; -#endif ctx = cm_get_context(security_state); assert(ctx); -#if DEBUG - /* - * Check that this function is called with SP_EL0 as the stack - * pointer - */ - 
__asm__ volatile("mrs %0, SPSel\n" - : "=r" (sp_mode)); - - assert(sp_mode == MODE_SP_EL0); -#endif - - __asm__ volatile("msr spsel, #1\n" - "mov sp, %0\n" - "msr spsel, #0\n" - : : "r" (ctx)); + cm_set_next_context(ctx); } diff --git a/include/bl31/context_mgmt.h b/include/bl31/context_mgmt.h index ade2fa1d..6127b74b 100644 --- a/include/bl31/context_mgmt.h +++ b/include/bl31/context_mgmt.h @@ -34,6 +34,11 @@ #include #include +/******************************************************************************* + * Forward declarations + ******************************************************************************/ +struct entry_point_info; + /******************************************************************************* * Function & variable prototypes ******************************************************************************/ @@ -45,12 +50,14 @@ void cm_set_context_by_mpidr(uint64_t mpidr, uint32_t security_state); static inline void cm_set_context(void *context, uint32_t security_state); void cm_el3_sysregs_context_save(uint32_t security_state); +void cm_init_context(uint64_t mpidr, const struct entry_point_info *ep); +void cm_prepare_el3_exit(uint32_t security_state); void cm_el3_sysregs_context_restore(uint32_t security_state); void cm_el1_sysregs_context_save(uint32_t security_state); void cm_el1_sysregs_context_restore(uint32_t security_state); -void cm_set_el3_eret_context(uint32_t security_state, uint64_t entrypoint, - uint32_t spsr, uint32_t scr); void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint); +void cm_set_elr_spsr_el3(uint32_t security_state, + uint64_t entrypoint, uint32_t spsr); void cm_write_scr_el3_bit(uint32_t security_state, uint32_t bit_pos, uint32_t value); diff --git a/include/common/bl_common.h b/include/common/bl_common.h index 2f3bade1..f5e2a9a2 100644 --- a/include/common/bl_common.h +++ b/include/common/bl_common.h @@ -33,7 +33,6 @@ #define SECURE 0x0 #define NON_SECURE 0x1 -#define PARAM_EP_SECURITY_MASK 0x1 #define UP 1 #define DOWN 0 @@ -64,10 +63,23 @@ #define ENTRY_POINT_INFO_PC_OFFSET 0x08 #define ENTRY_POINT_INFO_ARGS_OFFSET 0x18 +#define PARAM_EP_SECURITY_MASK 0x1 #define GET_SECURITY_STATE(x) (x & PARAM_EP_SECURITY_MASK) #define SET_SECURITY_STATE(x, security) \ ((x) = ((x) & ~PARAM_EP_SECURITY_MASK) | (security)) +#define EP_EE_MASK 0x2 +#define EP_EE_LITTLE 0x0 +#define EP_EE_BIG 0x2 +#define EP_GET_EE(x) (x & EP_EE_MASK) +#define EP_SET_EE(x, ee) ((x) = ((x) & ~EP_EE_MASK) | (ee)) + +#define EP_ST_MASK 0x4 +#define EP_ST_DISABLE 0x0 +#define EP_ST_ENABLE 0x4 +#define EP_GET_ST(x) (x & EP_ST_MASK) +#define EP_SET_ST(x, ee) ((x) = ((x) & ~EP_ST_MASK) | (ee)) + #define PARAM_EP 0x01 #define PARAM_IMAGE_BINARY 0x02 #define PARAM_BL31 0x03 diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h index 0bfbd66c..5dc488bb 100644 --- a/include/lib/aarch64/arch.h +++ b/include/lib/aarch64/arch.h @@ -167,6 +167,7 @@ #define HCR_FMO_BIT (1 << 3) /* CNTHCTL_EL2 definitions */ +#define EVNTEN_BIT (1 << 2) #define EL1PCEN_BIT (1 << 1) #define EL1PCTEN_BIT (1 << 0) diff --git a/include/lib/aarch64/arch_helpers.h b/include/lib/aarch64/arch_helpers.h index 1ca33502..673e897a 100644 --- a/include/lib/aarch64/arch_helpers.h +++ b/include/lib/aarch64/arch_helpers.h @@ -262,6 +262,9 @@ DEFINE_SYSREG_RW_FUNCS(cnthctl_el2) DEFINE_SYSREG_RW_FUNCS(tpidr_el3) +DEFINE_SYSREG_RW_FUNCS(vpidr_el2) +DEFINE_SYSREG_RW_FUNCS(vmpidr_el2) + /* Implementation specific registers */ DEFINE_RENAME_SYSREG_RW_FUNCS(cpuectlr_el1, CPUECTLR_EL1) diff --git 
a/services/spd/tspd/tspd_common.c b/services/spd/tspd/tspd_common.c index 92427021..c497670b 100644 --- a/services/spd/tspd/tspd_common.c +++ b/services/spd/tspd/tspd_common.c @@ -45,9 +45,8 @@ int32_t tspd_init_secure_context(uint64_t entrypoint, uint64_t mpidr, tsp_context_t *tsp_ctx) { - uint32_t scr, sctlr; - el1_sys_regs_t *el1_state; - uint32_t spsr; + entry_point_info_t ep; + uint32_t ep_attr; /* Passing a NULL context is a critical programming error */ assert(tsp_ctx); @@ -58,51 +57,24 @@ int32_t tspd_init_secure_context(uint64_t entrypoint, */ assert(rw == TSP_AARCH64); - /* - * This might look redundant if the context was statically - * allocated but this function cannot make that assumption. - */ - memset(tsp_ctx, 0, sizeof(*tsp_ctx)); - - /* - * Set the right security state, register width and enable access to - * the secure physical timer for the SP. - */ - scr = read_scr(); - scr &= ~SCR_NS_BIT; - scr &= ~SCR_RW_BIT; - scr |= SCR_ST_BIT; - if (rw == TSP_AARCH64) - scr |= SCR_RW_BIT; - - /* Get a pointer to the S-EL1 context memory */ - el1_state = get_sysregs_ctx(&tsp_ctx->cpu_ctx); - - /* - * Program the SCTLR_EL1 such that upon entry in S-EL1, caches and MMU are - * disabled and exception endianess is set to be the same as EL3 - */ - sctlr = read_sctlr_el3(); - sctlr &= SCTLR_EE_BIT; - sctlr |= SCTLR_EL1_RES1; - write_ctx_reg(el1_state, CTX_SCTLR_EL1, sctlr); - - /* Set this context as ready to be initialised i.e OFF */ + /* Associate this context with the cpu specified */ + tsp_ctx->mpidr = mpidr; + tsp_ctx->state = 0; set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_OFF); - - /* - * This context has not been used yet. It will become valid - * when the TSP is interrupted and wants the TSPD to preserve - * the context. - */ clr_std_smc_active_flag(tsp_ctx->state); - /* Associate this context with the cpu specified */ - tsp_ctx->mpidr = mpidr; + cm_set_context_by_mpidr(mpidr, &tsp_ctx->cpu_ctx, SECURE); + + /* initialise an entrypoint to set up the CPU context */ + ep_attr = SECURE | EP_ST_ENABLE; + if (read_sctlr_el3() & SCTLR_EE_BIT) + ep_attr |= EP_EE_BIG; + SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1, ep_attr); + ep.pc = entrypoint; + ep.spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS); + memset(&ep.args, 0, sizeof(ep.args)); - cm_set_context(&tsp_ctx->cpu_ctx, SECURE); - spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS); - cm_set_el3_eret_context(SECURE, entrypoint, spsr, scr); + cm_init_context(mpidr, &ep); return 0; } diff --git a/services/spd/tspd/tspd_main.c b/services/spd/tspd/tspd_main.c index 35bc6e2e..f1dbe68b 100644 --- a/services/spd/tspd/tspd_main.c +++ b/services/spd/tspd/tspd_main.c @@ -122,13 +122,9 @@ static uint64_t tspd_sel1_interrupt_handler(uint32_t id, CTX_ELR_EL3); } - SMC_SET_EL3(&tsp_ctx->cpu_ctx, - CTX_SPSR_EL3, - SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS)); - SMC_SET_EL3(&tsp_ctx->cpu_ctx, - CTX_ELR_EL3, - (uint64_t) &tsp_vectors->fiq_entry); cm_el1_sysregs_context_restore(SECURE); + cm_set_elr_spsr_el3(SECURE, (uint64_t) &tsp_vectors->fiq_entry, + SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS)); cm_set_next_eret_context(SECURE); /* diff --git a/services/std_svc/psci/psci_afflvl_off.c b/services/std_svc/psci/psci_afflvl_off.c index 21a4d1a6..30f2bd19 100644 --- a/services/std_svc/psci/psci_afflvl_off.c +++ b/services/std_svc/psci/psci_afflvl_off.c @@ -42,8 +42,8 @@ typedef int (*afflvl_off_handler_t)(unsigned long, aff_map_node_t *); ******************************************************************************/ 
static int psci_afflvl0_off(unsigned long mpidr, aff_map_node_t *cpu_node) { - unsigned int index, plat_state; - int rc = PSCI_E_SUCCESS; + unsigned int plat_state; + int rc; unsigned long sctlr; assert(cpu_node->level == MPIDR_AFFLVL0); @@ -67,9 +67,6 @@ static int psci_afflvl0_off(unsigned long mpidr, aff_map_node_t *cpu_node) return rc; } - index = cpu_node->data; - memset(&psci_ns_entry_info[index], 0, sizeof(psci_ns_entry_info[index])); - /* * Arch. management. Perform the necessary steps to flush all * cpu caches. @@ -96,6 +93,7 @@ static int psci_afflvl0_off(unsigned long mpidr, aff_map_node_t *cpu_node) * Plat. management: Perform platform specific actions to turn this * cpu off e.g. exit cpu coherency, program the power controller etc. */ + rc = PSCI_E_SUCCESS; if (psci_plat_pm_ops->affinst_off) { /* Get the current physical state of this cpu */ diff --git a/services/std_svc/psci/psci_afflvl_on.c b/services/std_svc/psci/psci_afflvl_on.c index e4d8f1f2..d91db961 100644 --- a/services/std_svc/psci/psci_afflvl_on.c +++ b/services/std_svc/psci/psci_afflvl_on.c @@ -75,8 +75,10 @@ static int psci_afflvl0_on(unsigned long target_cpu, unsigned long ns_entrypoint, unsigned long context_id) { - unsigned int index, plat_state; + unsigned int plat_state; unsigned long psci_entrypoint; + uint32_t ns_scr_el3 = read_scr_el3(); + uint32_t ns_sctlr_el1 = read_sctlr_el1(); int rc; /* Sanity check to safeguard against data corruption */ @@ -103,8 +105,8 @@ static int psci_afflvl0_on(unsigned long target_cpu, * the non-secure world from the non-secure state from * where this call originated. */ - index = cpu_node->data; - rc = psci_set_ns_entry_info(index, ns_entrypoint, context_id); + rc = psci_save_ns_entry(target_cpu, ns_entrypoint, context_id, + ns_scr_el3, ns_sctlr_el1); if (rc != PSCI_E_SUCCESS) return rc; @@ -336,7 +338,7 @@ int psci_afflvl_on(unsigned long target_cpu, static unsigned int psci_afflvl0_on_finish(unsigned long mpidr, aff_map_node_t *cpu_node) { - unsigned int index, plat_state, state, rc = PSCI_E_SUCCESS; + unsigned int plat_state, state, rc; assert(cpu_node->level == MPIDR_AFFLVL0); @@ -383,11 +385,9 @@ static unsigned int psci_afflvl0_on_finish(unsigned long mpidr, /* * Generic management: Now we just need to retrieve the * information that we had stashed away during the cpu_on - * call to set this cpu on its way. First get the index - * for restoring the re-entry info + * call to set this cpu on its way. 
*/ - index = cpu_node->data; - psci_get_ns_entry_info(index); + cm_prepare_el3_exit(NON_SECURE); /* State management: mark this cpu as on */ psci_set_state(cpu_node, PSCI_STATE_ON); @@ -395,6 +395,7 @@ static unsigned int psci_afflvl0_on_finish(unsigned long mpidr, /* Clean caches before re-entering normal world */ dcsw_op_louis(DCCSW); + rc = PSCI_E_SUCCESS; return rc; } diff --git a/services/std_svc/psci/psci_afflvl_suspend.c b/services/std_svc/psci/psci_afflvl_suspend.c index 99343104..f43dcedf 100644 --- a/services/std_svc/psci/psci_afflvl_suspend.c +++ b/services/std_svc/psci/psci_afflvl_suspend.c @@ -132,10 +132,12 @@ static int psci_afflvl0_suspend(unsigned long mpidr, unsigned long context_id, unsigned int power_state) { - unsigned int index, plat_state; + unsigned int plat_state; unsigned long psci_entrypoint, sctlr; el3_state_t *saved_el3_state; - int rc = PSCI_E_SUCCESS; + uint32_t ns_scr_el3 = read_scr_el3(); + uint32_t ns_sctlr_el1 = read_sctlr_el1(); + int rc; /* Sanity check to safeguard against data corruption */ assert(cpu_node->level == MPIDR_AFFLVL0); @@ -163,8 +165,8 @@ static int psci_afflvl0_suspend(unsigned long mpidr, * Generic management: Store the re-entry information for the * non-secure world */ - index = cpu_node->data; - rc = psci_set_ns_entry_info(index, ns_entrypoint, context_id); + rc = psci_save_ns_entry(read_mpidr_el1(), ns_entrypoint, context_id, + ns_scr_el3, ns_sctlr_el1); if (rc != PSCI_E_SUCCESS) return rc; @@ -174,7 +176,6 @@ static int psci_afflvl0_suspend(unsigned long mpidr, * L1 caches and exit intra-cluster coherency et al */ cm_el3_sysregs_context_save(NON_SECURE); - rc = PSCI_E_SUCCESS; /* * The EL3 state to PoC since it will be accessed after a @@ -214,6 +215,8 @@ static int psci_afflvl0_suspend(unsigned long mpidr, * platform defined mailbox with the psci entrypoint, * program the power controller etc. */ + rc = PSCI_E_SUCCESS; + if (psci_plat_pm_ops->affinst_suspend) { plat_state = psci_get_phys_state(cpu_node); rc = psci_plat_pm_ops->affinst_suspend(mpidr, @@ -454,7 +457,7 @@ int psci_afflvl_suspend(unsigned long mpidr, static unsigned int psci_afflvl0_suspend_finish(unsigned long mpidr, aff_map_node_t *cpu_node) { - unsigned int index, plat_state, state, rc = PSCI_E_SUCCESS; + unsigned int plat_state, state, rc; int32_t suspend_level; assert(cpu_node->level == MPIDR_AFFLVL0); @@ -481,14 +484,11 @@ static unsigned int psci_afflvl0_suspend_finish(unsigned long mpidr, } /* Get the index for restoring the re-entry information */ - index = cpu_node->data; - /* * Arch. management: Restore the stashed EL3 architectural * context from the 'cpu_context' structure for this cpu. */ cm_el3_sysregs_context_restore(NON_SECURE); - rc = PSCI_E_SUCCESS; /* * Call the cpu suspend finish handler registered by the Secure Payload @@ -509,7 +509,7 @@ static unsigned int psci_afflvl0_suspend_finish(unsigned long mpidr, * information that we had stashed away during the suspend * call to set this cpu on its way. 
*/ - psci_get_ns_entry_info(index); + cm_prepare_el3_exit(NON_SECURE); /* State management: mark this cpu as on */ psci_set_state(cpu_node, PSCI_STATE_ON); @@ -517,6 +517,7 @@ static unsigned int psci_afflvl0_suspend_finish(unsigned long mpidr, /* Clean caches before re-entering normal world */ dcsw_op_louis(DCCSW); + rc = PSCI_E_SUCCESS; return rc; } diff --git a/services/std_svc/psci/psci_common.c b/services/std_svc/psci/psci_common.c index 3cbacd7a..d69c5f51 100644 --- a/services/std_svc/psci/psci_common.c +++ b/services/std_svc/psci/psci_common.c @@ -36,6 +36,7 @@ #include #include #include +#include #include "psci_private.h" /* @@ -50,7 +51,6 @@ const spd_pm_ops_t *psci_spd_pm; * array during startup. ******************************************************************************/ suspend_context_t psci_suspend_context[PSCI_NUM_AFFS]; -ns_entry_info_t psci_ns_entry_info[PSCI_NUM_AFFS]; /******************************************************************************* * Grand array that holds the platform's topology information for state @@ -212,97 +212,36 @@ int psci_validate_mpidr(unsigned long mpidr, int level) } /******************************************************************************* - * This function retrieves all the stashed information needed to correctly - * resume a cpu's execution in the non-secure state after it has been physically - * powered on i.e. turned ON or resumed from SUSPEND + * This function determines the full entrypoint information for the requested + * PSCI entrypoint on power on/resume and saves this in the non-secure CPU + * cpu_context, ready for when the core boots. ******************************************************************************/ -void psci_get_ns_entry_info(unsigned int index) +int psci_save_ns_entry(uint64_t mpidr, + uint64_t entrypoint, uint64_t context_id, + uint32_t ns_scr_el3, uint32_t ns_sctlr_el1) { - unsigned long sctlr = 0, scr, el_status, id_aa64pfr0; - cpu_context_t *ns_entry_context; - gp_regs_t *ns_entry_gpregs; + uint32_t ep_attr, mode, sctlr, daif, ee; + entry_point_info_t ep; - scr = read_scr(); + sctlr = ns_scr_el3 & SCR_HCE_BIT ? read_sctlr_el2() : ns_sctlr_el1; + ee = 0; - /* Find out which EL we are going to */ - id_aa64pfr0 = read_id_aa64pfr0_el1(); - el_status = (id_aa64pfr0 >> ID_AA64PFR0_EL2_SHIFT) & - ID_AA64PFR0_ELX_MASK; - - /* Restore endianess */ - if (psci_ns_entry_info[index].sctlr & SCTLR_EE_BIT) - sctlr |= SCTLR_EE_BIT; - else - sctlr &= ~SCTLR_EE_BIT; - - /* Turn off MMU and Caching */ - sctlr &= ~(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_M_BIT); - - /* Set the register width */ - if (psci_ns_entry_info[index].scr & SCR_RW_BIT) - scr |= SCR_RW_BIT; - else - scr &= ~SCR_RW_BIT; - - scr |= SCR_NS_BIT; - - if (el_status) - write_sctlr_el2(sctlr); - else - write_sctlr_el1(sctlr); - - /* Fulfill the cpu_on entry reqs. as per the psci spec */ - ns_entry_context = (cpu_context_t *) cm_get_context(NON_SECURE); - assert(ns_entry_context); - - /* - * Setup general purpose registers to return the context id and - * prevent leakage of secure information into the normal world. - */ - ns_entry_gpregs = get_gpregs_ctx(ns_entry_context); - write_ctx_reg(ns_entry_gpregs, - CTX_GPREG_X0, - psci_ns_entry_info[index].context_id); - - /* - * Tell the context management library to setup EL3 system registers to - * be able to ERET into the ns state, and SP_EL3 points to the right - * context to exit from EL3 correctly. 
- */ - cm_set_el3_eret_context(NON_SECURE, - psci_ns_entry_info[index].eret_info.entrypoint, - psci_ns_entry_info[index].eret_info.spsr, - scr); - - cm_set_next_eret_context(NON_SECURE); -} - -/******************************************************************************* - * This function retrieves and stashes all the information needed to correctly - * resume a cpu's execution in the non-secure state after it has been physically - * powered on i.e. turned ON or resumed from SUSPEND. This is done prior to - * turning it on or before suspending it. - ******************************************************************************/ -int psci_set_ns_entry_info(unsigned int index, - unsigned long entrypoint, - unsigned long context_id) -{ - int rc = PSCI_E_SUCCESS; - unsigned int rw, mode, ee, spsr = 0; - unsigned long id_aa64pfr0 = read_id_aa64pfr0_el1(), scr = read_scr(); - unsigned long el_status; - unsigned long daif; + ep_attr = NON_SECURE | EP_ST_DISABLE; + if (sctlr & SCTLR_EE_BIT) { + ep_attr |= EP_EE_BIG; + ee = 1; + } + SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1, ep_attr); - /* Figure out what mode do we enter the non-secure world in */ - el_status = (id_aa64pfr0 >> ID_AA64PFR0_EL2_SHIFT) & - ID_AA64PFR0_ELX_MASK; + ep.pc = entrypoint; + memset(&ep.args, 0, sizeof(ep.args)); + ep.args.arg0 = context_id; /* * Figure out whether the cpu enters the non-secure address space * in aarch32 or aarch64 */ - rw = scr & SCR_RW_BIT; - if (rw) { + if (ns_scr_el3 & SCR_RW_BIT) { /* * Check whether a Thumb entry point has been provided for an @@ -311,28 +250,12 @@ int psci_set_ns_entry_info(unsigned int index, if (entrypoint & 0x1) return PSCI_E_INVALID_PARAMS; - if (el_status && (scr & SCR_HCE_BIT)) { - mode = MODE_EL2; - ee = read_sctlr_el2() & SCTLR_EE_BIT; - } else { - mode = MODE_EL1; - ee = read_sctlr_el1() & SCTLR_EE_BIT; - } - - spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS); + mode = ns_scr_el3 & SCR_HCE_BIT ? MODE_EL2 : MODE_EL1; - psci_ns_entry_info[index].sctlr |= ee; - psci_ns_entry_info[index].scr |= SCR_RW_BIT; + ep.spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS); } else { - - if (el_status && (scr & SCR_HCE_BIT)) { - mode = MODE32_hyp; - ee = read_sctlr_el2() & SCTLR_EE_BIT; - } else { - mode = MODE32_svc; - ee = read_sctlr_el1() & SCTLR_EE_BIT; - } + mode = ns_scr_el3 & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc; /* * TODO: Choose async. 
exception bits if HYP mode is not @@ -340,18 +263,13 @@ int psci_set_ns_entry_info(unsigned int index, */ daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT; - spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, daif); - - /* Ensure that the CSPR.E and SCTLR.EE bits match */ - psci_ns_entry_info[index].sctlr |= ee; - psci_ns_entry_info[index].scr &= ~SCR_RW_BIT; + ep.spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, daif); } - psci_ns_entry_info[index].eret_info.entrypoint = entrypoint; - psci_ns_entry_info[index].eret_info.spsr = spsr; - psci_ns_entry_info[index].context_id = context_id; + /* initialise an entrypoint to set up the CPU context */ + cm_init_context(mpidr, &ep); - return rc; + return PSCI_E_SUCCESS; } /******************************************************************************* diff --git a/services/std_svc/psci/psci_private.h b/services/std_svc/psci/psci_private.h index 747a2d4e..970ad21e 100644 --- a/services/std_svc/psci/psci_private.h +++ b/services/std_svc/psci/psci_private.h @@ -35,22 +35,6 @@ #include #include -/******************************************************************************* - * The following two data structures hold the generic information to bringup - * a suspended/hotplugged out cpu - ******************************************************************************/ -typedef struct eret_params { - unsigned long entrypoint; - unsigned long spsr; -} eret_params_t; - -typedef struct ns_entry_info { - eret_params_t eret_info; - unsigned long context_id; - unsigned int scr; - unsigned int sctlr; -} ns_entry_info_t; - /******************************************************************************* * The following two data structures hold the topology tree which in turn tracks * the state of the all the affinity instances supported by the platform. 
@@ -85,7 +69,6 @@ typedef unsigned int (*afflvl_power_on_finisher_t)(unsigned long, * Data prototypes ******************************************************************************/ extern suspend_context_t psci_suspend_context[PSCI_NUM_AFFS]; -extern ns_entry_info_t psci_ns_entry_info[PSCI_NUM_AFFS]; extern const plat_pm_ops_t *psci_plat_pm_ops; extern aff_map_node_t psci_aff_map[PSCI_NUM_AFFS]; @@ -102,7 +85,6 @@ int get_max_afflvl(void); unsigned short psci_get_state(aff_map_node_t *node); unsigned short psci_get_phys_state(aff_map_node_t *node); void psci_set_state(aff_map_node_t *node, unsigned short state); -void psci_get_ns_entry_info(unsigned int index); unsigned long mpidr_set_aff_inst(unsigned long, unsigned char, int); int psci_validate_mpidr(unsigned long, int); int get_power_on_target_afflvl(unsigned long mpidr); @@ -110,9 +92,9 @@ void psci_afflvl_power_on_finish(unsigned long, int, int, afflvl_power_on_finisher_t *); -int psci_set_ns_entry_info(unsigned int index, - unsigned long entrypoint, - unsigned long context_id); +int psci_save_ns_entry(uint64_t mpidr, + uint64_t entrypoint, uint64_t context_id, + uint32_t caller_scr_el3, uint32_t caller_sctlr_el1); int psci_check_afflvl_range(int start_afflvl, int end_afflvl); void psci_acquire_afflvl_locks(unsigned long mpidr, int start_afflvl, diff --git a/services/std_svc/psci/psci_setup.c b/services/std_svc/psci/psci_setup.c index 015beabb..af821506 100644 --- a/services/std_svc/psci/psci_setup.c +++ b/services/std_svc/psci/psci_setup.c @@ -59,7 +59,7 @@ static aff_limits_node_t psci_aff_limits[MPIDR_MAX_AFFLVL + 1]; /******************************************************************************* * 'psci_ns_einfo_idx' keeps track of the next free index in the - * 'psci_ns_entry_info' & 'psci_suspend_context' arrays. + * 'psci_suspend_context' arrays. ******************************************************************************/ static unsigned int psci_ns_einfo_idx; -- 2.30.2
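
For illustration, a minimal caller-side sketch of the new interface follows, mirroring the
bl31_main.c and tspd_common.c changes in this patch. The wrapper prepare_ns_el1_payload()
and its arguments are hypothetical, and the #include names are assumed from the header
paths the patch touches; all other symbols and signatures are as introduced above.

	#include <arch_helpers.h>
	#include <bl_common.h>
	#include <context_mgmt.h>
	#include <string.h>

	/* Hypothetical caller: hand over to a non-secure EL1 payload without an EL2 stub */
	static void prepare_ns_el1_payload(uint64_t payload_pc, uint64_t arg0)
	{
		entry_point_info_t ep;
		uint32_t ep_attr;

		/* Non-secure image, no secure timer access, endianness matching EL3 */
		ep_attr = NON_SECURE | EP_ST_DISABLE;
		if (read_sctlr_el3() & SCTLR_EE_BIT)
			ep_attr |= EP_EE_BIG;
		SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1, ep_attr);

		/*
		 * Requesting EL1 in the SPSR is what makes cm_prepare_el3_exit()
		 * disable EL2 safely on CPUs that implement it.
		 */
		ep.pc = payload_pc;
		ep.spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
		memset(&ep.args, 0, sizeof(ep.args));
		ep.args.arg0 = arg0;

		/* Initialise the non-secure cpu_context for this CPU ... */
		cm_init_context(read_mpidr_el1(), &ep);

		/* ... then program the EL1/EL2 system registers and set SP_EL3 for ERET */
		cm_prepare_el3_exit(NON_SECURE);
	}

Because the SPSR requests EL1 rather than EL2, cm_prepare_el3_exit() takes the EL2-bypass
path shown in the patch: HCR_EL2.RW is set to match SCR_EL3.RW, CPTR_EL2 traps are cleared,
CNTHCTL_EL2 grants EL1 access to the timer, and VPIDR_EL2/VMPIDR_EL2 mirror MIDR/MPIDR, so
the EL1 payload runs correctly even though EL2 is present but unused.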