next_smc_ctx->r3 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R3);
next_smc_ctx->lr_mon = read_ctx_reg(cpu_reg_ctx, CTX_LR);
next_smc_ctx->spsr_mon = read_ctx_reg(cpu_reg_ctx, CTX_SPSR);
+ next_smc_ctx->scr = read_ctx_reg(cpu_reg_ctx, CTX_SCR);
}
/*******************************************************************************
copy_cpu_ctx_to_smc_ctx(get_regs_ctx(cm_get_next_context()),
smc_get_next_ctx());
+ /*
+ * If the next image is non-secure, then we need to program the banked
+ * non-secure SCTLR. This is not required when the next image is secure
+ * because in AArch32, we expect the secure world to have the same
+ * SCTLR settings.
+ */
+ if (security_state == NON_SECURE) {
+ cpu_context_t *ctx = cm_get_context(security_state);
+ u_register_t ns_sctlr;
+
+ /* Temporarily set the NS bit to access NS SCTLR */
+ write_scr(read_scr() | SCR_NS_BIT);
+ isb();
+
+ ns_sctlr = read_ctx_reg(get_regs_ctx(ctx), CTX_NS_SCTLR);
+ write_sctlr(ns_sctlr);
+ isb();
+
+ write_scr(read_scr() & ~SCR_NS_BIT);
+ isb();
+ }
+
/*
* Flush the SMC & CPU context and the (next) pointers,
* to access them after caches are disabled.
dsb sy
isb
- /* Get the cpu_context for next BL image */
- bl cm_get_next_context
-
- /* Restore the SCR */
- ldr r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
- stcopr r2, SCR
- isb
-
/*
* Get the smc_context for next BL image,
* program the gp/system registers and exit
* secure monitor mode
*/
bl smc_get_next_ctx
- smcc_restore_gp_mode_regs
- eret
+ monitor_exit
endfunc bl1_entrypoint
sub r1, r1, r0
bl clean_dcache_range
- /* Program the registers in cpu_context and exit monitor mode */
- mov r0, #NON_SECURE
- bl cm_get_context
-
- /* Restore the SCR */
- ldr r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
- stcopr r2, SCR
- isb
-
- /* Restore the SCTLR */
- ldr r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
- stcopr r2, SCTLR
-
bl smc_get_next_ctx
- /* The other cpu_context registers have been copied to smc context */
+
+ /* r0 points to `smc_ctx_t` */
+ /* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
b sp_min_exit
endfunc sp_min_entrypoint
* SMC handling function for SP_MIN.
*/
func handle_smc
- smcc_save_gp_mode_regs
+ /* On SMC entry, `sp` points to `smc_ctx_t`. Save `lr`. */
+ str lr, [sp, #SMC_CTX_LR_MON]
- /* r0 points to smc_context */
- mov r2, r0 /* handle */
- ldcopr r0, SCR
+ smcc_save_gp_mode_regs
/*
- * Save SCR in stack. r1 is pushed to meet the 8 byte
- * stack alignment requirement.
+ * `sp` still points to `smc_ctx_t`. Save it to a register
+ * and restore the C runtime stack pointer to `sp`.
*/
- push {r0, r1}
+ mov r2, sp /* handle */
+ ldr sp, [r2, #SMC_CTX_SP_MON]
+
+ ldr r0, [r2, #SMC_CTX_SCR]
and r3, r0, #SCR_NS_BIT /* flags */
/* Switch to Secure Mode*/
bic r0, #SCR_NS_BIT
stcopr r0, SCR
isb
+
ldr r0, [r2, #SMC_CTX_GPREG_R0] /* smc_fid */
/* Check whether an SMC64 is issued */
tst r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
- beq 1f /* SMC32 is detected */
+ beq 1f
+ /* SMC32 is not detected. Return error back to caller */
mov r0, #SMC_UNK
str r0, [r2, #SMC_CTX_GPREG_R0]
mov r0, r2
- b 2f /* Skip handling the SMC */
+ b sp_min_exit
1:
+ /* SMC32 is detected */
mov r1, #0 /* cookie */
bl handle_runtime_svc
-2:
- /* r0 points to smc context */
-
- /* Restore SCR from stack */
- pop {r1, r2}
- stcopr r1, SCR
- isb
+ /* `r0` points to `smc_ctx_t` */
b sp_min_exit
endfunc handle_smc
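For reference, the SMC32/SMC64 test above keys off the calling-convention bit (bit 30) of the function ID. A C sketch of the equivalent check, assuming the FUNCID_CC_* and SMC_UNK definitions from smcc.h (the helper name is hypothetical; SP_MIN performs this check in assembly as shown):

#include <stdint.h>
#include <smcc.h>

/*
 * Hypothetical helper: reject SMC64 function IDs, which the AArch32
 * SP_MIN cannot handle. Returns SMC_UNK for an SMC64 ID, 0 otherwise.
 */
static uint32_t check_smc_cc(uint32_t smc_fid)
{
	/* Bit 30 of the function ID selects SMC64 (1) vs SMC32 (0) */
	if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_64)
		return SMC_UNK;

	return 0;
}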
-
/*
* The Warm boot entrypoint for SP_MIN.
*/
#endif
bl sp_min_warm_boot
-
- /* Program the registers in cpu_context and exit monitor mode */
- mov r0, #NON_SECURE
- bl cm_get_context
-
- /* Restore the SCR */
- ldr r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
- stcopr r2, SCR
- isb
-
- /* Restore the SCTLR */
- ldr r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
- stcopr r2, SCTLR
-
bl smc_get_next_ctx
-
- /* The other cpu_context registers have been copied to smc context */
+ /* r0 points to `smc_ctx_t` */
+ /* The PSCI cpu_context registers have been copied to `smc_ctx_t` */
b sp_min_exit
endfunc sp_min_warm_entrypoint
* Arguments : r0 must point to the SMC context to restore from.
*/
func sp_min_exit
- smcc_restore_gp_mode_regs
- eret
+ monitor_exit
endfunc sp_min_exit
next_smc_ctx->r0 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R0);
next_smc_ctx->lr_mon = read_ctx_reg(cpu_reg_ctx, CTX_LR);
next_smc_ctx->spsr_mon = read_ctx_reg(cpu_reg_ctx, CTX_SPSR);
+ next_smc_ctx->scr = read_ctx_reg(cpu_reg_ctx, CTX_SCR);
}
/*******************************************************************************
static void sp_min_prepare_next_image_entry(void)
{
entry_point_info_t *next_image_info;
+ cpu_context_t *ctx = cm_get_context(NON_SECURE);
+ u_register_t ns_sctlr;
/* Program system registers to proceed to non-secure */
next_image_info = sp_min_plat_get_bl33_ep_info();
/* Copy r0, lr and spsr from cpu context to SMC context */
copy_cpu_ctx_to_smc_ctx(get_regs_ctx(cm_get_context(NON_SECURE)),
smc_get_next_ctx());
+
+ /* Temporarily set the NS bit to access NS SCTLR */
+ write_scr(read_scr() | SCR_NS_BIT);
+ isb();
+ ns_sctlr = read_ctx_reg(get_regs_ctx(ctx), CTX_NS_SCTLR);
+ write_sctlr(ns_sctlr);
+ isb();
+
+ write_scr(read_scr() & ~SCR_NS_BIT);
+ isb();
}
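The SCR.NS toggle above repeats the sequence used in the BL1 path earlier in this patch; the pattern could be factored into a shared helper. A sketch under that assumption (the helper name is hypothetical; read_scr/write_scr/write_sctlr/isb are the existing AArch32 accessors from arch_helpers.h):

#include <arch.h>
#include <arch_helpers.h>

/*
 * Hypothetical helper: program the banked non-secure SCTLR while
 * executing in the secure world by temporarily setting SCR.NS.
 */
static void write_ns_sctlr(u_register_t ns_sctlr)
{
	/* Set SCR.NS so that the SCTLR write hits the non-secure bank */
	write_scr(read_scr() | SCR_NS_BIT);
	isb();

	write_sctlr(ns_sctlr);
	isb();

	/* Switch back to the secure bank */
	write_scr(read_scr() & ~SCR_NS_BIT);
	isb();
}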
/******************************************************************************
#define SMC_CTX_GPREG_R5 0x14
#define SMC_CTX_SP_USR 0x34
#define SMC_CTX_SPSR_MON 0x78
-#define SMC_CTX_LR_MON 0x7C
-#define SMC_CTX_SIZE 0x80
+#define SMC_CTX_SP_MON 0x7C
+#define SMC_CTX_LR_MON 0x80
+#define SMC_CTX_SCR 0x84
+#define SMC_CTX_SIZE 0x88
#ifndef __ASSEMBLY__
#include <cassert.h>
u_register_t sp_und;
u_register_t lr_und;
u_register_t spsr_mon;
- /* No need to save 'sp_mon' because we are already in monitor mode */
+ /*
+ * `sp_mon` points to the C runtime stack in monitor mode. But prior
+ * to exiting from an SMC, it is made to point to the `smc_ctx_t` so
+ * that, on the next SMC entry, the `smc_ctx_t` can be accessed directly.
+ */
+ u_register_t sp_mon;
u_register_t lr_mon;
+ u_register_t scr;
} smc_ctx_t;
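Since the SMC_CTX_* offsets defined for assembly must track this C layout, a compile-time assertion can catch any drift; a minimal sketch using the CASSERT macro from cassert.h (the assertion message token is illustrative):

/* Fail the build if the assembly offsets drift from the C layout */
CASSERT(SMC_CTX_SIZE == sizeof(smc_ctx_t), assert_smc_ctx_size_mismatch);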
/*
#include <arch.h>
/*
- * Macro to save the General purpose registers including the banked
- * registers to the SMC context on entry due a SMC call. On return, r0
- * contains the pointer to the `smc_context_t`.
+ * Macro to save the General purpose registers (r0 - r12), the banked
+ * spsr, lr and sp registers, and the `scr` register to the SMC context
+ * on entry due to an SMC call. The `lr` of the current mode (monitor)
+ * is expected to have been saved already by the caller. The `sp` must
+ * point to the `smc_ctx_t` to save to.
*/
.macro smcc_save_gp_mode_regs
- push {r0-r4, lr}
-
- ldcopr r0, SCR
- and r0, r0, #SCR_NS_BIT
- bl smc_get_ctx
-
- /* Save r5 - r12 in the SMC context */
- add r1, r0, #SMC_CTX_GPREG_R5
- stm r1!, {r5-r12}
-
- /*
- * Pop r0 - r4, lr to r4 - r8, lr from stack and then save
- * it to SMC context.
- */
- pop {r4-r8, lr}
- stm r0, {r4-r8}
+ /* Save r0 - r12 in the SMC context */
+ stm sp, {r0-r12}
+ mov r0, sp
+ add r0, r0, #SMC_CTX_SP_USR
/* Save the banked registers including the current SPSR and LR */
mrs r4, sp_usr
mrs r10, sp_fiq
mrs r11, lr_fiq
mrs r12, spsr_svc
- stm r1!, {r4-r12}
+ stm r0!, {r4-r12}
mrs r4, sp_svc
mrs r5, lr_svc
mrs r10, sp_und
mrs r11, lr_und
mrs r12, spsr
- stm r1!, {r4-r12, lr}
+ stm r0!, {r4-r12}
+ /* lr_mon is already saved by caller */
+ ldcopr r4, SCR
+ str r4, [sp, #SMC_CTX_SCR]
.endm
/*
- * Macro to restore the General purpose registers including the banked
- * registers from the SMC context prior to exit from the SMC call.
- * r0 must point to the `smc_context_t` to restore from.
+ * Macro to restore the `smc_ctx_t`, which includes the General purpose
+ * registers and banked mode registers, and exit from monitor mode.
+ * r0 must point to the `smc_ctx_t` to restore from.
*/
- .macro smcc_restore_gp_mode_regs
+ .macro monitor_exit
+ /*
+ * Save the current sp, and make sp point to the SMC
+ * context; it will be used for handling the next SMC
+ * entry.
+ */
+ str sp, [r0, #SMC_CTX_SP_MON]
+ mov sp, r0
+
+ /*
+ * Restore SCR first so that we access the right banked
+ * registers when the other mode registers are restored.
+ */
+ ldr r1, [r0, #SMC_CTX_SCR]
+ stcopr r1, SCR
+ isb
- /* Restore the banked registers including the current SPSR and LR */
+ /* Restore the banked registers including the current SPSR */
add r1, r0, #SMC_CTX_SP_USR
ldm r1!, {r4-r12}
msr sp_usr, r4
msr lr_fiq, r11
msr spsr_svc, r12
- ldm r1!, {r4-r12, lr}
+ ldm r1!, {r4-r12}
msr sp_svc, r4
msr lr_svc, r5
msr spsr_abt, r6
*/
msr spsr_fsxc, r12
+ /* Restore the LR */
+ ldr lr, [r0, #SMC_CTX_LR_MON]
+
/* Restore the rest of the general purpose registers */
ldm r0, {r0-r12}
+ eret
.endm
#endif /* __SMCC_MACROS_S__ */