/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*/
}
+/*******************************************************************************
+ * This function returns a pointer to the most recent 'cpu_context' structure
+ * for the CPU identified by `cpu_idx` that was set as the context for the
+ * specified security state. NULL is returned if no such structure has been
+ * specified.
+ ******************************************************************************/
+void *cm_get_context_by_index(unsigned int cpu_idx,
+ unsigned int security_state)
+{
+ assert(sec_state_is_valid(security_state));
+
+ return get_cpu_data_by_index(cpu_idx, cpu_context[security_state]);
+}
+
/*******************************************************************************
 * This function sets the pointer to the current 'cpu_context' structure for the
 * specified security state for the CPU identified by CPU index.
 ******************************************************************************/
void cm_set_context_by_index(unsigned int cpu_idx, void *context,
			     unsigned int security_state)
{
	/* Only recognised security states may index the context array. */
	assert(sec_state_is_valid(security_state));

	/*
	 * NOTE(review): set_cpu_data_by_index() looks like a macro taking a
	 * per-cpu member designator as its second argument — confirm against
	 * the cpu_data accessor definitions.
	 */
	set_cpu_data_by_index(cpu_idx, cpu_context[security_state], context);
}
+
/*******************************************************************************
* This function returns a pointer to the most recent 'cpu_context' structure
* for the CPU identified by MPIDR that was set as the context for the specified
{
assert(sec_state_is_valid(security_state));
- return get_cpu_data_by_mpidr(mpidr, cpu_context[security_state]);
+ return cm_get_context_by_index(platform_get_core_pos(mpidr), security_state);
}
/*******************************************************************************
{
assert(sec_state_is_valid(security_state));
- set_cpu_data_by_mpidr(mpidr, cpu_context[security_state], context);
+ cm_set_context_by_index(platform_get_core_pos(mpidr),
+ context, security_state);
}
/*******************************************************************************
}
/*******************************************************************************
- * The following function initializes a cpu_context for the current CPU for
+ * The following function initializes the cpu_context 'ctx' for
* first use, and sets the initial entrypoint state as specified by the
* entry_point_info structure.
*
* context and sets this as the next context to return to.
*
* The EE and ST attributes are used to configure the endianness and secure
- * timer availability for the new excution context.
+ * timer availability for the new execution context.
*
* To prepare the register state for entry call cm_prepare_el3_exit() and
* el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to
* cm_e1_sysreg_context_restore().
******************************************************************************/
-void cm_init_context(uint64_t mpidr, const entry_point_info_t *ep)
+static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
{
- uint32_t security_state;
- cpu_context_t *ctx;
+ unsigned int security_state;
uint32_t scr_el3;
el3_state_t *state;
gp_regs_t *gp_regs;
unsigned long sctlr_elx;
- security_state = GET_SECURITY_STATE(ep->h.attr);
- ctx = cm_get_context_by_mpidr(mpidr, security_state);
assert(ctx);
+ security_state = GET_SECURITY_STATE(ep->h.attr);
+
/* Clear any residual register values from the context */
memset(ctx, 0, sizeof(*ctx));
memcpy(gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t));
}
+/*******************************************************************************
+ * The following function initializes the cpu_context for a CPU specified by
+ * its `cpu_idx` for first use, and sets the initial entrypoint state as
+ * specified by the entry_point_info structure.
+ ******************************************************************************/
+void cm_init_context_by_index(unsigned int cpu_idx,
+ const entry_point_info_t *ep)
+{
+ cpu_context_t *ctx;
+ ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
+ cm_init_context_common(ctx, ep);
+}
+
+/*******************************************************************************
+ * The following function initializes the cpu_context for the current CPU
+ * for first use, and sets the initial entrypoint state as specified by the
+ * entry_point_info structure.
+ ******************************************************************************/
+void cm_init_my_context(const entry_point_info_t *ep)
+{
+ cpu_context_t *ctx;
+ ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
+ cm_init_context_common(ctx, ep);
+}
+
+/*******************************************************************************
+ * The following function provides a compatibility function for SPDs using the
+ * existing cm library routines. This function is expected to be invoked for
+ * initializing the cpu_context for the CPU specified by MPIDR for first use.
+ ******************************************************************************/
+void cm_init_context(unsigned long mpidr, const entry_point_info_t *ep)
+{
+ if ((mpidr & MPIDR_AFFINITY_MASK) ==
+ (read_mpidr_el1() & MPIDR_AFFINITY_MASK))
+ cm_init_my_context(ep);
+ else
+ cm_init_context_by_index(platform_get_core_pos(mpidr), ep);
+}
+
/*******************************************************************************
* Prepare the CPU system registers for first entry into secure or normal world
*
/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
void cm_set_context_by_mpidr(uint64_t mpidr,
void *context,
uint32_t security_state);
+void *cm_get_context_by_index(unsigned int cpu_idx,
+ unsigned int security_state);
+void cm_set_context_by_index(unsigned int cpu_idx,
+ void *context,
+ unsigned int security_state);
static inline void cm_set_context(void *context, uint32_t security_state);
void cm_init_context(uint64_t mpidr, const struct entry_point_info *ep);
+void cm_init_my_context(const struct entry_point_info *ep);
+void cm_init_context_by_index(unsigned int cpu_idx,
+ const struct entry_point_info *ep);
void cm_prepare_el3_exit(uint32_t security_state);
void cm_el1_sysregs_context_save(uint32_t security_state);
void cm_el1_sysregs_context_restore(uint32_t security_state);
madd x0, x0, x1, x2
.endm
+ /*
+ * This macro calculates the base address of the current CPU's MP stack
+ * using the plat_my_core_pos() index, the name of the stack storage
+ * and the size of each stack
+ * Out: X0 = physical address of stack base
+ * Clobber: X30, X1, X2
+ */
+ .macro get_my_mp_stack _name, _size
+ bl plat_my_core_pos
+ ldr x2, =(\_name + \_size)
+ mov x1, #\_size
+ madd x0, x0, x1, x2
+ .endm
+
/*
* This macro calculates the base address of a UP stack using the
* name of the stack storage and the size of the stack
uintptr_t *dev_handle,
uintptr_t *image_spec);
unsigned long plat_get_ns_image_entrypoint(void);
+unsigned int plat_my_core_pos(void);
+int plat_core_pos_by_mpidr(unsigned long mpidr);
/*******************************************************************************
* Mandatory interrupt management functions
/*******************************************************************************
* Optional common functions (may be overridden)
******************************************************************************/
-unsigned int platform_get_core_pos(unsigned long mpidr);
-unsigned long platform_get_stack(unsigned long mpidr);
+unsigned long plat_get_my_stack(void);
void plat_report_exception(unsigned long);
int plat_crash_console_init(void);
int plat_crash_console_putc(int c);
.local platform_normal_stacks
.weak platform_set_stack
.weak platform_get_stack
-
+ .weak plat_get_my_stack
+ .weak plat_set_my_stack
/* -----------------------------------------------------
* unsigned long platform_get_stack (unsigned long mpidr)
ret x9
endfunc platform_set_stack
	/* -----------------------------------------------------
	 * unsigned long plat_get_my_stack ()
	 *
	 * For the current CPU, this function returns the stack
	 * pointer for a stack allocated in device memory.
	 * Clobbers: x0 - x2, x10, x30.
	 * -----------------------------------------------------
	 */
func plat_get_my_stack
	/* Preserve the link register: get_my_mp_stack uses bl. */
	mov x10, x30 // lr
	get_my_mp_stack platform_normal_stacks, PLATFORM_STACK_SIZE
	ret x10
endfunc plat_get_my_stack
+
	/* -----------------------------------------------------
	 * void plat_set_my_stack ()
	 *
	 * For the current CPU, this function sets the stack
	 * pointer to a stack allocated in normal memory.
	 * -----------------------------------------------------
	 */
func plat_set_my_stack
	/*
	 * Save lr in x9; x9 survives the nested call because
	 * plat_get_my_stack saves its own lr in x10.
	 */
	mov x9, x30 // lr
	bl plat_get_my_stack
	mov sp, x0
	ret x9
endfunc plat_set_my_stack
+
/* -----------------------------------------------------
* Per-cpu stacks in normal memory. Each cpu gets a
* stack of PLATFORM_STACK_SIZE bytes.
.local platform_normal_stacks
+ .globl plat_set_my_stack
+ .globl plat_get_my_stack
.globl platform_set_stack
.globl platform_get_stack
/* -----------------------------------------------------
+ * unsigned long plat_get_my_stack ()
* unsigned long platform_get_stack (unsigned long)
*
* For cold-boot BL images, only the primary CPU needs a
* stack allocated in device memory.
* -----------------------------------------------------
*/
func plat_get_my_stack
	/*
	 * Alias label: legacy callers of platform_get_stack() fall
	 * through to the same UP implementation. The mpidr argument
	 * they pass is unused here — there is only one stack.
	 */
platform_get_stack:
	get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE
	ret
endfunc plat_get_my_stack
/* -----------------------------------------------------
+ * void plat_set_my_stack ()
* void platform_set_stack (unsigned long)
*
* For cold-boot BL images, only the primary CPU needs a
* allocated in normal memory.
* -----------------------------------------------------
*/
func plat_set_my_stack
	/*
	 * Alias label: legacy callers of platform_set_stack() fall
	 * through to the same UP implementation; their mpidr
	 * argument is ignored.
	 */
platform_set_stack:
	get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE
	mov sp, x0
	ret
endfunc plat_set_my_stack
/* -----------------------------------------------------
* Single cpu stack in normal memory.
}
/*******************************************************************************
- * Simple routine to determine whether an power domain instance at a given
- * level in an mpidr exists or not.
+ * Simple routine to determine whether a mpidr is valid or not.
******************************************************************************/
-int psci_validate_mpidr(unsigned long mpidr, int level)
+int psci_validate_mpidr(unsigned long mpidr)
{
- pwr_map_node_t *node;
-
- node = psci_get_pwr_map_node(mpidr, level);
- if (node && (node->state & PSCI_PWR_DOMAIN_PRESENT))
- return PSCI_E_SUCCESS;
- else
+ if (plat_core_pos_by_mpidr(mpidr) < 0)
return PSCI_E_INVALID_PARAMS;
+
+ return PSCI_E_SUCCESS;
}
/*******************************************************************************
* ---------------------------------------------
*/
do_stack_maintenance:
- mrs x0, mpidr_el1
- bl platform_get_stack
+ bl plat_get_my_stack
/* ---------------------------------------------
* Calculate and store the size of the used
* stack base address in x0.
* ---------------------------------------------
*/
- mrs x0, mpidr_el1
- bl platform_get_stack
+ bl plat_get_my_stack
mov x1, sp
sub x1, x0, x1
mov x0, sp
entry_point_info_t ep;
/* Determine if the cpu exists or not */
- rc = psci_validate_mpidr(target_cpu, MPIDR_AFFLVL0);
- if (rc != PSCI_E_SUCCESS) {
+ rc = psci_validate_mpidr(target_cpu);
+ if (rc != PSCI_E_SUCCESS)
return PSCI_E_INVALID_PARAMS;
- }
/* Validate the entrypoint using platform pm_ops */
if (psci_plat_pm_ops->validate_ns_entrypoint) {
return PSCI_E_NOT_PRESENT;
/* Check the validity of the specified target cpu */
- rc = psci_validate_mpidr(target_cpu, MPIDR_AFFLVL0);
+ rc = psci_validate_mpidr(target_cpu);
if (rc != PSCI_E_SUCCESS)
return PSCI_E_INVALID_PARAMS;
if (rc == PSCI_E_SUCCESS)
/* Store the re-entry information for the non-secure world. */
- cm_init_context(target_cpu, ep);
+ cm_init_context_by_index(target_idx, ep);
else
/* Restore the state on error. */
psci_do_state_coordination(MPIDR_AFFLVL0,
end_pwrlvl,
target_cpu_nodes,
PSCI_STATE_OFF);
+
exit:
/*
* This loop releases the lock corresponding to each power level
unsigned short psci_get_phys_state(pwr_map_node_t *node);
void psci_set_state(pwr_map_node_t *node, unsigned short state);
unsigned long mpidr_set_pwr_domain_inst(unsigned long, unsigned char, int);
-int psci_validate_mpidr(unsigned long, int);
+int psci_validate_mpidr(unsigned long mpidr);
int get_power_on_target_pwrlvl(void);
void psci_power_up_finish(int end_pwrlvl,
pwrlvl_power_on_finisher_t pon_handler);
state = plat_get_pwr_domain_state(level, mpidr);
psci_pwr_domain_map[idx].state = state;
- if (level == MPIDR_AFFLVL0) {
+ /*
+ * Check if this is a CPU node and is present in which case certain
+ * other initialisations are required.
+ */
+ if (level != MPIDR_AFFLVL0)
+ return;
- /*
- * Mark the cpu as OFF. Higher power level reference counts
- * have already been memset to 0
- */
- if (state & PSCI_PWR_DOMAIN_PRESENT)
- psci_set_state(&psci_pwr_domain_map[idx],
- PSCI_STATE_OFF);
+ if (!(state & PSCI_PWR_DOMAIN_PRESENT))
+ return;
- /*
- * Associate a non-secure context with this power
- * instance through the context management library.
- */
- linear_id = platform_get_core_pos(mpidr);
- assert(linear_id < PLATFORM_CORE_COUNT);
+ /*
+ * Mark the cpu as OFF. Higher power level reference counts
+ * have already been memset to 0
+ */
+ psci_set_state(&psci_pwr_domain_map[idx], PSCI_STATE_OFF);
- /* Invalidate the suspend context for the node */
- set_cpu_data_by_index(linear_id,
- psci_svc_cpu_data.power_state,
- PSCI_INVALID_DATA);
+ /*
+ * Associate a non-secure context with this power
+ * instance through the context management library.
+ */
+ linear_id = plat_core_pos_by_mpidr(mpidr);
+ assert(linear_id < PLATFORM_CORE_COUNT);
- flush_cpu_data_by_index(linear_id, psci_svc_cpu_data);
+ /* Invalidate the suspend context for the node */
+ set_cpu_data_by_index(linear_id,
+ psci_svc_cpu_data.power_state,
+ PSCI_INVALID_DATA);
- cm_set_context_by_mpidr(mpidr,
- (void *) &psci_ns_context[linear_id],
- NON_SECURE);
- }
+ flush_cpu_data_by_index(linear_id, psci_svc_cpu_data);
- return;
+ cm_set_context_by_index(linear_id,
+ (void *) &psci_ns_context[linear_id],
+ NON_SECURE);
}
/*******************************************************************************
{
unsigned int power_state;
- power_state = get_cpu_data_by_mpidr(mpidr,
+ power_state = get_cpu_data_by_index(plat_core_pos_by_mpidr(mpidr),
psci_svc_cpu_data.power_state);
return ((power_state == PSCI_INVALID_DATA) ?
/*
* Store the re-entry information for the non-secure world.
*/
- cm_init_context(read_mpidr_el1(), ep);
+ cm_init_my_context(ep);
/* Set the secure world (EL3) re-entry point after BL1 */
psci_entrypoint = (unsigned long) psci_cpu_suspend_finish_entry;