ldr x1, =__COHERENT_RAM_UNALIGNED_SIZE__
bl zeromem16
+ /* ---------------------------------------------
+ * Initialise cpu_data and crash reporting
+ * ---------------------------------------------
+ */
+ bl init_cpu_data_ptr
+#if CRASH_REPORTING
+ bl init_crash_reporting
+#endif
+
/* ---------------------------------------------
* Use SP_EL0 for the C runtime stack.
* ---------------------------------------------
--- /dev/null
+/*
+ * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm_macros.S>
+#include <cpu_data.h>
+
+.globl init_cpu_data_ptr
+.globl _cpu_data_by_mpidr
+.globl _cpu_data_by_index
+
+/* -----------------------------------------------------------------
+ * void init_cpu_data_ptr(void)
+ *
+ * Initialise the TPIDR_EL3 register to refer to the cpu_data_t
+ * for the calling CPU. This must be called before cm_get_cpu_data()
+ *
+ * This can be called without a valid stack.
+ * clobbers: x0, x1, x9, x10
+ * -----------------------------------------------------------------
+ */
+func init_cpu_data_ptr
+ mov x10, x30
+ mrs x0, mpidr_el1
+ bl _cpu_data_by_mpidr
+ msr tpidr_el3, x0
+ ret x10
+
+
+/* -----------------------------------------------------------------
+ * cpu_data_t *_cpu_data_by_mpidr(uint64_t mpidr)
+ *
+ * Return the cpu_data structure for the CPU with given MPIDR
+ *
+ * This can be called without a valid stack. It assumes that
+ * platform_get_core_pos() does not clobber register x9.
+ * clobbers: x0, x1, x9
+ * -----------------------------------------------------------------
+ */
+func _cpu_data_by_mpidr
+ mov x9, x30
+ bl platform_get_core_pos
+ mov x30, x9
+ b _cpu_data_by_index
+
+
+/* -----------------------------------------------------------------
+ * cpu_data_t *_cpu_data_by_index(uint32_t cpu_index)
+ *
+ * Return the cpu_data structure for the CPU with given linear index
+ *
+ * This can be called without a valid stack.
+ * clobbers: x0, x1
+ * -----------------------------------------------------------------
+ */
+func _cpu_data_by_index
+ adr x1, percpu_data
+ add x0, x1, x0, LSL #CPU_DATA_LOG2SIZE
+ ret
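
For reference, a minimal C sketch of the lookup these assembly helpers perform, assuming the percpu_data array and cpu_data_t definition introduced later in this patch; the *_sketch names are illustrative only and not part of the change:

#include <cpu_data.h>	/* cpu_data_t, CPU_DATA_LOG2SIZE (added later in this patch) */

extern cpu_data_t percpu_data[];		/* defined in cpu_data_array.c */
extern unsigned int platform_get_core_pos(unsigned long mpidr);

/* Equivalent of _cpu_data_by_index: percpu_data base plus
 * (index << CPU_DATA_LOG2SIZE), i.e. plain array indexing,
 * since sizeof(cpu_data_t) == (1 << CPU_DATA_LOG2SIZE). */
static cpu_data_t *cpu_data_by_index_sketch(uint32_t index)
{
	return &percpu_data[index];
}

/* Equivalent of _cpu_data_by_mpidr: convert the MPIDR to a linear
 * index with the existing platform_get_core_pos() helper, then
 * index the array. */
static cpu_data_t *cpu_data_by_mpidr_sketch(uint64_t mpidr)
{
	return cpu_data_by_index_sketch(platform_get_core_pos(mpidr));
}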
#include <arch.h>
#include <asm_macros.S>
#include <context.h>
+#include <cpu_data.h>
#include <plat_macros.S>
#include <platform_def.h>
- .globl get_crash_stack
.globl dump_state_and_die
.globl dump_intr_state_and_die
+ .globl init_crash_reporting
#if CRASH_REPORTING
/* ------------------------------------------------------
/* Check if tpidr is initialized */
cbz x0, infinite_loop
- ldr x0, [x0, #PTR_CACHE_CRASH_STACK_OFFSET]
+ ldr x0, [x0, #CPU_DATA_CRASH_STACK_OFFSET]
/* store the x30 and sp to stack */
str x30, [x0, #-(REG_SIZE)]!
mov x30, sp
#define PCPU_CRASH_STACK_SIZE 0x140
/* -----------------------------------------------------
- * void get_crash_stack (uint64_t mpidr) : This
- * function is used to allocate a small stack for
- * reporting unhandled exceptions
+ * Per-cpu crash stacks in normal memory.
* -----------------------------------------------------
*/
-func get_crash_stack
- mov x10, x30 // lr
- get_mp_stack pcpu_crash_stack, PCPU_CRASH_STACK_SIZE
- ret x10
+declare_stack pcpu_crash_stack, tzfw_normal_stacks, \
+ PCPU_CRASH_STACK_SIZE, PLATFORM_CORE_COUNT
/* -----------------------------------------------------
- * Per-cpu crash stacks in normal memory.
+ * Provides each CPU with a small stack for reporting
+ * unhandled exceptions, and stores each stack's address
+ * in cpu_data
+ *
+ * This can be called without a runtime stack
+ * clobbers: x0 - x4
* -----------------------------------------------------
*/
-declare_stack pcpu_crash_stack, tzfw_normal_stacks, \
- PCPU_CRASH_STACK_SIZE, PLATFORM_CORE_COUNT
+func init_crash_reporting
+ mov x4, x30
+ mov x2, #0
+ adr x3, pcpu_crash_stack
+init_crash_loop:
+ mov x0, x2
+ bl _cpu_data_by_index
+ add x3, x3, #PCPU_CRASH_STACK_SIZE
+ str x3, [x0, #CPU_DATA_CRASH_STACK_OFFSET]
+ add x2, x2, #1
+ cmp x2, #PLATFORM_CORE_COUNT
+ b.lo init_crash_loop
+ ret x4
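
A minimal C sketch of what this loop does (illustrative only, not part of the patch): because stacks grow down, each CPU's cpu_data entry records the address just past the top of its own PCPU_CRASH_STACK_SIZE region.

#include <cpu_data.h>	/* _cpu_data_by_index(), PLATFORM_CORE_COUNT */

/* PCPU_CRASH_STACK_SIZE and pcpu_crash_stack mirror the definitions above;
 * the real storage is carved out by the declare_stack macro. */
#define PCPU_CRASH_STACK_SIZE	0x140
extern char pcpu_crash_stack[PLATFORM_CORE_COUNT][PCPU_CRASH_STACK_SIZE];

static void init_crash_reporting_sketch(void)
{
	/* Store the address just past the top of each CPU's stack region. */
	for (unsigned int idx = 0U; idx < PLATFORM_CORE_COUNT; idx++)
		_cpu_data_by_index(idx)->crash_stack =
			(uint64_t)pcpu_crash_stack[idx] + PCPU_CRASH_STACK_SIZE;
}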
BL31_SOURCES += bl31/bl31_main.c \
bl31/context_mgmt.c \
+ bl31/cpu_data_array.c \
bl31/runtime_svc.c \
bl31/interrupt_mgmt.c \
bl31/aarch64/bl31_arch_setup.c \
bl31/aarch64/bl31_entrypoint.S \
bl31/aarch64/context.S \
+ bl31/aarch64/cpu_data.S \
bl31/aarch64/runtime_exceptions.S \
bl31/aarch64/crash_reporting.S \
common/aarch64/early_exceptions.S \
*/
assert(cm_get_context(NON_SECURE));
cm_set_next_eret_context(NON_SECURE);
- cm_init_pcpu_ptr_cache();
write_vbar_el3((uint64_t) runtime_exceptions);
isb();
next_image_type = NON_SECURE;
static context_info_t cm_context_info[PLATFORM_CORE_COUNT];
-/* The per_cpu_ptr_cache_t space allocation */
-static per_cpu_ptr_cache_t per_cpu_ptr_cache_space[PLATFORM_CORE_COUNT];
-
/*******************************************************************************
* Context management library initialisation routine. This library is used by
* runtime services to share pointers to 'cpu_context' structures for the secure
"msr spsel, #0\n"
: : "r" (ctx));
}
-
-/************************************************************************
- * The following function is used to populate the per cpu pointer cache.
- * The pointer will be stored in the tpidr_el3 register.
- *************************************************************************/
-void cm_init_pcpu_ptr_cache()
-{
- unsigned long mpidr = read_mpidr();
- uint32_t linear_id = platform_get_core_pos(mpidr);
- per_cpu_ptr_cache_t *pcpu_ptr_cache;
-
- pcpu_ptr_cache = &per_cpu_ptr_cache_space[linear_id];
- assert(pcpu_ptr_cache);
-#if CRASH_REPORTING
- pcpu_ptr_cache->crash_stack = get_crash_stack(mpidr);
-#endif
-
- cm_set_pcpu_ptr_cache(pcpu_ptr_cache);
-}
-
-
-void cm_set_pcpu_ptr_cache(const void *pcpu_ptr)
-{
- write_tpidr_el3((unsigned long)pcpu_ptr);
-}
-
-void *cm_get_pcpu_ptr_cache(void)
-{
- return (void *)read_tpidr_el3();
-}
-
--- /dev/null
+/*
+ * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <cassert.h>
+#include <cpu_data.h>
+#include <platform_def.h>
+
+/* verify assembler offsets match data structures */
+CASSERT(CPU_DATA_CRASH_STACK_OFFSET == __builtin_offsetof
+ (cpu_data_t, crash_stack),
+ assert_cpu_data_crash_stack_offset_mismatch);
+
+CASSERT((1 << CPU_DATA_LOG2SIZE) == sizeof(cpu_data_t),
+ assert_cpu_data_log2size_mismatch);
+
+/* Space allocation for the per-cpu cpu_data structures */
+cpu_data_t percpu_data[PLATFORM_CORE_COUNT];
#define CTX_FP_FPCR 0x208
#define CTX_FPREGS_END 0x210
-/******************************************************************************
- * Offsets for the per cpu cache implementation
- ******************************************************************************/
-#define PTR_CACHE_CRASH_STACK_OFFSET 0x0
-
#ifndef __ASSEMBLY__
#include <cassert.h>
void fpregs_context_restore(fp_regs_t *regs);
-/* Per-CPU pointer cache of recently used pointers and also the crash stack
- * TODO: Add other commonly used variables to this (tf_issues#90)
- */
-typedef struct per_cpu_ptr_cache {
- uint64_t crash_stack;
-} per_cpu_ptr_cache_t;
-
-CASSERT(PTR_CACHE_CRASH_STACK_OFFSET == __builtin_offsetof\
- (per_cpu_ptr_cache_t, crash_stack), \
- assert_per_cpu_ptr_cache_crash_stack_offset_mismatch);
-
#undef CTX_SYSREG_ALL
#undef CTX_FP_ALL
#undef CTX_GPREG_ALL
uint32_t bit_pos,
uint32_t value);
void cm_set_next_eret_context(uint32_t security_state);
-void cm_init_pcpu_ptr_cache();
-void cm_set_pcpu_ptr_cache(const void *pcpu_ptr);
-void *cm_get_pcpu_ptr_cache(void);
uint32_t cm_get_scr_el3(uint32_t security_state);
+
#endif /* __CM_H__ */
--- /dev/null
+/*
+ * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __CPU_DATA_H__
+#define __CPU_DATA_H__
+
+/* Offsets for the cpu_data structure */
+#define CPU_DATA_CRASH_STACK_OFFSET 0x0
+#define CPU_DATA_LOG2SIZE 6
+
+#ifndef __ASSEMBLY__
+
+#include <arch_helpers.h>
+#include <platform_def.h>
+#include <stdint.h>
+
+/*******************************************************************************
+ * Function & variable prototypes
+ ******************************************************************************/
+
+/*******************************************************************************
+ * Cache of frequently used per-cpu data:
+ * Address of the crash stack
+ * Each entry is aligned to the cache line boundary to allow efficient
+ * concurrent manipulation by different cpus without false sharing
+ *
+ * TODO: Add other commonly used variables to this (tf_issues#90)
+ *
+ * The data structure and the _cpu_data accessors should not be used directly
+ * by components that have per-cpu members; such components should use the
+ * member access macros defined below.
+ ******************************************************************************/
+
+typedef struct cpu_data {
+ uint64_t crash_stack;
+} __aligned(CACHE_WRITEBACK_GRANULE) cpu_data_t;
+
+struct cpu_data *_cpu_data_by_index(uint32_t cpu_index);
+struct cpu_data *_cpu_data_by_mpidr(uint64_t mpidr);
+
+/* Return the cpu_data structure for the current CPU. */
+static inline struct cpu_data *_cpu_data(void)
+{
+ return (cpu_data_t *)read_tpidr_el3();
+}
+
+
+/**************************************************************************
+ * APIs for initialising and accessing per-cpu data
+ *************************************************************************/
+
+void init_cpu_data_ptr(void);
+
+#define get_cpu_data(_m) _cpu_data()->_m
+#define set_cpu_data(_m, _v) _cpu_data()->_m = _v
+#define get_cpu_data_by_index(_ix, _m) _cpu_data_by_index(_ix)->_m
+#define set_cpu_data_by_index(_ix, _m, _v) _cpu_data_by_index(_ix)->_m = _v
+#define get_cpu_data_by_mpidr(_id, _m) _cpu_data_by_mpidr(_id)->_m
+#define set_cpu_data_by_mpidr(_id, _m, _v) _cpu_data_by_mpidr(_id)->_m = _v
+
+
+#endif /* __ASSEMBLY__ */
+#endif /* __CPU_DATA_H__ */
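
As an illustration of the intended usage (not part of the patch), a BL3-1 component reads and writes its per-cpu members through these macros rather than touching tpidr_el3 or the percpu_data array directly; crash_stack is the only member so far. The helper name below is hypothetical:

#include <cpu_data.h>

/* Hypothetical helper, for illustration only. */
static void record_crash_stack_example(uint64_t stack_top)
{
	/* Update the current CPU's entry, located via tpidr_el3. */
	set_cpu_data(crash_stack, stack_top);

	/* Read the same member back for CPU 0 by linear index. */
	uint64_t cpu0_stack = get_cpu_data_by_index(0, crash_stack);
	(void)cpu0_stack;
}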
void runtime_svc_init();
extern uint64_t __RT_SVC_DESCS_START__;
extern uint64_t __RT_SVC_DESCS_END__;
-uint64_t get_crash_stack(uint64_t mpidr);
+void init_crash_reporting(void);
void runtime_exceptions(void);
+
#endif /*__ASSEMBLY__*/
#endif /* __RUNTIME_SVC_H__ */
*/
assert(cm_get_context(NON_SECURE));
cm_set_next_eret_context(NON_SECURE);
- cm_init_pcpu_ptr_cache();
write_vbar_el3((uint64_t) runtime_exceptions);
/*
* set on this cpu prior to suspension.
*/
cm_set_next_eret_context(NON_SECURE);
- cm_init_pcpu_ptr_cache();
write_vbar_el3((uint64_t) runtime_exceptions);
/*
psci_aff_common_finish_entry:
adr x22, psci_afflvl_power_on_finish
+ /* ---------------------------------------------
+ * Initialise the per-cpu data pointer for this CPU
+ * ---------------------------------------------
+ */
+ bl init_cpu_data_ptr
+
/* ---------------------------------------------
* Exceptions should not occur at this point.
* Set VBAR in order to handle and report any