Introduce `el3_runtime` and `PSCI` libraries
authorSoby Mathew <soby.mathew@arm.com>
Thu, 24 Mar 2016 16:56:29 +0000 (16:56 +0000)
committerSoby Mathew <soby.mathew@arm.com>
Mon, 18 Jul 2016 16:52:15 +0000 (17:52 +0100)
This patch moves the PSCI services, and the BL31 frameworks for context
management and per-CPU data, into new library components named `PSCI` and
`el3_runtime` respectively. This enables PSCI to be built independently of
BL31. A new `psci_lib.mk` makefile is introduced; it adds the relevant PSCI
library sources and is included by `bl31.mk`. The other changes made as
part of this patch are:

* The runtime services framework is moved to the `common/` folder to
  enable reuse.
* The `asm_macros.S` and `assert_macros.S` helpers are moved to an
  architecture-specific folder (`include/common/aarch64/`).
* `plat_psci_common.c` is moved from the `plat/common/aarch64/` folder to
  the `plat/common/` folder. A stub left at the original location simply
  includes the file from its new location, to maintain platform
  compatibility.

Most of these changes should not affect platform builds, since they only
touch the generic `bl1.mk` and `bl31.mk` makefiles.
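
To illustrate, a sketch of the kind of content the new makefile plausibly
carries, inferred from the `${PSCI_LIB_SOURCES}` reference and the
conditionals dropped from `bl31.mk` in the diff below; the authoritative
list lives in `lib/psci/psci_lib.mk`:

    # Illustrative sketch only -- see lib/psci/psci_lib.mk for the real list.
    PSCI_LIB_SOURCES    :=  lib/el3_runtime/aarch64/context.S       \
                            lib/el3_runtime/aarch64/context_mgmt.c  \
                            lib/psci/psci_main.c                    \
                            lib/psci/psci_setup.c

    ifeq (${USE_COHERENT_MEM}, 1)
    PSCI_LIB_SOURCES    +=  lib/locks/bakery/bakery_lock_coherent.c
    else
    PSCI_LIB_SOURCES    +=  lib/locks/bakery/bakery_lock_normal.c
    endif

    ifeq (${ENABLE_PSCI_STAT}, 1)
    PSCI_LIB_SOURCES    +=  lib/psci/psci_stat.c
    endif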

NOTE: THE `plat_psci_common.c` FILE HAS MOVED LOCATION AND THE STUB FILE AT
THE ORIGINAL LOCATION IS NOW DEPRECATED. PLATFORMS SHOULD MODIFY THEIR
MAKEFILES TO INCLUDE THE FILE FROM THE NEW LOCATION.
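
For example, a platform makefile that currently compiles the stub would be
updated along these lines (illustrative; `PLAT_BL_COMMON_SOURCES` stands in
for whichever source list the platform actually uses):

    -PLAT_BL_COMMON_SOURCES  +=  plat/common/aarch64/plat_psci_common.c
    +PLAT_BL_COMMON_SOURCES  +=  plat/common/plat_psci_common.c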

Change-Id: I6bd87d5b59424995c6a65ef8076d4fda91ad5e86

59 files changed:
Makefile
bl1/bl1.mk
bl31/aarch64/cpu_data.S [deleted file]
bl31/bl31.mk
bl31/bl31_context_mgmt.c
bl31/cpu_data_array.c [deleted file]
bl31/runtime_svc.c [deleted file]
common/aarch64/context.S [deleted file]
common/context_mgmt.c [deleted file]
common/runtime_svc.c [new file with mode: 0644]
docs/firmware-design.md
docs/rt-svc-writers-guide.md
include/bl31/cpu_data.h [deleted file]
include/bl31/runtime_svc.h [deleted file]
include/bl31/services/psci.h [deleted file]
include/bl31/services/psci_compat.h [deleted file]
include/bl31/services/std_svc.h [deleted file]
include/common/aarch64/asm_macros.S [new file with mode: 0644]
include/common/aarch64/assert_macros.S [new file with mode: 0644]
include/common/asm_macros.S [deleted file]
include/common/assert_macros.S [deleted file]
include/common/context.h [deleted file]
include/common/context_mgmt.h [deleted file]
include/common/runtime_svc.h [new file with mode: 0644]
include/lib/el3_runtime/aarch64/context.h [new file with mode: 0644]
include/lib/el3_runtime/context_mgmt.h [new file with mode: 0644]
include/lib/el3_runtime/cpu_data.h [new file with mode: 0644]
include/lib/psci/psci.h [new file with mode: 0644]
include/lib/psci/psci_compat.h [new file with mode: 0644]
include/services/std_svc.h [new file with mode: 0644]
lib/el3_runtime/aarch64/context.S [new file with mode: 0644]
lib/el3_runtime/aarch64/context_mgmt.c [new file with mode: 0644]
lib/el3_runtime/aarch64/cpu_data.S [new file with mode: 0644]
lib/el3_runtime/cpu_data_array.c [new file with mode: 0644]
lib/psci/aarch64/psci_entry.S [new file with mode: 0644]
lib/psci/aarch64/psci_helpers.S [new file with mode: 0644]
lib/psci/psci_common.c [new file with mode: 0644]
lib/psci/psci_lib.mk [new file with mode: 0644]
lib/psci/psci_main.c [new file with mode: 0644]
lib/psci/psci_off.c [new file with mode: 0644]
lib/psci/psci_on.c [new file with mode: 0644]
lib/psci/psci_private.h [new file with mode: 0644]
lib/psci/psci_setup.c [new file with mode: 0644]
lib/psci/psci_stat.c [new file with mode: 0644]
lib/psci/psci_suspend.c [new file with mode: 0644]
lib/psci/psci_system_off.c [new file with mode: 0644]
plat/common/aarch64/plat_psci_common.c
plat/common/plat_psci_common.c [new file with mode: 0644]
services/std_svc/psci/psci_common.c [deleted file]
services/std_svc/psci/psci_entry.S [deleted file]
services/std_svc/psci/psci_helpers.S [deleted file]
services/std_svc/psci/psci_main.c [deleted file]
services/std_svc/psci/psci_off.c [deleted file]
services/std_svc/psci/psci_on.c [deleted file]
services/std_svc/psci/psci_private.h [deleted file]
services/std_svc/psci/psci_setup.c [deleted file]
services/std_svc/psci/psci_stat.c [deleted file]
services/std_svc/psci/psci_suspend.c [deleted file]
services/std_svc/psci/psci_system_off.c [deleted file]

index 800312c94feabd1afb4e4b0d7a271ca02967d5fc..bb5098b2365b1eb4f45ff7975e788d21c8672992 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -226,20 +226,24 @@ BL_COMMON_SOURCES +=      common/bl_common.c                      \
                                plat/common/aarch64/platform_helpers.S  \
                                ${STDLIB_SRCS}
 
-INCLUDES               +=      -Iinclude/bl1                   \
-                               -Iinclude/bl31                  \
-                               -Iinclude/bl31/services         \
-                               -Iinclude/common                \
-                               -Iinclude/drivers               \
-                               -Iinclude/drivers/arm           \
-                               -Iinclude/drivers/auth          \
-                               -Iinclude/drivers/io            \
-                               -Iinclude/drivers/ti/uart       \
-                               -Iinclude/lib                   \
-                               -Iinclude/lib/aarch64           \
-                               -Iinclude/lib/cpus/aarch64      \
-                               -Iinclude/plat/common           \
-                               ${PLAT_INCLUDES}                \
+INCLUDES               +=      -Iinclude/bl1                           \
+                               -Iinclude/bl31                          \
+                               -Iinclude/common                        \
+                               -Iinclude/common/aarch64                \
+                               -Iinclude/drivers                       \
+                               -Iinclude/drivers/arm                   \
+                               -Iinclude/drivers/auth                  \
+                               -Iinclude/drivers/io                    \
+                               -Iinclude/drivers/ti/uart               \
+                               -Iinclude/lib                           \
+                               -Iinclude/lib/aarch64                   \
+                               -Iinclude/lib/cpus/aarch64              \
+                               -Iinclude/lib/el3_runtime               \
+                               -Iinclude/lib/el3_runtime/aarch64       \
+                               -Iinclude/lib/psci                      \
+                               -Iinclude/plat/common                   \
+                               -Iinclude/services                      \
+                               ${PLAT_INCLUDES}                        \
                                ${SPD_INCLUDES}
 
 
index 21e87c79cf0d2e7d83c9dd285eee72af90ba3eef..591e047b44ab8b8f15ff74277a8375ee431f44fb 100644 (file)
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are met:
@@ -33,9 +33,9 @@ BL1_SOURCES           +=      bl1/bl1_main.c                          \
                                bl1/aarch64/bl1_entrypoint.S            \
                                bl1/aarch64/bl1_exceptions.S            \
                                bl1/bl1_context_mgmt.c                  \
-                               common/aarch64/context.S                \
-                               common/context_mgmt.c                   \
                                lib/cpus/aarch64/cpu_helpers.S          \
+                               lib/el3_runtime/aarch64/context.S       \
+                               lib/el3_runtime/aarch64/context_mgmt.c  \
                                plat/common/plat_bl1_common.c
 
 ifeq (${TRUSTED_BOARD_BOOT},1)
diff --git a/bl31/aarch64/cpu_data.S b/bl31/aarch64/cpu_data.S
deleted file mode 100644 (file)
index 0842825..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <asm_macros.S>
-#include <cpu_data.h>
-
-.globl init_cpu_data_ptr
-.globl _cpu_data_by_index
-
-/* -----------------------------------------------------------------
- * void init_cpu_data_ptr(void)
- *
- * Initialise the TPIDR_EL3 register to refer to the cpu_data_t
- * for the calling CPU. This must be called before cm_get_cpu_data()
- *
- * This can be called without a valid stack. It assumes that
- * plat_my_core_pos() does not clobber register x10.
- * clobbers: x0, x1, x10
- * -----------------------------------------------------------------
- */
-func init_cpu_data_ptr
-       mov     x10, x30
-       bl      plat_my_core_pos
-       bl      _cpu_data_by_index
-       msr     tpidr_el3, x0
-       ret     x10
-endfunc init_cpu_data_ptr
-
-/* -----------------------------------------------------------------
- * cpu_data_t *_cpu_data_by_index(uint32_t cpu_index)
- *
- * Return the cpu_data structure for the CPU with given linear index
- *
- * This can be called without a valid stack.
- * clobbers: x0, x1
- * -----------------------------------------------------------------
- */
-func _cpu_data_by_index
-       adr     x1, percpu_data
-       add     x0, x1, x0, LSL #CPU_DATA_LOG2SIZE
-       ret
-endfunc _cpu_data_by_index
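
In C terms, the `_cpu_data_by_index` lookup above is plain array indexing
into the `percpu_data` array (a sketch, assuming the `cpu_data_t` type and
`percpu_data` declaration from `cpu_data.h`; `1 << CPU_DATA_LOG2SIZE` is
`sizeof(cpu_data_t)`):

    #include <stdint.h>
    #include <cpu_data.h>   /* cpu_data_t, percpu_data[] */

    /* C equivalent of the assembly: percpu_data + index * sizeof(cpu_data_t) */
    static cpu_data_t *cpu_data_by_index_sketch(uint32_t cpu_index)
    {
            return &percpu_data[cpu_index];
    }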
index 8a7fccb0d7168f20e904bcbe70e5ffc35f6d71ca..dd3e4cf19935235ed3b74bb42ddb2eda38b6e3da 100644 (file)
 # POSSIBILITY OF SUCH DAMAGE.
 #
 
+include lib/psci/psci_lib.mk
+
 BL31_SOURCES           +=      bl31/bl31_main.c                                \
-                               bl31/cpu_data_array.c                           \
-                               bl31/runtime_svc.c                              \
                                bl31/interrupt_mgmt.c                           \
                                bl31/aarch64/bl31_arch_setup.c                  \
                                bl31/aarch64/bl31_entrypoint.S                  \
-                               bl31/aarch64/cpu_data.S                         \
                                bl31/aarch64/runtime_exceptions.S               \
                                bl31/aarch64/crash_reporting.S                  \
                                bl31/bl31_context_mgmt.c                        \
-                               common/aarch64/context.S                        \
-                               common/context_mgmt.c                           \
-                               lib/cpus/aarch64/cpu_helpers.S                  \
-                               lib/locks/exclusive/spinlock.S                  \
+                               common/runtime_svc.c                            \
                                services/std_svc/std_svc_setup.c                \
-                               services/std_svc/psci/psci_off.c                \
-                               services/std_svc/psci/psci_on.c                 \
-                               services/std_svc/psci/psci_suspend.c            \
-                               services/std_svc/psci/psci_common.c             \
-                               services/std_svc/psci/psci_entry.S              \
-                               services/std_svc/psci/psci_helpers.S            \
-                               services/std_svc/psci/psci_main.c               \
-                               services/std_svc/psci/psci_setup.c              \
-                               services/std_svc/psci/psci_system_off.c
-
-ifeq (${USE_COHERENT_MEM}, 1)
-BL31_SOURCES           +=      lib/locks/bakery/bakery_lock_coherent.c
-else
-BL31_SOURCES           +=      lib/locks/bakery/bakery_lock_normal.c
-endif
+                               ${PSCI_LIB_SOURCES}
 
 ifeq (${ENABLE_PMF}, 1)
 BL31_SOURCES           +=      lib/pmf/pmf_main.c
 endif
 
-ifeq (${ENABLE_PSCI_STAT}, 1)
-BL31_SOURCES           +=      services/std_svc/psci/psci_stat.c
-endif
-
 BL31_LINKERFILE                :=      bl31/bl31.ld.S
 
 # Flag used to indicate if Crash reporting via console should be included
index ae2442471a980b030511fecb046b59980292fc7b..f8751c2ab0aa96f1e850fd2c4beaac64421ce36e 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -30,6 +30,7 @@
 
 #include <assert.h>
 #include <bl31.h>
+#include <bl_common.h>
 #include <context.h>
 #include <context_mgmt.h>
 #include <cpu_data.h>
@@ -130,4 +131,4 @@ void cm_init_context(unsigned long mpidr, const entry_point_info_t *ep)
        else
                cm_init_context_by_index(platform_get_core_pos(mpidr), ep);
 }
-#endif
\ No newline at end of file
+#endif
diff --git a/bl31/cpu_data_array.c b/bl31/cpu_data_array.c
deleted file mode 100644 (file)
index 4cba118..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <cassert.h>
-#include <cpu_data.h>
-#include <platform_def.h>
-
-/* The per_cpu_ptr_cache_t space allocation */
-cpu_data_t percpu_data[PLATFORM_CORE_COUNT];
diff --git a/bl31/runtime_svc.c b/bl31/runtime_svc.c
deleted file mode 100644 (file)
index 8729e29..0000000
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <assert.h>
-#include <debug.h>
-#include <errno.h>
-#include <runtime_svc.h>
-#include <string.h>
-
-/*******************************************************************************
- * The 'rt_svc_descs' array holds the runtime service descriptors exported by
- * services by placing them in the 'rt_svc_descs' linker section.
- * The 'rt_svc_descs_indices' array holds the index of a descriptor in the
- * 'rt_svc_descs' array. When an SMC arrives, the OEN[29:24] bits and the call
- * type[31] bit in the function id are combined to get an index into the
- * 'rt_svc_descs_indices' array. This gives the index of the descriptor in the
- * 'rt_svc_descs' array which contains the SMC handler.
- ******************************************************************************/
-#define RT_SVC_DESCS_START     ((uintptr_t) (&__RT_SVC_DESCS_START__))
-#define RT_SVC_DESCS_END       ((uintptr_t) (&__RT_SVC_DESCS_END__))
-uint8_t rt_svc_descs_indices[MAX_RT_SVCS];
-static rt_svc_desc_t *rt_svc_descs;
-
-#define RT_SVC_DECS_NUM                ((RT_SVC_DESCS_END - RT_SVC_DESCS_START)\
-                                       / sizeof(rt_svc_desc_t))
-
-/*******************************************************************************
- * Simple routine to sanity check a runtime service descriptor before using it
- ******************************************************************************/
-static int32_t validate_rt_svc_desc(rt_svc_desc_t *desc)
-{
-       if (desc == NULL)
-               return -EINVAL;
-
-       if (desc->start_oen > desc->end_oen)
-               return -EINVAL;
-
-       if (desc->end_oen >= OEN_LIMIT)
-               return -EINVAL;
-
-       if (desc->call_type != SMC_TYPE_FAST && desc->call_type != SMC_TYPE_STD)
-               return -EINVAL;
-
-       /* A runtime service having no init or handle function doesn't make sense */
-       if (desc->init == NULL && desc->handle == NULL)
-               return -EINVAL;
-
-       return 0;
-}
-
-/*******************************************************************************
- * This function calls the initialisation routine in the descriptor exported by
- * a runtime service. Once a descriptor has been validated, its start & end
- * owning entity numbers and the call type are combined to form a unique oen.
- * The unique oen is used as an index into the 'rt_svc_descs_indices' array.
- * The index of the runtime service descriptor is stored at this index.
- ******************************************************************************/
-void runtime_svc_init(void)
-{
-       int rc = 0, index, start_idx, end_idx;
-
-       /* Assert that the number of descriptors detected is less than the maximum */
-       assert((RT_SVC_DECS_NUM >= 0) && (RT_SVC_DECS_NUM < MAX_RT_SVCS));
-
-       /* If no runtime services are implemented then simply bail out */
-       if (RT_SVC_DECS_NUM == 0)
-               return;
-
-       /* Initialise internal variables to invalid state */
-       memset(rt_svc_descs_indices, -1, sizeof(rt_svc_descs_indices));
-
-       rt_svc_descs = (rt_svc_desc_t *) RT_SVC_DESCS_START;
-       for (index = 0; index < RT_SVC_DECS_NUM; index++) {
-
-               /*
-                * An invalid descriptor is an error condition since it is
-                * difficult to predict the system behaviour in the absence
-                * of this service.
-                */
-               rc = validate_rt_svc_desc(&rt_svc_descs[index]);
-               if (rc) {
-                       ERROR("Invalid runtime service descriptor %p (%s)\n",
-                                       (void *) &rt_svc_descs[index],
-                                       rt_svc_descs[index].name);
-                       goto error;
-               }
-
-               /*
-                * The runtime service may have separate rt_svc_desc_t
-                * for its fast smc and standard smc. Since the service itself
-                * needs to be initialized only once, only one of them will have
-                * an initialisation routine defined. Call the initialisation
-                * routine for this runtime service, if it is defined.
-                */
-               if (rt_svc_descs[index].init) {
-                       rc = rt_svc_descs[index].init();
-                       if (rc) {
-                               ERROR("Error initializing runtime service %s\n",
-                                               rt_svc_descs[index].name);
-                               continue;
-                       }
-               }
-
-               /*
-                * Fill the indices corresponding to the start and end
-                * owning entity numbers with the index of the
-                * descriptor which will handle the SMCs for this owning
-                * entity range.
-                */
-               start_idx = get_unique_oen(rt_svc_descs[index].start_oen,
-                               rt_svc_descs[index].call_type);
-               end_idx = get_unique_oen(rt_svc_descs[index].end_oen,
-                               rt_svc_descs[index].call_type);
-
-               for (; start_idx <= end_idx; start_idx++)
-                       rt_svc_descs_indices[start_idx] = index;
-       }
-
-       return;
-error:
-       panic();
-}
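
To make the descriptor lookup concrete: given an SMC function ID, the OEN
field and the call-type bit can be folded into a single index as sketched
below. Only the OEN[29:24]/type[31] extraction is taken from the comment
block above; the exact bit packing is an assumption, and the real code uses
the `get_unique_oen()` helper:

    #include <stdint.h>

    /* Sketch: fold the call-type bit and the OEN field of an SMC function
     * ID into one index for rt_svc_descs_indices[]. The packing order is
     * illustrative; the real code uses get_unique_oen(). */
    static unsigned int unique_oen_sketch(uint32_t fid)
    {
            unsigned int oen  = (fid >> 24) & 0x3fU;  /* OEN, bits [29:24] */
            unsigned int type = (fid >> 31) & 0x1U;   /* call type, bit 31 */

            return (type << 6) | oen;
    }

The handler is then reached via
`rt_svc_descs[rt_svc_descs_indices[unique_oen_sketch(fid)]]`.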
diff --git a/common/aarch64/context.S b/common/aarch64/context.S
deleted file mode 100644 (file)
index d51daa7..0000000
+++ /dev/null
@@ -1,405 +0,0 @@
-/*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <asm_macros.S>
-#include <context.h>
-
-       .global el1_sysregs_context_save
-       .global el1_sysregs_context_restore
-#if CTX_INCLUDE_FPREGS
-       .global fpregs_context_save
-       .global fpregs_context_restore
-#endif
-       .global save_gp_registers
-       .global restore_gp_registers_eret
-       .global restore_gp_registers_callee_eret
-       .global el3_exit
-
-/* -----------------------------------------------------
- * The following function strictly follows the AArch64
- * PCS to use x9-x17 (temporary caller-saved registers)
- * to save EL1 system register context. It assumes that
- * 'x0' is pointing to a 'el1_sys_regs' structure where
- * the register context will be saved.
- * -----------------------------------------------------
- */
-func el1_sysregs_context_save
-
-       mrs     x9, spsr_el1
-       mrs     x10, elr_el1
-       stp     x9, x10, [x0, #CTX_SPSR_EL1]
-
-       mrs     x15, sctlr_el1
-       mrs     x16, actlr_el1
-       stp     x15, x16, [x0, #CTX_SCTLR_EL1]
-
-       mrs     x17, cpacr_el1
-       mrs     x9, csselr_el1
-       stp     x17, x9, [x0, #CTX_CPACR_EL1]
-
-       mrs     x10, sp_el1
-       mrs     x11, esr_el1
-       stp     x10, x11, [x0, #CTX_SP_EL1]
-
-       mrs     x12, ttbr0_el1
-       mrs     x13, ttbr1_el1
-       stp     x12, x13, [x0, #CTX_TTBR0_EL1]
-
-       mrs     x14, mair_el1
-       mrs     x15, amair_el1
-       stp     x14, x15, [x0, #CTX_MAIR_EL1]
-
-       mrs     x16, tcr_el1
-       mrs     x17, tpidr_el1
-       stp     x16, x17, [x0, #CTX_TCR_EL1]
-
-       mrs     x9, tpidr_el0
-       mrs     x10, tpidrro_el0
-       stp     x9, x10, [x0, #CTX_TPIDR_EL0]
-
-       mrs     x13, par_el1
-       mrs     x14, far_el1
-       stp     x13, x14, [x0, #CTX_PAR_EL1]
-
-       mrs     x15, afsr0_el1
-       mrs     x16, afsr1_el1
-       stp     x15, x16, [x0, #CTX_AFSR0_EL1]
-
-       mrs     x17, contextidr_el1
-       mrs     x9, vbar_el1
-       stp     x17, x9, [x0, #CTX_CONTEXTIDR_EL1]
-
-       /* Save AArch32 system registers if the build has instructed so */
-#if CTX_INCLUDE_AARCH32_REGS
-       mrs     x11, spsr_abt
-       mrs     x12, spsr_und
-       stp     x11, x12, [x0, #CTX_SPSR_ABT]
-
-       mrs     x13, spsr_irq
-       mrs     x14, spsr_fiq
-       stp     x13, x14, [x0, #CTX_SPSR_IRQ]
-
-       mrs     x15, dacr32_el2
-       mrs     x16, ifsr32_el2
-       stp     x15, x16, [x0, #CTX_DACR32_EL2]
-
-       mrs     x17, fpexc32_el2
-       str     x17, [x0, #CTX_FP_FPEXC32_EL2]
-#endif
-
-       /* Save NS timer registers if the build has instructed so */
-#if NS_TIMER_SWITCH
-       mrs     x10, cntp_ctl_el0
-       mrs     x11, cntp_cval_el0
-       stp     x10, x11, [x0, #CTX_CNTP_CTL_EL0]
-
-       mrs     x12, cntv_ctl_el0
-       mrs     x13, cntv_cval_el0
-       stp     x12, x13, [x0, #CTX_CNTV_CTL_EL0]
-
-       mrs     x14, cntkctl_el1
-       str     x14, [x0, #CTX_CNTKCTL_EL1]
-#endif
-
-       ret
-endfunc el1_sysregs_context_save
-
-/* -----------------------------------------------------
- * The following function strictly follows the AArch64
- * PCS to use x9-x17 (temporary caller-saved registers)
- * to restore EL1 system register context.  It assumes
- * that 'x0' is pointing to a 'el1_sys_regs' structure
- * from where the register context will be restored
- * -----------------------------------------------------
- */
-func el1_sysregs_context_restore
-
-       ldp     x9, x10, [x0, #CTX_SPSR_EL1]
-       msr     spsr_el1, x9
-       msr     elr_el1, x10
-
-       ldp     x15, x16, [x0, #CTX_SCTLR_EL1]
-       msr     sctlr_el1, x15
-       msr     actlr_el1, x16
-
-       ldp     x17, x9, [x0, #CTX_CPACR_EL1]
-       msr     cpacr_el1, x17
-       msr     csselr_el1, x9
-
-       ldp     x10, x11, [x0, #CTX_SP_EL1]
-       msr     sp_el1, x10
-       msr     esr_el1, x11
-
-       ldp     x12, x13, [x0, #CTX_TTBR0_EL1]
-       msr     ttbr0_el1, x12
-       msr     ttbr1_el1, x13
-
-       ldp     x14, x15, [x0, #CTX_MAIR_EL1]
-       msr     mair_el1, x14
-       msr     amair_el1, x15
-
-       ldp     x16, x17, [x0, #CTX_TCR_EL1]
-       msr     tcr_el1, x16
-       msr     tpidr_el1, x17
-
-       ldp     x9, x10, [x0, #CTX_TPIDR_EL0]
-       msr     tpidr_el0, x9
-       msr     tpidrro_el0, x10
-
-       ldp     x13, x14, [x0, #CTX_PAR_EL1]
-       msr     par_el1, x13
-       msr     far_el1, x14
-
-       ldp     x15, x16, [x0, #CTX_AFSR0_EL1]
-       msr     afsr0_el1, x15
-       msr     afsr1_el1, x16
-
-       ldp     x17, x9, [x0, #CTX_CONTEXTIDR_EL1]
-       msr     contextidr_el1, x17
-       msr     vbar_el1, x9
-
-       /* Restore AArch32 system registers if the build has instructed so */
-#if CTX_INCLUDE_AARCH32_REGS
-       ldp     x11, x12, [x0, #CTX_SPSR_ABT]
-       msr     spsr_abt, x11
-       msr     spsr_und, x12
-
-       ldp     x13, x14, [x0, #CTX_SPSR_IRQ]
-       msr     spsr_irq, x13
-       msr     spsr_fiq, x14
-
-       ldp     x15, x16, [x0, #CTX_DACR32_EL2]
-       msr     dacr32_el2, x15
-       msr     ifsr32_el2, x16
-
-       ldr     x17, [x0, #CTX_FP_FPEXC32_EL2]
-       msr     fpexc32_el2, x17
-#endif
-       /* Restore NS timer registers if the build has instructed so */
-#if NS_TIMER_SWITCH
-       ldp     x10, x11, [x0, #CTX_CNTP_CTL_EL0]
-       msr     cntp_ctl_el0, x10
-       msr     cntp_cval_el0, x11
-
-       ldp     x12, x13, [x0, #CTX_CNTV_CTL_EL0]
-       msr     cntv_ctl_el0, x12
-       msr     cntv_cval_el0, x13
-
-       ldr     x14, [x0, #CTX_CNTKCTL_EL1]
-       msr     cntkctl_el1, x14
-#endif
-
-       /* No explicit ISB required here as ERET covers it */
-       ret
-endfunc el1_sysregs_context_restore
-
-/* -----------------------------------------------------
- * The following function follows the aapcs_64 strictly
- * to use x9-x17 (temporary caller-saved registers
- * according to AArch64 PCS) to save floating point
- * register context. It assumes that 'x0' is pointing to
- * a 'fp_regs' structure where the register context will
- * be saved.
- *
- * Access to VFP registers will trap if CPTR_EL3.TFP is
- * set.  However currently we don't use VFP registers
- * nor set traps in Trusted Firmware, and assume it's
- * cleared
- *
- * TODO: Revisit when VFP is used in secure world
- * -----------------------------------------------------
- */
-#if CTX_INCLUDE_FPREGS
-func fpregs_context_save
-       stp     q0, q1, [x0, #CTX_FP_Q0]
-       stp     q2, q3, [x0, #CTX_FP_Q2]
-       stp     q4, q5, [x0, #CTX_FP_Q4]
-       stp     q6, q7, [x0, #CTX_FP_Q6]
-       stp     q8, q9, [x0, #CTX_FP_Q8]
-       stp     q10, q11, [x0, #CTX_FP_Q10]
-       stp     q12, q13, [x0, #CTX_FP_Q12]
-       stp     q14, q15, [x0, #CTX_FP_Q14]
-       stp     q16, q17, [x0, #CTX_FP_Q16]
-       stp     q18, q19, [x0, #CTX_FP_Q18]
-       stp     q20, q21, [x0, #CTX_FP_Q20]
-       stp     q22, q23, [x0, #CTX_FP_Q22]
-       stp     q24, q25, [x0, #CTX_FP_Q24]
-       stp     q26, q27, [x0, #CTX_FP_Q26]
-       stp     q28, q29, [x0, #CTX_FP_Q28]
-       stp     q30, q31, [x0, #CTX_FP_Q30]
-
-       mrs     x9, fpsr
-       str     x9, [x0, #CTX_FP_FPSR]
-
-       mrs     x10, fpcr
-       str     x10, [x0, #CTX_FP_FPCR]
-
-       ret
-endfunc fpregs_context_save
-
-/* -----------------------------------------------------
- * The following function follows the aapcs_64 strictly
- * to use x9-x17 (temporary caller-saved registers
- * according to AArch64 PCS) to restore floating point
- * register context. It assumes that 'x0' is pointing to
- * a 'fp_regs' structure from where the register context
- * will be restored.
- *
- * Access to VFP registers will trap if CPTR_EL3.TFP is
- * set.  However currently we don't use VFP registers
- * nor set traps in Trusted Firmware, and assume it's
- * cleared
- *
- * TODO: Revisit when VFP is used in secure world
- * -----------------------------------------------------
- */
-func fpregs_context_restore
-       ldp     q0, q1, [x0, #CTX_FP_Q0]
-       ldp     q2, q3, [x0, #CTX_FP_Q2]
-       ldp     q4, q5, [x0, #CTX_FP_Q4]
-       ldp     q6, q7, [x0, #CTX_FP_Q6]
-       ldp     q8, q9, [x0, #CTX_FP_Q8]
-       ldp     q10, q11, [x0, #CTX_FP_Q10]
-       ldp     q12, q13, [x0, #CTX_FP_Q12]
-       ldp     q14, q15, [x0, #CTX_FP_Q14]
-       ldp     q16, q17, [x0, #CTX_FP_Q16]
-       ldp     q18, q19, [x0, #CTX_FP_Q18]
-       ldp     q20, q21, [x0, #CTX_FP_Q20]
-       ldp     q22, q23, [x0, #CTX_FP_Q22]
-       ldp     q24, q25, [x0, #CTX_FP_Q24]
-       ldp     q26, q27, [x0, #CTX_FP_Q26]
-       ldp     q28, q29, [x0, #CTX_FP_Q28]
-       ldp     q30, q31, [x0, #CTX_FP_Q30]
-
-       ldr     x9, [x0, #CTX_FP_FPSR]
-       msr     fpsr, x9
-
-       ldr     x10, [x0, #CTX_FP_FPCR]
-       msr     fpcr, x10
-
-       /*
-        * No explicit ISB required here as ERET to
-        * switch to secure EL1 or non-secure world
-        * covers it
-        */
-
-       ret
-endfunc fpregs_context_restore
-#endif /* CTX_INCLUDE_FPREGS */
-
-/* -----------------------------------------------------
- * The following functions are used to save and restore
- * all the general purpose registers. Ideally we would
- * only save and restore the callee saved registers when
- * a world switch occurs but that type of implementation
- * is more complex. So currently we will always save and
- * restore these registers on entry and exit of EL3.
- * These are not macros to ensure their invocation fits
- * within the 32 instructions per exception vector.
- * clobbers: x18
- * -----------------------------------------------------
- */
-func save_gp_registers
-       stp     x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
-       stp     x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
-       stp     x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
-       stp     x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
-       stp     x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
-       stp     x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
-       stp     x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
-       stp     x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
-       stp     x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
-       stp     x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
-       stp     x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
-       stp     x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
-       stp     x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
-       stp     x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
-       stp     x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
-       mrs     x18, sp_el0
-       str     x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
-       ret
-endfunc save_gp_registers
-
-func restore_gp_registers_eret
-       ldp     x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
-       ldp     x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
-       b       restore_gp_registers_callee_eret
-endfunc restore_gp_registers_eret
-
-func restore_gp_registers_callee_eret
-       ldp     x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
-       ldp     x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
-       ldp     x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
-       ldp     x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
-       ldp     x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
-       ldp     x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
-       ldp     x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
-       ldp     x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
-       ldp     x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
-       ldp     x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
-       ldp     x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
-       ldp     x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
-       ldp      x30, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
-       msr     sp_el0, x17
-       ldp     x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
-       eret
-endfunc        restore_gp_registers_callee_eret
-
-       /* -----------------------------------------------------
-        * This routine assumes that the SP_EL3 is pointing to
-        * a valid context structure from where the gp regs and
-        * other special registers can be retrieved.
-        * -----------------------------------------------------
-        */
-func el3_exit
-       /* -----------------------------------------------------
-        * Save the current SP_EL0 i.e. the EL3 runtime stack
-        * which will be used for handling the next SMC. Then
-        * switch to SP_EL3
-        * -----------------------------------------------------
-        */
-       mov     x17, sp
-       msr     spsel, #1
-       str     x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
-
-       /* -----------------------------------------------------
-        * Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
-        * -----------------------------------------------------
-        */
-       ldr     x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
-       ldp     x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
-       msr     scr_el3, x18
-       msr     spsr_el3, x16
-       msr     elr_el3, x17
-
-       /* Restore saved general purpose registers and return */
-       b       restore_gp_registers_eret
-endfunc el3_exit
diff --git a/common/context_mgmt.c b/common/context_mgmt.c
deleted file mode 100644 (file)
index 4527aa3..0000000
+++ /dev/null
@@ -1,383 +0,0 @@
-/*
- * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <arch_helpers.h>
-#include <assert.h>
-#include <bl_common.h>
-#include <context.h>
-#include <context_mgmt.h>
-#include <interrupt_mgmt.h>
-#include <platform.h>
-#include <platform_def.h>
-#include <smcc_helpers.h>
-#include <string.h>
-
-
-/*******************************************************************************
- * Context management library initialisation routine. This library is used by
- * runtime services to share pointers to 'cpu_context' structures for the secure
- * and non-secure states. Management of the structures and their associated
- * memory is not done by the context management library e.g. the PSCI service
- * manages the cpu context used for entry from and exit to the non-secure state.
- * The Secure payload dispatcher service manages the context(s) corresponding to
- * the secure state. It also uses this library to get access to the non-secure
- * state cpu context pointers.
- * Lastly, this library provides the API to make SP_EL3 point to the cpu context
- * which will be used for programming an entry into a lower EL. The same context
- * will be used to save state upon exception entry from that EL.
- ******************************************************************************/
-void cm_init(void)
-{
-       /*
-        * The context management library has only global data to initialize, but
-        * that will be done when the BSS is zeroed out
-        */
-}
-
-/*******************************************************************************
- * The following function initializes the cpu_context 'ctx' for
- * first use, and sets the initial entrypoint state as specified by the
- * entry_point_info structure.
- *
- * The security state to initialize is determined by the SECURE attribute
- * of the entry_point_info. The function returns a pointer to the initialized
- * context and sets this as the next context to return to.
- *
- * The EE and ST attributes are used to configure the endianness and secure
- * timer availability for the new execution context.
- *
- * To prepare the register state for entry call cm_prepare_el3_exit() and
- * el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to
- * cm_el1_sysregs_context_restore().
- ******************************************************************************/
-static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
-{
-       unsigned int security_state;
-       uint32_t scr_el3;
-       el3_state_t *state;
-       gp_regs_t *gp_regs;
-       unsigned long sctlr_elx;
-
-       assert(ctx);
-
-       security_state = GET_SECURITY_STATE(ep->h.attr);
-
-       /* Clear any residual register values from the context */
-       memset(ctx, 0, sizeof(*ctx));
-
-       /*
-        * Base the context SCR on the current value, adjust for entry point
-        * specific requirements and set trap bits from the IMF
-        * TODO: provide the base/global SCR bits using another mechanism?
-        */
-       scr_el3 = read_scr();
-       scr_el3 &= ~(SCR_NS_BIT | SCR_RW_BIT | SCR_FIQ_BIT | SCR_IRQ_BIT |
-                       SCR_ST_BIT | SCR_HCE_BIT);
-
-       if (security_state != SECURE)
-               scr_el3 |= SCR_NS_BIT;
-
-       if (GET_RW(ep->spsr) == MODE_RW_64)
-               scr_el3 |= SCR_RW_BIT;
-
-       if (EP_GET_ST(ep->h.attr))
-               scr_el3 |= SCR_ST_BIT;
-
-#ifndef HANDLE_EA_EL3_FIRST
-       /* Explicitly stop to trap aborts from lower exception levels. */
-       scr_el3 &= ~SCR_EA_BIT;
-#endif
-
-#if IMAGE_BL31
-       /*
-        * IRQ/FIQ bits only need setting if interrupt routing
-        * model has been set up for BL31.
-        */
-       scr_el3 |= get_scr_el3_from_routing_model(security_state);
-#endif
-
-       /*
-        * Set up SCTLR_ELx for the target exception level:
-        * EE bit is taken from the entrypoint attributes
-        * M, C and I bits must be zero (as required by PSCI specification)
-        *
-        * The target exception level is based on the spsr mode requested.
-        * If execution is requested to EL2 or hyp mode, HVC is enabled
-        * via SCR_EL3.HCE.
-        *
-        * Always compute the SCTLR_EL1 value and save in the cpu_context
-        * - the EL2 registers are set up by cm_prepare_el3_exit() as they
-        * are not part of the stored cpu_context
-        *
-        * TODO: In debug builds the spsr should be validated and checked
-        * against the CPU support, security state, endianness and pc
-        */
-       sctlr_elx = EP_GET_EE(ep->h.attr) ? SCTLR_EE_BIT : 0;
-       if (GET_RW(ep->spsr) == MODE_RW_64)
-               sctlr_elx |= SCTLR_EL1_RES1;
-       else
-               sctlr_elx |= SCTLR_AARCH32_EL1_RES1;
-       write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);
-
-       if ((GET_RW(ep->spsr) == MODE_RW_64
-            && GET_EL(ep->spsr) == MODE_EL2)
-           || (GET_RW(ep->spsr) != MODE_RW_64
-               && GET_M32(ep->spsr) == MODE32_hyp)) {
-               scr_el3 |= SCR_HCE_BIT;
-       }
-
-       /* Populate EL3 state so that we have the right context before doing ERET */
-       state = get_el3state_ctx(ctx);
-       write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
-       write_ctx_reg(state, CTX_ELR_EL3, ep->pc);
-       write_ctx_reg(state, CTX_SPSR_EL3, ep->spsr);
-
-       /*
-        * Store the X0-X7 value from the entrypoint into the context
-        * Use memcpy as we are in control of the layout of the structures
-        */
-       gp_regs = get_gpregs_ctx(ctx);
-       memcpy(gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t));
-}
-
-/*******************************************************************************
- * The following function initializes the cpu_context for a CPU specified by
- * its `cpu_idx` for first use, and sets the initial entrypoint state as
- * specified by the entry_point_info structure.
- ******************************************************************************/
-void cm_init_context_by_index(unsigned int cpu_idx,
-                             const entry_point_info_t *ep)
-{
-       cpu_context_t *ctx;
-       ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
-       cm_init_context_common(ctx, ep);
-}
-
-/*******************************************************************************
- * The following function initializes the cpu_context for the current CPU
- * for first use, and sets the initial entrypoint state as specified by the
- * entry_point_info structure.
- ******************************************************************************/
-void cm_init_my_context(const entry_point_info_t *ep)
-{
-       cpu_context_t *ctx;
-       ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
-       cm_init_context_common(ctx, ep);
-}
-
-/*******************************************************************************
- * Prepare the CPU system registers for first entry into secure or normal world
- *
- * If execution is requested to EL2 or hyp mode, SCTLR_EL2 is initialized
- * If execution is requested to non-secure EL1 or svc mode, and the CPU supports
- * EL2 then EL2 is disabled by configuring all necessary EL2 registers.
- * For all entries, the EL1 registers are initialized from the cpu_context
- ******************************************************************************/
-void cm_prepare_el3_exit(uint32_t security_state)
-{
-       uint32_t sctlr_elx, scr_el3, cptr_el2;
-       cpu_context_t *ctx = cm_get_context(security_state);
-
-       assert(ctx);
-
-       if (security_state == NON_SECURE) {
-               scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);
-               if (scr_el3 & SCR_HCE_BIT) {
-                       /* Use SCTLR_EL1.EE value to initialise sctlr_el2 */
-                       sctlr_elx = read_ctx_reg(get_sysregs_ctx(ctx),
-                                                CTX_SCTLR_EL1);
-                       sctlr_elx &= ~SCTLR_EE_BIT;
-                       sctlr_elx |= SCTLR_EL2_RES1;
-                       write_sctlr_el2(sctlr_elx);
-               } else if (read_id_aa64pfr0_el1() &
-                          (ID_AA64PFR0_ELX_MASK << ID_AA64PFR0_EL2_SHIFT)) {
-                       /* EL2 present but unused, need to disable safely */
-
-                       /* HCR_EL2 = 0, except RW bit set to match SCR_EL3 */
-                       write_hcr_el2((scr_el3 & SCR_RW_BIT) ? HCR_RW_BIT : 0);
-
-                       /* SCTLR_EL2 : can be ignored when bypassing */
-
-                       /* CPTR_EL2 : disable all traps TCPAC, TTA, TFP */
-                       cptr_el2 = read_cptr_el2();
-                       cptr_el2 &= ~(TCPAC_BIT | TTA_BIT | TFP_BIT);
-                       write_cptr_el2(cptr_el2);
-
-                       /* Enable EL1 access to timer */
-                       write_cnthctl_el2(EL1PCEN_BIT | EL1PCTEN_BIT);
-
-                       /* Reset CNTVOFF_EL2 */
-                       write_cntvoff_el2(0);
-
-                       /* Set VPIDR, VMPIDR to match MIDR, MPIDR */
-                       write_vpidr_el2(read_midr_el1());
-                       write_vmpidr_el2(read_mpidr_el1());
-
-                       /*
-                        * Reset VTTBR_EL2.
-                        * Needed because cache maintenance operations depend on
-                        * the VMID even when non-secure EL1&0 stage 2 address
-                        * translation are disabled.
-                        */
-                       write_vttbr_el2(0);
-               }
-       }
-
-       el1_sysregs_context_restore(get_sysregs_ctx(ctx));
-
-       cm_set_next_context(ctx);
-}
-
-/*******************************************************************************
- * The next four functions are used by runtime services to save and restore
- * EL1 context on the 'cpu_context' structure for the specified security
- * state.
- ******************************************************************************/
-void cm_el1_sysregs_context_save(uint32_t security_state)
-{
-       cpu_context_t *ctx;
-
-       ctx = cm_get_context(security_state);
-       assert(ctx);
-
-       el1_sysregs_context_save(get_sysregs_ctx(ctx));
-}
-
-void cm_el1_sysregs_context_restore(uint32_t security_state)
-{
-       cpu_context_t *ctx;
-
-       ctx = cm_get_context(security_state);
-       assert(ctx);
-
-       el1_sysregs_context_restore(get_sysregs_ctx(ctx));
-}
-
-/*******************************************************************************
- * This function populates ELR_EL3 member of 'cpu_context' pertaining to the
- * given security state with the given entrypoint
- ******************************************************************************/
-void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint)
-{
-       cpu_context_t *ctx;
-       el3_state_t *state;
-
-       ctx = cm_get_context(security_state);
-       assert(ctx);
-
-       /* Populate EL3 state so that ERET jumps to the correct entry */
-       state = get_el3state_ctx(ctx);
-       write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
-}
-
-/*******************************************************************************
- * This function populates ELR_EL3 and SPSR_EL3 members of 'cpu_context'
- * pertaining to the given security state
- ******************************************************************************/
-void cm_set_elr_spsr_el3(uint32_t security_state,
-                       uintptr_t entrypoint, uint32_t spsr)
-{
-       cpu_context_t *ctx;
-       el3_state_t *state;
-
-       ctx = cm_get_context(security_state);
-       assert(ctx);
-
-       /* Populate EL3 state so that ERET jumps to the correct entry */
-       state = get_el3state_ctx(ctx);
-       write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
-       write_ctx_reg(state, CTX_SPSR_EL3, spsr);
-}
-
-/*******************************************************************************
- * This function updates a single bit in the SCR_EL3 member of the 'cpu_context'
- * pertaining to the given security state using the value and bit position
- * specified in the parameters. It preserves all other bits.
- ******************************************************************************/
-void cm_write_scr_el3_bit(uint32_t security_state,
-                         uint32_t bit_pos,
-                         uint32_t value)
-{
-       cpu_context_t *ctx;
-       el3_state_t *state;
-       uint32_t scr_el3;
-
-       ctx = cm_get_context(security_state);
-       assert(ctx);
-
-       /* Ensure that the bit position is a valid one */
-       assert((1 << bit_pos) & SCR_VALID_BIT_MASK);
-
-       /* Ensure that the 'value' is only a bit wide */
-       assert(value <= 1);
-
-       /*
-        * Get the SCR_EL3 value from the cpu context, clear the desired bit
-        * and set it to its new value.
-        */
-       state = get_el3state_ctx(ctx);
-       scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
-       scr_el3 &= ~(1 << bit_pos);
-       scr_el3 |= value << bit_pos;
-       write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
-}
-
-/*******************************************************************************
- * This function retrieves SCR_EL3 member of 'cpu_context' pertaining to the
- * given security state.
- ******************************************************************************/
-uint32_t cm_get_scr_el3(uint32_t security_state)
-{
-       cpu_context_t *ctx;
-       el3_state_t *state;
-
-       ctx = cm_get_context(security_state);
-       assert(ctx);
-
-       /* Read the SCR_EL3 value from the saved EL3 state */
-       state = get_el3state_ctx(ctx);
-       return read_ctx_reg(state, CTX_SCR_EL3);
-}
-
-/*******************************************************************************
- * This function is used to program the context that's used for exception
- * return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for
- * the required security state
- ******************************************************************************/
-void cm_set_next_eret_context(uint32_t security_state)
-{
-       cpu_context_t *ctx;
-
-       ctx = cm_get_context(security_state);
-       assert(ctx);
-
-       cm_set_next_context(ctx);
-}
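
As a usage note, the typical world-switch sequence a Secure Payload
Dispatcher performs with the API above looks roughly as follows (a sketch;
`SECURE`/`NON_SECURE` are assumed to come from the generic headers and
error handling is omitted):

    #include <bl_common.h>      /* SECURE, NON_SECURE (assumed location) */
    #include <context_mgmt.h>

    /* Sketch: hand over from the secure world to the non-secure world. */
    static void switch_to_normal_world_sketch(void)
    {
            cm_el1_sysregs_context_save(SECURE);        /* stash secure EL1 state */
            cm_el1_sysregs_context_restore(NON_SECURE); /* load NS EL1 state */
            cm_set_next_eret_context(NON_SECURE);       /* SP_EL3 -> NS context */
            /* el3_exit (see context.S) then restores the GP registers and
             * ERETs into the non-secure world. */
    }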
diff --git a/common/runtime_svc.c b/common/runtime_svc.c
new file mode 100644 (file)
index 0000000..8729e29
--- /dev/null
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <errno.h>
+#include <runtime_svc.h>
+#include <string.h>
+
+/*******************************************************************************
+ * The 'rt_svc_descs' array holds the runtime service descriptors exported by
+ * services by placing them in the 'rt_svc_descs' linker section.
+ * The 'rt_svc_descs_indices' array holds the index of a descriptor in the
+ * 'rt_svc_descs' array. When an SMC arrives, the OEN[29:24] bits and the call
+ * type[31] bit in the function id are combined to get an index into the
+ * 'rt_svc_descs_indices' array. This gives the index of the descriptor in the
+ * 'rt_svc_descs' array which contains the SMC handler.
+ ******************************************************************************/
+#define RT_SVC_DESCS_START     ((uintptr_t) (&__RT_SVC_DESCS_START__))
+#define RT_SVC_DESCS_END       ((uintptr_t) (&__RT_SVC_DESCS_END__))
+uint8_t rt_svc_descs_indices[MAX_RT_SVCS];
+static rt_svc_desc_t *rt_svc_descs;
+
+#define RT_SVC_DECS_NUM                ((RT_SVC_DESCS_END - RT_SVC_DESCS_START)\
+                                       / sizeof(rt_svc_desc_t))
+
+/*******************************************************************************
+ * Simple routine to sanity check a runtime service descriptor before using it
+ ******************************************************************************/
+static int32_t validate_rt_svc_desc(rt_svc_desc_t *desc)
+{
+       if (desc == NULL)
+               return -EINVAL;
+
+       if (desc->start_oen > desc->end_oen)
+               return -EINVAL;
+
+       if (desc->end_oen >= OEN_LIMIT)
+               return -EINVAL;
+
+       if (desc->call_type != SMC_TYPE_FAST && desc->call_type != SMC_TYPE_STD)
+               return -EINVAL;
+
+       /* A runtime service having no init or handle function doesn't make sense */
+       if (desc->init == NULL && desc->handle == NULL)
+               return -EINVAL;
+
+       return 0;
+}
+
+/*******************************************************************************
+ * This function calls the initialisation routine in the descriptor exported by
+ * a runtime service. Once a descriptor has been validated, its start & end
+ * owning entity numbers and the call type are combined to form a unique oen.
+ * The unique oen is used as an index into the 'rt_svc_descs_indices' array.
+ * The index of the runtime service descriptor is stored at this index.
+ ******************************************************************************/
+void runtime_svc_init(void)
+{
+       int rc = 0, index, start_idx, end_idx;
+
+       /* Assert that the number of descriptors detected is less than the maximum */
+       assert((RT_SVC_DECS_NUM >= 0) && (RT_SVC_DECS_NUM < MAX_RT_SVCS));
+
+       /* If no runtime services are implemented then simply bail out */
+       if (RT_SVC_DECS_NUM == 0)
+               return;
+
+       /* Initialise internal variables to invalid state */
+       memset(rt_svc_descs_indices, -1, sizeof(rt_svc_descs_indices));
+
+       rt_svc_descs = (rt_svc_desc_t *) RT_SVC_DESCS_START;
+       for (index = 0; index < RT_SVC_DECS_NUM; index++) {
+
+               /*
+                * An invalid descriptor is an error condition since it is
+                * difficult to predict the system behaviour in the absence
+                * of this service.
+                */
+               rc = validate_rt_svc_desc(&rt_svc_descs[index]);
+               if (rc) {
+                       ERROR("Invalid runtime service descriptor %p (%s)\n",
+                                       (void *) &rt_svc_descs[index],
+                                       rt_svc_descs[index].name);
+                       goto error;
+               }
+
+               /*
+                * A runtime service may have separate rt_svc_desc_t entries
+                * for its fast and standard SMCs. Since the service itself
+                * needs to be initialised only once, only one of them will
+                * have an initialisation routine defined. Call this
+                * initialisation routine, if it is defined.
+                */
+               if (rt_svc_descs[index].init) {
+                       rc = rt_svc_descs[index].init();
+                       if (rc) {
+                               ERROR("Error initializing runtime service %s\n",
+                                               rt_svc_descs[index].name);
+                               continue;
+                       }
+               }
+
+               /*
+                * Fill the indices corresponding to the start and end
+                * owning entity numbers with the index of the
+                * descriptor which will handle the SMCs for this owning
+                * entity range.
+                */
+               start_idx = get_unique_oen(rt_svc_descs[index].start_oen,
+                               rt_svc_descs[index].call_type);
+               end_idx = get_unique_oen(rt_svc_descs[index].end_oen,
+                               rt_svc_descs[index].call_type);
+
+               for (; start_idx <= end_idx; start_idx++)
+                       rt_svc_descs_indices[start_idx] = index;
+       }
+
+       return;
+error:
+       panic();
+}
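
To see the framework end to end, here is a hedged sketch of a service registering with it; the choice of the OEM OEN range and the echo behaviour are illustrative only:

```c
#include <runtime_svc.h>
#include <smcc_helpers.h>

static int32_t demo_svc_setup(void)
{
	/* One-time initialisation; a non-zero return is reported by
	 * runtime_svc_init() and the service is skipped. */
	return 0;
}

static uintptr_t demo_svc_smc_handler(uint32_t smc_fid,
				      u_register_t x1, u_register_t x2,
				      u_register_t x3, u_register_t x4,
				      void *cookie, void *handle,
				      u_register_t flags)
{
	/* Echo the first argument back to the caller. */
	SMC_RET1(handle, x1);
}

/* Claim the OEM-owned OEN range for fast SMCs. */
DECLARE_RT_SVC(demo_svc, OEN_OEM_START, OEN_OEM_END,
	       SMC_TYPE_FAST, demo_svc_setup, demo_svc_smc_handler);
```

With this in place, `runtime_svc_init()` fills `rt_svc_descs_indices[]` so that any fast SMC whose OEN falls in the OEM range reaches `demo_svc_smc_handler`.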
index b99a2838f905d109285d464ab383b09827cbd7cc..d9f9ff025286dd0babee6c7ebc17ec5c0bdc99a4 100644 (file)
@@ -1779,10 +1779,11 @@ following categories (present as directories in the source code):
     the platform.
 *   **Common code.** This is platform and architecture agnostic code.
 *   **Library code.** This code comprises of functionality commonly used by all
-    other code.
+    other code. The PSCI implementation and other EL3 runtime frameworks now
+    reside here as library components.
 *   **Stage specific.** Code specific to a boot stage.
 *   **Drivers.**
-*   **Services.** EL3 runtime services, e.g. PSCI or SPD. Specific SPD services
+*   **Services.** EL3 runtime services, e.g. SPD. Specific SPD services
     reside in the `services/spd` directory (e.g. `services/spd/tspd`).
 
 Each boot loader stage uses code from one or more of the above mentioned
index 856e8feba7e3b4c83705b140857de304a36ae941..7fe71851526daa258d0437370d9ff2f0ff9d5d26 100644 (file)
@@ -95,8 +95,7 @@ handler will be responsible for all SMC Functions within a given service type.
 
 ARM Trusted Firmware has a [`services`] directory in the source tree under which
 each owning entity can place the implementation of its runtime service.  The
-[PSCI] implementation is located here in the [`services/std_svc/psci`]
-directory.
+[PSCI] implementation, however, is located in the [`lib/psci`] directory.
 
 Runtime service sources will need to include the [`runtime_svc.h`] header file.
 
@@ -299,12 +298,11 @@ provide this information....
 _Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved._
 
 
-[Firmware Design]:  ./firmware-design.md
-
+[Firmware Design]:          ./firmware-design.md
 [`services`]:               ../services
-[`services/std_svc/psci`]:  ../services/std_svc/psci
+[`lib/psci`]:               ../lib/psci
 [`std_svc_setup.c`]:        ../services/std_svc/std_svc_setup.c
-[`runtime_svc.h`]:          ../include/bl31/runtime_svc.h
+[`runtime_svc.h`]:          ../include/common/runtime_svc.h
 [`smcc_helpers.h`]:          ../include/common/smcc_helpers.h
 [PSCI]:                     http://infocenter.arm.com/help/topic/com.arm.doc.den0022c/DEN0022C_Power_State_Coordination_Interface.pdf "Power State Coordination Interface PDD (ARM DEN 0022C)"
 [SMCCC]:                    http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html "SMC Calling Convention PDD (ARM DEN 0028A)"
diff --git a/include/bl31/cpu_data.h b/include/bl31/cpu_data.h
deleted file mode 100644 (file)
index 4fc801b..0000000
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __CPU_DATA_H__
-#define __CPU_DATA_H__
-
-/* Offsets for the cpu_data structure */
-#define CPU_DATA_CRASH_BUF_OFFSET      0x18
-#if CRASH_REPORTING
-#define CPU_DATA_LOG2SIZE              7
-#else
-#define CPU_DATA_LOG2SIZE              6
-#endif
-/* need enough space in crash buffer to save 8 registers */
-#define CPU_DATA_CRASH_BUF_SIZE                64
-#define CPU_DATA_CPU_OPS_PTR           0x10
-
-#ifndef __ASSEMBLY__
-
-#include <arch_helpers.h>
-#include <cassert.h>
-#include <platform_def.h>
-#include <psci.h>
-#include <stdint.h>
-
-/* Offsets for the cpu_data structure */
-#define CPU_DATA_PSCI_LOCK_OFFSET      __builtin_offsetof\
-               (cpu_data_t, psci_svc_cpu_data.pcpu_bakery_info)
-
-#if PLAT_PCPU_DATA_SIZE
-#define CPU_DATA_PLAT_PCPU_OFFSET      __builtin_offsetof\
-               (cpu_data_t, platform_cpu_data)
-#endif
-
-/*******************************************************************************
- * Function & variable prototypes
- ******************************************************************************/
-
-/*******************************************************************************
- * Cache of frequently used per-cpu data:
- *   Pointers to non-secure and secure security state contexts
- *   Address of the crash stack
- * It is aligned to the cache line boundary to allow efficient concurrent
- * manipulation of these pointers on different cpus
- *
- * TODO: Add other commonly used variables to this (tf_issues#90)
- *
- * The data structure and the _cpu_data accessors should not be used directly
- * by components that have per-cpu members. The member access macros should be
- * used for this.
- ******************************************************************************/
-typedef struct cpu_data {
-       void *cpu_context[2];
-       uintptr_t cpu_ops_ptr;
-#if CRASH_REPORTING
-       u_register_t crash_buf[CPU_DATA_CRASH_BUF_SIZE >> 3];
-#endif
-       struct psci_cpu_data psci_svc_cpu_data;
-#if PLAT_PCPU_DATA_SIZE
-       uint8_t platform_cpu_data[PLAT_PCPU_DATA_SIZE];
-#endif
-} __aligned(CACHE_WRITEBACK_GRANULE) cpu_data_t;
-
-#if CRASH_REPORTING
-/* verify assembler offsets match data structures */
-CASSERT(CPU_DATA_CRASH_BUF_OFFSET == __builtin_offsetof
-       (cpu_data_t, crash_buf),
-       assert_cpu_data_crash_stack_offset_mismatch);
-#endif
-
-CASSERT((1 << CPU_DATA_LOG2SIZE) == sizeof(cpu_data_t),
-       assert_cpu_data_log2size_mismatch);
-
-CASSERT(CPU_DATA_CPU_OPS_PTR == __builtin_offsetof
-               (cpu_data_t, cpu_ops_ptr),
-               assert_cpu_data_cpu_ops_ptr_offset_mismatch);
-
-struct cpu_data *_cpu_data_by_index(uint32_t cpu_index);
-
-/* Return the cpu_data structure for the current CPU. */
-static inline struct cpu_data *_cpu_data(void)
-{
-       return (cpu_data_t *)read_tpidr_el3();
-}
-
-
-/**************************************************************************
- * APIs for initialising and accessing per-cpu data
- *************************************************************************/
-
-void init_cpu_data_ptr(void);
-void init_cpu_ops(void);
-
-#define get_cpu_data(_m)                  _cpu_data()->_m
-#define set_cpu_data(_m, _v)              _cpu_data()->_m = _v
-#define get_cpu_data_by_index(_ix, _m)    _cpu_data_by_index(_ix)->_m
-#define set_cpu_data_by_index(_ix, _m, _v) _cpu_data_by_index(_ix)->_m = _v
-
-#define flush_cpu_data(_m)        flush_dcache_range((uintptr_t)         \
-                                                     &(_cpu_data()->_m), \
-                                                     sizeof(_cpu_data()->_m))
-#define inv_cpu_data(_m)          inv_dcache_range((uintptr_t)           \
-                                                     &(_cpu_data()->_m), \
-                                                     sizeof(_cpu_data()->_m))
-#define flush_cpu_data_by_index(_ix, _m)       \
-                                  flush_dcache_range((uintptr_t)         \
-                                        &(_cpu_data_by_index(_ix)->_m),  \
-                                        sizeof(_cpu_data_by_index(_ix)->_m))
-
-
-#endif /* __ASSEMBLY__ */
-#endif /* __CPU_DATA_H__ */
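
This header reappears unchanged as `include/lib/el3_runtime/cpu_data.h`. A hedged sketch of the accessor macros in action; the wrapper function is illustrative, while the member written is a real `cpu_data_t` field:

```c
#include <cpu_data.h>

static void demo_set_my_aff_state(aff_info_state_t state)
{
	/* Update this CPU's affinity-info state and flush it so that a
	 * CPU running with its caches disabled observes the update. */
	set_cpu_data(psci_svc_cpu_data.aff_info_state, state);
	flush_cpu_data(psci_svc_cpu_data.aff_info_state);
}
```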
diff --git a/include/bl31/runtime_svc.h b/include/bl31/runtime_svc.h
deleted file mode 100644 (file)
index adafcee..0000000
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __RUNTIME_SVC_H__
-#define __RUNTIME_SVC_H__
-
-#include <bl_common.h>         /* to include exception types */
-#include <smcc_helpers.h>      /* to include SMCC definitions */
-
-
-/*******************************************************************************
- * Structure definition, typedefs & constants for the runtime service framework
- ******************************************************************************/
-
-/*
- * Constants to allow the assembler access a runtime service
- * descriptor
- */
-#define RT_SVC_SIZE_LOG2       5
-#define SIZEOF_RT_SVC_DESC     (1 << RT_SVC_SIZE_LOG2)
-#define RT_SVC_DESC_INIT       16
-#define RT_SVC_DESC_HANDLE     24
-
-/*
- * The function identifier has 6 bits for the owning entity number and
- * single bit for the type of smc call. When taken together these
- * values limit the maximum number of runtime services to 128.
- */
-#define MAX_RT_SVCS            128
-
-#ifndef __ASSEMBLY__
-
-/* Prototype for runtime service initializing function */
-typedef int32_t (*rt_svc_init_t)(void);
-
-/*
- * Prototype for runtime service SMC handler function. x0 (SMC Function ID) to
- * x4 are as passed by the caller. Rest of the arguments to SMC and the context
- * can be accessed using the handle pointer. The cookie parameter is reserved
- * for future use
- */
-typedef uintptr_t (*rt_svc_handle_t)(uint32_t smc_fid,
-                                 u_register_t x1,
-                                 u_register_t x2,
-                                 u_register_t x3,
-                                 u_register_t x4,
-                                 void *cookie,
-                                 void *handle,
-                                 u_register_t flags);
-typedef struct rt_svc_desc {
-       uint8_t start_oen;
-       uint8_t end_oen;
-       uint8_t call_type;
-       const char *name;
-       rt_svc_init_t init;
-       rt_svc_handle_t handle;
-} rt_svc_desc_t;
-
-/*
- * Convenience macro to declare a service descriptor
- */
-#define DECLARE_RT_SVC(_name, _start, _end, _type, _setup, _smch) \
-       static const rt_svc_desc_t __svc_desc_ ## _name \
-               __section("rt_svc_descs") __used = { \
-                       .start_oen = _start, \
-                       .end_oen = _end, \
-                       .call_type = _type, \
-                       .name = #_name, \
-                       .init = _setup, \
-                       .handle = _smch }
-
-/*
- * Compile time assertions related to the 'rt_svc_desc' structure to:
- * 1. ensure that the assembler and the compiler view of the size
- *    of the structure are the same.
- * 2. ensure that the assembler and the compiler see the initialisation
- *    routine at the same offset.
- * 3. ensure that the assembler and the compiler see the handler
- *    routine at the same offset.
- */
-CASSERT((sizeof(rt_svc_desc_t) == SIZEOF_RT_SVC_DESC), \
-       assert_sizeof_rt_svc_desc_mismatch);
-CASSERT(RT_SVC_DESC_INIT == __builtin_offsetof(rt_svc_desc_t, init), \
-       assert_rt_svc_desc_init_offset_mismatch);
-CASSERT(RT_SVC_DESC_HANDLE == __builtin_offsetof(rt_svc_desc_t, handle), \
-       assert_rt_svc_desc_handle_offset_mismatch);
-
-
-/*
- * This macro combines the call type and the owning entity number corresponding
- * to a runtime service to generate a unique owning entity number. This unique
- * oen is used to access an entry in the 'rt_svc_descs_indices' array. The entry
- * contains the index of the service descriptor in the 'rt_svc_descs' array.
- */
-#define get_unique_oen(oen, call_type) ((oen & FUNCID_OEN_MASK) |      \
-                                       ((call_type & FUNCID_TYPE_MASK) \
-                                        << FUNCID_OEN_WIDTH))
-
-/*******************************************************************************
- * Function & variable prototypes
- ******************************************************************************/
-void runtime_svc_init(void);
-extern uintptr_t __RT_SVC_DESCS_START__;
-extern uintptr_t __RT_SVC_DESCS_END__;
-void init_crash_reporting(void);
-
-#endif /*__ASSEMBLY__*/
-#endif /* __RUNTIME_SVC_H__ */
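
This header moves to `include/common/runtime_svc.h` with the same content. To make the `get_unique_oen()` indexing scheme concrete, a worked example, assuming the usual `FUNCID_*` values from `smcc_helpers.h` (OEN mask 0x3f, OEN width 6, type mask 0x1):

```c
#include <runtime_svc.h>

static void demo_index_example(void)
{
	/* OEN 4, fast call (type 1): index = 4 | (1 << 6) = 68 */
	unsigned int fast_idx = get_unique_oen(4, SMC_TYPE_FAST);

	/* OEN 4, standard call (type 0): index = 4 | (0 << 6) = 4 */
	unsigned int std_idx = get_unique_oen(4, SMC_TYPE_STD);

	/* Both land within rt_svc_descs_indices[MAX_RT_SVCS == 128]. */
	(void)fast_idx;
	(void)std_idx;
}
```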
diff --git a/include/bl31/services/psci.h b/include/bl31/services/psci.h
deleted file mode 100644 (file)
index b6d6d4e..0000000
+++ /dev/null
@@ -1,345 +0,0 @@
-/*
- * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __PSCI_H__
-#define __PSCI_H__
-
-#include <bakery_lock.h>
-#include <platform_def.h>      /* for PLAT_NUM_PWR_DOMAINS */
-#if ENABLE_PLAT_COMPAT
-#include <psci_compat.h>
-#endif
-
-/*******************************************************************************
- * Number of power domains whose state this PSCI implementation can track
- ******************************************************************************/
-#ifdef PLAT_NUM_PWR_DOMAINS
-#define PSCI_NUM_PWR_DOMAINS   PLAT_NUM_PWR_DOMAINS
-#else
-#define PSCI_NUM_PWR_DOMAINS   (2 * PLATFORM_CORE_COUNT)
-#endif
-
-#define PSCI_NUM_NON_CPU_PWR_DOMAINS   (PSCI_NUM_PWR_DOMAINS - \
-                                        PLATFORM_CORE_COUNT)
-
-/* This is the power level corresponding to a CPU */
-#define PSCI_CPU_PWR_LVL       0
-
-/*
- * The maximum power level supported by PSCI. Since PSCI CPU_SUSPEND
- * uses the old power_state parameter format which has 2 bits to specify the
- * power level, this constant is defined to be 3.
- */
-#define PSCI_MAX_PWR_LVL       3
-
-/*******************************************************************************
- * Defines for runtime services function ids
- ******************************************************************************/
-#define PSCI_VERSION                   0x84000000
-#define PSCI_CPU_SUSPEND_AARCH32       0x84000001
-#define PSCI_CPU_SUSPEND_AARCH64       0xc4000001
-#define PSCI_CPU_OFF                   0x84000002
-#define PSCI_CPU_ON_AARCH32            0x84000003
-#define PSCI_CPU_ON_AARCH64            0xc4000003
-#define PSCI_AFFINITY_INFO_AARCH32     0x84000004
-#define PSCI_AFFINITY_INFO_AARCH64     0xc4000004
-#define PSCI_MIG_AARCH32               0x84000005
-#define PSCI_MIG_AARCH64               0xc4000005
-#define PSCI_MIG_INFO_TYPE             0x84000006
-#define PSCI_MIG_INFO_UP_CPU_AARCH32   0x84000007
-#define PSCI_MIG_INFO_UP_CPU_AARCH64   0xc4000007
-#define PSCI_SYSTEM_OFF                        0x84000008
-#define PSCI_SYSTEM_RESET              0x84000009
-#define PSCI_FEATURES                  0x8400000A
-#define PSCI_SYSTEM_SUSPEND_AARCH32    0x8400000E
-#define PSCI_SYSTEM_SUSPEND_AARCH64    0xc400000E
-#define PSCI_STAT_RESIDENCY_AARCH32    0x84000010
-#define PSCI_STAT_RESIDENCY_AARCH64    0xc4000010
-#define PSCI_STAT_COUNT_AARCH32                0x84000011
-#define PSCI_STAT_COUNT_AARCH64                0xc4000011
-
-/* Macro to help build the psci capabilities bitfield */
-#define define_psci_cap(x)             (1 << (x & 0x1f))
-
-/*
- * Number of PSCI calls (above) implemented
- */
-#if ENABLE_PSCI_STAT
-#define PSCI_NUM_CALLS                 22
-#else
-#define PSCI_NUM_CALLS                 18
-#endif
-
-/*******************************************************************************
- * PSCI Migrate and friends
- ******************************************************************************/
-#define PSCI_TOS_UP_MIG_CAP    0
-#define PSCI_TOS_NOT_UP_MIG_CAP        1
-#define PSCI_TOS_NOT_PRESENT_MP        2
-
-/*******************************************************************************
- * PSCI CPU_SUSPEND 'power_state' parameter specific defines
- ******************************************************************************/
-#define PSTATE_ID_SHIFT                0
-
-#if PSCI_EXTENDED_STATE_ID
-#define PSTATE_VALID_MASK      0xB0000000
-#define PSTATE_TYPE_SHIFT      30
-#define PSTATE_ID_MASK         0xfffffff
-#else
-#define PSTATE_VALID_MASK      0xFCFE0000
-#define PSTATE_TYPE_SHIFT      16
-#define PSTATE_PWR_LVL_SHIFT   24
-#define PSTATE_ID_MASK         0xffff
-#define PSTATE_PWR_LVL_MASK    0x3
-
-#define psci_get_pstate_pwrlvl(pstate) (((pstate) >> PSTATE_PWR_LVL_SHIFT) & \
-                                       PSTATE_PWR_LVL_MASK)
-#define psci_make_powerstate(state_id, type, pwrlvl) \
-                       (((state_id) & PSTATE_ID_MASK) << PSTATE_ID_SHIFT) |\
-                       (((type) & PSTATE_TYPE_MASK) << PSTATE_TYPE_SHIFT) |\
-                       (((pwrlvl) & PSTATE_PWR_LVL_MASK) << PSTATE_PWR_LVL_SHIFT)
-#endif /* __PSCI_EXTENDED_STATE_ID__ */
-
-#define PSTATE_TYPE_STANDBY    0x0
-#define PSTATE_TYPE_POWERDOWN  0x1
-#define PSTATE_TYPE_MASK       0x1
-
-#define psci_get_pstate_id(pstate)     (((pstate) >> PSTATE_ID_SHIFT) & \
-                                       PSTATE_ID_MASK)
-#define psci_get_pstate_type(pstate)   (((pstate) >> PSTATE_TYPE_SHIFT) & \
-                                       PSTATE_TYPE_MASK)
-#define psci_check_power_state(pstate) ((pstate) & PSTATE_VALID_MASK)
-
-/*******************************************************************************
- * PSCI CPU_FEATURES feature flag specific defines
- ******************************************************************************/
-/* Features flags for CPU SUSPEND power state parameter format. Bits [1:1] */
-#define FF_PSTATE_SHIFT                1
-#define FF_PSTATE_ORIG         0
-#define FF_PSTATE_EXTENDED     1
-#if PSCI_EXTENDED_STATE_ID
-#define FF_PSTATE              FF_PSTATE_EXTENDED
-#else
-#define FF_PSTATE              FF_PSTATE_ORIG
-#endif
-
-/* Features flags for CPU SUSPEND OS Initiated mode support. Bits [0:0] */
-#define FF_MODE_SUPPORT_SHIFT          0
-#define FF_SUPPORTS_OS_INIT_MODE       1
-
-/*******************************************************************************
- * PSCI version
- ******************************************************************************/
-#define PSCI_MAJOR_VER         (1 << 16)
-#define PSCI_MINOR_VER         0x0
-
-/*******************************************************************************
- * PSCI error codes
- ******************************************************************************/
-#define PSCI_E_SUCCESS         0
-#define PSCI_E_NOT_SUPPORTED   -1
-#define PSCI_E_INVALID_PARAMS  -2
-#define PSCI_E_DENIED          -3
-#define PSCI_E_ALREADY_ON      -4
-#define PSCI_E_ON_PENDING      -5
-#define PSCI_E_INTERN_FAIL     -6
-#define PSCI_E_NOT_PRESENT     -7
-#define PSCI_E_DISABLED                -8
-#define PSCI_E_INVALID_ADDRESS -9
-
-#define PSCI_INVALID_MPIDR     ~((u_register_t)0)
-
-#ifndef __ASSEMBLY__
-
-#include <stdint.h>
-#include <types.h>
-
-/*
- * These are the states reported by the PSCI_AFFINITY_INFO API for the specified
- * CPU. The definitions of these states can be found in Section 5.7.1 in the
- * PSCI specification (ARM DEN 0022C).
- */
-typedef enum {
-       AFF_STATE_ON = 0,
-       AFF_STATE_OFF = 1,
-       AFF_STATE_ON_PENDING = 2
-} aff_info_state_t;
-
-/*
- * Macro to represent invalid affinity level within PSCI.
- */
-#define PSCI_INVALID_PWR_LVL   (PLAT_MAX_PWR_LVL + 1)
-
-/*
- * Type for representing the local power state at a particular level.
- */
-typedef uint8_t plat_local_state_t;
-
-/* The local state macro used to represent RUN state. */
-#define PSCI_LOCAL_STATE_RUN   0
-
-/*
- * Macro to test whether the plat_local_state is RUN state
- */
-#define is_local_state_run(plat_local_state) \
-                       ((plat_local_state) == PSCI_LOCAL_STATE_RUN)
-
-/*
- * Macro to test whether the plat_local_state is RETENTION state
- */
-#define is_local_state_retn(plat_local_state) \
-                       (((plat_local_state) > PSCI_LOCAL_STATE_RUN) && \
-                       ((plat_local_state) <= PLAT_MAX_RET_STATE))
-
-/*
- * Macro to test whether the plat_local_state is OFF state
- */
-#define is_local_state_off(plat_local_state) \
-                       (((plat_local_state) > PLAT_MAX_RET_STATE) && \
-                       ((plat_local_state) <= PLAT_MAX_OFF_STATE))
-
-/*****************************************************************************
- * This data structure defines the representation of the power state parameter
- * for its exchange between the generic PSCI code and the platform port. For
- * example, it is used by the platform port to specify the requested power
- * states during a power management operation. It is used by the generic code to
- * inform the platform about the target power states that each level should
- * enter.
- ****************************************************************************/
-typedef struct psci_power_state {
-       /*
-        * The pwr_domain_state[] stores the local power state at each level
-        * for the CPU.
-        */
-       plat_local_state_t pwr_domain_state[PLAT_MAX_PWR_LVL + 1];
-} psci_power_state_t;
-
-/*******************************************************************************
- * Structure used to store per-cpu information relevant to the PSCI service.
- * It is populated in the per-cpu data array. In return we get a guarantee that
- * this information will not reside on a cache line shared with another cpu.
- ******************************************************************************/
-typedef struct psci_cpu_data {
-       /* State as seen by PSCI Affinity Info API */
-       aff_info_state_t aff_info_state;
-
-       /*
-        * Highest power level which takes part in a power management
-        * operation.
-        */
-       unsigned char target_pwrlvl;
-
-       /* The local power state of this CPU */
-       plat_local_state_t local_state;
-} psci_cpu_data_t;
-
-/*******************************************************************************
- * Structure populated by platform specific code to export routines which
- * perform common low level power management functions
- ******************************************************************************/
-typedef struct plat_psci_ops {
-       void (*cpu_standby)(plat_local_state_t cpu_state);
-       int (*pwr_domain_on)(u_register_t mpidr);
-       void (*pwr_domain_off)(const psci_power_state_t *target_state);
-       void (*pwr_domain_suspend)(const psci_power_state_t *target_state);
-       void (*pwr_domain_on_finish)(const psci_power_state_t *target_state);
-       void (*pwr_domain_suspend_finish)(
-                               const psci_power_state_t *target_state);
-       void (*pwr_domain_pwr_down_wfi)(
-                               const psci_power_state_t *target_state) __dead2;
-       void (*system_off)(void) __dead2;
-       void (*system_reset)(void) __dead2;
-       int (*validate_power_state)(unsigned int power_state,
-                                   psci_power_state_t *req_state);
-       int (*validate_ns_entrypoint)(uintptr_t ns_entrypoint);
-       void (*get_sys_suspend_power_state)(
-                                   psci_power_state_t *req_state);
-       int (*get_pwr_lvl_state_idx)(plat_local_state_t pwr_domain_state,
-                                   int pwrlvl);
-       int (*translate_power_state_by_mpidr)(u_register_t mpidr,
-                                   unsigned int power_state,
-                                   psci_power_state_t *output_state);
-} plat_psci_ops_t;
-
-/*******************************************************************************
- * Optional structure populated by the Secure Payload Dispatcher to be given a
- * chance to perform any bookkeeping before PSCI executes a power management
- * operation. It also allows PSCI to determine certain properties of the SP e.g.
- * migrate capability etc.
- ******************************************************************************/
-typedef struct spd_pm_ops {
-       void (*svc_on)(u_register_t target_cpu);
-       int32_t (*svc_off)(u_register_t __unused);
-       void (*svc_suspend)(u_register_t max_off_pwrlvl);
-       void (*svc_on_finish)(u_register_t __unused);
-       void (*svc_suspend_finish)(u_register_t max_off_pwrlvl);
-       int32_t (*svc_migrate)(u_register_t from_cpu, u_register_t to_cpu);
-       int32_t (*svc_migrate_info)(u_register_t *resident_cpu);
-       void (*svc_system_off)(void);
-       void (*svc_system_reset)(void);
-} spd_pm_ops_t;
-
-/*******************************************************************************
- * Function & Data prototypes
- ******************************************************************************/
-unsigned int psci_version(void);
-int psci_cpu_on(u_register_t target_cpu,
-               uintptr_t entrypoint,
-               u_register_t context_id);
-int psci_cpu_suspend(unsigned int power_state,
-                    uintptr_t entrypoint,
-                    u_register_t context_id);
-int psci_system_suspend(uintptr_t entrypoint, u_register_t context_id);
-int psci_cpu_off(void);
-int psci_affinity_info(u_register_t target_affinity,
-                      unsigned int lowest_affinity_level);
-int psci_migrate(u_register_t target_cpu);
-int psci_migrate_info_type(void);
-long psci_migrate_info_up_cpu(void);
-int psci_features(unsigned int psci_fid);
-void __dead2 psci_power_down_wfi(void);
-void psci_entrypoint(void);
-void psci_register_spd_pm_hook(const spd_pm_ops_t *);
-uintptr_t psci_smc_handler(uint32_t smc_fid,
-                         u_register_t x1,
-                         u_register_t x2,
-                         u_register_t x3,
-                         u_register_t x4,
-                         void *cookie,
-                         void *handle,
-                         u_register_t flags);
-
-/* PSCI setup function */
-int psci_setup(void);
-
-#endif /*__ASSEMBLY__*/
-
-#endif /* __PSCI_H__ */
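
`psci.h` moves wholesale to `include/lib/psci/psci.h`. Two brief hedged sketches of how its interfaces are used; every `demo_` name is hypothetical. First, a platform populating a subset of `plat_psci_ops_t` (a real port would hand such a structure back to the generic layer from its setup hook):

```c
#include <arch_helpers.h>
#include <psci.h>

static void demo_cpu_standby(plat_local_state_t cpu_state)
{
	/* Enter the shallow standby state; plain wfi as a stand-in. */
	wfi();
}

static int demo_pwr_domain_on(u_register_t mpidr)
{
	/* Program the power controller to release 'mpidr'; this part
	 * is entirely platform specific and omitted here. */
	return PSCI_E_SUCCESS;
}

static void demo_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	/* Re-initialise per-cpu interfaces (e.g. the GIC) here. */
}

static const plat_psci_ops_t demo_plat_psci_ops = {
	.cpu_standby		= demo_cpu_standby,
	.pwr_domain_on		= demo_pwr_domain_on,
	.pwr_domain_on_finish	= demo_pwr_domain_on_finish,
};
```

Second, composing and checking a CPU_SUSPEND `power_state` in the original (non-extended-state-id) format, the only one for which `psci_make_powerstate` is defined:

```c
static int demo_request_powerdown(uintptr_t ep, u_register_t ctx_id)
{
	unsigned int pstate = psci_make_powerstate(0,
						   PSTATE_TYPE_POWERDOWN,
						   PSCI_CPU_PWR_LVL);

	/* A non-zero result means bits outside PSTATE_VALID_MASK are
	 * set, i.e. the encoding is invalid. */
	if (psci_check_power_state(pstate))
		return PSCI_E_INVALID_PARAMS;

	return psci_cpu_suspend(pstate, ep, ctx_id);
}
```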
diff --git a/include/bl31/services/psci_compat.h b/include/bl31/services/psci_compat.h
deleted file mode 100644 (file)
index 24bd8dc..0000000
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __PSCI_COMPAT_H__
-#define __PSCI_COMPAT_H__
-
-#include <arch.h>
-#include <platform_def.h>
-
-#ifndef __ASSEMBLY__
-/*
- * The below declarations are to enable compatibility for the platform ports
- * using the old platform interface and psci helpers.
- */
-#define PLAT_MAX_PWR_LVL       PLATFORM_MAX_AFFLVL
-#define PLAT_NUM_PWR_DOMAINS   PLATFORM_NUM_AFFS
-
-/*******************************************************************************
- * PSCI affinity related constants. An affinity instance could
- * be present or absent physically to cater for asymmetric topologies.
- ******************************************************************************/
-#define PSCI_AFF_ABSENT                0x0
-#define PSCI_AFF_PRESENT       0x1
-
-#define PSCI_STATE_ON          0x0
-#define PSCI_STATE_OFF         0x1
-#define PSCI_STATE_ON_PENDING  0x2
-#define PSCI_STATE_SUSPEND     0x3
-
-/*
- * Using the compatibility platform interfaces means that the local states
- * used in psci_power_state_t need to only convey whether its power down
- * or standby state. The onus is on the platform port to do the right thing
- * including the state coordination in case multiple power down states are
- * involved. Hence if we assume 3 generic states viz, run, standby and
- * power down, we can assign 1 and 2 to standby and power down respectively.
- */
-#define PLAT_MAX_RET_STATE     1
-#define PLAT_MAX_OFF_STATE     2
-
-/*
- * Macro to represent invalid affinity level within PSCI.
- */
-#define PSCI_INVALID_DATA -1
-
-#define psci_get_pstate_afflvl(pstate)         psci_get_pstate_pwrlvl(pstate)
-
-/*
- * This array stores the 'power_state' requests of each CPU during
- * CPU_SUSPEND and SYSTEM_SUSPEND which will be populated by the
- * compatibility layer when appropriate platform hooks are invoked.
- */
-extern unsigned int psci_power_state_compat[PLATFORM_CORE_COUNT];
-
-/*******************************************************************************
- * Structure populated by platform specific code to export routines which
- * perform common low level pm functions
- ******************************************************************************/
-typedef struct plat_pm_ops {
-       void (*affinst_standby)(unsigned int power_state);
-       int (*affinst_on)(unsigned long mpidr,
-                         unsigned long sec_entrypoint,
-                         unsigned int afflvl,
-                         unsigned int state);
-       void (*affinst_off)(unsigned int afflvl, unsigned int state);
-       void (*affinst_suspend)(unsigned long sec_entrypoint,
-                              unsigned int afflvl,
-                              unsigned int state);
-       void (*affinst_on_finish)(unsigned int afflvl, unsigned int state);
-       void (*affinst_suspend_finish)(unsigned int afflvl,
-                                     unsigned int state);
-       void (*system_off)(void) __dead2;
-       void (*system_reset)(void) __dead2;
-       int (*validate_power_state)(unsigned int power_state);
-       int (*validate_ns_entrypoint)(unsigned long ns_entrypoint);
-       unsigned int (*get_sys_suspend_power_state)(void);
-} plat_pm_ops_t;
-
-/*******************************************************************************
- * Function & Data prototypes to enable compatibility for older platform ports
- ******************************************************************************/
-int psci_get_suspend_stateid_by_mpidr(unsigned long);
-int psci_get_suspend_stateid(void);
-int psci_get_suspend_powerstate(void);
-unsigned int psci_get_max_phys_off_afflvl(void);
-int psci_get_suspend_afflvl(void);
-
-#endif /* ____ASSEMBLY__ */
-#endif /* __PSCI_COMPAT_H__ */
diff --git a/include/bl31/services/std_svc.h b/include/bl31/services/std_svc.h
deleted file mode 100644 (file)
index cbd5b62..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __STD_SVC_H__
-#define __STD_SVC_H__
-
-/* SMC function IDs for Standard Service queries */
-
-#define ARM_STD_SVC_CALL_COUNT         0x8400ff00
-#define ARM_STD_SVC_UID                        0x8400ff01
-/*                                     0x8400ff02 is reserved */
-#define ARM_STD_SVC_VERSION            0x8400ff03
-
-/* ARM Standard Service Calls version numbers */
-#define STD_SVC_VERSION_MAJOR          0x0
-#define STD_SVC_VERSION_MINOR          0x1
-
-/* The macros below are used to identify PSCI calls from the SMC function ID */
-#define PSCI_FID_MASK                  0xffe0u
-#define PSCI_FID_VALUE                 0u
-#define is_psci_fid(_fid) \
-       (((_fid) & PSCI_FID_MASK) == PSCI_FID_VALUE)
-
-#endif /* __STD_SVC_H__ */
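
This header moves unchanged to `include/services/std_svc.h`. A worked example of the FID test it provides: every PSCI function number sits in the 0x00-0x1f range of the Standard Service, which is exactly what the mask/value pair encodes.

```c
#include <assert.h>
#include <std_svc.h>

static void demo_fid_routing(void)
{
	/* PSCI_CPU_ON_AARCH64 = 0xc4000003: 0x0003 & 0xffe0 == 0 */
	assert(is_psci_fid(0xc4000003));

	/* ARM_STD_SVC_CALL_COUNT = 0x8400ff00: 0xff00 & 0xffe0 != 0 */
	assert(!is_psci_fid(0x8400ff00));
}
```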
diff --git a/include/common/aarch64/asm_macros.S b/include/common/aarch64/asm_macros.S
new file mode 100644 (file)
index 0000000..e766989
--- /dev/null
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __ASM_MACROS_S__
+#define __ASM_MACROS_S__
+
+#include <arch.h>
+
+
+       .macro  func_prologue
+       stp     x29, x30, [sp, #-0x10]!
+       mov     x29,sp
+       .endm
+
+       .macro  func_epilogue
+       ldp     x29, x30, [sp], #0x10
+       .endm
+
+
+       .macro  dcache_line_size  reg, tmp
+       mrs     \tmp, ctr_el0
+       ubfx    \tmp, \tmp, #16, #4
+       mov     \reg, #4
+       lsl     \reg, \reg, \tmp
+       .endm
+
+
+       .macro  icache_line_size  reg, tmp
+       mrs     \tmp, ctr_el0
+       and     \tmp, \tmp, #0xf
+       mov     \reg, #4
+       lsl     \reg, \reg, \tmp
+       .endm
+
+
+       .macro  smc_check  label
+       mrs     x0, esr_el3
+       ubfx    x0, x0, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+       cmp     x0, #EC_AARCH64_SMC
+       b.ne    $label
+       .endm
+
+       /*
+        * Declare the exception vector table, enforcing it is aligned on a
+        * 2KB boundary, as required by the ARMv8 architecture.
+        * Use zero bytes as the fill value to be stored in the padding bytes
+        * so that it inserts illegal AArch64 instructions. This increases
+        * security, robustness and potentially facilitates debugging.
+        */
+       .macro vector_base  label
+       .section .vectors, "ax"
+       .align 11, 0
+       \label:
+       .endm
+
+       /*
+        * Create an entry in the exception vector table, enforcing it is
+        * aligned on a 128-byte boundary, as required by the ARMv8 architecture.
+        * Use zero bytes as the fill value to be stored in the padding bytes
+        * so that it inserts illegal AArch64 instructions. This increases
+        * security, robustness and potentially facilitates debugging.
+        */
+       .macro vector_entry  label
+       .section .vectors, "ax"
+       .align 7, 0
+       \label:
+       .endm
+
+       /*
+        * This macro verifies that the given vector doesn't exceed the
+        * architectural limit of 32 instructions. This is meant to be placed
+        * immediately after the last instruction in the vector. It takes the
+        * vector entry as the parameter
+        */
+       .macro check_vector_size since
+         .if (. - \since) > (32 * 4)
+           .error "Vector exceeds 32 instructions"
+         .endif
+       .endm
+
+       /*
+        * This macro is used to create a function label and place the
+        * code into a separate text section based on the function name
+        * to enable elimination of unused code during linking
+        */
+       .macro func _name
+       .section .text.\_name, "ax"
+       .type \_name, %function
+       .func \_name
+       \_name:
+       .endm
+
+       /*
+        * This macro is used to mark the end of a function.
+        */
+       .macro endfunc _name
+       .endfunc
+       .size \_name, . - \_name
+       .endm
+
+       /*
+        * These macros are used to create function labels for deprecated
+        * APIs. If ERROR_DEPRECATED is non-zero, callers of these APIs
+        * will fail to link, causing a build failure.
+        */
+#if ERROR_DEPRECATED
+       .macro func_deprecated _name
+       func deprecated\_name
+       .endm
+
+       .macro endfunc_deprecated _name
+       endfunc deprecated\_name
+       .endm
+#else
+       .macro func_deprecated _name
+       func \_name
+       .endm
+
+       .macro endfunc_deprecated _name
+       endfunc \_name
+       .endm
+#endif
+
+       /*
+        * Helper assembler macro to count trailing zeros. The result is
+        * placed in the `TZ_COUNT` symbol.
+        */
+       .macro count_tz _value, _tz_count
+       .if \_value
+         count_tz "(\_value >> 1)", "(\_tz_count + 1)"
+       .else
+         .equ TZ_COUNT, (\_tz_count - 1)
+       .endif
+       .endm
+
+       /*
+        * This macro declares an array of 1 or more stacks, properly
+        * aligned and in the requested section
+        */
+#define DEFAULT_STACK_ALIGN    (1 << 6)   /* In case the caller doesn't provide alignment */
+
+       .macro declare_stack _name, _section, _size, _count, _align=DEFAULT_STACK_ALIGN
+       count_tz \_align, 0
+       .if (\_align - (1 << TZ_COUNT))
+         .error "Incorrect stack alignment specified (Must be a power of 2)."
+       .endif
+       .if ((\_size & ((1 << TZ_COUNT) - 1)) <> 0)
+         .error "Stack size not correctly aligned"
+       .endif
+       .section    \_section, "aw", %nobits
+       .align TZ_COUNT
+       \_name:
+       .space ((\_count) * (\_size)), 0
+       .endm
+
+#if ENABLE_PLAT_COMPAT
+       /*
+        * This macro calculates the base address of an MP stack using the
+        * platform_get_core_pos() index, the name of the stack storage and
+        * the size of each stack
+        * In: X0 = MPIDR of CPU whose stack is wanted
+        * Out: X0 = physical address of stack base
+        * Clobber: X30, X1, X2
+        */
+       .macro get_mp_stack _name, _size
+       bl  platform_get_core_pos
+       ldr x2, =(\_name + \_size)
+       mov x1, #\_size
+       madd x0, x0, x1, x2
+       .endm
+#endif
+
+       /*
+        * This macro calculates the base address of the current CPU's MP stack
+        * using the plat_my_core_pos() index, the name of the stack storage
+        * and the size of each stack
+        * Out: X0 = physical address of stack base
+        * Clobber: X30, X1, X2
+        */
+       .macro get_my_mp_stack _name, _size
+       bl  plat_my_core_pos
+       ldr x2, =(\_name + \_size)
+       mov x1, #\_size
+       madd x0, x0, x1, x2
+       .endm
+
+       /*
+        * This macro calculates the base address of a UP stack using the
+        * name of the stack storage and the size of the stack
+        * Out: X0 = physical address of stack base
+        */
+       .macro get_up_stack _name, _size
+       ldr x0, =(\_name + \_size)
+       .endm
+
+       /*
+        * Helper macro to generate the best mov/movk combination according
+        * to the value to be moved. The 16 bits starting at '_shift' are
+        * tested and, if not zero, moved into '_reg' without affecting
+        * other bits.
+        */
+       .macro _mov_imm16 _reg, _val, _shift
+               .if (\_val >> \_shift) & 0xffff
+                       .if (\_val & (1 << \_shift - 1))
+                               movk    \_reg, (\_val >> \_shift) & 0xffff, LSL \_shift
+                       .else
+                               mov     \_reg, \_val & (0xffff << \_shift)
+                       .endif
+               .endif
+       .endm
+
+       /*
+        * Helper macro to load arbitrary values into 32 or 64-bit registers
+        * which generates the best mov/movk combinations. Many base
+        * addresses are 64KB aligned; in that case the macro eliminates
+        * the update of bits [15:0].
+        */
+       .macro mov_imm _reg, _val
+               .if (\_val) == 0
+                       mov     \_reg, #0
+               .else
+                       _mov_imm16      \_reg, (\_val), 0
+                       _mov_imm16      \_reg, (\_val), 16
+                       _mov_imm16      \_reg, (\_val), 32
+                       _mov_imm16      \_reg, (\_val), 48
+               .endif
+       .endm
+
+#endif /* __ASM_MACROS_S__ */
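
The `mov_imm`/`_mov_imm16` pair above is easiest to follow through a small model. The C sketch below models only the chunk-selection logic, not the exact assembler output: the first non-zero 16-bit chunk is materialised with a `mov`, later ones with `movk`, and all-zero chunks are skipped, which is why 64KB-aligned base addresses never touch bits [15:0]:

```c
#include <stdint.h>
#include <stdio.h>

static void model_mov_imm(const char *reg, uint64_t val)
{
	int emitted = 0;

	if (val == 0) {
		printf("mov %s, #0\n", reg);
		return;
	}

	for (int shift = 0; shift < 64; shift += 16) {
		uint16_t chunk = (val >> shift) & 0xffff;

		if (chunk == 0)
			continue;	/* e.g. bits [15:0] of a 64KB-aligned base */
		printf("%s %s, #0x%x, lsl #%d\n",
		       emitted ? "movk" : "mov ", reg, chunk, shift);
		emitted = 1;
	}
}

int main(void)
{
	model_mov_imm("x0", 0xffff0000);	/* single shifted mov */
	model_mov_imm("x1", 0x80001000);	/* mov followed by movk */
	return 0;
}
```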
diff --git a/include/common/aarch64/assert_macros.S b/include/common/aarch64/assert_macros.S
new file mode 100644 (file)
index 0000000..b7e536c
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __ASSERT_MACROS_S__
+#define __ASSERT_MACROS_S__
+
+       /*
+        * Assembler macro to enable asm_assert. Use this macro wherever
+        * an assert is required in assembly. Note that the macro uses
+        * the local label '300' internally, so the caller must make
+        * sure that this label is not used as a branch target before
+        * this macro is invoked.
+        */
+#define ASM_ASSERT(_cc) \
+.ifndef .L_assert_filename ;\
+       .pushsection .rodata.str1.1, "aS" ;\
+       .L_assert_filename: ;\
+                       .string __FILE__ ;\
+       .popsection ;\
+.endif ;\
+       b._cc   300f ;\
+       adr     x0, .L_assert_filename ;\
+       mov     x1, __LINE__ ;\
+       b       asm_assert ;\
+300:
+
+#endif /* __ASSERT_MACROS_S__ */
diff --git a/include/common/asm_macros.S b/include/common/asm_macros.S
deleted file mode 100644 (file)
index bd8bb70..0000000
+++ /dev/null
@@ -1,255 +0,0 @@
-/*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef __ASM_MACROS_S__
-#define __ASM_MACROS_S__
-
-#include <arch.h>
-
-
-       .macro  func_prologue
-       stp     x29, x30, [sp, #-0x10]!
-       mov     x29,sp
-       .endm
-
-       .macro  func_epilogue
-       ldp     x29, x30, [sp], #0x10
-       .endm
-
-
-       .macro  dcache_line_size  reg, tmp
-       mrs     \tmp, ctr_el0
-       ubfx    \tmp, \tmp, #16, #4
-       mov     \reg, #4
-       lsl     \reg, \reg, \tmp
-       .endm
-
-
-       .macro  icache_line_size  reg, tmp
-       mrs     \tmp, ctr_el0
-       and     \tmp, \tmp, #0xf
-       mov     \reg, #4
-       lsl     \reg, \reg, \tmp
-       .endm
-
-
-       .macro  smc_check  label
-       mrs     x0, esr_el3
-       ubfx    x0, x0, #ESR_EC_SHIFT, #ESR_EC_LENGTH
-       cmp     x0, #EC_AARCH64_SMC
-       b.ne    $label
-       .endm
-
-       /*
-        * Declare the exception vector table, enforcing it is aligned on a
-        * 2KB boundary, as required by the ARMv8 architecture.
-        * Use zero bytes as the fill value to be stored in the padding bytes
-        * so that it inserts illegal AArch64 instructions. This increases
-        * security, robustness and potentially facilitates debugging.
-        */
-       .macro vector_base  label
-       .section .vectors, "ax"
-       .align 11, 0
-       \label:
-       .endm
-
-       /*
-        * Create an entry in the exception vector table, enforcing it is
-        * aligned on a 128-byte boundary, as required by the ARMv8 architecture.
-        * Use zero bytes as the fill value to be stored in the padding bytes
-        * so that it inserts illegal AArch64 instructions. This increases
-        * security, robustness and potentially facilitates debugging.
-        */
-       .macro vector_entry  label
-       .section .vectors, "ax"
-       .align 7, 0
-       \label:
-       .endm
-
-       /*
-        * This macro verifies that the given vector doesn't exceed the
-        * architectural limit of 32 instructions. This is meant to be placed
-        * immediately after the last instruction in the vector. It takes the
-        * vector entry as the parameter
-        */
-       .macro check_vector_size since
-         .if (. - \since) > (32 * 4)
-           .error "Vector exceeds 32 instructions"
-         .endif
-       .endm
-
-       /*
-        * This macro is used to create a function label and place the
-        * code into a separate text section based on the function name
-        * to enable elimination of unused code during linking
-        */
-       .macro func _name
-       .section .text.\_name, "ax"
-       .type \_name, %function
-       .func \_name
-       \_name:
-       .endm
-
-       /*
-        * This macro is used to mark the end of a function.
-        */
-       .macro endfunc _name
-       .endfunc
-       .size \_name, . - \_name
-       .endm
-
-       /*
-        * These macros are used to create function labels for deprecated
-        * APIs. If ERROR_DEPRECATED is non-zero, the callers of these APIs
-        * will fail to link, causing a build failure.
-        */
-#if ERROR_DEPRECATED
-       .macro func_deprecated _name
-       func deprecated\_name
-       .endm
-
-       .macro endfunc_deprecated _name
-       endfunc deprecated\_name
-       .endm
-#else
-       .macro func_deprecated _name
-       func \_name
-       .endm
-
-       .macro endfunc_deprecated _name
-       endfunc \_name
-       .endm
-#endif
-
-       /*
-        * Helper assembler macro to count trailing zeros. The output is
-        * populated in the `TZ_COUNT` symbol.
-        */
-       .macro count_tz _value, _tz_count
-       .if \_value
-         count_tz "(\_value >> 1)", "(\_tz_count + 1)"
-       .else
-         .equ TZ_COUNT, (\_tz_count - 1)
-       .endif
-       .endm
-
-       /*
-        * This macro declares an array of 1 or more stacks, properly
-        * aligned and in the requested section
-        */
-#define DEFAULT_STACK_ALIGN    (1 << 6)   /* In case the caller doesn't provide alignment */
-
-       .macro declare_stack _name, _section, _size, _count, _align=DEFAULT_STACK_ALIGN
-       count_tz \_align, 0
-       .if (\_align - (1 << TZ_COUNT))
-         .error "Incorrect stack alignment specified (Must be a power of 2)."
-       .endif
-       .if ((\_size & ((1 << TZ_COUNT) - 1)) <> 0)
-         .error "Stack size not correctly aligned"
-       .endif
-       .section    \_section, "aw", %nobits
-       .align TZ_COUNT
-       \_name:
-       .space ((\_count) * (\_size)), 0
-       .endm
-
-#if ENABLE_PLAT_COMPAT
-       /*
-        * This macro calculates the base address of an MP stack using the
-        * platform_get_core_pos() index, the name of the stack storage and
-        * the size of each stack
-        * In: X0 = MPIDR of CPU whose stack is wanted
-        * Out: X0 = physical address of stack base
-        * Clobber: X30, X1, X2
-        */
-       .macro get_mp_stack _name, _size
-       bl  platform_get_core_pos
-       ldr x2, =(\_name + \_size)
-       mov x1, #\_size
-       madd x0, x0, x1, x2
-       .endm
-#endif
-
-       /*
-        * This macro calculates the base address of the current CPU's MP stack
-        * using the plat_my_core_pos() index, the name of the stack storage
-        * and the size of each stack
-        * Out: X0 = physical address of stack base
-        * Clobber: X30, X1, X2
-        */
-       .macro get_my_mp_stack _name, _size
-       bl  plat_my_core_pos
-       ldr x2, =(\_name + \_size)
-       mov x1, #\_size
-       madd x0, x0, x1, x2
-       .endm
-
-       /*
-        * This macro calculates the base address of a UP stack using the
-        * name of the stack storage and the size of the stack
-        * Out: X0 = physical address of stack base
-        */
-       .macro get_up_stack _name, _size
-       ldr x0, =(\_name + \_size)
-       .endm
-
-       /*
-        * Helper macro to generate the best mov/movk combinations according
-        * to the value to be moved. The 16 bits from '_shift' are tested
-        * and, if not zero, they are moved into '_reg' without affecting
-        * other bits.
-        */
-       .macro _mov_imm16 _reg, _val, _shift
-               .if (\_val >> \_shift) & 0xffff
-                       .if (\_val & (1 << \_shift - 1))
-                               movk    \_reg, (\_val >> \_shift) & 0xffff, LSL \_shift
-                       .else
-                               mov     \_reg, \_val & (0xffff << \_shift)
-                       .endif
-               .endif
-       .endm
-
-       /*
-        * Helper macro to load arbitrary values into 32 or 64-bit registers,
-        * generating the best mov/movk combinations. Many base addresses are
-        * 64KB aligned; in that case the macro avoids updating bits 15:0.
-        */
-       .macro mov_imm _reg, _val
-               .if (\_val) == 0
-                       mov     \_reg, #0
-               .else
-                       _mov_imm16      \_reg, (\_val), 0
-                       _mov_imm16      \_reg, (\_val), 16
-                       _mov_imm16      \_reg, (\_val), 32
-                       _mov_imm16      \_reg, (\_val), 48
-               .endif
-       .endm
-
-#endif /* __ASM_MACROS_S__ */
diff --git a/include/common/assert_macros.S b/include/common/assert_macros.S
deleted file mode 100644 (file)
index cb6c78b..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef __ASSERT_MACROS_S__
-#define __ASSERT_MACROS_S__
-
-       /*
-        * Assembler macro to enable asm_assert. Use this macro wherever
-        * assert is required in assembly. Please note that the macro makes
-        * use of label '300' to provide the logic and the caller
-        * should make sure that this label is not used to branch prior
-        * to calling this macro.
-        */
-#define ASM_ASSERT(_cc) \
-.ifndef .L_assert_filename ;\
-       .pushsection .rodata.str1.1, "aS" ;\
-       .L_assert_filename: ;\
-                       .string __FILE__ ;\
-       .popsection ;\
-.endif ;\
-       b._cc   300f ;\
-       adr     x0, .L_assert_filename ;\
-       mov     x1, __LINE__ ;\
-       b       asm_assert ;\
-300:
-
-#endif /* __ASSERT_MACROS_S__ */
diff --git a/include/common/context.h b/include/common/context.h
deleted file mode 100644 (file)
index b528c03..0000000
+++ /dev/null
@@ -1,351 +0,0 @@
-/*
- * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __CONTEXT_H__
-#define __CONTEXT_H__
-
-/*******************************************************************************
- * Constants that allow assembler code to access members of the 'gp_regs'
- * structure at their correct offsets.
- ******************************************************************************/
-#define CTX_GPREGS_OFFSET      0x0
-#define CTX_GPREG_X0           0x0
-#define CTX_GPREG_X1           0x8
-#define CTX_GPREG_X2           0x10
-#define CTX_GPREG_X3           0x18
-#define CTX_GPREG_X4           0x20
-#define CTX_GPREG_X5           0x28
-#define CTX_GPREG_X6           0x30
-#define CTX_GPREG_X7           0x38
-#define CTX_GPREG_X8           0x40
-#define CTX_GPREG_X9           0x48
-#define CTX_GPREG_X10          0x50
-#define CTX_GPREG_X11          0x58
-#define CTX_GPREG_X12          0x60
-#define CTX_GPREG_X13          0x68
-#define CTX_GPREG_X14          0x70
-#define CTX_GPREG_X15          0x78
-#define CTX_GPREG_X16          0x80
-#define CTX_GPREG_X17          0x88
-#define CTX_GPREG_X18          0x90
-#define CTX_GPREG_X19          0x98
-#define CTX_GPREG_X20          0xa0
-#define CTX_GPREG_X21          0xa8
-#define CTX_GPREG_X22          0xb0
-#define CTX_GPREG_X23          0xb8
-#define CTX_GPREG_X24          0xc0
-#define CTX_GPREG_X25          0xc8
-#define CTX_GPREG_X26          0xd0
-#define CTX_GPREG_X27          0xd8
-#define CTX_GPREG_X28          0xe0
-#define CTX_GPREG_X29          0xe8
-#define CTX_GPREG_LR           0xf0
-#define CTX_GPREG_SP_EL0       0xf8
-#define CTX_GPREGS_END         0x100
-
-/*******************************************************************************
- * Constants that allow assembler code to access members of the 'el3_state'
- * structure at their correct offsets. Note that some of the registers are only
- * 32-bits wide but are stored as 64-bit values for convenience
- ******************************************************************************/
-#define CTX_EL3STATE_OFFSET    (CTX_GPREGS_OFFSET + CTX_GPREGS_END)
-#define CTX_SCR_EL3            0x0
-#define CTX_RUNTIME_SP         0x8
-#define CTX_SPSR_EL3           0x10
-#define CTX_ELR_EL3            0x18
-#define CTX_EL3STATE_END       0x20
-
-/*******************************************************************************
- * Constants that allow assembler code to access members of the
- * 'el1_sys_regs' structure at their correct offsets. Note that some of the
- * registers are only 32-bits wide but are stored as 64-bit values for
- * convenience
- ******************************************************************************/
-#define CTX_SYSREGS_OFFSET     (CTX_EL3STATE_OFFSET + CTX_EL3STATE_END)
-#define CTX_SPSR_EL1           0x0
-#define CTX_ELR_EL1            0x8
-#define CTX_SCTLR_EL1          0x10
-#define CTX_ACTLR_EL1          0x18
-#define CTX_CPACR_EL1          0x20
-#define CTX_CSSELR_EL1         0x28
-#define CTX_SP_EL1             0x30
-#define CTX_ESR_EL1            0x38
-#define CTX_TTBR0_EL1          0x40
-#define CTX_TTBR1_EL1          0x48
-#define CTX_MAIR_EL1           0x50
-#define CTX_AMAIR_EL1          0x58
-#define CTX_TCR_EL1            0x60
-#define CTX_TPIDR_EL1          0x68
-#define CTX_TPIDR_EL0          0x70
-#define CTX_TPIDRRO_EL0                0x78
-#define CTX_PAR_EL1            0x80
-#define CTX_FAR_EL1            0x88
-#define CTX_AFSR0_EL1          0x90
-#define CTX_AFSR1_EL1          0x98
-#define CTX_CONTEXTIDR_EL1     0xa0
-#define CTX_VBAR_EL1           0xa8
-
-/*
- * If the platform is AArch64-only, there is no need to save and restore these
- * AArch32 registers.
- */
-#if CTX_INCLUDE_AARCH32_REGS
-#define CTX_SPSR_ABT           0xb0
-#define CTX_SPSR_UND           0xb8
-#define CTX_SPSR_IRQ           0xc0
-#define CTX_SPSR_FIQ           0xc8
-#define CTX_DACR32_EL2         0xd0
-#define CTX_IFSR32_EL2         0xd8
-#define CTX_FP_FPEXC32_EL2     0xe0
-#define CTX_TIMER_SYSREGS_OFF          0xf0 /* Align to the next 16 byte boundary */
-#else
-#define CTX_TIMER_SYSREGS_OFF          0xb0
-#endif /* CTX_INCLUDE_AARCH32_REGS */
-
-/*
- * If the timer registers aren't saved and restored, we don't have to reserve
- * space for them in the context
- */
-#if NS_TIMER_SWITCH
-#define CTX_CNTP_CTL_EL0       (CTX_TIMER_SYSREGS_OFF + 0x0)
-#define CTX_CNTP_CVAL_EL0      (CTX_TIMER_SYSREGS_OFF + 0x8)
-#define CTX_CNTV_CTL_EL0       (CTX_TIMER_SYSREGS_OFF + 0x10)
-#define CTX_CNTV_CVAL_EL0      (CTX_TIMER_SYSREGS_OFF + 0x18)
-#define CTX_CNTKCTL_EL1                (CTX_TIMER_SYSREGS_OFF + 0x20)
-#define CTX_SYSREGS_END                (CTX_TIMER_SYSREGS_OFF + 0x30) /* Align to the next 16 byte boundary */
-#else
-#define CTX_SYSREGS_END                CTX_TIMER_SYSREGS_OFF
-#endif /* NS_TIMER_SWITCH */
-
-/*******************************************************************************
- * Constants that allow assembler code to access members of the 'fp_regs'
- * structure at their correct offsets.
- ******************************************************************************/
-#if CTX_INCLUDE_FPREGS
-#define CTX_FPREGS_OFFSET      (CTX_SYSREGS_OFFSET + CTX_SYSREGS_END)
-#define CTX_FP_Q0              0x0
-#define CTX_FP_Q1              0x10
-#define CTX_FP_Q2              0x20
-#define CTX_FP_Q3              0x30
-#define CTX_FP_Q4              0x40
-#define CTX_FP_Q5              0x50
-#define CTX_FP_Q6              0x60
-#define CTX_FP_Q7              0x70
-#define CTX_FP_Q8              0x80
-#define CTX_FP_Q9              0x90
-#define CTX_FP_Q10             0xa0
-#define CTX_FP_Q11             0xb0
-#define CTX_FP_Q12             0xc0
-#define CTX_FP_Q13             0xd0
-#define CTX_FP_Q14             0xe0
-#define CTX_FP_Q15             0xf0
-#define CTX_FP_Q16             0x100
-#define CTX_FP_Q17             0x110
-#define CTX_FP_Q18             0x120
-#define CTX_FP_Q19             0x130
-#define CTX_FP_Q20             0x140
-#define CTX_FP_Q21             0x150
-#define CTX_FP_Q22             0x160
-#define CTX_FP_Q23             0x170
-#define CTX_FP_Q24             0x180
-#define CTX_FP_Q25             0x190
-#define CTX_FP_Q26             0x1a0
-#define CTX_FP_Q27             0x1b0
-#define CTX_FP_Q28             0x1c0
-#define CTX_FP_Q29             0x1d0
-#define CTX_FP_Q30             0x1e0
-#define CTX_FP_Q31             0x1f0
-#define CTX_FP_FPSR            0x200
-#define CTX_FP_FPCR            0x208
-#define CTX_FPREGS_END         0x210
-#endif
-
-#ifndef __ASSEMBLY__
-
-#include <cassert.h>
-#include <platform_def.h>      /* for CACHE_WRITEBACK_GRANULE */
-#include <stdint.h>
-
-/*
- * Common constants to help define the 'cpu_context' structure and its
- * members below.
- */
-#define DWORD_SHIFT            3
-#define DEFINE_REG_STRUCT(name, num_regs)      \
-       typedef struct name {                   \
-               uint64_t _regs[num_regs];       \
-       }  __aligned(16) name##_t
-
-/* Constants to determine the size of individual context structures */
-#define CTX_GPREG_ALL          (CTX_GPREGS_END >> DWORD_SHIFT)
-#define CTX_SYSREG_ALL         (CTX_SYSREGS_END >> DWORD_SHIFT)
-#if CTX_INCLUDE_FPREGS
-#define CTX_FPREG_ALL          (CTX_FPREGS_END >> DWORD_SHIFT)
-#endif
-#define CTX_EL3STATE_ALL       (CTX_EL3STATE_END >> DWORD_SHIFT)
-
-/*
- * AArch64 general purpose register context structure. Usually only x0-x18
- * and lr are saved, as the compiler is expected to preserve the callee-saved
- * registers if the C runtime uses them, and the assembler does not touch
- * them. However, in the case of a world switch during exception handling,
- * the callee-saved registers need to be saved as well.
- */
-DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL);
-
-/*
- * AArch64 EL1 system register context structure for preserving the
- * architectural state during switches from one security state to
- * another in EL1.
- */
-DEFINE_REG_STRUCT(el1_sys_regs, CTX_SYSREG_ALL);
-
-/*
- * AArch64 floating point register context structure for preserving
- * the floating point state during switches from one security state to
- * another.
- */
-#if CTX_INCLUDE_FPREGS
-DEFINE_REG_STRUCT(fp_regs, CTX_FPREG_ALL);
-#endif
-
-/*
- * Miscellaneous registers used by EL3 firmware to maintain its state
- * across exception entries and exits
- */
-DEFINE_REG_STRUCT(el3_state, CTX_EL3STATE_ALL);
-
-/*
- * Macros to access members of any of the above structures using their
- * offsets
- */
-#define read_ctx_reg(ctx, offset)      ((ctx)->_regs[offset >> DWORD_SHIFT])
-#define write_ctx_reg(ctx, offset, val)        (((ctx)->_regs[offset >> DWORD_SHIFT]) \
-                                        = val)
-
-/*
- * Top-level context structure which is used by EL3 firmware to
- * preserve the state of a core at EL1 in one of the two security
- * states and save enough EL3 metadata to be able to return to that
- * EL and security state. The context management library will be used
- * to ensure that SP_EL3 always points to an instance of this
- * structure at exception entry and exit. Each instance will
- * correspond to either the secure or the non-secure state.
- */
-typedef struct cpu_context {
-       gp_regs_t gpregs_ctx;
-       el3_state_t el3state_ctx;
-       el1_sys_regs_t sysregs_ctx;
-#if CTX_INCLUDE_FPREGS
-       fp_regs_t fpregs_ctx;
-#endif
-} cpu_context_t;
-
-/* Macros to access members of the 'cpu_context_t' structure */
-#define get_el3state_ctx(h)    (&((cpu_context_t *) h)->el3state_ctx)
-#if CTX_INCLUDE_FPREGS
-#define get_fpregs_ctx(h)      (&((cpu_context_t *) h)->fpregs_ctx)
-#endif
-#define get_sysregs_ctx(h)     (&((cpu_context_t *) h)->sysregs_ctx)
-#define get_gpregs_ctx(h)      (&((cpu_context_t *) h)->gpregs_ctx)
-
-/*
- * Compile time assertions related to the 'cpu_context' structure to
- * ensure that the assembler and the compiler view of the offsets of
- * the structure members is the same.
- */
-CASSERT(CTX_GPREGS_OFFSET == __builtin_offsetof(cpu_context_t, gpregs_ctx), \
-       assert_core_context_gp_offset_mismatch);
-CASSERT(CTX_SYSREGS_OFFSET == __builtin_offsetof(cpu_context_t, sysregs_ctx), \
-       assert_core_context_sys_offset_mismatch);
-#if CTX_INCLUDE_FPREGS
-CASSERT(CTX_FPREGS_OFFSET == __builtin_offsetof(cpu_context_t, fpregs_ctx), \
-       assert_core_context_fp_offset_mismatch);
-#endif
-CASSERT(CTX_EL3STATE_OFFSET == __builtin_offsetof(cpu_context_t, el3state_ctx), \
-       assert_core_context_el3state_offset_mismatch);
-
-/*
- * Helper macro to set the general purpose registers that correspond to
- * parameters in an aapcs_64 call i.e. x0-x7
- */
-#define set_aapcs_args0(ctx, x0)                               do {    \
-               write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0, x0);   \
-       } while (0)
-#define set_aapcs_args1(ctx, x0, x1)                           do {    \
-               write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X1, x1);   \
-               set_aapcs_args0(ctx, x0);                               \
-       } while (0)
-#define set_aapcs_args2(ctx, x0, x1, x2)                       do {    \
-               write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X2, x2);   \
-               set_aapcs_args1(ctx, x0, x1);                           \
-       } while (0)
-#define set_aapcs_args3(ctx, x0, x1, x2, x3)                   do {    \
-               write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X3, x3);   \
-               set_aapcs_args2(ctx, x0, x1, x2);                       \
-       } while (0)
-#define set_aapcs_args4(ctx, x0, x1, x2, x3, x4)               do {    \
-               write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X4, x4);   \
-               set_aapcs_args3(ctx, x0, x1, x2, x3);                   \
-       } while (0)
-#define set_aapcs_args5(ctx, x0, x1, x2, x3, x4, x5)           do {    \
-               write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X5, x5);   \
-               set_aapcs_args4(ctx, x0, x1, x2, x3, x4);               \
-       } while (0)
-#define set_aapcs_args6(ctx, x0, x1, x2, x3, x4, x5, x6)       do {    \
-               write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X6, x6);   \
-               set_aapcs_args5(ctx, x0, x1, x2, x3, x4, x5);           \
-       } while (0)
-#define set_aapcs_args7(ctx, x0, x1, x2, x3, x4, x5, x6, x7)   do {    \
-               write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X7, x7);   \
-               set_aapcs_args6(ctx, x0, x1, x2, x3, x4, x5, x6);       \
-       } while (0)
-
-/*******************************************************************************
- * Function prototypes
- ******************************************************************************/
-void el1_sysregs_context_save(el1_sys_regs_t *regs);
-void el1_sysregs_context_restore(el1_sys_regs_t *regs);
-#if CTX_INCLUDE_FPREGS
-void fpregs_context_save(fp_regs_t *regs);
-void fpregs_context_restore(fp_regs_t *regs);
-#endif
-
-
-#undef CTX_SYSREG_ALL
-#if CTX_INCLUDE_FPREGS
-#undef CTX_FPREG_ALL
-#endif
-#undef CTX_GPREG_ALL
-#undef CTX_EL3STATE_ALL
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __CONTEXT_H__ */
diff --git a/include/common/context_mgmt.h b/include/common/context_mgmt.h
deleted file mode 100644 (file)
index 8a38ee5..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __CM_H__
-#define __CM_H__
-
-#include <arch.h>
-#include <bl_common.h>
-
-/*******************************************************************************
- * Forward declarations
- ******************************************************************************/
-struct entry_point_info;
-
-/*******************************************************************************
- * Function & variable prototypes
- ******************************************************************************/
-void cm_init(void);
-void *cm_get_context_by_mpidr(uint64_t mpidr,
-                             uint32_t security_state) __deprecated;
-void cm_set_context_by_mpidr(uint64_t mpidr,
-                            void *context,
-                            uint32_t security_state) __deprecated;
-void *cm_get_context_by_index(unsigned int cpu_idx,
-                             unsigned int security_state);
-void cm_set_context_by_index(unsigned int cpu_idx,
-                            void *context,
-                            unsigned int security_state);
-void *cm_get_context(uint32_t security_state);
-void cm_set_context(void *context, uint32_t security_state);
-void cm_init_context(uint64_t mpidr,
-                    const struct entry_point_info *ep) __deprecated;
-void cm_init_my_context(const struct entry_point_info *ep);
-void cm_init_context_by_index(unsigned int cpu_idx,
-                             const struct entry_point_info *ep);
-void cm_prepare_el3_exit(uint32_t security_state);
-void cm_el1_sysregs_context_save(uint32_t security_state);
-void cm_el1_sysregs_context_restore(uint32_t security_state);
-void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint);
-void cm_set_elr_spsr_el3(uint32_t security_state,
-                       uintptr_t entrypoint, uint32_t spsr);
-void cm_write_scr_el3_bit(uint32_t security_state,
-                         uint32_t bit_pos,
-                         uint32_t value);
-void cm_set_next_eret_context(uint32_t security_state);
-uint32_t cm_get_scr_el3(uint32_t security_state);
-
-/* Inline definitions */
-
-/*******************************************************************************
- * This function programs the context used for exception return. It
- * initializes SP_EL3 to point to a 'cpu_context' instance set up for the
- * required security state.
- ******************************************************************************/
-static inline void cm_set_next_context(void *context)
-{
-#if DEBUG
-       uint64_t sp_mode;
-
-       /*
-        * Check that this function is called with SP_EL0 as the stack
-        * pointer
-        */
-       __asm__ volatile("mrs   %0, SPSel\n"
-                        : "=r" (sp_mode));
-
-       assert(sp_mode == MODE_SP_EL0);
-#endif
-
-       __asm__ volatile("msr   spsel, #1\n"
-                        "mov   sp, %0\n"
-                        "msr   spsel, #0\n"
-                        : : "r" (context));
-}
-#endif /* __CM_H__ */
diff --git a/include/common/runtime_svc.h b/include/common/runtime_svc.h
new file mode 100644 (file)
index 0000000..adafcee
--- /dev/null
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RUNTIME_SVC_H__
+#define __RUNTIME_SVC_H__
+
+#include <bl_common.h>         /* to include exception types */
+#include <smcc_helpers.h>      /* to include SMCC definitions */
+
+
+/*******************************************************************************
+ * Structure definition, typedefs & constants for the runtime service framework
+ ******************************************************************************/
+
+/*
+ * Constants to allow the assembler to access a runtime service
+ * descriptor
+ */
+#define RT_SVC_SIZE_LOG2       5
+#define SIZEOF_RT_SVC_DESC     (1 << RT_SVC_SIZE_LOG2)
+#define RT_SVC_DESC_INIT       16
+#define RT_SVC_DESC_HANDLE     24
+
+/*
+ * The function identifier has 6 bits for the owning entity number and a
+ * single bit for the type of SMC call. Taken together, these values
+ * limit the maximum number of runtime services to 128.
+ */
+#define MAX_RT_SVCS            128
+
+#ifndef __ASSEMBLY__
+
+/* Prototype for runtime service initializing function */
+typedef int32_t (*rt_svc_init_t)(void);
+
+/*
+ * Prototype for runtime service SMC handler function. x0 (SMC Function ID) to
+ * x4 are as passed by the caller. The rest of the SMC arguments and the
+ * context can be accessed using the handle pointer. The cookie parameter
+ * is reserved for future use.
+ */
+typedef uintptr_t (*rt_svc_handle_t)(uint32_t smc_fid,
+                                 u_register_t x1,
+                                 u_register_t x2,
+                                 u_register_t x3,
+                                 u_register_t x4,
+                                 void *cookie,
+                                 void *handle,
+                                 u_register_t flags);
+typedef struct rt_svc_desc {
+       uint8_t start_oen;
+       uint8_t end_oen;
+       uint8_t call_type;
+       const char *name;
+       rt_svc_init_t init;
+       rt_svc_handle_t handle;
+} rt_svc_desc_t;
+
+/*
+ * Convenience macro to declare a service descriptor
+ */
+#define DECLARE_RT_SVC(_name, _start, _end, _type, _setup, _smch) \
+       static const rt_svc_desc_t __svc_desc_ ## _name \
+               __section("rt_svc_descs") __used = { \
+                       .start_oen = _start, \
+                       .end_oen = _end, \
+                       .call_type = _type, \
+                       .name = #_name, \
+                       .init = _setup, \
+                       .handle = _smch }
+
+/*
+ * Compile time assertions related to the 'rt_svc_desc' structure to:
+ * 1. ensure that the assembler and the compiler view of the size
+ *    of the structure are the same.
+ * 2. ensure that the assembler and the compiler see the initialisation
+ *    routine at the same offset.
+ * 3. ensure that the assembler and the compiler see the handler
+ *    routine at the same offset.
+ */
+CASSERT((sizeof(rt_svc_desc_t) == SIZEOF_RT_SVC_DESC), \
+       assert_sizeof_rt_svc_desc_mismatch);
+CASSERT(RT_SVC_DESC_INIT == __builtin_offsetof(rt_svc_desc_t, init), \
+       assert_rt_svc_desc_init_offset_mismatch);
+CASSERT(RT_SVC_DESC_HANDLE == __builtin_offsetof(rt_svc_desc_t, handle), \
+       assert_rt_svc_desc_handle_offset_mismatch);
+
+
+/*
+ * This macro combines the call type and the owning entity number corresponding
+ * to a runtime service to generate a unique owning entity number. This unique
+ * oen is used to access an entry in the 'rt_svc_descs_indices' array. The entry
+ * contains the index of the service descriptor in the 'rt_svc_descs' array.
+ */
+#define get_unique_oen(oen, call_type) ((oen & FUNCID_OEN_MASK) |      \
+                                       ((call_type & FUNCID_TYPE_MASK) \
+                                        << FUNCID_OEN_WIDTH))
+
+/*******************************************************************************
+ * Function & variable prototypes
+ ******************************************************************************/
+void runtime_svc_init(void);
+extern uintptr_t __RT_SVC_DESCS_START__;
+extern uintptr_t __RT_SVC_DESCS_END__;
+void init_crash_reporting(void);
+
+#endif /*__ASSEMBLY__*/
+#endif /* __RUNTIME_SVC_H__ */
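As an illustration of the framework above, here is a minimal sketch of how a runtime service registers itself with `DECLARE_RT_SVC`. The service name, OEN range and handler body are invented for the example and are not part of this patch; the SMC_* and OEN_* identifiers are assumed to come from `smcc.h`/`smcc_helpers.h`:

    #include <runtime_svc.h>
    #include <smcc_helpers.h>

    /* Hypothetical initialisation hook; a non-negative return means success */
    static int32_t demo_svc_setup(void)
    {
        return 0;
    }

    /* Hypothetical SMC handler matching the rt_svc_handle_t prototype */
    static uintptr_t demo_svc_smc_handler(uint32_t smc_fid,
                                          u_register_t x1, u_register_t x2,
                                          u_register_t x3, u_register_t x4,
                                          void *cookie, void *handle,
                                          u_register_t flags)
    {
        /* Report an unknown function ID back in x0 via the saved context */
        SMC_RET1(handle, SMC_UNK);
    }

    /* Place a descriptor in the 'rt_svc_descs' section for the OEM OEN range */
    DECLARE_RT_SVC(demo_svc, OEN_OEM_START, OEN_OEM_END, SMC_TYPE_FAST,
                   demo_svc_setup, demo_svc_smc_handler);

At link time the descriptor lands between __RT_SVC_DESCS_START__ and __RT_SVC_DESCS_END__, and runtime_svc_init() indexes it via the unique OEN computed by get_unique_oen().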
diff --git a/include/lib/el3_runtime/aarch64/context.h b/include/lib/el3_runtime/aarch64/context.h
new file mode 100644 (file)
index 0000000..b528c03
--- /dev/null
@@ -0,0 +1,351 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __CONTEXT_H__
+#define __CONTEXT_H__
+
+/*******************************************************************************
+ * Constants that allow assembler code to access members of the 'gp_regs'
+ * structure at their correct offsets.
+ ******************************************************************************/
+#define CTX_GPREGS_OFFSET      0x0
+#define CTX_GPREG_X0           0x0
+#define CTX_GPREG_X1           0x8
+#define CTX_GPREG_X2           0x10
+#define CTX_GPREG_X3           0x18
+#define CTX_GPREG_X4           0x20
+#define CTX_GPREG_X5           0x28
+#define CTX_GPREG_X6           0x30
+#define CTX_GPREG_X7           0x38
+#define CTX_GPREG_X8           0x40
+#define CTX_GPREG_X9           0x48
+#define CTX_GPREG_X10          0x50
+#define CTX_GPREG_X11          0x58
+#define CTX_GPREG_X12          0x60
+#define CTX_GPREG_X13          0x68
+#define CTX_GPREG_X14          0x70
+#define CTX_GPREG_X15          0x78
+#define CTX_GPREG_X16          0x80
+#define CTX_GPREG_X17          0x88
+#define CTX_GPREG_X18          0x90
+#define CTX_GPREG_X19          0x98
+#define CTX_GPREG_X20          0xa0
+#define CTX_GPREG_X21          0xa8
+#define CTX_GPREG_X22          0xb0
+#define CTX_GPREG_X23          0xb8
+#define CTX_GPREG_X24          0xc0
+#define CTX_GPREG_X25          0xc8
+#define CTX_GPREG_X26          0xd0
+#define CTX_GPREG_X27          0xd8
+#define CTX_GPREG_X28          0xe0
+#define CTX_GPREG_X29          0xe8
+#define CTX_GPREG_LR           0xf0
+#define CTX_GPREG_SP_EL0       0xf8
+#define CTX_GPREGS_END         0x100
+
+/*******************************************************************************
+ * Constants that allow assembler code to access members of the 'el3_state'
+ * structure at their correct offsets. Note that some of the registers are only
+ * 32-bits wide but are stored as 64-bit values for convenience
+ ******************************************************************************/
+#define CTX_EL3STATE_OFFSET    (CTX_GPREGS_OFFSET + CTX_GPREGS_END)
+#define CTX_SCR_EL3            0x0
+#define CTX_RUNTIME_SP         0x8
+#define CTX_SPSR_EL3           0x10
+#define CTX_ELR_EL3            0x18
+#define CTX_EL3STATE_END       0x20
+
+/*******************************************************************************
+ * Constants that allow assembler code to access members of the
+ * 'el1_sys_regs' structure at their correct offsets. Note that some of the
+ * registers are only 32-bits wide but are stored as 64-bit values for
+ * convenience
+ ******************************************************************************/
+#define CTX_SYSREGS_OFFSET     (CTX_EL3STATE_OFFSET + CTX_EL3STATE_END)
+#define CTX_SPSR_EL1           0x0
+#define CTX_ELR_EL1            0x8
+#define CTX_SCTLR_EL1          0x10
+#define CTX_ACTLR_EL1          0x18
+#define CTX_CPACR_EL1          0x20
+#define CTX_CSSELR_EL1         0x28
+#define CTX_SP_EL1             0x30
+#define CTX_ESR_EL1            0x38
+#define CTX_TTBR0_EL1          0x40
+#define CTX_TTBR1_EL1          0x48
+#define CTX_MAIR_EL1           0x50
+#define CTX_AMAIR_EL1          0x58
+#define CTX_TCR_EL1            0x60
+#define CTX_TPIDR_EL1          0x68
+#define CTX_TPIDR_EL0          0x70
+#define CTX_TPIDRRO_EL0                0x78
+#define CTX_PAR_EL1            0x80
+#define CTX_FAR_EL1            0x88
+#define CTX_AFSR0_EL1          0x90
+#define CTX_AFSR1_EL1          0x98
+#define CTX_CONTEXTIDR_EL1     0xa0
+#define CTX_VBAR_EL1           0xa8
+
+/*
+ * If the platform is AArch64-only, there is no need to save and restore these
+ * AArch32 registers.
+ */
+#if CTX_INCLUDE_AARCH32_REGS
+#define CTX_SPSR_ABT           0xb0
+#define CTX_SPSR_UND           0xb8
+#define CTX_SPSR_IRQ           0xc0
+#define CTX_SPSR_FIQ           0xc8
+#define CTX_DACR32_EL2         0xd0
+#define CTX_IFSR32_EL2         0xd8
+#define CTX_FP_FPEXC32_EL2     0xe0
+#define CTX_TIMER_SYSREGS_OFF          0xf0 /* Align to the next 16 byte boundary */
+#else
+#define CTX_TIMER_SYSREGS_OFF          0xb0
+#endif /* CTX_INCLUDE_AARCH32_REGS */
+
+/*
+ * If the timer registers aren't saved and restored, we don't have to reserve
+ * space for them in the context
+ */
+#if NS_TIMER_SWITCH
+#define CTX_CNTP_CTL_EL0       (CTX_TIMER_SYSREGS_OFF + 0x0)
+#define CTX_CNTP_CVAL_EL0      (CTX_TIMER_SYSREGS_OFF + 0x8)
+#define CTX_CNTV_CTL_EL0       (CTX_TIMER_SYSREGS_OFF + 0x10)
+#define CTX_CNTV_CVAL_EL0      (CTX_TIMER_SYSREGS_OFF + 0x18)
+#define CTX_CNTKCTL_EL1                (CTX_TIMER_SYSREGS_OFF + 0x20)
+#define CTX_SYSREGS_END                (CTX_TIMER_SYSREGS_OFF + 0x30) /* Align to the next 16 byte boundary */
+#else
+#define CTX_SYSREGS_END                CTX_TIMER_SYSREGS_OFF
+#endif /* NS_TIMER_SWITCH */
+
+/*******************************************************************************
+ * Constants that allow assembler code to access members of the 'fp_regs'
+ * structure at their correct offsets.
+ ******************************************************************************/
+#if CTX_INCLUDE_FPREGS
+#define CTX_FPREGS_OFFSET      (CTX_SYSREGS_OFFSET + CTX_SYSREGS_END)
+#define CTX_FP_Q0              0x0
+#define CTX_FP_Q1              0x10
+#define CTX_FP_Q2              0x20
+#define CTX_FP_Q3              0x30
+#define CTX_FP_Q4              0x40
+#define CTX_FP_Q5              0x50
+#define CTX_FP_Q6              0x60
+#define CTX_FP_Q7              0x70
+#define CTX_FP_Q8              0x80
+#define CTX_FP_Q9              0x90
+#define CTX_FP_Q10             0xa0
+#define CTX_FP_Q11             0xb0
+#define CTX_FP_Q12             0xc0
+#define CTX_FP_Q13             0xd0
+#define CTX_FP_Q14             0xe0
+#define CTX_FP_Q15             0xf0
+#define CTX_FP_Q16             0x100
+#define CTX_FP_Q17             0x110
+#define CTX_FP_Q18             0x120
+#define CTX_FP_Q19             0x130
+#define CTX_FP_Q20             0x140
+#define CTX_FP_Q21             0x150
+#define CTX_FP_Q22             0x160
+#define CTX_FP_Q23             0x170
+#define CTX_FP_Q24             0x180
+#define CTX_FP_Q25             0x190
+#define CTX_FP_Q26             0x1a0
+#define CTX_FP_Q27             0x1b0
+#define CTX_FP_Q28             0x1c0
+#define CTX_FP_Q29             0x1d0
+#define CTX_FP_Q30             0x1e0
+#define CTX_FP_Q31             0x1f0
+#define CTX_FP_FPSR            0x200
+#define CTX_FP_FPCR            0x208
+#define CTX_FPREGS_END         0x210
+#endif
+
+#ifndef __ASSEMBLY__
+
+#include <cassert.h>
+#include <platform_def.h>      /* for CACHE_WRITEBACK_GRANULE */
+#include <stdint.h>
+
+/*
+ * Common constants to help define the 'cpu_context' structure and its
+ * members below.
+ */
+#define DWORD_SHIFT            3
+#define DEFINE_REG_STRUCT(name, num_regs)      \
+       typedef struct name {                   \
+               uint64_t _regs[num_regs];       \
+       }  __aligned(16) name##_t
+
+/* Constants to determine the size of individual context structures */
+#define CTX_GPREG_ALL          (CTX_GPREGS_END >> DWORD_SHIFT)
+#define CTX_SYSREG_ALL         (CTX_SYSREGS_END >> DWORD_SHIFT)
+#if CTX_INCLUDE_FPREGS
+#define CTX_FPREG_ALL          (CTX_FPREGS_END >> DWORD_SHIFT)
+#endif
+#define CTX_EL3STATE_ALL       (CTX_EL3STATE_END >> DWORD_SHIFT)
+
+/*
+ * AArch64 general purpose register context structure. Usually only x0-x18
+ * and lr are saved, as the compiler is expected to preserve the callee-saved
+ * registers if the C runtime uses them, and the assembler does not touch
+ * them. However, in the case of a world switch during exception handling,
+ * the callee-saved registers need to be saved as well.
+ */
+DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL);
+
+/*
+ * AArch64 EL1 system register context structure for preserving the
+ * architectural state during switches from one security state to
+ * another in EL1.
+ */
+DEFINE_REG_STRUCT(el1_sys_regs, CTX_SYSREG_ALL);
+
+/*
+ * AArch64 floating point register context structure for preserving
+ * the floating point state during switches from one security state to
+ * another.
+ */
+#if CTX_INCLUDE_FPREGS
+DEFINE_REG_STRUCT(fp_regs, CTX_FPREG_ALL);
+#endif
+
+/*
+ * Miscellaneous registers used by EL3 firmware to maintain its state
+ * across exception entries and exits
+ */
+DEFINE_REG_STRUCT(el3_state, CTX_EL3STATE_ALL);
+
+/*
+ * Macros to access members of any of the above structures using their
+ * offsets
+ */
+#define read_ctx_reg(ctx, offset)      ((ctx)->_regs[offset >> DWORD_SHIFT])
+#define write_ctx_reg(ctx, offset, val)        (((ctx)->_regs[offset >> DWORD_SHIFT]) \
+                                        = val)
+
+/*
+ * Top-level context structure which is used by EL3 firmware to
+ * preserve the state of a core at EL1 in one of the two security
+ * states and save enough EL3 metadata to be able to return to that
+ * EL and security state. The context management library will be used
+ * to ensure that SP_EL3 always points to an instance of this
+ * structure at exception entry and exit. Each instance will
+ * correspond to either the secure or the non-secure state.
+ */
+typedef struct cpu_context {
+       gp_regs_t gpregs_ctx;
+       el3_state_t el3state_ctx;
+       el1_sys_regs_t sysregs_ctx;
+#if CTX_INCLUDE_FPREGS
+       fp_regs_t fpregs_ctx;
+#endif
+} cpu_context_t;
+
+/* Macros to access members of the 'cpu_context_t' structure */
+#define get_el3state_ctx(h)    (&((cpu_context_t *) h)->el3state_ctx)
+#if CTX_INCLUDE_FPREGS
+#define get_fpregs_ctx(h)      (&((cpu_context_t *) h)->fpregs_ctx)
+#endif
+#define get_sysregs_ctx(h)     (&((cpu_context_t *) h)->sysregs_ctx)
+#define get_gpregs_ctx(h)      (&((cpu_context_t *) h)->gpregs_ctx)
+
+/*
+ * Compile time assertions related to the 'cpu_context' structure to
+ * ensure that the assembler and the compiler view of the offsets of
+ * the structure members is the same.
+ */
+CASSERT(CTX_GPREGS_OFFSET == __builtin_offsetof(cpu_context_t, gpregs_ctx), \
+       assert_core_context_gp_offset_mismatch);
+CASSERT(CTX_SYSREGS_OFFSET == __builtin_offsetof(cpu_context_t, sysregs_ctx), \
+       assert_core_context_sys_offset_mismatch);
+#if CTX_INCLUDE_FPREGS
+CASSERT(CTX_FPREGS_OFFSET == __builtin_offsetof(cpu_context_t, fpregs_ctx), \
+       assert_core_context_fp_offset_mismatch);
+#endif
+CASSERT(CTX_EL3STATE_OFFSET == __builtin_offsetof(cpu_context_t, el3state_ctx), \
+       assert_core_context_el3state_offset_mismatch);
+
+/*
+ * Helper macro to set the general purpose registers that correspond to
+ * parameters in an aapcs_64 call i.e. x0-x7
+ */
+#define set_aapcs_args0(ctx, x0)                               do {    \
+               write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0, x0);   \
+       } while (0)
+#define set_aapcs_args1(ctx, x0, x1)                           do {    \
+               write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X1, x1);   \
+               set_aapcs_args0(ctx, x0);                               \
+       } while (0)
+#define set_aapcs_args2(ctx, x0, x1, x2)                       do {    \
+               write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X2, x2);   \
+               set_aapcs_args1(ctx, x0, x1);                           \
+       } while (0)
+#define set_aapcs_args3(ctx, x0, x1, x2, x3)                   do {    \
+               write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X3, x3);   \
+               set_aapcs_args2(ctx, x0, x1, x2);                       \
+       } while (0)
+#define set_aapcs_args4(ctx, x0, x1, x2, x3, x4)               do {    \
+               write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X4, x4);   \
+               set_aapcs_args3(ctx, x0, x1, x2, x3);                   \
+       } while (0)
+#define set_aapcs_args5(ctx, x0, x1, x2, x3, x4, x5)           do {    \
+               write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X5, x5);   \
+               set_aapcs_args4(ctx, x0, x1, x2, x3, x4);               \
+       } while (0)
+#define set_aapcs_args6(ctx, x0, x1, x2, x3, x4, x5, x6)       do {    \
+               write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X6, x6);   \
+               set_aapcs_args5(ctx, x0, x1, x2, x3, x4, x5);           \
+       } while (0)
+#define set_aapcs_args7(ctx, x0, x1, x2, x3, x4, x5, x6, x7)   do {    \
+               write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X7, x7);   \
+               set_aapcs_args6(ctx, x0, x1, x2, x3, x4, x5, x6);       \
+       } while (0)
+
+/*******************************************************************************
+ * Function prototypes
+ ******************************************************************************/
+void el1_sysregs_context_save(el1_sys_regs_t *regs);
+void el1_sysregs_context_restore(el1_sys_regs_t *regs);
+#if CTX_INCLUDE_FPREGS
+void fpregs_context_save(fp_regs_t *regs);
+void fpregs_context_restore(fp_regs_t *regs);
+#endif
+
+
+#undef CTX_SYSREG_ALL
+#if CTX_INCLUDE_FPREGS
+#undef CTX_FPREG_ALL
+#endif
+#undef CTX_GPREG_ALL
+#undef CTX_EL3STATE_ALL
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __CONTEXT_H__ */
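As a usage sketch (assuming a valid cpu_context_t pointer, for example one obtained from the context management library), the accessor macros above let C code read and write saved registers purely by offset; the function name below is hypothetical:

    /* Program an entry point and its first AAPCS64 argument into a context */
    static void demo_set_entry(cpu_context_t *ctx, uintptr_t ep, uint64_t arg)
    {
        /* ELR_EL3 lives in the el3_state member; write it by offset */
        write_ctx_reg(get_el3state_ctx(ctx), CTX_ELR_EL3, ep);

        /* x0 carries the first argument of an aapcs_64 call */
        set_aapcs_args0(ctx, arg);
    }

The CASSERT()s above guarantee that these offsets agree with the assembler's view, so context.S and C code can safely manipulate the same cpu_context_t.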
diff --git a/include/lib/el3_runtime/context_mgmt.h b/include/lib/el3_runtime/context_mgmt.h
new file mode 100644 (file)
index 0000000..672ea11
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __CM_H__
+#define __CM_H__
+
+#include <arch.h>
+
+/*******************************************************************************
+ * Forward declarations
+ ******************************************************************************/
+struct entry_point_info;
+
+/*******************************************************************************
+ * Function & variable prototypes
+ ******************************************************************************/
+void cm_init(void);
+void *cm_get_context_by_mpidr(uint64_t mpidr,
+                             uint32_t security_state) __deprecated;
+void cm_set_context_by_mpidr(uint64_t mpidr,
+                            void *context,
+                            uint32_t security_state) __deprecated;
+void *cm_get_context_by_index(unsigned int cpu_idx,
+                             unsigned int security_state);
+void cm_set_context_by_index(unsigned int cpu_idx,
+                            void *context,
+                            unsigned int security_state);
+void *cm_get_context(uint32_t security_state);
+void cm_set_context(void *context, uint32_t security_state);
+void cm_init_context(uint64_t mpidr,
+                    const struct entry_point_info *ep) __deprecated;
+void cm_init_my_context(const struct entry_point_info *ep);
+void cm_init_context_by_index(unsigned int cpu_idx,
+                             const struct entry_point_info *ep);
+void cm_prepare_el3_exit(uint32_t security_state);
+void cm_el1_sysregs_context_save(uint32_t security_state);
+void cm_el1_sysregs_context_restore(uint32_t security_state);
+void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint);
+void cm_set_elr_spsr_el3(uint32_t security_state,
+                       uintptr_t entrypoint, uint32_t spsr);
+void cm_write_scr_el3_bit(uint32_t security_state,
+                         uint32_t bit_pos,
+                         uint32_t value);
+void cm_set_next_eret_context(uint32_t security_state);
+uint32_t cm_get_scr_el3(uint32_t security_state);
+
+/* Inline definitions */
+
+/*******************************************************************************
+ * This function programs the context used for exception return. It
+ * initializes SP_EL3 to point to a 'cpu_context' instance set up for the
+ * required security state.
+ ******************************************************************************/
+static inline void cm_set_next_context(void *context)
+{
+#if DEBUG
+       uint64_t sp_mode;
+
+       /*
+        * Check that this function is called with SP_EL0 as the stack
+        * pointer
+        */
+       __asm__ volatile("mrs   %0, SPSel\n"
+                        : "=r" (sp_mode));
+
+       assert(sp_mode == MODE_SP_EL0);
+#endif
+
+       __asm__ volatile("msr   spsel, #1\n"
+                        "mov   sp, %0\n"
+                        "msr   spsel, #0\n"
+                        : : "r" (context));
+}
+#endif /* __CM_H__ */
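To make the intended call sequence concrete, here is a hedged sketch of the world switch a secure payload dispatcher might perform with the prototypes above (SECURE and NON_SECURE are assumed to be the security state constants from bl_common.h; the function name is invented):

    /* Hand over from the secure world to the non-secure world */
    static void demo_world_switch(void)
    {
        /* Save the outgoing world's EL1 system registers */
        cm_el1_sysregs_context_save(SECURE);

        /* Restore the incoming world's EL1 system registers */
        cm_el1_sysregs_context_restore(NON_SECURE);

        /* Point SP_EL3 at the non-secure context for the next ERET */
        cm_set_next_eret_context(NON_SECURE);
    }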
diff --git a/include/lib/el3_runtime/cpu_data.h b/include/lib/el3_runtime/cpu_data.h
new file mode 100644 (file)
index 0000000..4fc801b
--- /dev/null
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __CPU_DATA_H__
+#define __CPU_DATA_H__
+
+/* Offsets for the cpu_data structure */
+#define CPU_DATA_CRASH_BUF_OFFSET      0x18
+#if CRASH_REPORTING
+#define CPU_DATA_LOG2SIZE              7
+#else
+#define CPU_DATA_LOG2SIZE              6
+#endif
+/* Need enough space in the crash buffer to save 8 registers */
+#define CPU_DATA_CRASH_BUF_SIZE                64
+#define CPU_DATA_CPU_OPS_PTR           0x10
+
+#ifndef __ASSEMBLY__
+
+#include <arch_helpers.h>
+#include <cassert.h>
+#include <platform_def.h>
+#include <psci.h>
+#include <stdint.h>
+
+/* Offsets for the cpu_data structure */
+#define CPU_DATA_PSCI_LOCK_OFFSET      __builtin_offsetof\
+               (cpu_data_t, psci_svc_cpu_data.pcpu_bakery_info)
+
+#if PLAT_PCPU_DATA_SIZE
+#define CPU_DATA_PLAT_PCPU_OFFSET      __builtin_offsetof\
+               (cpu_data_t, platform_cpu_data)
+#endif
+
+/*******************************************************************************
+ * Function & variable prototypes
+ ******************************************************************************/
+
+/*******************************************************************************
+ * Cache of frequently used per-cpu data:
+ *   Pointers to non-secure and secure security state contexts
+ *   Address of the crash stack
+ * It is aligned to the cache line boundary to allow efficient concurrent
+ * manipulation of these pointers on different cpus
+ *
+ * TODO: Add other commonly used variables to this (tf_issues#90)
+ *
+ * The data structure and the _cpu_data accessors should not be used directly
+ * by components that have per-cpu members. The member access macros should be
+ * used for this.
+ ******************************************************************************/
+typedef struct cpu_data {
+       void *cpu_context[2];
+       uintptr_t cpu_ops_ptr;
+#if CRASH_REPORTING
+       u_register_t crash_buf[CPU_DATA_CRASH_BUF_SIZE >> 3];
+#endif
+       struct psci_cpu_data psci_svc_cpu_data;
+#if PLAT_PCPU_DATA_SIZE
+       uint8_t platform_cpu_data[PLAT_PCPU_DATA_SIZE];
+#endif
+} __aligned(CACHE_WRITEBACK_GRANULE) cpu_data_t;
+
+#if CRASH_REPORTING
+/* verify assembler offsets match data structures */
+CASSERT(CPU_DATA_CRASH_BUF_OFFSET == __builtin_offsetof
+       (cpu_data_t, crash_buf),
+       assert_cpu_data_crash_stack_offset_mismatch);
+#endif
+
+CASSERT((1 << CPU_DATA_LOG2SIZE) == sizeof(cpu_data_t),
+       assert_cpu_data_log2size_mismatch);
+
+CASSERT(CPU_DATA_CPU_OPS_PTR == __builtin_offsetof
+               (cpu_data_t, cpu_ops_ptr),
+               assert_cpu_data_cpu_ops_ptr_offset_mismatch);
+
+struct cpu_data *_cpu_data_by_index(uint32_t cpu_index);
+
+/* Return the cpu_data structure for the current CPU. */
+static inline struct cpu_data *_cpu_data(void)
+{
+       return (cpu_data_t *)read_tpidr_el3();
+}
+
+
+/**************************************************************************
+ * APIs for initialising and accessing per-cpu data
+ *************************************************************************/
+
+void init_cpu_data_ptr(void);
+void init_cpu_ops(void);
+
+#define get_cpu_data(_m)                  _cpu_data()->_m
+#define set_cpu_data(_m, _v)              _cpu_data()->_m = _v
+#define get_cpu_data_by_index(_ix, _m)    _cpu_data_by_index(_ix)->_m
+#define set_cpu_data_by_index(_ix, _m, _v) _cpu_data_by_index(_ix)->_m = _v
+
+#define flush_cpu_data(_m)        flush_dcache_range((uintptr_t)         \
+                                                     &(_cpu_data()->_m), \
+                                                     sizeof(_cpu_data()->_m))
+#define inv_cpu_data(_m)          inv_dcache_range((uintptr_t)           \
+                                                     &(_cpu_data()->_m), \
+                                                     sizeof(_cpu_data()->_m))
+#define flush_cpu_data_by_index(_ix, _m)       \
+                                  flush_dcache_range((uintptr_t)         \
+                                        &(_cpu_data_by_index(_ix)->_m),  \
+                                        sizeof(_cpu_data_by_index(_ix)->_m))
+
+
+#endif /* __ASSEMBLY__ */
+#endif /* __CPU_DATA_H__ */
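
For illustration only (not part of this patch): a minimal sketch of how BL31
code is expected to use the accessor macros above. The
`psci_svc_cpu_data.local_state` member is real (see `psci.h` below); the
surrounding helper is hypothetical.

    #include <cpu_data.h>
    #include <psci.h>

    /* Hypothetical helper: record this CPU's local power state and clean
     * the cache line so that a CPU reading it with caches disabled sees
     * the update. set_cpu_data() dereferences the per-cpu pointer held in
     * TPIDR_EL3; flush_cpu_data() expands to a flush_dcache_range() over
     * just this member. */
    static void record_local_state(plat_local_state_t state)
    {
            set_cpu_data(psci_svc_cpu_data.local_state, state);
            flush_cpu_data(psci_svc_cpu_data.local_state);
    }
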
diff --git a/include/lib/psci/psci.h b/include/lib/psci/psci.h
new file mode 100644 (file)
index 0000000..b6d6d4e
--- /dev/null
@@ -0,0 +1,345 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PSCI_H__
+#define __PSCI_H__
+
+#include <bakery_lock.h>
+#include <platform_def.h>      /* for PLAT_NUM_PWR_DOMAINS */
+#if ENABLE_PLAT_COMPAT
+#include <psci_compat.h>
+#endif
+
+/*******************************************************************************
+ * Number of power domains whose state this PSCI implementation can track
+ ******************************************************************************/
+#ifdef PLAT_NUM_PWR_DOMAINS
+#define PSCI_NUM_PWR_DOMAINS   PLAT_NUM_PWR_DOMAINS
+#else
+#define PSCI_NUM_PWR_DOMAINS   (2 * PLATFORM_CORE_COUNT)
+#endif
+
+#define PSCI_NUM_NON_CPU_PWR_DOMAINS   (PSCI_NUM_PWR_DOMAINS - \
+                                        PLATFORM_CORE_COUNT)
+
+/* This is the power level corresponding to a CPU */
+#define PSCI_CPU_PWR_LVL       0
+
+/*
+ * The maximum power level supported by PSCI. Since PSCI CPU_SUSPEND
+ * uses the old power_state parameter format which has 2 bits to specify the
+ * power level, this constant is defined to be 3.
+ */
+#define PSCI_MAX_PWR_LVL       3
+
+/*******************************************************************************
+ * Defines for runtime services function ids
+ ******************************************************************************/
+#define PSCI_VERSION                   0x84000000
+#define PSCI_CPU_SUSPEND_AARCH32       0x84000001
+#define PSCI_CPU_SUSPEND_AARCH64       0xc4000001
+#define PSCI_CPU_OFF                   0x84000002
+#define PSCI_CPU_ON_AARCH32            0x84000003
+#define PSCI_CPU_ON_AARCH64            0xc4000003
+#define PSCI_AFFINITY_INFO_AARCH32     0x84000004
+#define PSCI_AFFINITY_INFO_AARCH64     0xc4000004
+#define PSCI_MIG_AARCH32               0x84000005
+#define PSCI_MIG_AARCH64               0xc4000005
+#define PSCI_MIG_INFO_TYPE             0x84000006
+#define PSCI_MIG_INFO_UP_CPU_AARCH32   0x84000007
+#define PSCI_MIG_INFO_UP_CPU_AARCH64   0xc4000007
+#define PSCI_SYSTEM_OFF                        0x84000008
+#define PSCI_SYSTEM_RESET              0x84000009
+#define PSCI_FEATURES                  0x8400000A
+#define PSCI_SYSTEM_SUSPEND_AARCH32    0x8400000E
+#define PSCI_SYSTEM_SUSPEND_AARCH64    0xc400000E
+#define PSCI_STAT_RESIDENCY_AARCH32    0x84000010
+#define PSCI_STAT_RESIDENCY_AARCH64    0xc4000010
+#define PSCI_STAT_COUNT_AARCH32                0x84000011
+#define PSCI_STAT_COUNT_AARCH64                0xc4000011
+
+/* Macro to help build the psci capabilities bitfield */
+#define define_psci_cap(x)             (1 << (x & 0x1f))
+
+/*
+ * Number of PSCI calls (above) implemented
+ */
+#if ENABLE_PSCI_STAT
+#define PSCI_NUM_CALLS                 22
+#else
+#define PSCI_NUM_CALLS                 18
+#endif
+
+/*******************************************************************************
+ * PSCI Migrate and friends
+ ******************************************************************************/
+#define PSCI_TOS_UP_MIG_CAP    0
+#define PSCI_TOS_NOT_UP_MIG_CAP        1
+#define PSCI_TOS_NOT_PRESENT_MP        2
+
+/*******************************************************************************
+ * PSCI CPU_SUSPEND 'power_state' parameter specific defines
+ ******************************************************************************/
+#define PSTATE_ID_SHIFT                0
+
+#if PSCI_EXTENDED_STATE_ID
+#define PSTATE_VALID_MASK      0xB0000000
+#define PSTATE_TYPE_SHIFT      30
+#define PSTATE_ID_MASK         0xfffffff
+#else
+#define PSTATE_VALID_MASK      0xFCFE0000
+#define PSTATE_TYPE_SHIFT      16
+#define PSTATE_PWR_LVL_SHIFT   24
+#define PSTATE_ID_MASK         0xffff
+#define PSTATE_PWR_LVL_MASK    0x3
+
+#define psci_get_pstate_pwrlvl(pstate) (((pstate) >> PSTATE_PWR_LVL_SHIFT) & \
+                                       PSTATE_PWR_LVL_MASK)
+#define psci_make_powerstate(state_id, type, pwrlvl) \
+                       (((state_id) & PSTATE_ID_MASK) << PSTATE_ID_SHIFT) |\
+                       (((type) & PSTATE_TYPE_MASK) << PSTATE_TYPE_SHIFT) |\
+                       (((pwrlvl) & PSTATE_PWR_LVL_MASK) << PSTATE_PWR_LVL_SHIFT)
+#endif /* PSCI_EXTENDED_STATE_ID */
+
+#define PSTATE_TYPE_STANDBY    0x0
+#define PSTATE_TYPE_POWERDOWN  0x1
+#define PSTATE_TYPE_MASK       0x1
+
+#define psci_get_pstate_id(pstate)     (((pstate) >> PSTATE_ID_SHIFT) & \
+                                       PSTATE_ID_MASK)
+#define psci_get_pstate_type(pstate)   (((pstate) >> PSTATE_TYPE_SHIFT) & \
+                                       PSTATE_TYPE_MASK)
+#define psci_check_power_state(pstate) ((pstate) & PSTATE_VALID_MASK)
+
+/*******************************************************************************
+ * PSCI CPU_FEATURES feature flag specific defines
+ ******************************************************************************/
+/* Features flags for CPU SUSPEND power state parameter format. Bits [1:1] */
+#define FF_PSTATE_SHIFT                1
+#define FF_PSTATE_ORIG         0
+#define FF_PSTATE_EXTENDED     1
+#if PSCI_EXTENDED_STATE_ID
+#define FF_PSTATE              FF_PSTATE_EXTENDED
+#else
+#define FF_PSTATE              FF_PSTATE_ORIG
+#endif
+
+/* Features flags for CPU SUSPEND OS Initiated mode support. Bits [0:0] */
+#define FF_MODE_SUPPORT_SHIFT          0
+#define FF_SUPPORTS_OS_INIT_MODE       1
+
+/*******************************************************************************
+ * PSCI version
+ ******************************************************************************/
+#define PSCI_MAJOR_VER         (1 << 16)
+#define PSCI_MINOR_VER         0x0
+
+/*******************************************************************************
+ * PSCI error codes
+ ******************************************************************************/
+#define PSCI_E_SUCCESS         0
+#define PSCI_E_NOT_SUPPORTED   -1
+#define PSCI_E_INVALID_PARAMS  -2
+#define PSCI_E_DENIED          -3
+#define PSCI_E_ALREADY_ON      -4
+#define PSCI_E_ON_PENDING      -5
+#define PSCI_E_INTERN_FAIL     -6
+#define PSCI_E_NOT_PRESENT     -7
+#define PSCI_E_DISABLED                -8
+#define PSCI_E_INVALID_ADDRESS -9
+
+#define PSCI_INVALID_MPIDR     ~((u_register_t)0)
+
+#ifndef __ASSEMBLY__
+
+#include <stdint.h>
+#include <types.h>
+
+/*
+ * These are the states reported by the PSCI_AFFINITY_INFO API for the specified
+ * CPU. The definitions of these states can be found in Section 5.7.1 in the
+ * PSCI specification (ARM DEN 0022C).
+ */
+typedef enum {
+       AFF_STATE_ON = 0,
+       AFF_STATE_OFF = 1,
+       AFF_STATE_ON_PENDING = 2
+} aff_info_state_t;
+
+/*
+ * Macro to represent invalid affinity level within PSCI.
+ */
+#define PSCI_INVALID_PWR_LVL   (PLAT_MAX_PWR_LVL + 1)
+
+/*
+ * Type for representing the local power state at a particular level.
+ */
+typedef uint8_t plat_local_state_t;
+
+/* The local state macro used to represent RUN state. */
+#define PSCI_LOCAL_STATE_RUN   0
+
+/*
+ * Macro to test whether the plat_local_state is RUN state
+ */
+#define is_local_state_run(plat_local_state) \
+                       ((plat_local_state) == PSCI_LOCAL_STATE_RUN)
+
+/*
+ * Macro to test whether the plat_local_state is RETENTION state
+ */
+#define is_local_state_retn(plat_local_state) \
+                       (((plat_local_state) > PSCI_LOCAL_STATE_RUN) && \
+                       ((plat_local_state) <= PLAT_MAX_RET_STATE))
+
+/*
+ * Macro to test whether the plat_local_state is OFF state
+ */
+#define is_local_state_off(plat_local_state) \
+                       (((plat_local_state) > PLAT_MAX_RET_STATE) && \
+                       ((plat_local_state) <= PLAT_MAX_OFF_STATE))
+
+/*****************************************************************************
+ * This data structure defines the representation of the power state parameter
+ * for its exchange between the generic PSCI code and the platform port. For
+ * example, it is used by the platform port to specify the requested power
+ * states during a power management operation. It is used by the generic code to
+ * inform the platform about the target power states that each level should
+ * enter.
+ ****************************************************************************/
+typedef struct psci_power_state {
+       /*
+        * The pwr_domain_state[] stores the local power state at each level
+        * for the CPU.
+        */
+       plat_local_state_t pwr_domain_state[PLAT_MAX_PWR_LVL + 1];
+} psci_power_state_t;
+
+/*******************************************************************************
+ * Structure used to store per-cpu information relevant to the PSCI service.
+ * It is populated in the per-cpu data array. In return we get a guarantee that
+ * this information will not reside on a cache line shared with another cpu.
+ ******************************************************************************/
+typedef struct psci_cpu_data {
+       /* State as seen by PSCI Affinity Info API */
+       aff_info_state_t aff_info_state;
+
+       /*
+        * Highest power level which takes part in a power management
+        * operation.
+        */
+       unsigned char target_pwrlvl;
+
+       /* The local power state of this CPU */
+       plat_local_state_t local_state;
+} psci_cpu_data_t;
+
+/*******************************************************************************
+ * Structure populated by platform specific code to export routines which
+ * perform common low level power management functions
+ ******************************************************************************/
+typedef struct plat_psci_ops {
+       void (*cpu_standby)(plat_local_state_t cpu_state);
+       int (*pwr_domain_on)(u_register_t mpidr);
+       void (*pwr_domain_off)(const psci_power_state_t *target_state);
+       void (*pwr_domain_suspend)(const psci_power_state_t *target_state);
+       void (*pwr_domain_on_finish)(const psci_power_state_t *target_state);
+       void (*pwr_domain_suspend_finish)(
+                               const psci_power_state_t *target_state);
+       void (*pwr_domain_pwr_down_wfi)(
+                               const psci_power_state_t *target_state) __dead2;
+       void (*system_off)(void) __dead2;
+       void (*system_reset)(void) __dead2;
+       int (*validate_power_state)(unsigned int power_state,
+                                   psci_power_state_t *req_state);
+       int (*validate_ns_entrypoint)(uintptr_t ns_entrypoint);
+       void (*get_sys_suspend_power_state)(
+                                   psci_power_state_t *req_state);
+       int (*get_pwr_lvl_state_idx)(plat_local_state_t pwr_domain_state,
+                                   int pwrlvl);
+       int (*translate_power_state_by_mpidr)(u_register_t mpidr,
+                                   unsigned int power_state,
+                                   psci_power_state_t *output_state);
+} plat_psci_ops_t;
+
+/*******************************************************************************
+ * Optional structure populated by the Secure Payload Dispatcher to be given a
+ * chance to perform any bookkeeping before PSCI executes a power management
+ * operation. It also allows PSCI to determine certain properties of the SP,
+ * e.g. its migrate capability.
+ ******************************************************************************/
+typedef struct spd_pm_ops {
+       void (*svc_on)(u_register_t target_cpu);
+       int32_t (*svc_off)(u_register_t __unused);
+       void (*svc_suspend)(u_register_t max_off_pwrlvl);
+       void (*svc_on_finish)(u_register_t __unused);
+       void (*svc_suspend_finish)(u_register_t max_off_pwrlvl);
+       int32_t (*svc_migrate)(u_register_t from_cpu, u_register_t to_cpu);
+       int32_t (*svc_migrate_info)(u_register_t *resident_cpu);
+       void (*svc_system_off)(void);
+       void (*svc_system_reset)(void);
+} spd_pm_ops_t;
+
+/*******************************************************************************
+ * Function & Data prototypes
+ ******************************************************************************/
+unsigned int psci_version(void);
+int psci_cpu_on(u_register_t target_cpu,
+               uintptr_t entrypoint,
+               u_register_t context_id);
+int psci_cpu_suspend(unsigned int power_state,
+                    uintptr_t entrypoint,
+                    u_register_t context_id);
+int psci_system_suspend(uintptr_t entrypoint, u_register_t context_id);
+int psci_cpu_off(void);
+int psci_affinity_info(u_register_t target_affinity,
+                      unsigned int lowest_affinity_level);
+int psci_migrate(u_register_t target_cpu);
+int psci_migrate_info_type(void);
+long psci_migrate_info_up_cpu(void);
+int psci_features(unsigned int psci_fid);
+void __dead2 psci_power_down_wfi(void);
+void psci_entrypoint(void);
+void psci_register_spd_pm_hook(const spd_pm_ops_t *);
+uintptr_t psci_smc_handler(uint32_t smc_fid,
+                         u_register_t x1,
+                         u_register_t x2,
+                         u_register_t x3,
+                         u_register_t x4,
+                         void *cookie,
+                         void *handle,
+                         u_register_t flags);
+
+/* PSCI setup function */
+int psci_setup(void);
+
+#endif /*__ASSEMBLY__*/
+
+#endif /* __PSCI_H__ */
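
A worked example of the `power_state` parameter layout above, as a sketch
assuming `PSCI_EXTENDED_STATE_ID` is 0 so the original (non-extended) field
definitions apply; with that layout the value built below evaluates to
0x02010000.

    #include <assert.h>
    #include <psci.h>

    /* Sketch: build and decode a power-down request targeting power
     * level 2 with state id 0 (original power_state format only). */
    static void power_state_example(void)
    {
            unsigned int pstate =
                    psci_make_powerstate(0, PSTATE_TYPE_POWERDOWN, 2);

            /* 0x02010000: level 2 in bits [25:24], type 1 in bit [16] */
            assert(psci_check_power_state(pstate) == 0); /* no reserved bits */
            assert(psci_get_pstate_type(pstate) == PSTATE_TYPE_POWERDOWN);
            assert(psci_get_pstate_pwrlvl(pstate) == 2);
            assert(psci_get_pstate_id(pstate) == 0);
    }
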
diff --git a/include/lib/psci/psci_compat.h b/include/lib/psci/psci_compat.h
new file mode 100644 (file)
index 0000000..3554667
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PSCI_COMPAT_H__
+#define __PSCI_COMPAT_H__
+
+#include <arch.h>
+#include <platform_def.h>
+
+#ifndef __ASSEMBLY__
+/*
+ * The below declarations are to enable compatibility for the platform ports
+ * using the old platform interface and psci helpers.
+ */
+#define PLAT_MAX_PWR_LVL       PLATFORM_MAX_AFFLVL
+#define PLAT_NUM_PWR_DOMAINS   PLATFORM_NUM_AFFS
+
+/*******************************************************************************
+ * PSCI affinity related constants. An affinity instance could
+ * be present or absent physically to cater for asymmetric topologies.
+ ******************************************************************************/
+#define PSCI_AFF_ABSENT                0x0
+#define PSCI_AFF_PRESENT       0x1
+
+#define PSCI_STATE_ON          0x0
+#define PSCI_STATE_OFF         0x1
+#define PSCI_STATE_ON_PENDING  0x2
+#define PSCI_STATE_SUSPEND     0x3
+
+/*
+ * Using the compatibility platform interfaces means that the local states
+ * used in psci_power_state_t need only convey whether it is a power-down
+ * or a standby state. The onus is on the platform port to do the right
+ * thing, including the state coordination in case multiple power-down
+ * states are involved. Hence, assuming 3 generic states viz. run, standby
+ * and power down, we can assign 1 and 2 to standby and power down
+ * respectively.
+ */
+#define PLAT_MAX_RET_STATE     1
+#define PLAT_MAX_OFF_STATE     2
+
+/*
+ * Macro to represent invalid affinity level within PSCI.
+ */
+#define PSCI_INVALID_DATA -1
+
+#define psci_get_pstate_afflvl(pstate)         psci_get_pstate_pwrlvl(pstate)
+
+/*
+ * This array stores the 'power_state' requests of each CPU during
+ * CPU_SUSPEND and SYSTEM_SUSPEND which will be populated by the
+ * compatibility layer when appropriate platform hooks are invoked.
+ */
+extern unsigned int psci_power_state_compat[PLATFORM_CORE_COUNT];
+
+/*******************************************************************************
+ * Structure populated by platform specific code to export routines which
+ * perform common low level pm functions
+ ******************************************************************************/
+typedef struct plat_pm_ops {
+       void (*affinst_standby)(unsigned int power_state);
+       int (*affinst_on)(unsigned long mpidr,
+                         unsigned long sec_entrypoint,
+                         unsigned int afflvl,
+                         unsigned int state);
+       void (*affinst_off)(unsigned int afflvl, unsigned int state);
+       void (*affinst_suspend)(unsigned long sec_entrypoint,
+                              unsigned int afflvl,
+                              unsigned int state);
+       void (*affinst_on_finish)(unsigned int afflvl, unsigned int state);
+       void (*affinst_suspend_finish)(unsigned int afflvl,
+                                     unsigned int state);
+       void (*system_off)(void) __dead2;
+       void (*system_reset)(void) __dead2;
+       int (*validate_power_state)(unsigned int power_state);
+       int (*validate_ns_entrypoint)(unsigned long ns_entrypoint);
+       unsigned int (*get_sys_suspend_power_state)(void);
+} plat_pm_ops_t;
+
+/*******************************************************************************
+ * Function & Data prototypes to enable compatibility for older platform ports
+ ******************************************************************************/
+int psci_get_suspend_stateid_by_mpidr(unsigned long);
+int psci_get_suspend_stateid(void);
+int psci_get_suspend_powerstate(void);
+unsigned int psci_get_max_phys_off_afflvl(void);
+int psci_get_suspend_afflvl(void);
+
+#endif /* __ASSEMBLY__ */
+#endif /* __PSCI_COMPAT_H__ */
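
A short sketch, assuming `ENABLE_PLAT_COMPAT` is set so the definitions above
are in effect, showing how the compat local states map onto the generic
local-state predicates from `psci.h`:

    #include <assert.h>
    #include <psci.h>

    /* With PLAT_MAX_RET_STATE == 1 and PLAT_MAX_OFF_STATE == 2, the only
     * three local states the compat layer uses classify as expected. */
    static void compat_states_example(void)
    {
            assert(is_local_state_run(PSCI_LOCAL_STATE_RUN));   /* 0 */
            assert(is_local_state_retn(PLAT_MAX_RET_STATE));    /* 1 */
            assert(is_local_state_off(PLAT_MAX_OFF_STATE));     /* 2 */
    }
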
diff --git a/include/services/std_svc.h b/include/services/std_svc.h
new file mode 100644 (file)
index 0000000..49d79f8
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __STD_SVC_H__
+#define __STD_SVC_H__
+
+/* SMC function IDs for Standard Service queries */
+
+#define ARM_STD_SVC_CALL_COUNT         0x8400ff00
+#define ARM_STD_SVC_UID                        0x8400ff01
+/*                                     0x8400ff02 is reserved */
+#define ARM_STD_SVC_VERSION            0x8400ff03
+
+/* ARM Standard Service Calls version numbers */
+#define STD_SVC_VERSION_MAJOR          0x0
+#define STD_SVC_VERSION_MINOR          0x1
+
+/* The macros below are used to identify PSCI calls from the SMC function ID */
+#define PSCI_FID_MASK                  0xffe0u
+#define PSCI_FID_VALUE                 0u
+#define is_psci_fid(_fid) \
+       (((_fid) & PSCI_FID_MASK) == PSCI_FID_VALUE)
+
+#endif /* __STD_SVC_H__ */
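
To show how the FID mask above is meant to be used, a sketch of a Standard
Service top-level SMC handler. The signature mirrors `psci_smc_handler()`
from `psci.h`; `SMC_RET1`/`SMC_UNK` are assumed to be available from the SMC
calling convention helpers.

    #include <psci.h>
    #include <smcc_helpers.h>
    #include <std_svc.h>

    /* All PSCI function IDs have bits [15:5] clear, so e.g. 0xc4000003
     * (PSCI_CPU_ON_AARCH64) matches while 0x8400ff01 (ARM_STD_SVC_UID)
     * does not; route the former to PSCI and answer the rest locally. */
    static uintptr_t std_svc_smc_handler(uint32_t smc_fid, u_register_t x1,
                                         u_register_t x2, u_register_t x3,
                                         u_register_t x4, void *cookie,
                                         void *handle, u_register_t flags)
    {
            if (is_psci_fid(smc_fid))
                    return psci_smc_handler(smc_fid, x1, x2, x3, x4,
                                            cookie, handle, flags);

            SMC_RET1(handle, SMC_UNK);
    }
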
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
new file mode 100644 (file)
index 0000000..7982e50
--- /dev/null
@@ -0,0 +1,405 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <context.h>
+
+       .global el1_sysregs_context_save
+       .global el1_sysregs_context_restore
+#if CTX_INCLUDE_FPREGS
+       .global fpregs_context_save
+       .global fpregs_context_restore
+#endif
+       .global save_gp_registers
+       .global restore_gp_registers_eret
+       .global restore_gp_registers_callee_eret
+       .global el3_exit
+
+/* -----------------------------------------------------
+ * The following function strictly follows the AArch64
+ * PCS to use x9-x17 (temporary caller-saved registers)
+ * to save EL1 system register context. It assumes that
+ * 'x0' is pointing to an 'el1_sys_regs' structure where
+ * the register context will be saved.
+ * -----------------------------------------------------
+ */
+func el1_sysregs_context_save
+
+       mrs     x9, spsr_el1
+       mrs     x10, elr_el1
+       stp     x9, x10, [x0, #CTX_SPSR_EL1]
+
+       mrs     x15, sctlr_el1
+       mrs     x16, actlr_el1
+       stp     x15, x16, [x0, #CTX_SCTLR_EL1]
+
+       mrs     x17, cpacr_el1
+       mrs     x9, csselr_el1
+       stp     x17, x9, [x0, #CTX_CPACR_EL1]
+
+       mrs     x10, sp_el1
+       mrs     x11, esr_el1
+       stp     x10, x11, [x0, #CTX_SP_EL1]
+
+       mrs     x12, ttbr0_el1
+       mrs     x13, ttbr1_el1
+       stp     x12, x13, [x0, #CTX_TTBR0_EL1]
+
+       mrs     x14, mair_el1
+       mrs     x15, amair_el1
+       stp     x14, x15, [x0, #CTX_MAIR_EL1]
+
+       mrs     x16, tcr_el1
+       mrs     x17, tpidr_el1
+       stp     x16, x17, [x0, #CTX_TCR_EL1]
+
+       mrs     x9, tpidr_el0
+       mrs     x10, tpidrro_el0
+       stp     x9, x10, [x0, #CTX_TPIDR_EL0]
+
+       mrs     x13, par_el1
+       mrs     x14, far_el1
+       stp     x13, x14, [x0, #CTX_PAR_EL1]
+
+       mrs     x15, afsr0_el1
+       mrs     x16, afsr1_el1
+       stp     x15, x16, [x0, #CTX_AFSR0_EL1]
+
+       mrs     x17, contextidr_el1
+       mrs     x9, vbar_el1
+       stp     x17, x9, [x0, #CTX_CONTEXTIDR_EL1]
+
+       /* Save AArch32 system registers if the build has instructed so */
+#if CTX_INCLUDE_AARCH32_REGS
+       mrs     x11, spsr_abt
+       mrs     x12, spsr_und
+       stp     x11, x12, [x0, #CTX_SPSR_ABT]
+
+       mrs     x13, spsr_irq
+       mrs     x14, spsr_fiq
+       stp     x13, x14, [x0, #CTX_SPSR_IRQ]
+
+       mrs     x15, dacr32_el2
+       mrs     x16, ifsr32_el2
+       stp     x15, x16, [x0, #CTX_DACR32_EL2]
+
+       mrs     x17, fpexc32_el2
+       str     x17, [x0, #CTX_FP_FPEXC32_EL2]
+#endif
+
+       /* Save NS timer registers if the build has instructed so */
+#if NS_TIMER_SWITCH
+       mrs     x10, cntp_ctl_el0
+       mrs     x11, cntp_cval_el0
+       stp     x10, x11, [x0, #CTX_CNTP_CTL_EL0]
+
+       mrs     x12, cntv_ctl_el0
+       mrs     x13, cntv_cval_el0
+       stp     x12, x13, [x0, #CTX_CNTV_CTL_EL0]
+
+       mrs     x14, cntkctl_el1
+       str     x14, [x0, #CTX_CNTKCTL_EL1]
+#endif
+
+       ret
+endfunc el1_sysregs_context_save
+
+/* -----------------------------------------------------
+ * The following function strictly follows the AArch64
+ * PCS to use x9-x17 (temporary caller-saved registers)
+ * to restore EL1 system register context.  It assumes
+ * that 'x0' is pointing to an 'el1_sys_regs' structure
+ * from where the register context will be restored.
+ * -----------------------------------------------------
+ */
+func el1_sysregs_context_restore
+
+       ldp     x9, x10, [x0, #CTX_SPSR_EL1]
+       msr     spsr_el1, x9
+       msr     elr_el1, x10
+
+       ldp     x15, x16, [x0, #CTX_SCTLR_EL1]
+       msr     sctlr_el1, x15
+       msr     actlr_el1, x16
+
+       ldp     x17, x9, [x0, #CTX_CPACR_EL1]
+       msr     cpacr_el1, x17
+       msr     csselr_el1, x9
+
+       ldp     x10, x11, [x0, #CTX_SP_EL1]
+       msr     sp_el1, x10
+       msr     esr_el1, x11
+
+       ldp     x12, x13, [x0, #CTX_TTBR0_EL1]
+       msr     ttbr0_el1, x12
+       msr     ttbr1_el1, x13
+
+       ldp     x14, x15, [x0, #CTX_MAIR_EL1]
+       msr     mair_el1, x14
+       msr     amair_el1, x15
+
+       ldp     x16, x17, [x0, #CTX_TCR_EL1]
+       msr     tcr_el1, x16
+       msr     tpidr_el1, x17
+
+       ldp     x9, x10, [x0, #CTX_TPIDR_EL0]
+       msr     tpidr_el0, x9
+       msr     tpidrro_el0, x10
+
+       ldp     x13, x14, [x0, #CTX_PAR_EL1]
+       msr     par_el1, x13
+       msr     far_el1, x14
+
+       ldp     x15, x16, [x0, #CTX_AFSR0_EL1]
+       msr     afsr0_el1, x15
+       msr     afsr1_el1, x16
+
+       ldp     x17, x9, [x0, #CTX_CONTEXTIDR_EL1]
+       msr     contextidr_el1, x17
+       msr     vbar_el1, x9
+
+       /* Restore AArch32 system registers if the build has instructed so */
+#if CTX_INCLUDE_AARCH32_REGS
+       ldp     x11, x12, [x0, #CTX_SPSR_ABT]
+       msr     spsr_abt, x11
+       msr     spsr_und, x12
+
+       ldp     x13, x14, [x0, #CTX_SPSR_IRQ]
+       msr     spsr_irq, x13
+       msr     spsr_fiq, x14
+
+       ldp     x15, x16, [x0, #CTX_DACR32_EL2]
+       msr     dacr32_el2, x15
+       msr     ifsr32_el2, x16
+
+       ldr     x17, [x0, #CTX_FP_FPEXC32_EL2]
+       msr     fpexc32_el2, x17
+#endif
+       /* Restore NS timer registers if the build has instructed so */
+#if NS_TIMER_SWITCH
+       ldp     x10, x11, [x0, #CTX_CNTP_CTL_EL0]
+       msr     cntp_ctl_el0, x10
+       msr     cntp_cval_el0, x11
+
+       ldp     x12, x13, [x0, #CTX_CNTV_CTL_EL0]
+       msr     cntv_ctl_el0, x12
+       msr     cntv_cval_el0, x13
+
+       ldr     x14, [x0, #CTX_CNTKCTL_EL1]
+       msr     cntkctl_el1, x14
+#endif
+
+       /* No explicit ISB required here as ERET covers it */
+       ret
+endfunc el1_sysregs_context_restore
+
+/* -----------------------------------------------------
+ * The following function follows the aapcs_64 strictly
+ * to use x9-x17 (temporary caller-saved registers
+ * according to AArch64 PCS) to save floating point
+ * register context. It assumes that 'x0' is pointing to
+ * a 'fp_regs' structure where the register context will
+ * be saved.
+ *
+ * Access to VFP registers will trap if CPTR_EL3.TFP is
+ * set. However, Trusted Firmware currently neither uses
+ * VFP registers nor sets this trap, so the bit is
+ * assumed to be clear.
+ *
+ * TODO: Revisit when VFP is used in secure world
+ * -----------------------------------------------------
+ */
+#if CTX_INCLUDE_FPREGS
+func fpregs_context_save
+       stp     q0, q1, [x0, #CTX_FP_Q0]
+       stp     q2, q3, [x0, #CTX_FP_Q2]
+       stp     q4, q5, [x0, #CTX_FP_Q4]
+       stp     q6, q7, [x0, #CTX_FP_Q6]
+       stp     q8, q9, [x0, #CTX_FP_Q8]
+       stp     q10, q11, [x0, #CTX_FP_Q10]
+       stp     q12, q13, [x0, #CTX_FP_Q12]
+       stp     q14, q15, [x0, #CTX_FP_Q14]
+       stp     q16, q17, [x0, #CTX_FP_Q16]
+       stp     q18, q19, [x0, #CTX_FP_Q18]
+       stp     q20, q21, [x0, #CTX_FP_Q20]
+       stp     q22, q23, [x0, #CTX_FP_Q22]
+       stp     q24, q25, [x0, #CTX_FP_Q24]
+       stp     q26, q27, [x0, #CTX_FP_Q26]
+       stp     q28, q29, [x0, #CTX_FP_Q28]
+       stp     q30, q31, [x0, #CTX_FP_Q30]
+
+       mrs     x9, fpsr
+       str     x9, [x0, #CTX_FP_FPSR]
+
+       mrs     x10, fpcr
+       str     x10, [x0, #CTX_FP_FPCR]
+
+       ret
+endfunc fpregs_context_save
+
+/* -----------------------------------------------------
+ * The following function follows the aapcs_64 strictly
+ * to use x9-x17 (temporary caller-saved registers
+ * according to AArch64 PCS) to restore floating point
+ * register context. It assumes that 'x0' is pointing to
+ * a 'fp_regs' structure from where the register context
+ * will be restored.
+ *
+ * Access to VFP registers will trap if CPTR_EL3.TFP is
+ * set. However, Trusted Firmware currently neither uses
+ * VFP registers nor sets this trap, so the bit is
+ * assumed to be clear.
+ *
+ * TODO: Revisit when VFP is used in secure world
+ * -----------------------------------------------------
+ */
+func fpregs_context_restore
+       ldp     q0, q1, [x0, #CTX_FP_Q0]
+       ldp     q2, q3, [x0, #CTX_FP_Q2]
+       ldp     q4, q5, [x0, #CTX_FP_Q4]
+       ldp     q6, q7, [x0, #CTX_FP_Q6]
+       ldp     q8, q9, [x0, #CTX_FP_Q8]
+       ldp     q10, q11, [x0, #CTX_FP_Q10]
+       ldp     q12, q13, [x0, #CTX_FP_Q12]
+       ldp     q14, q15, [x0, #CTX_FP_Q14]
+       ldp     q16, q17, [x0, #CTX_FP_Q16]
+       ldp     q18, q19, [x0, #CTX_FP_Q18]
+       ldp     q20, q21, [x0, #CTX_FP_Q20]
+       ldp     q22, q23, [x0, #CTX_FP_Q22]
+       ldp     q24, q25, [x0, #CTX_FP_Q24]
+       ldp     q26, q27, [x0, #CTX_FP_Q26]
+       ldp     q28, q29, [x0, #CTX_FP_Q28]
+       ldp     q30, q31, [x0, #CTX_FP_Q30]
+
+       ldr     x9, [x0, #CTX_FP_FPSR]
+       msr     fpsr, x9
+
+       ldr     x10, [x0, #CTX_FP_FPCR]
+       msr     fpcr, x10
+
+       /*
+        * No explicit ISB required here as ERET to
+        * switch to secure EL1 or non-secure world
+        * covers it
+        */
+
+       ret
+endfunc fpregs_context_restore
+#endif /* CTX_INCLUDE_FPREGS */
+
+/* -----------------------------------------------------
+ * The following functions are used to save and restore
+ * all the general purpose registers. Ideally we would
+ * only save and restore the callee saved registers when
+ * a world switch occurs but that type of implementation
+ * is more complex. So currently we will always save and
+ * restore these registers on entry and exit of EL3.
+ * These are not macros to ensure their invocation fits
+ * within the 32 instructions per exception vector.
+ * clobbers: x18
+ * -----------------------------------------------------
+ */
+func save_gp_registers
+       stp     x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+       stp     x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+       stp     x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+       stp     x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+       stp     x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+       stp     x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+       stp     x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+       stp     x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+       stp     x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+       stp     x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+       stp     x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
+       stp     x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
+       stp     x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
+       stp     x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+       stp     x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+       mrs     x18, sp_el0
+       str     x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
+       ret
+endfunc save_gp_registers
+
+func restore_gp_registers_eret
+       ldp     x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+       ldp     x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+       b       restore_gp_registers_callee_eret
+endfunc restore_gp_registers_eret
+
+func restore_gp_registers_callee_eret
+       ldp     x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+       ldp     x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+       ldp     x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+       ldp     x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+       ldp     x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+       ldp     x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+       ldp     x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+       ldp     x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
+       ldp     x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
+       ldp     x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
+       ldp     x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+       ldp     x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+       ldp     x30, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+       msr     sp_el0, x17
+       ldp     x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+       eret
+endfunc restore_gp_registers_callee_eret
+
+       /* -----------------------------------------------------
+        * This routine assumes that the SP_EL3 is pointing to
+        * a valid context structure from where the gp regs and
+        * other special registers can be retrieved.
+        * -----------------------------------------------------
+        */
+func el3_exit
+       /* -----------------------------------------------------
+        * Save the current SP_EL0 i.e. the EL3 runtime stack
+        * which will be used for handling the next SMC. Then
+        * switch to SP_EL3
+        * -----------------------------------------------------
+        */
+       mov     x17, sp
+       msr     spsel, #1
+       str     x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
+
+       /* -----------------------------------------------------
+        * Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
+        * -----------------------------------------------------
+        */
+       ldr     x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
+       ldp     x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
+       msr     scr_el3, x18
+       msr     spsr_el3, x16
+       msr     elr_el3, x17
+
+       /* Restore saved general purpose registers and return */
+       b       restore_gp_registers_eret
+endfunc el3_exit
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
new file mode 100644 (file)
index 0000000..4527aa3
--- /dev/null
@@ -0,0 +1,383 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <interrupt_mgmt.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <smcc_helpers.h>
+#include <string.h>
+
+
+/*******************************************************************************
+ * Context management library initialisation routine. This library is used by
+ * runtime services to share pointers to 'cpu_context' structures for the secure
+ * and non-secure states. Management of the structures and their associated
+ * memory is not done by the context management library e.g. the PSCI service
+ * manages the cpu context used for entry from and exit to the non-secure state.
+ * The Secure payload dispatcher service manages the context(s) corresponding to
+ * the secure state. It also uses this library to get access to the non-secure
+ * state cpu context pointers.
+ * Lastly, this library provides the API to make SP_EL3 point to the cpu context
+ * which will be used for programming an entry into a lower EL. The same context
+ * will be used to save state upon exception entry from that EL.
+ ******************************************************************************/
+void cm_init(void)
+{
+       /*
+        * The context management library has only global data to initialize, but
+        * that will be done when the BSS is zeroed out
+        */
+}
+
+/*******************************************************************************
+ * The following function initializes the cpu_context 'ctx' for
+ * first use, and sets the initial entrypoint state as specified by the
+ * entry_point_info structure.
+ *
+ * The security state to initialize is determined by the SECURE attribute
+ * of the entry_point_info. The function returns a pointer to the initialized
+ * context and sets this as the next context to return to.
+ *
+ * The EE and ST attributes are used to configure the endianness and secure
+ * timer availability for the new execution context.
+ *
+ * To prepare the register state for entry call cm_prepare_el3_exit() and
+ * el3_exit(). For Secure-EL1, cm_prepare_el3_exit() is equivalent to
+ * cm_el1_sysregs_context_restore().
+ ******************************************************************************/
+static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
+{
+       unsigned int security_state;
+       uint32_t scr_el3;
+       el3_state_t *state;
+       gp_regs_t *gp_regs;
+       unsigned long sctlr_elx;
+
+       assert(ctx);
+
+       security_state = GET_SECURITY_STATE(ep->h.attr);
+
+       /* Clear any residual register values from the context */
+       memset(ctx, 0, sizeof(*ctx));
+
+       /*
+        * Base the context SCR on the current value, adjust for entry point
+        * specific requirements and set trap bits from the IMF
+        * TODO: provide the base/global SCR bits using another mechanism?
+        */
+       scr_el3 = read_scr();
+       scr_el3 &= ~(SCR_NS_BIT | SCR_RW_BIT | SCR_FIQ_BIT | SCR_IRQ_BIT |
+                       SCR_ST_BIT | SCR_HCE_BIT);
+
+       if (security_state != SECURE)
+               scr_el3 |= SCR_NS_BIT;
+
+       if (GET_RW(ep->spsr) == MODE_RW_64)
+               scr_el3 |= SCR_RW_BIT;
+
+       if (EP_GET_ST(ep->h.attr))
+               scr_el3 |= SCR_ST_BIT;
+
+#ifndef HANDLE_EA_EL3_FIRST
+       /* Explicitly stop trapping aborts from lower exception levels. */
+       scr_el3 &= ~SCR_EA_BIT;
+#endif
+
+#if IMAGE_BL31
+       /*
+        * IRQ/FIQ bits only need setting if interrupt routing
+        * model has been set up for BL31.
+        */
+       scr_el3 |= get_scr_el3_from_routing_model(security_state);
+#endif
+
+       /*
+        * Set up SCTLR_ELx for the target exception level:
+        * EE bit is taken from the entrypoint attributes
+        * M, C and I bits must be zero (as required by PSCI specification)
+        *
+        * The target exception level is based on the spsr mode requested.
+        * If execution is requested to EL2 or hyp mode, HVC is enabled
+        * via SCR_EL3.HCE.
+        *
+        * Always compute the SCTLR_EL1 value and save it in the cpu_context
+        * - the EL2 registers are set up by cm_prepare_el3_exit() as they
+        * are not part of the stored cpu_context.
+        *
+        * TODO: In debug builds the spsr should be validated and checked
+        * against the CPU support, security state, endianness and pc
+        */
+       sctlr_elx = EP_GET_EE(ep->h.attr) ? SCTLR_EE_BIT : 0;
+       if (GET_RW(ep->spsr) == MODE_RW_64)
+               sctlr_elx |= SCTLR_EL1_RES1;
+       else
+               sctlr_elx |= SCTLR_AARCH32_EL1_RES1;
+       write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);
+
+       if ((GET_RW(ep->spsr) == MODE_RW_64
+            && GET_EL(ep->spsr) == MODE_EL2)
+           || (GET_RW(ep->spsr) != MODE_RW_64
+               && GET_M32(ep->spsr) == MODE32_hyp)) {
+               scr_el3 |= SCR_HCE_BIT;
+       }
+
+       /* Populate EL3 state so that we have the right context before doing ERET */
+       state = get_el3state_ctx(ctx);
+       write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
+       write_ctx_reg(state, CTX_ELR_EL3, ep->pc);
+       write_ctx_reg(state, CTX_SPSR_EL3, ep->spsr);
+
+       /*
+        * Store the X0-X7 value from the entrypoint into the context
+        * Use memcpy as we are in control of the layout of the structures
+        */
+       gp_regs = get_gpregs_ctx(ctx);
+       memcpy(gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t));
+}
+
+/*******************************************************************************
+ * The following function initializes the cpu_context for a CPU specified by
+ * its `cpu_idx` for first use, and sets the initial entrypoint state as
+ * specified by the entry_point_info structure.
+ ******************************************************************************/
+void cm_init_context_by_index(unsigned int cpu_idx,
+                             const entry_point_info_t *ep)
+{
+       cpu_context_t *ctx;
+       ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
+       cm_init_context_common(ctx, ep);
+}
+
+/*******************************************************************************
+ * The following function initializes the cpu_context for the current CPU
+ * for first use, and sets the initial entrypoint state as specified by the
+ * entry_point_info structure.
+ ******************************************************************************/
+void cm_init_my_context(const entry_point_info_t *ep)
+{
+       cpu_context_t *ctx;
+       ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
+       cm_init_context_common(ctx, ep);
+}
+
+/*******************************************************************************
+ * Prepare the CPU system registers for first entry into secure or normal world
+ *
+ * If execution is requested to EL2 or hyp mode, SCTLR_EL2 is initialized
+ * If execution is requested to non-secure EL1 or svc mode, and the CPU supports
+ * EL2 then EL2 is disabled by configuring all necessary EL2 registers.
+ * For all entries, the EL1 registers are initialized from the cpu_context
+ ******************************************************************************/
+void cm_prepare_el3_exit(uint32_t security_state)
+{
+       uint32_t sctlr_elx, scr_el3, cptr_el2;
+       cpu_context_t *ctx = cm_get_context(security_state);
+
+       assert(ctx);
+
+       if (security_state == NON_SECURE) {
+               scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);
+               if (scr_el3 & SCR_HCE_BIT) {
+                       /* Use SCTLR_EL1.EE value to initialise sctlr_el2 */
+                       sctlr_elx = read_ctx_reg(get_sysregs_ctx(ctx),
+                                                CTX_SCTLR_EL1);
+                       sctlr_elx &= ~SCTLR_EE_BIT;
+                       sctlr_elx |= SCTLR_EL2_RES1;
+                       write_sctlr_el2(sctlr_elx);
+               } else if (read_id_aa64pfr0_el1() &
+                          (ID_AA64PFR0_ELX_MASK << ID_AA64PFR0_EL2_SHIFT)) {
+                       /* EL2 present but unused, need to disable safely */
+
+                       /* HCR_EL2 = 0, except RW bit set to match SCR_EL3 */
+                       write_hcr_el2((scr_el3 & SCR_RW_BIT) ? HCR_RW_BIT : 0);
+
+                       /* SCTLR_EL2 : can be ignored when bypassing */
+
+                       /* CPTR_EL2 : disable all traps TCPAC, TTA, TFP */
+                       cptr_el2 = read_cptr_el2();
+                       cptr_el2 &= ~(TCPAC_BIT | TTA_BIT | TFP_BIT);
+                       write_cptr_el2(cptr_el2);
+
+                       /* Enable EL1 access to timer */
+                       write_cnthctl_el2(EL1PCEN_BIT | EL1PCTEN_BIT);
+
+                       /* Reset CNTVOFF_EL2 */
+                       write_cntvoff_el2(0);
+
+                       /* Set VPIDR, VMPIDR to match MIDR, MPIDR */
+                       write_vpidr_el2(read_midr_el1());
+                       write_vmpidr_el2(read_mpidr_el1());
+
+                       /*
+                        * Reset VTTBR_EL2.
+                        * Needed because cache maintenance operations depend on
+                        * the VMID even when non-secure EL1&0 stage 2 address
+                        * translation is disabled.
+                        */
+                       write_vttbr_el2(0);
+               }
+       }
+
+       el1_sysregs_context_restore(get_sysregs_ctx(ctx));
+
+       cm_set_next_context(ctx);
+}
+
+/*******************************************************************************
+ * The next four functions are used by runtime services to save and restore
+ * EL1 context on the 'cpu_context' structure for the specified security
+ * state.
+ ******************************************************************************/
+void cm_el1_sysregs_context_save(uint32_t security_state)
+{
+       cpu_context_t *ctx;
+
+       ctx = cm_get_context(security_state);
+       assert(ctx);
+
+       el1_sysregs_context_save(get_sysregs_ctx(ctx));
+}
+
+void cm_el1_sysregs_context_restore(uint32_t security_state)
+{
+       cpu_context_t *ctx;
+
+       ctx = cm_get_context(security_state);
+       assert(ctx);
+
+       el1_sysregs_context_restore(get_sysregs_ctx(ctx));
+}
+
+/*******************************************************************************
+ * This function populates ELR_EL3 member of 'cpu_context' pertaining to the
+ * given security state with the given entrypoint
+ ******************************************************************************/
+void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint)
+{
+       cpu_context_t *ctx;
+       el3_state_t *state;
+
+       ctx = cm_get_context(security_state);
+       assert(ctx);
+
+       /* Populate EL3 state so that ERET jumps to the correct entry */
+       state = get_el3state_ctx(ctx);
+       write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
+}
+
+/*******************************************************************************
+ * This function populates ELR_EL3 and SPSR_EL3 members of 'cpu_context'
+ * pertaining to the given security state
+ ******************************************************************************/
+void cm_set_elr_spsr_el3(uint32_t security_state,
+                       uintptr_t entrypoint, uint32_t spsr)
+{
+       cpu_context_t *ctx;
+       el3_state_t *state;
+
+       ctx = cm_get_context(security_state);
+       assert(ctx);
+
+       /* Populate EL3 state so that ERET jumps to the correct entry */
+       state = get_el3state_ctx(ctx);
+       write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
+       write_ctx_reg(state, CTX_SPSR_EL3, spsr);
+}
+
+/*******************************************************************************
+ * This function updates a single bit in the SCR_EL3 member of the 'cpu_context'
+ * pertaining to the given security state using the value and bit position
+ * specified in the parameters. It preserves all other bits.
+ ******************************************************************************/
+void cm_write_scr_el3_bit(uint32_t security_state,
+                         uint32_t bit_pos,
+                         uint32_t value)
+{
+       cpu_context_t *ctx;
+       el3_state_t *state;
+       uint32_t scr_el3;
+
+       ctx = cm_get_context(security_state);
+       assert(ctx);
+
+       /* Ensure that the bit position is a valid one */
+       assert((1 << bit_pos) & SCR_VALID_BIT_MASK);
+
+       /* Ensure that the 'value' is only a bit wide */
+       assert(value <= 1);
+
+       /*
+        * Get the SCR_EL3 value from the cpu context, clear the desired bit
+        * and set it to its new value.
+        */
+       state = get_el3state_ctx(ctx);
+       scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
+       scr_el3 &= ~(1 << bit_pos);
+       scr_el3 |= value << bit_pos;
+       write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
+}
+
+/*******************************************************************************
+ * This function retrieves SCR_EL3 member of 'cpu_context' pertaining to the
+ * given security state.
+ ******************************************************************************/
+uint32_t cm_get_scr_el3(uint32_t security_state)
+{
+       cpu_context_t *ctx;
+       el3_state_t *state;
+
+       ctx = cm_get_context(security_state);
+       assert(ctx);
+
+       /* Read the SCR_EL3 value from the cpu_context */
+       state = get_el3state_ctx(ctx);
+       return read_ctx_reg(state, CTX_SCR_EL3);
+}
+
+/*******************************************************************************
+ * This function is used to program the context that's used for exception
+ * return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for
+ * the required security state
+ ******************************************************************************/
+void cm_set_next_eret_context(uint32_t security_state)
+{
+       cpu_context_t *ctx;
+
+       ctx = cm_get_context(security_state);
+       assert(ctx);
+
+       cm_set_next_context(ctx);
+}
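
A minimal sketch (not from this patch) of the world-switch pattern a Secure
Payload Dispatcher could build from the functions above; the wrapper name is
hypothetical, while the `cm_*` calls and the `SECURE`/`NON_SECURE` constants
are the ones defined by this library and `bl_common.h`.

    #include <bl_common.h>
    #include <context_mgmt.h>

    /* Hypothetical SPD helper: park the non-secure EL1 state and arrange
     * for the next ERET to land in the secure payload. The actual ERET
     * happens later via el3_exit() once SP_EL3 points at the secure
     * context. */
    static void prepare_secure_entry(void)
    {
            cm_el1_sysregs_context_save(NON_SECURE);
            cm_el1_sysregs_context_restore(SECURE);
            cm_set_next_eret_context(SECURE);
    }
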
diff --git a/lib/el3_runtime/aarch64/cpu_data.S b/lib/el3_runtime/aarch64/cpu_data.S
new file mode 100644 (file)
index 0000000..2cc07ba
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm_macros.S>
+#include <cpu_data.h>
+
+.globl init_cpu_data_ptr
+.globl _cpu_data_by_index
+
+/* -----------------------------------------------------------------
+ * void init_cpu_data_ptr(void)
+ *
+ * Initialise the TPIDR_EL3 register to refer to the cpu_data_t
+ * for the calling CPU. This must be called before cm_get_cpu_data()
+ *
+ * This can be called without a valid stack. It assumes that
+ * plat_my_core_pos() does not clobber register x10.
+ * clobbers: x0, x1, x10
+ * -----------------------------------------------------------------
+ */
+func init_cpu_data_ptr
+       mov     x10, x30
+       bl      plat_my_core_pos
+       bl      _cpu_data_by_index
+       msr     tpidr_el3, x0
+       ret     x10
+endfunc init_cpu_data_ptr
+
+/* -----------------------------------------------------------------
+ * cpu_data_t *_cpu_data_by_index(uint32_t cpu_index)
+ *
+ * Return the cpu_data structure for the CPU with given linear index
+ *
+ * This can be called without a valid stack.
+ * clobbers: x0, x1
+ * -----------------------------------------------------------------
+ */
+func _cpu_data_by_index
+       adr     x1, percpu_data
+       add     x0, x1, x0, LSL #CPU_DATA_LOG2SIZE
+       ret
+endfunc _cpu_data_by_index
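The C-side accessor that pairs with this assembly is not part of this hunk; as a rough sketch of how cpu_data.h consumers are expected to retrieve the pointer cached in TPIDR_EL3 (treat the exact helper name as an assumption):

    /* Read back the per-CPU pointer stashed by init_cpu_data_ptr() */
    static inline cpu_data_t *_cpu_data(void)
    {
            return (cpu_data_t *)read_tpidr_el3();
    }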
diff --git a/lib/el3_runtime/cpu_data_array.c b/lib/el3_runtime/cpu_data_array.c
new file mode 100644 (file)
index 0000000..eba21a5
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <cassert.h>
+#include <cpu_data.h>
+#include <platform_def.h>
+
+/* The per-CPU data array, indexed by a CPU's linear core position */
+cpu_data_t percpu_data[PLATFORM_CORE_COUNT];
diff --git a/lib/psci/aarch64/psci_entry.S b/lib/psci/aarch64/psci_entry.S
new file mode 100644 (file)
index 0000000..646ebcf
--- /dev/null
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <el3_common_macros.S>
+#include <psci.h>
+#include <xlat_tables.h>
+
+       .globl  psci_entrypoint
+       .globl  psci_power_down_wfi
+
+       /* --------------------------------------------------------------------
+        * This CPU has been physically powered up. It is either resuming from
+        * suspend or has simply been turned on. In both cases, call the power
+        * on finisher.
+        * --------------------------------------------------------------------
+        */
+func psci_entrypoint
+       /*
+        * On the warm boot path, most of the EL3 initialisations performed by
+        * 'el3_entrypoint_common' must be skipped:
+        *
+        *  - Only when the platform bypasses the BL1/BL31 entrypoint by
+        *    programming the reset address do we need to set the CPU endianness.
+        *    In other cases, we assume this has been taken care by the
+        *    entrypoint code.
+        *
+        *  - No need to determine the type of boot, we know it is a warm boot.
+        *
+        *  - Do not try to distinguish between primary and secondary CPUs, this
+        *    notion only exists for a cold boot.
+        *
+        *  - No need to initialise the memory or the C runtime environment,
+        *    it has been done once and for all on the cold boot path.
+        */
+       el3_entrypoint_common                                   \
+               _set_endian=PROGRAMMABLE_RESET_ADDRESS          \
+               _warm_boot_mailbox=0                            \
+               _secondary_cold_boot=0                          \
+               _init_memory=0                                  \
+               _init_c_runtime=0                               \
+               _exception_vectors=runtime_exceptions
+
+       /* --------------------------------------------
+        * Enable the MMU with the DCache disabled. It
+        * is safe to use stacks allocated in normal
+        * memory as a result. All memory accesses are
+        * marked nGnRnE when the MMU is disabled. So
+        * all the stack writes will make it to memory.
+        * All memory accesses are marked Non-cacheable
+        * when the MMU is enabled but D$ is disabled.
+        * So used stack memory is guaranteed to be
+        * visible immediately after the MMU is enabled.
+        * Enabling the DCache at the same time as the
+        * MMU can lead to speculatively fetched and
+        * possibly stale stack memory being read from
+        * other caches. This can lead to coherency
+        * issues.
+        * --------------------------------------------
+        */
+       mov     x0, #DISABLE_DCACHE
+       bl      bl31_plat_enable_mmu
+
+       bl      psci_power_up_finish
+
+       b       el3_exit
+endfunc psci_entrypoint
+
+       /* --------------------------------------------
+        * This function is called to indicate to the
+        * power controller that it is safe to power
+        * down this cpu. It should not exit the wfi
+        * and will be released from reset upon power
+        * up. 'plat_panic_handler' is called to catch
+        * erroneous exits from wfi.
+        * --------------------------------------------
+        */
+func psci_power_down_wfi
+       dsb     sy              // ensure write buffer empty
+       wfi
+       bl      plat_panic_handler
+endfunc psci_power_down_wfi
+
diff --git a/lib/psci/aarch64/psci_helpers.S b/lib/psci/aarch64/psci_helpers.S
new file mode 100644 (file)
index 0000000..87144dd
--- /dev/null
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <platform_def.h>
+#include <psci.h>
+
+       .globl  psci_do_pwrdown_cache_maintenance
+       .globl  psci_do_pwrup_cache_maintenance
+
+/* -----------------------------------------------------------------------
+ * void psci_do_pwrdown_cache_maintenance(unsigned int power_level);
+ *
+ * This function performs cache maintenance for the specified power
+ * level. The levels of cache affected are determined by the power
+ * level which is passed as the argument, i.e. level 0 results
+ * in a flush of the L1 cache. Both the L1 and L2 caches are flushed
+ * for a higher power level.
+ *
+ * Additionally, this function also ensures that stack memory is correctly
+ * flushed out to avoid coherency issues due to a change in its memory
+ * attributes after the data cache is disabled.
+ * -----------------------------------------------------------------------
+ */
+func psci_do_pwrdown_cache_maintenance
+       stp     x29, x30, [sp,#-16]!
+       stp     x19, x20, [sp,#-16]!
+
+       /* ---------------------------------------------
+        * Determine how many levels of cache will be
+        * subject to cache maintenance. Power level
+        * 0 implies that only the cpu is being powered
+        * down. Only the L1 data cache needs to be
+        * flushed to the PoU in this case. For a higher
+        * power level we are assuming that a flush
+        * of L1 data and L2 unified cache is enough.
+        * This information should be provided by the
+        * platform.
+        * ---------------------------------------------
+        */
+       cmp     w0, #PSCI_CPU_PWR_LVL
+       b.eq    do_core_pwr_dwn
+       bl      prepare_cluster_pwr_dwn
+       b       do_stack_maintenance
+
+do_core_pwr_dwn:
+       bl      prepare_core_pwr_dwn
+
+       /* ---------------------------------------------
+        * Do stack maintenance by flushing the used
+        * stack to the main memory and invalidating the
+        * remainder.
+        * ---------------------------------------------
+        */
+do_stack_maintenance:
+       bl      plat_get_my_stack
+
+       /* ---------------------------------------------
+        * Calculate and store the size of the used
+        * stack memory in x1.
+        * ---------------------------------------------
+        */
+       mov     x19, x0
+       mov     x1, sp
+       sub     x1, x0, x1
+       mov     x0, sp
+       bl      flush_dcache_range
+
+       /* ---------------------------------------------
+        * Calculate and store the size of the unused
+        * stack memory in x1. Calculate and store the
+        * stack base address in x0.
+        * ---------------------------------------------
+        */
+       sub     x0, x19, #PLATFORM_STACK_SIZE
+       sub     x1, sp, x0
+       bl      inv_dcache_range
+
+       ldp     x19, x20, [sp], #16
+       ldp     x29, x30, [sp], #16
+       ret
+endfunc psci_do_pwrdown_cache_maintenance
+
+
+/* -----------------------------------------------------------------------
+ * void psci_do_pwrup_cache_maintenance(void);
+ *
+ * This function performs cache maintenance after this cpu is powered up.
+ * Currently, this involves managing the used stack memory before turning
+ * on the data cache.
+ * -----------------------------------------------------------------------
+ */
+func psci_do_pwrup_cache_maintenance
+       stp     x29, x30, [sp,#-16]!
+
+       /* ---------------------------------------------
+        * Ensure any inflight stack writes have made it
+        * to main memory.
+        * ---------------------------------------------
+        */
+       dmb     st
+
+       /* ---------------------------------------------
+        * Calculate and store the size of the used
+        * stack memory in x1. Calculate and store the
+        * stack base address in x0.
+        * ---------------------------------------------
+        */
+       bl      plat_get_my_stack
+       mov     x1, sp
+       sub     x1, x0, x1
+       mov     x0, sp
+       bl      inv_dcache_range
+
+       /* ---------------------------------------------
+        * Enable the data cache.
+        * ---------------------------------------------
+        */
+       mrs     x0, sctlr_el3
+       orr     x0, x0, #SCTLR_C_BIT
+       msr     sctlr_el3, x0
+       isb
+
+       ldp     x29, x30, [sp], #16
+       ret
+endfunc psci_do_pwrup_cache_maintenance
diff --git a/lib/psci/psci_common.c b/lib/psci/psci_common.c
new file mode 100644 (file)
index 0000000..2a0afb4
--- /dev/null
@@ -0,0 +1,928 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <platform.h>
+#include <string.h>
+#include "psci_private.h"
+
+/*
+ * SPD power management operations, expected to be supplied by the registered
+ * SPD on successful SP initialization
+ */
+const spd_pm_ops_t *psci_spd_pm;
+
+/*
+ * PSCI requested local power state map. This array is used to store the local
+ * power states requested by a CPU for power levels from level 1 to
+ * PLAT_MAX_PWR_LVL. It does not store the requested local power state for power
+ * level 0 (PSCI_CPU_PWR_LVL) as the requested and the target power state for a
+ * CPU are the same.
+ *
+ * During state coordination, the platform is passed an array containing the
+ * local states requested for a particular non cpu power domain by each cpu
+ * within the domain.
+ *
+ * TODO: Dense packing of the requested states will cause cache thrashing
+ * when multiple power domains write to it. If we allocate the requested
+ * states at each power level in a cache-line aligned per-domain memory,
+ * the cache thrashing can be avoided.
+ */
+static plat_local_state_t
+       psci_req_local_pwr_states[PLAT_MAX_PWR_LVL][PLATFORM_CORE_COUNT];
+
+
+/*******************************************************************************
+ * Arrays that hold the platform's power domain tree information for state
+ * management of power domains.
+ * Each node in the array 'psci_non_cpu_pd_nodes' corresponds to a power domain
+ * which is an ancestor of a CPU power domain.
+ * Each node in the array 'psci_cpu_pd_nodes' corresponds to a cpu power domain
+ ******************************************************************************/
+non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS]
+#if USE_COHERENT_MEM
+__section("tzfw_coherent_mem")
+#endif
+;
+
+DEFINE_BAKERY_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
+
+cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
+
+/*******************************************************************************
+ * Pointer to functions exported by the platform to complete power mgmt. ops
+ ******************************************************************************/
+const plat_psci_ops_t *psci_plat_pm_ops;
+
+/******************************************************************************
+ * Check that the maximum power level supported by the platform makes sense
+ *****************************************************************************/
+CASSERT(PLAT_MAX_PWR_LVL <= PSCI_MAX_PWR_LVL && \
+               PLAT_MAX_PWR_LVL >= PSCI_CPU_PWR_LVL, \
+               assert_platform_max_pwrlvl_check);
+
+/*
+ * The plat_local_state used by the platform is one of these types: RUN,
+ * RETENTION and OFF. The platform can define further sub-states for each type
+ * apart from RUN. This categorization is done to verify the sanity of the
+ * psci_power_state passed by the platform and to print debug information. The
+ * categorization is done on the basis of the following conditions:
+ *
+ * 1. If (plat_local_state == 0) then the category is STATE_TYPE_RUN.
+ *
+ * 2. If (0 < plat_local_state <= PLAT_MAX_RET_STATE), then the category is
+ *    STATE_TYPE_RETN.
+ *
+ * 3. If (plat_local_state > PLAT_MAX_RET_STATE), then the category is
+ *    STATE_TYPE_OFF.
+ */
+typedef enum plat_local_state_type {
+       STATE_TYPE_RUN = 0,
+       STATE_TYPE_RETN,
+       STATE_TYPE_OFF
+} plat_local_state_type_t;
+
+/* The macro used to categorize plat_local_state. */
+#define find_local_state_type(plat_local_state)                                        \
+               ((plat_local_state) ? (((plat_local_state) > PLAT_MAX_RET_STATE) \
+               ? STATE_TYPE_OFF : STATE_TYPE_RETN)                             \
+               : STATE_TYPE_RUN)
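As a concrete illustration, assume a platform defines PLAT_MAX_RET_STATE as 1 and PLAT_MAX_OFF_STATE as 2 (illustrative values, not mandated by the framework):

    find_local_state_type(0);   /* STATE_TYPE_RUN:  the domain keeps running */
    find_local_state_type(1);   /* STATE_TYPE_RETN: a retention sub-state    */
    find_local_state_type(2);   /* STATE_TYPE_OFF:  a power-off sub-state    */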
+
+/******************************************************************************
+ * Check that the maximum retention level supported by the platform is less
+ * than the maximum off level.
+ *****************************************************************************/
+CASSERT(PLAT_MAX_RET_STATE < PLAT_MAX_OFF_STATE, \
+               assert_platform_max_off_and_retn_state_check);
+
+/******************************************************************************
+ * This function ensures that the power state parameter in a CPU_SUSPEND request
+ * is valid. If so, it returns the requested states for each power level.
+ *****************************************************************************/
+int psci_validate_power_state(unsigned int power_state,
+                             psci_power_state_t *state_info)
+{
+       /* Check SBZ bits in power state are zero */
+       if (psci_check_power_state(power_state))
+               return PSCI_E_INVALID_PARAMS;
+
+       assert(psci_plat_pm_ops->validate_power_state);
+
+       /* Validate the power_state using platform pm_ops */
+       return psci_plat_pm_ops->validate_power_state(power_state, state_info);
+}
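The platform hook invoked above decodes the raw power_state parameter into a psci_power_state_t. A minimal, hypothetical implementation for a platform that only supports retention at every level (the state-ID encoding chosen here is entirely illustrative):

    static int plat_validate_power_state(unsigned int power_state,
                                         psci_power_state_t *req_state)
    {
            unsigned int lvl;

            /* Illustrative encoding: state ID 0 means retention everywhere */
            if (psci_get_pstate_id(power_state) != 0)
                    return PSCI_E_INVALID_PARAMS;

            for (lvl = PSCI_CPU_PWR_LVL; lvl <= PLAT_MAX_PWR_LVL; lvl++)
                    req_state->pwr_domain_state[lvl] = PLAT_MAX_RET_STATE;

            return PSCI_E_SUCCESS;
    }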
+
+/******************************************************************************
+ * This function retrieves the `psci_power_state_t` for system suspend from
+ * the platform.
+ *****************************************************************************/
+void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info)
+{
+       /*
+        * Assert that the required pm_ops hook is implemented to ensure that
+        * the capability detected during psci_setup() is valid.
+        */
+       assert(psci_plat_pm_ops->get_sys_suspend_power_state);
+
+       /*
+        * Query the platform for the power_state required for system suspend
+        */
+       psci_plat_pm_ops->get_sys_suspend_power_state(state_info);
+}
+
+/*******************************************************************************
+ * This function verifies that all the other cores in the system have been
+ * turned OFF and the current CPU is the last running CPU in the system.
+ * Returns 1 (true) if the current CPU is the last ON CPU or 0 (false)
+ * otherwise.
+ ******************************************************************************/
+unsigned int psci_is_last_on_cpu(void)
+{
+       unsigned int cpu_idx, my_idx = plat_my_core_pos();
+
+       for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
+               if (cpu_idx == my_idx) {
+                       assert(psci_get_aff_info_state() == AFF_STATE_ON);
+                       continue;
+               }
+
+               if (psci_get_aff_info_state_by_idx(cpu_idx) != AFF_STATE_OFF)
+                       return 0;
+       }
+
+       return 1;
+}
+
+/*******************************************************************************
+ * Routine to return the maximum power level to traverse to after a cpu has
+ * been physically powered up. It is expected to be called immediately after
+ * reset from assembler code.
+ ******************************************************************************/
+static unsigned int get_power_on_target_pwrlvl(void)
+{
+       unsigned int pwrlvl;
+
+       /*
+        * Assume that this cpu was suspended and retrieve its target power
+        * level. If it is invalid then it could only have been turned off
+        * earlier. PLAT_MAX_PWR_LVL will be the highest power level a
+        * cpu can be turned off to.
+        */
+       pwrlvl = psci_get_suspend_pwrlvl();
+       if (pwrlvl == PSCI_INVALID_PWR_LVL)
+               pwrlvl = PLAT_MAX_PWR_LVL;
+       return pwrlvl;
+}
+
+/******************************************************************************
+ * Helper function to update the requested local power state array. This array
+ * does not store the requested state for the CPU power level. Hence an
+ * assertion is added to prevent us from accessing the wrong index.
+ *****************************************************************************/
+static void psci_set_req_local_pwr_state(unsigned int pwrlvl,
+                                        unsigned int cpu_idx,
+                                        plat_local_state_t req_pwr_state)
+{
+       assert(pwrlvl > PSCI_CPU_PWR_LVL);
+       psci_req_local_pwr_states[pwrlvl - 1][cpu_idx] = req_pwr_state;
+}
+
+/******************************************************************************
+ * This function initializes the psci_req_local_pwr_states.
+ *****************************************************************************/
+void psci_init_req_local_pwr_states(void)
+{
+       /* Initialize the requested state of all non CPU power domains as OFF */
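+       /*
+        * Note: this byte-wise memset() works because plat_local_state_t
+        * is a single byte wide.
+        */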
+       memset(&psci_req_local_pwr_states, PLAT_MAX_OFF_STATE,
+                       sizeof(psci_req_local_pwr_states));
+}
+
+/******************************************************************************
+ * Helper function to return a reference to an array containing the local power
+ * states requested by each cpu for a power domain at 'pwrlvl'. The size of the
+ * array will be the number of cpu power domains of which this power domain is
+ * an ancestor. These requested states will be used to determine a suitable
+ * target state for this power domain during psci state coordination. An
+ * assertion is added to prevent us from accessing the CPU power level.
+ *****************************************************************************/
+static plat_local_state_t *psci_get_req_local_pwr_states(unsigned int pwrlvl,
+                                                        unsigned int cpu_idx)
+{
+       assert(pwrlvl > PSCI_CPU_PWR_LVL);
+
+       return &psci_req_local_pwr_states[pwrlvl - 1][cpu_idx];
+}
+
+/******************************************************************************
+ * Helper function to return the current local power state of each power domain
+ * from the current cpu power domain to its ancestor at the 'end_pwrlvl'. This
+ * function will be called after a cpu is powered on to find the local state
+ * each power domain has emerged from.
+ *****************************************************************************/
+static void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
+                                            psci_power_state_t *target_state)
+{
+       unsigned int parent_idx, lvl;
+       plat_local_state_t *pd_state = target_state->pwr_domain_state;
+
+       pd_state[PSCI_CPU_PWR_LVL] = psci_get_cpu_local_state();
+       parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;
+
+       /* Copy the local power state from node to state_info */
+       for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
+#if !USE_COHERENT_MEM
+               /*
+                * If using normal memory for psci_non_cpu_pd_nodes, we need
+                * to flush before reading the local power state as another
+                * cpu in the same power domain could have updated it and this
+                * code runs before caches are enabled.
+                */
+               flush_dcache_range(
+                               (uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
+                               sizeof(psci_non_cpu_pd_nodes[parent_idx]));
+#endif
+               pd_state[lvl] = psci_non_cpu_pd_nodes[parent_idx].local_state;
+               parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+       }
+
+       /* Set the higher levels to RUN */
+       for (; lvl <= PLAT_MAX_PWR_LVL; lvl++)
+               target_state->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
+}
+
+/******************************************************************************
+ * Helper function to set the target local power state that each power domain
+ * from the current cpu power domain to its ancestor at the 'end_pwrlvl' will
+ * enter. This function will be called after coordination of requested power
+ * states has been done for each power level.
+ *****************************************************************************/
+static void psci_set_target_local_pwr_states(unsigned int end_pwrlvl,
+                                       const psci_power_state_t *target_state)
+{
+       unsigned int parent_idx, lvl;
+       const plat_local_state_t *pd_state = target_state->pwr_domain_state;
+
+       psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]);
+
+       /*
+        * Need to flush as local_state will be accessed with Data Cache
+        * disabled during power on
+        */
+       flush_cpu_data(psci_svc_cpu_data.local_state);
+
+       parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;
+
+       /* Copy the local_state from state_info */
+       for (lvl = 1; lvl <= end_pwrlvl; lvl++) {
+               psci_non_cpu_pd_nodes[parent_idx].local_state = pd_state[lvl];
+#if !USE_COHERENT_MEM
+               flush_dcache_range(
+                               (uintptr_t)&psci_non_cpu_pd_nodes[parent_idx],
+                               sizeof(psci_non_cpu_pd_nodes[parent_idx]));
+#endif
+               parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+       }
+}
+
+
+/*******************************************************************************
+ * PSCI helper function to get the parent nodes corresponding to a cpu_index.
+ ******************************************************************************/
+void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
+                                     unsigned int end_lvl,
+                                     unsigned int node_index[])
+{
+       unsigned int parent_node = psci_cpu_pd_nodes[cpu_idx].parent_node;
+       int i;
+
+       for (i = PSCI_CPU_PWR_LVL + 1; i <= end_lvl; i++) {
+               *node_index++ = parent_node;
+               parent_node = psci_non_cpu_pd_nodes[parent_node].parent_node;
+       }
+}
+
+/******************************************************************************
+ * This function is invoked post CPU power up and initialization. It sets the
+ * affinity info state, target power state and requested power state for the
+ * current CPU and all its ancestor power domains to RUN.
+ *****************************************************************************/
+void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl)
+{
+       unsigned int parent_idx, cpu_idx = plat_my_core_pos(), lvl;
+       parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
+
+       /* Reset the local_state to RUN for the non cpu power domains. */
+       for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
+               psci_non_cpu_pd_nodes[parent_idx].local_state =
+                               PSCI_LOCAL_STATE_RUN;
+#if !USE_COHERENT_MEM
+               flush_dcache_range(
+                               (uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
+                               sizeof(psci_non_cpu_pd_nodes[parent_idx]));
+#endif
+               psci_set_req_local_pwr_state(lvl,
+                                            cpu_idx,
+                                            PSCI_LOCAL_STATE_RUN);
+               parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+       }
+
+       /* Set the affinity info state to ON */
+       psci_set_aff_info_state(AFF_STATE_ON);
+
+       psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
+       flush_cpu_data(psci_svc_cpu_data);
+}
+
+/******************************************************************************
+ * This function is passed the local power states requested for each power
+ * domain (state_info) between the current CPU domain and its ancestors until
+ * the target power level (end_pwrlvl). It updates the array of requested power
+ * states with this information.
+ *
+ * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
+ * retrieves the states requested by all the cpus of which the power domain at
+ * that level is an ancestor. It passes this information to the platform to
+ * coordinate and return the target power state. If the target state for a level
+ * is RUN then subsequent levels are not considered. At the CPU level, state
+ * coordination is not required. Hence, the requested and the target states are
+ * the same.
+ *
+ * The 'state_info' is updated with the target state for each level between the
+ * CPU and the 'end_pwrlvl' and returned to the caller.
+ *
+ * This function will only be invoked with data cache enabled and while
+ * powering down a core.
+ *****************************************************************************/
+void psci_do_state_coordination(unsigned int end_pwrlvl,
+                               psci_power_state_t *state_info)
+{
+       unsigned int lvl, parent_idx, cpu_idx = plat_my_core_pos();
+       unsigned int start_idx, ncpus;
+       plat_local_state_t target_state, *req_states;
+
+       assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
+       parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
+
+       /*
+        * For level 0, the requested state will be equivalent to the
+        * target state.
+        */
+       for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
+
+               /* First update the requested power state */
+               psci_set_req_local_pwr_state(lvl, cpu_idx,
+                                            state_info->pwr_domain_state[lvl]);
+
+               /* Get the requested power states for this power level */
+               start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
+               req_states = psci_get_req_local_pwr_states(lvl, start_idx);
+
+               /*
+                * Let the platform coordinate amongst the requested states at
+                * this power level and return the target local power state.
+                */
+               ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
+               target_state = plat_get_target_pwr_state(lvl,
+                                                        req_states,
+                                                        ncpus);
+
+               state_info->pwr_domain_state[lvl] = target_state;
+
+               /* Break early if the negotiated target power state is RUN */
+               if (is_local_state_run(state_info->pwr_domain_state[lvl]))
+                       break;
+
+               parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+       }
+
+       /*
+        * This is for cases when we break out of the above loop early because
+        * the target power state is RUN at a power level < end_pwrlvl.
+        * We update the requested power state from state_info and then
+        * set the target state as RUN.
+        */
+       for (lvl = lvl + 1; lvl <= end_pwrlvl; lvl++) {
+               psci_set_req_local_pwr_state(lvl, cpu_idx,
+                                            state_info->pwr_domain_state[lvl]);
+               state_info->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
+       }
+
+       /* Update the target state in the power domain nodes */
+       psci_set_target_local_pwr_states(end_pwrlvl, state_info);
+}
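For reference, the coordination policy delegated to plat_get_target_pwr_state() is expected to pick the shallowest of the requested states, since any CPU still requesting a shallower state constrains the shared domain. A sketch of such a policy (the actual default implementation lives outside this patch, so this is an approximation):

    plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
                                            const plat_local_state_t *states,
                                            unsigned int ncpu)
    {
            plat_local_state_t target = PLAT_MAX_OFF_STATE, temp;

            assert(ncpu);

            /* Deeper states have larger numeric values, so the minimum wins */
            do {
                    temp = *states++;
                    if (temp < target)
                            target = temp;
            } while (--ncpu);

            return target;
    }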
+
+/******************************************************************************
+ * This function validates a suspend request by making sure that if a standby
+ * state is requested then no power level is turned off and the highest power
+ * level is placed in a standby/retention state.
+ *
+ * It also ensures that the state level X will enter is not shallower than the
+ * state level X + 1 will enter.
+ *
+ * This validation will be enabled only for DEBUG builds as the platform is
+ * expected to perform these validations as well.
+ *****************************************************************************/
+int psci_validate_suspend_req(const psci_power_state_t *state_info,
+                             unsigned int is_power_down_state)
+{
+       unsigned int max_off_lvl, target_lvl, max_retn_lvl;
+       plat_local_state_t state;
+       plat_local_state_type_t req_state_type, deepest_state_type;
+       int i;
+
+       /* Find the target suspend power level */
+       target_lvl = psci_find_target_suspend_lvl(state_info);
+       if (target_lvl == PSCI_INVALID_PWR_LVL)
+               return PSCI_E_INVALID_PARAMS;
+
+       /* All power domain levels are in a RUN state to begin with */
+       deepest_state_type = STATE_TYPE_RUN;
+
+       for (i = target_lvl; i >= PSCI_CPU_PWR_LVL; i--) {
+               state = state_info->pwr_domain_state[i];
+               req_state_type = find_local_state_type(state);
+
+               /*
+                * While traversing from the highest power level to the lowest,
+                * the state requested for lower levels has to be the same or
+                * deeper i.e. equal to or greater than the state at the higher
+                * levels. If this condition is true, then the requested state
+                * becomes the deepest state encountered so far.
+                */
+               if (req_state_type < deepest_state_type)
+                       return PSCI_E_INVALID_PARAMS;
+               deepest_state_type = req_state_type;
+       }
+
+       /* Find the highest off power level */
+       max_off_lvl = psci_find_max_off_lvl(state_info);
+
+       /* The target_lvl is either equal to the max_off_lvl or max_retn_lvl */
+       max_retn_lvl = PSCI_INVALID_PWR_LVL;
+       if (target_lvl != max_off_lvl)
+               max_retn_lvl = target_lvl;
+
+       /*
+        * If this is not a request for a power down state then max off level
+        * has to be invalid and max retention level has to be a valid power
+        * level.
+        */
+       if (!is_power_down_state && (max_off_lvl != PSCI_INVALID_PWR_LVL ||
+                                   max_retn_lvl == PSCI_INVALID_PWR_LVL))
+               return PSCI_E_INVALID_PARAMS;
+
+       return PSCI_E_SUCCESS;
+}
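A worked example of the monotonicity rule, assuming PLAT_MAX_PWR_LVL == 1, PLAT_MAX_RET_STATE == 1 and PLAT_MAX_OFF_STATE == 2 (illustrative values):

    /* CPU (level 0) OFF, cluster (level 1) in retention: accepted, since
     * the lower level's state is at least as deep as the level above it. */
    psci_power_state_t ok  = { .pwr_domain_state = { 2, 1 } };

    /* CPU in retention but cluster OFF: rejected, since the cluster would
     * be in a deeper state than the CPU below it. */
    psci_power_state_t bad = { .pwr_domain_state = { 1, 2 } };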
+
+/******************************************************************************
+ * This function finds the highest power level which will be powered down
+ * amongst all the power levels specified in the 'state_info' structure
+ *****************************************************************************/
+unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info)
+{
+       int i;
+
+       for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
+               if (is_local_state_off(state_info->pwr_domain_state[i]))
+                       return i;
+       }
+
+       return PSCI_INVALID_PWR_LVL;
+}
+
+/******************************************************************************
+ * This function finds the level of the highest power domain which will be
+ * placed in a low power state during a suspend operation.
+ *****************************************************************************/
+unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info)
+{
+       int i;
+
+       for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
+               if (!is_local_state_run(state_info->pwr_domain_state[i]))
+                       return i;
+       }
+
+       return PSCI_INVALID_PWR_LVL;
+}
+
+/*******************************************************************************
+ * This function is passed a cpu_index and the highest level in the topology
+ * tree that the operation should be applied to. It picks up locks in order of
+ * increasing power domain level in the range specified.
+ ******************************************************************************/
+void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
+                                  unsigned int cpu_idx)
+{
+       unsigned int parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
+       unsigned int level;
+
+       /* No locking required for level 0. Hence start locking from level 1 */
+       for (level = PSCI_CPU_PWR_LVL + 1; level <= end_pwrlvl; level++) {
+               psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]);
+               parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+       }
+}
+
+/*******************************************************************************
+ * This function is passed a cpu_index and the highest level in the topology
+ * tree that the operation should be applied to. It releases the locks in order
+ * of decreasing power domain level in the range specified.
+ ******************************************************************************/
+void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
+                                  unsigned int cpu_idx)
+{
+       unsigned int parent_idx, parent_nodes[PLAT_MAX_PWR_LVL] = {0};
+       int level;
+
+       /* Get the parent nodes */
+       psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);
+
+       /* Unlock top down. No unlocking required for level 0. */
+       for (level = end_pwrlvl; level >= PSCI_CPU_PWR_LVL + 1; level--) {
+               parent_idx = parent_nodes[level - 1];
+               psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]);
+       }
+}
+
+/*******************************************************************************
+ * Simple routine to determine whether an mpidr is valid.
+ ******************************************************************************/
+int psci_validate_mpidr(u_register_t mpidr)
+{
+       if (plat_core_pos_by_mpidr(mpidr) < 0)
+               return PSCI_E_INVALID_PARAMS;
+
+       return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * This function determines the full entrypoint information for the requested
+ * PSCI entrypoint on power on/resume and returns it.
+ ******************************************************************************/
+static int psci_get_ns_ep_info(entry_point_info_t *ep,
+                              uintptr_t entrypoint,
+                              u_register_t context_id)
+{
+       u_register_t ep_attr, sctlr;
+       unsigned int daif, ee, mode;
+       u_register_t ns_scr_el3 = read_scr_el3();
+       u_register_t ns_sctlr_el1 = read_sctlr_el1();
+
+       sctlr = ns_scr_el3 & SCR_HCE_BIT ? read_sctlr_el2() : ns_sctlr_el1;
+       ee = 0;
+
+       ep_attr = NON_SECURE | EP_ST_DISABLE;
+       if (sctlr & SCTLR_EE_BIT) {
+               ep_attr |= EP_EE_BIG;
+               ee = 1;
+       }
+       SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);
+
+       ep->pc = entrypoint;
+       memset(&ep->args, 0, sizeof(ep->args));
+       ep->args.arg0 = context_id;
+
+       /*
+        * Figure out whether the cpu enters the non-secure address space
+        * in aarch32 or aarch64
+        */
+       if (ns_scr_el3 & SCR_RW_BIT) {
+
+               /*
+                * Check whether a Thumb entry point has been provided for an
+                * aarch64 EL
+                */
+               if (entrypoint & 0x1)
+                       return PSCI_E_INVALID_ADDRESS;
+
+               mode = ns_scr_el3 & SCR_HCE_BIT ? MODE_EL2 : MODE_EL1;
+
+               ep->spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+       } else {
+
+               mode = ns_scr_el3 & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;
+
+               /*
+                * TODO: Choose async. exception bits if HYP mode is not
+                * implemented according to the values of SCR.{AW, FW} bits
+                */
+               daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;
+
+               ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, daif);
+       }
+
+       return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * This function validates the entrypoint with the platform layer if the
+ * appropriate pm_ops hook is exported by the platform and returns the
+ * 'entry_point_info'.
+ ******************************************************************************/
+int psci_validate_entry_point(entry_point_info_t *ep,
+                             uintptr_t entrypoint,
+                             u_register_t context_id)
+{
+       int rc;
+
+       /* Validate the entrypoint using platform psci_ops */
+       if (psci_plat_pm_ops->validate_ns_entrypoint) {
+               rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
+               if (rc != PSCI_E_SUCCESS)
+                       return PSCI_E_INVALID_ADDRESS;
+       }
+
+       /*
+        * Verify and derive the re-entry information for
+        * the non-secure world from the non-secure state
+        * where this call originated.
+        */
+       rc = psci_get_ns_ep_info(ep, entrypoint, context_id);
+       return rc;
+}
+
+/*******************************************************************************
+ * Generic handler which is called when a cpu is physically powered on. It
+ * traverses the node information and finds the highest power level powered
+ * off and performs generic, architectural, platform setup and state management
+ * to power on that power level and power levels below it.
+ * e.g. for a cpu that has been powered on, it will call the platform-specific
+ * code to enable the gic cpu interface; for a cluster, it will additionally
+ * enable coherency at the interconnect level.
+ ******************************************************************************/
+void psci_power_up_finish(void)
+{
+       unsigned int end_pwrlvl, cpu_idx = plat_my_core_pos();
+       psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
+
+       /*
+        * Verify that we have been explicitly turned ON or resumed from
+        * suspend.
+        */
+       if (psci_get_aff_info_state() == AFF_STATE_OFF) {
+               ERROR("Unexpected affinity info state");
+               panic();
+       }
+
+       /*
+        * Get the maximum power domain level to traverse to after this cpu
+        * has been physically powered up.
+        */
+       end_pwrlvl = get_power_on_target_pwrlvl();
+
+       /*
+        * This function acquires the lock corresponding to each power level so
+        * that by the time all locks are taken, the system topology is snapshot
+        * and state management can be done safely.
+        */
+       psci_acquire_pwr_domain_locks(end_pwrlvl,
+                                     cpu_idx);
+
+#if ENABLE_PSCI_STAT
+       /*
+        * Capture power up time-stamp.
+        * No cache maintenance is required as caches are off
+        * and writes are direct to the main memory.
+        */
+       PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_EXIT_LOW_PWR,
+               PMF_NO_CACHE_MAINT);
+#endif
+
+       psci_get_target_local_pwr_states(end_pwrlvl, &state_info);
+
+       /*
+        * This CPU could be resuming from suspend or it could have just been
+        * turned on. To distinguish between these 2 cases, we examine the
+        * affinity state of the CPU:
+        *  - If the affinity state is ON_PENDING then it has just been
+        *    turned on.
+        *  - Else it is resuming from suspend.
+        *
+        * Depending on the type of warm reset identified, choose the right set
+        * of power management handlers and perform the generic, architectural
+        * and platform-specific handling.
+        */
+       if (psci_get_aff_info_state() == AFF_STATE_ON_PENDING)
+               psci_cpu_on_finish(cpu_idx, &state_info);
+       else
+               psci_cpu_suspend_finish(cpu_idx, &state_info);
+
+       /*
+        * Set the requested and target state of this CPU and all the higher
+        * power domains which are ancestors of this CPU to run.
+        */
+       psci_set_pwr_domains_to_run(end_pwrlvl);
+
+#if ENABLE_PSCI_STAT
+       /*
+        * Update PSCI stats.
+        * Caches are off when writing stats data on the power down path.
+        * Since caches are now enabled, it's necessary to do cache
+        * maintenance before reading that same data.
+        */
+       psci_stats_update_pwr_up(end_pwrlvl, &state_info, PMF_CACHE_MAINT);
+#endif
+
+       /*
+        * This loop releases the lock corresponding to each power level
+        * in the reverse order to which they were acquired.
+        */
+       psci_release_pwr_domain_locks(end_pwrlvl,
+                                     cpu_idx);
+}
+
+/*******************************************************************************
+ * This function initializes the set of hooks that PSCI invokes as part of power
+ * management operation. The power management hooks are expected to be provided
+ * by the SPD, after it finishes all its initialization
+ ******************************************************************************/
+void psci_register_spd_pm_hook(const spd_pm_ops_t *pm)
+{
+       assert(pm);
+       psci_spd_pm = pm;
+
+       if (pm->svc_migrate)
+               psci_caps |= define_psci_cap(PSCI_MIG_AARCH64);
+
+       if (pm->svc_migrate_info)
+               psci_caps |= define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64)
+                               | define_psci_cap(PSCI_MIG_INFO_TYPE);
+}
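An SPD would typically perform this registration at the end of its setup. A hypothetical example (handler names are placeholders; only hooks defined in spd_pm_ops_t may be populated):

    static const spd_pm_ops_t spd_pm = {
            .svc_suspend = spd_cpu_suspend_handler, /* placeholder handlers */
            .svc_migrate_info = spd_migrate_info,
    };

    /* ...at the end of SPD initialisation: */
    psci_register_spd_pm_hook(&spd_pm);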
+
+/*******************************************************************************
+ * This function invokes the migrate info hook in the spd_pm_ops. It performs
+ * the necessary return value validation. If the Secure Payload is UP and
+ * migrate-capable, it returns, through the mpidr parameter, the mpidr of the
+ * CPU on which the Secure Payload is resident. Otherwise, the value of the
+ * parameter on return is undefined.
+ ******************************************************************************/
+int psci_spd_migrate_info(u_register_t *mpidr)
+{
+       int rc;
+
+       if (!psci_spd_pm || !psci_spd_pm->svc_migrate_info)
+               return PSCI_E_NOT_SUPPORTED;
+
+       rc = psci_spd_pm->svc_migrate_info(mpidr);
+
+       assert(rc == PSCI_TOS_UP_MIG_CAP || rc == PSCI_TOS_NOT_UP_MIG_CAP \
+               || rc == PSCI_TOS_NOT_PRESENT_MP || rc == PSCI_E_NOT_SUPPORTED);
+
+       return rc;
+}
+
+
+/*******************************************************************************
+ * This function prints the state of all power domains present in the
+ * system
+ ******************************************************************************/
+void psci_print_power_domain_map(void)
+{
+#if LOG_LEVEL >= LOG_LEVEL_INFO
+       unsigned int idx;
+       plat_local_state_t state;
+       plat_local_state_type_t state_type;
+
+       /* This array maps to the plat_local_state_type enum defined above */
+       static const char * const psci_state_type_str[] = {
+               "ON",
+               "RETENTION",
+               "OFF",
+       };
+
+       INFO("PSCI Power Domain Map:\n");
+       for (idx = 0; idx < (PSCI_NUM_PWR_DOMAINS - PLATFORM_CORE_COUNT);
+                                                       idx++) {
+               state_type = find_local_state_type(
+                               psci_non_cpu_pd_nodes[idx].local_state);
+               INFO("  Domain Node : Level %u, parent_node %d,"
+                               " State %s (0x%x)\n",
+                               psci_non_cpu_pd_nodes[idx].level,
+                               psci_non_cpu_pd_nodes[idx].parent_node,
+                               psci_state_type_str[state_type],
+                               psci_non_cpu_pd_nodes[idx].local_state);
+       }
+
+       for (idx = 0; idx < PLATFORM_CORE_COUNT; idx++) {
+               state = psci_get_cpu_local_state_by_idx(idx);
+               state_type = find_local_state_type(state);
+               INFO("  CPU Node : MPID 0x%llx, parent_node %d,"
+                               " State %s (0x%x)\n",
+                               (unsigned long long)psci_cpu_pd_nodes[idx].mpidr,
+                               psci_cpu_pd_nodes[idx].parent_node,
+                               psci_state_type_str[state_type],
+                               psci_get_cpu_local_state_by_idx(idx));
+       }
+#endif
+}
+
+#if ENABLE_PLAT_COMPAT
+/*******************************************************************************
+ * PSCI Compatibility helper function to return the 'power_state' parameter of
+ * the PSCI CPU SUSPEND request for the current CPU. Returns PSCI_INVALID_DATA
+ * if not invoked within CPU_SUSPEND for the current CPU.
+ ******************************************************************************/
+int psci_get_suspend_powerstate(void)
+{
+       /* Sanity check to verify that CPU is within CPU_SUSPEND */
+       if (psci_get_aff_info_state() == AFF_STATE_ON &&
+               !is_local_state_run(psci_get_cpu_local_state()))
+               return psci_power_state_compat[plat_my_core_pos()];
+
+       return PSCI_INVALID_DATA;
+}
+
+/*******************************************************************************
+ * PSCI Compatibility helper function to return the state id of the current
+ * cpu encoded in the 'power_state' parameter. Returns PSCI_INVALID_DATA
+ * if not invoked within CPU_SUSPEND for the current CPU.
+ ******************************************************************************/
+int psci_get_suspend_stateid(void)
+{
+       unsigned int power_state;
+       power_state = psci_get_suspend_powerstate();
+       if (power_state != PSCI_INVALID_DATA)
+               return psci_get_pstate_id(power_state);
+
+       return PSCI_INVALID_DATA;
+}
+
+/*******************************************************************************
+ * PSCI Compatibility helper function to return the state id encoded in the
+ * 'power_state' parameter of the CPU specified by 'mpidr'. Returns
+ * PSCI_INVALID_DATA if the CPU is not in CPU_SUSPEND.
+ ******************************************************************************/
+int psci_get_suspend_stateid_by_mpidr(unsigned long mpidr)
+{
+       int cpu_idx = plat_core_pos_by_mpidr(mpidr);
+
+       if (cpu_idx < 0)
+               return PSCI_INVALID_DATA;
+
+       /* Sanity check to verify that the CPU is in CPU_SUSPEND */
+       if (psci_get_aff_info_state_by_idx(cpu_idx) == AFF_STATE_ON &&
+               !is_local_state_run(psci_get_cpu_local_state_by_idx(cpu_idx)))
+               return psci_get_pstate_id(psci_power_state_compat[cpu_idx]);
+
+       return PSCI_INVALID_DATA;
+}
+
+/*******************************************************************************
+ * This function returns the highest affinity level which is in OFF
+ * state. The affinity instance with which the level is associated is
+ * determined by the caller.
+ ******************************************************************************/
+unsigned int psci_get_max_phys_off_afflvl(void)
+{
+       psci_power_state_t state_info;
+
+       memset(&state_info, 0, sizeof(state_info));
+       psci_get_target_local_pwr_states(PLAT_MAX_PWR_LVL, &state_info);
+
+       return psci_find_target_suspend_lvl(&state_info);
+}
+
+/*******************************************************************************
+ * PSCI Compatibility helper function to return target affinity level requested
+ * for the CPU_SUSPEND. This function assumes affinity levels correspond to
+ * power domain levels on the platform.
+ ******************************************************************************/
+int psci_get_suspend_afflvl(void)
+{
+       return psci_get_suspend_pwrlvl();
+}
+
+#endif
diff --git a/lib/psci/psci_lib.mk b/lib/psci/psci_lib.mk
new file mode 100644 (file)
index 0000000..93c7832
--- /dev/null
@@ -0,0 +1,55 @@
+#
+# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# Neither the name of ARM nor the names of its contributors may be used
+# to endorse or promote products derived from this software without specific
+# prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+
+PSCI_LIB_SOURCES               :=      lib/el3_runtime/cpu_data_array.c        \
+                               lib/el3_runtime/aarch64/context.S               \
+                               lib/el3_runtime/aarch64/cpu_data.S              \
+                               lib/el3_runtime/aarch64/context_mgmt.c          \
+                               lib/cpus/aarch64/cpu_helpers.S                  \
+                               lib/locks/exclusive/spinlock.S                  \
+                               lib/psci/psci_off.c                             \
+                               lib/psci/psci_on.c                              \
+                               lib/psci/psci_suspend.c                         \
+                               lib/psci/psci_common.c                          \
+                               lib/psci/psci_main.c                            \
+                               lib/psci/psci_setup.c                           \
+                               lib/psci/psci_system_off.c                      \
+                               lib/psci/aarch64/psci_entry.S                   \
+                               lib/psci/aarch64/psci_helpers.S                 \
+
+ifeq (${USE_COHERENT_MEM}, 1)
+PSCI_LIB_SOURCES               +=      lib/locks/bakery/bakery_lock_coherent.c
+else
+PSCI_LIB_SOURCES               +=      lib/locks/bakery/bakery_lock_normal.c
+endif
+
+ifeq (${ENABLE_PSCI_STAT}, 1)
+PSCI_LIB_SOURCES               +=      lib/psci/psci_stat.c
+endif
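
An image that wants PSCI simply includes this makefile and appends the sources
it exports. A minimal, illustrative fragment (the `BL31_SOURCES` wiring shown
here is a sketch; the real hook-up is done in the BL31 makefile):

        # Illustrative consumer of psci_lib.mk (sketch only)
        include lib/psci/psci_lib.mk

        BL31_SOURCES    +=      ${PSCI_LIB_SOURCES}
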
diff --git a/lib/psci/psci_main.c b/lib/psci/psci_main.c
new file mode 100644 (file)
index 0000000..04ef10e
--- /dev/null
@@ -0,0 +1,440 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <platform.h>
+#include <runtime_svc.h>
+#include <std_svc.h>
+#include <string.h>
+#include "psci_private.h"
+
+/*******************************************************************************
+ * PSCI frontend API for servicing SMCs. Described in the PSCI spec.
+ ******************************************************************************/
+int psci_cpu_on(u_register_t target_cpu,
+               uintptr_t entrypoint,
+               u_register_t context_id)
+{
+       int rc;
+       entry_point_info_t ep;
+
+       /* Determine if the cpu exists or not */
+       rc = psci_validate_mpidr(target_cpu);
+       if (rc != PSCI_E_SUCCESS)
+               return PSCI_E_INVALID_PARAMS;
+
+       /* Validate the entry point and get the entry_point_info */
+       rc = psci_validate_entry_point(&ep, entrypoint, context_id);
+       if (rc != PSCI_E_SUCCESS)
+               return rc;
+
+       /*
+        * To turn this cpu on, specify which power
+        * levels need to be turned on
+        */
+       return psci_cpu_on_start(target_cpu, &ep);
+}
+
+unsigned int psci_version(void)
+{
+       return PSCI_MAJOR_VER | PSCI_MINOR_VER;
+}
+
+int psci_cpu_suspend(unsigned int power_state,
+                    uintptr_t entrypoint,
+                    u_register_t context_id)
+{
+       int rc;
+       unsigned int target_pwrlvl, is_power_down_state;
+       entry_point_info_t ep;
+       psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
+       plat_local_state_t cpu_pd_state;
+
+       /* Validate the power_state parameter */
+       rc = psci_validate_power_state(power_state, &state_info);
+       if (rc != PSCI_E_SUCCESS) {
+               assert(rc == PSCI_E_INVALID_PARAMS);
+               return rc;
+       }
+
+       /*
+        * Get the value of the state type bit from the power state parameter.
+        */
+       is_power_down_state = psci_get_pstate_type(power_state);
+
+       /* Sanity check the requested suspend levels */
+       assert(psci_validate_suspend_req(&state_info, is_power_down_state)
+                       == PSCI_E_SUCCESS);
+
+       target_pwrlvl = psci_find_target_suspend_lvl(&state_info);
+
+       /* Fast path for CPU standby. */
+       if (is_cpu_standby_req(is_power_down_state, target_pwrlvl)) {
+               if (!psci_plat_pm_ops->cpu_standby)
+                       return PSCI_E_INVALID_PARAMS;
+
+               /*
+                * Set the state of the CPU power domain to the platform
+                * specific retention state and enter the standby state.
+                */
+               cpu_pd_state = state_info.pwr_domain_state[PSCI_CPU_PWR_LVL];
+               psci_set_cpu_local_state(cpu_pd_state);
+
+#if ENABLE_PSCI_STAT
+               /*
+                * Capture time-stamp before CPU standby.
+                * No cache maintenance is needed as caches
+                * are ON throughout the CPU standby operation.
+                */
+               PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR,
+                       PMF_NO_CACHE_MAINT);
+#endif
+
+               psci_plat_pm_ops->cpu_standby(cpu_pd_state);
+
+               /* Upon exit from standby, set the state back to RUN. */
+               psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
+
+#if ENABLE_PSCI_STAT
+               /* Capture time-stamp after CPU standby */
+               PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_EXIT_LOW_PWR,
+                       PMF_NO_CACHE_MAINT);
+
+               /* Update PSCI stats */
+               psci_stats_update_pwr_up(PSCI_CPU_PWR_LVL, &state_info,
+                       PMF_NO_CACHE_MAINT);
+#endif
+
+               return PSCI_E_SUCCESS;
+       }
+
+       /*
+        * If a power down state has been requested, we need to verify entry
+        * point and program entry information.
+        */
+       if (is_power_down_state) {
+               rc = psci_validate_entry_point(&ep, entrypoint, context_id);
+               if (rc != PSCI_E_SUCCESS)
+                       return rc;
+       }
+
+       /*
+        * Do what is needed to enter the power down state. Upon success,
+        * enter the final wfi which will power down this CPU. This function
+        * might return if the power down was abandoned for any reason, e.g.
+        * arrival of an interrupt.
+        */
+       psci_cpu_suspend_start(&ep,
+                           target_pwrlvl,
+                           &state_info,
+                           is_power_down_state);
+
+       return PSCI_E_SUCCESS;
+}
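
For illustration, a caller-side sketch of building the `power_state` argument,
assuming the PSCI specification's original layout (state ID in bits[15:0],
state type in bit[16], power level in bits[25:24]); the exact encoding is
platform defined and is what psci_validate_power_state() above checks:

        /* Hypothetical helpers; real encodings are platform defined. */
        #define PSTATE_ID(id)           ((id) & 0xffffu)        /* bits[15:0]  */
        #define PSTATE_TYPE_PWRDOWN     (1u << 16)              /* bit[16]     */
        #define PSTATE_PWRLVL(lvl)      (((lvl) & 0x3u) << 24)  /* bits[25:24] */

        /* Request a power-down suspend targeting power level 1 (cluster). */
        unsigned int power_state = PSTATE_ID(0) | PSTATE_TYPE_PWRDOWN |
                                   PSTATE_PWRLVL(1);
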
+
+
+int psci_system_suspend(uintptr_t entrypoint, u_register_t context_id)
+{
+       int rc;
+       psci_power_state_t state_info;
+       entry_point_info_t ep;
+
+       /* Check if the current CPU is the last ON CPU in the system */
+       if (!psci_is_last_on_cpu())
+               return PSCI_E_DENIED;
+
+       /* Validate the entry point and get the entry_point_info */
+       rc = psci_validate_entry_point(&ep, entrypoint, context_id);
+       if (rc != PSCI_E_SUCCESS)
+               return rc;
+
+       /* Query the psci_power_state for system suspend */
+       psci_query_sys_suspend_pwrstate(&state_info);
+
+       /* Ensure that the psci_power_state makes sense */
+       assert(psci_find_target_suspend_lvl(&state_info) == PLAT_MAX_PWR_LVL);
+       assert(psci_validate_suspend_req(&state_info, PSTATE_TYPE_POWERDOWN)
+                                               == PSCI_E_SUCCESS);
+       assert(is_local_state_off(state_info.pwr_domain_state[PLAT_MAX_PWR_LVL]));
+
+       /*
+        * Do what is needed to enter the system suspend state. This function
+        * might return if the power down was abandoned for any reason, e.g.
+        * arrival of an interrupt.
+        */
+       psci_cpu_suspend_start(&ep,
+                           PLAT_MAX_PWR_LVL,
+                           &state_info,
+                           PSTATE_TYPE_POWERDOWN);
+
+       return PSCI_E_SUCCESS;
+}
+
+int psci_cpu_off(void)
+{
+       int rc;
+       unsigned int target_pwrlvl = PLAT_MAX_PWR_LVL;
+
+       /*
+        * Do what is needed to power off this CPU and possibly higher power
+        * levels if it is able to do so. Upon success, enter the final wfi
+        * which will power down this CPU.
+        */
+       rc = psci_do_cpu_off(target_pwrlvl);
+
+       /*
+        * The only error cpu_off can return is E_DENIED. So check if that's
+        * indeed the case.
+        */
+       assert(rc == PSCI_E_DENIED);
+
+       return rc;
+}
+
+int psci_affinity_info(u_register_t target_affinity,
+                      unsigned int lowest_affinity_level)
+{
+       unsigned int target_idx;
+
+       /* We don't support levels higher than PSCI_CPU_PWR_LVL */
+       if (lowest_affinity_level > PSCI_CPU_PWR_LVL)
+               return PSCI_E_INVALID_PARAMS;
+
+       /* Calculate the cpu index of the target */
+       target_idx = plat_core_pos_by_mpidr(target_affinity);
+       if (target_idx == -1)
+               return PSCI_E_INVALID_PARAMS;
+
+       return psci_get_aff_info_state_by_idx(target_idx);
+}
+
+int psci_migrate(u_register_t target_cpu)
+{
+       int rc;
+       u_register_t resident_cpu_mpidr;
+
+       rc = psci_spd_migrate_info(&resident_cpu_mpidr);
+       if (rc != PSCI_TOS_UP_MIG_CAP)
+               return (rc == PSCI_TOS_NOT_UP_MIG_CAP) ?
+                         PSCI_E_DENIED : PSCI_E_NOT_SUPPORTED;
+
+       /*
+        * Migrate should only be invoked on the CPU where
+        * the Secure OS is resident.
+        */
+       if (resident_cpu_mpidr != read_mpidr_el1())
+               return PSCI_E_NOT_PRESENT;
+
+       /* Check the validity of the specified target cpu */
+       rc = psci_validate_mpidr(target_cpu);
+       if (rc != PSCI_E_SUCCESS)
+               return PSCI_E_INVALID_PARAMS;
+
+       assert(psci_spd_pm && psci_spd_pm->svc_migrate);
+
+       rc = psci_spd_pm->svc_migrate(read_mpidr_el1(), target_cpu);
+       assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);
+
+       return rc;
+}
+
+int psci_migrate_info_type(void)
+{
+       u_register_t resident_cpu_mpidr;
+
+       return psci_spd_migrate_info(&resident_cpu_mpidr);
+}
+
+long psci_migrate_info_up_cpu(void)
+{
+       u_register_t resident_cpu_mpidr;
+       int rc;
+
+       /*
+        * The return value of this function depends upon what
+        * psci_spd_migrate_info() returns.
+        */
+       rc = psci_spd_migrate_info(&resident_cpu_mpidr);
+       if (rc != PSCI_TOS_NOT_UP_MIG_CAP && rc != PSCI_TOS_UP_MIG_CAP)
+               return PSCI_E_INVALID_PARAMS;
+
+       return resident_cpu_mpidr;
+}
+
+int psci_features(unsigned int psci_fid)
+{
+       unsigned int local_caps = psci_caps;
+
+       /* Check if it is a 64-bit function */
+       if (((psci_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_64)
+               local_caps &= PSCI_CAP_64BIT_MASK;
+
+       /* Check for invalid fid */
+       if (!(is_std_svc_call(psci_fid) && is_valid_fast_smc(psci_fid)
+                       && is_psci_fid(psci_fid)))
+               return PSCI_E_NOT_SUPPORTED;
+
+       /* Check if the psci fid is supported or not */
+       if (!(local_caps & define_psci_cap(psci_fid)))
+               return PSCI_E_NOT_SUPPORTED;
+
+       /* Format the feature flags */
+       if (psci_fid == PSCI_CPU_SUSPEND_AARCH32 ||
+                       psci_fid == PSCI_CPU_SUSPEND_AARCH64) {
+               /*
+                * The trusted firmware does not support OS Initiated Mode.
+                */
+               return (FF_PSTATE << FF_PSTATE_SHIFT) |
+                       ((!FF_SUPPORTS_OS_INIT_MODE) << FF_MODE_SUPPORT_SHIFT);
+       }
+
+       /* Return 0 for all other FIDs */
+       return PSCI_E_SUCCESS;
+}
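
As a usage note, any EL3 component can probe psci_features() before
advertising a call; for example (hypothetical caller, not part of this patch):

        /* PSCI_E_SUCCESS indicates the function is implemented and enabled. */
        if (psci_features(PSCI_SYSTEM_SUSPEND_AARCH64) == PSCI_E_SUCCESS)
                INFO("PSCI SYSTEM_SUSPEND is available\n");
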
+
+/*******************************************************************************
+ * PSCI top level handler for servicing SMCs.
+ ******************************************************************************/
+uintptr_t psci_smc_handler(uint32_t smc_fid,
+                         u_register_t x1,
+                         u_register_t x2,
+                         u_register_t x3,
+                         u_register_t x4,
+                         void *cookie,
+                         void *handle,
+                         u_register_t flags)
+{
+       if (is_caller_secure(flags))
+               SMC_RET1(handle, SMC_UNK);
+
+       /* Check the fid against the capabilities */
+       if (!(psci_caps & define_psci_cap(smc_fid)))
+               SMC_RET1(handle, SMC_UNK);
+
+       if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) {
+               /* 32-bit PSCI function, clear top parameter bits */
+
+               x1 = (uint32_t)x1;
+               x2 = (uint32_t)x2;
+               x3 = (uint32_t)x3;
+
+               switch (smc_fid) {
+               case PSCI_VERSION:
+                       SMC_RET1(handle, psci_version());
+
+               case PSCI_CPU_OFF:
+                       SMC_RET1(handle, psci_cpu_off());
+
+               case PSCI_CPU_SUSPEND_AARCH32:
+                       SMC_RET1(handle, psci_cpu_suspend(x1, x2, x3));
+
+               case PSCI_CPU_ON_AARCH32:
+                       SMC_RET1(handle, psci_cpu_on(x1, x2, x3));
+
+               case PSCI_AFFINITY_INFO_AARCH32:
+                       SMC_RET1(handle, psci_affinity_info(x1, x2));
+
+               case PSCI_MIG_AARCH32:
+                       SMC_RET1(handle, psci_migrate(x1));
+
+               case PSCI_MIG_INFO_TYPE:
+                       SMC_RET1(handle, psci_migrate_info_type());
+
+               case PSCI_MIG_INFO_UP_CPU_AARCH32:
+                       SMC_RET1(handle, psci_migrate_info_up_cpu());
+
+               case PSCI_SYSTEM_SUSPEND_AARCH32:
+                       SMC_RET1(handle, psci_system_suspend(x1, x2));
+
+               case PSCI_SYSTEM_OFF:
+                       psci_system_off();
+                       /* We should never return from psci_system_off() */
+
+               case PSCI_SYSTEM_RESET:
+                       psci_system_reset();
+                       /* We should never return from psci_system_reset() */
+
+               case PSCI_FEATURES:
+                       SMC_RET1(handle, psci_features(x1));
+
+#if ENABLE_PSCI_STAT
+               case PSCI_STAT_RESIDENCY_AARCH32:
+                       SMC_RET1(handle, psci_stat_residency(x1, x2));
+
+               case PSCI_STAT_COUNT_AARCH32:
+                       SMC_RET1(handle, psci_stat_count(x1, x2));
+#endif
+
+               default:
+                       break;
+               }
+       } else {
+               /* 64-bit PSCI function */
+
+               switch (smc_fid) {
+               case PSCI_CPU_SUSPEND_AARCH64:
+                       SMC_RET1(handle, psci_cpu_suspend(x1, x2, x3));
+
+               case PSCI_CPU_ON_AARCH64:
+                       SMC_RET1(handle, psci_cpu_on(x1, x2, x3));
+
+               case PSCI_AFFINITY_INFO_AARCH64:
+                       SMC_RET1(handle, psci_affinity_info(x1, x2));
+
+               case PSCI_MIG_AARCH64:
+                       SMC_RET1(handle, psci_migrate(x1));
+
+               case PSCI_MIG_INFO_UP_CPU_AARCH64:
+                       SMC_RET1(handle, psci_migrate_info_up_cpu());
+
+               case PSCI_SYSTEM_SUSPEND_AARCH64:
+                       SMC_RET1(handle, psci_system_suspend(x1, x2));
+
+#if ENABLE_PSCI_STAT
+               case PSCI_STAT_RESIDENCY_AARCH64:
+                       SMC_RET1(handle, psci_stat_residency(x1, x2));
+
+               case PSCI_STAT_COUNT_AARCH64:
+                       SMC_RET1(handle, psci_stat_count(x1, x2));
+#endif
+
+               default:
+                       break;
+               }
+       }
+
+       WARN("Unimplemented PSCI Call: 0x%x\n", smc_fid);
+       SMC_RET1(handle, SMC_UNK);
+}
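
psci_smc_handler() is not registered with the runtime services framework
directly; it is reached through the Standard Service dispatcher. A hedged
sketch of that routing (the actual code lives in the Standard Service sources
and may differ in detail):

        uintptr_t std_svc_smc_handler(uint32_t smc_fid, u_register_t x1,
                                      u_register_t x2, u_register_t x3,
                                      u_register_t x4, void *cookie,
                                      void *handle, u_register_t flags)
        {
                /* Route PSCI function IDs to the PSCI top level handler */
                if (is_psci_fid(smc_fid))
                        return psci_smc_handler(smc_fid, x1, x2, x3, x4,
                                                cookie, handle, flags);

                SMC_RET1(handle, SMC_UNK);
        }
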
diff --git a/lib/psci/psci_off.c b/lib/psci/psci_off.c
new file mode 100644 (file)
index 0000000..471141d
--- /dev/null
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <platform.h>
+#include <string.h>
+#include "psci_private.h"
+
+/******************************************************************************
+ * Construct the psci_power_state to request power OFF at all power levels.
+ ******************************************************************************/
+static void psci_set_power_off_state(psci_power_state_t *state_info)
+{
+       int lvl;
+
+       for (lvl = PSCI_CPU_PWR_LVL; lvl <= PLAT_MAX_PWR_LVL; lvl++)
+               state_info->pwr_domain_state[lvl] = PLAT_MAX_OFF_STATE;
+}
+
+/******************************************************************************
+ * Top level handler which is called when a cpu wants to power itself down.
+ * It's assumed that along with turning the cpu power domain off, power
+ * domains at higher levels will be turned off as far as possible. It finds
+ * the highest level where a domain has to be powered off by traversing the
+ * node information and then performs generic, architectural, platform setup
+ * and state management required to turn OFF that power domain and domains
+ * below it. For example, for a cpu that's to be powered OFF, it could mean
+ * programming the power controller, whereas for a cluster that's to be
+ * powered off, it will call the platform specific code which will disable
+ * coherency at the interconnect level if the cpu is the last in the cluster
+ * and also program the power controller.
+ ******************************************************************************/
+int psci_do_cpu_off(unsigned int end_pwrlvl)
+{
+       int rc = PSCI_E_SUCCESS, idx = plat_my_core_pos();
+       psci_power_state_t state_info;
+
+       /*
+        * This function must only be called on platforms where the
+        * CPU_OFF platform hooks have been implemented.
+        */
+       assert(psci_plat_pm_ops->pwr_domain_off);
+
+       /*
+        * This function acquires the lock corresponding to each power
+        * level so that by the time all locks are taken, the system topology
+        * is snapshotted and state management can be done safely.
+        */
+       psci_acquire_pwr_domain_locks(end_pwrlvl,
+                                     idx);
+
+       /*
+        * Call the cpu off handler registered by the Secure Payload Dispatcher
+        * to let it do any bookkeeping. Assume that the SPD always reports an
+        * E_DENIED error if the SP refuses to power down.
+        */
+       if (psci_spd_pm && psci_spd_pm->svc_off) {
+               rc = psci_spd_pm->svc_off(0);
+               if (rc)
+                       goto exit;
+       }
+
+       /* Construct the psci_power_state for CPU_OFF */
+       psci_set_power_off_state(&state_info);
+
+       /*
+        * This function is passed the requested state info and
+        * it returns the negotiated state info for each power level up to
+        * the end level specified.
+        */
+       psci_do_state_coordination(end_pwrlvl, &state_info);
+
+#if ENABLE_PSCI_STAT
+       /* Update the last cpu for each level till end_pwrlvl */
+       psci_stats_update_pwr_down(end_pwrlvl, &state_info);
+#endif
+
+       /*
+        * Arch. management. Perform the necessary steps to flush all
+        * cpu caches.
+        */
+       psci_do_pwrdown_cache_maintenance(psci_find_max_off_lvl(&state_info));
+
+       /*
+        * Plat. management: Perform platform specific actions to turn this
+        * cpu off e.g. exit cpu coherency, program the power controller etc.
+        */
+       psci_plat_pm_ops->pwr_domain_off(&state_info);
+
+#if ENABLE_PSCI_STAT
+       /*
+        * Capture time-stamp while entering low power state.
+        * No cache maintenance needed because caches are off
+        * and writes are direct to main memory.
+        */
+       PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR,
+               PMF_NO_CACHE_MAINT);
+#endif
+
+exit:
+       /*
+        * Release the locks corresponding to each power level in the
+        * reverse order to which they were acquired.
+        */
+       psci_release_pwr_domain_locks(end_pwrlvl,
+                                     idx);
+
+       /*
+        * Check if all actions needed to safely power down this cpu have
+        * successfully completed.
+        */
+       if (rc == PSCI_E_SUCCESS) {
+               /*
+                * Set the affinity info state to OFF. This writes directly to
+                * main memory as caches are disabled, so cache maintenance is
+                * required to ensure that later cached reads of aff_info_state
+                * return AFF_STATE_OFF.  A dsbish() ensures ordering of the
+                * update to the affinity info state prior to cache line
+                * invalidation.
+                */
+               flush_cpu_data(psci_svc_cpu_data.aff_info_state);
+               psci_set_aff_info_state(AFF_STATE_OFF);
+               dsbish();
+               inv_cpu_data(psci_svc_cpu_data.aff_info_state);
+
+               if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi) {
+                       /* This function must not return */
+                       psci_plat_pm_ops->pwr_domain_pwr_down_wfi(&state_info);
+               } else {
+                       /*
+                        * Enter a wfi loop which will allow the power
+                        * controller to physically power down this cpu.
+                        */
+                       psci_power_down_wfi();
+               }
+       }
+
+       return rc;
+}
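
The svc_off hook consulted above is one of the spd_pm_ops_t callbacks a
Secure Payload Dispatcher registers with PSCI (via
psci_register_spd_pm_hook()). A minimal sketch of an SPD vetoing power down;
the helper name is invented for illustration:

        /* Hypothetical SPD hook: deny CPU_OFF while secure work is pending. */
        static int32_t my_spd_svc_off(u_register_t unused)
        {
                if (my_spd_secure_work_pending())       /* assumed helper */
                        return PSCI_E_DENIED;

                return PSCI_E_SUCCESS;
        }

        static const spd_pm_ops_t my_spd_pm_ops = {
                .svc_off = my_spd_svc_off,
        };
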
diff --git a/lib/psci/psci_on.c b/lib/psci/psci_on.c
new file mode 100644 (file)
index 0000000..d4826ed
--- /dev/null
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <bl31.h>
+#include <debug.h>
+#include <context_mgmt.h>
+#include <platform.h>
+#include <runtime_svc.h>
+#include <stddef.h>
+#include "psci_private.h"
+
+/*******************************************************************************
+ * This function checks whether a cpu which has been requested to be turned on
+ * is OFF to begin with.
+ ******************************************************************************/
+static int cpu_on_validate_state(aff_info_state_t aff_state)
+{
+       if (aff_state == AFF_STATE_ON)
+               return PSCI_E_ALREADY_ON;
+
+       if (aff_state == AFF_STATE_ON_PENDING)
+               return PSCI_E_ON_PENDING;
+
+       assert(aff_state == AFF_STATE_OFF);
+       return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * Generic handler which is called to physically power on a cpu identified by
+ * its mpidr. It performs the generic, architectural, platform setup and state
+ * management to power on the target cpu e.g. it will ensure that
+ * enough information is stashed for it to resume execution in the non-secure
+ * security state.
+ *
+ * The states of all the relevant power domains are changed after calling the
+ * platform handler, as it can return an error.
+ ******************************************************************************/
+int psci_cpu_on_start(u_register_t target_cpu,
+                     entry_point_info_t *ep)
+{
+       int rc;
+       unsigned int target_idx = plat_core_pos_by_mpidr(target_cpu);
+       aff_info_state_t target_aff_state;
+
+       /* Calling function must supply valid input arguments */
+       assert((int) target_idx >= 0);
+       assert(ep != NULL);
+
+       /*
+        * This function must only be called on platforms where the
+        * CPU_ON platform hooks have been implemented.
+        */
+       assert(psci_plat_pm_ops->pwr_domain_on &&
+                       psci_plat_pm_ops->pwr_domain_on_finish);
+
+       /* Protect against multiple CPUs trying to turn ON the same target CPU */
+       psci_spin_lock_cpu(target_idx);
+
+       /*
+        * Generic management: Ensure that the cpu is off before it
+        * can be turned on.
+        */
+       rc = cpu_on_validate_state(psci_get_aff_info_state_by_idx(target_idx));
+       if (rc != PSCI_E_SUCCESS)
+               goto exit;
+
+       /*
+        * Call the cpu on handler registered by the Secure Payload Dispatcher
+        * to let it do any bookkeeping. If the handler encounters an error,
+        * it's expected to assert within.
+        */
+       if (psci_spd_pm && psci_spd_pm->svc_on)
+               psci_spd_pm->svc_on(target_cpu);
+
+       /*
+        * Set the Affinity info state of the target cpu to ON_PENDING.
+        * Flush aff_info_state as it will be accessed with caches
+        * turned OFF.
+        */
+       psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
+       flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
+
+       /*
+        * The cache line invalidation by the target CPU after setting the
+        * state to OFF (see psci_do_cpu_off()), could cause the update to
+        * aff_info_state to be invalidated. Retry the update if the target
+        * CPU aff_info_state is not ON_PENDING.
+        */
+       target_aff_state = psci_get_aff_info_state_by_idx(target_idx);
+       if (target_aff_state != AFF_STATE_ON_PENDING) {
+               assert(target_aff_state == AFF_STATE_OFF);
+               psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
+               flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
+
+               assert(psci_get_aff_info_state_by_idx(target_idx) == AFF_STATE_ON_PENDING);
+       }
+
+       /*
+        * Perform generic, architecture and platform specific handling.
+        */
+       /*
+        * Plat. management: Give the platform the current state
+        * of the target cpu to allow it to perform the necessary
+        * steps to power on.
+        */
+       rc = psci_plat_pm_ops->pwr_domain_on(target_cpu);
+       assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);
+
+       if (rc == PSCI_E_SUCCESS) {
+               /* Store the re-entry information for the non-secure world. */
+               cm_init_context_by_index(target_idx, ep);
+       } else {
+               /* Restore the state on error. */
+               psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_OFF);
+               flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
+       }
+
+exit:
+       psci_spin_unlock_cpu(target_idx);
+       return rc;
+}
+
+/*******************************************************************************
+ * The following function finishes an earlier power on request. It is
+ * called by the common finisher routine in psci_common.c. The `state_info`
+ * is the psci_power_state from which this CPU has woken up.
+ ******************************************************************************/
+void psci_cpu_on_finish(unsigned int cpu_idx,
+                       psci_power_state_t *state_info)
+{
+       /*
+        * Plat. management: Perform the platform specific actions
+        * for this cpu e.g. enabling the gic or zeroing the mailbox
+        * register. The actual state of this cpu has already been
+        * changed.
+        */
+       psci_plat_pm_ops->pwr_domain_on_finish(state_info);
+
+       /*
+        * Arch. management: Enable data cache and manage stack memory
+        */
+       psci_do_pwrup_cache_maintenance();
+
+       /*
+        * All the platform specific actions for turning this cpu
+        * on have completed. Perform enough arch. initialization
+        * to run in the non-secure address space.
+        */
+       bl31_arch_setup();
+
+       /*
+        * Lock the CPU spin lock to make sure that the context initialization
+        * is done. Since the lock is only used in this function to create
+        * a synchronization point with cpu_on_start(), it can be released
+        * immediately.
+        */
+       psci_spin_lock_cpu(cpu_idx);
+       psci_spin_unlock_cpu(cpu_idx);
+
+       /* Ensure we have been explicitly woken up by another cpu */
+       assert(psci_get_aff_info_state() == AFF_STATE_ON_PENDING);
+
+       /*
+        * Call the cpu on finish handler registered by the Secure Payload
+        * Dispatcher to let it do any bookkeeping. If the handler encounters an
+        * error, it's expected to assert within
+        */
+       if (psci_spd_pm && psci_spd_pm->svc_on_finish)
+               psci_spd_pm->svc_on_finish(0);
+
+       /*
+        * Populate the mpidr field within the cpu node array. This needs
+        * to be done only once.
+        */
+       psci_cpu_pd_nodes[cpu_idx].mpidr = read_mpidr() & MPIDR_AFFINITY_MASK;
+
+       /*
+        * Generic management: Now we just need to retrieve the
+        * information that we had stashed away during the cpu_on
+        * call to set this cpu on its way.
+        */
+       cm_prepare_el3_exit(NON_SECURE);
+}
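
The pwr_domain_on/pwr_domain_on_finish pair used above are platform hooks
from plat_psci_ops_t. A hedged sketch of the platform side, with power
controller and GIC helpers invented for illustration:

        /* Hypothetical platform hooks backing CPU_ON. */
        static int my_plat_pwr_domain_on(u_register_t mpidr)
        {
                /* Release the target core from reset through the (assumed)
                 * platform power controller. */
                my_pwrc_power_on_core(mpidr);           /* assumed helper */
                return PSCI_E_SUCCESS;
        }

        static void my_plat_pwr_domain_on_finish(const psci_power_state_t
                                                        *target_state)
        {
                /* Re-enable the CPU interface of the interrupt controller */
                my_plat_gic_cpuif_enable();             /* assumed helper */
        }
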
diff --git a/lib/psci/psci_private.h b/lib/psci/psci_private.h
new file mode 100644 (file)
index 0000000..f42ce55
--- /dev/null
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PSCI_PRIVATE_H__
+#define __PSCI_PRIVATE_H__
+
+#include <arch.h>
+#include <bakery_lock.h>
+#include <bl_common.h>
+#include <cpu_data.h>
+#include <pmf.h>
+#include <psci.h>
+#include <spinlock.h>
+
+/*
+ * The following helper macros abstract the interface to the Bakery
+ * Lock API.
+ */
+#define psci_lock_init(non_cpu_pd_node, idx)                   \
+       ((non_cpu_pd_node)[(idx)].lock_index = (idx))
+#define psci_lock_get(non_cpu_pd_node)                         \
+       bakery_lock_get(&psci_locks[(non_cpu_pd_node)->lock_index])
+#define psci_lock_release(non_cpu_pd_node)                     \
+       bakery_lock_release(&psci_locks[(non_cpu_pd_node)->lock_index])
+
+/*
+ * The PSCI capabilities which are provided by the generic code and do not
+ * depend on the platform or SPD capabilities.
+ */
+#define PSCI_GENERIC_CAP       \
+                       (define_psci_cap(PSCI_VERSION) |                \
+                       define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) |   \
+                       define_psci_cap(PSCI_FEATURES))
+
+/*
+ * The PSCI capabilities mask for 64-bit functions.
+ */
+#define PSCI_CAP_64BIT_MASK    \
+                       (define_psci_cap(PSCI_CPU_SUSPEND_AARCH64) |    \
+                       define_psci_cap(PSCI_CPU_ON_AARCH64) |          \
+                       define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) |   \
+                       define_psci_cap(PSCI_MIG_AARCH64) |             \
+                       define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64) | \
+                       define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64) |  \
+                       define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64) |  \
+                       define_psci_cap(PSCI_STAT_COUNT_AARCH64))
+
+/*
+ * Helper macros to get/set the fields of PSCI per-cpu data.
+ */
+#define psci_set_aff_info_state(aff_state) \
+               set_cpu_data(psci_svc_cpu_data.aff_info_state, aff_state)
+#define psci_get_aff_info_state() \
+               get_cpu_data(psci_svc_cpu_data.aff_info_state)
+#define psci_get_aff_info_state_by_idx(idx) \
+               get_cpu_data_by_index(idx, psci_svc_cpu_data.aff_info_state)
+#define psci_set_aff_info_state_by_idx(idx, aff_state) \
+               set_cpu_data_by_index(idx, psci_svc_cpu_data.aff_info_state,\
+                                       aff_state)
+#define psci_get_suspend_pwrlvl() \
+               get_cpu_data(psci_svc_cpu_data.target_pwrlvl)
+#define psci_set_suspend_pwrlvl(target_lvl) \
+               set_cpu_data(psci_svc_cpu_data.target_pwrlvl, target_lvl)
+#define psci_set_cpu_local_state(state) \
+               set_cpu_data(psci_svc_cpu_data.local_state, state)
+#define psci_get_cpu_local_state() \
+               get_cpu_data(psci_svc_cpu_data.local_state)
+#define psci_get_cpu_local_state_by_idx(idx) \
+               get_cpu_data_by_index(idx, psci_svc_cpu_data.local_state)
+
+/*
+ * Helper macros for the CPU level spinlocks
+ */
+#define psci_spin_lock_cpu(idx)        spin_lock(&psci_cpu_pd_nodes[idx].cpu_lock)
+#define psci_spin_unlock_cpu(idx) spin_unlock(&psci_cpu_pd_nodes[idx].cpu_lock)
+
+/* Helper macro to identify a CPU standby request in PSCI Suspend call */
+#define is_cpu_standby_req(is_power_down_state, retn_lvl) \
+               (((!(is_power_down_state)) && ((retn_lvl) == 0)) ? 1 : 0)
+
+/* Following are used as ID's to capture time-stamp */
+#define PSCI_STAT_ID_ENTER_LOW_PWR             0
+#define PSCI_STAT_ID_EXIT_LOW_PWR              1
+#define PSCI_STAT_TOTAL_IDS                    2
+
+/* Declare PMF service functions for PSCI */
+PMF_DECLARE_CAPTURE_TIMESTAMP(psci_svc)
+PMF_DECLARE_GET_TIMESTAMP(psci_svc)
+
+/*******************************************************************************
+ * The following two data structures implement the power domain tree. The tree
+ * is used to track the state of all the nodes i.e. power domain instances
+ * described by the platform. The tree consists of nodes that describe CPU power
+ * domains i.e. leaf nodes and all other power domains which are parents of a
+ * CPU power domain i.e. non-leaf nodes.
+ ******************************************************************************/
+typedef struct non_cpu_pwr_domain_node {
+       /*
+        * Index of the first CPU power domain node level 0 which has this node
+        * as its parent.
+        */
+       unsigned int cpu_start_idx;
+
+       /*
+        * Number of CPU power domains which are siblings of the domain indexed
+        * by 'cpu_start_idx' i.e. all the domains in the range 'cpu_start_idx
+        * -> cpu_start_idx + ncpus' have this node as their parent.
+        */
+       unsigned int ncpus;
+
+       /*
+        * Index of the parent power domain node.
+        * TODO: Figure out whether using a pointer is more efficient.
+        */
+       unsigned int parent_node;
+
+       plat_local_state_t local_state;
+
+       unsigned char level;
+
+       /* For indexing the psci_lock array */
+       unsigned char lock_index;
+} non_cpu_pd_node_t;
+
+typedef struct cpu_pwr_domain_node {
+       u_register_t mpidr;
+
+       /*
+        * Index of the parent power domain node.
+        * TODO: Figure out whether using a pointer is more efficient.
+        */
+       unsigned int parent_node;
+
+       /*
+        * A CPU power domain does not require state coordination like its
+        * parent power domains. Hence this node does not include a bakery
+        * lock. A spinlock is required by the CPU_ON handler to prevent a race
+        * when multiple CPUs try to turn ON the same target CPU.
+        */
+       spinlock_t cpu_lock;
+} cpu_pd_node_t;
+
+/*******************************************************************************
+ * Data prototypes
+ ******************************************************************************/
+extern const plat_psci_ops_t *psci_plat_pm_ops;
+extern non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS];
+extern cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
+extern unsigned int psci_caps;
+
+/* One bakery lock is required for each non-cpu power domain */
+DECLARE_BAKERY_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
+
+/*******************************************************************************
+ * SPD's power management hooks registered with PSCI
+ ******************************************************************************/
+extern const spd_pm_ops_t *psci_spd_pm;
+
+/*******************************************************************************
+ * Function prototypes
+ ******************************************************************************/
+/* Private exported functions from psci_common.c */
+int psci_validate_power_state(unsigned int power_state,
+                             psci_power_state_t *state_info);
+void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info);
+int psci_validate_mpidr(u_register_t mpidr);
+void psci_init_req_local_pwr_states(void);
+void psci_power_up_finish(void);
+int psci_validate_entry_point(entry_point_info_t *ep,
+                       uintptr_t entrypoint, u_register_t context_id);
+void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
+                                     unsigned int end_lvl,
+                                     unsigned int node_index[]);
+void psci_do_state_coordination(unsigned int end_pwrlvl,
+                               psci_power_state_t *state_info);
+void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
+                                  unsigned int cpu_idx);
+void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
+                                  unsigned int cpu_idx);
+int psci_validate_suspend_req(const psci_power_state_t *state_info,
+                             unsigned int is_power_down_state_req);
+unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info);
+unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info);
+void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl);
+void psci_print_power_domain_map(void);
+unsigned int psci_is_last_on_cpu(void);
+int psci_spd_migrate_info(u_register_t *mpidr);
+
+/* Private exported functions from psci_on.c */
+int psci_cpu_on_start(u_register_t target_cpu,
+                     entry_point_info_t *ep);
+
+void psci_cpu_on_finish(unsigned int cpu_idx,
+                       psci_power_state_t *state_info);
+
+/* Private exported functions from psci_off.c */
+int psci_do_cpu_off(unsigned int end_pwrlvl);
+
+/* Private exported functions from psci_suspend.c */
+void psci_cpu_suspend_start(entry_point_info_t *ep,
+                       unsigned int end_pwrlvl,
+                       psci_power_state_t *state_info,
+                       unsigned int is_power_down_state_req);
+
+void psci_cpu_suspend_finish(unsigned int cpu_idx,
+                       psci_power_state_t *state_info);
+
+/* Private exported functions from psci_helpers.S */
+void psci_do_pwrdown_cache_maintenance(unsigned int pwr_level);
+void psci_do_pwrup_cache_maintenance(void);
+
+/* Private exported functions from psci_system_off.c */
+void __dead2 psci_system_off(void);
+void __dead2 psci_system_reset(void);
+
+/* Private exported functions from psci_stat.c */
+void psci_stats_update_pwr_down(unsigned int end_pwrlvl,
+                       const psci_power_state_t *state_info);
+void psci_stats_update_pwr_up(unsigned int end_pwrlvl,
+                       const psci_power_state_t *state_info,
+                       unsigned int flags);
+u_register_t psci_stat_residency(u_register_t target_cpu,
+                       unsigned int power_state);
+u_register_t psci_stat_count(u_register_t target_cpu,
+                       unsigned int power_state);
+
+#endif /* __PSCI_PRIVATE_H__ */
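
For illustration, the bakery lock helpers defined in this header are used
around updates to non-CPU power domain nodes, roughly as follows (sketch
only; `parent_idx` is assumed to be a valid node index):

        non_cpu_pd_node_t *node = &psci_non_cpu_pd_nodes[parent_idx];

        psci_lock_get(node);
        node->local_state = PLAT_MAX_OFF_STATE; /* coordinated state update */
        psci_lock_release(node);
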
diff --git a/lib/psci/psci_setup.c b/lib/psci/psci_setup.c
new file mode 100644 (file)
index 0000000..fac0ede
--- /dev/null
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <platform.h>
+#include <stddef.h>
+#include "psci_private.h"
+
+/*******************************************************************************
+ * Per cpu non-secure contexts used to program the architectural state prior
+ * to return to the normal world.
+ * TODO: Use the memory allocator to set aside memory for the contexts instead
+ * of relying on platform defined constants.
+ ******************************************************************************/
+static cpu_context_t psci_ns_context[PLATFORM_CORE_COUNT];
+
+/******************************************************************************
+ * Define the psci capability variable.
+ *****************************************************************************/
+unsigned int psci_caps;
+
+/*******************************************************************************
+ * Function which initializes the 'psci_non_cpu_pd_nodes' or the
+ * 'psci_cpu_pd_nodes' corresponding to the power level.
+ ******************************************************************************/
+static void psci_init_pwr_domain_node(unsigned int node_idx,
+                                       unsigned int parent_idx,
+                                       unsigned int level)
+{
+       if (level > PSCI_CPU_PWR_LVL) {
+               psci_non_cpu_pd_nodes[node_idx].level = level;
+               psci_lock_init(psci_non_cpu_pd_nodes, node_idx);
+               psci_non_cpu_pd_nodes[node_idx].parent_node = parent_idx;
+               psci_non_cpu_pd_nodes[node_idx].local_state =
+                                                        PLAT_MAX_OFF_STATE;
+       } else {
+               psci_cpu_data_t *svc_cpu_data;
+
+               psci_cpu_pd_nodes[node_idx].parent_node = parent_idx;
+
+               /* Initialize with an invalid mpidr */
+               psci_cpu_pd_nodes[node_idx].mpidr = PSCI_INVALID_MPIDR;
+
+               svc_cpu_data =
+                       &(_cpu_data_by_index(node_idx)->psci_svc_cpu_data);
+
+               /* Set the Affinity Info for the cores as OFF */
+               svc_cpu_data->aff_info_state = AFF_STATE_OFF;
+
+               /* Invalidate the suspend level for the cpu */
+               svc_cpu_data->target_pwrlvl = PSCI_INVALID_PWR_LVL;
+
+               /* Set the power state to OFF state */
+               svc_cpu_data->local_state = PLAT_MAX_OFF_STATE;
+
+               flush_dcache_range((uintptr_t)svc_cpu_data,
+                                                sizeof(*svc_cpu_data));
+
+               cm_set_context_by_index(node_idx,
+                                       (void *) &psci_ns_context[node_idx],
+                                       NON_SECURE);
+       }
+}
+
+/*******************************************************************************
+ * This function updates the cpu_start_idx and ncpus fields for each node in
+ * psci_non_cpu_pd_nodes[]. It does so by comparing the parent nodes of each of
+ * the CPUs and checking whether they match with the parent of the previous
+ * CPU. The basic assumption for this to work is that children of the same
+ * parent are allocated adjacent indices. The platform should ensure this
+ * through proper mapping of the CPUs to indices via plat_core_pos_by_mpidr()
+ * and plat_my_core_pos() APIs.
+ *******************************************************************************/
+static void psci_update_pwrlvl_limits(void)
+{
+       int j;
+       unsigned int nodes_idx[PLAT_MAX_PWR_LVL] = {0};
+       unsigned int temp_index[PLAT_MAX_PWR_LVL], cpu_idx;
+
+       for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
+               psci_get_parent_pwr_domain_nodes(cpu_idx,
+                                                PLAT_MAX_PWR_LVL,
+                                                temp_index);
+               for (j = PLAT_MAX_PWR_LVL - 1; j >= 0; j--) {
+                       if (temp_index[j] != nodes_idx[j]) {
+                               nodes_idx[j] = temp_index[j];
+                               psci_non_cpu_pd_nodes[nodes_idx[j]].cpu_start_idx
+                                       = cpu_idx;
+                       }
+                       psci_non_cpu_pd_nodes[nodes_idx[j]].ncpus++;
+               }
+       }
+}
+
+/*******************************************************************************
+ * Core routine to populate the power domain tree. The tree descriptor passed by
+ * the platform is populated breadth-first and the first entry in the map
+ * informs the number of root power domains. The parent nodes of the root nodes
+ * will point to an invalid entry (-1).
+ ******************************************************************************/
+static void populate_power_domain_tree(const unsigned char *topology)
+{
+       unsigned int i, j = 0, num_nodes_at_lvl = 1, num_nodes_at_next_lvl;
+       unsigned int node_index = 0, parent_node_index = 0, num_children;
+       int level = PLAT_MAX_PWR_LVL;
+
+       /*
+        * For each level the inputs are:
+        * - number of nodes at this level in plat_array i.e. num_nodes_at_lvl.
+        *   This is the sum of values of nodes at the parent level.
+        * - Index of first entry at this level in the plat_array i.e.
+        *   parent_node_index.
+        * - Index of first free entry in psci_non_cpu_pd_nodes[] or
+        *   psci_cpu_pd_nodes[] i.e. node_index depending upon the level.
+        */
+       while (level >= PSCI_CPU_PWR_LVL) {
+               num_nodes_at_next_lvl = 0;
+               /*
+                * For each entry (parent node) at this level in the plat_array:
+                * - Find the number of children
+                * - Allocate a node in a power domain array for each child
+                * - Set the parent of the child to the parent_node_index - 1
+                * - Increment parent_node_index to point to the next parent
+                * - Accumulate the number of children at next level.
+                */
+               for (i = 0; i < num_nodes_at_lvl; i++) {
+                       assert(parent_node_index <=
+                                       PSCI_NUM_NON_CPU_PWR_DOMAINS);
+                       num_children = topology[parent_node_index];
+
+                       for (j = node_index;
+                               j < node_index + num_children; j++)
+                               psci_init_pwr_domain_node(j,
+                                                         parent_node_index - 1,
+                                                         level);
+
+                       node_index = j;
+                       num_nodes_at_next_lvl += num_children;
+                       parent_node_index++;
+               }
+
+               num_nodes_at_lvl = num_nodes_at_next_lvl;
+               level--;
+
+               /* Reset the index for the cpu power domain array */
+               if (level == PSCI_CPU_PWR_LVL)
+                       node_index = 0;
+       }
+
+       /* Validate the sanity of the array exported by the platform */
+       assert(j == PLATFORM_CORE_COUNT);
+}
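
To make the descriptor format concrete: for the two-cluster, four-CPU system
described in the comment below, a platform might export the following array
(hypothetical, following the breadth-first layout described above):

        static const unsigned char plat_pd_tree_desc[] = {
                1,      /* Number of root (system) power domains   */
                2,      /* Number of children of the system domain */
                2,      /* Number of CPUs in cluster 0             */
                2       /* Number of CPUs in cluster 1             */
        };
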
+
+/*******************************************************************************
+ * This function initializes the power domain topology tree by querying the
+ * platform. The power domain nodes higher than the CPU are populated in the
+ * array psci_non_cpu_pd_nodes[] and the CPU power domains are populated in
+ * psci_cpu_pd_nodes[]. The platform exports its static topology map through the
+ * plat_get_power_domain_tree_desc() API. The algorithm populates the
+ * psci_non_cpu_pd_nodes and psci_cpu_pd_nodes iteratively by using this
+ * topology map. On a platform that implements two clusters of 2 cpus each,
+ * and supports 3 domain levels, the populated psci_non_cpu_pd_nodes would look
+ * like this:
+ *
+ * ---------------------------------------------------
+ * | system node | cluster 0 node  | cluster 1 node  |
+ * ---------------------------------------------------
+ *
+ * And the populated psci_cpu_pd_nodes would look like this:
+ * <-    cpus cluster0   -><-   cpus cluster1   ->
+ * ------------------------------------------------
+ * |   CPU 0   |   CPU 1   |   CPU 2   |   CPU 3  |
+ * ------------------------------------------------
+ ******************************************************************************/
+int psci_setup(void)
+{
+       const unsigned char *topology_tree;
+
+       /* Query the topology map from the platform */
+       topology_tree = plat_get_power_domain_tree_desc();
+
+       /* Populate the power domain arrays using the platform topology map */
+       populate_power_domain_tree(topology_tree);
+
+       /* Update the CPU limits for each node in psci_non_cpu_pd_nodes */
+       psci_update_pwrlvl_limits();
+
+       /* Populate the mpidr field of cpu node for this CPU */
+       psci_cpu_pd_nodes[plat_my_core_pos()].mpidr =
+               read_mpidr() & MPIDR_AFFINITY_MASK;
+
+       psci_init_req_local_pwr_states();
+
+       /*
+        * Set the requested and target state of this CPU and all the higher
+        * power domain levels for this CPU to run.
+        */
+       psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL);
+
+       plat_setup_psci_ops((uintptr_t)psci_entrypoint,
+                                       &psci_plat_pm_ops);
+       assert(psci_plat_pm_ops);
+
+       /* Initialize the psci capability */
+       psci_caps = PSCI_GENERIC_CAP;
+
+       if (psci_plat_pm_ops->pwr_domain_off)
+               psci_caps |=  define_psci_cap(PSCI_CPU_OFF);
+       if (psci_plat_pm_ops->pwr_domain_on &&
+                       psci_plat_pm_ops->pwr_domain_on_finish)
+               psci_caps |=  define_psci_cap(PSCI_CPU_ON_AARCH64);
+       if (psci_plat_pm_ops->pwr_domain_suspend &&
+                       psci_plat_pm_ops->pwr_domain_suspend_finish) {
+               psci_caps |=  define_psci_cap(PSCI_CPU_SUSPEND_AARCH64);
+               if (psci_plat_pm_ops->get_sys_suspend_power_state)
+                       psci_caps |=  define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64);
+       }
+       if (psci_plat_pm_ops->system_off)
+               psci_caps |=  define_psci_cap(PSCI_SYSTEM_OFF);
+       if (psci_plat_pm_ops->system_reset)
+               psci_caps |=  define_psci_cap(PSCI_SYSTEM_RESET);
+
+#if ENABLE_PSCI_STAT
+       psci_caps |=  define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64);
+       psci_caps |=  define_psci_cap(PSCI_STAT_COUNT_AARCH64);
+#endif
+
+       return 0;
+}
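
To make the breadth-first walk in populate_power_domain_tree() and psci_setup() concrete, here is a minimal host-side sketch of the same traversal for the documented example (one system domain, two clusters, two CPUs per cluster). The descriptor value {1, 2, 2, 2} and the EX_* constants are assumptions made for this illustration, not values taken from any platform port:

#include <stdio.h>

#define EX_MAX_PWR_LVL	2	/* system = 2, cluster = 1, cpu = 0 */

/* Assumed descriptor: 1 system domain, 2 clusters, 2 CPUs per cluster */
static const unsigned char ex_topology[] = {1, 2, 2, 2};

int main(void)
{
	unsigned int i, j = 0, num_at_lvl = 1, num_at_next_lvl;
	unsigned int node_idx = 0, parent_idx = 0, num_children;
	int level = EX_MAX_PWR_LVL;

	while (level >= 0) {
		num_at_next_lvl = 0;
		for (i = 0; i < num_at_lvl; i++) {
			num_children = ex_topology[parent_idx];
			for (j = node_idx; j < node_idx + num_children; j++)
				printf("level %d: node %u, parent %d\n",
				       level, j, (int)parent_idx - 1);
			node_idx = j;
			num_at_next_lvl += num_children;
			parent_idx++;
		}
		num_at_lvl = num_at_next_lvl;
		level--;

		/* CPU nodes live in a separate array, so the running
		 * index restarts at level 0, as in the code above. */
		if (level == 0)
			node_idx = 0;
	}
	return 0;
}

Its output reproduces the two diagrams in the comment block above: non-CPU nodes [system, cluster 0, cluster 1] at indices 0-2, and CPU nodes 0-3 with clusters 1 and 2 as their parents.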
diff --git a/lib/psci/psci_stat.c b/lib/psci/psci_stat.c
new file mode 100644 (file)
index 0000000..155bbb0
--- /dev/null
@@ -0,0 +1,309 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <platform.h>
+#include <platform_def.h>
+#include "psci_private.h"
+
+#ifndef PLAT_MAX_PWR_LVL_STATES
+#define PLAT_MAX_PWR_LVL_STATES 2
+#endif
+
+/* Ticks elapsed in one second for a signal of 1 MHz */
+#define MHZ_TICKS_PER_SEC 1000000
+
+/* Following structure is used for PSCI STAT */
+typedef struct psci_stat {
+       u_register_t residency;
+       u_register_t count;
+} psci_stat_t;
+
+/*
+ * The following is used to keep track of the last CPU
+ * to power down in each non-CPU power domain.
+ */
+static int last_cpu_in_non_cpu_pd[PSCI_NUM_NON_CPU_PWR_DOMAINS] = {
+		[0 ... PSCI_NUM_NON_CPU_PWR_DOMAINS - 1] = -1};
+
+/*
+ * The following are used to store PSCI STAT values for
+ * the CPU and non-CPU power domains.
+ */
+static psci_stat_t psci_cpu_stat[PLATFORM_CORE_COUNT]
+                               [PLAT_MAX_PWR_LVL_STATES];
+static psci_stat_t psci_non_cpu_stat[PSCI_NUM_NON_CPU_PWR_DOMAINS]
+                               [PLAT_MAX_PWR_LVL_STATES];
+
+/* Register PMF PSCI service */
+PMF_REGISTER_SERVICE(psci_svc, PMF_PSCI_STAT_SVC_ID,
+        PSCI_STAT_TOTAL_IDS, PMF_STORE_ENABLE)
+
+/* The divisor to use to convert raw timestamp into microseconds */
+u_register_t residency_div;
+
+/*
+ * This macro calculates the stats residency in microseconds,
+ * taking into account the wrap-around condition.
+ */
+#define calc_stat_residency(_pwrupts, _pwrdnts, _res)          \
+       do {                                                    \
+               if (_pwrupts < _pwrdnts)                        \
+                       _res = UINT64_MAX - _pwrdnts + _pwrupts;\
+               else                                            \
+                       _res = _pwrupts - _pwrdnts;             \
+               /* Convert timestamp into microseconds */       \
+               _res = _res/residency_div;                      \
+       } while (0)
+
+/*
+ * This function returns the index into the `psci_stat_t` array given the
+ * local power state and power domain level. If the platform implements the
+ * `get_pwr_lvl_state_idx` pm hook, then that will be used to return the index.
+ */
+static int get_stat_idx(plat_local_state_t local_state, int pwr_lvl)
+{
+       int idx;
+
+       if (psci_plat_pm_ops->get_pwr_lvl_state_idx == NULL) {
+               assert(PLAT_MAX_PWR_LVL_STATES == 2);
+               if (is_local_state_retn(local_state))
+                       return 0;
+
+               assert(is_local_state_off(local_state));
+               return 1;
+       }
+
+       idx = psci_plat_pm_ops->get_pwr_lvl_state_idx(local_state, pwr_lvl);
+       assert((idx >= 0) && (idx < PLAT_MAX_PWR_LVL_STATES));
+       return idx;
+}
+
+/*******************************************************************************
+ * This function is passed the target local power states for each power
+ * domain (state_info) between the current CPU domain and its ancestors until
+ * the target power level (end_pwrlvl).
+ *
+ * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
+ * updates `last_cpu_in_non_cpu_pd[]` with the id of the last CPU to power down.
+ *
+ * This function will only be invoked with data cache enabled and while
+ * powering down a core.
+ ******************************************************************************/
+void psci_stats_update_pwr_down(unsigned int end_pwrlvl,
+                       const psci_power_state_t *state_info)
+{
+       int lvl, parent_idx, cpu_idx = plat_my_core_pos();
+
+       assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
+       assert(state_info);
+
+       parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
+
+       for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
+
+               /* Break early if the target power state is RUN */
+               if (is_local_state_run(state_info->pwr_domain_state[lvl]))
+                       break;
+
+               /*
+                * The power domain is entering a low power state, so this is
+                * the last CPU for this power domain
+                */
+               last_cpu_in_non_cpu_pd[parent_idx] = cpu_idx;
+
+               parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+       }
+
+}
+
+/*******************************************************************************
+ * This function updates the PSCI STATS (residency time and count) for CPU
+ * and NON-CPU power domains.
+ * It is called with caches enabled and locks acquired (for NON-CPU domains).
+ ******************************************************************************/
+void psci_stats_update_pwr_up(unsigned int end_pwrlvl,
+                       const psci_power_state_t *state_info,
+                       unsigned int flags)
+{
+       int parent_idx, cpu_idx = plat_my_core_pos();
+       int lvl, stat_idx;
+       plat_local_state_t local_state;
+       unsigned long long pwrup_ts = 0, pwrdn_ts = 0;
+       u_register_t residency;
+
+       assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
+       assert(state_info);
+
+       /* Initialize the residency divisor if not already initialized */
+       if (!residency_div) {
+		/* Pre-calculate the divisor so that it can be used
+		 * directly to convert a time-stamp into microseconds */
+               residency_div = read_cntfrq_el0() / MHZ_TICKS_PER_SEC;
+               assert(residency_div);
+       }
+
+       /* Get power down time-stamp for current CPU */
+       PMF_GET_TIMESTAMP_BY_INDEX(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR,
+                       cpu_idx, flags, pwrdn_ts);
+
+	/* In the case of the first power-on, just return */
+       if (!pwrdn_ts)
+               return;
+
+       /* Get power up time-stamp for current CPU */
+       PMF_GET_TIMESTAMP_BY_INDEX(psci_svc, PSCI_STAT_ID_EXIT_LOW_PWR,
+                       cpu_idx, flags, pwrup_ts);
+
+       /* Get the index into the stats array */
+       local_state = state_info->pwr_domain_state[PSCI_CPU_PWR_LVL];
+       stat_idx = get_stat_idx(local_state, PSCI_CPU_PWR_LVL);
+
+       /* Calculate stats residency */
+       calc_stat_residency(pwrup_ts, pwrdn_ts, residency);
+
+       /* Update CPU stats. */
+       psci_cpu_stat[cpu_idx][stat_idx].residency += residency;
+       psci_cpu_stat[cpu_idx][stat_idx].count++;
+
+       /*
+	 * Check which power domains above the CPU were off
+	 * prior to this CPU powering on.
+        */
+       parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
+       for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
+               local_state = state_info->pwr_domain_state[lvl];
+               if (is_local_state_run(local_state)) {
+                       /* Break early */
+                       break;
+               }
+
+               assert(last_cpu_in_non_cpu_pd[parent_idx] != -1);
+
+               /* Get power down time-stamp for last CPU */
+               PMF_GET_TIMESTAMP_BY_INDEX(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR,
+                               last_cpu_in_non_cpu_pd[parent_idx],
+                               flags, pwrdn_ts);
+
+               /* Initialize back to reset value */
+               last_cpu_in_non_cpu_pd[parent_idx] = -1;
+
+               /* Get the index into the stats array */
+               stat_idx = get_stat_idx(local_state, lvl);
+
+               /* Calculate stats residency */
+               calc_stat_residency(pwrup_ts, pwrdn_ts, residency);
+
+               /* Update non cpu stats */
+               psci_non_cpu_stat[parent_idx][stat_idx].residency += residency;
+               psci_non_cpu_stat[parent_idx][stat_idx].count++;
+
+               parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+       }
+
+}
+
+/*******************************************************************************
+ * This function returns the appropriate count and residency time of the
+ * local state for the highest power level expressed in the `power_state`
+ * for the node represented by `target_cpu`.
+ ******************************************************************************/
+int psci_get_stat(u_register_t target_cpu, unsigned int power_state,
+                        psci_stat_t *psci_stat)
+{
+       int rc, pwrlvl, lvl, parent_idx, stat_idx, target_idx;
+       psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
+       plat_local_state_t local_state;
+
+       /* Validate the target_cpu parameter and determine the cpu index */
+       target_idx = plat_core_pos_by_mpidr(target_cpu);
+       if (target_idx == -1)
+               return PSCI_E_INVALID_PARAMS;
+
+       /* Validate the power_state parameter */
+       if (!psci_plat_pm_ops->translate_power_state_by_mpidr)
+               rc = psci_validate_power_state(power_state, &state_info);
+       else
+               rc = psci_plat_pm_ops->translate_power_state_by_mpidr(
+                               target_cpu, power_state, &state_info);
+
+       if (rc != PSCI_E_SUCCESS)
+               return PSCI_E_INVALID_PARAMS;
+
+       /* Find the highest power level */
+       pwrlvl = psci_find_target_suspend_lvl(&state_info);
+       if (pwrlvl == PSCI_INVALID_PWR_LVL)
+               return PSCI_E_INVALID_PARAMS;
+
+       /* Get the index into the stats array */
+       local_state = state_info.pwr_domain_state[pwrlvl];
+       stat_idx = get_stat_idx(local_state, pwrlvl);
+
+       if (pwrlvl > PSCI_CPU_PWR_LVL) {
+               /* Get the power domain index */
+               parent_idx = psci_cpu_pd_nodes[target_idx].parent_node;
+               for (lvl = PSCI_CPU_PWR_LVL + 1; lvl < pwrlvl; lvl++)
+                       parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+
+               /* Get the non cpu power domain stats */
+               *psci_stat = psci_non_cpu_stat[parent_idx][stat_idx];
+       } else {
+               /* Get the cpu power domain stats */
+               *psci_stat = psci_cpu_stat[target_idx][stat_idx];
+       }
+
+       return PSCI_E_SUCCESS;
+}
+
+/* This is the top level function for PSCI_STAT_RESIDENCY SMC. */
+u_register_t psci_stat_residency(u_register_t target_cpu,
+               unsigned int power_state)
+{
+       psci_stat_t psci_stat;
+
+       int rc = psci_get_stat(target_cpu, power_state, &psci_stat);
+       if (rc == PSCI_E_SUCCESS)
+               return psci_stat.residency;
+       else
+               return 0;
+}
+
+/* This is the top level function for PSCI_STAT_COUNT SMC. */
+u_register_t psci_stat_count(u_register_t target_cpu,
+       unsigned int power_state)
+{
+       psci_stat_t psci_stat;
+
+       int rc = psci_get_stat(target_cpu, power_state, &psci_stat);
+       if (rc == PSCI_E_SUCCESS)
+               return psci_stat.count;
+       else
+               return 0;
+}
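
The wrap-around handling in calc_stat_residency() is easy to check in isolation. A self-contained sketch of the same arithmetic follows; the 50 MHz counter frequency is an assumed example value, not something read from cntfrq_el0:

#include <stdint.h>
#include <stdio.h>

#define EX_MHZ_TICKS_PER_SEC	1000000ULL

/* Mirrors calc_stat_residency(): correct for a single counter wrap-around,
 * then convert raw ticks into microseconds. */
static uint64_t ex_residency_us(uint64_t pwrup_ts, uint64_t pwrdn_ts,
				uint64_t cnt_freq_hz)
{
	uint64_t div = cnt_freq_hz / EX_MHZ_TICKS_PER_SEC;
	uint64_t res;

	if (pwrup_ts < pwrdn_ts)	/* counter wrapped while powered down */
		res = UINT64_MAX - pwrdn_ts + pwrup_ts;
	else
		res = pwrup_ts - pwrdn_ts;

	return res / div;
}

int main(void)
{
	/* Normal case: 4000 ticks at an assumed 50 MHz -> 80 us */
	printf("%llu us\n", (unsigned long long)
	       ex_residency_us(5000, 1000, 50000000));

	/* Wrapped case: pwrup_ts < pwrdn_ts, yet the residency stays positive */
	printf("%llu us\n", (unsigned long long)
	       ex_residency_us(10000, UINT64_MAX - 40000, 50000000));
	return 0;
}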
diff --git a/lib/psci/psci_suspend.c b/lib/psci/psci_suspend.c
new file mode 100644 (file)
index 0000000..ee1ccef
--- /dev/null
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <bl_common.h>
+#include <arch.h>
+#include <arch_helpers.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <cpu_data.h>
+#include <debug.h>
+#include <platform.h>
+#include <runtime_svc.h>
+#include <stddef.h>
+#include "psci_private.h"
+
+/*******************************************************************************
+ * This function does generic and platform specific operations after a wake-up
+ * from standby/retention states at multiple power levels.
+ ******************************************************************************/
+static void psci_suspend_to_standby_finisher(unsigned int cpu_idx,
+                                            psci_power_state_t *state_info,
+                                            unsigned int end_pwrlvl)
+{
+       psci_acquire_pwr_domain_locks(end_pwrlvl,
+                               cpu_idx);
+
+       /*
+        * Plat. management: Allow the platform to do operations
+        * on waking up from retention.
+        */
+       psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);
+
+       /*
+        * Set the requested and target state of this CPU and all the higher
+        * power domain levels for this CPU to run.
+        */
+       psci_set_pwr_domains_to_run(end_pwrlvl);
+
+       psci_release_pwr_domain_locks(end_pwrlvl,
+                               cpu_idx);
+}
+
+/*******************************************************************************
+ * This function does generic and platform specific suspend to power down
+ * operations.
+ ******************************************************************************/
+static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
+                                         entry_point_info_t *ep,
+                                         psci_power_state_t *state_info)
+{
+       unsigned int max_off_lvl = psci_find_max_off_lvl(state_info);
+
+       /* Save PSCI target power level for the suspend finisher handler */
+       psci_set_suspend_pwrlvl(end_pwrlvl);
+
+       /*
+        * Flush the target power level as it will be accessed on power up with
+        * Data cache disabled.
+        */
+       flush_cpu_data(psci_svc_cpu_data.target_pwrlvl);
+
+       /*
+        * Call the cpu suspend handler registered by the Secure Payload
+	 * Dispatcher to let it do any book-keeping. If the handler
+	 * encounters an error, it is expected to assert from within.
+        */
+       if (psci_spd_pm && psci_spd_pm->svc_suspend)
+               psci_spd_pm->svc_suspend(max_off_lvl);
+
+       /*
+        * Store the re-entry information for the non-secure world.
+        */
+       cm_init_my_context(ep);
+
+       /*
+        * Arch. management. Perform the necessary steps to flush all
+	 * cpu caches. Currently we assume that the power level corresponds
+	 * to the cache level.
+	 * TODO: Introduce a mechanism to query the cache level to flush
+        * and the cpu-ops power down to perform from the platform.
+        */
+       psci_do_pwrdown_cache_maintenance(max_off_lvl);
+}
+
+/*******************************************************************************
+ * Top level handler which is called when a cpu wants to suspend its execution.
+ * It is assumed that along with suspending the cpu power domain, power domains
+ * at higher levels, up to the target power level, will be suspended as well.
+ * It coordinates with the platform to negotiate the target state for each
+ * power domain level up to the target power domain level. It then performs
+ * the generic, architectural and platform setup and state management required
+ * to suspend that power domain level and the power domain levels below it.
+ * e.g. For a cpu that's to be suspended, it could mean programming the
+ * power controller, whereas for a cluster that's to be suspended, it will
+ * call the platform specific code which will disable coherency at the
+ * interconnect level if the cpu is the last in the cluster and also program
+ * the power controller.
+ *
+ * All the required parameter checks are performed at the beginning and after
+ * the state transition has been done, no further error is expected and it is
+ * not possible to undo any of the actions taken beyond that point.
+ ******************************************************************************/
+void psci_cpu_suspend_start(entry_point_info_t *ep,
+                           unsigned int end_pwrlvl,
+                           psci_power_state_t *state_info,
+                           unsigned int is_power_down_state)
+{
+       int skip_wfi = 0;
+       unsigned int idx = plat_my_core_pos();
+
+       /*
+        * This function must only be called on platforms where the
+        * CPU_SUSPEND platform hooks have been implemented.
+        */
+       assert(psci_plat_pm_ops->pwr_domain_suspend &&
+                       psci_plat_pm_ops->pwr_domain_suspend_finish);
+
+       /*
+        * This function acquires the lock corresponding to each power
+        * level so that by the time all locks are taken, the system topology
+	 * is snapshotted and state management can be done safely.
+        */
+       psci_acquire_pwr_domain_locks(end_pwrlvl,
+                                     idx);
+
+       /*
+        * We check if there are any pending interrupts after the delay
+        * introduced by lock contention to increase the chances of early
+        * detection that a wake-up interrupt has fired.
+        */
+       if (read_isr_el1()) {
+               skip_wfi = 1;
+               goto exit;
+       }
+
+       /*
+        * This function is passed the requested state info and
+	 * it returns the negotiated state info for each power level up to
+        * the end level specified.
+        */
+       psci_do_state_coordination(end_pwrlvl, state_info);
+
+#if ENABLE_PSCI_STAT
+       /* Update the last cpu for each level till end_pwrlvl */
+       psci_stats_update_pwr_down(end_pwrlvl, state_info);
+#endif
+
+       if (is_power_down_state)
+               psci_suspend_to_pwrdown_start(end_pwrlvl, ep, state_info);
+
+       /*
+        * Plat. management: Allow the platform to perform the
+        * necessary actions to turn off this cpu e.g. set the
+        * platform defined mailbox with the psci entrypoint,
+        * program the power controller etc.
+        */
+       psci_plat_pm_ops->pwr_domain_suspend(state_info);
+
+#if ENABLE_PSCI_STAT
+       /*
+        * Capture time-stamp while entering low power state.
+        * No cache maintenance needed because caches are off
+        * and writes are direct to main memory.
+        */
+       PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR,
+               PMF_NO_CACHE_MAINT);
+#endif
+
+exit:
+       /*
+        * Release the locks corresponding to each power level in the
+        * reverse order to which they were acquired.
+        */
+       psci_release_pwr_domain_locks(end_pwrlvl,
+                                 idx);
+       if (skip_wfi)
+               return;
+
+       if (is_power_down_state) {
+               /* The function calls below must not return */
+               if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi)
+                       psci_plat_pm_ops->pwr_domain_pwr_down_wfi(state_info);
+               else
+                       psci_power_down_wfi();
+       }
+
+       /*
+        * We will reach here if only retention/standby states have been
+        * requested at multiple power levels. This means that the cpu
+        * context will be preserved.
+        */
+       wfi();
+
+       /*
+        * After we wake up from context retaining suspend, call the
+        * context retaining suspend finisher.
+        */
+       psci_suspend_to_standby_finisher(idx, state_info, end_pwrlvl);
+}
+
+/*******************************************************************************
+ * The following functions finish an earlier suspend request. They
+ * are called by the common finisher routine in psci_common.c. The `state_info`
+ * is the psci_power_state from which this CPU has woken up.
+ ******************************************************************************/
+void psci_cpu_suspend_finish(unsigned int cpu_idx,
+                            psci_power_state_t *state_info)
+{
+       unsigned int counter_freq;
+       unsigned int max_off_lvl;
+
+       /* Ensure we have been woken up from a suspended state */
+	assert(psci_get_aff_info_state() == AFF_STATE_ON &&
+		is_local_state_off(state_info->pwr_domain_state[PSCI_CPU_PWR_LVL]));
+
+       /*
+        * Plat. management: Perform the platform specific actions
+        * before we change the state of the cpu e.g. enabling the
+        * gic or zeroing the mailbox register. If anything goes
+        * wrong then assert as there is no way to recover from this
+        * situation.
+        */
+       psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);
+
+       /*
+        * Arch. management: Enable the data cache, manage stack memory and
+        * restore the stashed EL3 architectural context from the 'cpu_context'
+        * structure for this cpu.
+        */
+       psci_do_pwrup_cache_maintenance();
+
+       /* Re-init the cntfrq_el0 register */
+       counter_freq = plat_get_syscnt_freq2();
+       write_cntfrq_el0(counter_freq);
+
+       /*
+        * Call the cpu suspend finish handler registered by the Secure Payload
+	 * Dispatcher to let it do any bookkeeping. If the handler
+	 * encounters an error, it is expected to assert from within.
+        */
+       if (psci_spd_pm && psci_spd_pm->svc_suspend) {
+               max_off_lvl = psci_find_max_off_lvl(state_info);
+		assert(max_off_lvl != PSCI_INVALID_PWR_LVL);
+               psci_spd_pm->svc_suspend_finish(max_off_lvl);
+       }
+
+       /* Invalidate the suspend level for the cpu */
+       psci_set_suspend_pwrlvl(PSCI_INVALID_PWR_LVL);
+
+       /*
+        * Generic management: Now we just need to retrieve the
+        * information that we had stashed away during the suspend
+        * call to set this cpu on its way.
+        */
+       cm_prepare_el3_exit(NON_SECURE);
+}
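
psci_cpu_suspend_start() takes the power-down exit (cache flush, context save, re-entry through psci_entrypoint on wake) only when is_power_down_state is set; otherwise wfi() simply returns and psci_suspend_to_standby_finisher() runs. Per psci_validate_suspend_req(), a power-down request is one in which some level of the composite state is OFF, i.e. deeper than the deepest retention state. A simplified sketch of that categorization, using assumed numeric state values (0 = RUN, 1 = retention, 2 = OFF) rather than any platform's real encoding:

#include <stdio.h>

#define EX_MAX_RET_STATE	1	/* assumed PLAT_MAX_RET_STATE */
#define EX_MAX_PWR_LVL		2

/* A suspend request must take the power-down path when any level in the
 * composite state is OFF, i.e. deeper than the deepest retention state. */
static int ex_is_power_down_state(const unsigned char *pd_state)
{
	int lvl;

	for (lvl = 0; lvl <= EX_MAX_PWR_LVL; lvl++)
		if (pd_state[lvl] > EX_MAX_RET_STATE)
			return 1;
	return 0;
}

int main(void)
{
	unsigned char retention[] = {1, 1, 0};	/* cpu + cluster retention */
	unsigned char pwrdown[]   = {2, 2, 2};	/* everything off */

	/* 0: wfi() returns and the standby finisher runs */
	printf("retention -> %d\n", ex_is_power_down_state(retention));
	/* 1: context is saved; the core re-enters at psci_entrypoint */
	printf("power down -> %d\n", ex_is_power_down_state(pwrdown));
	return 0;
}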
diff --git a/lib/psci/psci_system_off.c b/lib/psci/psci_system_off.c
new file mode 100644 (file)
index 0000000..de9ec64
--- /dev/null
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stddef.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <platform.h>
+#include "psci_private.h"
+
+void psci_system_off(void)
+{
+       psci_print_power_domain_map();
+
+       assert(psci_plat_pm_ops->system_off);
+
+       /* Notify the Secure Payload Dispatcher */
+       if (psci_spd_pm && psci_spd_pm->svc_system_off) {
+               psci_spd_pm->svc_system_off();
+       }
+
+       /* Call the platform specific hook */
+       psci_plat_pm_ops->system_off();
+
+       /* This function does not return. We should never get here */
+}
+
+void psci_system_reset(void)
+{
+       psci_print_power_domain_map();
+
+       assert(psci_plat_pm_ops->system_reset);
+
+       /* Notify the Secure Payload Dispatcher */
+       if (psci_spd_pm && psci_spd_pm->svc_system_reset) {
+               psci_spd_pm->svc_system_reset();
+       }
+
+       /* Call the platform specific hook */
+       psci_plat_pm_ops->system_reset();
+
+       /* This function does not return. We should never get here */
+}
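
psci_system_off() and psci_system_reset() can assert that their platform hooks exist because psci_setup() only advertises the corresponding function ids in psci_caps when the hooks were supplied, and a dispatcher is expected to reject un-advertised ids before these handlers run. A standalone sketch of that gating pattern follows; the ex_* capability encoding, the function id values and the SMC_UNK return are assumptions for the illustration:

#include <stdio.h>

/* Assumed encoding: one capability bit derived from the function id */
#define ex_define_psci_cap(x)	(1U << ((x) & 0x1fU))

#define EX_PSCI_SYSTEM_OFF	0x84000008U
#define EX_PSCI_SYSTEM_RESET	0x84000009U
#define EX_SMC_UNK		0xffffffffU

static unsigned int ex_psci_caps;

static unsigned int ex_smc_handler(unsigned int fid)
{
	/* Ids whose platform hooks were absent at setup never get past
	 * this check, so the assert() in the handler body always holds. */
	if (!(ex_psci_caps & ex_define_psci_cap(fid)))
		return EX_SMC_UNK;

	printf("dispatching 0x%x\n", fid);
	return 0;
}

int main(void)
{
	/* The platform supplied system_off but not system_reset */
	ex_psci_caps |= ex_define_psci_cap(EX_PSCI_SYSTEM_OFF);

	ex_smc_handler(EX_PSCI_SYSTEM_OFF);
	if (ex_smc_handler(EX_PSCI_SYSTEM_RESET) == EX_SMC_UNK)
		printf("SYSTEM_RESET not advertised\n");
	return 0;
}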
diff --git a/plat/common/aarch64/plat_psci_common.c b/plat/common/aarch64/plat_psci_common.c
index 0748ef4deb1722213c3ca447afa9d12d54664bfd..804da93048f6c7cc543745f5097c0a16e78b0e90 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include <arch.h>
-#include <assert.h>
-#include <platform.h>
-#include <psci.h>
-
-/*
- * The PSCI generic code uses this API to let the platform participate in state
- * coordination during a power management operation. It compares the platform
- * specific local power states requested by each cpu for a given power domain
- * and returns the coordinated target power state that the domain should
- * enter. A platform assigns a number to a local power state. This default
- * implementation assumes that the platform assigns these numbers in order of
- * increasing depth of the power state i.e. for two power states X & Y, if X < Y
- * then X represents a shallower power state than Y. As a result, the
- * coordinated target local power state for a power domain will be the minimum
- * of the requested local power states.
- */
-plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
-                                            const plat_local_state_t *states,
-                                            unsigned int ncpu)
-{
-       plat_local_state_t target = PLAT_MAX_OFF_STATE, temp;
-
-       assert(ncpu);
-
-       do {
-               temp = *states++;
-               if (temp < target)
-                       target = temp;
-       } while (--ncpu);
-
-       return target;
-}
+#if !ERROR_DEPRECATED
+#include "../plat_psci_common.c"
+#endif
diff --git a/plat/common/plat_psci_common.c b/plat/common/plat_psci_common.c
new file mode 100644 (file)
index 0000000..3eb6886
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <assert.h>
+#include <platform.h>
+#include <psci.h>
+
+/*
+ * The PSCI generic code uses this API to let the platform participate in state
+ * coordination during a power management operation. It compares the platform
+ * specific local power states requested by each cpu for a given power domain
+ * and returns the coordinated target power state that the domain should
+ * enter. A platform assigns a number to a local power state. This default
+ * implementation assumes that the platform assigns these numbers in order of
+ * increasing depth of the power state i.e. for two power states X & Y, if X < Y
+ * then X represents a shallower power state than Y. As a result, the
+ * coordinated target local power state for a power domain will be the minimum
+ * of the requested local power states.
+ */
+plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
+                                            const plat_local_state_t *states,
+                                            unsigned int ncpu)
+{
+       plat_local_state_t target = PLAT_MAX_OFF_STATE, temp;
+
+       assert(ncpu);
+
+       do {
+               temp = *states++;
+               if (temp < target)
+                       target = temp;
+       } while (--ncpu);
+
+       return target;
+}
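
The min-rule above can be exercised directly. In this sketch the state values follow the assumed convention 0 = RUN, 1 = retention, 2 = OFF, which matches the "increasing depth" ordering the default implementation relies on:

#include <assert.h>
#include <stdio.h>

typedef unsigned char plat_local_state_t;

#define EX_MAX_OFF_STATE	2	/* assumed PLAT_MAX_OFF_STATE */

/* Same min-rule as the default plat_get_target_pwr_state() above */
static plat_local_state_t ex_target_pwr_state(const plat_local_state_t *states,
					      unsigned int ncpu)
{
	plat_local_state_t target = EX_MAX_OFF_STATE, temp;

	assert(ncpu);

	do {
		temp = *states++;
		if (temp < target)
			target = temp;
	} while (--ncpu);

	return target;
}

int main(void)
{
	/* One CPU asked for OFF (2), the other only for retention (1):
	 * the coordinated cluster state is the shallower of the two. */
	plat_local_state_t req[] = {2, 1};

	printf("target = %u\n", ex_target_pwr_state(req, 2));	/* prints 1 */
	return 0;
}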
diff --git a/services/std_svc/psci/psci_common.c b/services/std_svc/psci/psci_common.c
deleted file mode 100644 (file)
index 2a0afb4..0000000
+++ /dev/null
@@ -1,928 +0,0 @@
-/*
- * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <arch_helpers.h>
-#include <assert.h>
-#include <bl_common.h>
-#include <context.h>
-#include <context_mgmt.h>
-#include <debug.h>
-#include <platform.h>
-#include <string.h>
-#include "psci_private.h"
-
-/*
- * SPD power management operations, expected to be supplied by the registered
- * SPD on successful SP initialization
- */
-const spd_pm_ops_t *psci_spd_pm;
-
-/*
- * PSCI requested local power state map. This array is used to store the local
- * power states requested by a CPU for power levels from level 1 to
- * PLAT_MAX_PWR_LVL. It does not store the requested local power state for power
- * level 0 (PSCI_CPU_PWR_LVL) as the requested and the target power state for a
- * CPU are the same.
- *
- * During state coordination, the platform is passed an array containing the
- * local states requested for a particular non cpu power domain by each cpu
- * within the domain.
- *
- * TODO: Dense packing of the requested states will cause cache thrashing
- * when multiple power domains write to it. If we allocate the requested
- * states at each power level in a cache-line aligned per-domain memory,
- * the cache thrashing can be avoided.
- */
-static plat_local_state_t
-       psci_req_local_pwr_states[PLAT_MAX_PWR_LVL][PLATFORM_CORE_COUNT];
-
-
-/*******************************************************************************
- * Arrays that hold the platform's power domain tree information for state
- * management of power domains.
- * Each node in the array 'psci_non_cpu_pd_nodes' corresponds to a power domain
- * which is an ancestor of a CPU power domain.
- * Each node in the array 'psci_cpu_pd_nodes' corresponds to a cpu power domain
- ******************************************************************************/
-non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS]
-#if USE_COHERENT_MEM
-__section("tzfw_coherent_mem")
-#endif
-;
-
-DEFINE_BAKERY_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
-
-cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
-
-/*******************************************************************************
- * Pointer to functions exported by the platform to complete power mgmt. ops
- ******************************************************************************/
-const plat_psci_ops_t *psci_plat_pm_ops;
-
-/******************************************************************************
- * Check that the maximum power level supported by the platform makes sense
- *****************************************************************************/
-CASSERT(PLAT_MAX_PWR_LVL <= PSCI_MAX_PWR_LVL && \
-               PLAT_MAX_PWR_LVL >= PSCI_CPU_PWR_LVL, \
-               assert_platform_max_pwrlvl_check);
-
-/*
- * The plat_local_state used by the platform is one of these types: RUN,
- * RETENTION and OFF. The platform can define further sub-states for each type
- * apart from RUN. This categorization is done to verify the sanity of the
- * psci_power_state passed by the platform and to print debug information. The
- * categorization is done on the basis of the following conditions:
- *
- * 1. If (plat_local_state == 0) then the category is STATE_TYPE_RUN.
- *
- * 2. If (0 < plat_local_state <= PLAT_MAX_RET_STATE), then the category is
- *    STATE_TYPE_RETN.
- *
- * 3. If (plat_local_state > PLAT_MAX_RET_STATE), then the category is
- *    STATE_TYPE_OFF.
- */
-typedef enum plat_local_state_type {
-       STATE_TYPE_RUN = 0,
-       STATE_TYPE_RETN,
-       STATE_TYPE_OFF
-} plat_local_state_type_t;
-
-/* The macro used to categorize plat_local_state. */
-#define find_local_state_type(plat_local_state)                                        \
-               ((plat_local_state) ? ((plat_local_state > PLAT_MAX_RET_STATE)  \
-               ? STATE_TYPE_OFF : STATE_TYPE_RETN)                             \
-               : STATE_TYPE_RUN)
-
-/******************************************************************************
- * Check that the maximum retention level supported by the platform is less
- * than the maximum off level.
- *****************************************************************************/
-CASSERT(PLAT_MAX_RET_STATE < PLAT_MAX_OFF_STATE, \
-               assert_platform_max_off_and_retn_state_check);
-
-/******************************************************************************
- * This function ensures that the power state parameter in a CPU_SUSPEND request
- * is valid. If so, it returns the requested states for each power level.
- *****************************************************************************/
-int psci_validate_power_state(unsigned int power_state,
-                             psci_power_state_t *state_info)
-{
-       /* Check SBZ bits in power state are zero */
-       if (psci_check_power_state(power_state))
-               return PSCI_E_INVALID_PARAMS;
-
-       assert(psci_plat_pm_ops->validate_power_state);
-
-       /* Validate the power_state using platform pm_ops */
-       return psci_plat_pm_ops->validate_power_state(power_state, state_info);
-}
-
-/******************************************************************************
- * This function retrieves the `psci_power_state_t` for system suspend from
- * the platform.
- *****************************************************************************/
-void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info)
-{
-       /*
-        * Assert that the required pm_ops hook is implemented to ensure that
-        * the capability detected during psci_setup() is valid.
-        */
-       assert(psci_plat_pm_ops->get_sys_suspend_power_state);
-
-       /*
-        * Query the platform for the power_state required for system suspend
-        */
-       psci_plat_pm_ops->get_sys_suspend_power_state(state_info);
-}
-
-/*******************************************************************************
- * This function verifies that all the other cores in the system have been
- * turned OFF and the current CPU is the last running CPU in the system.
- * Returns 1 (true) if the current CPU is the last ON CPU or 0 (false)
- * otherwise.
- ******************************************************************************/
-unsigned int psci_is_last_on_cpu(void)
-{
-       unsigned int cpu_idx, my_idx = plat_my_core_pos();
-
-       for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
-               if (cpu_idx == my_idx) {
-                       assert(psci_get_aff_info_state() == AFF_STATE_ON);
-                       continue;
-               }
-
-               if (psci_get_aff_info_state_by_idx(cpu_idx) != AFF_STATE_OFF)
-                       return 0;
-       }
-
-       return 1;
-}
-
-/*******************************************************************************
- * Routine to return the maximum power level to traverse to after a cpu has
- * been physically powered up. It is expected to be called immediately after
- * reset from assembler code.
- ******************************************************************************/
-static unsigned int get_power_on_target_pwrlvl(void)
-{
-       unsigned int pwrlvl;
-
-       /*
-        * Assume that this cpu was suspended and retrieve its target power
-        * level. If it is invalid then it could only have been turned off
-        * earlier. PLAT_MAX_PWR_LVL will be the highest power level a
-        * cpu can be turned off to.
-        */
-       pwrlvl = psci_get_suspend_pwrlvl();
-       if (pwrlvl == PSCI_INVALID_PWR_LVL)
-               pwrlvl = PLAT_MAX_PWR_LVL;
-       return pwrlvl;
-}
-
-/******************************************************************************
- * Helper function to update the requested local power state array. This array
- * does not store the requested state for the CPU power level. Hence an
- * assertion is added to prevent us from accessing the wrong index.
- *****************************************************************************/
-static void psci_set_req_local_pwr_state(unsigned int pwrlvl,
-                                        unsigned int cpu_idx,
-                                        plat_local_state_t req_pwr_state)
-{
-       assert(pwrlvl > PSCI_CPU_PWR_LVL);
-       psci_req_local_pwr_states[pwrlvl - 1][cpu_idx] = req_pwr_state;
-}
-
-/******************************************************************************
- * This function initializes the psci_req_local_pwr_states.
- *****************************************************************************/
-void psci_init_req_local_pwr_states(void)
-{
-       /* Initialize the requested state of all non CPU power domains as OFF */
-       memset(&psci_req_local_pwr_states, PLAT_MAX_OFF_STATE,
-                       sizeof(psci_req_local_pwr_states));
-}
-
-/******************************************************************************
- * Helper function to return a reference to an array containing the local power
- * states requested by each cpu for a power domain at 'pwrlvl'. The size of the
- * array will be the number of cpu power domains of which this power domain is
- * an ancestor. These requested states will be used to determine a suitable
- * target state for this power domain during psci state coordination. An
- * assertion is added to prevent us from accessing the CPU power level.
- *****************************************************************************/
-static plat_local_state_t *psci_get_req_local_pwr_states(unsigned int pwrlvl,
-                                                        unsigned int cpu_idx)
-{
-       assert(pwrlvl > PSCI_CPU_PWR_LVL);
-
-       return &psci_req_local_pwr_states[pwrlvl - 1][cpu_idx];
-}
-
-/******************************************************************************
- * Helper function to return the current local power state of each power domain
- * from the current cpu power domain to its ancestor at the 'end_pwrlvl'. This
- * function will be called after a cpu is powered on to find the local state
- * each power domain has emerged from.
- *****************************************************************************/
-static void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
-                                            psci_power_state_t *target_state)
-{
-       unsigned int parent_idx, lvl;
-       plat_local_state_t *pd_state = target_state->pwr_domain_state;
-
-       pd_state[PSCI_CPU_PWR_LVL] = psci_get_cpu_local_state();
-       parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;
-
-       /* Copy the local power state from node to state_info */
-       for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
-#if !USE_COHERENT_MEM
-               /*
-                * If using normal memory for psci_non_cpu_pd_nodes, we need
-                * to flush before reading the local power state as another
-                * cpu in the same power domain could have updated it and this
-                * code runs before caches are enabled.
-                */
-               flush_dcache_range(
-                               (uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
-                               sizeof(psci_non_cpu_pd_nodes[parent_idx]));
-#endif
-               pd_state[lvl] = psci_non_cpu_pd_nodes[parent_idx].local_state;
-               parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
-       }
-
-	/* Set the higher levels to RUN */
-       for (; lvl <= PLAT_MAX_PWR_LVL; lvl++)
-               target_state->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
-}
-
-/******************************************************************************
- * Helper function to set the target local power state that each power domain
- * from the current cpu power domain to its ancestor at the 'end_pwrlvl' will
- * enter. This function will be called after coordination of requested power
- * states has been done for each power level.
- *****************************************************************************/
-static void psci_set_target_local_pwr_states(unsigned int end_pwrlvl,
-                                       const psci_power_state_t *target_state)
-{
-       unsigned int parent_idx, lvl;
-       const plat_local_state_t *pd_state = target_state->pwr_domain_state;
-
-       psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]);
-
-       /*
-        * Need to flush as local_state will be accessed with Data Cache
-        * disabled during power on
-        */
-       flush_cpu_data(psci_svc_cpu_data.local_state);
-
-       parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;
-
-       /* Copy the local_state from state_info */
-       for (lvl = 1; lvl <= end_pwrlvl; lvl++) {
-               psci_non_cpu_pd_nodes[parent_idx].local_state = pd_state[lvl];
-#if !USE_COHERENT_MEM
-               flush_dcache_range(
-                               (uintptr_t)&psci_non_cpu_pd_nodes[parent_idx],
-                               sizeof(psci_non_cpu_pd_nodes[parent_idx]));
-#endif
-               parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
-       }
-}
-
-
-/*******************************************************************************
- * PSCI helper function to get the parent nodes corresponding to a cpu_index.
- ******************************************************************************/
-void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
-                                     unsigned int end_lvl,
-                                     unsigned int node_index[])
-{
-       unsigned int parent_node = psci_cpu_pd_nodes[cpu_idx].parent_node;
-       int i;
-
-       for (i = PSCI_CPU_PWR_LVL + 1; i <= end_lvl; i++) {
-               *node_index++ = parent_node;
-               parent_node = psci_non_cpu_pd_nodes[parent_node].parent_node;
-       }
-}
-
-/******************************************************************************
- * This function is invoked post CPU power up and initialization. It sets the
- * affinity info state, target power state and requested power state for the
- * current CPU and all its ancestor power domains to RUN.
- *****************************************************************************/
-void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl)
-{
-       unsigned int parent_idx, cpu_idx = plat_my_core_pos(), lvl;
-       parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
-
-       /* Reset the local_state to RUN for the non cpu power domains. */
-       for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
-               psci_non_cpu_pd_nodes[parent_idx].local_state =
-                               PSCI_LOCAL_STATE_RUN;
-#if !USE_COHERENT_MEM
-               flush_dcache_range(
-                               (uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
-                               sizeof(psci_non_cpu_pd_nodes[parent_idx]));
-#endif
-               psci_set_req_local_pwr_state(lvl,
-                                            cpu_idx,
-                                            PSCI_LOCAL_STATE_RUN);
-               parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
-       }
-
-       /* Set the affinity info state to ON */
-       psci_set_aff_info_state(AFF_STATE_ON);
-
-       psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
-       flush_cpu_data(psci_svc_cpu_data);
-}
-
-/******************************************************************************
- * This function is passed the local power states requested for each power
- * domain (state_info) between the current CPU domain and its ancestors until
- * the target power level (end_pwrlvl). It updates the array of requested power
- * states with this information.
- *
- * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
- * retrieves the states requested by all the cpus of which the power domain at
- * that level is an ancestor. It passes this information to the platform to
- * coordinate and return the target power state. If the target state for a level
- * is RUN then subsequent levels are not considered. At the CPU level, state
- * coordination is not required. Hence, the requested and the target states are
- * the same.
- *
- * The 'state_info' is updated with the target state for each level between the
- * CPU and the 'end_pwrlvl' and returned to the caller.
- *
- * This function will only be invoked with data cache enabled and while
- * powering down a core.
- *****************************************************************************/
-void psci_do_state_coordination(unsigned int end_pwrlvl,
-                               psci_power_state_t *state_info)
-{
-       unsigned int lvl, parent_idx, cpu_idx = plat_my_core_pos();
-       unsigned int start_idx, ncpus;
-       plat_local_state_t target_state, *req_states;
-
-       assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
-       parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
-
-       /* For level 0, the requested state will be equivalent
-          to target state */
-       for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
-
-               /* First update the requested power state */
-               psci_set_req_local_pwr_state(lvl, cpu_idx,
-                                            state_info->pwr_domain_state[lvl]);
-
-               /* Get the requested power states for this power level */
-               start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
-               req_states = psci_get_req_local_pwr_states(lvl, start_idx);
-
-               /*
-                * Let the platform coordinate amongst the requested states at
-                * this power level and return the target local power state.
-                */
-               ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
-               target_state = plat_get_target_pwr_state(lvl,
-                                                        req_states,
-                                                        ncpus);
-
-               state_info->pwr_domain_state[lvl] = target_state;
-
-               /* Break early if the negotiated target power state is RUN */
-               if (is_local_state_run(state_info->pwr_domain_state[lvl]))
-                       break;
-
-               parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
-       }
-
-       /*
-        * This is for cases when we break out of the above loop early because
-        * the target power state is RUN at a power level < end_pwlvl.
-        * We update the requested power state from state_info and then
-        * set the target state as RUN.
-        */
-       for (lvl = lvl + 1; lvl <= end_pwrlvl; lvl++) {
-               psci_set_req_local_pwr_state(lvl, cpu_idx,
-                                            state_info->pwr_domain_state[lvl]);
-               state_info->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
-
-       }
-
-       /* Update the target state in the power domain nodes */
-       psci_set_target_local_pwr_states(end_pwrlvl, state_info);
-}
-
-/******************************************************************************
- * This function validates a suspend request by making sure that if a standby
- * state is requested then no power level is turned off and the highest power
- * level is placed in a standby/retention state.
- *
- * It also ensures that the state level X will enter is not shallower than the
- * state level X + 1 will enter.
- *
- * This validation will be enabled only for DEBUG builds as the platform is
- * expected to perform these validations as well.
- *****************************************************************************/
-int psci_validate_suspend_req(const psci_power_state_t *state_info,
-                             unsigned int is_power_down_state)
-{
-       unsigned int max_off_lvl, target_lvl, max_retn_lvl;
-       plat_local_state_t state;
-       plat_local_state_type_t req_state_type, deepest_state_type;
-       int i;
-
-       /* Find the target suspend power level */
-       target_lvl = psci_find_target_suspend_lvl(state_info);
-       if (target_lvl == PSCI_INVALID_PWR_LVL)
-               return PSCI_E_INVALID_PARAMS;
-
-       /* All power domain levels are in a RUN state to begin with */
-       deepest_state_type = STATE_TYPE_RUN;
-
-       for (i = target_lvl; i >= PSCI_CPU_PWR_LVL; i--) {
-               state = state_info->pwr_domain_state[i];
-               req_state_type = find_local_state_type(state);
-
-               /*
-                * While traversing from the highest power level to the lowest,
-                * the state requested for lower levels has to be the same or
-                * deeper i.e. equal to or greater than the state at the higher
-                * levels. If this condition is true, then the requested state
-                * becomes the deepest state encountered so far.
-                */
-               if (req_state_type < deepest_state_type)
-                       return PSCI_E_INVALID_PARAMS;
-               deepest_state_type = req_state_type;
-       }
-
-       /* Find the highest off power level */
-       max_off_lvl = psci_find_max_off_lvl(state_info);
-
-       /* The target_lvl is either equal to the max_off_lvl or max_retn_lvl */
-       max_retn_lvl = PSCI_INVALID_PWR_LVL;
-       if (target_lvl != max_off_lvl)
-               max_retn_lvl = target_lvl;
-
-       /*
-        * If this is not a request for a power down state then max off level
-        * has to be invalid and max retention level has to be a valid power
-        * level.
-        */
-       if (!is_power_down_state && (max_off_lvl != PSCI_INVALID_PWR_LVL ||
-                                   max_retn_lvl == PSCI_INVALID_PWR_LVL))
-               return PSCI_E_INVALID_PARAMS;
-
-       return PSCI_E_SUCCESS;
-}
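
To make the monotonic-depth rule concrete, consider a hypothetical two-level topology with level 0 = CPU and level 1 = cluster (`PLAT_MAX_RET_STATE` is assumed to name the deepest retention state, per the usual platform macros; the request vectors are illustrative only):

	psci_power_state_t ok = {
		.pwr_domain_state = { PLAT_MAX_OFF_STATE, PLAT_MAX_RET_STATE }
	};
	psci_power_state_t bad = {
		.pwr_domain_state = { PLAT_MAX_RET_STATE, PLAT_MAX_OFF_STATE }
	};

	/* Valid: the CPU state (OFF) is deeper than the cluster state */
	assert(psci_validate_suspend_req(&ok, PSTATE_TYPE_POWERDOWN)
			== PSCI_E_SUCCESS);

	/* Invalid: a cluster cannot be OFF while its CPU merely retains state */
	assert(psci_validate_suspend_req(&bad, PSTATE_TYPE_POWERDOWN)
			== PSCI_E_INVALID_PARAMS);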
-
-/******************************************************************************
- * This function finds the highest power level which will be powered down
- * amongst all the power levels specified in the 'state_info' structure
- *****************************************************************************/
-unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info)
-{
-       int i;
-
-       for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
-               if (is_local_state_off(state_info->pwr_domain_state[i]))
-                       return i;
-       }
-
-       return PSCI_INVALID_PWR_LVL;
-}
-
-/******************************************************************************
- * This function finds the level of the highest power domain which will be
- * placed in a low power state during a suspend operation.
- *****************************************************************************/
-unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info)
-{
-       int i;
-
-       for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
-               if (!is_local_state_run(state_info->pwr_domain_state[i]))
-                       return i;
-       }
-
-       return PSCI_INVALID_PWR_LVL;
-}
-
-/*******************************************************************************
- * This function is passed a cpu_index and the highest level in the topology
- * tree that the operation should be applied to. It picks up locks in order of
- * increasing power domain level in the range specified.
- ******************************************************************************/
-void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
-                                  unsigned int cpu_idx)
-{
-       unsigned int parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
-       unsigned int level;
-
-       /* No locking required for level 0. Hence start locking from level 1 */
-       for (level = PSCI_CPU_PWR_LVL + 1; level <= end_pwrlvl; level++) {
-               psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]);
-               parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
-       }
-}
-
-/*******************************************************************************
- * This function is passed a cpu_index and the highest level in the topology
- * tree that the operation should be applied to. It releases the locks in order
- * of decreasing power domain level in the range specified.
- ******************************************************************************/
-void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
-                                  unsigned int cpu_idx)
-{
-       unsigned int parent_idx, parent_nodes[PLAT_MAX_PWR_LVL] = {0};
-       int level;
-
-       /* Get the parent nodes */
-       psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);
-
-       /* Unlock top down. No unlocking required for level 0. */
-       for (level = end_pwrlvl; level >= PSCI_CPU_PWR_LVL + 1; level--) {
-               parent_idx = parent_nodes[level - 1];
-               psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]);
-       }
-}
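
These two helpers are always used as a bracket around state coordination; the CPU_OFF and power-up finishers later in this patch follow the same pattern, sketched here:

	psci_acquire_pwr_domain_locks(end_pwrlvl, cpu_idx);

	/* Topology is now stable: coordinate and commit power states */

	psci_release_pwr_domain_locks(end_pwrlvl, cpu_idx);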
-
-/*******************************************************************************
- * Simple routine to determine whether an mpidr is valid or not.
- ******************************************************************************/
-int psci_validate_mpidr(u_register_t mpidr)
-{
-       if (plat_core_pos_by_mpidr(mpidr) < 0)
-               return PSCI_E_INVALID_PARAMS;
-
-       return PSCI_E_SUCCESS;
-}
-
-/*******************************************************************************
- * This function determines the full entrypoint information for the requested
- * PSCI entrypoint on power on/resume and returns it.
- ******************************************************************************/
-static int psci_get_ns_ep_info(entry_point_info_t *ep,
-                              uintptr_t entrypoint,
-                              u_register_t context_id)
-{
-       u_register_t ep_attr, sctlr;
-       unsigned int daif, ee, mode;
-       u_register_t ns_scr_el3 = read_scr_el3();
-       u_register_t ns_sctlr_el1 = read_sctlr_el1();
-
-       sctlr = ns_scr_el3 & SCR_HCE_BIT ? read_sctlr_el2() : ns_sctlr_el1;
-       ee = 0;
-
-       ep_attr = NON_SECURE | EP_ST_DISABLE;
-       if (sctlr & SCTLR_EE_BIT) {
-               ep_attr |= EP_EE_BIG;
-               ee = 1;
-       }
-       SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);
-
-       ep->pc = entrypoint;
-       memset(&ep->args, 0, sizeof(ep->args));
-       ep->args.arg0 = context_id;
-
-       /*
-        * Figure out whether the cpu enters the non-secure address space
-        * in aarch32 or aarch64
-        */
-       if (ns_scr_el3 & SCR_RW_BIT) {
-
-               /*
-                * Check whether a Thumb entry point has been provided for an
-                * aarch64 EL
-                */
-               if (entrypoint & 0x1)
-                       return PSCI_E_INVALID_ADDRESS;
-
-               mode = ns_scr_el3 & SCR_HCE_BIT ? MODE_EL2 : MODE_EL1;
-
-               ep->spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
-       } else {
-
-               mode = ns_scr_el3 & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;
-
-               /*
-                * TODO: Choose async. exception bits if HYP mode is not
-                * implemented according to the values of SCR.{AW, FW} bits
-                */
-               daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;
-
-               ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, daif);
-       }
-
-       return PSCI_E_SUCCESS;
-}
-
-/*******************************************************************************
- * This function validates the entrypoint with the platform layer if the
- * appropriate pm_ops hook is exported by the platform and returns the
- * 'entry_point_info'.
- ******************************************************************************/
-int psci_validate_entry_point(entry_point_info_t *ep,
-                             uintptr_t entrypoint,
-                             u_register_t context_id)
-{
-       int rc;
-
-       /* Validate the entrypoint using platform psci_ops */
-       if (psci_plat_pm_ops->validate_ns_entrypoint) {
-               rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
-               if (rc != PSCI_E_SUCCESS)
-                       return PSCI_E_INVALID_ADDRESS;
-       }
-
-       /*
-        * Verify and derive the re-entry information for
-        * the non-secure world from the non-secure state from
-        * where this call originated.
-        */
-       rc = psci_get_ns_ep_info(ep, entrypoint, context_id);
-       return rc;
-}
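
The platform hook consulted here is optional. A hypothetical implementation that restricts entry points to non-secure DRAM might look as follows (`PLAT_NS_DRAM_BASE` and `PLAT_NS_DRAM_SIZE` are illustrative names, not part of this patch):

static int plat_validate_ns_entrypoint(uintptr_t entrypoint)
{
	/* Accept only addresses within the (hypothetical) NS DRAM window */
	if ((entrypoint >= PLAT_NS_DRAM_BASE) &&
	    (entrypoint < (PLAT_NS_DRAM_BASE + PLAT_NS_DRAM_SIZE)))
		return PSCI_E_SUCCESS;

	return PSCI_E_INVALID_ADDRESS;
}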
-
-/*******************************************************************************
- * Generic handler which is called when a cpu is physically powered on. It
- * traverses the node information and finds the highest power level powered
- * off and performs generic, architectural, platform setup and state management
- * to power on that power level and power levels below it.
- * e.g. for a cpu that has been powered on, it will call the platform specific
- * code to enable the gic cpu interface; for a cluster, it will additionally
- * enable coherency at the interconnect level.
- ******************************************************************************/
-void psci_power_up_finish(void)
-{
-       unsigned int end_pwrlvl, cpu_idx = plat_my_core_pos();
-       psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
-
-       /*
-        * Verify that we have been explicitly turned ON or resumed from
-        * suspend.
-        */
-       if (psci_get_aff_info_state() == AFF_STATE_OFF) {
-               ERROR("Unexpected affinity info state");
-               panic();
-       }
-
-       /*
-        * Get the maximum power domain level to traverse to after this cpu
-        * has been physically powered up.
-        */
-       end_pwrlvl = get_power_on_target_pwrlvl();
-
-       /*
-        * This function acquires the lock corresponding to each power level so
-        * that by the time all locks are taken, the system topology is snapshotted
-        * and state management can be done safely.
-        */
-       psci_acquire_pwr_domain_locks(end_pwrlvl,
-                                     cpu_idx);
-
-#if ENABLE_PSCI_STAT
-       /*
-        * Capture power up time-stamp.
-        * No cache maintenance is required as caches are off
-        * and writes are direct to the main memory.
-        */
-       PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_EXIT_LOW_PWR,
-               PMF_NO_CACHE_MAINT);
-#endif
-
-       psci_get_target_local_pwr_states(end_pwrlvl, &state_info);
-
-       /*
-        * This CPU could be resuming from suspend or it could have just been
-        * turned on. To distinguish between these 2 cases, we examine the
-        * affinity state of the CPU:
-        *  - If the affinity state is ON_PENDING then it has just been
-        *    turned on.
-        *  - Else it is resuming from suspend.
-        *
-        * Depending on the type of warm reset identified, choose the right set
-        * of power management handlers and perform the generic, architecture
-        * and platform specific handling.
-        */
-       if (psci_get_aff_info_state() == AFF_STATE_ON_PENDING)
-               psci_cpu_on_finish(cpu_idx, &state_info);
-       else
-               psci_cpu_suspend_finish(cpu_idx, &state_info);
-
-       /*
-        * Set the requested and target state of this CPU and all the higher
-        * power domains which are ancestors of this CPU to run.
-        */
-       psci_set_pwr_domains_to_run(end_pwrlvl);
-
-#if ENABLE_PSCI_STAT
-       /*
-        * Update PSCI stats.
-        * Caches are off when writing stats data on the power down path.
-        * Since caches are now enabled, it's necessary to do cache
-        * maintenance before reading that same data.
-        */
-       psci_stats_update_pwr_up(end_pwrlvl, &state_info, PMF_CACHE_MAINT);
-#endif
-
-       /*
-        * This loop releases the lock corresponding to each power level
-        * in the reverse order to which they were acquired.
-        */
-       psci_release_pwr_domain_locks(end_pwrlvl,
-                                     cpu_idx);
-}
-
-/*******************************************************************************
- * This function initializes the set of hooks that PSCI invokes as part of power
- * management operation. The power management hooks are expected to be provided
- * by the SPD after it finishes all its initialization.
- ******************************************************************************/
-void psci_register_spd_pm_hook(const spd_pm_ops_t *pm)
-{
-       assert(pm);
-       psci_spd_pm = pm;
-
-       if (pm->svc_migrate)
-               psci_caps |= define_psci_cap(PSCI_MIG_AARCH64);
-
-       if (pm->svc_migrate_info)
-               psci_caps |= define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64)
-                               | define_psci_cap(PSCI_MIG_INFO_TYPE);
-}
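
As an example, a Secure Payload Dispatcher would register its hooks once its own setup completes. A sketch using only the hook names visible in this patch (the handler functions themselves are hypothetical):

static const spd_pm_ops_t spd_pm = {
	.svc_on           = spd_cpu_on_handler,
	.svc_off          = spd_cpu_off_handler,
	.svc_migrate      = spd_migrate_handler,
	.svc_migrate_info = spd_migrate_info_handler,
};

/* Called at the end of SPD initialisation */
psci_register_spd_pm_hook(&spd_pm);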
-
-/*******************************************************************************
- * This function invokes the migrate info hook in the spd_pm_ops. It performs
- * the necessary return value validation. If the Secure Payload is UP and
- * migrate capable, it returns the mpidr of the CPU on which the Secure
- * Payload is resident through the mpidr parameter. Otherwise, the value of
- * the parameter on return is undefined.
- ******************************************************************************/
-int psci_spd_migrate_info(u_register_t *mpidr)
-{
-       int rc;
-
-       if (!psci_spd_pm || !psci_spd_pm->svc_migrate_info)
-               return PSCI_E_NOT_SUPPORTED;
-
-       rc = psci_spd_pm->svc_migrate_info(mpidr);
-
-       assert(rc == PSCI_TOS_UP_MIG_CAP || rc == PSCI_TOS_NOT_UP_MIG_CAP \
-               || rc == PSCI_TOS_NOT_PRESENT_MP || rc == PSCI_E_NOT_SUPPORTED);
-
-       return rc;
-}
-
-/*******************************************************************************
- * This function prints the state of all power domains present in the
- * system
- ******************************************************************************/
-void psci_print_power_domain_map(void)
-{
-#if LOG_LEVEL >= LOG_LEVEL_INFO
-       unsigned int idx;
-       plat_local_state_t state;
-       plat_local_state_type_t state_type;
-
-       /* This array maps to the PSCI_STATE_X definitions in psci.h */
-       static const char * const psci_state_type_str[] = {
-               "ON",
-               "RETENTION",
-               "OFF",
-       };
-
-       INFO("PSCI Power Domain Map:\n");
-       for (idx = 0; idx < (PSCI_NUM_PWR_DOMAINS - PLATFORM_CORE_COUNT);
-                                                       idx++) {
-               state_type = find_local_state_type(
-                               psci_non_cpu_pd_nodes[idx].local_state);
-               INFO("  Domain Node : Level %u, parent_node %d,"
-                               " State %s (0x%x)\n",
-                               psci_non_cpu_pd_nodes[idx].level,
-                               psci_non_cpu_pd_nodes[idx].parent_node,
-                               psci_state_type_str[state_type],
-                               psci_non_cpu_pd_nodes[idx].local_state);
-       }
-
-       for (idx = 0; idx < PLATFORM_CORE_COUNT; idx++) {
-               state = psci_get_cpu_local_state_by_idx(idx);
-               state_type = find_local_state_type(state);
-               INFO("  CPU Node : MPID 0x%llx, parent_node %d,"
-                               " State %s (0x%x)\n",
-                               (unsigned long long)psci_cpu_pd_nodes[idx].mpidr,
-                               psci_cpu_pd_nodes[idx].parent_node,
-                               psci_state_type_str[state_type],
-                               psci_get_cpu_local_state_by_idx(idx));
-       }
-#endif
-}
-
-#if ENABLE_PLAT_COMPAT
-/*******************************************************************************
- * PSCI Compatibility helper function to return the 'power_state' parameter of
- * the PSCI CPU SUSPEND request for the current CPU. Returns PSCI_INVALID_DATA
- * if not invoked within CPU_SUSPEND for the current CPU.
- ******************************************************************************/
-int psci_get_suspend_powerstate(void)
-{
-       /* Sanity check to verify that CPU is within CPU_SUSPEND */
-       if (psci_get_aff_info_state() == AFF_STATE_ON &&
-               !is_local_state_run(psci_get_cpu_local_state()))
-               return psci_power_state_compat[plat_my_core_pos()];
-
-       return PSCI_INVALID_DATA;
-}
-
-/*******************************************************************************
- * PSCI Compatibility helper function to return the state id of the current
- * cpu encoded in the 'power_state' parameter. Returns PSCI_INVALID_DATA
- * if not invoked within CPU_SUSPEND for the current CPU.
- ******************************************************************************/
-int psci_get_suspend_stateid(void)
-{
-       unsigned int power_state;
-       power_state = psci_get_suspend_powerstate();
-       if (power_state != PSCI_INVALID_DATA)
-               return psci_get_pstate_id(power_state);
-
-       return PSCI_INVALID_DATA;
-}
-
-/*******************************************************************************
- * PSCI Compatibility helper function to return the state id encoded in the
- * 'power_state' parameter of the CPU specified by 'mpidr'. Returns
- * PSCI_INVALID_DATA if the CPU is not in CPU_SUSPEND.
- ******************************************************************************/
-int psci_get_suspend_stateid_by_mpidr(unsigned long mpidr)
-{
-       int cpu_idx = plat_core_pos_by_mpidr(mpidr);
-
-       if (cpu_idx == -1)
-               return PSCI_INVALID_DATA;
-
-       /* Sanity check to verify that the CPU is in CPU_SUSPEND */
-       if (psci_get_aff_info_state_by_idx(cpu_idx) == AFF_STATE_ON &&
-               !is_local_state_run(psci_get_cpu_local_state_by_idx(cpu_idx)))
-               return psci_get_pstate_id(psci_power_state_compat[cpu_idx]);
-
-       return PSCI_INVALID_DATA;
-}
-
-/*******************************************************************************
- * This function returns the highest affinity level which is in OFF
- * state. The affinity instance with which the level is associated is
- * determined by the caller.
- ******************************************************************************/
-unsigned int psci_get_max_phys_off_afflvl(void)
-{
-       psci_power_state_t state_info;
-
-       memset(&state_info, 0, sizeof(state_info));
-       psci_get_target_local_pwr_states(PLAT_MAX_PWR_LVL, &state_info);
-
-       return psci_find_target_suspend_lvl(&state_info);
-}
-
-/*******************************************************************************
- * PSCI Compatibility helper function to return target affinity level requested
- * for the CPU_SUSPEND. This function assumes affinity levels correspond to
- * power domain levels on the platform.
- ******************************************************************************/
-int psci_get_suspend_afflvl(void)
-{
-       return psci_get_suspend_pwrlvl();
-}
-
-#endif
diff --git a/services/std_svc/psci/psci_entry.S b/services/std_svc/psci/psci_entry.S
deleted file mode 100644 (file)
index f8c0afa..0000000
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <asm_macros.S>
-#include <el3_common_macros.S>
-#include <psci.h>
-#include <xlat_tables.h>
-
-       .globl  psci_entrypoint
-       .globl  psci_power_down_wfi
-
-       /* --------------------------------------------------------------------
-        * This CPU has been physically powered up. It is either resuming from
-        * suspend or has simply been turned on. In both cases, call the power
-        * on finisher.
-        * --------------------------------------------------------------------
-        */
-func psci_entrypoint
-       /*
-        * On the warm boot path, most of the EL3 initialisations performed by
-        * 'el3_entrypoint_common' must be skipped:
-        *
-        *  - Only when the platform bypasses the BL1/BL31 entrypoint by
-        *    programming the reset address do we need to set the CPU endianness.
-        *    In other cases, we assume this has been taken care by the
-        *    entrypoint code.
-        *
-        *  - No need to determine the type of boot, we know it is a warm boot.
-        *
-        *  - Do not try to distinguish between primary and secondary CPUs, this
-        *    notion only exists for a cold boot.
-        *
-        *  - No need to initialise the memory or the C runtime environment,
-        *    it has been done once and for all on the cold boot path.
-        */
-       el3_entrypoint_common                                   \
-               _set_endian=PROGRAMMABLE_RESET_ADDRESS          \
-               _warm_boot_mailbox=0                            \
-               _secondary_cold_boot=0                          \
-               _init_memory=0                                  \
-               _init_c_runtime=0                               \
-               _exception_vectors=runtime_exceptions
-
-       /* --------------------------------------------
-        * Enable the MMU with the DCache disabled. It
-        * is safe to use stacks allocated in normal
-        * memory as a result. All memory accesses are
-        * marked nGnRnE when the MMU is disabled. So
-        * all the stack writes will make it to memory.
-        * All memory accesses are marked Non-cacheable
-        * when the MMU is enabled but D$ is disabled.
-        * So used stack memory is guaranteed to be
-        * visible immediately after the MMU is enabled
-        * Enabling the DCache at the same time as the
-        * MMU can lead to speculatively fetched and
-        * possibly stale stack memory being read from
-        * other caches. This can lead to coherency
-        * issues.
-        * --------------------------------------------
-        */
-       mov     x0, #DISABLE_DCACHE
-       bl      bl31_plat_enable_mmu
-
-       bl      psci_power_up_finish
-
-       b       el3_exit
-endfunc psci_entrypoint
-
-       /* --------------------------------------------
-        * This function is called to indicate to the
-        * power controller that it is safe to power
-        * down this cpu. It should not exit the wfi
-        * and will be released from reset upon power
-        * up. The call to 'plat_panic_handler' catches
-        * erroneous exits from wfi.
-        * --------------------------------------------
-        */
-func psci_power_down_wfi
-       dsb     sy              // ensure write buffer empty
-       wfi
-       bl      plat_panic_handler
-endfunc psci_power_down_wfi
-
diff --git a/services/std_svc/psci/psci_helpers.S b/services/std_svc/psci/psci_helpers.S
deleted file mode 100644 (file)
index 6ccf943..0000000
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <asm_macros.S>
-#include <assert_macros.S>
-#include <platform_def.h>
-#include <psci.h>
-
-       .globl  psci_do_pwrdown_cache_maintenance
-       .globl  psci_do_pwrup_cache_maintenance
-
-/* -----------------------------------------------------------------------
- * void psci_do_pwrdown_cache_maintenance(unsigned int power_level);
- *
- * This function performs cache maintenance for the specified power
- * level. The levels of cache affected are determined by the power
- * level which is passed as the argument, i.e. level 0 results
- * in a flush of the L1 cache. Both the L1 and L2 caches are flushed
- * for a higher power level.
- *
- * Additionally, this function also ensures that stack memory is correctly
- * flushed out to avoid coherency issues due to a change in its memory
- * attributes after the data cache is disabled.
- * -----------------------------------------------------------------------
- */
-func psci_do_pwrdown_cache_maintenance
-       stp     x29, x30, [sp,#-16]!
-       stp     x19, x20, [sp,#-16]!
-
-       /* ---------------------------------------------
-        * Determine how many levels of cache will be
-        * subject to cache maintenance. Power level
-        * 0 implies that only the cpu is being powered
-        * down. Only the L1 data cache needs to be
-        * flushed to the PoU in this case. For a higher
-        * power level we are assuming that a flush
-        * of L1 data and L2 unified cache is enough.
-        * This information should be provided by the
-        * platform.
-        * ---------------------------------------------
-        */
-       cmp     w0, #PSCI_CPU_PWR_LVL
-       b.eq    do_core_pwr_dwn
-       bl      prepare_cluster_pwr_dwn
-       b       do_stack_maintenance
-
-do_core_pwr_dwn:
-       bl      prepare_core_pwr_dwn
-
-       /* ---------------------------------------------
-        * Do stack maintenance by flushing the used
-        * stack to the main memory and invalidating the
-        * remainder.
-        * ---------------------------------------------
-        */
-do_stack_maintenance:
-       bl      plat_get_my_stack
-
-       /* ---------------------------------------------
-        * Calculate and store the size of the used
-        * stack memory in x1.
-        * ---------------------------------------------
-        */
-       mov     x19, x0
-       mov     x1, sp
-       sub     x1, x0, x1
-       mov     x0, sp
-       bl      flush_dcache_range
-
-       /* ---------------------------------------------
-        * Calculate and store the size of the unused
-        * stack memory in x1. Calculate and store the
-        * stack base address in x0.
-        * ---------------------------------------------
-        */
-       sub     x0, x19, #PLATFORM_STACK_SIZE
-       sub     x1, sp, x0
-       bl      inv_dcache_range
-
-       ldp     x19, x20, [sp], #16
-       ldp     x29, x30, [sp], #16
-       ret
-endfunc psci_do_pwrdown_cache_maintenance
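
For context, the CPU_OFF path later in this patch invokes this helper from C with the deepest level that will actually lose power:

	psci_do_pwrdown_cache_maintenance(psci_find_max_off_lvl(&state_info));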
-
-
-/* -----------------------------------------------------------------------
- * void psci_do_pwrup_cache_maintenance(void);
- *
- * This function performs cache maintenance after this cpu is powered up.
- * Currently, this involves managing the used stack memory before turning
- * on the data cache.
- * -----------------------------------------------------------------------
- */
-func psci_do_pwrup_cache_maintenance
-       stp     x29, x30, [sp,#-16]!
-
-       /* ---------------------------------------------
-        * Ensure any inflight stack writes have made it
-        * to main memory.
-        * ---------------------------------------------
-        */
-       dmb     st
-
-       /* ---------------------------------------------
-        * Calculate and store the size of the used
-        * stack memory in x1. Calculate and store the
-        * stack base address in x0.
-        * ---------------------------------------------
-        */
-       bl      plat_get_my_stack
-       mov     x1, sp
-       sub     x1, x0, x1
-       mov     x0, sp
-       bl      inv_dcache_range
-
-       /* ---------------------------------------------
-        * Enable the data cache.
-        * ---------------------------------------------
-        */
-       mrs     x0, sctlr_el3
-       orr     x0, x0, #SCTLR_C_BIT
-       msr     sctlr_el3, x0
-       isb
-
-       ldp     x29, x30, [sp], #16
-       ret
-endfunc psci_do_pwrup_cache_maintenance
diff --git a/services/std_svc/psci/psci_main.c b/services/std_svc/psci/psci_main.c
deleted file mode 100644 (file)
index 04ef10e..0000000
+++ /dev/null
@@ -1,440 +0,0 @@
-/*
- * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <arch_helpers.h>
-#include <assert.h>
-#include <debug.h>
-#include <platform.h>
-#include <runtime_svc.h>
-#include <std_svc.h>
-#include <string.h>
-#include "psci_private.h"
-
-/*******************************************************************************
- * PSCI frontend api for servicing SMCs. Described in the PSCI spec.
- ******************************************************************************/
-int psci_cpu_on(u_register_t target_cpu,
-               uintptr_t entrypoint,
-               u_register_t context_id)
-
-{
-       int rc;
-       entry_point_info_t ep;
-
-       /* Determine if the cpu exists or not */
-       rc = psci_validate_mpidr(target_cpu);
-       if (rc != PSCI_E_SUCCESS)
-               return PSCI_E_INVALID_PARAMS;
-
-       /* Validate the entry point and get the entry_point_info */
-       rc = psci_validate_entry_point(&ep, entrypoint, context_id);
-       if (rc != PSCI_E_SUCCESS)
-               return rc;
-
-       /*
-        * To turn this cpu on, specify which power
-        * levels need to be turned on
-        * levels need to be turned on.
-       return psci_cpu_on_start(target_cpu, &ep);
-}
-
-unsigned int psci_version(void)
-{
-       return PSCI_MAJOR_VER | PSCI_MINOR_VER;
-}
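
The returned value packs the major version into the upper half-word and the minor version into the lower half-word, so a caller can decode it as below (a sketch; the field layout is per the PSCI specification):

	unsigned int ver = psci_version();
	unsigned int major = (ver >> 16) & 0xffff;	/* e.g. 1 for PSCI 1.0 */
	unsigned int minor = ver & 0xffff;		/* e.g. 0 for PSCI 1.0 */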
-
-int psci_cpu_suspend(unsigned int power_state,
-                    uintptr_t entrypoint,
-                    u_register_t context_id)
-{
-       int rc;
-       unsigned int target_pwrlvl, is_power_down_state;
-       entry_point_info_t ep;
-       psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
-       plat_local_state_t cpu_pd_state;
-
-       /* Validate the power_state parameter */
-       rc = psci_validate_power_state(power_state, &state_info);
-       if (rc != PSCI_E_SUCCESS) {
-               assert(rc == PSCI_E_INVALID_PARAMS);
-               return rc;
-       }
-
-       /*
-        * Get the value of the state type bit from the power state parameter.
-        */
-       is_power_down_state = psci_get_pstate_type(power_state);
-
-       /* Sanity check the requested suspend levels */
-       assert(psci_validate_suspend_req(&state_info, is_power_down_state)
-                       == PSCI_E_SUCCESS);
-
-       target_pwrlvl = psci_find_target_suspend_lvl(&state_info);
-
-       /* Fast path for CPU standby. */
-       if (is_cpu_standby_req(is_power_down_state, target_pwrlvl)) {
-               if (!psci_plat_pm_ops->cpu_standby)
-                       return PSCI_E_INVALID_PARAMS;
-
-               /*
-                * Set the state of the CPU power domain to the platform
-                * specific retention state and enter the standby state.
-                */
-               cpu_pd_state = state_info.pwr_domain_state[PSCI_CPU_PWR_LVL];
-               psci_set_cpu_local_state(cpu_pd_state);
-
-#if ENABLE_PSCI_STAT
-                * Capture time-stamp before CPU standby.
-                * No cache maintenance is needed as caches
-                * are ON throughout the CPU standby operation.
-                * are ON through out the CPU standby operation.
-                */
-               PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR,
-                       PMF_NO_CACHE_MAINT);
-#endif
-
-               psci_plat_pm_ops->cpu_standby(cpu_pd_state);
-
-               /* Upon exit from standby, set the state back to RUN. */
-               psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
-
-#if ENABLE_PSCI_STAT
-               /* Capture time-stamp after CPU standby */
-               PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_EXIT_LOW_PWR,
-                       PMF_NO_CACHE_MAINT);
-
-               /* Update PSCI stats */
-               psci_stats_update_pwr_up(PSCI_CPU_PWR_LVL, &state_info,
-                       PMF_NO_CACHE_MAINT);
-#endif
-
-               return PSCI_E_SUCCESS;
-       }
-
-       /*
-        * If a power down state has been requested, we need to verify entry
-        * point and program entry information.
-        */
-       if (is_power_down_state) {
-               rc = psci_validate_entry_point(&ep, entrypoint, context_id);
-               if (rc != PSCI_E_SUCCESS)
-                       return rc;
-       }
-
-       /*
-        * Do what is needed to enter the power down state. Upon success,
-        * enter the final wfi which will power down this CPU. This function
-        * might return if the power down was abandoned for any reason, e.g.
-        * arrival of an interrupt
-        */
-       psci_cpu_suspend_start(&ep,
-                           target_pwrlvl,
-                           &state_info,
-                           is_power_down_state);
-
-       return PSCI_E_SUCCESS;
-}
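
For reference, `psci_validate_power_state()` and `psci_get_pstate_type()` above decode the original (non-extended) power_state format, in which the StateID occupies bits[15:0], the StateType bit[16] (1 = power down) and the PowerLevel bits[25:24]. A sketch of composing a cluster-level power-down request (the field layout is from the PSCI specification; the StateID value itself is platform defined):

	/* Cluster-level (level 1) power-down request with StateID 0 */
	unsigned int power_state = (1u << 16)	/* StateType: power down     */
				 | (1u << 24)	/* PowerLevel: 1 (cluster)   */
				 | 0u;		/* StateID: platform defined */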
-
-int psci_system_suspend(uintptr_t entrypoint, u_register_t context_id)
-{
-       int rc;
-       psci_power_state_t state_info;
-       entry_point_info_t ep;
-
-       /* Check if the current CPU is the last ON CPU in the system */
-       if (!psci_is_last_on_cpu())
-               return PSCI_E_DENIED;
-
-       /* Validate the entry point and get the entry_point_info */
-       rc = psci_validate_entry_point(&ep, entrypoint, context_id);
-       if (rc != PSCI_E_SUCCESS)
-               return rc;
-
-       /* Query the psci_power_state for system suspend */
-       psci_query_sys_suspend_pwrstate(&state_info);
-
-       /* Ensure that the psci_power_state makes sense */
-       assert(psci_find_target_suspend_lvl(&state_info) == PLAT_MAX_PWR_LVL);
-       assert(psci_validate_suspend_req(&state_info, PSTATE_TYPE_POWERDOWN)
-                                               == PSCI_E_SUCCESS);
-       assert(is_local_state_off(state_info.pwr_domain_state[PLAT_MAX_PWR_LVL]));
-
-       /*
-        * Do what is needed to enter the system suspend state. This function
-        * might return if the power down was abandoned for any reason, e.g.
-        * arrival of an interrupt
-        */
-       psci_cpu_suspend_start(&ep,
-                           PLAT_MAX_PWR_LVL,
-                           &state_info,
-                           PSTATE_TYPE_POWERDOWN);
-
-       return PSCI_E_SUCCESS;
-}
-
-int psci_cpu_off(void)
-{
-       int rc;
-       unsigned int target_pwrlvl = PLAT_MAX_PWR_LVL;
-
-       /*
-        * Do what is needed to power off this CPU and possibly higher power
-        * levels if it is able to do so. Upon success, enter the final wfi
-        * which will power down this CPU.
-        */
-       rc = psci_do_cpu_off(target_pwrlvl);
-
-       /*
-        * The only error cpu_off can return is E_DENIED. So check if that's
-        * indeed the case.
-        */
-       assert(rc == PSCI_E_DENIED);
-
-       return rc;
-}
-
-int psci_affinity_info(u_register_t target_affinity,
-                      unsigned int lowest_affinity_level)
-{
-       unsigned int target_idx;
-
-       /* We don't support levels higher than PSCI_CPU_PWR_LVL */
-       if (lowest_affinity_level > PSCI_CPU_PWR_LVL)
-               return PSCI_E_INVALID_PARAMS;
-
-       /* Calculate the cpu index of the target */
-       target_idx = plat_core_pos_by_mpidr(target_affinity);
-       if (target_idx == -1)
-               return PSCI_E_INVALID_PARAMS;
-
-       return psci_get_aff_info_state_by_idx(target_idx);
-}
-
-int psci_migrate(u_register_t target_cpu)
-{
-       int rc;
-       u_register_t resident_cpu_mpidr;
-
-       rc = psci_spd_migrate_info(&resident_cpu_mpidr);
-       if (rc != PSCI_TOS_UP_MIG_CAP)
-               return (rc == PSCI_TOS_NOT_UP_MIG_CAP) ?
-                         PSCI_E_DENIED : PSCI_E_NOT_SUPPORTED;
-
-       /*
-        * Migrate should only be invoked on the CPU where
-        * the Secure OS is resident.
-        */
-       if (resident_cpu_mpidr != read_mpidr_el1())
-               return PSCI_E_NOT_PRESENT;
-
-       /* Check the validity of the specified target cpu */
-       rc = psci_validate_mpidr(target_cpu);
-       if (rc != PSCI_E_SUCCESS)
-               return PSCI_E_INVALID_PARAMS;
-
-       assert(psci_spd_pm && psci_spd_pm->svc_migrate);
-
-       rc = psci_spd_pm->svc_migrate(read_mpidr_el1(), target_cpu);
-       assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);
-
-       return rc;
-}
-
-int psci_migrate_info_type(void)
-{
-       u_register_t resident_cpu_mpidr;
-
-       return psci_spd_migrate_info(&resident_cpu_mpidr);
-}
-
-long psci_migrate_info_up_cpu(void)
-{
-       u_register_t resident_cpu_mpidr;
-       int rc;
-
-       /*
-        * The return value of this depends upon what
-        * psci_spd_migrate_info() returns.
-        */
-       rc = psci_spd_migrate_info(&resident_cpu_mpidr);
-       if (rc != PSCI_TOS_NOT_UP_MIG_CAP && rc != PSCI_TOS_UP_MIG_CAP)
-               return PSCI_E_INVALID_PARAMS;
-
-       return resident_cpu_mpidr;
-}
-
-int psci_features(unsigned int psci_fid)
-{
-       unsigned int local_caps = psci_caps;
-
-       /* Check if it is a 64 bit function */
-       if (((psci_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_64)
-               local_caps &= PSCI_CAP_64BIT_MASK;
-
-       /* Check for invalid fid */
-       if (!(is_std_svc_call(psci_fid) && is_valid_fast_smc(psci_fid)
-                       && is_psci_fid(psci_fid)))
-               return PSCI_E_NOT_SUPPORTED;
-
-       /* Check if the psci fid is supported or not */
-       if (!(local_caps & define_psci_cap(psci_fid)))
-               return PSCI_E_NOT_SUPPORTED;
-
-       /* Format the feature flags */
-       if (psci_fid == PSCI_CPU_SUSPEND_AARCH32 ||
-                       psci_fid == PSCI_CPU_SUSPEND_AARCH64) {
-               /*
-                * The trusted firmware does not support OS Initiated Mode.
-                */
-               return (FF_PSTATE << FF_PSTATE_SHIFT) |
-                       ((!FF_SUPPORTS_OS_INIT_MODE) << FF_MODE_SUPPORT_SHIFT);
-       }
-
-       /* Return 0 for all other fid's */
-       return PSCI_E_SUCCESS;
-}
-
-/*******************************************************************************
- * PSCI top level handler for servicing SMCs.
- ******************************************************************************/
-uintptr_t psci_smc_handler(uint32_t smc_fid,
-                         u_register_t x1,
-                         u_register_t x2,
-                         u_register_t x3,
-                         u_register_t x4,
-                         void *cookie,
-                         void *handle,
-                         u_register_t flags)
-{
-       if (is_caller_secure(flags))
-               SMC_RET1(handle, SMC_UNK);
-
-       /* Check the fid against the capabilities */
-       if (!(psci_caps & define_psci_cap(smc_fid)))
-               SMC_RET1(handle, SMC_UNK);
-
-       if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) {
-               /* 32-bit PSCI function, clear top parameter bits */
-
-               x1 = (uint32_t)x1;
-               x2 = (uint32_t)x2;
-               x3 = (uint32_t)x3;
-
-               switch (smc_fid) {
-               case PSCI_VERSION:
-                       SMC_RET1(handle, psci_version());
-
-               case PSCI_CPU_OFF:
-                       SMC_RET1(handle, psci_cpu_off());
-
-               case PSCI_CPU_SUSPEND_AARCH32:
-                       SMC_RET1(handle, psci_cpu_suspend(x1, x2, x3));
-
-               case PSCI_CPU_ON_AARCH32:
-                       SMC_RET1(handle, psci_cpu_on(x1, x2, x3));
-
-               case PSCI_AFFINITY_INFO_AARCH32:
-                       SMC_RET1(handle, psci_affinity_info(x1, x2));
-
-               case PSCI_MIG_AARCH32:
-                       SMC_RET1(handle, psci_migrate(x1));
-
-               case PSCI_MIG_INFO_TYPE:
-                       SMC_RET1(handle, psci_migrate_info_type());
-
-               case PSCI_MIG_INFO_UP_CPU_AARCH32:
-                       SMC_RET1(handle, psci_migrate_info_up_cpu());
-
-               case PSCI_SYSTEM_SUSPEND_AARCH32:
-                       SMC_RET1(handle, psci_system_suspend(x1, x2));
-
-               case PSCI_SYSTEM_OFF:
-                       psci_system_off();
-                       /* We should never return from psci_system_off() */
-
-               case PSCI_SYSTEM_RESET:
-                       psci_system_reset();
-                       /* We should never return from psci_system_reset() */
-
-               case PSCI_FEATURES:
-                       SMC_RET1(handle, psci_features(x1));
-
-#if ENABLE_PSCI_STAT
-               case PSCI_STAT_RESIDENCY_AARCH32:
-                       SMC_RET1(handle, psci_stat_residency(x1, x2));
-
-               case PSCI_STAT_COUNT_AARCH32:
-                       SMC_RET1(handle, psci_stat_count(x1, x2));
-#endif
-
-               default:
-                       break;
-               }
-       } else {
-               /* 64-bit PSCI function */
-
-               switch (smc_fid) {
-               case PSCI_CPU_SUSPEND_AARCH64:
-                       SMC_RET1(handle, psci_cpu_suspend(x1, x2, x3));
-
-               case PSCI_CPU_ON_AARCH64:
-                       SMC_RET1(handle, psci_cpu_on(x1, x2, x3));
-
-               case PSCI_AFFINITY_INFO_AARCH64:
-                       SMC_RET1(handle, psci_affinity_info(x1, x2));
-
-               case PSCI_MIG_AARCH64:
-                       SMC_RET1(handle, psci_migrate(x1));
-
-               case PSCI_MIG_INFO_UP_CPU_AARCH64:
-                       SMC_RET1(handle, psci_migrate_info_up_cpu());
-
-               case PSCI_SYSTEM_SUSPEND_AARCH64:
-                       SMC_RET1(handle, psci_system_suspend(x1, x2));
-
-#if ENABLE_PSCI_STAT
-               case PSCI_STAT_RESIDENCY_AARCH64:
-                       SMC_RET1(handle, psci_stat_residency(x1, x2));
-
-               case PSCI_STAT_COUNT_AARCH64:
-                       SMC_RET1(handle, psci_stat_count(x1, x2));
-#endif
-
-               default:
-                       break;
-               }
-       }
-
-       WARN("Unimplemented PSCI Call: 0x%x\n", smc_fid);
-       SMC_RET1(handle, SMC_UNK);
-}
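
This handler is not registered with the runtime services framework directly; the Standard Service dispatcher owns the SMC range and forwards PSCI function IDs to it. A simplified sketch of the forwarding step:

	/* Inside the Standard Service SMC handler (simplified) */
	if (is_psci_fid(smc_fid))
		return psci_smc_handler(smc_fid, x1, x2, x3, x4,
					cookie, handle, flags);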
diff --git a/services/std_svc/psci/psci_off.c b/services/std_svc/psci/psci_off.c
deleted file mode 100644 (file)
index 36dab49..0000000
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <arch_helpers.h>
-#include <assert.h>
-#include <debug.h>
-#include <platform.h>
-#include <string.h>
-#include "psci_private.h"
-
-/******************************************************************************
- * Construct the psci_power_state to request power OFF at all power levels.
- ******************************************************************************/
-static void psci_set_power_off_state(psci_power_state_t *state_info)
-{
-       int lvl;
-
-       for (lvl = PSCI_CPU_PWR_LVL; lvl <= PLAT_MAX_PWR_LVL; lvl++)
-               state_info->pwr_domain_state[lvl] = PLAT_MAX_OFF_STATE;
-}
-
-/******************************************************************************
- * Top level handler which is called when a cpu wants to power itself down.
- * It's assumed that along with turning the cpu power domain off, power
- * domains at higher levels will be turned off as far as possible. It finds
- * the highest level where a domain has to be powered off by traversing the
- * node information and then performs generic, architectural, platform setup
- * and state management required to turn OFF that power domain and domains
- * below it. e.g. For a cpu that's to be powered OFF, it could mean programming
- * the power controller whereas for a cluster that's to be powered off, it will
- * call the platform specific code which will disable coherency at the
- * interconnect level if the cpu is the last in the cluster and also
- * program the power controller.
- ******************************************************************************/
-int psci_do_cpu_off(unsigned int end_pwrlvl)
-{
-       int rc = PSCI_E_SUCCESS, idx = plat_my_core_pos();
-       psci_power_state_t state_info;
-
-       /*
-        * This function must only be called on platforms where the
-        * CPU_OFF platform hooks have been implemented.
-        */
-       assert(psci_plat_pm_ops->pwr_domain_off);
-
-       /*
-        * This function acquires the lock corresponding to each power
-        * level so that by the time all locks are taken, the system topology
-        * is snapshotted and state management can be done safely.
-        */
-       psci_acquire_pwr_domain_locks(end_pwrlvl,
-                                     idx);
-
-       /*
-        * Call the cpu off handler registered by the Secure Payload Dispatcher
-        * to let it do any bookkeeping. Assume that the SPD always reports an
-        * E_DENIED error if the SP refuses to power down.
-        */
-       if (psci_spd_pm && psci_spd_pm->svc_off) {
-               rc = psci_spd_pm->svc_off(0);
-               if (rc)
-                       goto exit;
-       }
-
-       /* Construct the psci_power_state for CPU_OFF */
-       psci_set_power_off_state(&state_info);
-
-       /*
-        * This function is passed the requested state info and
-        * it returns the negotiated state info for each power level up to
-        * the end level specified.
-        */
-       psci_do_state_coordination(end_pwrlvl, &state_info);
-
-#if ENABLE_PSCI_STAT
-       /* Update the last cpu for each level till end_pwrlvl */
-       psci_stats_update_pwr_down(end_pwrlvl, &state_info);
-#endif
-
-       /*
-        * Arch. management. Perform the necessary steps to flush all
-        * cpu caches.
-        */
-       psci_do_pwrdown_cache_maintenance(psci_find_max_off_lvl(&state_info));
-
-       /*
-        * Plat. management: Perform platform specific actions to turn this
-        * cpu off e.g. exit cpu coherency, program the power controller etc.
-        */
-       psci_plat_pm_ops->pwr_domain_off(&state_info);
-
-#if ENABLE_PSCI_STAT
-       /*
-        * Capture time-stamp while entering low power state.
-        * No cache maintenance needed because caches are off
-        * and writes are direct to main memory.
-        */
-       PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR,
-               PMF_NO_CACHE_MAINT);
-#endif
-
-exit:
-       /*
-        * Release the locks corresponding to each power level in the
-        * reverse order to which they were acquired.
-        */
-       psci_release_pwr_domain_locks(end_pwrlvl,
-                                     idx);
-
-       /*
-        * Check if all actions needed to safely power down this cpu have
-        * successfully completed.
-        */
-       if (rc == PSCI_E_SUCCESS) {
-               /*
-                * Set the affinity info state to OFF. This writes directly to
-                * main memory as caches are disabled, so cache maintenance is
-                * required to ensure that later cached reads of aff_info_state
-                * return AFF_STATE_OFF.  A dsbish() ensures ordering of the
-                * update to the affinity info state prior to cache line
-                * invalidation.
-                */
-               flush_cpu_data(psci_svc_cpu_data.aff_info_state);
-               psci_set_aff_info_state(AFF_STATE_OFF);
-               dsbish();
-               inv_cpu_data(psci_svc_cpu_data.aff_info_state);
-
-               if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi) {
-                       /* This function must not return */
-                       psci_plat_pm_ops->pwr_domain_pwr_down_wfi(&state_info);
-               } else {
-                       /*
-                        * Enter a wfi loop which will allow the power
-                        * controller to physically power down this cpu.
-                        */
-                       psci_power_down_wfi();
-               }
-       }
-
-       return rc;
-}
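
The final branch above prefers a platform `pwr_domain_pwr_down_wfi` hook when one is provided. A hypothetical implementation might arm the power controller before entering the terminal wfi (only the hook name and signature come from this patch; the helper is illustrative):

static void __dead2 plat_pwr_domain_pwr_down_wfi(
		const psci_power_state_t *target_state)
{
	/* Hypothetical helper: arm the power controller for this core */
	plat_power_controller_arm_self();

	/* Mirror psci_power_down_wfi(): drain writes, then wait for reset */
	dsbsy();
	while (1)
		wfi();
}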
diff --git a/services/std_svc/psci/psci_on.c b/services/std_svc/psci/psci_on.c
deleted file mode 100644 (file)
index c8c36cd..0000000
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <arch_helpers.h>
-#include <assert.h>
-#include <bl_common.h>
-#include <bl31.h>
-#include <debug.h>
-#include <context_mgmt.h>
-#include <platform.h>
-#include <runtime_svc.h>
-#include <stddef.h>
-#include "psci_private.h"
-
-/*******************************************************************************
- * This function checks whether a cpu which has been requested to be turned on
- * is OFF to begin with.
- ******************************************************************************/
-static int cpu_on_validate_state(aff_info_state_t aff_state)
-{
-       if (aff_state == AFF_STATE_ON)
-               return PSCI_E_ALREADY_ON;
-
-       if (aff_state == AFF_STATE_ON_PENDING)
-               return PSCI_E_ON_PENDING;
-
-       assert(aff_state == AFF_STATE_OFF);
-       return PSCI_E_SUCCESS;
-}
-
-/*******************************************************************************
- * Generic handler which is called to physically power on a cpu identified by
- * its mpidr. It performs the generic, architectural, platform setup and state
- * management to power on the target cpu e.g. it will ensure that
- * enough information is stashed for it to resume execution in the non-secure
- * security state.
- *
- * The state of all the relevant power domains are changed after calling the
- * platform handler as it can return error.
- ******************************************************************************/
-int psci_cpu_on_start(u_register_t target_cpu,
-                     entry_point_info_t *ep)
-{
-       int rc;
-       unsigned int target_idx = plat_core_pos_by_mpidr(target_cpu);
-       aff_info_state_t target_aff_state;
-
-       /* Calling function must supply valid input arguments */
-       assert((int) target_idx >= 0);
-       assert(ep != NULL);
-
-       /*
-        * This function must only be called on platforms where the
-        * CPU_ON platform hooks have been implemented.
-        */
-       assert(psci_plat_pm_ops->pwr_domain_on &&
-                       psci_plat_pm_ops->pwr_domain_on_finish);
-
-       /* Protect against multiple CPUs trying to turn ON the same target CPU */
-       psci_spin_lock_cpu(target_idx);
-
-       /*
-        * Generic management: Ensure that the cpu is off before it
-        * can be turned on.
-        */
-       rc = cpu_on_validate_state(psci_get_aff_info_state_by_idx(target_idx));
-       if (rc != PSCI_E_SUCCESS)
-               goto exit;
-
-       /*
-        * Call the cpu on handler registered by the Secure Payload Dispatcher
-        * to let it do any book-keeping. If the handler encounters an error,
-        * it is expected to assert within itself.
-        */
-       if (psci_spd_pm && psci_spd_pm->svc_on)
-               psci_spd_pm->svc_on(target_cpu);
-
-       /*
-        * Set the Affinity info state of the target cpu to ON_PENDING.
-        * Flush aff_info_state as it will be accessed with caches
-        * turned OFF.
-        */
-       psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
-       flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
-
-       /*
-        * The cache line invalidation by the target CPU after setting the
-        * state to OFF (see psci_do_cpu_off()), could cause the update to
-        * aff_info_state to be invalidated. Retry the update if the target
-        * CPU aff_info_state is not ON_PENDING.
-        */
-       target_aff_state = psci_get_aff_info_state_by_idx(target_idx);
-       if (target_aff_state != AFF_STATE_ON_PENDING) {
-               assert(target_aff_state == AFF_STATE_OFF);
-               psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
-               flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
-
-               assert(psci_get_aff_info_state_by_idx(target_idx) == AFF_STATE_ON_PENDING);
-       }
-
-       /*
-        * Perform generic, architecture and platform specific handling.
-        */
-       /*
-        * Plat. management: Give the platform the current state
-        * of the target cpu to allow it to perform the necessary
-        * steps to power on.
-        */
-       rc = psci_plat_pm_ops->pwr_domain_on(target_cpu);
-       assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);
-
-       if (rc == PSCI_E_SUCCESS)
-               /* Store the re-entry information for the non-secure world. */
-               cm_init_context_by_index(target_idx, ep);
-       else {
-               /* Restore the state on error. */
-               psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_OFF);
-               flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
-       }
-
-exit:
-       psci_spin_unlock_cpu(target_idx);
-       return rc;
-}
-
-/*******************************************************************************
- * The following function finishes an earlier power on request. It is
- * called by the common finisher routine in psci_common.c. The `state_info`
- * is the psci_power_state from which this CPU has woken up.
- ******************************************************************************/
-void psci_cpu_on_finish(unsigned int cpu_idx,
-                       psci_power_state_t *state_info)
-{
-       /*
-        * Plat. management: Perform the platform specific actions
-        * for this cpu e.g. enabling the gic or zeroing the mailbox
-        * register. The actual state of this cpu has already been
-        * changed.
-        */
-       psci_plat_pm_ops->pwr_domain_on_finish(state_info);
-
-       /*
-        * Arch. management: Enable data cache and manage stack memory
-        */
-       psci_do_pwrup_cache_maintenance();
-
-       /*
-        * All the platform specific actions for turning this cpu
-        * on have completed. Perform enough arch. initialization
-        * to run in the non-secure address space.
-        */
-       bl31_arch_setup();
-
-       /*
-        * Lock the CPU spin lock to make sure that the context initialization
-        * is done. Since the lock is only used in this function to create
-        * a synchronization point with cpu_on_start(), it can be released
-        * immediately.
-        */
-       psci_spin_lock_cpu(cpu_idx);
-       psci_spin_unlock_cpu(cpu_idx);
-
-       /* Ensure we have been explicitly woken up by another cpu */
-       assert(psci_get_aff_info_state() == AFF_STATE_ON_PENDING);
-
-       /*
-        * Call the cpu on finish handler registered by the Secure Payload
-        * Dispatcher to let it do any book-keeping. If the handler encounters
-        * an error, it is expected to assert within itself.
-        */
-       if (psci_spd_pm && psci_spd_pm->svc_on_finish)
-               psci_spd_pm->svc_on_finish(0);
-
-       /* Populate the mpidr field within the cpu node array */
-       /* This needs to be done only once */
-       psci_cpu_pd_nodes[cpu_idx].mpidr = read_mpidr() & MPIDR_AFFINITY_MASK;
-
-       /*
-        * Generic management: Now we just need to retrieve the
-        * information that we had stashed away during the cpu_on
-        * call to set this cpu on its way.
-        */
-       cm_prepare_el3_exit(NON_SECURE);
-}
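
For reference, the power-on path above is driven from the PSCI_CPU_ON SMC handler. Below is a minimal sketch of such a caller, assuming the validation helpers declared in psci_private.h behave as their names suggest (the actual wiring lives in psci_main.c, which this patch moves to lib/psci/psci_main.c):

    /*
     * Sketch of a PSCI_CPU_ON entry point invoking psci_cpu_on_start().
     * psci_validate_mpidr() and psci_validate_entry_point() are the
     * helpers declared in psci_private.h.
     */
    int psci_cpu_on(u_register_t target_cpu, uintptr_t entrypoint,
                    u_register_t context_id)
    {
            int rc;
            entry_point_info_t ep;

            /* Reject MPIDRs that do not map to a core on this platform */
            rc = psci_validate_mpidr(target_cpu);
            if (rc != PSCI_E_SUCCESS)
                    return PSCI_E_INVALID_PARAMS;

            /* Build the non-secure entry point restored by psci_cpu_on_finish() */
            rc = psci_validate_entry_point(&ep, entrypoint, context_id);
            if (rc != PSCI_E_SUCCESS)
                    return rc;

            /* Hand over to the generic power-on state machine */
            return psci_cpu_on_start(target_cpu, &ep);
    }
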
diff --git a/services/std_svc/psci/psci_private.h b/services/std_svc/psci/psci_private.h
deleted file mode 100644 (file)
index f42ce55..0000000
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __PSCI_PRIVATE_H__
-#define __PSCI_PRIVATE_H__
-
-#include <arch.h>
-#include <bakery_lock.h>
-#include <bl_common.h>
-#include <cpu_data.h>
-#include <pmf.h>
-#include <psci.h>
-#include <spinlock.h>
-
-/*
- * The following helper macros abstract the interface to the Bakery
- * Lock API.
- */
-#define psci_lock_init(non_cpu_pd_node, idx)                   \
-       ((non_cpu_pd_node)[(idx)].lock_index = (idx))
-#define psci_lock_get(non_cpu_pd_node)                         \
-       bakery_lock_get(&psci_locks[(non_cpu_pd_node)->lock_index])
-#define psci_lock_release(non_cpu_pd_node)                     \
-       bakery_lock_release(&psci_locks[(non_cpu_pd_node)->lock_index])
-
-/*
- * The PSCI capabilities which are provided by the generic code and do not
- * depend on the platform or SPD capabilities.
- */
-#define PSCI_GENERIC_CAP       \
-                       (define_psci_cap(PSCI_VERSION) |                \
-                       define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) |   \
-                       define_psci_cap(PSCI_FEATURES))
-
-/*
- * The PSCI capabilities mask for 64 bit functions.
- */
-#define PSCI_CAP_64BIT_MASK    \
-                       (define_psci_cap(PSCI_CPU_SUSPEND_AARCH64) |    \
-                       define_psci_cap(PSCI_CPU_ON_AARCH64) |          \
-                       define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) |   \
-                       define_psci_cap(PSCI_MIG_AARCH64) |             \
-                       define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64) | \
-                       define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64) |  \
-                       define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64) |  \
-                       define_psci_cap(PSCI_STAT_COUNT_AARCH64))
-
-/*
- * Helper macros to get/set the fields of PSCI per-cpu data.
- */
-#define psci_set_aff_info_state(aff_state) \
-               set_cpu_data(psci_svc_cpu_data.aff_info_state, aff_state)
-#define psci_get_aff_info_state() \
-               get_cpu_data(psci_svc_cpu_data.aff_info_state)
-#define psci_get_aff_info_state_by_idx(idx) \
-               get_cpu_data_by_index(idx, psci_svc_cpu_data.aff_info_state)
-#define psci_set_aff_info_state_by_idx(idx, aff_state) \
-               set_cpu_data_by_index(idx, psci_svc_cpu_data.aff_info_state,\
-                                       aff_state)
-#define psci_get_suspend_pwrlvl() \
-               get_cpu_data(psci_svc_cpu_data.target_pwrlvl)
-#define psci_set_suspend_pwrlvl(target_lvl) \
-               set_cpu_data(psci_svc_cpu_data.target_pwrlvl, target_lvl)
-#define psci_set_cpu_local_state(state) \
-               set_cpu_data(psci_svc_cpu_data.local_state, state)
-#define psci_get_cpu_local_state() \
-               get_cpu_data(psci_svc_cpu_data.local_state)
-#define psci_get_cpu_local_state_by_idx(idx) \
-               get_cpu_data_by_index(idx, psci_svc_cpu_data.local_state)
-
-/*
- * Helper macros for the CPU level spinlocks
- */
-#define psci_spin_lock_cpu(idx)        spin_lock(&psci_cpu_pd_nodes[idx].cpu_lock)
-#define psci_spin_unlock_cpu(idx) spin_unlock(&psci_cpu_pd_nodes[idx].cpu_lock)
-
-/* Helper macro to identify a CPU standby request in PSCI Suspend call */
-#define is_cpu_standby_req(is_power_down_state, retn_lvl) \
-               (((!(is_power_down_state)) && ((retn_lvl) == 0)) ? 1 : 0)
-
-/* The following are used as IDs to capture time-stamps */
-#define PSCI_STAT_ID_ENTER_LOW_PWR             0
-#define PSCI_STAT_ID_EXIT_LOW_PWR              1
-#define PSCI_STAT_TOTAL_IDS                    2
-
-/* Declare PMF service functions for PSCI */
-PMF_DECLARE_CAPTURE_TIMESTAMP(psci_svc)
-PMF_DECLARE_GET_TIMESTAMP(psci_svc)
-
-/*******************************************************************************
- * The following two data structures implement the power domain tree. The tree
- * is used to track the state of all the nodes i.e. power domain instances
- * described by the platform. The tree consists of nodes that describe CPU power
- * domains i.e. leaf nodes and all other power domains which are parents of a
- * CPU power domain i.e. non-leaf nodes.
- ******************************************************************************/
-typedef struct non_cpu_pwr_domain_node {
-       /*
-        * Index of the first CPU power domain node level 0 which has this node
-        * as its parent.
-        */
-       unsigned int cpu_start_idx;
-
-       /*
-        * Number of CPU power domains which are siblings of the domain indexed
-        * by 'cpu_start_idx' i.e. all the domains in the range 'cpu_start_idx
-        * -> cpu_start_idx + ncpus' have this node as their parent.
-        */
-       unsigned int ncpus;
-
-       /*
-        * Index of the parent power domain node.
-        * TODO: Figure out whether using a pointer is more efficient.
-        */
-       unsigned int parent_node;
-
-       plat_local_state_t local_state;
-
-       unsigned char level;
-
-       /* For indexing the psci_lock array */
-       unsigned char lock_index;
-} non_cpu_pd_node_t;
-
-typedef struct cpu_pwr_domain_node {
-       u_register_t mpidr;
-
-       /*
-        * Index of the parent power domain node.
-        * TODO: Figure out whether using a pointer is more efficient.
-        */
-       unsigned int parent_node;
-
-       /*
-        * A CPU power domain does not require state coordination like its
-        * parent power domains. Hence this node does not include a bakery
-        * lock. A spinlock is required by the CPU_ON handler to prevent a race
-        * when multiple CPUs try to turn ON the same target CPU.
-        */
-       spinlock_t cpu_lock;
-} cpu_pd_node_t;
-
-/*******************************************************************************
- * Data prototypes
- ******************************************************************************/
-extern const plat_psci_ops_t *psci_plat_pm_ops;
-extern non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS];
-extern cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
-extern unsigned int psci_caps;
-
-/* One bakery lock is required for each non-cpu power domain */
-DECLARE_BAKERY_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
-
-/*******************************************************************************
- * SPD's power management hooks registered with PSCI
- ******************************************************************************/
-extern const spd_pm_ops_t *psci_spd_pm;
-
-/*******************************************************************************
- * Function prototypes
- ******************************************************************************/
-/* Private exported functions from psci_common.c */
-int psci_validate_power_state(unsigned int power_state,
-                             psci_power_state_t *state_info);
-void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info);
-int psci_validate_mpidr(u_register_t mpidr);
-void psci_init_req_local_pwr_states(void);
-void psci_power_up_finish(void);
-int psci_validate_entry_point(entry_point_info_t *ep,
-                       uintptr_t entrypoint, u_register_t context_id);
-void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
-                                     unsigned int end_lvl,
-                                     unsigned int node_index[]);
-void psci_do_state_coordination(unsigned int end_pwrlvl,
-                               psci_power_state_t *state_info);
-void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
-                                  unsigned int cpu_idx);
-void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
-                                  unsigned int cpu_idx);
-int psci_validate_suspend_req(const psci_power_state_t *state_info,
-                             unsigned int is_power_down_state_req);
-unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info);
-unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info);
-void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl);
-void psci_print_power_domain_map(void);
-unsigned int psci_is_last_on_cpu(void);
-int psci_spd_migrate_info(u_register_t *mpidr);
-
-/* Private exported functions from psci_on.c */
-int psci_cpu_on_start(u_register_t target_cpu,
-                     entry_point_info_t *ep);
-
-void psci_cpu_on_finish(unsigned int cpu_idx,
-                       psci_power_state_t *state_info);
-
-/* Private exported functions from psci_off.c */
-int psci_do_cpu_off(unsigned int end_pwrlvl);
-
-/* Private exported functions from psci_suspend.c */
-void psci_cpu_suspend_start(entry_point_info_t *ep,
-                       unsigned int end_pwrlvl,
-                       psci_power_state_t *state_info,
-                       unsigned int is_power_down_state_req);
-
-void psci_cpu_suspend_finish(unsigned int cpu_idx,
-                       psci_power_state_t *state_info);
-
-/* Private exported functions from psci_helpers.S */
-void psci_do_pwrdown_cache_maintenance(unsigned int pwr_level);
-void psci_do_pwrup_cache_maintenance(void);
-
-/* Private exported functions from psci_system_off.c */
-void __dead2 psci_system_off(void);
-void __dead2 psci_system_reset(void);
-
-/* Private exported functions from psci_stat.c */
-void psci_stats_update_pwr_down(unsigned int end_pwrlvl,
-                       const psci_power_state_t *state_info);
-void psci_stats_update_pwr_up(unsigned int end_pwrlvl,
-                       const psci_power_state_t *state_info,
-                       unsigned int flags);
-u_register_t psci_stat_residency(u_register_t target_cpu,
-                       unsigned int power_state);
-u_register_t psci_stat_count(u_register_t target_cpu,
-                       unsigned int power_state);
-
-#endif /* __PSCI_PRIVATE_H__ */
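
To illustrate the bakery lock helpers above, here is a hedged sketch of how the locks of a CPU's non-CPU ancestor domains might be acquired bottom-up; the real traversal is psci_acquire_pwr_domain_locks() in psci_common.c, and the example_ name is illustrative:

    /*
     * Illustrative only: acquire the locks of all ancestor power domains
     * of `cpu_idx` up to `end_pwrlvl`, using the psci_lock_get() helper
     * and the parent_node links maintained in the power domain tree.
     */
    static void example_acquire_ancestor_locks(unsigned int end_pwrlvl,
                                               unsigned int cpu_idx)
    {
            unsigned int parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
            unsigned int level;

            for (level = PSCI_CPU_PWR_LVL + 1U; level <= end_pwrlvl; level++) {
                    psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]);
                    parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
            }
    }
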
diff --git a/services/std_svc/psci/psci_setup.c b/services/std_svc/psci/psci_setup.c
deleted file mode 100644 (file)
index 975b257..0000000
+++ /dev/null
@@ -1,261 +0,0 @@
-/*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <arch_helpers.h>
-#include <assert.h>
-#include <bl_common.h>
-#include <context.h>
-#include <context_mgmt.h>
-#include <platform.h>
-#include <stddef.h>
-#include "psci_private.h"
-
-/*******************************************************************************
- * Per cpu non-secure contexts used to program the architectural state prior
- * to return to the normal world.
- * TODO: Use the memory allocator to set aside memory for the contexts instead
- * of relying on platform defined constants.
- ******************************************************************************/
-static cpu_context_t psci_ns_context[PLATFORM_CORE_COUNT];
-
-/******************************************************************************
- * Define the psci capability variable.
- *****************************************************************************/
-unsigned int psci_caps;
-
-/*******************************************************************************
- * Function which initializes the 'psci_non_cpu_pd_nodes' or the
- * 'psci_cpu_pd_nodes' corresponding to the power level.
- ******************************************************************************/
-static void psci_init_pwr_domain_node(unsigned int node_idx,
-                                       unsigned int parent_idx,
-                                       unsigned int level)
-{
-       if (level > PSCI_CPU_PWR_LVL) {
-               psci_non_cpu_pd_nodes[node_idx].level = level;
-               psci_lock_init(psci_non_cpu_pd_nodes, node_idx);
-               psci_non_cpu_pd_nodes[node_idx].parent_node = parent_idx;
-               psci_non_cpu_pd_nodes[node_idx].local_state =
-                                                        PLAT_MAX_OFF_STATE;
-       } else {
-               psci_cpu_data_t *svc_cpu_data;
-
-               psci_cpu_pd_nodes[node_idx].parent_node = parent_idx;
-
-               /* Initialize with an invalid mpidr */
-               psci_cpu_pd_nodes[node_idx].mpidr = PSCI_INVALID_MPIDR;
-
-               svc_cpu_data =
-                       &(_cpu_data_by_index(node_idx)->psci_svc_cpu_data);
-
-               /* Set the Affinity Info for the cores as OFF */
-               svc_cpu_data->aff_info_state = AFF_STATE_OFF;
-
-               /* Invalidate the suspend level for the cpu */
-               svc_cpu_data->target_pwrlvl = PSCI_INVALID_PWR_LVL;
-
-               /* Set the power state to OFF state */
-               svc_cpu_data->local_state = PLAT_MAX_OFF_STATE;
-
-               flush_dcache_range((uintptr_t)svc_cpu_data,
-                                                sizeof(*svc_cpu_data));
-
-               cm_set_context_by_index(node_idx,
-                                       (void *) &psci_ns_context[node_idx],
-                                       NON_SECURE);
-       }
-}
-
-/*******************************************************************************
- * This function updates the cpu_start_idx and ncpus fields for each node in
- * psci_non_cpu_pd_nodes[]. It does so by comparing the parent nodes of each
- * CPU and checking whether they match the parent of the previous
- * CPU. The basic assumption for this to work is that children of the same
- * parent are allocated adjacent indices. The platform should ensure this
- * through proper mapping of the CPUs to indices via plat_core_pos_by_mpidr() and
- * plat_my_core_pos() APIs.
- *******************************************************************************/
-static void psci_update_pwrlvl_limits(void)
-{
-       int j;
-       unsigned int nodes_idx[PLAT_MAX_PWR_LVL] = {0};
-       unsigned int temp_index[PLAT_MAX_PWR_LVL], cpu_idx;
-
-       for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
-               psci_get_parent_pwr_domain_nodes(cpu_idx,
-                                                PLAT_MAX_PWR_LVL,
-                                                temp_index);
-               for (j = PLAT_MAX_PWR_LVL - 1; j >= 0; j--) {
-                       if (temp_index[j] != nodes_idx[j]) {
-                               nodes_idx[j] = temp_index[j];
-                               psci_non_cpu_pd_nodes[nodes_idx[j]].cpu_start_idx
-                                       = cpu_idx;
-                       }
-                       psci_non_cpu_pd_nodes[nodes_idx[j]].ncpus++;
-               }
-       }
-}
-
-/*******************************************************************************
- * Core routine to populate the power domain tree. The tree descriptor passed by
- * the platform is traversed breadth-first and the first entry in the map
- * gives the number of root power domains. The parent nodes of the root nodes
- * will point to an invalid entry (-1).
- ******************************************************************************/
-static void populate_power_domain_tree(const unsigned char *topology)
-{
-       unsigned int i, j = 0, num_nodes_at_lvl = 1, num_nodes_at_next_lvl;
-       unsigned int node_index = 0, parent_node_index = 0, num_children;
-       int level = PLAT_MAX_PWR_LVL;
-
-       /*
-        * For each level the inputs are:
-        * - Number of nodes at this level in plat_array i.e. num_nodes_at_lvl.
-        *   This is the sum of values of nodes at the parent level.
-        * - Index of first entry at this level in the plat_array i.e.
-        *   parent_node_index.
-        * - Index of first free entry in psci_non_cpu_pd_nodes[] or
-        *   psci_cpu_pd_nodes[] i.e. node_index depending upon the level.
-        */
-       while (level >= PSCI_CPU_PWR_LVL) {
-               num_nodes_at_next_lvl = 0;
-               /*
-                * For each entry (parent node) at this level in the plat_array:
-                * - Find the number of children
-                * - Allocate a node in a power domain array for each child
-                * - Set the parent of the child to the parent_node_index - 1
-                * - Increment parent_node_index to point to the next parent
-                * - Accumulate the number of children at next level.
-                */
-               for (i = 0; i < num_nodes_at_lvl; i++) {
-                       assert(parent_node_index <=
-                                       PSCI_NUM_NON_CPU_PWR_DOMAINS);
-                       num_children = topology[parent_node_index];
-
-                       for (j = node_index;
-                               j < node_index + num_children; j++)
-                               psci_init_pwr_domain_node(j,
-                                                         parent_node_index - 1,
-                                                         level);
-
-                       node_index = j;
-                       num_nodes_at_next_lvl += num_children;
-                       parent_node_index++;
-               }
-
-               num_nodes_at_lvl = num_nodes_at_next_lvl;
-               level--;
-
-               /* Reset the index for the cpu power domain array */
-               if (level == PSCI_CPU_PWR_LVL)
-                       node_index = 0;
-       }
-
-       /* Validate the sanity of the array exported by the platform */
-       assert(j == PLATFORM_CORE_COUNT);
-}
-
-/*******************************************************************************
- * This function initializes the power domain topology tree by querying the
- * platform. The power domain nodes higher than the CPU are populated in the
- * array psci_non_cpu_pd_nodes[] and the CPU power domains are populated in
- * psci_cpu_pd_nodes[]. The platform exports its static topology map through the
- * plat_get_power_domain_tree_desc() API. The algorithm populates the
- * psci_non_cpu_pd_nodes and psci_cpu_pd_nodes iteratively by using this
- * topology map. On a platform that implements two clusters of 2 cpus each, and
- * supports 3 domain levels, the populated psci_non_cpu_pd_nodes would look
- * like this:
- *
- * ---------------------------------------------------
- * | system node | cluster 0 node  | cluster 1 node  |
- * ---------------------------------------------------
- *
- * And the populated psci_cpu_pd_nodes would look like this:
- * <-    cpus cluster0   -><-   cpus cluster1   ->
- * ------------------------------------------------
- * |   CPU 0   |   CPU 1   |   CPU 2   |   CPU 3  |
- * ------------------------------------------------
- ******************************************************************************/
-int psci_setup(void)
-{
-       const unsigned char *topology_tree;
-
-       /* Query the topology map from the platform */
-       topology_tree = plat_get_power_domain_tree_desc();
-
-       /* Populate the power domain arrays using the platform topology map */
-       populate_power_domain_tree(topology_tree);
-
-       /* Update the CPU limits for each node in psci_non_cpu_pd_nodes */
-       psci_update_pwrlvl_limits();
-
-       /* Populate the mpidr field of cpu node for this CPU */
-       psci_cpu_pd_nodes[plat_my_core_pos()].mpidr =
-               read_mpidr() & MPIDR_AFFINITY_MASK;
-
-       psci_init_req_local_pwr_states();
-
-       /*
-        * Set the requested and target state of this CPU and all the higher
-        * power domain levels for this CPU to run.
-        */
-       psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL);
-
-       plat_setup_psci_ops((uintptr_t)psci_entrypoint,
-                                       &psci_plat_pm_ops);
-       assert(psci_plat_pm_ops);
-
-       /* Initialize the psci capability */
-       psci_caps = PSCI_GENERIC_CAP;
-
-       if (psci_plat_pm_ops->pwr_domain_off)
-               psci_caps |=  define_psci_cap(PSCI_CPU_OFF);
-       if (psci_plat_pm_ops->pwr_domain_on &&
-                       psci_plat_pm_ops->pwr_domain_on_finish)
-               psci_caps |=  define_psci_cap(PSCI_CPU_ON_AARCH64);
-       if (psci_plat_pm_ops->pwr_domain_suspend &&
-                       psci_plat_pm_ops->pwr_domain_suspend_finish) {
-               psci_caps |=  define_psci_cap(PSCI_CPU_SUSPEND_AARCH64);
-               if (psci_plat_pm_ops->get_sys_suspend_power_state)
-                       psci_caps |=  define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64);
-       }
-       if (psci_plat_pm_ops->system_off)
-               psci_caps |=  define_psci_cap(PSCI_SYSTEM_OFF);
-       if (psci_plat_pm_ops->system_reset)
-               psci_caps |=  define_psci_cap(PSCI_SYSTEM_RESET);
-
-#if ENABLE_PSCI_STAT
-       psci_caps |=  define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64);
-       psci_caps |=  define_psci_cap(PSCI_STAT_COUNT_AARCH64);
-#endif
-
-       return 0;
-}
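
To make the breadth-first descriptor format concrete, the two-cluster system described in the comment above would correspond to a platform exporting something like the following. This is a sketch: plat_get_power_domain_tree_desc() is the real porting API, while the array name is illustrative.

    /*
     * Topology for the example above: one system node at the root with
     * two cluster children, each cluster with two CPU children. The
     * first entry is the number of root power domains; subsequent
     * entries give each node's child count in breadth-first order.
     */
    static const unsigned char example_power_domain_tree_desc[] = {
            1,      /* Number of root (system level) power domains */
            2,      /* Number of clusters under the system node */
            2,      /* Number of CPUs in cluster 0 */
            2       /* Number of CPUs in cluster 1 */
    };

    const unsigned char *plat_get_power_domain_tree_desc(void)
    {
            return example_power_domain_tree_desc;
    }
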
diff --git a/services/std_svc/psci/psci_stat.c b/services/std_svc/psci/psci_stat.c
deleted file mode 100644 (file)
index 155bbb0..0000000
+++ /dev/null
@@ -1,309 +0,0 @@
-/*
- * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <assert.h>
-#include <debug.h>
-#include <platform.h>
-#include <platform_def.h>
-#include "psci_private.h"
-
-#ifndef PLAT_MAX_PWR_LVL_STATES
-#define PLAT_MAX_PWR_LVL_STATES 2
-#endif
-
-/* Number of ticks elapsed in one second by a 1 MHz signal */
-#define MHZ_TICKS_PER_SEC 1000000
-
-/* Following structure is used for PSCI STAT */
-typedef struct psci_stat {
-       u_register_t residency;
-       u_register_t count;
-} psci_stat_t;
-
-/*
- * The following is used to keep track of the last cpu
- * that powers down within each non cpu power domain.
- */
-static int last_cpu_in_non_cpu_pd[PSCI_NUM_NON_CPU_PWR_DOMAINS] = {
-       [0 ... PSCI_NUM_NON_CPU_PWR_DOMAINS - 1] = -1};
-
-/*
- * Following are used to store PSCI STAT values for
- * CPU and non CPU power domains.
- */
-static psci_stat_t psci_cpu_stat[PLATFORM_CORE_COUNT]
-                               [PLAT_MAX_PWR_LVL_STATES];
-static psci_stat_t psci_non_cpu_stat[PSCI_NUM_NON_CPU_PWR_DOMAINS]
-                               [PLAT_MAX_PWR_LVL_STATES];
-
-/* Register PMF PSCI service */
-PMF_REGISTER_SERVICE(psci_svc, PMF_PSCI_STAT_SVC_ID,
-        PSCI_STAT_TOTAL_IDS, PMF_STORE_ENABLE)
-
-/* The divisor to use to convert raw timestamp into microseconds */
-u_register_t residency_div;
-
-/*
- * This macro calculates the stats residency in microseconds,
- * taking into account the wrap-around condition.
- */
-#define calc_stat_residency(_pwrupts, _pwrdnts, _res)          \
-       do {                                                    \
-               if (_pwrupts < _pwrdnts)                        \
-                       _res = UINT64_MAX - _pwrdnts + _pwrupts;\
-               else                                            \
-                       _res = _pwrupts - _pwrdnts;             \
-               /* Convert timestamp into microseconds */       \
-               _res = _res/residency_div;                      \
-       } while (0)
-
-/*
- * This function returns the index into the `psci_stat_t` array given the
- * local power state and power domain level. If the platform implements the
- * `get_pwr_lvl_state_idx` pm hook, then that will be used to return the index.
- */
-static int get_stat_idx(plat_local_state_t local_state, int pwr_lvl)
-{
-       int idx;
-
-       if (psci_plat_pm_ops->get_pwr_lvl_state_idx == NULL) {
-               assert(PLAT_MAX_PWR_LVL_STATES == 2);
-               if (is_local_state_retn(local_state))
-                       return 0;
-
-               assert(is_local_state_off(local_state));
-               return 1;
-       }
-
-       idx = psci_plat_pm_ops->get_pwr_lvl_state_idx(local_state, pwr_lvl);
-       assert((idx >= 0) && (idx < PLAT_MAX_PWR_LVL_STATES));
-       return idx;
-}
-
-/*******************************************************************************
- * This function is passed the target local power states for each power
- * domain (state_info) between the current CPU domain and its ancestors until
- * the target power level (end_pwrlvl).
- *
- * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
- * updates `last_cpu_in_non_cpu_pd[]` with the id of the last cpu to power down.
- *
- * This function will only be invoked with data cache enabled and while
- * powering down a core.
- ******************************************************************************/
-void psci_stats_update_pwr_down(unsigned int end_pwrlvl,
-                       const psci_power_state_t *state_info)
-{
-       int lvl, parent_idx, cpu_idx = plat_my_core_pos();
-
-       assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
-       assert(state_info);
-
-       parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
-
-       for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
-
-               /* Break early if the target power state is RUN */
-               if (is_local_state_run(state_info->pwr_domain_state[lvl]))
-                       break;
-
-               /*
-                * The power domain is entering a low power state, so this is
-                * the last CPU for this power domain
-                */
-               last_cpu_in_non_cpu_pd[parent_idx] = cpu_idx;
-
-               parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
-       }
-
-}
-
-/*******************************************************************************
- * This function updates the PSCI STATS (residency time and count) for CPU
- * and NON-CPU power domains.
- * It is called with caches enabled and locks acquired (for NON-CPU domains).
- ******************************************************************************/
-void psci_stats_update_pwr_up(unsigned int end_pwrlvl,
-                       const psci_power_state_t *state_info,
-                       unsigned int flags)
-{
-       int parent_idx, cpu_idx = plat_my_core_pos();
-       int lvl, stat_idx;
-       plat_local_state_t local_state;
-       unsigned long long pwrup_ts = 0, pwrdn_ts = 0;
-       u_register_t residency;
-
-       assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
-       assert(state_info);
-
-       /* Initialize the residency divisor if not already initialized */
-       if (!residency_div) {
-               /* Pre-calculate the divisor so that it can be used directly
-                * to convert a time-stamp into microseconds */
-               residency_div = read_cntfrq_el0() / MHZ_TICKS_PER_SEC;
-               assert(residency_div);
-       }
-
-       /* Get power down time-stamp for current CPU */
-       PMF_GET_TIMESTAMP_BY_INDEX(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR,
-                       cpu_idx, flags, pwrdn_ts);
-
-       /* In the case of the first power on, just return */
-       if (!pwrdn_ts)
-               return;
-
-       /* Get power up time-stamp for current CPU */
-       PMF_GET_TIMESTAMP_BY_INDEX(psci_svc, PSCI_STAT_ID_EXIT_LOW_PWR,
-                       cpu_idx, flags, pwrup_ts);
-
-       /* Get the index into the stats array */
-       local_state = state_info->pwr_domain_state[PSCI_CPU_PWR_LVL];
-       stat_idx = get_stat_idx(local_state, PSCI_CPU_PWR_LVL);
-
-       /* Calculate stats residency */
-       calc_stat_residency(pwrup_ts, pwrdn_ts, residency);
-
-       /* Update CPU stats. */
-       psci_cpu_stat[cpu_idx][stat_idx].residency += residency;
-       psci_cpu_stat[cpu_idx][stat_idx].count++;
-
-       /*
-        * Check what power domains above CPU were off
-        * prior to this CPU powering on.
-        */
-       parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
-       for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
-               local_state = state_info->pwr_domain_state[lvl];
-               if (is_local_state_run(local_state)) {
-                       /* Break early */
-                       break;
-               }
-
-               assert(last_cpu_in_non_cpu_pd[parent_idx] != -1);
-
-               /* Get power down time-stamp for last CPU */
-               PMF_GET_TIMESTAMP_BY_INDEX(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR,
-                               last_cpu_in_non_cpu_pd[parent_idx],
-                               flags, pwrdn_ts);
-
-               /* Initialize back to reset value */
-               last_cpu_in_non_cpu_pd[parent_idx] = -1;
-
-               /* Get the index into the stats array */
-               stat_idx = get_stat_idx(local_state, lvl);
-
-               /* Calculate stats residency */
-               calc_stat_residency(pwrup_ts, pwrdn_ts, residency);
-
-               /* Update non cpu stats */
-               psci_non_cpu_stat[parent_idx][stat_idx].residency += residency;
-               psci_non_cpu_stat[parent_idx][stat_idx].count++;
-
-               parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
-       }
-
-}
-
-/*******************************************************************************
- * This function returns the appropriate count and residency time of the
- * local state for the highest power level expressed in the `power_state`
- * for the node represented by `target_cpu`.
- ******************************************************************************/
-int psci_get_stat(u_register_t target_cpu, unsigned int power_state,
-                        psci_stat_t *psci_stat)
-{
-       int rc, pwrlvl, lvl, parent_idx, stat_idx, target_idx;
-       psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
-       plat_local_state_t local_state;
-
-       /* Validate the target_cpu parameter and determine the cpu index */
-       target_idx = plat_core_pos_by_mpidr(target_cpu);
-       if (target_idx == -1)
-               return PSCI_E_INVALID_PARAMS;
-
-       /* Validate the power_state parameter */
-       if (!psci_plat_pm_ops->translate_power_state_by_mpidr)
-               rc = psci_validate_power_state(power_state, &state_info);
-       else
-               rc = psci_plat_pm_ops->translate_power_state_by_mpidr(
-                               target_cpu, power_state, &state_info);
-
-       if (rc != PSCI_E_SUCCESS)
-               return PSCI_E_INVALID_PARAMS;
-
-       /* Find the highest power level */
-       pwrlvl = psci_find_target_suspend_lvl(&state_info);
-       if (pwrlvl == PSCI_INVALID_PWR_LVL)
-               return PSCI_E_INVALID_PARAMS;
-
-       /* Get the index into the stats array */
-       local_state = state_info.pwr_domain_state[pwrlvl];
-       stat_idx = get_stat_idx(local_state, pwrlvl);
-
-       if (pwrlvl > PSCI_CPU_PWR_LVL) {
-               /* Get the power domain index */
-               parent_idx = psci_cpu_pd_nodes[target_idx].parent_node;
-               for (lvl = PSCI_CPU_PWR_LVL + 1; lvl < pwrlvl; lvl++)
-                       parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
-
-               /* Get the non cpu power domain stats */
-               *psci_stat = psci_non_cpu_stat[parent_idx][stat_idx];
-       } else {
-               /* Get the cpu power domain stats */
-               *psci_stat = psci_cpu_stat[target_idx][stat_idx];
-       }
-
-       return PSCI_E_SUCCESS;
-}
-
-/* This is the top level function for PSCI_STAT_RESIDENCY SMC. */
-u_register_t psci_stat_residency(u_register_t target_cpu,
-               unsigned int power_state)
-{
-       psci_stat_t psci_stat;
-
-       int rc = psci_get_stat(target_cpu, power_state, &psci_stat);
-       if (rc == PSCI_E_SUCCESS)
-               return psci_stat.residency;
-       else
-               return 0;
-}
-
-/* This is the top level function for PSCI_STAT_COUNT SMC. */
-u_register_t psci_stat_count(u_register_t target_cpu,
-       unsigned int power_state)
-{
-       psci_stat_t psci_stat;
-
-       int rc = psci_get_stat(target_cpu, power_state, &psci_stat);
-       if (rc == PSCI_E_SUCCESS)
-               return psci_stat.count;
-       else
-               return 0;
-}
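
As a worked example of the residency arithmetic above: with a 50 MHz system counter, residency_div is 50000000 / 1000000 = 50, so a timestamp delta of 5,000,000 ticks converts to 100,000 microseconds. A self-contained check of both branches of the calc_stat_residency() logic, assuming that divisor:

    /* Standalone sanity check of the calc_stat_residency() logic. */
    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t residency_div = 50;    /* 50 MHz counter / 1 MHz */
            uint64_t pwrdn_ts, pwrup_ts, res;

            /* Normal case: 5,000,000 ticks -> 100,000 us */
            pwrdn_ts = 1000;
            pwrup_ts = pwrdn_ts + 5000000;
            res = (pwrup_ts - pwrdn_ts) / residency_div;
            assert(res == 100000);

            /* Wrap-around case: the power-up stamp is numerically smaller */
            pwrdn_ts = UINT64_MAX - 2499999;
            pwrup_ts = 2500001;
            res = (UINT64_MAX - pwrdn_ts + pwrup_ts) / residency_div;
            assert(res == 100000);

            return 0;
    }
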
diff --git a/services/std_svc/psci/psci_suspend.c b/services/std_svc/psci/psci_suspend.c
deleted file mode 100644 (file)
index e6c8cd9..0000000
+++ /dev/null
@@ -1,284 +0,0 @@
-/*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <assert.h>
-#include <bl_common.h>
-#include <arch.h>
-#include <arch_helpers.h>
-#include <context.h>
-#include <context_mgmt.h>
-#include <cpu_data.h>
-#include <debug.h>
-#include <platform.h>
-#include <runtime_svc.h>
-#include <stddef.h>
-#include "psci_private.h"
-
-/*******************************************************************************
- * This function does generic and platform specific operations after a wake-up
- * from standby/retention states at multiple power levels.
- ******************************************************************************/
-static void psci_suspend_to_standby_finisher(unsigned int cpu_idx,
-                                            psci_power_state_t *state_info,
-                                            unsigned int end_pwrlvl)
-{
-       psci_acquire_pwr_domain_locks(end_pwrlvl,
-                               cpu_idx);
-
-       /*
-        * Plat. management: Allow the platform to do operations
-        * on waking up from retention.
-        */
-       psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);
-
-       /*
-        * Set the requested and target state of this CPU and all the higher
-        * power domain levels for this CPU to run.
-        */
-       psci_set_pwr_domains_to_run(end_pwrlvl);
-
-       psci_release_pwr_domain_locks(end_pwrlvl,
-                               cpu_idx);
-}
-
-/*******************************************************************************
- * This function does generic and platform specific suspend to power down
- * operations.
- ******************************************************************************/
-static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
-                                         entry_point_info_t *ep,
-                                         psci_power_state_t *state_info)
-{
-       unsigned int max_off_lvl = psci_find_max_off_lvl(state_info);
-
-       /* Save PSCI target power level for the suspend finisher handler */
-       psci_set_suspend_pwrlvl(end_pwrlvl);
-
-       /*
-        * Flush the target power level as it will be accessed on power up with
-        * Data cache disabled.
-        */
-       flush_cpu_data(psci_svc_cpu_data.target_pwrlvl);
-
-       /*
-        * Call the cpu suspend handler registered by the Secure Payload
-        * Dispatcher to let it do any book-keeping. If the handler encounters an
-        * error, it's expected to assert within
-        */
-       if (psci_spd_pm && psci_spd_pm->svc_suspend)
-               psci_spd_pm->svc_suspend(max_off_lvl);
-
-       /*
-        * Store the re-entry information for the non-secure world.
-        */
-       cm_init_my_context(ep);
-
-       /*
-        * Arch. management. Perform the necessary steps to flush all
-        * cpu caches. Currently we assume that the power level corresponds
-        * to the cache level.
-        * TODO : Introduce a mechanism to query the cache level to flush
-        * and the cpu-ops power down to perform from the platform.
-        */
-       psci_do_pwrdown_cache_maintenance(max_off_lvl);
-}
-
-/*******************************************************************************
- * Top level handler which is called when a cpu wants to suspend its execution.
- * It is assumed that along with suspending the cpu power domain, power domains
- * at higher levels until the target power level will be suspended as well. It
- * coordinates with the platform to negotiate the target state for each of
- * the power domain level till the target power domain level. It then performs
- * generic, architectural, platform setup and state management required to
- * suspend that power domain level and power domain levels below it.
- * e.g. For a cpu that's to be suspended, it could mean programming the
- * power controller whereas for a cluster that's to be suspended, it will call
- * the platform specific code which will disable coherency at the interconnect
- * level if the cpu is the last in the cluster and also program the power
- * controller.
- *
- * All the required parameter checks are performed at the beginning and after
- * the state transition has been done, no further error is expected and it is
- * not possible to undo any of the actions taken beyond that point.
- ******************************************************************************/
-void psci_cpu_suspend_start(entry_point_info_t *ep,
-                           unsigned int end_pwrlvl,
-                           psci_power_state_t *state_info,
-                           unsigned int is_power_down_state)
-{
-       int skip_wfi = 0;
-       unsigned int idx = plat_my_core_pos();
-
-       /*
-        * This function must only be called on platforms where the
-        * CPU_SUSPEND platform hooks have been implemented.
-        */
-       assert(psci_plat_pm_ops->pwr_domain_suspend &&
-                       psci_plat_pm_ops->pwr_domain_suspend_finish);
-
-       /*
-        * This function acquires the lock corresponding to each power
-        * level so that by the time all locks are taken, the system topology
-        * is snapshotted and state management can be done safely.
-        */
-       psci_acquire_pwr_domain_locks(end_pwrlvl,
-                                     idx);
-
-       /*
-        * We check if there are any pending interrupts after the delay
-        * introduced by lock contention to increase the chances of early
-        * detection that a wake-up interrupt has fired.
-        */
-       if (read_isr_el1()) {
-               skip_wfi = 1;
-               goto exit;
-       }
-
-       /*
-        * This function is passed the requested state info and
-        * it returns the negotiated state info for each power level up to
-        * the end level specified.
-        */
-       psci_do_state_coordination(end_pwrlvl, state_info);
-
-#if ENABLE_PSCI_STAT
-       /* Update the last cpu for each level till end_pwrlvl */
-       psci_stats_update_pwr_down(end_pwrlvl, state_info);
-#endif
-
-       if (is_power_down_state)
-               psci_suspend_to_pwrdown_start(end_pwrlvl, ep, state_info);
-
-       /*
-        * Plat. management: Allow the platform to perform the
-        * necessary actions to turn off this cpu e.g. set the
-        * platform defined mailbox with the psci entrypoint,
-        * program the power controller etc.
-        */
-       psci_plat_pm_ops->pwr_domain_suspend(state_info);
-
-#if ENABLE_PSCI_STAT
-       /*
-        * Capture time-stamp while entering low power state.
-        * No cache maintenance needed because caches are off
-        * and writes are direct to main memory.
-        */
-       PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR,
-               PMF_NO_CACHE_MAINT);
-#endif
-
-exit:
-       /*
-        * Release the locks corresponding to each power level in the
-        * reverse order to which they were acquired.
-        */
-       psci_release_pwr_domain_locks(end_pwrlvl,
-                                 idx);
-       if (skip_wfi)
-               return;
-
-       if (is_power_down_state) {
-               /* The function calls below must not return */
-               if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi)
-                       psci_plat_pm_ops->pwr_domain_pwr_down_wfi(state_info);
-               else
-                       psci_power_down_wfi();
-       }
-
-       /*
-        * We will reach here if only retention/standby states have been
-        * requested at multiple power levels. This means that the cpu
-        * context will be preserved.
-        */
-       wfi();
-
-       /*
-        * After we wake up from context retaining suspend, call the
-        * context retaining suspend finisher.
-        */
-       psci_suspend_to_standby_finisher(idx, state_info, end_pwrlvl);
-}
-
-/*******************************************************************************
- * The following function finishes an earlier suspend request. It is
- * called by the common finisher routine in psci_common.c. The `state_info`
- * is the psci_power_state from which this CPU has woken up.
- ******************************************************************************/
-void psci_cpu_suspend_finish(unsigned int cpu_idx,
-                            psci_power_state_t *state_info)
-{
-       unsigned int counter_freq;
-       unsigned int max_off_lvl;
-
-       /* Ensure we have been woken up from a suspended state */
-       assert(psci_get_aff_info_state() == AFF_STATE_ON &&
-              is_local_state_off(state_info->pwr_domain_state[PSCI_CPU_PWR_LVL]));
-
-       /*
-        * Plat. management: Perform the platform specific actions
-        * before we change the state of the cpu e.g. enabling the
-        * gic or zeroing the mailbox register. If anything goes
-        * wrong then assert as there is no way to recover from this
-        * situation.
-        */
-       psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);
-
-       /*
-        * Arch. management: Enable the data cache, manage stack memory and
-        * restore the stashed EL3 architectural context from the 'cpu_context'
-        * structure for this cpu.
-        */
-       psci_do_pwrup_cache_maintenance();
-
-       /* Re-init the cntfrq_el0 register */
-       counter_freq = plat_get_syscnt_freq2();
-       write_cntfrq_el0(counter_freq);
-
-       /*
-        * Call the cpu suspend finish handler registered by the Secure Payload
-        * Dispatcher to let it do any book-keeping. If the handler encounters
-        * an error, it is expected to assert within itself.
-        */
-       if (psci_spd_pm && psci_spd_pm->svc_suspend) {
-               max_off_lvl = psci_find_max_off_lvl(state_info);
-               assert(max_off_lvl != PSCI_INVALID_PWR_LVL);
-               psci_spd_pm->svc_suspend_finish(max_off_lvl);
-       }
-
-       /* Invalidate the suspend level for the cpu */
-       psci_set_suspend_pwrlvl(PSCI_INVALID_PWR_LVL);
-
-       /*
-        * Generic management: Now we just need to retrieve the
-        * information that we had stashed away during the suspend
-        * call to set this cpu on its way.
-        */
-       cm_prepare_el3_exit(NON_SECURE);
-}
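
For context, whether a request takes the full power-down path above or is a simple CPU standby is decided before psci_cpu_suspend_start() is entered. A hedged sketch of that dispatch, using the is_cpu_standby_req() helper from psci_private.h (the real logic lives in psci_cpu_suspend() in psci_main.c; the example_ name is illustrative):

    /*
     * Illustrative dispatch between CPU standby and coordinated suspend.
     */
    static void example_dispatch_suspend(entry_point_info_t *ep,
                                         psci_power_state_t *state_info,
                                         unsigned int is_power_down_state)
    {
            unsigned int target_pwrlvl =
                    psci_find_target_suspend_lvl(state_info);

            if (is_cpu_standby_req(is_power_down_state, target_pwrlvl)) {
                    /*
                     * Retention at the CPU level only: context is
                     * preserved, so a platform standby (wfi) suffices.
                     */
                    psci_plat_pm_ops->cpu_standby(
                            state_info->pwr_domain_state[PSCI_CPU_PWR_LVL]);
                    return;
            }

            /* Otherwise run the full coordinated suspend sequence above */
            psci_cpu_suspend_start(ep, target_pwrlvl, state_info,
                                   is_power_down_state);
    }
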
diff --git a/services/std_svc/psci/psci_system_off.c b/services/std_svc/psci/psci_system_off.c
deleted file mode 100644 (file)
index 28315d6..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stddef.h>
-#include <arch_helpers.h>
-#include <assert.h>
-#include <debug.h>
-#include <platform.h>
-#include "psci_private.h"
-
-void psci_system_off(void)
-{
-       psci_print_power_domain_map();
-
-       assert(psci_plat_pm_ops->system_off);
-
-       /* Notify the Secure Payload Dispatcher */
-       if (psci_spd_pm && psci_spd_pm->svc_system_off) {
-               psci_spd_pm->svc_system_off();
-       }
-
-       /* Call the platform specific hook */
-       psci_plat_pm_ops->system_off();
-
-       /* This function does not return. We should never get here */
-}
-
-void psci_system_reset(void)
-{
-       psci_print_power_domain_map();
-
-       assert(psci_plat_pm_ops->system_reset);
-
-       /* Notify the Secure Payload Dispatcher */
-       if (psci_spd_pm && psci_spd_pm->svc_system_reset) {
-               psci_spd_pm->svc_system_reset();
-       }
-
-       /* Call the platform specific hook */
-       psci_plat_pm_ops->system_reset();
-
-       /* This function does not return. We should never get here */
-}
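
Finally, the system_off and system_reset hooks asserted above are supplied by the platform through plat_setup_psci_ops(), as seen in psci_setup(). A minimal sketch of a platform providing them; the example_ names are illustrative, and a real port would program its power or reset controller where the comments indicate:

    static void __dead2 example_system_off(void)
    {
            /* Program the platform power controller here, then park */
            for (;;)
                    wfi();
    }

    static void __dead2 example_system_reset(void)
    {
            /* Trigger a cold reset via the platform reset controller here */
            for (;;)
                    wfi();
    }

    static const plat_psci_ops_t example_psci_ops = {
            .system_off   = example_system_off,
            .system_reset = example_system_reset,
            /* .pwr_domain_on, .pwr_domain_suspend etc. omitted */
    };

    int plat_setup_psci_ops(uintptr_t sec_entrypoint,
                            const plat_psci_ops_t **psci_ops)
    {
            *psci_ops = &example_psci_ops;
            return 0;
    }
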