Implement ARM Standard Service
author: Jeenu Viswambharan <jeenu.viswambharan@arm.com>
Fri, 28 Feb 2014 10:08:33 +0000 (10:08 +0000)
committer: Dan Handley <dan.handley@arm.com>
Thu, 20 Mar 2014 11:16:23 +0000 (11:16 +0000)
This patch implements ARM Standard Service as a runtime service and adds
support for call count, UID and revision information SMCs. The existing
PSCI implementation is subsumed by the Standard Service calls and all
PSCI calls are therefore dispatched by the Standard Service to the PSCI
handler.

At present, PSCI is the only specification under Standard Service. Thus
call count returns the number of PSCI calls implemented. As this is the
initial implementation, a revision number of 0.1 is returned for call
revision.

Fixes ARM-software/tf-issues#62

Change-Id: I6d4273f72ad6502636efa0f872e288b191a64bc1

23 files changed:
Makefile
bl31/bl31.mk
docs/rt-svc-writers-guide.md
include/psci.h
include/runtime_svc.h
include/std_svc.h [new file with mode: 0644]
services/psci/psci_afflvl_off.c [deleted file]
services/psci/psci_afflvl_on.c [deleted file]
services/psci/psci_afflvl_suspend.c [deleted file]
services/psci/psci_common.c [deleted file]
services/psci/psci_entry.S [deleted file]
services/psci/psci_main.c [deleted file]
services/psci/psci_private.h [deleted file]
services/psci/psci_setup.c [deleted file]
services/std_svc/psci/psci_afflvl_off.c [new file with mode: 0644]
services/std_svc/psci/psci_afflvl_on.c [new file with mode: 0644]
services/std_svc/psci/psci_afflvl_suspend.c [new file with mode: 0644]
services/std_svc/psci/psci_common.c [new file with mode: 0644]
services/std_svc/psci/psci_entry.S [new file with mode: 0644]
services/std_svc/psci/psci_main.c [new file with mode: 0644]
services/std_svc/psci/psci_private.h [new file with mode: 0644]
services/std_svc/psci/psci_setup.c [new file with mode: 0644]
services/std_svc/std_svc_setup.c [new file with mode: 0644]

index 5672f005fb1f19f4c2c4277b02444d2429ce95e3..18b7c8ae08cf8c6816261c751e08b04e955b4d97 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -136,7 +136,7 @@ INCLUDES            +=      -Ilib/include/                  \
                                -Iinclude/${ARCH}/              \
                                -Iinclude/                      \
                                -Iarch/system/gic               \
-                               -Iservices/psci                 \
+                               -Iservices/std_svc/psci         \
                                -Iinclude/stdlib                \
                                -Iinclude/stdlib/sys            \
                                -Iplat/${PLAT}                  \
index 5bc58c59785a5de815f090c3791d89df0cef7523..8f6998c6233510f7aa3b9c238eec82ac6b18b869 100644 (file)
@@ -33,13 +33,15 @@ vpath                       %.c     common                                  \
                                arch/system/gic                         \
                                plat/${PLAT}                            \
                                arch/${ARCH}                            \
-                               services/psci                           \
+                               services/std_svc                        \
+                               services/std_svc/psci                   \
                                lib/sync/locks/bakery                   \
                                plat/${PLAT}/${ARCH}                    \
                                ${PLAT_BL31_C_VPATH}
 
 vpath                  %.S     lib/arch/${ARCH}                        \
-                               services/psci                           \
+                               services/std_svc                        \
+                               services/std_svc/psci                   \
                                include                                 \
                                plat/${PLAT}/${ARCH}                    \
                                lib/sync/locks/exclusive                \
@@ -51,6 +53,7 @@ BL31_SOURCES          +=      bl31_arch_setup.c                       \
                                bl31_entrypoint.S                       \
                                runtime_exceptions.S                    \
                                bl31_main.c                             \
+                               std_svc_setup.c                         \
                                psci_entry.S                            \
                                psci_setup.c                            \
                                psci_common.c                           \
index 07394b056571c89aef35416529c1ea47ea49d3ea..2d13f745e9fcc0b99844e9693d326cb42e7d929d 100644 (file)
@@ -93,9 +93,10 @@ handler will be responsible for all SMC Functions within a given service type.
 3.  Getting started
 -------------------
 
-ARM Trusted Firmware has a [`services`] directory in the source tree under
-which each owning entity can place the implementation of its runtime service.
-The [PSCI] implementation is located here in the [`services/psci`] directory.
+ARM Trusted Firmware has a [`services`] directory in the source tree under which
+each owning entity can place the implementation of its runtime service.  The
+[PSCI] implementation is located here in the [`services/std_svc/psci`]
+directory.
 
 Runtime service sources will need to include the [`runtime_svc.h`] header file.
 
@@ -141,16 +142,16 @@ to ensure that the following conditions are met:
 3.  The `_type` is one of `SMC_TYPE_FAST` or `SMC_TYPE_STD`
 4.  `_setup` and `_smch` routines have been specified
 
-[`psci_steup.c`] provides an example of registering a runtime service:
+[`std_svc_setup.c`] provides an example of registering a runtime service:
 
-    /* Register PSCI as a run time service */
+    /* Register Standard Service Calls as runtime service */
     DECLARE_RT_SVC(
-            psci,
+            std_svc,
             OEN_STD_START,
             OEN_STD_END,
             SMC_TYPE_FAST,
-            psci_setup,
-            psci_smc_handler
+            std_svc_setup,
+            std_svc_smc_handler
     );
 
 
@@ -300,9 +301,9 @@ _Copyright (c) 2014, ARM Limited and Contributors. All rights reserved._
 
 [Firmware Design]:  ./firmware-design.md
 
-[`services`]:       ../services
-[`services/psci`]:  ../services/psci
-[`psci_steup.c`]:   ../services/psci/psci_setup.c
-[`runtime_svc.h`]:  ../include/runtime_svc.h
-[PSCI]:             http://infocenter.arm.com/help/topic/com.arm.doc.den0022b/index.html "Power State Coordination Interface PDD (ARM DEN 0022B.b)"
-[SMCCC]:            http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html "SMC Calling Convention PDD (ARM DEN 0028A)"
+[`services`]:               ../services
+[`services/std_svc/psci`]:  ../services/std_svc/psci
+[`std_svc_setup.c`]:        ../services/std_svc/std_svc_setup.c
+[`runtime_svc.h`]:          ../include/runtime_svc.h
+[PSCI]:                     http://infocenter.arm.com/help/topic/com.arm.doc.den0022b/index.html "Power State Coordination Interface PDD (ARM DEN 0022B.b)"
+[SMCCC]:                    http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html "SMC Calling Convention PDD (ARM DEN 0028A)"
index 922f95f39c845b1224cfb058096e106b6d497f0f..6fe1d8af375ef39349123244dd83add10e1b1067 100644 (file)
 #define PSCI_MIG_INFO_TYPE             0x84000006
 #define PSCI_MIG_INFO_UP_CPU_AARCH32   0x84000007
 #define PSCI_MIG_INFO_UP_CPU_AARCH64   0xc4000007
-#define PSCI_SYSTEM_OFF                0x84000008
+#define PSCI_SYSTEM_OFF                        0x84000008
 #define PSCI_SYSTEM_RESET              0x84000009
 
+/*
+ * Number of PSCI calls (above) implemented. System off and reset aren't
+ * implemented as yet
+ */
+#define PSCI_NUM_CALLS                 13
+
 /*******************************************************************************
  * PSCI Migrate and friends
  ******************************************************************************/
index ad202a9437bea27e79fd33681a7b6f935a30a650..148c0bc80548762195d1c156daf95d565feeae5e 100644 (file)
@@ -232,6 +232,28 @@ CASSERT(RT_SVC_DESC_HANDLE == __builtin_offsetof(rt_svc_desc, handle), \
                                        ((call_type & FUNCID_TYPE_MASK) \
                                         << FUNCID_OEN_WIDTH))
 
+
+/*
+ * Macro to define UUID for services. Apart from defining and initializing a
+ * uuid_t structure, this macro verifies that the first word of the defined UUID
+ * does not equal SMC_UNK. This is to ensure that the caller won't mistake the
+ * returned UUID in x0 for an invalid SMC error return
+ */
+#define DEFINE_SVC_UUID(_name, _tl, _tm, _th, _cl, _ch, \
+               _n0, _n1, _n2, _n3, _n4, _n5) \
+       CASSERT(_tl != SMC_UNK, invalid_svc_uuid);\
+       static const uuid_t _name = { \
+               _tl, _tm, _th, _cl, _ch, \
+               { _n0, _n1, _n2, _n3, _n4, _n5 } \
+       }
+
+/* Return a UUID in the SMC return registers */
+#define SMC_UUID_RET(_h, _uuid) \
+       SMC_RET4(handle, ((const uint32_t *) &(_uuid))[0], \
+                        ((const uint32_t *) &(_uuid))[1], \
+                        ((const uint32_t *) &(_uuid))[2], \
+                        ((const uint32_t *) &(_uuid))[3])
+
 /*******************************************************************************
  * Function & variable prototypes
  ******************************************************************************/
diff --git a/include/std_svc.h b/include/std_svc.h
new file mode 100644 (file)
index 0000000..cbd5b62
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __STD_SVC_H__
+#define __STD_SVC_H__
+
+/* SMC function IDs for Standard Service queries */
+
+#define ARM_STD_SVC_CALL_COUNT         0x8400ff00
+#define ARM_STD_SVC_UID                        0x8400ff01
+/*                                     0x8400ff02 is reserved */
+#define ARM_STD_SVC_VERSION            0x8400ff03
+
+/* ARM Standard Service Calls version numbers */
+#define STD_SVC_VERSION_MAJOR          0x0
+#define STD_SVC_VERSION_MINOR          0x1
+
+/* The macros below are used to identify PSCI calls from the SMC function ID */
+#define PSCI_FID_MASK                  0xffe0u
+#define PSCI_FID_VALUE                 0u
+#define is_psci_fid(_fid) \
+       (((_fid) & PSCI_FID_MASK) == PSCI_FID_VALUE)
+
+#endif /* __STD_SVC_H__ */
diff --git a/services/psci/psci_afflvl_off.c b/services/psci/psci_afflvl_off.c
deleted file mode 100644 (file)
index 3763f6f..0000000
+++ /dev/null
@@ -1,287 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <string.h>
-#include <assert.h>
-#include <debug.h>
-#include <arch_helpers.h>
-#include <console.h>
-#include <platform.h>
-#include <psci.h>
-#include <psci_private.h>
-
-typedef int (*afflvl_off_handler)(unsigned long, aff_map_node *);
-
-/*******************************************************************************
- * The next three functions implement a handler for each supported affinity
- * level which is called when that affinity level is turned off.
- ******************************************************************************/
-static int psci_afflvl0_off(unsigned long mpidr, aff_map_node *cpu_node)
-{
-       unsigned int index, plat_state;
-       int rc = PSCI_E_SUCCESS;
-       unsigned long sctlr = read_sctlr();
-
-       assert(cpu_node->level == MPIDR_AFFLVL0);
-
-       /* State management: mark this cpu as turned off */
-       psci_set_state(cpu_node, PSCI_STATE_OFF);
-
-       /*
-        * Generic management: Get the index for clearing any lingering re-entry
-        * information and allow the secure world to switch itself off
-        */
-
-       /*
-        * Call the cpu off handler registered by the Secure Payload Dispatcher
-        * to let it do any bookeeping. Assume that the SPD always reports an
-        * E_DENIED error if SP refuse to power down
-        */
-       if (psci_spd_pm && psci_spd_pm->svc_off) {
-               rc = psci_spd_pm->svc_off(0);
-               if (rc)
-                       return rc;
-       }
-
-       index = cpu_node->data;
-       memset(&psci_ns_entry_info[index], 0, sizeof(psci_ns_entry_info[index]));
-
-       /*
-        * Arch. management. Perform the necessary steps to flush all
-        * cpu caches.
-        *
-        * TODO: This power down sequence varies across cpus so it needs to be
-        * abstracted out on the basis of the MIDR like in cpu_reset_handler().
-        * Do the bare minimal for the time being. Fix this before porting to
-        * Cortex models.
-        */
-       sctlr &= ~SCTLR_C_BIT;
-       write_sctlr(sctlr);
-
-       /*
-        * CAUTION: This flush to the level of unification makes an assumption
-        * about the cache hierarchy at affinity level 0 (cpu) in the platform.
-        * Ideally the platform should tell psci which levels to flush to exit
-        * coherency.
-        */
-       dcsw_op_louis(DCCISW);
-
-       /*
-        * Plat. management: Perform platform specific actions to turn this
-        * cpu off e.g. exit cpu coherency, program the power controller etc.
-        */
-       if (psci_plat_pm_ops->affinst_off) {
-
-               /* Get the current physical state of this cpu */
-               plat_state = psci_get_phys_state(cpu_node);
-               rc = psci_plat_pm_ops->affinst_off(mpidr,
-                                                  cpu_node->level,
-                                                  plat_state);
-       }
-
-       return rc;
-}
-
-static int psci_afflvl1_off(unsigned long mpidr, aff_map_node *cluster_node)
-{
-       int rc = PSCI_E_SUCCESS;
-       unsigned int plat_state;
-
-       /* Sanity check the cluster level */
-       assert(cluster_node->level == MPIDR_AFFLVL1);
-
-       /* State management: Decrement the cluster reference count */
-       psci_set_state(cluster_node, PSCI_STATE_OFF);
-
-       /*
-        * Keep the physical state of this cluster handy to decide
-        * what action needs to be taken
-        */
-       plat_state = psci_get_phys_state(cluster_node);
-
-       /*
-        * Arch. Management. Flush all levels of caches to PoC if
-        * the cluster is to be shutdown
-        */
-       if (plat_state == PSCI_STATE_OFF)
-               dcsw_op_all(DCCISW);
-
-       /*
-        * Plat. Management. Allow the platform to do its cluster
-        * specific bookeeping e.g. turn off interconnect coherency,
-        * program the power controller etc.
-        */
-       if (psci_plat_pm_ops->affinst_off)
-               rc = psci_plat_pm_ops->affinst_off(mpidr,
-                                                  cluster_node->level,
-                                                  plat_state);
-
-       return rc;
-}
-
-static int psci_afflvl2_off(unsigned long mpidr, aff_map_node *system_node)
-{
-       int rc = PSCI_E_SUCCESS;
-       unsigned int plat_state;
-
-       /* Cannot go beyond this level */
-       assert(system_node->level == MPIDR_AFFLVL2);
-
-       /* State management: Decrement the system reference count */
-       psci_set_state(system_node, PSCI_STATE_OFF);
-
-       /*
-        * Keep the physical state of the system handy to decide what
-        * action needs to be taken
-        */
-       plat_state = psci_get_phys_state(system_node);
-
-       /* No arch. and generic bookeeping to do here currently */
-
-       /*
-        * Plat. Management : Allow the platform to do its bookeeping
-        * at this affinity level
-        */
-       if (psci_plat_pm_ops->affinst_off)
-               rc = psci_plat_pm_ops->affinst_off(mpidr,
-                                                  system_node->level,
-                                                  plat_state);
-       return rc;
-}
-
-static const afflvl_off_handler psci_afflvl_off_handlers[] = {
-       psci_afflvl0_off,
-       psci_afflvl1_off,
-       psci_afflvl2_off,
-};
-
-/*******************************************************************************
- * This function takes an array of pointers to affinity instance nodes in the
- * topology tree and calls the off handler for the corresponding affinity
- * levels
- ******************************************************************************/
-static int psci_call_off_handlers(mpidr_aff_map_nodes mpidr_nodes,
-                                 int start_afflvl,
-                                 int end_afflvl,
-                                 unsigned long mpidr)
-{
-       int rc = PSCI_E_INVALID_PARAMS, level;
-       aff_map_node *node;
-
-       for (level = start_afflvl; level <= end_afflvl; level++) {
-               node = mpidr_nodes[level];
-               if (node == NULL)
-                       continue;
-
-               /*
-                * TODO: In case of an error should there be a way
-                * of restoring what we might have torn down at
-                * lower affinity levels.
-                */
-               rc = psci_afflvl_off_handlers[level](mpidr, node);
-               if (rc != PSCI_E_SUCCESS)
-                       break;
-       }
-
-       return rc;
-}
-
-/*******************************************************************************
- * Top level handler which is called when a cpu wants to power itself down.
- * It's assumed that along with turning the cpu off, higher affinity levels will
- * be turned off as far as possible. It traverses through all the affinity
- * levels performing generic, architectural, platform setup and state management
- * e.g. for a cluster that's to be powered off, it will call the platform
- * specific code which will disable coherency at the interconnect level if the
- * cpu is the last in the cluster. For a cpu it could mean programming the power
- * the power controller etc.
- *
- * The state of all the relevant affinity levels is changed prior to calling the
- * affinity level specific handlers as their actions would depend upon the state
- * the affinity level is about to enter.
- *
- * The affinity level specific handlers are called in ascending order i.e. from
- * the lowest to the highest affinity level implemented by the platform because
- * to turn off affinity level X it is neccesary to turn off affinity level X - 1
- * first.
- *
- * CAUTION: This function is called with coherent stacks so that coherency can
- * be turned off and caches can be flushed safely.
- ******************************************************************************/
-int psci_afflvl_off(unsigned long mpidr,
-                   int start_afflvl,
-                   int end_afflvl)
-{
-       int rc = PSCI_E_SUCCESS;
-       mpidr_aff_map_nodes mpidr_nodes;
-
-       mpidr &= MPIDR_AFFINITY_MASK;;
-
-       /*
-        * Collect the pointers to the nodes in the topology tree for
-        * each affinity instance in the mpidr. If this function does
-        * not return successfully then either the mpidr or the affinity
-        * levels are incorrect. In either case, we cannot return back
-        * to the caller as it would not know what to do.
-        */
-       rc = psci_get_aff_map_nodes(mpidr,
-                                   start_afflvl,
-                                   end_afflvl,
-                                   mpidr_nodes);
-       assert (rc == PSCI_E_SUCCESS);
-
-       /*
-        * This function acquires the lock corresponding to each affinity
-        * level so that by the time all locks are taken, the system topology
-        * is snapshot and state management can be done safely.
-        */
-       psci_acquire_afflvl_locks(mpidr,
-                                 start_afflvl,
-                                 end_afflvl,
-                                 mpidr_nodes);
-
-       /* Perform generic, architecture and platform specific handling */
-       rc = psci_call_off_handlers(mpidr_nodes,
-                                   start_afflvl,
-                                   end_afflvl,
-                                   mpidr);
-
-       /*
-        * Release the locks corresponding to each affinity level in the
-        * reverse order to which they were acquired.
-        */
-       psci_release_afflvl_locks(mpidr,
-                                 start_afflvl,
-                                 end_afflvl,
-                                 mpidr_nodes);
-
-       return rc;
-}
diff --git a/services/psci/psci_afflvl_on.c b/services/psci/psci_afflvl_on.c
deleted file mode 100644 (file)
index 0878f21..0000000
+++ /dev/null
@@ -1,485 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <string.h>
-#include <assert.h>
-#include <debug.h>
-#include <arch_helpers.h>
-#include <console.h>
-#include <platform.h>
-#include <psci.h>
-#include <psci_private.h>
-#include <context_mgmt.h>
-
-typedef int (*afflvl_on_handler)(unsigned long,
-                                aff_map_node *,
-                                unsigned long,
-                                unsigned long);
-
-/*******************************************************************************
- * This function checks whether a cpu which has been requested to be turned on
- * is OFF to begin with.
- ******************************************************************************/
-static int cpu_on_validate_state(aff_map_node *node)
-{
-       unsigned int psci_state;
-
-       /* Get the raw psci state */
-       psci_state = psci_get_state(node);
-
-       if (psci_state == PSCI_STATE_ON || psci_state == PSCI_STATE_SUSPEND)
-               return PSCI_E_ALREADY_ON;
-
-       if (psci_state == PSCI_STATE_ON_PENDING)
-               return PSCI_E_ON_PENDING;
-
-       assert(psci_state == PSCI_STATE_OFF);
-       return PSCI_E_SUCCESS;
-}
-
-/*******************************************************************************
- * Handler routine to turn a cpu on. It takes care of any generic, architectural
- * or platform specific setup required.
- * TODO: Split this code across separate handlers for each type of setup?
- ******************************************************************************/
-static int psci_afflvl0_on(unsigned long target_cpu,
-                          aff_map_node *cpu_node,
-                          unsigned long ns_entrypoint,
-                          unsigned long context_id)
-{
-       unsigned int index, plat_state;
-       unsigned long psci_entrypoint;
-       int rc;
-
-       /* Sanity check to safeguard against data corruption */
-       assert(cpu_node->level == MPIDR_AFFLVL0);
-
-       /*
-        * Generic management: Ensure that the cpu is off to be
-        * turned on
-        */
-       rc = cpu_on_validate_state(cpu_node);
-       if (rc != PSCI_E_SUCCESS)
-               return rc;
-
-       /*
-        * Call the cpu on handler registered by the Secure Payload Dispatcher
-        * to let it do any bookeeping. If the handler encounters an error, it's
-        * expected to assert within
-        */
-       if (psci_spd_pm && psci_spd_pm->svc_on)
-               psci_spd_pm->svc_on(target_cpu);
-
-       /*
-        * Arch. management: Derive the re-entry information for
-        * the non-secure world from the non-secure state from
-        * where this call originated.
-        */
-       index = cpu_node->data;
-       rc = psci_set_ns_entry_info(index, ns_entrypoint, context_id);
-       if (rc != PSCI_E_SUCCESS)
-               return rc;
-
-       /* Set the secure world (EL3) re-entry point after BL1 */
-       psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
-
-       /* State management: Set this cpu's state as ON PENDING */
-       psci_set_state(cpu_node, PSCI_STATE_ON_PENDING);
-
-       /*
-        * Plat. management: Give the platform the current state
-        * of the target cpu to allow it to perform the necessary
-        * steps to power on.
-        */
-       if (psci_plat_pm_ops->affinst_on) {
-
-               /* Get the current physical state of this cpu */
-               plat_state = psci_get_phys_state(cpu_node);
-               rc = psci_plat_pm_ops->affinst_on(target_cpu,
-                                                 psci_entrypoint,
-                                                 ns_entrypoint,
-                                                 cpu_node->level,
-                                                 plat_state);
-       }
-
-       return rc;
-}
-
-/*******************************************************************************
- * Handler routine to turn a cluster on. It takes care or any generic, arch.
- * or platform specific setup required.
- * TODO: Split this code across separate handlers for each type of setup?
- ******************************************************************************/
-static int psci_afflvl1_on(unsigned long target_cpu,
-                          aff_map_node *cluster_node,
-                          unsigned long ns_entrypoint,
-                          unsigned long context_id)
-{
-       int rc = PSCI_E_SUCCESS;
-       unsigned int plat_state;
-       unsigned long psci_entrypoint;
-
-       assert(cluster_node->level == MPIDR_AFFLVL1);
-
-       /*
-        * There is no generic and arch. specific cluster
-        * management required
-        */
-
-       /* State management: Is not required while turning a cluster on */
-
-       /*
-        * Plat. management: Give the platform the current state
-        * of the target cpu to allow it to perform the necessary
-        * steps to power on.
-        */
-       if (psci_plat_pm_ops->affinst_on) {
-               plat_state = psci_get_phys_state(cluster_node);
-               psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
-               rc = psci_plat_pm_ops->affinst_on(target_cpu,
-                                                 psci_entrypoint,
-                                                 ns_entrypoint,
-                                                 cluster_node->level,
-                                                 plat_state);
-       }
-
-       return rc;
-}
-
-/*******************************************************************************
- * Handler routine to turn a cluster of clusters on. It takes care or any
- * generic, arch. or platform specific setup required.
- * TODO: Split this code across separate handlers for each type of setup?
- ******************************************************************************/
-static int psci_afflvl2_on(unsigned long target_cpu,
-                          aff_map_node *system_node,
-                          unsigned long ns_entrypoint,
-                          unsigned long context_id)
-{
-       int rc = PSCI_E_SUCCESS;
-       unsigned int plat_state;
-       unsigned long psci_entrypoint;
-
-       /* Cannot go beyond affinity level 2 in this psci imp. */
-       assert(system_node->level == MPIDR_AFFLVL2);
-
-       /*
-        * There is no generic and arch. specific system management
-        * required
-        */
-
-       /* State management: Is not required while turning a system on */
-
-       /*
-        * Plat. management: Give the platform the current state
-        * of the target cpu to allow it to perform the necessary
-        * steps to power on.
-        */
-       if (psci_plat_pm_ops->affinst_on) {
-               plat_state = psci_get_phys_state(system_node);
-               psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
-               rc = psci_plat_pm_ops->affinst_on(target_cpu,
-                                                 psci_entrypoint,
-                                                 ns_entrypoint,
-                                                 system_node->level,
-                                                 plat_state);
-       }
-
-       return rc;
-}
-
-/* Private data structure to make this handlers accessible through indexing */
-static const afflvl_on_handler psci_afflvl_on_handlers[] = {
-       psci_afflvl0_on,
-       psci_afflvl1_on,
-       psci_afflvl2_on,
-};
-
-/*******************************************************************************
- * This function takes an array of pointers to affinity instance nodes in the
- * topology tree and calls the on handler for the corresponding affinity
- * levels
- ******************************************************************************/
-static int psci_call_on_handlers(mpidr_aff_map_nodes target_cpu_nodes,
-                                int start_afflvl,
-                                int end_afflvl,
-                                unsigned long target_cpu,
-                                unsigned long entrypoint,
-                                unsigned long context_id)
-{
-       int rc = PSCI_E_INVALID_PARAMS, level;
-       aff_map_node *node;
-
-       for (level = end_afflvl; level >= start_afflvl; level--) {
-               node = target_cpu_nodes[level];
-               if (node == NULL)
-                       continue;
-
-               /*
-                * TODO: In case of an error should there be a way
-                * of undoing what we might have setup at higher
-                * affinity levels.
-                */
-               rc = psci_afflvl_on_handlers[level](target_cpu,
-                                                   node,
-                                                   entrypoint,
-                                                   context_id);
-               if (rc != PSCI_E_SUCCESS)
-                       break;
-       }
-
-       return rc;
-}
-
-/*******************************************************************************
- * Generic handler which is called to physically power on a cpu identified by
- * its mpidr. It traverses through all the affinity levels performing generic,
- * architectural, platform setup and state management e.g. for a cpu that is
- * to be powered on, it will ensure that enough information is stashed for it
- * to resume execution in the non-secure security state.
- *
- * The state of all the relevant affinity levels is changed after calling the
- * affinity level specific handlers as their actions would depend upon the state
- * the affinity level is currently in.
- *
- * The affinity level specific handlers are called in descending order i.e. from
- * the highest to the lowest affinity level implemented by the platform because
- * to turn on affinity level X it is neccesary to turn on affinity level X + 1
- * first.
- ******************************************************************************/
-int psci_afflvl_on(unsigned long target_cpu,
-                  unsigned long entrypoint,
-                  unsigned long context_id,
-                  int start_afflvl,
-                  int end_afflvl)
-{
-       int rc = PSCI_E_SUCCESS;
-       mpidr_aff_map_nodes target_cpu_nodes;
-       unsigned long mpidr = read_mpidr() & MPIDR_AFFINITY_MASK;
-
-       /*
-        * Collect the pointers to the nodes in the topology tree for
-        * each affinity instance in the mpidr. If this function does
-        * not return successfully then either the mpidr or the affinity
-        * levels are incorrect.
-        */
-       rc = psci_get_aff_map_nodes(target_cpu,
-                                   start_afflvl,
-                                   end_afflvl,
-                                   target_cpu_nodes);
-       if (rc != PSCI_E_SUCCESS)
-               return rc;
-
-
-       /*
-        * This function acquires the lock corresponding to each affinity
-        * level so that by the time all locks are taken, the system topology
-        * is snapshot and state management can be done safely.
-        */
-       psci_acquire_afflvl_locks(mpidr,
-                                 start_afflvl,
-                                 end_afflvl,
-                                 target_cpu_nodes);
-
-       /* Perform generic, architecture and platform specific handling. */
-       rc = psci_call_on_handlers(target_cpu_nodes,
-                                  start_afflvl,
-                                  end_afflvl,
-                                  target_cpu,
-                                  entrypoint,
-                                  context_id);
-
-       /*
-        * This loop releases the lock corresponding to each affinity level
-        * in the reverse order to which they were acquired.
-        */
-       psci_release_afflvl_locks(mpidr,
-                                 start_afflvl,
-                                 end_afflvl,
-                                 target_cpu_nodes);
-
-       return rc;
-}
-
-/*******************************************************************************
- * The following functions finish an earlier affinity power on request. They
- * are called by the common finisher routine in psci_common.c.
- ******************************************************************************/
-static unsigned int psci_afflvl0_on_finish(unsigned long mpidr,
-                                          aff_map_node *cpu_node)
-{
-       unsigned int index, plat_state, state, rc = PSCI_E_SUCCESS;
-
-       assert(cpu_node->level == MPIDR_AFFLVL0);
-
-       /* Ensure we have been explicitly woken up by another cpu */
-       state = psci_get_state(cpu_node);
-       assert(state == PSCI_STATE_ON_PENDING);
-
-       /*
-        * Plat. management: Perform the platform specific actions
-        * for this cpu e.g. enabling the gic or zeroing the mailbox
-        * register. The actual state of this cpu has already been
-        * changed.
-        */
-       if (psci_plat_pm_ops->affinst_on_finish) {
-
-               /* Get the physical state of this cpu */
-               plat_state = get_phys_state(state);
-               rc = psci_plat_pm_ops->affinst_on_finish(mpidr,
-                                                        cpu_node->level,
-                                                        plat_state);
-               assert(rc == PSCI_E_SUCCESS);
-       }
-
-       /*
-        * Arch. management: Turn on mmu & restore architectural state
-        */
-       enable_mmu();
-
-       /*
-        * All the platform specific actions for turning this cpu
-        * on have completed. Perform enough arch.initialization
-        * to run in the non-secure address space.
-        */
-       bl31_arch_setup();
-
-       /*
-        * Use the more complex exception vectors to enable SPD
-        * initialisation. SP_EL3 should point to a 'cpu_context'
-        * structure which has an exception stack allocated. The
-        * calling cpu should have set the context already
-        */
-       assert(cm_get_context(mpidr, NON_SECURE));
-       cm_set_next_eret_context(NON_SECURE);
-       write_vbar_el3((uint64_t) runtime_exceptions);
-
-       /*
-        * Call the cpu on finish handler registered by the Secure Payload
-        * Dispatcher to let it do any bookeeping. If the handler encounters an
-        * error, it's expected to assert within
-        */
-       if (psci_spd_pm && psci_spd_pm->svc_on_finish)
-               psci_spd_pm->svc_on_finish(0);
-
-       /*
-        * Generic management: Now we just need to retrieve the
-        * information that we had stashed away during the cpu_on
-        * call to set this cpu on its way. First get the index
-        * for restoring the re-entry info
-        */
-       index = cpu_node->data;
-       psci_get_ns_entry_info(index);
-
-       /* State management: mark this cpu as on */
-       psci_set_state(cpu_node, PSCI_STATE_ON);
-
-       /* Clean caches before re-entering normal world */
-       dcsw_op_louis(DCCSW);
-
-       return rc;
-}
-
-static unsigned int psci_afflvl1_on_finish(unsigned long mpidr,
-                                          aff_map_node *cluster_node)
-{
-       unsigned int plat_state, rc = PSCI_E_SUCCESS;
-
-       assert(cluster_node->level == MPIDR_AFFLVL1);
-
-       /*
-        * Plat. management: Perform the platform specific actions
-        * as per the old state of the cluster e.g. enabling
-        * coherency at the interconnect depends upon the state with
-        * which this cluster was powered up. If anything goes wrong
-        * then assert as there is no way to recover from this
-        * situation.
-        */
-       if (psci_plat_pm_ops->affinst_on_finish) {
-
-               /* Get the physical state of this cluster */
-               plat_state = psci_get_phys_state(cluster_node);
-               rc = psci_plat_pm_ops->affinst_on_finish(mpidr,
-                                                        cluster_node->level,
-                                                        plat_state);
-               assert(rc == PSCI_E_SUCCESS);
-       }
-
-       /* State management: Increment the cluster reference count */
-       psci_set_state(cluster_node, PSCI_STATE_ON);
-
-       return rc;
-}
-
-
-static unsigned int psci_afflvl2_on_finish(unsigned long mpidr,
-                                          aff_map_node *system_node)
-{
-       unsigned int plat_state, rc = PSCI_E_SUCCESS;
-
-       /* Cannot go beyond this affinity level */
-       assert(system_node->level == MPIDR_AFFLVL2);
-
-       /*
-        * Currently, there are no architectural actions to perform
-        * at the system level.
-        */
-
-       /*
-        * Plat. management: Perform the platform specific actions
-        * as per the old state of the cluster e.g. enabling
-        * coherency at the interconnect depends upon the state with
-        * which this cluster was powered up. If anything goes wrong
-        * then assert as there is no way to recover from this
-        * situation.
-        */
-       if (psci_plat_pm_ops->affinst_on_finish) {
-
-               /* Get the physical state of the system */
-               plat_state = psci_get_phys_state(system_node);
-               rc = psci_plat_pm_ops->affinst_on_finish(mpidr,
-                                                        system_node->level,
-                                                        plat_state);
-               assert(rc == PSCI_E_SUCCESS);
-       }
-
-       /* State management: Increment the system reference count */
-       psci_set_state(system_node, PSCI_STATE_ON);
-
-       return rc;
-}
-
-const afflvl_power_on_finisher psci_afflvl_on_finishers[] = {
-       psci_afflvl0_on_finish,
-       psci_afflvl1_on_finish,
-       psci_afflvl2_on_finish,
-};
-
diff --git a/services/psci/psci_afflvl_suspend.c b/services/psci/psci_afflvl_suspend.c
deleted file mode 100644 (file)
index 138d033..0000000
+++ /dev/null
@@ -1,557 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <string.h>
-#include <assert.h>
-#include <debug.h>
-#include <arch_helpers.h>
-#include <console.h>
-#include <platform.h>
-#include <psci.h>
-#include <psci_private.h>
-#include <context_mgmt.h>
-
-typedef int (*afflvl_suspend_handler)(unsigned long,
-                                     aff_map_node *,
-                                     unsigned long,
-                                     unsigned long,
-                                     unsigned int);
-
-/*******************************************************************************
- * This function sets the affinity level till which the current cpu is being
- * powered down to during a cpu_suspend call
- ******************************************************************************/
-void psci_set_suspend_afflvl(aff_map_node *node, int afflvl)
-{
-       /*
-        * Check that nobody else is calling this function on our behalf &
-        * this information is being set only in the cpu node
-        */
-       assert(node->mpidr == (read_mpidr() & MPIDR_AFFINITY_MASK));
-       assert(node->level == MPIDR_AFFLVL0);
-
-       /*
-        * Store the affinity level we are powering down to in our context.
-        * The cache flush in the suspend code will ensure that this info
-        * is available immediately upon resuming.
-        */
-       psci_suspend_context[node->data].suspend_level = afflvl;
-}
-
-/*******************************************************************************
- * This function gets the affinity level till which the current cpu was powered
- * down during a cpu_suspend call.
- ******************************************************************************/
-int psci_get_suspend_afflvl(aff_map_node *node)
-{
-       /* Return the target affinity level */
-       return psci_suspend_context[node->data].suspend_level;
-}
-
-/*******************************************************************************
- * The next three functions implement a handler for each supported affinity
- * level which is called when that affinity level is about to be suspended.
- ******************************************************************************/
-static int psci_afflvl0_suspend(unsigned long mpidr,
-                               aff_map_node *cpu_node,
-                               unsigned long ns_entrypoint,
-                               unsigned long context_id,
-                               unsigned int power_state)
-{
-       unsigned int index, plat_state;
-       unsigned long psci_entrypoint, sctlr = read_sctlr();
-       el3_state *saved_el3_state;
-       int rc = PSCI_E_SUCCESS;
-
-       /* Sanity check to safeguard against data corruption */
-       assert(cpu_node->level == MPIDR_AFFLVL0);
-
-       /*
-        * Generic management: Store the re-entry information for the non-secure
-        * world and allow the secure world to suspend itself
-        */
-
-       /*
-        * Call the cpu suspend handler registered by the Secure Payload
-        * Dispatcher to let it do any bookeeping. If the handler encounters an
-        * error, it's expected to assert within
-        */
-       if (psci_spd_pm && psci_spd_pm->svc_suspend)
-               psci_spd_pm->svc_suspend(power_state);
-
-       /* State management: mark this cpu as suspended */
-       psci_set_state(cpu_node, PSCI_STATE_SUSPEND);
-
-       /*
-        * Generic management: Store the re-entry information for the
-        * non-secure world
-        */
-       index = cpu_node->data;
-       rc = psci_set_ns_entry_info(index, ns_entrypoint, context_id);
-       if (rc != PSCI_E_SUCCESS)
-               return rc;
-
-       /*
-        * Arch. management: Save the EL3 state in the 'cpu_context'
-        * structure that has been allocated for this cpu, flush the
-        * L1 caches and exit intra-cluster coherency et al
-        */
-       cm_el3_sysregs_context_save(NON_SECURE);
-       rc = PSCI_E_SUCCESS;
-
-       /*
-        * The EL3 state to PoC since it will be accessed after a
-        * reset with the caches turned off
-        */
-       saved_el3_state = get_el3state_ctx(cm_get_context(mpidr, NON_SECURE));
-       flush_dcache_range((uint64_t) saved_el3_state, sizeof(*saved_el3_state));
-
-       /* Set the secure world (EL3) re-entry point after BL1 */
-       psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
-
-       /*
-        * Arch. management. Perform the necessary steps to flush all
-        * cpu caches.
-        *
-        * TODO: This power down sequence varies across cpus so it needs to be
-        * abstracted out on the basis of the MIDR like in cpu_reset_handler().
-        * Do the bare minimal for the time being. Fix this before porting to
-        * Cortex models.
-        */
-       sctlr &= ~SCTLR_C_BIT;
-       write_sctlr(sctlr);
-
-       /*
-        * CAUTION: This flush to the level of unification makes an assumption
-        * about the cache hierarchy at affinity level 0 (cpu) in the platform.
-        * Ideally the platform should tell psci which levels to flush to exit
-        * coherency.
-        */
-       dcsw_op_louis(DCCISW);
-
-       /*
-        * Plat. management: Allow the platform to perform the
-        * necessary actions to turn off this cpu e.g. set the
-        * platform defined mailbox with the psci entrypoint,
-        * program the power controller etc.
-        */
-       if (psci_plat_pm_ops->affinst_suspend) {
-               plat_state = psci_get_phys_state(cpu_node);
-               rc = psci_plat_pm_ops->affinst_suspend(mpidr,
-                                                      psci_entrypoint,
-                                                      ns_entrypoint,
-                                                      cpu_node->level,
-                                                      plat_state);
-       }
-
-       return rc;
-}
-
-static int psci_afflvl1_suspend(unsigned long mpidr,
-                               aff_map_node *cluster_node,
-                               unsigned long ns_entrypoint,
-                               unsigned long context_id,
-                               unsigned int power_state)
-{
-       int rc = PSCI_E_SUCCESS;
-       unsigned int plat_state;
-       unsigned long psci_entrypoint;
-
-       /* Sanity check the cluster level */
-       assert(cluster_node->level == MPIDR_AFFLVL1);
-
-       /* State management: Decrement the cluster reference count */
-       psci_set_state(cluster_node, PSCI_STATE_SUSPEND);
-
-       /*
-        * Keep the physical state of this cluster handy to decide
-        * what action needs to be taken
-        */
-       plat_state = psci_get_phys_state(cluster_node);
-
-       /*
-        * Arch. management: Flush all levels of caches to PoC if the
-        * cluster is to be shutdown
-        */
-       if (plat_state == PSCI_STATE_OFF)
-               dcsw_op_all(DCCISW);
-
-       /*
-        * Plat. Management. Allow the platform to do its cluster
-        * specific bookeeping e.g. turn off interconnect coherency,
-        * program the power controller etc.
-        */
-       if (psci_plat_pm_ops->affinst_suspend) {
-
-               /*
-                * Sending the psci entrypoint is currently redundant
-                * beyond affinity level 0 but one never knows what a
-                * platform might do. Also it allows us to keep the
-                * platform handler prototype the same.
-                */
-               psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
-               rc = psci_plat_pm_ops->affinst_suspend(mpidr,
-                                                      psci_entrypoint,
-                                                      ns_entrypoint,
-                                                      cluster_node->level,
-                                                      plat_state);
-       }
-
-       return rc;
-}
-
-
-static int psci_afflvl2_suspend(unsigned long mpidr,
-                               aff_map_node *system_node,
-                               unsigned long ns_entrypoint,
-                               unsigned long context_id,
-                               unsigned int power_state)
-{
-       int rc = PSCI_E_SUCCESS;
-       unsigned int plat_state;
-       unsigned long psci_entrypoint;
-
-       /* Cannot go beyond this */
-       assert(system_node->level == MPIDR_AFFLVL2);
-
-       /* State management: Decrement the system reference count */
-       psci_set_state(system_node, PSCI_STATE_SUSPEND);
-
-       /*
-        * Keep the physical state of the system handy to decide what
-        * action needs to be taken
-        */
-       plat_state = psci_get_phys_state(system_node);
-
-       /*
-        * Plat. Management : Allow the platform to do its bookeeping
-        * at this affinity level
-        */
-       if (psci_plat_pm_ops->affinst_suspend) {
-
-               /*
-                * Sending the psci entrypoint is currently redundant
-                * beyond affinity level 0 but one never knows what a
-                * platform might do. Also it allows us to keep the
-                * platform handler prototype the same.
-                */
-               psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
-               rc = psci_plat_pm_ops->affinst_suspend(mpidr,
-                                                      psci_entrypoint,
-                                                      ns_entrypoint,
-                                                      system_node->level,
-                                                      plat_state);
-       }
-
-       return rc;
-}
-
-static const afflvl_suspend_handler psci_afflvl_suspend_handlers[] = {
-       psci_afflvl0_suspend,
-       psci_afflvl1_suspend,
-       psci_afflvl2_suspend,
-};
-
-/*******************************************************************************
- * This function takes an array of pointers to affinity instance nodes in the
- * topology tree and calls the suspend handler for the corresponding affinity
- * levels
- ******************************************************************************/
-static int psci_call_suspend_handlers(mpidr_aff_map_nodes mpidr_nodes,
-                                     int start_afflvl,
-                                     int end_afflvl,
-                                     unsigned long mpidr,
-                                     unsigned long entrypoint,
-                                     unsigned long context_id,
-                                     unsigned int power_state)
-{
-       int rc = PSCI_E_INVALID_PARAMS, level;
-       aff_map_node *node;
-
-       for (level = start_afflvl; level <= end_afflvl; level++) {
-               node = mpidr_nodes[level];
-               if (node == NULL)
-                       continue;
-
-               /*
-                * TODO: In case of an error should there be a way
-                * of restoring what we might have torn down at
-                * lower affinity levels.
-                */
-               rc = psci_afflvl_suspend_handlers[level](mpidr,
-                                                        node,
-                                                        entrypoint,
-                                                        context_id,
-                                                        power_state);
-               if (rc != PSCI_E_SUCCESS)
-                       break;
-       }
-
-       return rc;
-}
-
-/*******************************************************************************
- * Top level handler which is called when a cpu wants to suspend its execution.
- * It is assumed that along with turning the cpu off, higher affinity levels
- * until the target affinity level will be turned off as well. It traverses
- * through all the affinity levels performing generic, architectural, platform
- * setup and state management e.g. for a cluster that's to be suspended, it will
- * call the platform specific code which will disable coherency at the
- * interconnect level if the cpu is the last in the cluster. For a cpu it could
- * mean programming the power controller etc.
- *
- * The state of all the relevant affinity levels is changed prior to calling the
- * affinity level specific handlers as their actions would depend upon the state
- * the affinity level is about to enter.
- *
- * The affinity level specific handlers are called in ascending order i.e. from
- * the lowest to the highest affinity level implemented by the platform because
- * to turn off affinity level X it is neccesary to turn off affinity level X - 1
- * first.
- *
- * CAUTION: This function is called with coherent stacks so that coherency can
- * be turned off and caches can be flushed safely.
- ******************************************************************************/
-int psci_afflvl_suspend(unsigned long mpidr,
-                       unsigned long entrypoint,
-                       unsigned long context_id,
-                       unsigned int power_state,
-                       int start_afflvl,
-                       int end_afflvl)
-{
-       int rc = PSCI_E_SUCCESS;
-       mpidr_aff_map_nodes mpidr_nodes;
-
-       mpidr &= MPIDR_AFFINITY_MASK;
-
-       /*
-        * Collect the pointers to the nodes in the topology tree for
-        * each affinity instance in the mpidr. If this function does
-        * not return successfully then either the mpidr or the affinity
-        * levels are incorrect.
-        */
-       rc = psci_get_aff_map_nodes(mpidr,
-                                   start_afflvl,
-                                   end_afflvl,
-                                   mpidr_nodes);
-       if (rc != PSCI_E_SUCCESS)
-               return rc;
-
-       /*
-        * This function acquires the lock corresponding to each affinity
-        * level so that by the time all locks are taken, the system topology
-        * is snapshot and state management can be done safely.
-        */
-       psci_acquire_afflvl_locks(mpidr,
-                                 start_afflvl,
-                                 end_afflvl,
-                                 mpidr_nodes);
-
-
-       /* Save the affinity level till which this cpu can be powered down */
-       psci_set_suspend_afflvl(mpidr_nodes[MPIDR_AFFLVL0], end_afflvl);
-
-       /* Perform generic, architecture and platform specific handling */
-       rc = psci_call_suspend_handlers(mpidr_nodes,
-                                       start_afflvl,
-                                       end_afflvl,
-                                       mpidr,
-                                       entrypoint,
-                                       context_id,
-                                       power_state);
-
-       /*
-        * Release the locks corresponding to each affinity level in the
-        * reverse order to which they were acquired.
-        */
-       psci_release_afflvl_locks(mpidr,
-                                 start_afflvl,
-                                 end_afflvl,
-                                 mpidr_nodes);
-
-       return rc;
-}
-
-/*******************************************************************************
- * The following functions finish an earlier affinity suspend request. They
- * are called by the common finisher routine in psci_common.c.
- ******************************************************************************/
-static unsigned int psci_afflvl0_suspend_finish(unsigned long mpidr,
-                                               aff_map_node *cpu_node)
-{
-       unsigned int index, plat_state, state, rc = PSCI_E_SUCCESS;
-       int32_t suspend_level;
-
-       assert(cpu_node->level == MPIDR_AFFLVL0);
-
-       /* Ensure we have been woken up from a suspended state */
-       state = psci_get_state(cpu_node);
-       assert(state == PSCI_STATE_SUSPEND);
-
-       /*
-        * Plat. management: Perform the platform specific actions
-        * before we change the state of the cpu e.g. enabling the
-        * gic or zeroing the mailbox register. If anything goes
-        * wrong then assert as there is no way to recover from this
-        * situation.
-        */
-       if (psci_plat_pm_ops->affinst_suspend_finish) {
-
-               /* Get the physical state of this cpu */
-               plat_state = get_phys_state(state);
-               rc = psci_plat_pm_ops->affinst_suspend_finish(mpidr,
-                                                             cpu_node->level,
-                                                             plat_state);
-               assert(rc == PSCI_E_SUCCESS);
-       }
-
-       /* Get the index for restoring the re-entry information */
-       index = cpu_node->data;
-
-       /*
-        * Arch. management: Restore the stashed EL3 architectural
-        * context from the 'cpu_context' structure for this cpu.
-        */
-       cm_el3_sysregs_context_restore(NON_SECURE);
-       rc = PSCI_E_SUCCESS;
-
-       /*
-        * Use the more complex exception vectors to enable SPD
-        * initialisation. SP_EL3 should point to a 'cpu_context'
-        * structure which has an exception stack allocated. The
-        * non-secure context should have been set on this cpu
-        * prior to suspension.
-        */
-       assert(cm_get_context(mpidr, NON_SECURE));
-       cm_set_next_eret_context(NON_SECURE);
-       write_vbar_el3((uint64_t) runtime_exceptions);
-
-       /*
-        * Call the cpu suspend finish handler registered by the Secure Payload
-        * Dispatcher to let it do any bookeeping. If the handler encounters an
-        * error, it's expected to assert within
-        */
-       if (psci_spd_pm && psci_spd_pm->svc_suspend) {
-               suspend_level = psci_get_suspend_afflvl(cpu_node);
-               psci_spd_pm->svc_suspend_finish(suspend_level);
-       }
-
-       /*
-        * Generic management: Now we just need to retrieve the
-        * information that we had stashed away during the suspend
-        * call to set this cpu on its way.
-        */
-       psci_get_ns_entry_info(index);
-
-       /* State management: mark this cpu as on */
-       psci_set_state(cpu_node, PSCI_STATE_ON);
-
-       /* Clean caches before re-entering normal world */
-       dcsw_op_louis(DCCSW);
-
-       return rc;
-}
-
-static unsigned int psci_afflvl1_suspend_finish(unsigned long mpidr,
-                                               aff_map_node *cluster_node)
-{
-       unsigned int plat_state, rc = PSCI_E_SUCCESS;
-
-       assert(cluster_node->level == MPIDR_AFFLVL1);
-
-       /*
-        * Plat. management: Perform the platform specific actions
-        * as per the old state of the cluster e.g. enabling
-        * coherency at the interconnect depends upon the state with
-        * which this cluster was powered up. If anything goes wrong
-        * then assert as there is no way to recover from this
-        * situation.
-        */
-       if (psci_plat_pm_ops->affinst_suspend_finish) {
-
-               /* Get the physical state of this cpu */
-               plat_state = psci_get_phys_state(cluster_node);
-               rc = psci_plat_pm_ops->affinst_suspend_finish(mpidr,
-                                                             cluster_node->level,
-                                                             plat_state);
-               assert(rc == PSCI_E_SUCCESS);
-       }
-
-       /* State management: Increment the cluster reference count */
-       psci_set_state(cluster_node, PSCI_STATE_ON);
-
-       return rc;
-}
-
-
-static unsigned int psci_afflvl2_suspend_finish(unsigned long mpidr,
-                                               aff_map_node *system_node)
-{
-       unsigned int plat_state, rc = PSCI_E_SUCCESS;;
-
-       /* Cannot go beyond this affinity level */
-       assert(system_node->level == MPIDR_AFFLVL2);
-
-       /*
-        * Currently, there are no architectural actions to perform
-        * at the system level.
-        */
-
-       /*
-        * Plat. management: Perform the platform specific actions
-        * as per the old state of the cluster e.g. enabling
-        * coherency at the interconnect depends upon the state with
-        * which this cluster was powered up. If anything goes wrong
-        * then assert as there is no way to recover from this
-        * situation.
-        */
-       if (psci_plat_pm_ops->affinst_suspend_finish) {
-
-               /* Get the physical state of the system */
-               plat_state = psci_get_phys_state(system_node);
-               rc = psci_plat_pm_ops->affinst_suspend_finish(mpidr,
-                                                             system_node->level,
-                                                             plat_state);
-               assert(rc == PSCI_E_SUCCESS);
-       }
-
-       /* State management: Increment the system reference count */
-       psci_set_state(system_node, PSCI_STATE_ON);
-
-       return rc;
-}
-
-const afflvl_power_on_finisher psci_afflvl_suspend_finishers[] = {
-       psci_afflvl0_suspend_finish,
-       psci_afflvl1_suspend_finish,
-       psci_afflvl2_suspend_finish,
-};
-
diff --git a/services/psci/psci_common.c b/services/psci/psci_common.c
deleted file mode 100644 (file)
index 236309c..0000000
+++ /dev/null
@@ -1,568 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <string.h>
-#include <assert.h>
-#include <arch_helpers.h>
-#include <console.h>
-#include <platform.h>
-#include <psci.h>
-#include <psci_private.h>
-#include <context_mgmt.h>
-#include <runtime_svc.h>
-#include "debug.h"
-
-/*
- * SPD power management operations, expected to be supplied by the registered
- * SPD on successful SP initialization
- */
-const spd_pm_ops *psci_spd_pm;
-
-/*******************************************************************************
- * Arrays that contains information needs to resume a cpu's execution when woken
- * out of suspend or off states. 'psci_ns_einfo_idx' keeps track of the next
- * free index in the 'psci_ns_entry_info' & 'psci_suspend_context' arrays. Each
- * cpu is allocated a single entry in each array during startup.
- ******************************************************************************/
-suspend_context psci_suspend_context[PSCI_NUM_AFFS];
-ns_entry_info psci_ns_entry_info[PSCI_NUM_AFFS];
-unsigned int psci_ns_einfo_idx;
-
-/*******************************************************************************
- * Grand array that holds the platform's topology information for state
- * management of affinity instances. Each node (aff_map_node) in the array
- * corresponds to an affinity instance e.g. cluster, cpu within an mpidr
- ******************************************************************************/
-aff_map_node psci_aff_map[PSCI_NUM_AFFS]
-__attribute__ ((section("tzfw_coherent_mem")));
-
-/*******************************************************************************
- * In a system, a certain number of affinity instances are present at an
- * affinity level. The cumulative number of instances across all levels are
- * stored in 'psci_aff_map'. The topology tree has been flattenned into this
- * array. To retrieve nodes, information about the extents of each affinity
- * level i.e. start index and end index needs to be present. 'psci_aff_limits'
- * stores this information.
- ******************************************************************************/
-aff_limits_node psci_aff_limits[MPIDR_MAX_AFFLVL + 1];
-
-/*******************************************************************************
- * Pointer to functions exported by the platform to complete power mgmt. ops
- ******************************************************************************/
-plat_pm_ops *psci_plat_pm_ops;
-
-/*******************************************************************************
- * Routine to return the maximum affinity level to traverse to after a cpu has
- * been physically powered up. It is expected to be called immediately after
- * reset from assembler code. It has to find its 'aff_map_node' instead of
- * getting it as an argument.
- * TODO: Calling psci_get_aff_map_node() with the MMU disabled is slow. Add
- * support to allow faster access to the target affinity level.
- ******************************************************************************/
-int get_power_on_target_afflvl(unsigned long mpidr)
-{
-       aff_map_node *node;
-       unsigned int state;
-
-       /* Retrieve our node from the topology tree */
-       node = psci_get_aff_map_node(mpidr & MPIDR_AFFINITY_MASK,
-                       MPIDR_AFFLVL0);
-       assert(node);
-
-       /*
-        * Return the maximum supported affinity level if this cpu was off.
-        * Call the handler in the suspend code if this cpu had been suspended.
-        * Any other state is invalid.
-        */
-       state = psci_get_state(node);
-       if (state == PSCI_STATE_ON_PENDING)
-               return get_max_afflvl();
-
-       if (state == PSCI_STATE_SUSPEND)
-               return psci_get_suspend_afflvl(node);
-
-       return PSCI_E_INVALID_PARAMS;
-}
-
-/*******************************************************************************
- * Simple routine to retrieve the maximum affinity level supported by the
- * platform and check that it makes sense.
- ******************************************************************************/
-int get_max_afflvl()
-{
-       int aff_lvl;
-
-       aff_lvl = plat_get_max_afflvl();
-       assert(aff_lvl <= MPIDR_MAX_AFFLVL && aff_lvl >= MPIDR_AFFLVL0);
-
-       return aff_lvl;
-}
-
-/*******************************************************************************
- * Simple routine to set the id of an affinity instance at a given level in the
- * mpidr.
- ******************************************************************************/
-unsigned long mpidr_set_aff_inst(unsigned long mpidr,
-                                unsigned char aff_inst,
-                                int aff_lvl)
-{
-       unsigned long aff_shift;
-
-       assert(aff_lvl <= MPIDR_AFFLVL3);
-
-       /*
-        * Decide the number of bits to shift by depending upon
-        * the affinity level
-        */
-       aff_shift = get_afflvl_shift(aff_lvl);
-
-       /* Clear the existing affinity instance & set the new one*/
-       mpidr &= ~(MPIDR_AFFLVL_MASK << aff_shift);
-       mpidr |= aff_inst << aff_shift;
-
-       return mpidr;
-}
-
-/*******************************************************************************
- * This function sanity checks a range of affinity levels.
- ******************************************************************************/
-int psci_check_afflvl_range(int start_afflvl, int end_afflvl)
-{
-       /* Sanity check the parameters passed */
-       if (end_afflvl > MPIDR_MAX_AFFLVL)
-               return PSCI_E_INVALID_PARAMS;
-
-       if (start_afflvl < MPIDR_AFFLVL0)
-               return PSCI_E_INVALID_PARAMS;
-
-       if (end_afflvl < start_afflvl)
-               return PSCI_E_INVALID_PARAMS;
-
-       return PSCI_E_SUCCESS;
-}
-
-/*******************************************************************************
- * This function is passed an array of pointers to affinity level nodes in the
- * topology tree for an mpidr. It picks up locks for each affinity level bottom
- * up in the range specified.
- ******************************************************************************/
-void psci_acquire_afflvl_locks(unsigned long mpidr,
-                              int start_afflvl,
-                              int end_afflvl,
-                              mpidr_aff_map_nodes mpidr_nodes)
-{
-       int level;
-
-       for (level = start_afflvl; level <= end_afflvl; level++) {
-               if (mpidr_nodes[level] == NULL)
-                       continue;
-               bakery_lock_get(mpidr, &mpidr_nodes[level]->lock);
-       }
-}
-
-/*******************************************************************************
- * This function is passed an array of pointers to affinity level nodes in the
- * topology tree for an mpidr. It releases the lock for each affinity level top
- * down in the range specified.
- ******************************************************************************/
-void psci_release_afflvl_locks(unsigned long mpidr,
-                              int start_afflvl,
-                              int end_afflvl,
-                              mpidr_aff_map_nodes mpidr_nodes)
-{
-       int level;
-
-       for (level = end_afflvl; level >= start_afflvl; level--) {
-               if (mpidr_nodes[level] == NULL)
-                       continue;
-               bakery_lock_release(mpidr, &mpidr_nodes[level]->lock);
-       }
-}
-
-/*******************************************************************************
- * Simple routine to determine whether an affinity instance at a given level
- * in an mpidr exists or not.
- ******************************************************************************/
-int psci_validate_mpidr(unsigned long mpidr, int level)
-{
-       aff_map_node *node;
-
-       node = psci_get_aff_map_node(mpidr, level);
-       if (node && (node->state & PSCI_AFF_PRESENT))
-               return PSCI_E_SUCCESS;
-       else
-               return PSCI_E_INVALID_PARAMS;
-}
-
-/*******************************************************************************
- * This function retrieves all the stashed information needed to correctly
- * resume a cpu's execution in the non-secure state after it has been physically
- * powered on i.e. turned ON or resumed from SUSPEND
- ******************************************************************************/
-void psci_get_ns_entry_info(unsigned int index)
-{
-       unsigned long sctlr = 0, scr, el_status, id_aa64pfr0;
-       uint64_t mpidr = read_mpidr();
-       cpu_context *ns_entry_context;
-       gp_regs *ns_entry_gpregs;
-
-       scr = read_scr();
-
-       /* Find out which EL we are going to */
-       id_aa64pfr0 = read_id_aa64pfr0_el1();
-       el_status = (id_aa64pfr0 >> ID_AA64PFR0_EL2_SHIFT) &
-               ID_AA64PFR0_ELX_MASK;
-
-       /* Restore endianess */
-       if (psci_ns_entry_info[index].sctlr & SCTLR_EE_BIT)
-               sctlr |= SCTLR_EE_BIT;
-       else
-               sctlr &= ~SCTLR_EE_BIT;
-
-       /* Turn off MMU and Caching */
-       sctlr &= ~(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_M_BIT);
-
-       /* Set the register width */
-       if (psci_ns_entry_info[index].scr & SCR_RW_BIT)
-               scr |= SCR_RW_BIT;
-       else
-               scr &= ~SCR_RW_BIT;
-
-       scr |= SCR_NS_BIT;
-
-       if (el_status)
-               write_sctlr_el2(sctlr);
-       else
-               write_sctlr_el1(sctlr);
-
-       /* Fulfill the cpu_on entry reqs. as per the psci spec */
-       ns_entry_context = (cpu_context *) cm_get_context(mpidr, NON_SECURE);
-       assert(ns_entry_context);
-
-       /*
-        * Setup general purpose registers to return the context id and
-        * prevent leakage of secure information into the normal world.
-        */
-       ns_entry_gpregs = get_gpregs_ctx(ns_entry_context);
-       write_ctx_reg(ns_entry_gpregs,
-                     CTX_GPREG_X0,
-                     psci_ns_entry_info[index].context_id);
-
-       /*
-        * Tell the context management library to setup EL3 system registers to
-        * be able to ERET into the ns state, and SP_EL3 points to the right
-        * context to exit from EL3 correctly.
-        */
-       cm_set_el3_eret_context(NON_SECURE,
-                       psci_ns_entry_info[index].eret_info.entrypoint,
-                       psci_ns_entry_info[index].eret_info.spsr,
-                       scr);
-
-       cm_set_next_eret_context(NON_SECURE);
-}
-
-/*******************************************************************************
- * This function retrieves and stashes all the information needed to correctly
- * resume a cpu's execution in the non-secure state after it has been physically
- * powered on i.e. turned ON or resumed from SUSPEND. This is done prior to
- * turning it on or before suspending it.
- ******************************************************************************/
-int psci_set_ns_entry_info(unsigned int index,
-                          unsigned long entrypoint,
-                          unsigned long context_id)
-{
-       int rc = PSCI_E_SUCCESS;
-       unsigned int rw, mode, ee, spsr = 0;
-       unsigned long id_aa64pfr0 = read_id_aa64pfr0_el1(), scr = read_scr();
-       unsigned long el_status;
-
-       /* Figure out what mode do we enter the non-secure world in */
-       el_status = (id_aa64pfr0 >> ID_AA64PFR0_EL2_SHIFT) &
-               ID_AA64PFR0_ELX_MASK;
-
-       /*
-        * Figure out whether the cpu enters the non-secure address space
-        * in aarch32 or aarch64
-        */
-       rw = scr & SCR_RW_BIT;
-       if (rw) {
-
-               /*
-                * Check whether a Thumb entry point has been provided for an
-                * aarch64 EL
-                */
-               if (entrypoint & 0x1)
-                       return PSCI_E_INVALID_PARAMS;
-
-               if (el_status && (scr & SCR_HCE_BIT)) {
-                       mode = MODE_EL2;
-                       ee = read_sctlr_el2() & SCTLR_EE_BIT;
-               } else {
-                       mode = MODE_EL1;
-                       ee = read_sctlr_el1() & SCTLR_EE_BIT;
-               }
-
-               spsr = DAIF_DBG_BIT | DAIF_ABT_BIT;
-               spsr |= DAIF_IRQ_BIT | DAIF_FIQ_BIT;
-               spsr <<= PSR_DAIF_SHIFT;
-               spsr |= make_spsr(mode, MODE_SP_ELX, !rw);
-
-               psci_ns_entry_info[index].sctlr |= ee;
-               psci_ns_entry_info[index].scr |= SCR_RW_BIT;
-       } else {
-
-               /* Check whether aarch32 has to be entered in Thumb mode */
-               if (entrypoint & 0x1)
-                       spsr = SPSR32_T_BIT;
-
-               if (el_status && (scr & SCR_HCE_BIT)) {
-                       mode = AARCH32_MODE_HYP;
-                       ee = read_sctlr_el2() & SCTLR_EE_BIT;
-               } else {
-                       mode = AARCH32_MODE_SVC;
-                       ee = read_sctlr_el1() & SCTLR_EE_BIT;
-               }
-
-               /*
-                * TODO: Choose async. exception bits if HYP mode is not
-                * implemented according to the values of SCR.{AW, FW} bits
-                */
-               spsr |= DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;
-               spsr <<= PSR_DAIF_SHIFT;
-               if (ee)
-                       spsr |= SPSR32_EE_BIT;
-               spsr |= mode;
-
-               /* Ensure that the CSPR.E and SCTLR.EE bits match */
-               psci_ns_entry_info[index].sctlr |= ee;
-               psci_ns_entry_info[index].scr &= ~SCR_RW_BIT;
-       }
-
-       psci_ns_entry_info[index].eret_info.entrypoint = entrypoint;
-       psci_ns_entry_info[index].eret_info.spsr = spsr;
-       psci_ns_entry_info[index].context_id = context_id;
-
-       return rc;
-}
-
-/*******************************************************************************
- * This function takes a pointer to an affinity node in the topology tree and
- * returns its state. State of a non-leaf node needs to be calculated.
- ******************************************************************************/
-unsigned short psci_get_state(aff_map_node *node)
-{
-       assert(node->level >= MPIDR_AFFLVL0 && node->level <= MPIDR_MAX_AFFLVL);
-
-       /* A cpu node just contains the state which can be directly returned */
-       if (node->level == MPIDR_AFFLVL0)
-               return (node->state >> PSCI_STATE_SHIFT) & PSCI_STATE_MASK;
-
-       /*
-        * For an affinity level higher than a cpu, the state has to be
-        * calculated. It depends upon the value of the reference count
-        * which is managed by each node at the next lower affinity level
-        * e.g. for a cluster, each cpu increments/decrements the reference
-        * count. If the reference count is 0 then the affinity level is
-        * OFF else ON.
-        */
-       if (node->ref_count)
-               return PSCI_STATE_ON;
-       else
-               return PSCI_STATE_OFF;
-}
-
-/*******************************************************************************
- * This function takes a pointer to an affinity node in the topology tree and
- * a target state. State of a non-leaf node needs to be converted to a reference
- * count. State of a leaf node can be set directly.
- ******************************************************************************/
-void psci_set_state(aff_map_node *node, unsigned short state)
-{
-       assert(node->level >= MPIDR_AFFLVL0 && node->level <= MPIDR_MAX_AFFLVL);
-
-       /*
-        * For an affinity level higher than a cpu, the state is used
-        * to decide whether the reference count is incremented or
-        * decremented. Entry into the ON_PENDING state does not have
-        * effect.
-        */
-       if (node->level > MPIDR_AFFLVL0) {
-               switch (state) {
-               case PSCI_STATE_ON:
-                       node->ref_count++;
-                       break;
-               case PSCI_STATE_OFF:
-               case PSCI_STATE_SUSPEND:
-                       node->ref_count--;
-                       break;
-               case PSCI_STATE_ON_PENDING:
-                       /*
-                        * An affinity level higher than a cpu will not undergo
-                        * a state change when it is about to be turned on
-                        */
-                       return;
-               default:
-                       assert(0);
-               }
-       } else {
-               node->state &= ~(PSCI_STATE_MASK << PSCI_STATE_SHIFT);
-               node->state |= (state & PSCI_STATE_MASK) << PSCI_STATE_SHIFT;
-       }
-}
-
-/*******************************************************************************
- * An affinity level could be on, on_pending, suspended or off. These are the
- * logical states it can be in. Physically either it is off or on. When it is in
- * the state on_pending then it is about to be turned on. It is not possible to
- * tell whether that's actually happenned or not. So we err on the side of
- * caution & treat the affinity level as being turned off.
- ******************************************************************************/
-unsigned short psci_get_phys_state(aff_map_node *node)
-{
-       unsigned int state;
-
-       state = psci_get_state(node);
-       return get_phys_state(state);
-}
-
-/*******************************************************************************
- * This function takes an array of pointers to affinity instance nodes in the
- * topology tree and calls the physical power on handler for the corresponding
- * affinity levels
- ******************************************************************************/
-static int psci_call_power_on_handlers(mpidr_aff_map_nodes mpidr_nodes,
-                                      int start_afflvl,
-                                      int end_afflvl,
-                                      afflvl_power_on_finisher *pon_handlers,
-                                      unsigned long mpidr)
-{
-       int rc = PSCI_E_INVALID_PARAMS, level;
-       aff_map_node *node;
-
-       for (level = end_afflvl; level >= start_afflvl; level--) {
-               node = mpidr_nodes[level];
-               if (node == NULL)
-                       continue;
-
-               /*
-                * If we run into any trouble while powering up an
-                * affinity instance, then there is no recovery path
-                * so simply return an error and let the caller take
-                * care of the situation.
-                */
-               rc = pon_handlers[level](mpidr, node);
-               if (rc != PSCI_E_SUCCESS)
-                       break;
-       }
-
-       return rc;
-}
-
-/*******************************************************************************
- * Generic handler which is called when a cpu is physically powered on. It
- * traverses through all the affinity levels performing generic, architectural,
- * platform setup and state management e.g. for a cluster that's been powered
- * on, it will call the platform specific code which will enable coherency at
- * the interconnect level. For a cpu it could mean turning on the MMU etc.
- *
- * The state of all the relevant affinity levels is changed after calling the
- * affinity level specific handlers as their actions would depend upon the state
- * the affinity level is exiting from.
- *
- * The affinity level specific handlers are called in descending order i.e. from
- * the highest to the lowest affinity level implemented by the platform because
- * to turn on affinity level X it is neccesary to turn on affinity level X + 1
- * first.
- *
- * CAUTION: This function is called with coherent stacks so that coherency and
- * the mmu can be turned on safely.
- ******************************************************************************/
-void psci_afflvl_power_on_finish(unsigned long mpidr,
-                                int start_afflvl,
-                                int end_afflvl,
-                                afflvl_power_on_finisher *pon_handlers)
-{
-       mpidr_aff_map_nodes mpidr_nodes;
-       int rc;
-
-       mpidr &= MPIDR_AFFINITY_MASK;
-
-       /*
-        * Collect the pointers to the nodes in the topology tree for
-        * each affinity instance in the mpidr. If this function does
-        * not return successfully then either the mpidr or the affinity
-        * levels are incorrect. Either case is an irrecoverable error.
-        */
-       rc = psci_get_aff_map_nodes(mpidr,
-                                   start_afflvl,
-                                   end_afflvl,
-                                   mpidr_nodes);
-       if (rc != PSCI_E_SUCCESS)
-               panic();
-
-       /*
-        * This function acquires the lock corresponding to each affinity
-        * level so that by the time all locks are taken, the system topology
-        * is snapshot and state management can be done safely.
-        */
-       psci_acquire_afflvl_locks(mpidr,
-                                 start_afflvl,
-                                 end_afflvl,
-                                 mpidr_nodes);
-
-       /* Perform generic, architecture and platform specific handling */
-       rc = psci_call_power_on_handlers(mpidr_nodes,
-                                        start_afflvl,
-                                        end_afflvl,
-                                        pon_handlers,
-                                        mpidr);
-       if (rc != PSCI_E_SUCCESS)
-               panic();
-
-       /*
-        * This loop releases the lock corresponding to each affinity level
-        * in the reverse order to which they were acquired.
-        */
-       psci_release_afflvl_locks(mpidr,
-                                 start_afflvl,
-                                 end_afflvl,
-                                 mpidr_nodes);
-}
-
-/*******************************************************************************
- * This function initializes the set of hooks that PSCI invokes as part of power
- * management operation. The power management hooks are expected to be provided
- * by the SPD, after it finishes all its initialization
- ******************************************************************************/
-void psci_register_spd_pm_hook(const spd_pm_ops *pm)
-{
-       psci_spd_pm = pm;
-}
diff --git a/services/psci/psci_entry.S b/services/psci/psci_entry.S
deleted file mode 100644 (file)
index 361dfde..0000000
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <platform.h>
-#include <psci.h>
-#include <psci_private.h>
-#include <runtime_svc.h>
-#include <asm_macros.S>
-#include <cm_macros.S>
-
-       .globl  psci_aff_on_finish_entry
-       .globl  psci_aff_suspend_finish_entry
-       .globl  __psci_cpu_off
-       .globl  __psci_cpu_suspend
-
-       .section        .text, "ax"; .align 3
-
-       /* -----------------------------------------------------
-        * This cpu has been physically powered up. Depending
-        * upon whether it was resumed from suspend or simply
-        * turned on, call the common power on finisher with
-        * the handlers (chosen depending upon original state).
-        * For ease, the finisher is called with coherent
-        * stacks. This allows the cluster/cpu finishers to
-        * enter coherency and enable the mmu without running
-        * into issues. We switch back to normal stacks once
-        * all this is done.
-        * -----------------------------------------------------
-        */
-psci_aff_on_finish_entry:
-       adr     x23, psci_afflvl_on_finishers
-       b       psci_aff_common_finish_entry
-
-psci_aff_suspend_finish_entry:
-       adr     x23, psci_afflvl_suspend_finishers
-
-psci_aff_common_finish_entry:
-       adr     x22, psci_afflvl_power_on_finish
-
-       /* ---------------------------------------------
-        * Exceptions should not occur at this point.
-        * Set VBAR in order to handle and report any
-        * that do occur
-        * ---------------------------------------------
-        */
-       adr     x0, early_exceptions
-       msr     vbar_el3, x0
-       isb
-
-       /* ---------------------------------------------
-        * Use SP_EL0 for the C runtime stack.
-        * ---------------------------------------------
-        */
-       msr     spsel, #0
-       isb
-
-       bl      read_mpidr
-       mov     x19, x0
-       bl      platform_set_coherent_stack
-
-       /* ---------------------------------------------
-        * Call the finishers starting from affinity
-        * level 0.
-        * ---------------------------------------------
-        */
-       mov     x0, x19
-       bl      get_power_on_target_afflvl
-       cmp     x0, xzr
-       b.lt    _panic
-       mov     x3, x23
-       mov     x2, x0
-       mov     x0, x19
-       mov     x1, #MPIDR_AFFLVL0
-       blr     x22
-
-       /* --------------------------------------------
-        * Give ourselves a stack allocated in Normal
-        * -IS-WBWA memory
-        * --------------------------------------------
-        */
-       mov     x0, x19
-       bl      platform_set_stack
-
-       zero_callee_saved_regs
-       b       el3_exit
-_panic:
-       b       _panic
-
-       /* -----------------------------------------------------
-        * The following two stubs give the calling cpu a
-        * coherent stack to allow flushing of caches without
-        * suffering from stack coherency issues
-        * -----------------------------------------------------
-        */
-__psci_cpu_off:
-       func_prologue
-       sub     sp, sp, #0x10
-       stp     x19, x20, [sp, #0]
-       mov     x19, sp
-       bl      read_mpidr
-       bl      platform_set_coherent_stack
-       bl      psci_cpu_off
-       mov     x1, #PSCI_E_SUCCESS
-       cmp     x0, x1
-       b.eq    final_wfi
-       mov     sp, x19
-       ldp     x19, x20, [sp,#0]
-       add     sp, sp, #0x10
-       func_epilogue
-       ret
-
-__psci_cpu_suspend:
-       func_prologue
-       sub     sp, sp, #0x20
-       stp     x19, x20, [sp, #0]
-       stp     x21, x22, [sp, #0x10]
-       mov     x19, sp
-       mov     x20, x0
-       mov     x21, x1
-       mov     x22, x2
-       bl      read_mpidr
-       bl      platform_set_coherent_stack
-       mov     x0, x20
-       mov     x1, x21
-       mov     x2, x22
-       bl      psci_cpu_suspend
-       mov     x1, #PSCI_E_SUCCESS
-       cmp     x0, x1
-       b.eq    final_wfi
-       mov     sp, x19
-       ldp     x21, x22, [sp,#0x10]
-       ldp     x19, x20, [sp,#0]
-       add     sp, sp, #0x20
-       func_epilogue
-       ret
-
-final_wfi:
-       dsb     sy
-       wfi
-wfi_spill:
-       b       wfi_spill
-
diff --git a/services/psci/psci_main.c b/services/psci/psci_main.c
deleted file mode 100644 (file)
index ca3a5a0..0000000
+++ /dev/null
@@ -1,271 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <string.h>
-#include <assert.h>
-#include <arch_helpers.h>
-#include <console.h>
-#include <platform.h>
-#include <psci_private.h>
-#include <runtime_svc.h>
-#include <debug.h>
-#include <context_mgmt.h>
-
-/*******************************************************************************
- * PSCI frontend api for servicing SMCs. Described in the PSCI spec.
- ******************************************************************************/
-int psci_cpu_on(unsigned long target_cpu,
-               unsigned long entrypoint,
-               unsigned long context_id)
-
-{
-       int rc;
-       unsigned int start_afflvl, end_afflvl;
-
-       /* Determine if the cpu exists of not */
-       rc = psci_validate_mpidr(target_cpu, MPIDR_AFFLVL0);
-       if (rc != PSCI_E_SUCCESS) {
-               goto exit;
-       }
-
-       /*
-        * To turn this cpu on, specify which affinity
-        * levels need to be turned on
-        */
-       start_afflvl = MPIDR_AFFLVL0;
-       end_afflvl = get_max_afflvl();
-       rc = psci_afflvl_on(target_cpu,
-                           entrypoint,
-                           context_id,
-                           start_afflvl,
-                           end_afflvl);
-
-exit:
-       return rc;
-}
-
-unsigned int psci_version(void)
-{
-       return PSCI_MAJOR_VER | PSCI_MINOR_VER;
-}
-
-int psci_cpu_suspend(unsigned int power_state,
-                    unsigned long entrypoint,
-                    unsigned long context_id)
-{
-       int rc;
-       unsigned long mpidr;
-       unsigned int target_afflvl, pstate_type;
-
-       /* TODO: Standby states are not supported at the moment */
-       pstate_type = psci_get_pstate_type(power_state);
-       if (pstate_type == 0) {
-               rc = PSCI_E_INVALID_PARAMS;
-               goto exit;
-       }
-
-       /* Sanity check the requested state */
-       target_afflvl = psci_get_pstate_afflvl(power_state);
-       if (target_afflvl > MPIDR_MAX_AFFLVL) {
-               rc = PSCI_E_INVALID_PARAMS;
-               goto exit;
-       }
-
-       mpidr = read_mpidr();
-       rc = psci_afflvl_suspend(mpidr,
-                                entrypoint,
-                                context_id,
-                                power_state,
-                                MPIDR_AFFLVL0,
-                                target_afflvl);
-
-exit:
-       if (rc != PSCI_E_SUCCESS)
-               assert(rc == PSCI_E_INVALID_PARAMS);
-       return rc;
-}
-
-int psci_cpu_off(void)
-{
-       int rc;
-       unsigned long mpidr;
-       int target_afflvl = get_max_afflvl();
-
-       mpidr = read_mpidr();
-
-       /*
-        * Traverse from the highest to the lowest affinity level. When the
-        * lowest affinity level is hit, all the locks are acquired. State
-        * management is done immediately followed by cpu, cluster ...
-        * ..target_afflvl specific actions as this function unwinds back.
-        */
-       rc = psci_afflvl_off(mpidr, MPIDR_AFFLVL0, target_afflvl);
-
-       /*
-        * The only error cpu_off can return is E_DENIED. So check if that's
-        * indeed the case.
-        */
-       assert (rc == PSCI_E_SUCCESS || rc == PSCI_E_DENIED);
-
-       return rc;
-}
-
-int psci_affinity_info(unsigned long target_affinity,
-                      unsigned int lowest_affinity_level)
-{
-       int rc = PSCI_E_INVALID_PARAMS;
-       unsigned int aff_state;
-       aff_map_node *node;
-
-       if (lowest_affinity_level > get_max_afflvl())
-               return rc;
-
-       node = psci_get_aff_map_node(target_affinity, lowest_affinity_level);
-       if (node && (node->state & PSCI_AFF_PRESENT)) {
-
-               /*
-                * TODO: For affinity levels higher than 0 i.e. cpu, the
-                * state will always be either ON or OFF. Need to investigate
-                * how critical is it to support ON_PENDING here.
-                */
-               aff_state = psci_get_state(node);
-
-               /* A suspended cpu is available & on for the OS */
-               if (aff_state == PSCI_STATE_SUSPEND) {
-                       aff_state = PSCI_STATE_ON;
-               }
-
-               rc = aff_state;
-       }
-
-       return rc;
-}
-
-/* Unimplemented */
-int psci_migrate(unsigned int target_cpu)
-{
-       return PSCI_E_NOT_SUPPORTED;
-}
-
-/* Unimplemented */
-unsigned int psci_migrate_info_type(void)
-{
-       return PSCI_TOS_NOT_PRESENT_MP;
-}
-
-unsigned long psci_migrate_info_up_cpu(void)
-{
-       /*
-        * Return value of this currently unsupported call depends upon
-        * what psci_migrate_info_type() returns.
-        */
-       return PSCI_E_SUCCESS;
-}
-
-/* Unimplemented */
-void psci_system_off(void)
-{
-       assert(0);
-}
-
-/* Unimplemented */
-void psci_system_reset(void)
-{
-       assert(0);
-}
-
-/*******************************************************************************
- * PSCI top level handler for servicing SMCs.
- ******************************************************************************/
-uint64_t psci_smc_handler(uint32_t smc_fid,
-                         uint64_t x1,
-                         uint64_t x2,
-                         uint64_t x3,
-                         uint64_t x4,
-                         void *cookie,
-                         void *handle,
-                         uint64_t flags)
-{
-       uint64_t rc;
-
-       switch (smc_fid) {
-       case PSCI_VERSION:
-               rc = psci_version();
-               break;
-
-       case PSCI_CPU_OFF:
-               rc = __psci_cpu_off();
-               break;
-
-       case PSCI_CPU_SUSPEND_AARCH64:
-       case PSCI_CPU_SUSPEND_AARCH32:
-               rc = __psci_cpu_suspend(x1, x2, x3);
-               break;
-
-       case PSCI_CPU_ON_AARCH64:
-       case PSCI_CPU_ON_AARCH32:
-               rc = psci_cpu_on(x1, x2, x3);
-               break;
-
-       case PSCI_AFFINITY_INFO_AARCH32:
-       case PSCI_AFFINITY_INFO_AARCH64:
-               rc = psci_affinity_info(x1, x2);
-               break;
-
-       case PSCI_MIG_AARCH32:
-       case PSCI_MIG_AARCH64:
-               rc = psci_migrate(x1);
-               break;
-
-       case PSCI_MIG_INFO_TYPE:
-               rc = psci_migrate_info_type();
-               break;
-
-       case PSCI_MIG_INFO_UP_CPU_AARCH32:
-       case PSCI_MIG_INFO_UP_CPU_AARCH64:
-               rc = psci_migrate_info_up_cpu();
-               break;
-
-       case PSCI_SYSTEM_OFF:
-               psci_system_off();
-               assert(0);
-
-       case PSCI_SYSTEM_RESET:
-               psci_system_reset();
-               assert(0);
-
-       default:
-               rc = SMC_UNK;
-               WARN("Unimplemented psci call -> 0x%x \n", smc_fid);
-       }
-
-       SMC_RET1(handle, rc);
-}
diff --git a/services/psci/psci_private.h b/services/psci/psci_private.h
deleted file mode 100644 (file)
index 4f47ca5..0000000
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __PSCI_PRIVATE_H__
-#define __PSCI_PRIVATE_H__
-
-#include <arch.h>
-#include <bakery_lock.h>
-
-#ifndef __ASSEMBLY__
-/*******************************************************************************
- * The following two data structures hold the generic information to bringup
- * a suspended/hotplugged out cpu
- ******************************************************************************/
-typedef struct {
-       unsigned long entrypoint;
-       unsigned long spsr;
-} eret_params;
-
-typedef struct {
-       eret_params eret_info;
-       unsigned long context_id;
-       unsigned int scr;
-       unsigned int sctlr;
-} ns_entry_info;
-
-/*******************************************************************************
- * The following two data structures hold the topology tree which in turn tracks
- * the state of the all the affinity instances supported by the platform.
- ******************************************************************************/
-typedef struct {
-       unsigned long mpidr;
-       unsigned short ref_count;
-       unsigned char state;
-       unsigned char level;
-       unsigned int data;
-       bakery_lock lock;
-} aff_map_node;
-
-typedef struct {
-       int min;
-       int max;
-} aff_limits_node;
-
-/*******************************************************************************
- * This data structure holds secure world context that needs to be preserved
- * across cpu_suspend calls which enter the power down state.
- ******************************************************************************/
-typedef struct {
-       /* Align the suspend level to allow per-cpu lockless access */
-       int suspend_level
-       __attribute__((__aligned__(CACHE_WRITEBACK_GRANULE)));
-} suspend_context;
-
-typedef aff_map_node (*mpidr_aff_map_nodes[MPIDR_MAX_AFFLVL]);
-typedef unsigned int (*afflvl_power_on_finisher)(unsigned long,
-                                                aff_map_node *);
-
-/*******************************************************************************
- * Data prototypes
- ******************************************************************************/
-extern suspend_context psci_suspend_context[PSCI_NUM_AFFS];
-extern ns_entry_info psci_ns_entry_info[PSCI_NUM_AFFS];
-extern unsigned int psci_ns_einfo_idx;
-extern aff_limits_node psci_aff_limits[MPIDR_MAX_AFFLVL + 1];
-extern plat_pm_ops *psci_plat_pm_ops;
-extern aff_map_node psci_aff_map[PSCI_NUM_AFFS];
-extern afflvl_power_on_finisher psci_afflvl_off_finish_handlers[];
-extern afflvl_power_on_finisher psci_afflvl_sus_finish_handlers[];
-
-/*******************************************************************************
- * SPD's power management hooks registered with PSCI
- ******************************************************************************/
-extern const spd_pm_ops *psci_spd_pm;
-
-/*******************************************************************************
- * Function prototypes
- ******************************************************************************/
-/* Private exported functions from psci_common.c */
-extern int get_max_afflvl(void);
-extern unsigned short psci_get_state(aff_map_node *node);
-extern unsigned short psci_get_phys_state(aff_map_node *node);
-extern void psci_set_state(aff_map_node *node, unsigned short state);
-extern void psci_get_ns_entry_info(unsigned int index);
-extern unsigned long mpidr_set_aff_inst(unsigned long, unsigned char, int);
-extern int psci_validate_mpidr(unsigned long, int);
-extern int get_power_on_target_afflvl(unsigned long mpidr);
-extern void psci_afflvl_power_on_finish(unsigned long,
-                                               int,
-                                               int,
-                                               afflvl_power_on_finisher *);
-extern int psci_set_ns_entry_info(unsigned int index,
-                                 unsigned long entrypoint,
-                                 unsigned long context_id);
-extern int psci_check_afflvl_range(int start_afflvl, int end_afflvl);
-extern void psci_acquire_afflvl_locks(unsigned long mpidr,
-                                     int start_afflvl,
-                                     int end_afflvl,
-                                     mpidr_aff_map_nodes mpidr_nodes);
-extern void psci_release_afflvl_locks(unsigned long mpidr,
-                                     int start_afflvl,
-                                     int end_afflvl,
-                                     mpidr_aff_map_nodes mpidr_nodes);
-
-/* Private exported functions from psci_setup.c */
-extern int psci_get_aff_map_nodes(unsigned long mpidr,
-                                 int start_afflvl,
-                                 int end_afflvl,
-                                 mpidr_aff_map_nodes mpidr_nodes);
-extern aff_map_node *psci_get_aff_map_node(unsigned long, int);
-
-/* Private exported functions from psci_affinity_on.c */
-extern int psci_afflvl_on(unsigned long,
-                         unsigned long,
-                         unsigned long,
-                         int,
-                         int);
-
-/* Private exported functions from psci_affinity_off.c */
-extern int psci_afflvl_off(unsigned long, int, int);
-
-/* Private exported functions from psci_affinity_suspend.c */
-extern void psci_set_suspend_afflvl(aff_map_node *node, int afflvl);
-extern int psci_get_suspend_afflvl(aff_map_node *node);
-extern int psci_afflvl_suspend(unsigned long,
-                              unsigned long,
-                              unsigned long,
-                              unsigned int,
-                              int,
-                              int);
-extern unsigned int psci_afflvl_suspend_finish(unsigned long, int, int);
-
-/* Private exported functions from psci_main.c */
-extern uint64_t psci_smc_handler(uint32_t smc_fid,
-                                uint64_t x1,
-                                uint64_t x2,
-                                uint64_t x3,
-                                uint64_t x4,
-                                void *cookie,
-                                void *handle,
-                                uint64_t flags);
-#endif /*__ASSEMBLY__*/
-
-#endif /* __PSCI_PRIVATE_H__ */
diff --git a/services/psci/psci_setup.c b/services/psci/psci_setup.c
deleted file mode 100644 (file)
index 8d7903c..0000000
+++ /dev/null
@@ -1,355 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <string.h>
-#include <assert.h>
-#include <arch_helpers.h>
-#include <console.h>
-#include <platform.h>
-#include <psci_private.h>
-#include <context_mgmt.h>
-#include <runtime_svc.h>
-
-/*******************************************************************************
- * Per cpu non-secure contexts used to program the architectural state prior
- * return to the normal world.
- * TODO: Use the memory allocator to set aside memory for the contexts instead
- * of relying on platform defined constants. Using PSCI_NUM_AFFS will be an
- * overkill.
- ******************************************************************************/
-static cpu_context psci_ns_context[PLATFORM_CORE_COUNT];
-
-/*******************************************************************************
- * Routines for retrieving the node corresponding to an affinity level instance
- * in the mpidr. The first one uses binary search to find the node corresponding
- * to the mpidr (key) at a particular affinity level. The second routine decides
- * extents of the binary search at each affinity level.
- ******************************************************************************/
-static int psci_aff_map_get_idx(unsigned long key,
-                               int min_idx,
-                               int max_idx)
-{
-       int mid;
-
-       /*
-        * Terminating condition: If the max and min indices have crossed paths
-        * during the binary search then the key has not been found.
-        */
-       if (max_idx < min_idx)
-               return PSCI_E_INVALID_PARAMS;
-
-       /*
-        * Bisect the array around 'mid' and then recurse into the array chunk
-        * where the key is likely to be found. The mpidrs in each node in the
-        * 'psci_aff_map' for a given affinity level are stored in an ascending
-        * order which makes the binary search possible.
-        */
-       mid = min_idx + ((max_idx - min_idx) >> 1);     /* Divide by 2 */
-       if (psci_aff_map[mid].mpidr > key)
-               return psci_aff_map_get_idx(key, min_idx, mid - 1);
-       else if (psci_aff_map[mid].mpidr < key)
-               return psci_aff_map_get_idx(key, mid + 1, max_idx);
-       else
-               return mid;
-}
-
-aff_map_node *psci_get_aff_map_node(unsigned long mpidr, int aff_lvl)
-{
-       int rc;
-
-       /* Right shift the mpidr to the required affinity level */
-       mpidr = mpidr_mask_lower_afflvls(mpidr, aff_lvl);
-
-       rc = psci_aff_map_get_idx(mpidr,
-                                 psci_aff_limits[aff_lvl].min,
-                                 psci_aff_limits[aff_lvl].max);
-       if (rc >= 0)
-               return &psci_aff_map[rc];
-       else
-               return NULL;
-}
-
-/*******************************************************************************
- * This function populates an array with nodes corresponding to a given range of
- * affinity levels in an mpidr. It returns successfully only when the affinity
- * levels are correct, the mpidr is valid i.e. no affinity level is absent from
- * the topology tree & the affinity instance at level 0 is not absent.
- ******************************************************************************/
-int psci_get_aff_map_nodes(unsigned long mpidr,
-                          int start_afflvl,
-                          int end_afflvl,
-                          mpidr_aff_map_nodes mpidr_nodes)
-{
-       int rc = PSCI_E_INVALID_PARAMS, level;
-       aff_map_node *node;
-
-       rc = psci_check_afflvl_range(start_afflvl, end_afflvl);
-       if (rc != PSCI_E_SUCCESS)
-               return rc;
-
-       for (level = start_afflvl; level <= end_afflvl; level++) {
-
-               /*
-                * Grab the node for each affinity level. No affinity level
-                * can be missing as that would mean that the topology tree
-                * is corrupted.
-                */
-               node = psci_get_aff_map_node(mpidr, level);
-               if (node == NULL) {
-                       rc = PSCI_E_INVALID_PARAMS;
-                       break;
-               }
-
-               /*
-                * Skip absent affinity levels unless it's afffinity level 0.
-                * An absent cpu means that the mpidr is invalid. Save the
-                * pointer to the node for the present affinity level
-                */
-               if (!(node->state & PSCI_AFF_PRESENT)) {
-                       if (level == MPIDR_AFFLVL0) {
-                               rc = PSCI_E_INVALID_PARAMS;
-                               break;
-                       }
-
-                       mpidr_nodes[level] = NULL;
-               } else
-                       mpidr_nodes[level] = node;
-       }
-
-       return rc;
-}
-
-/*******************************************************************************
- * Function which initializes the 'aff_map_node' corresponding to an affinity
- * level instance. Each node has a unique mpidr, level and bakery lock. The data
- * field is opaque and holds affinity level specific data e.g. for affinity
- * level 0 it contains the index into arrays that hold the secure/non-secure
- * state for a cpu that's been turned on/off
- ******************************************************************************/
-static void psci_init_aff_map_node(unsigned long mpidr,
-                                  int level,
-                                  unsigned int idx)
-{
-       unsigned char state;
-       uint32_t linear_id;
-       psci_aff_map[idx].mpidr = mpidr;
-       psci_aff_map[idx].level = level;
-       bakery_lock_init(&psci_aff_map[idx].lock);
-
-       /*
-        * If an affinity instance is present then mark it as OFF to begin with.
-        */
-       state = plat_get_aff_state(level, mpidr);
-       psci_aff_map[idx].state = state;
-
-       if (level == MPIDR_AFFLVL0) {
-
-               /*
-                * Mark the cpu as OFF. Higher affinity level reference counts
-                * have already been memset to 0
-                */
-               if (state & PSCI_AFF_PRESENT)
-                       psci_set_state(&psci_aff_map[idx], PSCI_STATE_OFF);
-
-               /* Ensure that we have not overflowed the psci_ns_einfo array */
-               assert(psci_ns_einfo_idx < PSCI_NUM_AFFS);
-
-               psci_aff_map[idx].data = psci_ns_einfo_idx;
-               psci_ns_einfo_idx++;
-
-               /*
-                * Associate a non-secure context with this affinity
-                * instance through the context management library.
-                */
-               linear_id = platform_get_core_pos(mpidr);
-               assert(linear_id < PLATFORM_CORE_COUNT);
-
-               cm_set_context(mpidr,
-                               (void *) &psci_ns_context[linear_id],
-                               NON_SECURE);
-
-               /* Initialize exception stack in the context */
-               cm_init_exception_stack(mpidr, NON_SECURE);
-       }
-
-       return;
-}
-
-/*******************************************************************************
- * Core routine used by the Breadth-First-Search algorithm to populate the
- * affinity tree. Each level in the tree corresponds to an affinity level. This
- * routine's aim is to traverse to the target affinity level and populate nodes
- * in the 'psci_aff_map' for all the siblings at that level. It uses the current
- * affinity level to keep track of how many levels from the root of the tree
- * have been traversed. If the current affinity level != target affinity level,
- * then the platform is asked to return the number of children that each
- * affinity instance has at the current affinity level. Traversal is then done
- * for each child at the next lower level i.e. current affinity level - 1.
- *
- * CAUTION: This routine assumes that affinity instance ids are allocated in a
- * monotonically increasing manner at each affinity level in a mpidr starting
- * from 0. If the platform breaks this assumption then this code will have to
- * be reworked accordingly.
- ******************************************************************************/
-static unsigned int psci_init_aff_map(unsigned long mpidr,
-                                     unsigned int affmap_idx,
-                                     int cur_afflvl,
-                                     int tgt_afflvl)
-{
-       unsigned int ctr, aff_count;
-
-       assert(cur_afflvl >= tgt_afflvl);
-
-       /*
-        * Find the number of siblings at the current affinity level &
-        * assert if there are none 'cause then we have been invoked with
-        * an invalid mpidr.
-        */
-       aff_count = plat_get_aff_count(cur_afflvl, mpidr);
-       assert(aff_count);
-
-       if (tgt_afflvl < cur_afflvl) {
-               for (ctr = 0; ctr < aff_count; ctr++) {
-                       mpidr = mpidr_set_aff_inst(mpidr, ctr, cur_afflvl);
-                       affmap_idx = psci_init_aff_map(mpidr,
-                                                      affmap_idx,
-                                                      cur_afflvl - 1,
-                                                      tgt_afflvl);
-               }
-       } else {
-               for (ctr = 0; ctr < aff_count; ctr++, affmap_idx++) {
-                       mpidr = mpidr_set_aff_inst(mpidr, ctr, cur_afflvl);
-                       psci_init_aff_map_node(mpidr, cur_afflvl, affmap_idx);
-               }
-
-               /* affmap_idx is 1 greater than the max index of cur_afflvl */
-               psci_aff_limits[cur_afflvl].max = affmap_idx - 1;
-       }
-
-       return affmap_idx;
-}
-
-/*******************************************************************************
- * This function initializes the topology tree by querying the platform. To do
- * so, it's helper routines implement a Breadth-First-Search. At each affinity
- * level the platform conveys the number of affinity instances that exist i.e.
- * the affinity count. The algorithm populates the psci_aff_map recursively
- * using this information. On a platform that implements two clusters of 4 cpus
- * each, the populated aff_map_array would look like this:
- *
- *            <- cpus cluster0 -><- cpus cluster1 ->
- * ---------------------------------------------------
- * | 0  | 1  | 0  | 1  | 2  | 3  | 0  | 1  | 2  | 3  |
- * ---------------------------------------------------
- *           ^                                       ^
- * cluster __|                                 cpu __|
- * limit                                      limit
- *
- * The first 2 entries are of the cluster nodes. The next 4 entries are of cpus
- * within cluster 0. The last 4 entries are of cpus within cluster 1.
- * The 'psci_aff_limits' array contains the max & min index of each affinity
- * level within the 'psci_aff_map' array. This allows restricting search of a
- * node at an affinity level between the indices in the limits array.
- ******************************************************************************/
-int32_t psci_setup(void)
-{
-       unsigned long mpidr = read_mpidr();
-       int afflvl, affmap_idx, max_afflvl;
-       aff_map_node *node;
-
-       psci_ns_einfo_idx = 0;
-       psci_plat_pm_ops = NULL;
-
-       /* Find out the maximum affinity level that the platform implements */
-       max_afflvl = get_max_afflvl();
-       assert(max_afflvl <= MPIDR_MAX_AFFLVL);
-
-       /*
-        * This call traverses the topology tree with help from the platform and
-        * populates the affinity map using a breadth-first-search recursively.
-        * We assume that the platform allocates affinity instance ids from 0
-        * onwards at each affinity level in the mpidr. FIRST_MPIDR = 0.0.0.0
-        */
-       affmap_idx = 0;
-       for (afflvl = max_afflvl; afflvl >= MPIDR_AFFLVL0; afflvl--) {
-               affmap_idx = psci_init_aff_map(FIRST_MPIDR,
-                                              affmap_idx,
-                                              max_afflvl,
-                                              afflvl);
-       }
-
-       /*
-        * Set the bounds for the affinity counts of each level in the map. Also
-        * flush out the entire array so that it's visible to subsequent power
-        * management operations. The 'psci_aff_map' array is allocated in
-        * coherent memory so does not need flushing. The 'psci_aff_limits'
-        * array is allocated in normal memory. It will be accessed when the mmu
-        * is off e.g. after reset. Hence it needs to be flushed.
-        */
-       for (afflvl = MPIDR_AFFLVL0; afflvl < max_afflvl; afflvl++) {
-               psci_aff_limits[afflvl].min =
-                       psci_aff_limits[afflvl + 1].max + 1;
-       }
-
-       flush_dcache_range((unsigned long) psci_aff_limits,
-                          sizeof(psci_aff_limits));
-
-       /*
-        * Mark the affinity instances in our mpidr as ON. No need to lock as
-        * this is the primary cpu.
-        */
-       mpidr &= MPIDR_AFFINITY_MASK;
-       for (afflvl = MPIDR_AFFLVL0; afflvl <= max_afflvl; afflvl++) {
-
-               node = psci_get_aff_map_node(mpidr, afflvl);
-               assert(node);
-
-               /* Mark each present node as ON. */
-               if (node->state & PSCI_AFF_PRESENT)
-                       psci_set_state(node, PSCI_STATE_ON);
-       }
-
-       platform_setup_pm(&psci_plat_pm_ops);
-       assert(psci_plat_pm_ops);
-
-       return 0;
-}
-
-/* Register PSCI as a run time service */
-DECLARE_RT_SVC(
-               psci,
-
-               OEN_STD_START,
-               OEN_STD_END,
-               SMC_TYPE_FAST,
-               psci_setup,
-               psci_smc_handler
-);
diff --git a/services/std_svc/psci/psci_afflvl_off.c b/services/std_svc/psci/psci_afflvl_off.c
new file mode 100644 (file)
index 0000000..3763f6f
--- /dev/null
@@ -0,0 +1,287 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <debug.h>
+#include <arch_helpers.h>
+#include <console.h>
+#include <platform.h>
+#include <psci.h>
+#include <psci_private.h>
+
+typedef int (*afflvl_off_handler)(unsigned long, aff_map_node *);
+
+/*******************************************************************************
+ * The next three functions implement a handler for each supported affinity
+ * level which is called when that affinity level is turned off.
+ ******************************************************************************/
+static int psci_afflvl0_off(unsigned long mpidr, aff_map_node *cpu_node)
+{
+       unsigned int index, plat_state;
+       int rc = PSCI_E_SUCCESS;
+       unsigned long sctlr = read_sctlr();
+
+       assert(cpu_node->level == MPIDR_AFFLVL0);
+
+       /* State management: mark this cpu as turned off */
+       psci_set_state(cpu_node, PSCI_STATE_OFF);
+
+       /*
+        * Generic management: Get the index for clearing any lingering re-entry
+        * information and allow the secure world to switch itself off
+        */
+
+       /*
+        * Call the cpu off handler registered by the Secure Payload Dispatcher
+        * to let it do any bookkeeping. Assume that the SPD always reports an
+        * E_DENIED error if the SP refuses to power down
+        */
+       if (psci_spd_pm && psci_spd_pm->svc_off) {
+               rc = psci_spd_pm->svc_off(0);
+               if (rc)
+                       return rc;
+       }
+
+       index = cpu_node->data;
+       memset(&psci_ns_entry_info[index], 0, sizeof(psci_ns_entry_info[index]));
+
+       /*
+        * Arch. management. Perform the necessary steps to flush all
+        * cpu caches.
+        *
+        * TODO: This power down sequence varies across cpus so it needs to be
+        * abstracted out on the basis of the MIDR like in cpu_reset_handler().
+        * Do the bare minimal for the time being. Fix this before porting to
+        * Cortex models.
+        */
+       sctlr &= ~SCTLR_C_BIT;
+       write_sctlr(sctlr);
+
+       /*
+        * CAUTION: This flush to the level of unification makes an assumption
+        * about the cache hierarchy at affinity level 0 (cpu) in the platform.
+        * Ideally the platform should tell psci which levels to flush to exit
+        * coherency.
+        */
+       dcsw_op_louis(DCCISW);
+
+       /*
+        * Plat. management: Perform platform specific actions to turn this
+        * cpu off e.g. exit cpu coherency, program the power controller etc.
+        */
+       if (psci_plat_pm_ops->affinst_off) {
+
+               /* Get the current physical state of this cpu */
+               plat_state = psci_get_phys_state(cpu_node);
+               rc = psci_plat_pm_ops->affinst_off(mpidr,
+                                                  cpu_node->level,
+                                                  plat_state);
+       }
+
+       return rc;
+}
+
+static int psci_afflvl1_off(unsigned long mpidr, aff_map_node *cluster_node)
+{
+       int rc = PSCI_E_SUCCESS;
+       unsigned int plat_state;
+
+       /* Sanity check the cluster level */
+       assert(cluster_node->level == MPIDR_AFFLVL1);
+
+       /* State management: Mark this cluster as off */
+       psci_set_state(cluster_node, PSCI_STATE_OFF);
+
+       /*
+        * Keep the physical state of this cluster handy to decide
+        * what action needs to be taken
+        */
+       plat_state = psci_get_phys_state(cluster_node);
+
+       /*
+        * Arch. Management. Flush all levels of caches to PoC if
+        * the cluster is to be shutdown
+        */
+       if (plat_state == PSCI_STATE_OFF)
+               dcsw_op_all(DCCISW);
+
+       /*
+        * Plat. Management. Allow the platform to do its cluster
+        * specific bookkeeping e.g. turn off interconnect coherency,
+        * program the power controller etc.
+        */
+       if (psci_plat_pm_ops->affinst_off)
+               rc = psci_plat_pm_ops->affinst_off(mpidr,
+                                                  cluster_node->level,
+                                                  plat_state);
+
+       return rc;
+}
+
+static int psci_afflvl2_off(unsigned long mpidr, aff_map_node *system_node)
+{
+       int rc = PSCI_E_SUCCESS;
+       unsigned int plat_state;
+
+       /* Cannot go beyond this level */
+       assert(system_node->level == MPIDR_AFFLVL2);
+
+       /* State management: Mark the system as off */
+       psci_set_state(system_node, PSCI_STATE_OFF);
+
+       /*
+        * Keep the physical state of the system handy to decide what
+        * action needs to be taken
+        */
+       plat_state = psci_get_phys_state(system_node);
+
+       /* No arch. and generic bookkeeping to do here currently */
+
+       /*
+        * Plat. Management : Allow the platform to do its bookkeeping
+        * at this affinity level
+        */
+       if (psci_plat_pm_ops->affinst_off)
+               rc = psci_plat_pm_ops->affinst_off(mpidr,
+                                                  system_node->level,
+                                                  plat_state);
+       return rc;
+}
+
+static const afflvl_off_handler psci_afflvl_off_handlers[] = {
+       psci_afflvl0_off,
+       psci_afflvl1_off,
+       psci_afflvl2_off,
+};
+
+/*******************************************************************************
+ * This function takes an array of pointers to affinity instance nodes in the
+ * topology tree and calls the off handler for the corresponding affinity
+ * levels
+ ******************************************************************************/
+static int psci_call_off_handlers(mpidr_aff_map_nodes mpidr_nodes,
+                                 int start_afflvl,
+                                 int end_afflvl,
+                                 unsigned long mpidr)
+{
+       int rc = PSCI_E_INVALID_PARAMS, level;
+       aff_map_node *node;
+
+       for (level = start_afflvl; level <= end_afflvl; level++) {
+               node = mpidr_nodes[level];
+               if (node == NULL)
+                       continue;
+
+               /*
+                * TODO: In case of an error should there be a way
+                * of restoring what we might have torn down at
+                * lower affinity levels.
+                */
+               rc = psci_afflvl_off_handlers[level](mpidr, node);
+               if (rc != PSCI_E_SUCCESS)
+                       break;
+       }
+
+       return rc;
+}
+
+/*******************************************************************************
+ * Top level handler which is called when a cpu wants to power itself down.
+ * It's assumed that along with turning the cpu off, higher affinity levels will
+ * be turned off as far as possible. It traverses through all the affinity
+ * levels performing generic, architectural, platform setup and state management
+ * e.g. for a cluster that's to be powered off, it will call the platform
+ * specific code which will disable coherency at the interconnect level if the
+ * cpu is the last in the cluster. For a cpu it could mean programming the
+ * power controller etc.
+ *
+ * The state of all the relevant affinity levels is changed prior to calling the
+ * affinity level specific handlers as their actions would depend upon the state
+ * the affinity level is about to enter.
+ *
+ * The affinity level specific handlers are called in ascending order i.e. from
+ * the lowest to the highest affinity level implemented by the platform because
+ * to turn off affinity level X it is necessary to turn off affinity level X - 1
+ * first.
+ *
+ * CAUTION: This function is called with coherent stacks so that coherency can
+ * be turned off and caches can be flushed safely.
+ ******************************************************************************/
+int psci_afflvl_off(unsigned long mpidr,
+                   int start_afflvl,
+                   int end_afflvl)
+{
+       int rc = PSCI_E_SUCCESS;
+       mpidr_aff_map_nodes mpidr_nodes;
+
+       mpidr &= MPIDR_AFFINITY_MASK;
+
+       /*
+        * Collect the pointers to the nodes in the topology tree for
+        * each affinity instance in the mpidr. If this function does
+        * not return successfully then either the mpidr or the affinity
+        * levels are incorrect. In either case, we cannot return back
+        * to the caller as it would not know what to do.
+        */
+       rc = psci_get_aff_map_nodes(mpidr,
+                                   start_afflvl,
+                                   end_afflvl,
+                                   mpidr_nodes);
+       assert (rc == PSCI_E_SUCCESS);
+
+       /*
+        * This function acquires the lock corresponding to each affinity
+        * level so that by the time all locks are taken, the system topology
+        * is snapshot and state management can be done safely.
+        */
+       psci_acquire_afflvl_locks(mpidr,
+                                 start_afflvl,
+                                 end_afflvl,
+                                 mpidr_nodes);
+
+       /* Perform generic, architecture and platform specific handling */
+       rc = psci_call_off_handlers(mpidr_nodes,
+                                   start_afflvl,
+                                   end_afflvl,
+                                   mpidr);
+
+       /*
+        * Release the locks corresponding to each affinity level in the
+        * reverse order to which they were acquired.
+        */
+       psci_release_afflvl_locks(mpidr,
+                                 start_afflvl,
+                                 end_afflvl,
+                                 mpidr_nodes);
+
+       return rc;
+}
diff --git a/services/std_svc/psci/psci_afflvl_on.c b/services/std_svc/psci/psci_afflvl_on.c
new file mode 100644 (file)
index 0000000..0878f21
--- /dev/null
@@ -0,0 +1,485 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <debug.h>
+#include <arch_helpers.h>
+#include <console.h>
+#include <platform.h>
+#include <psci.h>
+#include <psci_private.h>
+#include <context_mgmt.h>
+
+typedef int (*afflvl_on_handler)(unsigned long,
+                                aff_map_node *,
+                                unsigned long,
+                                unsigned long);
+
+/*******************************************************************************
+ * This function checks whether a cpu which has been requested to be turned on
+ * is OFF to begin with.
+ ******************************************************************************/
+static int cpu_on_validate_state(aff_map_node *node)
+{
+       unsigned int psci_state;
+
+       /* Get the raw psci state */
+       psci_state = psci_get_state(node);
+
+       if (psci_state == PSCI_STATE_ON || psci_state == PSCI_STATE_SUSPEND)
+               return PSCI_E_ALREADY_ON;
+
+       if (psci_state == PSCI_STATE_ON_PENDING)
+               return PSCI_E_ON_PENDING;
+
+       assert(psci_state == PSCI_STATE_OFF);
+       return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * Handler routine to turn a cpu on. It takes care of any generic, architectural
+ * or platform specific setup required.
+ * TODO: Split this code across separate handlers for each type of setup?
+ ******************************************************************************/
+static int psci_afflvl0_on(unsigned long target_cpu,
+                          aff_map_node *cpu_node,
+                          unsigned long ns_entrypoint,
+                          unsigned long context_id)
+{
+       unsigned int index, plat_state;
+       unsigned long psci_entrypoint;
+       int rc;
+
+       /* Sanity check to safeguard against data corruption */
+       assert(cpu_node->level == MPIDR_AFFLVL0);
+
+       /*
+        * Generic management: Ensure that the cpu is off to be
+        * turned on
+        */
+       rc = cpu_on_validate_state(cpu_node);
+       if (rc != PSCI_E_SUCCESS)
+               return rc;
+
+       /*
+        * Call the cpu on handler registered by the Secure Payload Dispatcher
+        * to let it do any bookkeeping. If the handler encounters an error, it's
+        * expected to assert within
+        */
+       if (psci_spd_pm && psci_spd_pm->svc_on)
+               psci_spd_pm->svc_on(target_cpu);
+
+       /*
+        * Arch. management: Derive the re-entry information for
+        * the non-secure world from the non-secure state from
+        * where this call originated.
+        */
+       index = cpu_node->data;
+       rc = psci_set_ns_entry_info(index, ns_entrypoint, context_id);
+       if (rc != PSCI_E_SUCCESS)
+               return rc;
+
+       /* Set the secure world (EL3) re-entry point after BL1 */
+       psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
+
+       /* State management: Set this cpu's state as ON PENDING */
+       psci_set_state(cpu_node, PSCI_STATE_ON_PENDING);
+
+       /*
+        * Plat. management: Give the platform the current state
+        * of the target cpu to allow it to perform the necessary
+        * steps to power on.
+        */
+       if (psci_plat_pm_ops->affinst_on) {
+
+               /* Get the current physical state of this cpu */
+               plat_state = psci_get_phys_state(cpu_node);
+               rc = psci_plat_pm_ops->affinst_on(target_cpu,
+                                                 psci_entrypoint,
+                                                 ns_entrypoint,
+                                                 cpu_node->level,
+                                                 plat_state);
+       }
+
+       return rc;
+}
+
+/*******************************************************************************
+ * Handler routine to turn a cluster on. It takes care of any generic, arch.
+ * or platform specific setup required.
+ * TODO: Split this code across separate handlers for each type of setup?
+ ******************************************************************************/
+static int psci_afflvl1_on(unsigned long target_cpu,
+                          aff_map_node *cluster_node,
+                          unsigned long ns_entrypoint,
+                          unsigned long context_id)
+{
+       int rc = PSCI_E_SUCCESS;
+       unsigned int plat_state;
+       unsigned long psci_entrypoint;
+
+       assert(cluster_node->level == MPIDR_AFFLVL1);
+
+       /*
+        * There is no generic and arch. specific cluster
+        * management required
+        */
+
+       /* State management: Is not required while turning a cluster on */
+
+       /*
+        * Plat. management: Give the platform the current state
+        * of the target cpu to allow it to perform the necessary
+        * steps to power on.
+        */
+       if (psci_plat_pm_ops->affinst_on) {
+               plat_state = psci_get_phys_state(cluster_node);
+               psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
+               rc = psci_plat_pm_ops->affinst_on(target_cpu,
+                                                 psci_entrypoint,
+                                                 ns_entrypoint,
+                                                 cluster_node->level,
+                                                 plat_state);
+       }
+
+       return rc;
+}
+
+/*******************************************************************************
+ * Handler routine to turn a cluster of clusters on. It takes care of any
+ * generic, arch. or platform specific setup required.
+ * TODO: Split this code across separate handlers for each type of setup?
+ ******************************************************************************/
+static int psci_afflvl2_on(unsigned long target_cpu,
+                          aff_map_node *system_node,
+                          unsigned long ns_entrypoint,
+                          unsigned long context_id)
+{
+       int rc = PSCI_E_SUCCESS;
+       unsigned int plat_state;
+       unsigned long psci_entrypoint;
+
+       /* Cannot go beyond affinity level 2 in this psci imp. */
+       assert(system_node->level == MPIDR_AFFLVL2);
+
+       /*
+        * There is no generic and arch. specific system management
+        * required
+        */
+
+       /* State management: Is not required while turning a system on */
+
+       /*
+        * Plat. management: Give the platform the current state
+        * of the target cpu to allow it to perform the necessary
+        * steps to power on.
+        */
+       if (psci_plat_pm_ops->affinst_on) {
+               plat_state = psci_get_phys_state(system_node);
+               psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
+               rc = psci_plat_pm_ops->affinst_on(target_cpu,
+                                                 psci_entrypoint,
+                                                 ns_entrypoint,
+                                                 system_node->level,
+                                                 plat_state);
+       }
+
+       return rc;
+}
+
+/* Private data structure to make these handlers accessible through indexing */
+static const afflvl_on_handler psci_afflvl_on_handlers[] = {
+       psci_afflvl0_on,
+       psci_afflvl1_on,
+       psci_afflvl2_on,
+};
+
+/*******************************************************************************
+ * This function takes an array of pointers to affinity instance nodes in the
+ * topology tree and calls the on handler for the corresponding affinity
+ * levels
+ ******************************************************************************/
+static int psci_call_on_handlers(mpidr_aff_map_nodes target_cpu_nodes,
+                                int start_afflvl,
+                                int end_afflvl,
+                                unsigned long target_cpu,
+                                unsigned long entrypoint,
+                                unsigned long context_id)
+{
+       int rc = PSCI_E_INVALID_PARAMS, level;
+       aff_map_node *node;
+
+       for (level = end_afflvl; level >= start_afflvl; level--) {
+               node = target_cpu_nodes[level];
+               if (node == NULL)
+                       continue;
+
+               /*
+                * TODO: In case of an error should there be a way
+                * of undoing what we might have setup at higher
+                * affinity levels.
+                */
+               rc = psci_afflvl_on_handlers[level](target_cpu,
+                                                   node,
+                                                   entrypoint,
+                                                   context_id);
+               if (rc != PSCI_E_SUCCESS)
+                       break;
+       }
+
+       return rc;
+}
+
+/*******************************************************************************
+ * Generic handler which is called to physically power on a cpu identified by
+ * its mpidr. It traverses through all the affinity levels performing generic,
+ * architectural, platform setup and state management e.g. for a cpu that is
+ * to be powered on, it will ensure that enough information is stashed for it
+ * to resume execution in the non-secure security state.
+ *
+ * The state of all the relevant affinity levels is changed after calling the
+ * affinity level specific handlers as their actions would depend upon the state
+ * the affinity level is currently in.
+ *
+ * The affinity level specific handlers are called in descending order i.e. from
+ * the highest to the lowest affinity level implemented by the platform because
+ * to turn on affinity level X it is necessary to turn on affinity level X + 1
+ * first.
+ ******************************************************************************/
+int psci_afflvl_on(unsigned long target_cpu,
+                  unsigned long entrypoint,
+                  unsigned long context_id,
+                  int start_afflvl,
+                  int end_afflvl)
+{
+       int rc = PSCI_E_SUCCESS;
+       mpidr_aff_map_nodes target_cpu_nodes;
+       unsigned long mpidr = read_mpidr() & MPIDR_AFFINITY_MASK;
+
+       /*
+        * Collect the pointers to the nodes in the topology tree for
+        * each affinity instance in the mpidr. If this function does
+        * not return successfully then either the mpidr or the affinity
+        * levels are incorrect.
+        */
+       rc = psci_get_aff_map_nodes(target_cpu,
+                                   start_afflvl,
+                                   end_afflvl,
+                                   target_cpu_nodes);
+       if (rc != PSCI_E_SUCCESS)
+               return rc;
+
+
+       /*
+        * This function acquires the lock corresponding to each affinity
+        * level so that by the time all locks are taken, the system topology
+        * is snapshot and state management can be done safely.
+        */
+       psci_acquire_afflvl_locks(mpidr,
+                                 start_afflvl,
+                                 end_afflvl,
+                                 target_cpu_nodes);
+
+       /* Perform generic, architecture and platform specific handling. */
+       rc = psci_call_on_handlers(target_cpu_nodes,
+                                  start_afflvl,
+                                  end_afflvl,
+                                  target_cpu,
+                                  entrypoint,
+                                  context_id);
+
+       /*
+        * This loop releases the lock corresponding to each affinity level
+        * in the reverse order to which they were acquired.
+        */
+       psci_release_afflvl_locks(mpidr,
+                                 start_afflvl,
+                                 end_afflvl,
+                                 target_cpu_nodes);
+
+       return rc;
+}
+
+/*******************************************************************************
+ * The following functions finish an earlier affinity power on request. They
+ * are called by the common finisher routine in psci_common.c.
+ ******************************************************************************/
+static unsigned int psci_afflvl0_on_finish(unsigned long mpidr,
+                                          aff_map_node *cpu_node)
+{
+       unsigned int index, plat_state, state, rc = PSCI_E_SUCCESS;
+
+       assert(cpu_node->level == MPIDR_AFFLVL0);
+
+       /* Ensure we have been explicitly woken up by another cpu */
+       state = psci_get_state(cpu_node);
+       assert(state == PSCI_STATE_ON_PENDING);
+
+       /*
+        * Plat. management: Perform the platform specific actions
+        * for this cpu e.g. enabling the gic or zeroing the mailbox
+        * register. The actual state of this cpu has already been
+        * changed.
+        */
+       if (psci_plat_pm_ops->affinst_on_finish) {
+
+               /* Get the physical state of this cpu */
+               plat_state = get_phys_state(state);
+               rc = psci_plat_pm_ops->affinst_on_finish(mpidr,
+                                                        cpu_node->level,
+                                                        plat_state);
+               assert(rc == PSCI_E_SUCCESS);
+       }
+
+       /*
+        * Arch. management: Turn on mmu & restore architectural state
+        */
+       enable_mmu();
+
+       /*
+        * All the platform specific actions for turning this cpu
+        * on have completed. Perform enough arch.initialization
+        * to run in the non-secure address space.
+        */
+       bl31_arch_setup();
+
+       /*
+        * Use the more complex exception vectors to enable SPD
+        * initialisation. SP_EL3 should point to a 'cpu_context'
+        * structure which has an exception stack allocated. The
+        * calling cpu should have set the context already
+        */
+       assert(cm_get_context(mpidr, NON_SECURE));
+       cm_set_next_eret_context(NON_SECURE);
+       write_vbar_el3((uint64_t) runtime_exceptions);
+
+       /*
+        * Call the cpu on finish handler registered by the Secure Payload
+        * Dispatcher to let it do any bookkeeping. If the handler encounters an
+        * error, it's expected to assert within
+        */
+       if (psci_spd_pm && psci_spd_pm->svc_on_finish)
+               psci_spd_pm->svc_on_finish(0);
+
+       /*
+        * Generic management: Now we just need to retrieve the
+        * information that we had stashed away during the cpu_on
+        * call to set this cpu on its way. First get the index
+        * for restoring the re-entry info
+        */
+       index = cpu_node->data;
+       psci_get_ns_entry_info(index);
+
+       /* State management: mark this cpu as on */
+       psci_set_state(cpu_node, PSCI_STATE_ON);
+
+       /* Clean caches before re-entering normal world */
+       dcsw_op_louis(DCCSW);
+
+       return rc;
+}
+
+static unsigned int psci_afflvl1_on_finish(unsigned long mpidr,
+                                          aff_map_node *cluster_node)
+{
+       unsigned int plat_state, rc = PSCI_E_SUCCESS;
+
+       assert(cluster_node->level == MPIDR_AFFLVL1);
+
+       /*
+        * Plat. management: Perform the platform specific actions
+        * as per the old state of the cluster e.g. enabling
+        * coherency at the interconnect depends upon the state with
+        * which this cluster was powered up. If anything goes wrong
+        * then assert as there is no way to recover from this
+        * situation.
+        */
+       if (psci_plat_pm_ops->affinst_on_finish) {
+
+               /* Get the physical state of this cluster */
+               plat_state = psci_get_phys_state(cluster_node);
+               rc = psci_plat_pm_ops->affinst_on_finish(mpidr,
+                                                        cluster_node->level,
+                                                        plat_state);
+               assert(rc == PSCI_E_SUCCESS);
+       }
+
+       /* State management: Mark this cluster as on */
+       psci_set_state(cluster_node, PSCI_STATE_ON);
+
+       return rc;
+}
+
+
+static unsigned int psci_afflvl2_on_finish(unsigned long mpidr,
+                                          aff_map_node *system_node)
+{
+       unsigned int plat_state, rc = PSCI_E_SUCCESS;
+
+       /* Cannot go beyond this affinity level */
+       assert(system_node->level == MPIDR_AFFLVL2);
+
+       /*
+        * Currently, there are no architectural actions to perform
+        * at the system level.
+        */
+
+       /*
+        * Plat. management: Perform the platform specific actions
+        * as per the old state of the cluster e.g. enabling
+        * coherency at the interconnect depends upon the state with
+        * which this cluster was powered up. If anything goes wrong
+        * then assert as there is no way to recover from this
+        * situation.
+        */
+       if (psci_plat_pm_ops->affinst_on_finish) {
+
+               /* Get the physical state of the system */
+               plat_state = psci_get_phys_state(system_node);
+               rc = psci_plat_pm_ops->affinst_on_finish(mpidr,
+                                                        system_node->level,
+                                                        plat_state);
+               assert(rc == PSCI_E_SUCCESS);
+       }
+
+       /* State management: Mark the system as on */
+       psci_set_state(system_node, PSCI_STATE_ON);
+
+       return rc;
+}
+
+const afflvl_power_on_finisher psci_afflvl_on_finishers[] = {
+       psci_afflvl0_on_finish,
+       psci_afflvl1_on_finish,
+       psci_afflvl2_on_finish,
+};
+
diff --git a/services/std_svc/psci/psci_afflvl_suspend.c b/services/std_svc/psci/psci_afflvl_suspend.c
new file mode 100644 (file)
index 0000000..138d033
--- /dev/null
@@ -0,0 +1,557 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <debug.h>
+#include <arch_helpers.h>
+#include <console.h>
+#include <platform.h>
+#include <psci.h>
+#include <psci_private.h>
+#include <context_mgmt.h>
+
+typedef int (*afflvl_suspend_handler)(unsigned long,
+                                     aff_map_node *,
+                                     unsigned long,
+                                     unsigned long,
+                                     unsigned int);
+
+/*******************************************************************************
+ * This function sets the affinity level till which the current cpu is being
+ * powered down to during a cpu_suspend call
+ ******************************************************************************/
+/*******************************************************************************
+ * This function sets the affinity level till which the current cpu is being
+ * powered down to during a cpu_suspend call. 'afflvl' is stashed in this
+ * cpu's entry of the per-cpu suspend context array, indexed by 'node->data'.
+ ******************************************************************************/
+void psci_set_suspend_afflvl(aff_map_node *node, int afflvl)
+{
+       /*
+        * Check that nobody else is calling this function on our behalf &
+        * this information is being set only in the cpu node
+        */
+       assert(node->mpidr == (read_mpidr() & MPIDR_AFFINITY_MASK));
+       assert(node->level == MPIDR_AFFLVL0);
+
+       /*
+        * Store the affinity level we are powering down to in our context.
+        * The cache flush in the suspend code will ensure that this info
+        * is available immediately upon resuming.
+        */
+       psci_suspend_context[node->data].suspend_level = afflvl;
+}
+
+/*******************************************************************************
+ * This function gets the affinity level till which the current cpu was powered
+ * down during a cpu_suspend call.
+ ******************************************************************************/
+/*******************************************************************************
+ * This function gets the affinity level till which the current cpu was powered
+ * down during a cpu_suspend call. The value was stashed earlier by
+ * psci_set_suspend_afflvl() in the per-cpu suspend context array.
+ ******************************************************************************/
+int psci_get_suspend_afflvl(aff_map_node *node)
+{
+       /* Return the target affinity level */
+       return psci_suspend_context[node->data].suspend_level;
+}
+
+/*******************************************************************************
+ * The next three functions implement a handler for each supported affinity
+ * level which is called when that affinity level is about to be suspended.
+ ******************************************************************************/
+/*******************************************************************************
+ * Suspend handler for affinity level 0 (cpu). Notifies the SPD, stashes the
+ * non-secure re-entry information, saves the EL3 context, exits coherency and
+ * finally hands over to the platform to program the power controller. Runs
+ * with caches being turned off, hence the strict statement ordering.
+ ******************************************************************************/
+static int psci_afflvl0_suspend(unsigned long mpidr,
+                               aff_map_node *cpu_node,
+                               unsigned long ns_entrypoint,
+                               unsigned long context_id,
+                               unsigned int power_state)
+{
+       unsigned int index, plat_state;
+       unsigned long psci_entrypoint, sctlr = read_sctlr();
+       el3_state *saved_el3_state;
+       int rc = PSCI_E_SUCCESS;
+
+       /* Sanity check to safeguard against data corruption */
+       assert(cpu_node->level == MPIDR_AFFLVL0);
+
+       /*
+        * Generic management: Store the re-entry information for the non-secure
+        * world and allow the secure world to suspend itself
+        */
+
+       /*
+        * Call the cpu suspend handler registered by the Secure Payload
+        * Dispatcher to let it do any bookkeeping. If the handler encounters an
+        * error, it's expected to assert within
+        */
+       if (psci_spd_pm && psci_spd_pm->svc_suspend)
+               psci_spd_pm->svc_suspend(power_state);
+
+       /* State management: mark this cpu as suspended */
+       psci_set_state(cpu_node, PSCI_STATE_SUSPEND);
+
+       /*
+        * Generic management: Store the re-entry information for the
+        * non-secure world
+        */
+       index = cpu_node->data;
+       rc = psci_set_ns_entry_info(index, ns_entrypoint, context_id);
+       if (rc != PSCI_E_SUCCESS)
+               return rc;
+
+       /*
+        * Arch. management: Save the EL3 state in the 'cpu_context'
+        * structure that has been allocated for this cpu, flush the
+        * L1 caches and exit intra-cluster coherency et al
+        */
+       cm_el3_sysregs_context_save(NON_SECURE);
+       rc = PSCI_E_SUCCESS;
+
+       /*
+        * Flush the EL3 state to PoC since it will be accessed after a
+        * reset with the caches turned off
+        */
+       saved_el3_state = get_el3state_ctx(cm_get_context(mpidr, NON_SECURE));
+       flush_dcache_range((uint64_t) saved_el3_state, sizeof(*saved_el3_state));
+
+       /* Set the secure world (EL3) re-entry point after BL1 */
+       psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
+
+       /*
+        * Arch. management. Perform the necessary steps to flush all
+        * cpu caches.
+        *
+        * TODO: This power down sequence varies across cpus so it needs to be
+        * abstracted out on the basis of the MIDR like in cpu_reset_handler().
+        * Do the bare minimal for the time being. Fix this before porting to
+        * Cortex models.
+        */
+       sctlr &= ~SCTLR_C_BIT;
+       write_sctlr(sctlr);
+
+       /*
+        * CAUTION: This flush to the level of unification makes an assumption
+        * about the cache hierarchy at affinity level 0 (cpu) in the platform.
+        * Ideally the platform should tell psci which levels to flush to exit
+        * coherency.
+        */
+       dcsw_op_louis(DCCISW);
+
+       /*
+        * Plat. management: Allow the platform to perform the
+        * necessary actions to turn off this cpu e.g. set the
+        * platform defined mailbox with the psci entrypoint,
+        * program the power controller etc.
+        */
+       if (psci_plat_pm_ops->affinst_suspend) {
+               plat_state = psci_get_phys_state(cpu_node);
+               rc = psci_plat_pm_ops->affinst_suspend(mpidr,
+                                                      psci_entrypoint,
+                                                      ns_entrypoint,
+                                                      cpu_node->level,
+                                                      plat_state);
+       }
+
+       return rc;
+}
+
+/*******************************************************************************
+ * Suspend handler for affinity level 1 (cluster). Flushes all cache levels to
+ * PoC if the cluster is to be shutdown and lets the platform do its cluster
+ * level bookkeeping.
+ ******************************************************************************/
+static int psci_afflvl1_suspend(unsigned long mpidr,
+                               aff_map_node *cluster_node,
+                               unsigned long ns_entrypoint,
+                               unsigned long context_id,
+                               unsigned int power_state)
+{
+       int rc = PSCI_E_SUCCESS;
+       unsigned int plat_state;
+       unsigned long psci_entrypoint;
+
+       /* Sanity check the cluster level */
+       assert(cluster_node->level == MPIDR_AFFLVL1);
+
+       /* State management: mark this cluster as suspended */
+       psci_set_state(cluster_node, PSCI_STATE_SUSPEND);
+
+       /*
+        * Keep the physical state of this cluster handy to decide
+        * what action needs to be taken
+        */
+       plat_state = psci_get_phys_state(cluster_node);
+
+       /*
+        * Arch. management: Flush all levels of caches to PoC if the
+        * cluster is to be shutdown
+        */
+       if (plat_state == PSCI_STATE_OFF)
+               dcsw_op_all(DCCISW);
+
+       /*
+        * Plat. Management. Allow the platform to do its cluster
+        * specific bookkeeping e.g. turn off interconnect coherency,
+        * program the power controller etc.
+        */
+       if (psci_plat_pm_ops->affinst_suspend) {
+
+               /*
+                * Sending the psci entrypoint is currently redundant
+                * beyond affinity level 0 but one never knows what a
+                * platform might do. Also it allows us to keep the
+                * platform handler prototype the same.
+                */
+               psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
+               rc = psci_plat_pm_ops->affinst_suspend(mpidr,
+                                                      psci_entrypoint,
+                                                      ns_entrypoint,
+                                                      cluster_node->level,
+                                                      plat_state);
+       }
+
+       return rc;
+}
+
+
+/*******************************************************************************
+ * Suspend handler for affinity level 2 (system). Lets the platform do its
+ * system level bookkeeping as per the new physical state.
+ ******************************************************************************/
+static int psci_afflvl2_suspend(unsigned long mpidr,
+                               aff_map_node *system_node,
+                               unsigned long ns_entrypoint,
+                               unsigned long context_id,
+                               unsigned int power_state)
+{
+       int rc = PSCI_E_SUCCESS;
+       unsigned int plat_state;
+       unsigned long psci_entrypoint;
+
+       /* Cannot go beyond this */
+       assert(system_node->level == MPIDR_AFFLVL2);
+
+       /* State management: mark the system as suspended */
+       psci_set_state(system_node, PSCI_STATE_SUSPEND);
+
+       /*
+        * Keep the physical state of the system handy to decide what
+        * action needs to be taken
+        */
+       plat_state = psci_get_phys_state(system_node);
+
+       /*
+        * Plat. Management : Allow the platform to do its bookkeeping
+        * at this affinity level
+        */
+       if (psci_plat_pm_ops->affinst_suspend) {
+
+               /*
+                * Sending the psci entrypoint is currently redundant
+                * beyond affinity level 0 but one never knows what a
+                * platform might do. Also it allows us to keep the
+                * platform handler prototype the same.
+                */
+               psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
+               rc = psci_plat_pm_ops->affinst_suspend(mpidr,
+                                                      psci_entrypoint,
+                                                      ns_entrypoint,
+                                                      system_node->level,
+                                                      plat_state);
+       }
+
+       return rc;
+}
+
+/* Suspend handlers indexed by affinity level (0 = cpu, 1 = cluster, 2 = system) */
+static const afflvl_suspend_handler psci_afflvl_suspend_handlers[] = {
+       psci_afflvl0_suspend,
+       psci_afflvl1_suspend,
+       psci_afflvl2_suspend,
+};
+
+/*******************************************************************************
+ * This function takes an array of pointers to affinity instance nodes in the
+ * topology tree and calls the suspend handler for the corresponding affinity
+ * levels
+ ******************************************************************************/
+/*******************************************************************************
+ * This function takes an array of pointers to affinity instance nodes in the
+ * topology tree and calls the suspend handler for the corresponding affinity
+ * levels. Returns PSCI_E_INVALID_PARAMS if every node in the range was absent
+ * i.e. no handler was invoked at all.
+ ******************************************************************************/
+static int psci_call_suspend_handlers(mpidr_aff_map_nodes mpidr_nodes,
+                                     int start_afflvl,
+                                     int end_afflvl,
+                                     unsigned long mpidr,
+                                     unsigned long entrypoint,
+                                     unsigned long context_id,
+                                     unsigned int power_state)
+{
+       int rc = PSCI_E_INVALID_PARAMS, level;
+       aff_map_node *node;
+
+       for (level = start_afflvl; level <= end_afflvl; level++) {
+               node = mpidr_nodes[level];
+               /* Skip affinity levels which are absent for this mpidr */
+               if (node == NULL)
+                       continue;
+
+               /*
+                * TODO: In case of an error should there be a way
+                * of restoring what we might have torn down at
+                * lower affinity levels.
+                */
+               rc = psci_afflvl_suspend_handlers[level](mpidr,
+                                                        node,
+                                                        entrypoint,
+                                                        context_id,
+                                                        power_state);
+               if (rc != PSCI_E_SUCCESS)
+                       break;
+       }
+
+       return rc;
+}
+
+/*******************************************************************************
+ * Top level handler which is called when a cpu wants to suspend its execution.
+ * It is assumed that along with turning the cpu off, higher affinity levels
+ * until the target affinity level will be turned off as well. It traverses
+ * through all the affinity levels performing generic, architectural, platform
+ * setup and state management e.g. for a cluster that's to be suspended, it will
+ * call the platform specific code which will disable coherency at the
+ * interconnect level if the cpu is the last in the cluster. For a cpu it could
+ * mean programming the power controller etc.
+ *
+ * The state of all the relevant affinity levels is changed prior to calling the
+ * affinity level specific handlers as their actions would depend upon the state
+ * the affinity level is about to enter.
+ *
+ * The affinity level specific handlers are called in ascending order i.e. from
+ * the lowest to the highest affinity level implemented by the platform because
+ * to turn off affinity level X it is neccesary to turn off affinity level X - 1
+ * first.
+ *
+ * CAUTION: This function is called with coherent stacks so that coherency can
+ * be turned off and caches can be flushed safely.
+ ******************************************************************************/
+int psci_afflvl_suspend(unsigned long mpidr,
+                       unsigned long entrypoint,
+                       unsigned long context_id,
+                       unsigned int power_state,
+                       int start_afflvl,
+                       int end_afflvl)
+{
+       int rc = PSCI_E_SUCCESS;
+       mpidr_aff_map_nodes mpidr_nodes;
+
+       /* Ignore the non-affinity bits of the mpidr */
+       mpidr &= MPIDR_AFFINITY_MASK;
+
+       /*
+        * Collect the pointers to the nodes in the topology tree for
+        * each affinity instance in the mpidr. If this function does
+        * not return successfully then either the mpidr or the affinity
+        * levels are incorrect. Absent levels are left as NULL entries.
+        */
+       rc = psci_get_aff_map_nodes(mpidr,
+                                   start_afflvl,
+                                   end_afflvl,
+                                   mpidr_nodes);
+       if (rc != PSCI_E_SUCCESS)
+               return rc;
+
+       /*
+        * This function acquires the lock corresponding to each affinity
+        * level so that by the time all locks are taken, the system topology
+        * is snapshot and state management can be done safely.
+        */
+       psci_acquire_afflvl_locks(mpidr,
+                                 start_afflvl,
+                                 end_afflvl,
+                                 mpidr_nodes);
+
+
+       /* Save the affinity level till which this cpu can be powered down */
+       psci_set_suspend_afflvl(mpidr_nodes[MPIDR_AFFLVL0], end_afflvl);
+
+       /* Perform generic, architecture and platform specific handling */
+       rc = psci_call_suspend_handlers(mpidr_nodes,
+                                       start_afflvl,
+                                       end_afflvl,
+                                       mpidr,
+                                       entrypoint,
+                                       context_id,
+                                       power_state);
+
+       /*
+        * Release the locks corresponding to each affinity level in the
+        * reverse order to which they were acquired.
+        */
+       psci_release_afflvl_locks(mpidr,
+                                 start_afflvl,
+                                 end_afflvl,
+                                 mpidr_nodes);
+
+       return rc;
+}
+
+/*******************************************************************************
+ * The following functions finish an earlier affinity suspend request. They
+ * are called by the common finisher routine in psci_common.c.
+ ******************************************************************************/
+/*******************************************************************************
+ * Finish an earlier suspend request at affinity level 0 (cpu). Restores the
+ * stashed EL3 context, notifies the SPD and retrieves the non-secure re-entry
+ * information before this cpu re-enters the normal world.
+ ******************************************************************************/
+static unsigned int psci_afflvl0_suspend_finish(unsigned long mpidr,
+                                               aff_map_node *cpu_node)
+{
+       unsigned int index, plat_state, state, rc = PSCI_E_SUCCESS;
+       int32_t suspend_level;
+
+       assert(cpu_node->level == MPIDR_AFFLVL0);
+
+       /* Ensure we have been woken up from a suspended state */
+       state = psci_get_state(cpu_node);
+       assert(state == PSCI_STATE_SUSPEND);
+
+       /*
+        * Plat. management: Perform the platform specific actions
+        * before we change the state of the cpu e.g. enabling the
+        * gic or zeroing the mailbox register. If anything goes
+        * wrong then assert as there is no way to recover from this
+        * situation.
+        */
+       if (psci_plat_pm_ops->affinst_suspend_finish) {
+
+               /* Get the physical state of this cpu */
+               plat_state = get_phys_state(state);
+               rc = psci_plat_pm_ops->affinst_suspend_finish(mpidr,
+                                                             cpu_node->level,
+                                                             plat_state);
+               assert(rc == PSCI_E_SUCCESS);
+       }
+
+       /* Get the index for restoring the re-entry information */
+       index = cpu_node->data;
+
+       /*
+        * Arch. management: Restore the stashed EL3 architectural
+        * context from the 'cpu_context' structure for this cpu.
+        */
+       cm_el3_sysregs_context_restore(NON_SECURE);
+       rc = PSCI_E_SUCCESS;
+
+       /*
+        * Use the more complex exception vectors to enable SPD
+        * initialisation. SP_EL3 should point to a 'cpu_context'
+        * structure which has an exception stack allocated. The
+        * non-secure context should have been set on this cpu
+        * prior to suspension.
+        */
+       assert(cm_get_context(mpidr, NON_SECURE));
+       cm_set_next_eret_context(NON_SECURE);
+       write_vbar_el3((uint64_t) runtime_exceptions);
+
+       /*
+        * Call the cpu suspend finish handler registered by the Secure Payload
+        * Dispatcher to let it do any bookkeeping. If the handler encounters an
+        * error, it's expected to assert within.
+        *
+        * NOTE: The hook that is about to be invoked must itself be checked.
+        * Testing 'svc_suspend' here (as before) would call a NULL
+        * 'svc_suspend_finish' for an SPD that registers only the former.
+        */
+       if (psci_spd_pm && psci_spd_pm->svc_suspend_finish) {
+               suspend_level = psci_get_suspend_afflvl(cpu_node);
+               psci_spd_pm->svc_suspend_finish(suspend_level);
+       }
+
+       /*
+        * Generic management: Now we just need to retrieve the
+        * information that we had stashed away during the suspend
+        * call to set this cpu on its way.
+        */
+       psci_get_ns_entry_info(index);
+
+       /* State management: mark this cpu as on */
+       psci_set_state(cpu_node, PSCI_STATE_ON);
+
+       /* Clean caches before re-entering normal world */
+       dcsw_op_louis(DCCSW);
+
+       return rc;
+}
+
+/*******************************************************************************
+ * Finish an earlier suspend request at affinity level 1 (cluster). Performs
+ * the platform specific finish actions as per the old cluster state and then
+ * marks the cluster node as on.
+ ******************************************************************************/
+static unsigned int psci_afflvl1_suspend_finish(unsigned long mpidr,
+                                               aff_map_node *cluster_node)
+{
+       unsigned int plat_state, rc = PSCI_E_SUCCESS;
+
+       assert(cluster_node->level == MPIDR_AFFLVL1);
+
+       /*
+        * Plat. management: Perform the platform specific actions
+        * as per the old state of the cluster e.g. enabling
+        * coherency at the interconnect depends upon the state with
+        * which this cluster was powered up. If anything goes wrong
+        * then assert as there is no way to recover from this
+        * situation.
+        */
+       if (psci_plat_pm_ops->affinst_suspend_finish) {
+
+               /* Get the physical state of this cluster */
+               plat_state = psci_get_phys_state(cluster_node);
+               rc = psci_plat_pm_ops->affinst_suspend_finish(mpidr,
+                                                             cluster_node->level,
+                                                             plat_state);
+               assert(rc == PSCI_E_SUCCESS);
+       }
+
+       /* State management: mark the cluster affinity instance as on */
+       psci_set_state(cluster_node, PSCI_STATE_ON);
+
+       return rc;
+}
+
+
+/*******************************************************************************
+ * Finish an earlier suspend request at affinity level 2 (system). Performs
+ * the platform specific finish actions as per the old system state and then
+ * marks the system node as on.
+ ******************************************************************************/
+static unsigned int psci_afflvl2_suspend_finish(unsigned long mpidr,
+                                               aff_map_node *system_node)
+{
+       /* Stray double semicolon removed from the declaration below */
+       unsigned int plat_state, rc = PSCI_E_SUCCESS;
+
+       /* Cannot go beyond this affinity level */
+       assert(system_node->level == MPIDR_AFFLVL2);
+
+       /*
+        * Currently, there are no architectural actions to perform
+        * at the system level.
+        */
+
+       /*
+        * Plat. management: Perform the platform specific actions
+        * as per the old state of the system e.g. enabling
+        * coherency at the interconnect depends upon the state with
+        * which this system was powered up. If anything goes wrong
+        * then assert as there is no way to recover from this
+        * situation.
+        */
+       if (psci_plat_pm_ops->affinst_suspend_finish) {
+
+               /* Get the physical state of the system */
+               plat_state = psci_get_phys_state(system_node);
+               rc = psci_plat_pm_ops->affinst_suspend_finish(mpidr,
+                                                             system_node->level,
+                                                             plat_state);
+               assert(rc == PSCI_E_SUCCESS);
+       }
+
+       /* State management: mark the system affinity instance as on */
+       psci_set_state(system_node, PSCI_STATE_ON);
+
+       return rc;
+}
+
+/* Suspend finishers indexed by affinity level; invoked by the common finisher */
+const afflvl_power_on_finisher psci_afflvl_suspend_finishers[] = {
+       psci_afflvl0_suspend_finish,
+       psci_afflvl1_suspend_finish,
+       psci_afflvl2_suspend_finish,
+};
+
diff --git a/services/std_svc/psci/psci_common.c b/services/std_svc/psci/psci_common.c
new file mode 100644 (file)
index 0000000..236309c
--- /dev/null
@@ -0,0 +1,568 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <arch_helpers.h>
+#include <console.h>
+#include <platform.h>
+#include <psci.h>
+#include <psci_private.h>
+#include <context_mgmt.h>
+#include <runtime_svc.h>
+#include "debug.h"
+
+/*
+ * SPD power management operations, expected to be supplied by the registered
+ * SPD on successful SP initialization
+ */
+const spd_pm_ops *psci_spd_pm;
+
+/*******************************************************************************
+ * Arrays that contain the information needed to resume a cpu's execution when
+ * out of suspend or off states. 'psci_ns_einfo_idx' keeps track of the next
+ * free index in the 'psci_ns_entry_info' & 'psci_suspend_context' arrays. Each
+ * cpu is allocated a single entry in each array during startup.
+ ******************************************************************************/
+suspend_context psci_suspend_context[PSCI_NUM_AFFS];
+ns_entry_info psci_ns_entry_info[PSCI_NUM_AFFS];
+unsigned int psci_ns_einfo_idx;
+
+/*******************************************************************************
+ * Grand array that holds the platform's topology information for state
+ * management of affinity instances. Each node (aff_map_node) in the array
+ * corresponds to an affinity instance e.g. cluster, cpu within an mpidr
+ ******************************************************************************/
+aff_map_node psci_aff_map[PSCI_NUM_AFFS]
+__attribute__ ((section("tzfw_coherent_mem")));
+
+/*******************************************************************************
+ * In a system, a certain number of affinity instances are present at an
+ * affinity level. The cumulative number of instances across all levels are
+ * stored in 'psci_aff_map'. The topology tree has been flattened into this
+ * array. To retrieve nodes, information about the extents of each affinity
+ * level i.e. start index and end index needs to be present. 'psci_aff_limits'
+ * stores this information.
+ ******************************************************************************/
+aff_limits_node psci_aff_limits[MPIDR_MAX_AFFLVL + 1];
+
+/*******************************************************************************
+ * Pointer to functions exported by the platform to complete power mgmt. ops
+ ******************************************************************************/
+plat_pm_ops *psci_plat_pm_ops;
+
+/*******************************************************************************
+ * Routine to return the maximum affinity level to traverse to after a cpu has
+ * been physically powered up. It is expected to be called immediately after
+ * reset from assembler code. It has to find its 'aff_map_node' instead of
+ * getting it as an argument.
+ * TODO: Calling psci_get_aff_map_node() with the MMU disabled is slow. Add
+ * support to allow faster access to the target affinity level.
+ ******************************************************************************/
+/*******************************************************************************
+ * Routine to return the maximum affinity level to traverse to after a cpu has
+ * been physically powered up. It is expected to be called immediately after
+ * reset from assembler code. It has to find its 'aff_map_node' instead of
+ * getting it as an argument. Returns PSCI_E_INVALID_PARAMS if this cpu was
+ * neither powering on nor resuming from a suspend.
+ * TODO: Calling psci_get_aff_map_node() with the MMU disabled is slow. Add
+ * support to allow faster access to the target affinity level.
+ ******************************************************************************/
+int get_power_on_target_afflvl(unsigned long mpidr)
+{
+       aff_map_node *node;
+       unsigned int state;
+
+       /* Retrieve our node from the topology tree */
+       node = psci_get_aff_map_node(mpidr & MPIDR_AFFINITY_MASK,
+                       MPIDR_AFFLVL0);
+       assert(node);
+
+       /*
+        * Return the maximum supported affinity level if this cpu was off.
+        * Call the handler in the suspend code if this cpu had been suspended.
+        * Any other state is invalid.
+        */
+       state = psci_get_state(node);
+       if (state == PSCI_STATE_ON_PENDING)
+               return get_max_afflvl();
+
+       if (state == PSCI_STATE_SUSPEND)
+               return psci_get_suspend_afflvl(node);
+
+       return PSCI_E_INVALID_PARAMS;
+}
+
+/*******************************************************************************
+ * Simple routine to retrieve the maximum affinity level supported by the
+ * platform and check that it makes sense.
+ ******************************************************************************/
+/*******************************************************************************
+ * Simple routine to retrieve the maximum affinity level supported by the
+ * platform and check that it makes sense. Takes no arguments; declared with
+ * '(void)' so the definition provides a proper prototype.
+ ******************************************************************************/
+int get_max_afflvl(void)
+{
+       int aff_lvl;
+
+       /* The platform reports the deepest affinity level it implements */
+       aff_lvl = plat_get_max_afflvl();
+       assert(aff_lvl <= MPIDR_MAX_AFFLVL && aff_lvl >= MPIDR_AFFLVL0);
+
+       return aff_lvl;
+}
+
+/*******************************************************************************
+ * Simple routine to set the id of an affinity instance at a given level in the
+ * mpidr.
+ ******************************************************************************/
+/*******************************************************************************
+ * Simple routine to set the id of an affinity instance at a given level in the
+ * mpidr.
+ ******************************************************************************/
+unsigned long mpidr_set_aff_inst(unsigned long mpidr,
+                                unsigned char aff_inst,
+                                int aff_lvl)
+{
+       unsigned long aff_shift;
+
+       assert(aff_lvl <= MPIDR_AFFLVL3);
+
+       /*
+        * Decide the number of bits to shift by depending upon
+        * the affinity level
+        */
+       aff_shift = get_afflvl_shift(aff_lvl);
+
+       /*
+        * Clear the existing affinity instance & set the new one. The operands
+        * are widened to unsigned long before shifting: integer promotion would
+        * otherwise shift an 'int', which is undefined behaviour once the shift
+        * count reaches the width of int (e.g. for affinity level 3).
+        */
+       mpidr &= ~((unsigned long) MPIDR_AFFLVL_MASK << aff_shift);
+       mpidr |= (unsigned long) aff_inst << aff_shift;
+
+       return mpidr;
+}
+
+/*******************************************************************************
+ * This function sanity checks a range of affinity levels.
+ ******************************************************************************/
+/*******************************************************************************
+ * This function sanity checks a range of affinity levels: the range must lie
+ * within [MPIDR_AFFLVL0, MPIDR_MAX_AFFLVL] and must not be inverted.
+ ******************************************************************************/
+int psci_check_afflvl_range(int start_afflvl, int end_afflvl)
+{
+       /* Reject out-of-bounds or inverted ranges in a single guard */
+       if (start_afflvl < MPIDR_AFFLVL0 ||
+           end_afflvl > MPIDR_MAX_AFFLVL ||
+           end_afflvl < start_afflvl)
+               return PSCI_E_INVALID_PARAMS;
+
+       return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * This function is passed an array of pointers to affinity level nodes in the
+ * topology tree for an mpidr. It picks up locks for each affinity level bottom
+ * up in the range specified.
+ ******************************************************************************/
+/*******************************************************************************
+ * This function is passed an array of pointers to affinity level nodes in the
+ * topology tree for an mpidr. It picks up locks for each affinity level bottom
+ * up in the range specified i.e. in ascending level order; release is the
+ * mirror image (see psci_release_afflvl_locks). Absent levels are skipped.
+ ******************************************************************************/
+void psci_acquire_afflvl_locks(unsigned long mpidr,
+                              int start_afflvl,
+                              int end_afflvl,
+                              mpidr_aff_map_nodes mpidr_nodes)
+{
+       int level;
+
+       for (level = start_afflvl; level <= end_afflvl; level++) {
+               if (mpidr_nodes[level] == NULL)
+                       continue;
+               bakery_lock_get(mpidr, &mpidr_nodes[level]->lock);
+       }
+}
+
+/*******************************************************************************
+ * This function is passed an array of pointers to affinity level nodes in the
+ * topology tree for an mpidr. It releases the lock for each affinity level top
+ * down in the range specified.
+ ******************************************************************************/
+/*******************************************************************************
+ * This function is passed an array of pointers to affinity level nodes in the
+ * topology tree for an mpidr. It releases the lock for each affinity level top
+ * down in the range specified i.e. in the reverse of the order in which
+ * psci_acquire_afflvl_locks takes them. Absent levels are skipped.
+ ******************************************************************************/
+void psci_release_afflvl_locks(unsigned long mpidr,
+                              int start_afflvl,
+                              int end_afflvl,
+                              mpidr_aff_map_nodes mpidr_nodes)
+{
+       int level;
+
+       for (level = end_afflvl; level >= start_afflvl; level--) {
+               if (mpidr_nodes[level] == NULL)
+                       continue;
+               bakery_lock_release(mpidr, &mpidr_nodes[level]->lock);
+       }
+}
+
+/*******************************************************************************
+ * Simple routine to determine whether an affinity instance at a given level
+ * in an mpidr exists or not. Returns PSCI_E_SUCCESS if the topology tree has
+ * a node for (mpidr, level) with PSCI_AFF_PRESENT set in its state,
+ * PSCI_E_INVALID_PARAMS otherwise.
+ ******************************************************************************/
+int psci_validate_mpidr(unsigned long mpidr, int level)
+{
+       aff_map_node *node;
+
+       node = psci_get_aff_map_node(mpidr, level);
+       if (node && (node->state & PSCI_AFF_PRESENT))
+               return PSCI_E_SUCCESS;
+       else
+               return PSCI_E_INVALID_PARAMS;
+}
+
+/*******************************************************************************
+ * This function retrieves all the stashed information needed to correctly
+ * resume a cpu's execution in the non-secure state after it has been physically
+ * powered on i.e. turned ON or resumed from SUSPEND. It programs the EL1/EL2
+ * system control register, the return context id in x0 and the EL3 ERET
+ * context from the entry stashed at 'index' in psci_ns_entry_info.
+ ******************************************************************************/
+void psci_get_ns_entry_info(unsigned int index)
+{
+       unsigned long sctlr = 0, scr, el_status, id_aa64pfr0;
+       uint64_t mpidr = read_mpidr();
+       cpu_context *ns_entry_context;
+       gp_regs *ns_entry_gpregs;
+
+       scr = read_scr();
+
+       /* Find out which EL we are going to */
+       id_aa64pfr0 = read_id_aa64pfr0_el1();
+       el_status = (id_aa64pfr0 >> ID_AA64PFR0_EL2_SHIFT) &
+               ID_AA64PFR0_ELX_MASK;
+
+       /* Restore endianess */
+       if (psci_ns_entry_info[index].sctlr & SCTLR_EE_BIT)
+               sctlr |= SCTLR_EE_BIT;
+       else
+               sctlr &= ~SCTLR_EE_BIT;
+
+       /*
+        * Turn off MMU and data caching. The original expression or'ed
+        * SCTLR_M_BIT in twice; the redundant duplicate has been removed.
+        * NOTE(review): the duplicate may have been intended as SCTLR_I_BIT
+        * (instruction cache disable) - confirm against the architecture
+        * requirements before relying on I-cache state here.
+        */
+       sctlr &= ~(SCTLR_M_BIT | SCTLR_C_BIT);
+
+       /* Set the register width */
+       if (psci_ns_entry_info[index].scr & SCR_RW_BIT)
+               scr |= SCR_RW_BIT;
+       else
+               scr &= ~SCR_RW_BIT;
+
+       scr |= SCR_NS_BIT;
+
+       /* Program SCTLR at the EL the cpu will enter (EL2 if implemented) */
+       if (el_status)
+               write_sctlr_el2(sctlr);
+       else
+               write_sctlr_el1(sctlr);
+
+       /* Fulfill the cpu_on entry reqs. as per the psci spec */
+       ns_entry_context = (cpu_context *) cm_get_context(mpidr, NON_SECURE);
+       assert(ns_entry_context);
+
+       /*
+        * Setup general purpose registers to return the context id and
+        * prevent leakage of secure information into the normal world.
+        */
+       ns_entry_gpregs = get_gpregs_ctx(ns_entry_context);
+       write_ctx_reg(ns_entry_gpregs,
+                     CTX_GPREG_X0,
+                     psci_ns_entry_info[index].context_id);
+
+       /*
+        * Tell the context management library to setup EL3 system registers to
+        * be able to ERET into the ns state, and SP_EL3 points to the right
+        * context to exit from EL3 correctly.
+        */
+       cm_set_el3_eret_context(NON_SECURE,
+                       psci_ns_entry_info[index].eret_info.entrypoint,
+                       psci_ns_entry_info[index].eret_info.spsr,
+                       scr);
+
+       cm_set_next_eret_context(NON_SECURE);
+}
+
+/*******************************************************************************
+ * This function retrieves and stashes all the information needed to correctly
+ * resume a cpu's execution in the non-secure state after it has been physically
+ * powered on i.e. turned ON or resumed from SUSPEND. This is done prior to
+ * turning it on or before suspending it. Returns PSCI_E_SUCCESS, or
+ * PSCI_E_INVALID_PARAMS if a Thumb entrypoint is requested for an aarch64 EL.
+ ******************************************************************************/
+int psci_set_ns_entry_info(unsigned int index,
+                          unsigned long entrypoint,
+                          unsigned long context_id)
+{
+       int rc = PSCI_E_SUCCESS;
+       unsigned int rw, mode, ee, spsr = 0;
+       unsigned long id_aa64pfr0 = read_id_aa64pfr0_el1(), scr = read_scr();
+       unsigned long el_status;
+
+       /* Figure out what mode do we enter the non-secure world in */
+       el_status = (id_aa64pfr0 >> ID_AA64PFR0_EL2_SHIFT) &
+               ID_AA64PFR0_ELX_MASK;
+
+       /*
+        * Figure out whether the cpu enters the non-secure address space
+        * in aarch32 or aarch64
+        */
+       rw = scr & SCR_RW_BIT;
+       if (rw) {
+
+               /*
+                * Check whether a Thumb entry point has been provided for an
+                * aarch64 EL
+                */
+               if (entrypoint & 0x1)
+                       return PSCI_E_INVALID_PARAMS;
+
+               /* Enter EL2 only if it is implemented and HVC is enabled */
+               if (el_status && (scr & SCR_HCE_BIT)) {
+                       mode = MODE_EL2;
+                       ee = read_sctlr_el2() & SCTLR_EE_BIT;
+               } else {
+                       mode = MODE_EL1;
+                       ee = read_sctlr_el1() & SCTLR_EE_BIT;
+               }
+
+               /* Mask all asynchronous exceptions on entry */
+               spsr = DAIF_DBG_BIT | DAIF_ABT_BIT;
+               spsr |= DAIF_IRQ_BIT | DAIF_FIQ_BIT;
+               spsr <<= PSR_DAIF_SHIFT;
+               spsr |= make_spsr(mode, MODE_SP_ELX, !rw);
+
+               psci_ns_entry_info[index].sctlr |= ee;
+               psci_ns_entry_info[index].scr |= SCR_RW_BIT;
+       } else {
+
+               /* Check whether aarch32 has to be entered in Thumb mode */
+               if (entrypoint & 0x1)
+                       spsr = SPSR32_T_BIT;
+
+               if (el_status && (scr & SCR_HCE_BIT)) {
+                       mode = AARCH32_MODE_HYP;
+                       ee = read_sctlr_el2() & SCTLR_EE_BIT;
+               } else {
+                       mode = AARCH32_MODE_SVC;
+                       ee = read_sctlr_el1() & SCTLR_EE_BIT;
+               }
+
+               /*
+                * TODO: Choose async. exception bits if HYP mode is not
+                * implemented according to the values of SCR.{AW, FW} bits
+                */
+               spsr |= DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;
+               spsr <<= PSR_DAIF_SHIFT;
+               if (ee)
+                       spsr |= SPSR32_EE_BIT;
+               spsr |= mode;
+
+               /* Ensure that the CSPR.E and SCTLR.EE bits match */
+               psci_ns_entry_info[index].sctlr |= ee;
+               psci_ns_entry_info[index].scr &= ~SCR_RW_BIT;
+       }
+
+       /* Stash the entrypoint, spsr and context id for the later ERET */
+       psci_ns_entry_info[index].eret_info.entrypoint = entrypoint;
+       psci_ns_entry_info[index].eret_info.spsr = spsr;
+       psci_ns_entry_info[index].context_id = context_id;
+
+       return rc;
+}
+
+/*******************************************************************************
+ * This function takes a pointer to an affinity node in the topology tree and
+ * returns its state. State of a non-leaf node needs to be calculated. Returns
+ * one of the PSCI_STATE_* values.
+ ******************************************************************************/
+unsigned short psci_get_state(aff_map_node *node)
+{
+       assert(node->level >= MPIDR_AFFLVL0 && node->level <= MPIDR_MAX_AFFLVL);
+
+       /* A cpu node just contains the state which can be directly returned */
+       if (node->level == MPIDR_AFFLVL0)
+               return (node->state >> PSCI_STATE_SHIFT) & PSCI_STATE_MASK;
+
+       /*
+        * For an affinity level higher than a cpu, the state has to be
+        * calculated. It depends upon the value of the reference count
+        * which is managed by each node at the next lower affinity level
+        * e.g. for a cluster, each cpu increments/decrements the reference
+        * count. If the reference count is 0 then the affinity level is
+        * OFF else ON.
+        */
+       if (node->ref_count)
+               return PSCI_STATE_ON;
+       else
+               return PSCI_STATE_OFF;
+}
+
+/*******************************************************************************
+ * This function takes a pointer to an affinity node in the topology tree and
+ * a target state. State of a non-leaf node needs to be converted to a reference
+ * count. State of a leaf node can be set directly. Callers are expected to
+ * hold the relevant affinity level locks so the count update is not racy.
+ ******************************************************************************/
+void psci_set_state(aff_map_node *node, unsigned short state)
+{
+       assert(node->level >= MPIDR_AFFLVL0 && node->level <= MPIDR_MAX_AFFLVL);
+
+       /*
+        * For an affinity level higher than a cpu, the state is used
+        * to decide whether the reference count is incremented or
+        * decremented. Entry into the ON_PENDING state does not have
+        * effect.
+        */
+       if (node->level > MPIDR_AFFLVL0) {
+               switch (state) {
+               case PSCI_STATE_ON:
+                       node->ref_count++;
+                       break;
+               case PSCI_STATE_OFF:
+               case PSCI_STATE_SUSPEND:
+                       node->ref_count--;
+                       break;
+               case PSCI_STATE_ON_PENDING:
+                       /*
+                        * An affinity level higher than a cpu will not undergo
+                        * a state change when it is about to be turned on
+                        */
+                       return;
+               default:
+                       /* Any other state value is a programming error */
+                       assert(0);
+               }
+       } else {
+               /* Leaf (cpu) node: replace the state field in place */
+               node->state &= ~(PSCI_STATE_MASK << PSCI_STATE_SHIFT);
+               node->state |= (state & PSCI_STATE_MASK) << PSCI_STATE_SHIFT;
+       }
+}
+
+/*******************************************************************************
+ * An affinity level could be on, on_pending, suspended or off. These are the
+ * logical states it can be in. Physically either it is off or on. When it is in
+ * the state on_pending then it is about to be turned on. It is not possible to
+ * tell whether that's actually happened or not. So we err on the side of
+ * caution & treat the affinity level as being turned off.
+ ******************************************************************************/
+unsigned short psci_get_phys_state(aff_map_node *node)
+{
+       unsigned int state;
+
+       /* Map the logical state onto a physical on/off state */
+       state = psci_get_state(node);
+       return get_phys_state(state);
+}
+
+/*******************************************************************************
+ * This function takes an array of pointers to affinity instance nodes in the
+ * topology tree and calls the physical power on handler for the corresponding
+ * affinity levels, from the highest level down to the lowest. Returns the
+ * first handler failure code, or PSCI_E_INVALID_PARAMS if no handler ran.
+ ******************************************************************************/
+static int psci_call_power_on_handlers(mpidr_aff_map_nodes mpidr_nodes,
+                                      int start_afflvl,
+                                      int end_afflvl,
+                                      afflvl_power_on_finisher *pon_handlers,
+                                      unsigned long mpidr)
+{
+       int rc = PSCI_E_INVALID_PARAMS, level;
+       aff_map_node *node;
+
+       for (level = end_afflvl; level >= start_afflvl; level--) {
+               node = mpidr_nodes[level];
+               if (node == NULL)
+                       continue;
+
+               /*
+                * If we run into any trouble while powering up an
+                * affinity instance, then there is no recovery path
+                * so simply return an error and let the caller take
+                * care of the situation.
+                */
+               rc = pon_handlers[level](mpidr, node);
+               if (rc != PSCI_E_SUCCESS)
+                       break;
+       }
+
+       return rc;
+}
+
+/*******************************************************************************
+ * Generic handler which is called when a cpu is physically powered on. It
+ * traverses through all the affinity levels performing generic, architectural,
+ * platform setup and state management e.g. for a cluster that's been powered
+ * on, it will call the platform specific code which will enable coherency at
+ * the interconnect level. For a cpu it could mean turning on the MMU etc.
+ *
+ * The state of all the relevant affinity levels is changed after calling the
+ * affinity level specific handlers as their actions would depend upon the state
+ * the affinity level is exiting from.
+ *
+ * The affinity level specific handlers are called in descending order i.e. from
+ * the highest to the lowest affinity level implemented by the platform because
+ * to turn on affinity level X it is neccesary to turn on affinity level X + 1
+ * first.
+ *
+ * Any failure here is treated as unrecoverable and ends in panic().
+ *
+ * CAUTION: This function is called with coherent stacks so that coherency and
+ * the mmu can be turned on safely.
+ ******************************************************************************/
+void psci_afflvl_power_on_finish(unsigned long mpidr,
+                                int start_afflvl,
+                                int end_afflvl,
+                                afflvl_power_on_finisher *pon_handlers)
+{
+       mpidr_aff_map_nodes mpidr_nodes;
+       int rc;
+
+       /* Strip any non-affinity bits before the topology lookup */
+       mpidr &= MPIDR_AFFINITY_MASK;
+
+       /*
+        * Collect the pointers to the nodes in the topology tree for
+        * each affinity instance in the mpidr. If this function does
+        * not return successfully then either the mpidr or the affinity
+        * levels are incorrect. Either case is an irrecoverable error.
+        */
+       rc = psci_get_aff_map_nodes(mpidr,
+                                   start_afflvl,
+                                   end_afflvl,
+                                   mpidr_nodes);
+       if (rc != PSCI_E_SUCCESS)
+               panic();
+
+       /*
+        * This function acquires the lock corresponding to each affinity
+        * level so that by the time all locks are taken, the system topology
+        * is snapshot and state management can be done safely.
+        */
+       psci_acquire_afflvl_locks(mpidr,
+                                 start_afflvl,
+                                 end_afflvl,
+                                 mpidr_nodes);
+
+       /* Perform generic, architecture and platform specific handling */
+       rc = psci_call_power_on_handlers(mpidr_nodes,
+                                        start_afflvl,
+                                        end_afflvl,
+                                        pon_handlers,
+                                        mpidr);
+       if (rc != PSCI_E_SUCCESS)
+               panic();
+
+       /*
+        * This loop releases the lock corresponding to each affinity level
+        * in the reverse order to which they were acquired.
+        */
+       psci_release_afflvl_locks(mpidr,
+                                 start_afflvl,
+                                 end_afflvl,
+                                 mpidr_nodes);
+}
+
+/*******************************************************************************
+ * This function initializes the set of hooks that PSCI invokes as part of power
+ * management operation. The power management hooks are expected to be provided
+ * by the SPD, after it finishes all its initialization.
+ * NOTE(review): 'pm' is stored without validation - callers must pass a fully
+ * populated ops table.
+ ******************************************************************************/
+void psci_register_spd_pm_hook(const spd_pm_ops *pm)
+{
+       psci_spd_pm = pm;
+}
diff --git a/services/std_svc/psci/psci_entry.S b/services/std_svc/psci/psci_entry.S
new file mode 100644 (file)
index 0000000..361dfde
--- /dev/null
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <platform.h>
+#include <psci.h>
+#include <psci_private.h>
+#include <runtime_svc.h>
+#include <asm_macros.S>
+#include <cm_macros.S>
+
+       .globl  psci_aff_on_finish_entry
+       .globl  psci_aff_suspend_finish_entry
+       .globl  __psci_cpu_off
+       .globl  __psci_cpu_suspend
+
+       .section        .text, "ax"; .align 3
+
+       /* -----------------------------------------------------
+        * This cpu has been physically powered up. Depending
+        * upon whether it was resumed from suspend or simply
+        * turned on, call the common power on finisher with
+        * the handlers (chosen depending upon original state).
+        * For ease, the finisher is called with coherent
+        * stacks. This allows the cluster/cpu finishers to
+        * enter coherency and enable the mmu without running
+        * into issues. We switch back to normal stacks once
+        * all this is done.
+        * -----------------------------------------------------
+        */
+psci_aff_on_finish_entry:
+       /* x23 selects the per-afflvl handler table for the common path */
+       adr     x23, psci_afflvl_on_finishers
+       b       psci_aff_common_finish_entry
+
+psci_aff_suspend_finish_entry:
+       /* Falls through into the common path with the suspend handlers */
+       adr     x23, psci_afflvl_suspend_finishers
+
+psci_aff_common_finish_entry:
+       /* x22 holds the C finisher invoked once the stack is set up */
+       adr     x22, psci_afflvl_power_on_finish
+
+       /* ---------------------------------------------
+        * Exceptions should not occur at this point.
+        * Set VBAR in order to handle and report any
+        * that do occur
+        * ---------------------------------------------
+        */
+       adr     x0, early_exceptions
+       msr     vbar_el3, x0
+       isb
+
+       /* ---------------------------------------------
+        * Use SP_EL0 for the C runtime stack.
+        * ---------------------------------------------
+        */
+       msr     spsel, #0
+       isb
+
+       /* Stash this cpu's mpidr in x19 and get a coherent stack */
+       bl      read_mpidr
+       mov     x19, x0
+       bl      platform_set_coherent_stack
+
+       /* ---------------------------------------------
+        * Call the finishers starting from affinity
+        * level 0. A negative target afflvl indicates
+        * an unrecoverable error.
+        * ---------------------------------------------
+        */
+       mov     x0, x19
+       bl      get_power_on_target_afflvl
+       cmp     x0, xzr
+       b.lt    _panic
+       mov     x3, x23
+       mov     x2, x0
+       mov     x0, x19
+       mov     x1, #MPIDR_AFFLVL0
+       blr     x22
+
+       /* --------------------------------------------
+        * Give ourselves a stack allocated in Normal
+        * -IS-WBWA memory
+        * --------------------------------------------
+        */
+       mov     x0, x19
+       bl      platform_set_stack
+
+       /* Scrub callee-saved registers before leaving EL3 */
+       zero_callee_saved_regs
+       b       el3_exit
+_panic:
+       b       _panic
+
+       /* -----------------------------------------------------
+        * The following two stubs give the calling cpu a
+        * coherent stack to allow flushing of caches without
+        * suffering from stack coherency issues
+        * -----------------------------------------------------
+        */
+__psci_cpu_off:
+       func_prologue
+       sub     sp, sp, #0x10
+       stp     x19, x20, [sp, #0]
+       mov     x19, sp
+       bl      read_mpidr
+       bl      platform_set_coherent_stack
+       bl      psci_cpu_off
+       mov     x1, #PSCI_E_SUCCESS
+       cmp     x0, x1
+       b.eq    final_wfi
+       mov     sp, x19
+       ldp     x19, x20, [sp,#0]
+       add     sp, sp, #0x10
+       func_epilogue
+       ret
+
+__psci_cpu_suspend:
+       func_prologue
+       /* Save sp and the three arguments across the stack switch */
+       sub     sp, sp, #0x20
+       stp     x19, x20, [sp, #0]
+       stp     x21, x22, [sp, #0x10]
+       mov     x19, sp
+       mov     x20, x0
+       mov     x21, x1
+       mov     x22, x2
+       bl      read_mpidr
+       bl      platform_set_coherent_stack
+       /* Re-load power_state, entrypoint and context id for the C call */
+       mov     x0, x20
+       mov     x1, x21
+       mov     x2, x22
+       bl      psci_cpu_suspend
+       /* On success this cpu never returns: wait for power down */
+       mov     x1, #PSCI_E_SUCCESS
+       cmp     x0, x1
+       b.eq    final_wfi
+       /* Failure: restore the original stack and return the error in x0 */
+       mov     sp, x19
+       ldp     x21, x22, [sp,#0x10]
+       ldp     x19, x20, [sp,#0]
+       add     sp, sp, #0x20
+       func_epilogue
+       ret
+
+       /* -----------------------------------------------------
+        * Terminal path: drain outstanding memory accesses and
+        * wait for the power controller to take the cpu down.
+        * Spin if the wfi ever falls through spuriously.
+        * -----------------------------------------------------
+        */
+final_wfi:
+       dsb     sy
+       wfi
+wfi_spill:
+       b       wfi_spill
+
diff --git a/services/std_svc/psci/psci_main.c b/services/std_svc/psci/psci_main.c
new file mode 100644 (file)
index 0000000..6bf0583
--- /dev/null
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <arch_helpers.h>
+#include <console.h>
+#include <platform.h>
+#include <psci_private.h>
+#include <runtime_svc.h>
+#include <debug.h>
+#include <context_mgmt.h>
+
+/*******************************************************************************
+ * PSCI frontend api for servicing SMCs. Described in the PSCI spec.
+ ******************************************************************************/
+/*******************************************************************************
+ * PSCI CPU_ON frontend. Validates that 'target_cpu' exists in the topology
+ * and then powers on every affinity level from the cpu level up to the
+ * platform's highest level. Returns PSCI_E_SUCCESS or a PSCI_E_* error.
+ ******************************************************************************/
+int psci_cpu_on(unsigned long target_cpu,
+               unsigned long entrypoint,
+               unsigned long context_id)
+
+{
+       int rc;
+       unsigned int start_afflvl, end_afflvl;
+
+       /* Determine whether the cpu exists or not */
+       rc = psci_validate_mpidr(target_cpu, MPIDR_AFFLVL0);
+       if (rc != PSCI_E_SUCCESS) {
+               goto exit;
+       }
+
+       /*
+        * To turn this cpu on, specify which affinity
+        * levels need to be turned on
+        */
+       start_afflvl = MPIDR_AFFLVL0;
+       end_afflvl = get_max_afflvl();
+       rc = psci_afflvl_on(target_cpu,
+                           entrypoint,
+                           context_id,
+                           start_afflvl,
+                           end_afflvl);
+
+exit:
+       return rc;
+}
+
+/*
+ * Return the implemented PSCI version word.
+ * NOTE(review): assumes PSCI_MAJOR_VER is pre-shifted so the bitwise OR
+ * composes the major.minor version - confirm against psci.h.
+ */
+unsigned int psci_version(void)
+{
+       return PSCI_MAJOR_VER | PSCI_MINOR_VER;
+}
+
+/*******************************************************************************
+ * PSCI CPU_SUSPEND frontend. Validates the requested power state (standby
+ * states and out-of-range target affinity levels are rejected) and then
+ * suspends all affinity levels up to the requested one.
+ ******************************************************************************/
+int psci_cpu_suspend(unsigned int power_state,
+                    unsigned long entrypoint,
+                    unsigned long context_id)
+{
+       int rc;
+       unsigned long mpidr;
+       unsigned int target_afflvl, pstate_type;
+
+       /* TODO: Standby states are not supported at the moment */
+       pstate_type = psci_get_pstate_type(power_state);
+       if (pstate_type == 0) {
+               rc = PSCI_E_INVALID_PARAMS;
+               goto exit;
+       }
+
+       /* Sanity check the requested state */
+       target_afflvl = psci_get_pstate_afflvl(power_state);
+       if (target_afflvl > MPIDR_MAX_AFFLVL) {
+               rc = PSCI_E_INVALID_PARAMS;
+               goto exit;
+       }
+
+       /* Suspend is always initiated on the calling cpu */
+       mpidr = read_mpidr();
+       rc = psci_afflvl_suspend(mpidr,
+                                entrypoint,
+                                context_id,
+                                power_state,
+                                MPIDR_AFFLVL0,
+                                target_afflvl);
+
+exit:
+       /* The only expected failure mode is an invalid parameter */
+       if (rc != PSCI_E_SUCCESS)
+               assert(rc == PSCI_E_INVALID_PARAMS);
+       return rc;
+}
+
+/*******************************************************************************
+ * PSCI CPU_OFF frontend. Powers down the calling cpu and every affinity
+ * level up to the platform maximum. Returns PSCI_E_SUCCESS or PSCI_E_DENIED.
+ ******************************************************************************/
+int psci_cpu_off(void)
+{
+       int rc;
+       unsigned long mpidr;
+       int target_afflvl = get_max_afflvl();
+
+       mpidr = read_mpidr();
+
+       /*
+        * Traverse from the highest to the lowest affinity level. When the
+        * lowest affinity level is hit, all the locks are acquired. State
+        * management is done immediately followed by cpu, cluster ...
+        * ..target_afflvl specific actions as this function unwinds back.
+        */
+       rc = psci_afflvl_off(mpidr, MPIDR_AFFLVL0, target_afflvl);
+
+       /*
+        * The only error cpu_off can return is E_DENIED. So check if that's
+        * indeed the case.
+        */
+       assert (rc == PSCI_E_SUCCESS || rc == PSCI_E_DENIED);
+
+       return rc;
+}
+
+/*******************************************************************************
+ * PSCI AFFINITY_INFO frontend. Returns the PSCI_STATE_* of the affinity
+ * instance identified by 'target_affinity' at 'lowest_affinity_level', or
+ * PSCI_E_INVALID_PARAMS if the level is out of range or the instance absent.
+ * A suspended cpu is reported as ON since it is available to the OS.
+ ******************************************************************************/
+int psci_affinity_info(unsigned long target_affinity,
+                      unsigned int lowest_affinity_level)
+{
+       int rc = PSCI_E_INVALID_PARAMS;
+       unsigned int aff_state;
+       aff_map_node *node;
+
+       if (lowest_affinity_level > get_max_afflvl())
+               return rc;
+
+       node = psci_get_aff_map_node(target_affinity, lowest_affinity_level);
+       if (node && (node->state & PSCI_AFF_PRESENT)) {
+
+               /*
+                * TODO: For affinity levels higher than 0 i.e. cpu, the
+                * state will always be either ON or OFF. Need to investigate
+                * how critical is it to support ON_PENDING here.
+                */
+               aff_state = psci_get_state(node);
+
+               /* A suspended cpu is available & on for the OS */
+               if (aff_state == PSCI_STATE_SUSPEND) {
+                       aff_state = PSCI_STATE_ON;
+               }
+
+               rc = aff_state;
+       }
+
+       return rc;
+}
+
+/* Unimplemented: MIGRATE is not supported; 'target_cpu' is ignored */
+int psci_migrate(unsigned int target_cpu)
+{
+       return PSCI_E_NOT_SUPPORTED;
+}
+
+/* Unimplemented: report that no migratable Trusted OS is present */
+unsigned int psci_migrate_info_type(void)
+{
+       return PSCI_TOS_NOT_PRESENT_MP;
+}
+
+unsigned long psci_migrate_info_up_cpu(void)
+{
+       /*
+        * Return value of this currently unsupported call depends upon
+        * what psci_migrate_info_type() returns.
+        */
+       return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * PSCI top level handler for servicing SMCs. Dispatches on the SMC function
+ * id to the corresponding PSCI frontend and returns the result to the caller
+ * in x0 via SMC_RET1. Unknown function ids return SMC_UNK.
+ ******************************************************************************/
+uint64_t psci_smc_handler(uint32_t smc_fid,
+                         uint64_t x1,
+                         uint64_t x2,
+                         uint64_t x3,
+                         uint64_t x4,
+                         void *cookie,
+                         void *handle,
+                         uint64_t flags)
+{
+       uint64_t rc;
+
+       switch (smc_fid) {
+       case PSCI_VERSION:
+               rc = psci_version();
+               break;
+
+       /* The __ variants run on a coherent stack; see psci_entry.S */
+       case PSCI_CPU_OFF:
+               rc = __psci_cpu_off();
+               break;
+
+       case PSCI_CPU_SUSPEND_AARCH64:
+       case PSCI_CPU_SUSPEND_AARCH32:
+               rc = __psci_cpu_suspend(x1, x2, x3);
+               break;
+
+       case PSCI_CPU_ON_AARCH64:
+       case PSCI_CPU_ON_AARCH32:
+               rc = psci_cpu_on(x1, x2, x3);
+               break;
+
+       case PSCI_AFFINITY_INFO_AARCH32:
+       case PSCI_AFFINITY_INFO_AARCH64:
+               rc = psci_affinity_info(x1, x2);
+               break;
+
+       case PSCI_MIG_AARCH32:
+       case PSCI_MIG_AARCH64:
+               rc = psci_migrate(x1);
+               break;
+
+       case PSCI_MIG_INFO_TYPE:
+               rc = psci_migrate_info_type();
+               break;
+
+       case PSCI_MIG_INFO_UP_CPU_AARCH32:
+       case PSCI_MIG_INFO_UP_CPU_AARCH64:
+               rc = psci_migrate_info_up_cpu();
+               break;
+
+       default:
+               rc = SMC_UNK;
+               /* NOTE(review): stray space before '\n' in message - cosmetic */
+               WARN("Unimplemented PSCI Call: 0x%x \n", smc_fid);
+       }
+
+       /* Return the result in x0 of the SMC caller's context */
+       SMC_RET1(handle, rc);
+}
diff --git a/services/std_svc/psci/psci_private.h b/services/std_svc/psci/psci_private.h
new file mode 100644 (file)
index 0000000..2d9d12b
--- /dev/null
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PSCI_PRIVATE_H__
+#define __PSCI_PRIVATE_H__
+
+#include <arch.h>
+#include <bakery_lock.h>
+
+#ifndef __ASSEMBLY__
+/*******************************************************************************
+ * The following two data structures hold the generic information to bringup
+ * a suspended/hotplugged out cpu
+ ******************************************************************************/
+typedef struct {
+       unsigned long entrypoint;       /* lower-EL address to ERET to */
+       unsigned long spsr;             /* saved program status to restore */
+} eret_params;
+
+typedef struct {
+       eret_params eret_info;
+       unsigned long context_id;       /* opaque cookie for the resumed cpu */
+       unsigned int scr;               /* NOTE(review): SCR_EL3/SCTLR values to
+                                          apply for the ns world - confirm in
+                                          psci_common.c */
+       unsigned int sctlr;
+} ns_entry_info;
+
+/*******************************************************************************
+ * The following two data structures hold the topology tree which in turn tracks
+ * the state of the all the affinity instances supported by the platform.
+ ******************************************************************************/
+typedef struct {
+       unsigned long mpidr;            /* mpidr masked to this node's level */
+       unsigned short ref_count;
+       unsigned char state;            /* PSCI_STATE_* plus PSCI_AFF_PRESENT */
+       unsigned char level;            /* affinity level of this node */
+       unsigned int data;              /* level-specific; at level 0 it is an
+                                          index into psci_ns_entry_info */
+       bakery_lock lock;               /* serialises PM ops on this node */
+} aff_map_node;
+
+/* Min/max indices of one affinity level's slice of psci_aff_map[] */
+typedef struct {
+       int min;
+       int max;
+} aff_limits_node;
+
+/*******************************************************************************
+ * This data structure holds secure world context that needs to be preserved
+ * across cpu_suspend calls which enter the power down state.
+ ******************************************************************************/
+typedef struct {
+       /* Align the suspend level to allow per-cpu lockless access */
+       int suspend_level
+       __attribute__((__aligned__(CACHE_WRITEBACK_GRANULE)));
+} suspend_context;
+
+/* Per-level node pointers collected for a single mpidr */
+typedef aff_map_node (*mpidr_aff_map_nodes[MPIDR_MAX_AFFLVL]);
+/* Handler invoked when an affinity instance finishes powering on */
+typedef unsigned int (*afflvl_power_on_finisher)(unsigned long,
+                                                aff_map_node *);
+
+/*******************************************************************************
+ * Data prototypes
+ ******************************************************************************/
+extern suspend_context psci_suspend_context[PSCI_NUM_AFFS];
+extern ns_entry_info psci_ns_entry_info[PSCI_NUM_AFFS];
+extern unsigned int psci_ns_einfo_idx;
+extern aff_limits_node psci_aff_limits[MPIDR_MAX_AFFLVL + 1];
+extern plat_pm_ops *psci_plat_pm_ops;
+extern aff_map_node psci_aff_map[PSCI_NUM_AFFS];
+extern afflvl_power_on_finisher psci_afflvl_off_finish_handlers[];
+extern afflvl_power_on_finisher psci_afflvl_sus_finish_handlers[];
+
+/*******************************************************************************
+ * SPD's power management hooks registered with PSCI
+ ******************************************************************************/
+extern const spd_pm_ops *psci_spd_pm;
+
+/*******************************************************************************
+ * Function prototypes
+ ******************************************************************************/
+/* Private exported functions from psci_common.c */
+extern int get_max_afflvl(void);
+extern unsigned short psci_get_state(aff_map_node *node);
+extern unsigned short psci_get_phys_state(aff_map_node *node);
+extern void psci_set_state(aff_map_node *node, unsigned short state);
+extern void psci_get_ns_entry_info(unsigned int index);
+extern unsigned long mpidr_set_aff_inst(unsigned long, unsigned char, int);
+extern int psci_validate_mpidr(unsigned long, int);
+extern int get_power_on_target_afflvl(unsigned long mpidr);
+extern void psci_afflvl_power_on_finish(unsigned long,
+                                               int,
+                                               int,
+                                               afflvl_power_on_finisher *);
+extern int psci_set_ns_entry_info(unsigned int index,
+                                 unsigned long entrypoint,
+                                 unsigned long context_id);
+extern int psci_check_afflvl_range(int start_afflvl, int end_afflvl);
+extern void psci_acquire_afflvl_locks(unsigned long mpidr,
+                                     int start_afflvl,
+                                     int end_afflvl,
+                                     mpidr_aff_map_nodes mpidr_nodes);
+extern void psci_release_afflvl_locks(unsigned long mpidr,
+                                     int start_afflvl,
+                                     int end_afflvl,
+                                     mpidr_aff_map_nodes mpidr_nodes);
+
+/* Private exported functions from psci_setup.c */
+extern int psci_get_aff_map_nodes(unsigned long mpidr,
+                                 int start_afflvl,
+                                 int end_afflvl,
+                                 mpidr_aff_map_nodes mpidr_nodes);
+extern aff_map_node *psci_get_aff_map_node(unsigned long, int);
+
+/* Private exported functions from psci_affinity_on.c */
+extern int psci_afflvl_on(unsigned long,
+                         unsigned long,
+                         unsigned long,
+                         int,
+                         int);
+
+/* Private exported functions from psci_affinity_off.c */
+extern int psci_afflvl_off(unsigned long, int, int);
+
+/* Private exported functions from psci_affinity_suspend.c */
+extern void psci_set_suspend_afflvl(aff_map_node *node, int afflvl);
+extern int psci_get_suspend_afflvl(aff_map_node *node);
+extern int psci_afflvl_suspend(unsigned long,
+                              unsigned long,
+                              unsigned long,
+                              unsigned int,
+                              int,
+                              int);
+extern unsigned int psci_afflvl_suspend_finish(unsigned long, int, int);
+
+/* Private exported functions from psci_main.c */
+extern uint64_t psci_smc_handler(uint32_t smc_fid,
+                                uint64_t x1,
+                                uint64_t x2,
+                                uint64_t x3,
+                                uint64_t x4,
+                                void *cookie,
+                                void *handle,
+                                uint64_t flags);
+
+/* PSCI setup function */
+extern int32_t psci_setup(void);
+
+#endif /*__ASSEMBLY__*/
+
+#endif /* __PSCI_PRIVATE_H__ */
diff --git a/services/std_svc/psci/psci_setup.c b/services/std_svc/psci/psci_setup.c
new file mode 100644 (file)
index 0000000..e3a5d5d
--- /dev/null
@@ -0,0 +1,344 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <arch_helpers.h>
+#include <console.h>
+#include <platform.h>
+#include <psci_private.h>
+#include <context_mgmt.h>
+#include <runtime_svc.h>
+
+/*******************************************************************************
+ * Per cpu non-secure contexts used to program the architectural state prior
+ * return to the normal world.
+ * TODO: Use the memory allocator to set aside memory for the contexts instead
+ * of relying on platform defined constants. Using PSCI_NUM_AFFS will be an
+ * overkill.
+ ******************************************************************************/
+static cpu_context psci_ns_context[PLATFORM_CORE_COUNT];
+
+/*******************************************************************************
+ * Routines for retrieving the node corresponding to an affinity level instance
+ * in the mpidr. The first one uses binary search to find the node corresponding
+ * to the mpidr (key) at a particular affinity level. The second routine decides
+ * extents of the binary search at each affinity level.
+ ******************************************************************************/
+/*
+ * Recursive binary search over psci_aff_map[min_idx..max_idx] for the node
+ * whose mpidr equals 'key'. Returns the array index on a match, or
+ * PSCI_E_INVALID_PARAMS (negative - see the rc >= 0 test in the caller)
+ * when the key is absent.
+ */
+static int psci_aff_map_get_idx(unsigned long key,
+                               int min_idx,
+                               int max_idx)
+{
+       int mid;
+
+       /*
+        * Terminating condition: If the max and min indices have crossed paths
+        * during the binary search then the key has not been found.
+        */
+       if (max_idx < min_idx)
+               return PSCI_E_INVALID_PARAMS;
+
+       /*
+        * Bisect the array around 'mid' and then recurse into the array chunk
+        * where the key is likely to be found. The mpidrs in each node in the
+        * 'psci_aff_map' for a given affinity level are stored in an ascending
+        * order which makes the binary search possible.
+        */
+       mid = min_idx + ((max_idx - min_idx) >> 1);     /* Divide by 2 */
+       if (psci_aff_map[mid].mpidr > key)
+               return psci_aff_map_get_idx(key, min_idx, mid - 1);
+       else if (psci_aff_map[mid].mpidr < key)
+               return psci_aff_map_get_idx(key, mid + 1, max_idx);
+       else
+               return mid;
+}
+
+/*
+ * Return the topology tree node that 'mpidr' belongs to at affinity level
+ * 'aff_lvl', or NULL when no such node exists in the map.
+ */
+aff_map_node *psci_get_aff_map_node(unsigned long mpidr, int aff_lvl)
+{
+       int rc;
+
+       /* Right shift the mpidr to the required affinity level */
+       mpidr = mpidr_mask_lower_afflvls(mpidr, aff_lvl);
+
+       /* Search only within this level's slice of the flat map array */
+       rc = psci_aff_map_get_idx(mpidr,
+                                 psci_aff_limits[aff_lvl].min,
+                                 psci_aff_limits[aff_lvl].max);
+       if (rc >= 0)
+               return &psci_aff_map[rc];
+       else
+               return NULL;
+}
+
+/*******************************************************************************
+ * This function populates an array with nodes corresponding to a given range of
+ * affinity levels in an mpidr. It returns successfully only when the affinity
+ * levels are correct, the mpidr is valid i.e. no affinity level is absent from
+ * the topology tree & the affinity instance at level 0 is not absent.
+ ******************************************************************************/
+int psci_get_aff_map_nodes(unsigned long mpidr,
+                          int start_afflvl,
+                          int end_afflvl,
+                          mpidr_aff_map_nodes mpidr_nodes)
+{
+       int rc, level;
+       aff_map_node *node;
+
+       /* Reject invalid or inverted affinity level bounds up front */
+       rc = psci_check_afflvl_range(start_afflvl, end_afflvl);
+       if (rc != PSCI_E_SUCCESS)
+               return rc;
+
+       for (level = start_afflvl; level <= end_afflvl; level++) {
+
+               /*
+                * Grab the node for each affinity level. No affinity level
+                * can be missing as that would mean that the topology tree
+                * is corrupted.
+                */
+               node = psci_get_aff_map_node(mpidr, level);
+               if (node == NULL) {
+                       rc = PSCI_E_INVALID_PARAMS;
+                       break;
+               }
+
+               /*
+                * Skip absent affinity levels unless it's affinity level 0.
+                * An absent cpu means that the mpidr is invalid. Save the
+                * pointer to the node for the present affinity level
+                */
+               if (!(node->state & PSCI_AFF_PRESENT)) {
+                       if (level == MPIDR_AFFLVL0) {
+                               rc = PSCI_E_INVALID_PARAMS;
+                               break;
+                       }
+
+                       mpidr_nodes[level] = NULL;
+               } else
+                       mpidr_nodes[level] = node;
+       }
+
+       return rc;
+}
+
+/*******************************************************************************
+ * Function which initializes the 'aff_map_node' corresponding to an affinity
+ * level instance. Each node has a unique mpidr, level and bakery lock. The data
+ * field is opaque and holds affinity level specific data e.g. for affinity
+ * level 0 it contains the index into arrays that hold the secure/non-secure
+ * state for a cpu that's been turned on/off
+ ******************************************************************************/
+static void psci_init_aff_map_node(unsigned long mpidr,
+                                  int level,
+                                  unsigned int idx)
+{
+       unsigned char state;
+       uint32_t linear_id;
+       psci_aff_map[idx].mpidr = mpidr;
+       psci_aff_map[idx].level = level;
+       bakery_lock_init(&psci_aff_map[idx].lock);
+
+       /*
+        * If an affinity instance is present then mark it as OFF to begin with.
+        */
+       state = plat_get_aff_state(level, mpidr);
+       psci_aff_map[idx].state = state;
+
+       /* Cpu-level (level 0) nodes carry extra per-cpu state */
+       if (level == MPIDR_AFFLVL0) {
+
+               /*
+                * Mark the cpu as OFF. Higher affinity level reference counts
+                * have already been memset to 0
+                */
+               if (state & PSCI_AFF_PRESENT)
+                       psci_set_state(&psci_aff_map[idx], PSCI_STATE_OFF);
+
+               /* Ensure that we have not overflowed the psci_ns_einfo array */
+               assert(psci_ns_einfo_idx < PSCI_NUM_AFFS);
+
+               /* 'data' for a cpu node indexes its ns entry information */
+               psci_aff_map[idx].data = psci_ns_einfo_idx;
+               psci_ns_einfo_idx++;
+
+               /*
+                * Associate a non-secure context with this affinity
+                * instance through the context management library.
+                */
+               linear_id = platform_get_core_pos(mpidr);
+               assert(linear_id < PLATFORM_CORE_COUNT);
+
+               cm_set_context(mpidr,
+                               (void *) &psci_ns_context[linear_id],
+                               NON_SECURE);
+
+               /* Initialize exception stack in the context */
+               cm_init_exception_stack(mpidr, NON_SECURE);
+       }
+
+       return;
+}
+
+/*******************************************************************************
+ * Core routine used by the Breadth-First-Search algorithm to populate the
+ * affinity tree. Each level in the tree corresponds to an affinity level. This
+ * routine's aim is to traverse to the target affinity level and populate nodes
+ * in the 'psci_aff_map' for all the siblings at that level. It uses the current
+ * affinity level to keep track of how many levels from the root of the tree
+ * have been traversed. If the current affinity level != target affinity level,
+ * then the platform is asked to return the number of children that each
+ * affinity instance has at the current affinity level. Traversal is then done
+ * for each child at the next lower level i.e. current affinity level - 1.
+ *
+ * CAUTION: This routine assumes that affinity instance ids are allocated in a
+ * monotonically increasing manner at each affinity level in a mpidr starting
+ * from 0. If the platform breaks this assumption then this code will have to
+ * be reworked accordingly.
+ ******************************************************************************/
+static unsigned int psci_init_aff_map(unsigned long mpidr,
+                                     unsigned int affmap_idx,
+                                     int cur_afflvl,
+                                     int tgt_afflvl)
+{
+       unsigned int ctr, aff_count;
+
+       assert(cur_afflvl >= tgt_afflvl);
+
+       /*
+        * Find the number of siblings at the current affinity level &
+        * assert if there are none 'cause then we have been invoked with
+        * an invalid mpidr.
+        */
+       aff_count = plat_get_aff_count(cur_afflvl, mpidr);
+       assert(aff_count);
+
+       if (tgt_afflvl < cur_afflvl) {
+               /* Not yet at the target level: recurse into each child */
+               for (ctr = 0; ctr < aff_count; ctr++) {
+                       mpidr = mpidr_set_aff_inst(mpidr, ctr, cur_afflvl);
+                       affmap_idx = psci_init_aff_map(mpidr,
+                                                      affmap_idx,
+                                                      cur_afflvl - 1,
+                                                      tgt_afflvl);
+               }
+       } else {
+               /* At the target level: populate a node for every sibling */
+               for (ctr = 0; ctr < aff_count; ctr++, affmap_idx++) {
+                       mpidr = mpidr_set_aff_inst(mpidr, ctr, cur_afflvl);
+                       psci_init_aff_map_node(mpidr, cur_afflvl, affmap_idx);
+               }
+
+               /* affmap_idx is 1 greater than the max index of cur_afflvl */
+               psci_aff_limits[cur_afflvl].max = affmap_idx - 1;
+       }
+
+       /* The returned value is the next free index in psci_aff_map[] */
+       return affmap_idx;
+}
+
+/*******************************************************************************
+ * This function initializes the topology tree by querying the platform. To do
+ * so, it's helper routines implement a Breadth-First-Search. At each affinity
+ * level the platform conveys the number of affinity instances that exist i.e.
+ * the affinity count. The algorithm populates the psci_aff_map recursively
+ * using this information. On a platform that implements two clusters of 4 cpus
+ * each, the populated aff_map_array would look like this:
+ *
+ *            <- cpus cluster0 -><- cpus cluster1 ->
+ * ---------------------------------------------------
+ * | 0  | 1  | 0  | 1  | 2  | 3  | 0  | 1  | 2  | 3  |
+ * ---------------------------------------------------
+ *           ^                                       ^
+ * cluster __|                                 cpu __|
+ * limit                                      limit
+ *
+ * The first 2 entries are of the cluster nodes. The next 4 entries are of cpus
+ * within cluster 0. The last 4 entries are of cpus within cluster 1.
+ * The 'psci_aff_limits' array contains the max & min index of each affinity
+ * level within the 'psci_aff_map' array. This allows restricting search of a
+ * node at an affinity level between the indices in the limits array.
+ ******************************************************************************/
+int32_t psci_setup(void)
+{
+       unsigned long mpidr = read_mpidr();
+       int afflvl, affmap_idx, max_afflvl;
+       aff_map_node *node;
+
+       /* Start with no ns entry info allocated and no platform hooks */
+       psci_ns_einfo_idx = 0;
+       psci_plat_pm_ops = NULL;
+
+       /* Find out the maximum affinity level that the platform implements */
+       max_afflvl = get_max_afflvl();
+       assert(max_afflvl <= MPIDR_MAX_AFFLVL);
+
+       /*
+        * This call traverses the topology tree with help from the platform and
+        * populates the affinity map using a breadth-first-search recursively.
+        * We assume that the platform allocates affinity instance ids from 0
+        * onwards at each affinity level in the mpidr. FIRST_MPIDR = 0.0.0.0
+        */
+       affmap_idx = 0;
+       for (afflvl = max_afflvl; afflvl >= MPIDR_AFFLVL0; afflvl--) {
+               affmap_idx = psci_init_aff_map(FIRST_MPIDR,
+                                              affmap_idx,
+                                              max_afflvl,
+                                              afflvl);
+       }
+
+       /*
+        * Set the bounds for the affinity counts of each level in the map. Also
+        * flush out the entire array so that it's visible to subsequent power
+        * management operations. The 'psci_aff_map' array is allocated in
+        * coherent memory so does not need flushing. The 'psci_aff_limits'
+        * array is allocated in normal memory. It will be accessed when the mmu
+        * is off e.g. after reset. Hence it needs to be flushed.
+        */
+       /*
+        * Each level's minimum index immediately follows the previous (higher)
+        * level's maximum, which psci_init_aff_map() filled in above. The top
+        * level's min is presumably 0 from static initialisation - confirm
+        * against the definition of psci_aff_limits.
+        */
+       for (afflvl = MPIDR_AFFLVL0; afflvl < max_afflvl; afflvl++) {
+               psci_aff_limits[afflvl].min =
+                       psci_aff_limits[afflvl + 1].max + 1;
+       }
+
+       flush_dcache_range((unsigned long) psci_aff_limits,
+                          sizeof(psci_aff_limits));
+
+       /*
+        * Mark the affinity instances in our mpidr as ON. No need to lock as
+        * this is the primary cpu.
+        */
+       mpidr &= MPIDR_AFFINITY_MASK;
+       for (afflvl = MPIDR_AFFLVL0; afflvl <= max_afflvl; afflvl++) {
+
+               node = psci_get_aff_map_node(mpidr, afflvl);
+               assert(node);
+
+               /* Mark each present node as ON. */
+               if (node->state & PSCI_AFF_PRESENT)
+                       psci_set_state(node, PSCI_STATE_ON);
+       }
+
+       /* The platform must supply its power management hooks */
+       platform_setup_pm(&psci_plat_pm_ops);
+       assert(psci_plat_pm_ops);
+
+       return 0;
+}
diff --git a/services/std_svc/std_svc_setup.c b/services/std_svc/std_svc_setup.c
new file mode 100644 (file)
index 0000000..080836a
--- /dev/null
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <uuid.h>
+#include <context_mgmt.h>
+#include <runtime_svc.h>
+#include <std_svc.h>
+#include <psci.h>
+#include <psci_private.h>
+#include <debug.h>
+
+/* Standard Service UUID */
+DEFINE_SVC_UUID(arm_svc_uid,
+               0x108d905b, 0xf863, 0x47e8, 0xae, 0x2d,
+               0xc0, 0xfb, 0x56, 0x41, 0xf6, 0xe2);
+
+/* Initialisation hook for the Standard Service */
+static int32_t std_svc_setup(void)
+{
+       int32_t rc;
+
+       /*
+        * PSCI is currently the sole specification hosted under the
+        * Standard Service, so its setup result is ours as well.
+        */
+       rc = psci_setup();
+       return rc;
+}
+
+/*
+ * Top-level Standard Service SMC handler. This handler will in turn dispatch
+ * calls to PSCI SMC handler
+ */
+uint64_t std_svc_smc_handler(uint32_t smc_fid,
+                            uint64_t x1,
+                            uint64_t x2,
+                            uint64_t x3,
+                            uint64_t x4,
+                            void *cookie,
+                            void *handle,
+                            uint64_t flags)
+{
+       /*
+        * Dispatch PSCI calls to PSCI SMC handler and return its return
+        * value
+        */
+       if (is_psci_fid(smc_fid)) {
+               return psci_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
+                               handle, flags);
+       }
+
+       /*
+        * Service-level queries. The SMC_RET and SMC_UUID_RET macros return
+        * directly to the caller, hence no 'break' statements below.
+        */
+       switch (smc_fid) {
+       case ARM_STD_SVC_CALL_COUNT:
+               /*
+                * Return the number of Standard Service Calls. PSCI is the only
+                * standard service implemented; so return number of PSCI calls
+                */
+               SMC_RET1(handle, PSCI_NUM_CALLS);
+
+       case ARM_STD_SVC_UID:
+               /* Return UID to the caller */
+               SMC_UUID_RET(handle, arm_svc_uid);
+
+       case ARM_STD_SVC_VERSION:
+               /* Return the version of current implementation */
+               SMC_RET2(handle, STD_SVC_VERSION_MAJOR, STD_SVC_VERSION_MINOR);
+
+       default:
+               WARN("Unimplemented Standard Service Call: 0x%x \n", smc_fid);
+               SMC_RET1(handle, SMC_UNK);
+       }
+}
+
+/*
+ * Register the Standard Service as a runtime service: it owns the OEN_STD
+ * range of fast SMC function ids, is initialised via std_svc_setup() and
+ * dispatches via std_svc_smc_handler().
+ */
+DECLARE_RT_SVC(
+               std_svc,
+
+               OEN_STD_START,
+               OEN_STD_END,
+               SMC_TYPE_FAST,
+               std_svc_setup,
+               std_svc_smc_handler
+);