return &psci_req_local_pwr_states[pwrlvl - 1][cpu_idx];
}
+/*
+ * psci_non_cpu_pd_nodes can be placed either in normal memory or coherent
+ * memory.
+ *
+ * With !USE_COHERENT_MEM, psci_non_cpu_pd_nodes is placed in normal memory,
+ * where it is accessed by both cached and non-cached participants. To serve
+ * the common minimum, perform a cache flush before reads and after writes so
+ * that non-cached participants operate on the latest data in main memory.
+ *
+ * With USE_COHERENT_MEM, psci_non_cpu_pd_nodes is placed in coherent memory,
+ * and with HW_ASSISTED_COHERENCY, all PSCI participants are cache-coherent.
+ * In either case, no cache maintenance is required.
+ */
+
+/*
+ * Retrieve the local state of a non-CPU power domain node, performing any
+ * cache maintenance required for a non-cached CPU to read the latest data
+ * from main memory.
+ */
+static plat_local_state_t get_non_cpu_pd_node_local_state(
+ unsigned int parent_idx)
+{
+#if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY)
+ flush_dcache_range(
+ (uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
+ sizeof(psci_non_cpu_pd_nodes[parent_idx]));
+#endif
+ return psci_non_cpu_pd_nodes[parent_idx].local_state;
+}
+
+/*
+ * Update the local state of a non-CPU power domain node from a cached CPU,
+ * performing any cache maintenance required afterwards to make the update
+ * visible to non-cached participants.
+ */
+static void set_non_cpu_pd_node_local_state(unsigned int parent_idx,
+ plat_local_state_t state)
+{
+ psci_non_cpu_pd_nodes[parent_idx].local_state = state;
+#if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY)
+ flush_dcache_range(
+ (uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
+ sizeof(psci_non_cpu_pd_nodes[parent_idx]));
+#endif
+}
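+
+/*
+ * For example (a sketch; `idx` is assumed to be a valid index into
+ * psci_non_cpu_pd_nodes):
+ *
+ *   // Cached CPU publishes a new state: write, then flush to main memory.
+ *   set_non_cpu_pd_node_local_state(idx, PLAT_MAX_OFF_STATE);
+ *
+ *   // Any participant, cached or not, reads it back: flush, then read.
+ *   plat_local_state_t s = get_non_cpu_pd_node_local_state(idx);
+ */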
+
/******************************************************************************
* Helper function to return the current local power state of each power domain
* from the current cpu power domain to its ancestor at the 'end_pwrlvl'. This
/* Copy the local power state from node to state_info */
for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
-#if !USE_COHERENT_MEM
- /*
- * If using normal memory for psci_non_cpu_pd_nodes, we need
- * to flush before reading the local power state as another
- * cpu in the same power domain could have updated it and this
- * code runs before caches are enabled.
- */
- flush_dcache_range(
- (uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
- sizeof(psci_non_cpu_pd_nodes[parent_idx]));
-#endif
- pd_state[lvl] = psci_non_cpu_pd_nodes[parent_idx].local_state;
+ pd_state[lvl] = get_non_cpu_pd_node_local_state(parent_idx);
parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
}
psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]);
/*
- * Need to flush as local_state will be accessed with Data Cache
+ * Need to flush as local_state might be accessed with Data Cache
* disabled during power on
*/
- flush_cpu_data(psci_svc_cpu_data.local_state);
+ psci_flush_cpu_data(psci_svc_cpu_data.local_state);
parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;
/* Copy the local_state from state_info */
for (lvl = 1; lvl <= end_pwrlvl; lvl++) {
- psci_non_cpu_pd_nodes[parent_idx].local_state = pd_state[lvl];
-#if !USE_COHERENT_MEM
- flush_dcache_range(
- (uintptr_t)&psci_non_cpu_pd_nodes[parent_idx],
- sizeof(psci_non_cpu_pd_nodes[parent_idx]));
-#endif
+ set_non_cpu_pd_node_local_state(parent_idx, pd_state[lvl]);
parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
}
}
/* Reset the local_state to RUN for the non cpu power domains. */
for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
- psci_non_cpu_pd_nodes[parent_idx].local_state =
- PSCI_LOCAL_STATE_RUN;
-#if !USE_COHERENT_MEM
- flush_dcache_range(
- (uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
- sizeof(psci_non_cpu_pd_nodes[parent_idx]));
-#endif
+ set_non_cpu_pd_node_local_state(parent_idx,
+ PSCI_LOCAL_STATE_RUN);
psci_set_req_local_pwr_state(lvl,
cpu_idx,
PSCI_LOCAL_STATE_RUN);
psci_set_aff_info_state(AFF_STATE_ON);
psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
- flush_cpu_data(psci_svc_cpu_data);
+ psci_flush_cpu_data(psci_svc_cpu_data);
}
/******************************************************************************
*/
if (rc == PSCI_E_SUCCESS) {
/*
- * Set the affinity info state to OFF. This writes directly to
- * main memory as caches are disabled, so cache maintenance is
+ * Set the affinity info state to OFF. When caches are disabled,
+ * this writes directly to main memory, so cache maintenance is
* required to ensure that later cached reads of aff_info_state
- * return AFF_STATE_OFF. A dsbish() ensures ordering of the
+ * return AFF_STATE_OFF. A dsbish() ensures ordering of the
* update to the affinity info state prior to cache line
* invalidation.
*/
- flush_cpu_data(psci_svc_cpu_data.aff_info_state);
+ psci_flush_cpu_data(psci_svc_cpu_data.aff_info_state);
psci_set_aff_info_state(AFF_STATE_OFF);
- dsbish();
- inv_cpu_data(psci_svc_cpu_data.aff_info_state);
+ psci_dsbish();
+ psci_inv_cpu_data(psci_svc_cpu_data.aff_info_state);
#if ENABLE_RUNTIME_INSTRUMENTATION
#include <psci.h>
#include <spinlock.h>
+#if HW_ASSISTED_COHERENCY
+/*
+ * On systems with hardware-assisted coherency, make PSCI cache operations
+ * NOPs, as PSCI participants are cache-coherent and there is no need for
+ * explicit cache maintenance operations or barriers to coordinate their state.
+ */
+#define psci_flush_dcache_range(addr, size)
+#define psci_flush_cpu_data(member)
+#define psci_inv_cpu_data(member)
+
+#define psci_dsbish()
+#else
+/*
+ * If not all PSCI participants are cache-coherent, perform cache maintenance
+ * and issue barriers wherever required to coordinate state.
+ */
+#define psci_flush_dcache_range(addr, size) flush_dcache_range(addr, size)
+#define psci_flush_cpu_data(member) flush_cpu_data(member)
+#define psci_inv_cpu_data(member) inv_cpu_data(member)
+
+#define psci_dsbish() dsbish()
+#endif
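+
+/*
+ * Call sites then use the wrappers unconditionally. For instance (a sketch;
+ * `ops` is a hypothetical `const plat_psci_ops_t *`):
+ *
+ *   psci_plat_pm_ops = ops;
+ *   psci_flush_dcache_range((uintptr_t)&psci_plat_pm_ops,
+ *                           sizeof(psci_plat_pm_ops));
+ *
+ * With HW_ASSISTED_COHERENCY, the flush expands to nothing and only the
+ * assignment remains.
+ */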
+
/*
* The following helper macros abstract the interface to the Bakery
* Lock API.
/* Set the power state to OFF state */
svc_cpu_data->local_state = PLAT_MAX_OFF_STATE;
- flush_dcache_range((uintptr_t)svc_cpu_data,
+ psci_flush_dcache_range((uintptr_t)svc_cpu_data,
sizeof(*svc_cpu_data));
cm_set_context_by_index(node_idx,
/*
* Flush `psci_plat_pm_ops` as it will be accessed by secondary CPUs
- * during warm boot before data cache is enabled.
+ * during warm boot, possibly before the data cache is enabled.
*/
- flush_dcache_range((uintptr_t)&psci_plat_pm_ops,
+ psci_flush_dcache_range((uintptr_t)&psci_plat_pm_ops,
sizeof(psci_plat_pm_ops));
/* Initialize the psci capability */