DEFINE_SYSOP_FUNC(sev)
DEFINE_SYSOP_TYPE_FUNC(dsb, sy)
DEFINE_SYSOP_TYPE_FUNC(dmb, sy)
+DEFINE_SYSOP_TYPE_FUNC(dmb, st)
+DEFINE_SYSOP_TYPE_FUNC(dmb, ld)
DEFINE_SYSOP_TYPE_FUNC(dsb, ish)
DEFINE_SYSOP_TYPE_FUNC(dmb, ish)
DEFINE_SYSOP_FUNC(isb)
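For context, a minimal sketch of how DEFINE_SYSOP_TYPE_FUNC() typically expands (the authoritative definition lives in TF's arch helpers header and may differ in detail): token pasting and stringizing turn the two new instances above into dmbst() and dmbld() inline helpers that emit the "dmb st" and "dmb ld" instructions.

/* Sketch of the helper-generating macro; not copied from this patch. */
#define DEFINE_SYSOP_TYPE_FUNC(_op, _type)		\
static inline void _op ## _type(void)			\
{							\
	__asm__ (#_op " " #_type);			\
}

DEFINE_SYSOP_TYPE_FUNC(dmb, st)	/* -> static inline void dmbst(void) */
DEFINE_SYSOP_TYPE_FUNC(dmb, ld)	/* -> static inline void dmbld(void) */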
#define ARM_G0_IRQS ARM_IRQ_SEC_SGI_0, \
ARM_IRQ_SEC_SGI_6
-#define ARM_SHARED_RAM_ATTR ((PLAT_ARM_SHARED_RAM_CACHED ? \
- MT_MEMORY : MT_DEVICE) \
- | MT_RW | MT_SECURE)
-
#define ARM_MAP_SHARED_RAM MAP_REGION_FLAT( \
ARM_SHARED_RAM_BASE, \
ARM_SHARED_RAM_SIZE, \
- ARM_SHARED_RAM_ATTR)
+ MT_DEVICE | MT_RW | MT_SECURE)
#define ARM_MAP_NS_DRAM1 MAP_REGION_FLAT( \
ARM_NS_DRAM1_BASE, \
/*************************************************************************
* Definitions common to all ARM Compute SubSystems (CSS)
*************************************************************************/
-#define MHU_PAYLOAD_CACHED 0
-
#define NSROM_BASE 0x1f000000
#define NSROM_SIZE 0x00001000
#define SCP_BL2U_BASE BL31_BASE
-#define PLAT_ARM_SHARED_RAM_CACHED MHU_PAYLOAD_CACHED
-
/* Load address of Non-Secure Image for CSS platform ports */
#define PLAT_ARM_NS_IMAGE_OFFSET 0xE0000000
#define PLAT_ARM_DRAM2_SIZE MAKE_ULL(0x780000000)
-#define PLAT_ARM_SHARED_RAM_CACHED 1
-
/*
* Load address of BL33 for this platform port
*/
assert((PLAT_ARM_TRUSTED_MAILBOX_BASE >= ARM_SHARED_RAM_BASE) &&
((PLAT_ARM_TRUSTED_MAILBOX_BASE + sizeof(*mailbox)) <= \
(ARM_SHARED_RAM_BASE + ARM_SHARED_RAM_SIZE)));
-
- /* Flush data cache if the mail box shared RAM is cached */
-#if PLAT_ARM_SHARED_RAM_CACHED
- flush_dcache_range((uintptr_t) mailbox, sizeof(*mailbox));
-#endif
}
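The conditional flush can be dropped because ARM_MAP_SHARED_RAM is now always mapped as Device memory (see the ARM_MAP_SHARED_RAM change above), so the mailbox store never sits in the data cache. A sketch of the resulting write path, assuming the surrounding function simply stores the entry point into the mailbox (the function and field names below are illustrative, not taken from this patch):

/* Sketch only: names are assumptions for illustration. */
static void program_trusted_mailbox(uintptr_t address)
{
	uint64_t *mailbox = (uint64_t *) PLAT_ARM_TRUSTED_MAILBOX_BASE;

	/* Shared RAM is mapped MT_DEVICE (non-cacheable), so a plain store
	 * reaches memory directly and no flush_dcache_range() is needed. */
	*mailbox = (uint64_t) address;
}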
/*******************************************************************************
static void scp_boot_message_send(size_t payload_size)
{
- /* Make sure payload can be seen by SCP */
- if (MHU_PAYLOAD_CACHED)
- flush_dcache_range(BOM_SHARED_MEM,
- sizeof(bom_cmd_t) + payload_size);
+ /* Ensure that any write to the BOM payload area is seen by the SCP
+ * before the write to the MHU register. If these two writes were
+ * reordered by the CPU, the SCP would read stale payload data. */
+ dmbst();
/* Send command to SCP */
mhu_secure_message_send(BOM_MHU_SLOT_ID);
panic();
}
- /* Make sure we see the reply from the SCP and not any stale data */
- if (MHU_PAYLOAD_CACHED)
- inv_dcache_range(BOM_SHARED_MEM, size);
+ /* Ensure that any read from the BOM payload area happens after the
+ * read of the MHU register. If these two reads were reordered, the CPU
+ * would read stale payload data. */
+ dmbld();
return *(uint32_t *) BOM_SHARED_MEM;
}
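The two barriers above pair up across the shared payload and the MHU doorbell: the sender orders its payload stores before the store that raises the doorbell, and the receiver orders its payload loads after the load that observed the doorbell. A generic sketch of the pattern, with placeholder names (doorbell, payload, DOORBELL_ADDR and PAYLOAD_ADDR are assumptions, not the MHU driver's API):

/* Sketch only: addresses and names are placeholders, not the MHU API. */
volatile uint32_t *doorbell = (uint32_t *) DOORBELL_ADDR;
volatile uint32_t *payload  = (uint32_t *) PAYLOAD_ADDR;
uint32_t command = 0x1, response;

/* Sender side: publish the payload, then ring the doorbell. */
payload[0] = command;
dmbst();		/* payload stores are ordered before the doorbell store */
doorbell[0] = 1U;

/* Receiver side: see the doorbell, then read the payload. */
while (doorbell[0] == 0U)
	;
dmbld();		/* doorbell load is ordered before the payload loads */
response = payload[0];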
static void scpi_secure_message_send(size_t payload_size)
{
- /* Make sure payload can be seen by SCP */
- if (MHU_PAYLOAD_CACHED)
- flush_dcache_range(SCPI_SHARED_MEM_AP_TO_SCP,
- sizeof(scpi_cmd_t) + payload_size);
+ /* Ensure that any write to the SCPI payload area is seen by the SCP
+ * before the write to the MHU register. If these two writes were
+ * reordered by the CPU, the SCP would read stale payload data. */
+ dmbst();
mhu_secure_message_send(SCPI_MHU_SLOT_ID);
}
panic();
}
- /* Make sure we don't read stale data */
- if (MHU_PAYLOAD_CACHED)
- inv_dcache_range(SCPI_SHARED_MEM_SCP_TO_AP, sizeof(*cmd));
+ /* Ensure that any read from the SCPI payload area happens after the
+ * read of the MHU register. If these two reads were reordered, the CPU
+ * would read stale payload data. */
+ dmbld();
memcpy(cmd, (void *) SCPI_SHARED_MEM_SCP_TO_AP, sizeof(*cmd));
}