*/
#include <assert.h>
+#include <context_mgmt.h>
#include <debug.h>
+#include <errno.h>
#include <smccc.h>
#include <smccc_helpers.h>
#include <spci_svc.h>
#include <spinlock.h>
+#include <sprt_host.h>
+#include <sprt_svc.h>
#include <string.h>
#include <utils.h>
SMC_RET1(handle, SPCI_SUCCESS);
}
+/*******************************************************************************
+ * This function issues a blocking request to the Secure Service identified by
+ * the given handle and client ID, and returns the service's response to the
+ * caller.
+ ******************************************************************************/
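+/*
+ * Register usage assumed here (inferred from how the arguments are unpacked
+ * and how the result is returned below, rather than quoted from a spec):
+ *   x7[31:16] = service handle, x7[15:0] = client ID
+ *   x6        = session ID
+ *   x1-x5     = service-specific payload (forwarded together with smc_fid)
+ * On SPCI_SUCCESS the service response is returned to the caller in x1-x3.
+ */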
+static uint64_t spci_service_request_blocking(void *handle,
+ uint32_t smc_fid, u_register_t x1, u_register_t x2,
+ u_register_t x3, u_register_t x4, u_register_t x5,
+ u_register_t x6, u_register_t x7)
+{
+ spci_handle_t *handle_info;
+ sp_context_t *sp_ctx;
+ cpu_context_t *cpu_ctx;
+ uint32_t rx0;
+ u_register_t rx1, rx2, rx3;
+ uint16_t request_handle, client_id;
+
+ /* Get handle array lock */
+ spin_lock(&spci_handles_lock);
+
+ /* Get pointer to struct of this open handle and client ID. */
+ request_handle = (x7 >> 16U) & 0x0000FFFFU;
+ client_id = x7 & 0x0000FFFFU;
+
+ handle_info = spci_handle_info_get(request_handle, client_id);
+ if (handle_info == NULL) {
+ spin_unlock(&spci_handles_lock);
+
+ WARN("SPCI_SERVICE_TUN_REQUEST_BLOCKING: Not found.\n");
+ WARN(" Handle 0x%04x. Client ID 0x%04x\n", request_handle,
+ client_id);
+
+ SMC_RET1(handle, SPCI_BUSY);
+ }
+
+ /* Get pointer to the Secure Partition that handles the service */
+ sp_ctx = handle_info->sp_ctx;
+ assert(sp_ctx != NULL);
+ cpu_ctx = &(sp_ctx->cpu_ctx);
+
+ /* Blocking requests are only allowed if the queue is empty */
+ if (handle_info->num_active_requests > 0) {
+ spin_unlock(&spci_handles_lock);
+
+ SMC_RET1(handle, SPCI_BUSY);
+ }
+
+ /* Prevent this handle from being closed */
+ handle_info->num_active_requests += 1;
+
+ /* Release handle lock */
+ spin_unlock(&spci_handles_lock);
+
+ /* Save the Normal world context */
+ cm_el1_sysregs_context_save(NON_SECURE);
+
+ /* Wait until the Secure Partition is idle and set it to busy. */
+ sp_state_wait_switch(sp_ctx, SP_STATE_IDLE, SP_STATE_BUSY);
+
+ /* Pass arguments to the Secure Partition */
+ struct sprt_queue_entry_message message = {
+ .type = SPRT_MSG_TYPE_SERVICE_TUN_REQUEST,
+ .client_id = client_id,
+ .service_handle = request_handle,
+ .session_id = x6,
+ .token = 0, /* No token needed for blocking requests */
+ .args = {smc_fid, x1, x2, x3, x4, x5}
+ };
+
+ spin_lock(&(sp_ctx->spm_sp_buffer_lock));
+ int rc = sprt_push_message((void *)sp_ctx->spm_sp_buffer_base, &message,
+ SPRT_QUEUE_NUM_BLOCKING);
+ spin_unlock(&(sp_ctx->spm_sp_buffer_lock));
+ if (rc != 0) {
+ /*
+ * This shouldn't happen: blocking requests are only made when the
+ * request queue is empty, so there must be room for this message.
+ */
+ assert(rc == -ENOMEM);
+ ERROR("SPCI_SERVICE_TUN_REQUEST_BLOCKING: Queue is full.\n");
+ panic();
+ }
+
+ /* Jump to the Secure Partition. */
+ rx0 = spm_sp_synchronous_entry(sp_ctx);
+
+ /* Verify returned value */
+ if (rx0 != SPRT_PUT_RESPONSE_AARCH64) {
+ ERROR("SPM: %s: Unexpected x0 value 0x%x\n", __func__, rx0);
+ panic();
+ }
+
+ rx1 = read_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X3);
+ rx2 = read_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X4);
+ rx3 = read_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X5);
+
+ /* Flag Secure Partition as idle. */
+ assert(sp_ctx->state == SP_STATE_BUSY);
+ sp_state_set(sp_ctx, SP_STATE_IDLE);
+
+ /* Decrease count of requests. */
+ spin_lock(&spci_handles_lock);
+ handle_info->num_active_requests -= 1;
+ spin_unlock(&spci_handles_lock);
+
+ /* Restore non-secure state */
+ cm_el1_sysregs_context_restore(NON_SECURE);
+ cm_set_next_eret_context(NON_SECURE);
+
+ SMC_RET4(handle, SPCI_SUCCESS, rx1, rx2, rx3);
+}
+
/*******************************************************************************
* This function handles all SMCs in the range reserved for SPCI.
******************************************************************************/
case SPCI_FID_SERVICE_HANDLE_CLOSE:
return spci_service_handle_close(handle, x1);
+ case SPCI_FID_SERVICE_REQUEST_BLOCKING:
+ {
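+ /*
+ * Only x1-x4 are passed to this handler as C arguments; x5-x7 are
+ * read back from the saved Normal World register context.
+ */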
+ uint64_t x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+ uint64_t x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+ uint64_t x7 = SMC_GET_GP(handle, CTX_GPREG_X7);
+
+ return spci_service_request_blocking(handle,
+ smc_fid, x1, x2, x3, x4, x5, x6, x7);
+ }
+
default:
break;
}
/* Base and size of the shared SPM<->SP buffer */
uintptr_t spm_sp_buffer_base;
size_t spm_sp_buffer_size;
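+ /* Lock protecting concurrent accesses to the shared buffer */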
+ spinlock_t spm_sp_buffer_lock;
} sp_context_t;
+/* Functions used to enter/exit a Secure Partition synchronously */
+uint64_t spm_sp_synchronous_entry(sp_context_t *sp_ctx);
+__dead2 void spm_sp_synchronous_exit(uint64_t rc);
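+/*
+ * spm_sp_synchronous_entry() is expected to return the value that the Secure
+ * Partition's exit path passes to spm_sp_synchronous_exit(); the blocking
+ * request handler above checks this value against SPRT_PUT_RESPONSE_AARCH64.
+ */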
+
/* Assembly helpers */
uint64_t spm_secure_partition_enter(uint64_t *c_rt_ctx);
void __dead2 spm_secure_partition_exit(uint64_t c_rt_ctx, uint64_t ret);
/* Secure Partition setup */
void spm_sp_setup(sp_context_t *sp_ctx);
+/* Secure Partition state management helpers */
+void sp_state_set(sp_context_t *sp_ptr, sp_state_t state);
+void sp_state_wait_switch(sp_context_t *sp_ptr, sp_state_t from, sp_state_t to);
+int sp_state_try_switch(sp_context_t *sp_ptr, sp_state_t from, sp_state_t to);
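+/*
+ * Assumed semantics, based on their use in the blocking request path above:
+ * sp_state_wait_switch() spins until the partition state equals 'from' and
+ * then sets it to 'to'; sp_state_try_switch() attempts the same transition
+ * once and returns 0 only if it succeeded.
+ */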
+
/* Functions related to the translation tables management */
xlat_ctx_t *spm_sp_xlat_context_alloc(void);
void sp_map_memory_regions(sp_context_t *sp_ctx);