SPM: Support blocking calls
author      Antonio Nino Diaz <antonio.ninodiaz@arm.com>
            Thu, 8 Nov 2018 14:21:19 +0000 (14:21 +0000)
committer   Antonio Nino Diaz <antonio.ninodiaz@arm.com>
            Tue, 11 Dec 2018 15:04:24 +0000 (15:04 +0000)
Note that the arguments passed during the SMC call don't comply with the
SPCI specification yet. This will be fixed in follow-up patches, but a
few more SPCI SMCs need to be implemented before that can be done. The
current code allows us to start testing this feature.

Change-Id: Ief0e75d072b311737fcdb0c6a60ba5b7406a9ee5
Signed-off-by: Antonio Nino Diaz <antonio.ninodiaz@arm.com>
services/std_svc/spm/sp_setup.c
services/std_svc/spm/spci.c
services/std_svc/spm/spm_main.c
services/std_svc/spm/spm_private.h
services/std_svc/spm/sprt.c
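
For reference, a rough sketch of how a normal-world client could issue the
new blocking request, based only on the register usage implemented in
spci_service_request_blocking() below. The smc() wrapper, the ret structure
and the fid/argument names are illustrative assumptions, not part of this
patch:

    /*
     * Register convention implied by spci_service_request_blocking():
     *   x0     function ID (SPCI_FID_SERVICE_REQUEST_BLOCKING range)
     *   x1-x5  service-specific arguments (copied into the SPRT message)
     *   x6     session ID
     *   x7     service handle in bits [31:16], client ID in bits [15:0]
     * On return, x0 is SPCI_SUCCESS and x1-x3 carry the response, or x0 is
     * SPCI_BUSY if the handle is unknown or already has an active request.
     */
    uint64_t x7 = ((uint64_t)service_handle << 16) | (client_id & 0xFFFFU);

    smc(fid, arg1, arg2, arg3, arg4, arg5, session_id, x7, &ret);

    if (ret.x0 == SPCI_BUSY) {
            /* Retry later or report the error to the caller. */
    }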

diff --git a/services/std_svc/spm/sp_setup.c b/services/std_svc/spm/sp_setup.c
index 83390784247b3f611d0db13a49a599e2b8e32d0c..aca779f5df877e3eb5c6db1f7683b32aa1ed177f 100644
--- a/services/std_svc/spm/sp_setup.c
+++ b/services/std_svc/spm/sp_setup.c
@@ -14,6 +14,7 @@
 #include <platform_def.h>
 #include <platform.h>
 #include <sp_res_desc.h>
+#include <sprt_host.h>
 #include <string.h>
 #include <xlat_tables_v2.h>
 
@@ -132,4 +133,13 @@ void spm_sp_setup(sp_context_t *sp_ctx)
         */
        write_ctx_reg(get_sysregs_ctx(ctx), CTX_CPACR_EL1,
                        CPACR_EL1_FPEN(CPACR_EL1_FP_TRAP_NONE));
+
+       /*
+        * Prepare shared buffers
+        * ----------------------
+        */
+
+       /* Initialize SPRT queues */
+       sprt_initialize_queues((void *)sp_ctx->spm_sp_buffer_base,
+                              sp_ctx->spm_sp_buffer_size);
 }
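
The two sprt_host.h helpers used by this patch are split across files; the
sketch below only gathers them in one place. The internal layout of the
shared buffer is owned by the SPRT library and is not described here:

    /* Once, at SP setup time: lay out the SPRT queues in the shared buffer. */
    sprt_initialize_queues((void *)sp_ctx->spm_sp_buffer_base,
                           sp_ctx->spm_sp_buffer_size);

    /* Per request (see spci.c below): writers serialise on the new per-SP
     * buffer lock and push into the blocking queue. */
    spin_lock(&sp_ctx->spm_sp_buffer_lock);
    rc = sprt_push_message((void *)sp_ctx->spm_sp_buffer_base, &message,
                           SPRT_QUEUE_NUM_BLOCKING);
    spin_unlock(&sp_ctx->spm_sp_buffer_lock);
    /* A return value of -ENOMEM means the selected queue is full. */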
diff --git a/services/std_svc/spm/spci.c b/services/std_svc/spm/spci.c
index cb1b8fdea3d2bb8f4f7d5c14a1b8f28277de036e..cbb0f3cc379dc12f52340c950f82f8081fc98469 100644
--- a/services/std_svc/spm/spci.c
+++ b/services/std_svc/spm/spci.c
@@ -5,11 +5,15 @@
  */
 
 #include <assert.h>
+#include <context_mgmt.h>
 #include <debug.h>
+#include <errno.h>
 #include <smccc.h>
 #include <smccc_helpers.h>
 #include <spci_svc.h>
 #include <spinlock.h>
+#include <sprt_host.h>
+#include <sprt_svc.h>
 #include <string.h>
 #include <utils.h>
 
@@ -242,6 +246,116 @@ static uint64_t spci_service_handle_close(void *handle, u_register_t x1)
        SMC_RET1(handle, SPCI_SUCCESS);
 }
 
+/*******************************************************************************
+ * This function requests a Secure Service from a given handle and client ID.
+ ******************************************************************************/
+static uint64_t spci_service_request_blocking(void *handle,
+                       uint32_t smc_fid, u_register_t x1, u_register_t x2,
+                       u_register_t x3, u_register_t x4, u_register_t x5,
+                       u_register_t x6, u_register_t x7)
+{
+       spci_handle_t *handle_info;
+       sp_context_t *sp_ctx;
+       cpu_context_t *cpu_ctx;
+       uint32_t rx0;
+       u_register_t rx1, rx2, rx3;
+       uint16_t request_handle, client_id;
+
+       /* Get handle array lock */
+       spin_lock(&spci_handles_lock);
+
+       /* Get pointer to struct of this open handle and client ID. */
+       request_handle = (x7 >> 16U) & 0x0000FFFFU;
+       client_id = x7 & 0x0000FFFFU;
+
+       handle_info = spci_handle_info_get(request_handle, client_id);
+       if (handle_info == NULL) {
+               spin_unlock(&spci_handles_lock);
+
+               WARN("SPCI_SERVICE_TUN_REQUEST_BLOCKING: Not found.\n");
+               WARN("  Handle 0x%04x. Client ID 0x%04x\n", request_handle,
+                    client_id);
+
+               SMC_RET1(handle, SPCI_BUSY);
+       }
+
+       /* Get pointer to the Secure Partition that handles the service */
+       sp_ctx = handle_info->sp_ctx;
+       assert(sp_ctx != NULL);
+       cpu_ctx = &(sp_ctx->cpu_ctx);
+
+       /* Blocking requests are only allowed if the queue is empty */
+       if (handle_info->num_active_requests > 0) {
+               spin_unlock(&spci_handles_lock);
+
+               SMC_RET1(handle, SPCI_BUSY);
+       }
+
+       /* Prevent this handle from being closed */
+       handle_info->num_active_requests += 1;
+
+       /* Release handle lock */
+       spin_unlock(&spci_handles_lock);
+
+       /* Save the Normal world context */
+       cm_el1_sysregs_context_save(NON_SECURE);
+
+       /* Wait until the Secure Partition is idle and set it to busy. */
+       sp_state_wait_switch(sp_ctx, SP_STATE_IDLE, SP_STATE_BUSY);
+
+       /* Pass arguments to the Secure Partition */
+       struct sprt_queue_entry_message message = {
+               .type = SPRT_MSG_TYPE_SERVICE_TUN_REQUEST,
+               .client_id = client_id,
+               .service_handle = request_handle,
+               .session_id = x6,
+               .token = 0, /* No token needed for blocking requests */
+               .args = {smc_fid, x1, x2, x3, x4, x5}
+       };
+
+       spin_lock(&(sp_ctx->spm_sp_buffer_lock));
+       int rc = sprt_push_message((void *)sp_ctx->spm_sp_buffer_base, &message,
+                                  SPRT_QUEUE_NUM_BLOCKING);
+       spin_unlock(&(sp_ctx->spm_sp_buffer_lock));
+       if (rc != 0) {
+               /*
+                * This shouldn't happen, blocking requests can only be made if
+                * the request queue is empty.
+                */
+               assert(rc == -ENOMEM);
+               ERROR("SPCI_SERVICE_TUN_REQUEST_BLOCKING: Queue is full.\n");
+               panic();
+       }
+
+       /* Jump to the Secure Partition. */
+       rx0 = spm_sp_synchronous_entry(sp_ctx);
+
+       /* Verify returned value */
+       if (rx0 != SPRT_PUT_RESPONSE_AARCH64) {
+               ERROR("SPM: %s: Unexpected x0 value 0x%x\n", __func__, rx0);
+               panic();
+       }
+
+       rx1 = read_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X1);
+       rx2 = read_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X2);
+       rx3 = read_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X3);
+
+       /* Flag Secure Partition as idle. */
+       assert(sp_ctx->state == SP_STATE_BUSY);
+       sp_state_set(sp_ctx, SP_STATE_IDLE);
+
+       /* Decrease count of requests. */
+       spin_lock(&spci_handles_lock);
+       handle_info->num_active_requests -= 1;
+       spin_unlock(&spci_handles_lock);
+
+       /* Restore non-secure state */
+       cm_el1_sysregs_context_restore(NON_SECURE);
+       cm_set_next_eret_context(NON_SECURE);
+
+       SMC_RET4(handle, SPCI_SUCCESS, rx1, rx2, rx3);
+}
+
 /*******************************************************************************
  * This function handles all SMCs in the range reserved for SPCI.
  ******************************************************************************/
@@ -285,6 +399,16 @@ uint64_t spci_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
                case SPCI_FID_SERVICE_HANDLE_CLOSE:
                        return spci_service_handle_close(handle, x1);
 
+               case SPCI_FID_SERVICE_REQUEST_BLOCKING:
+               {
+                       uint64_t x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+                       uint64_t x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+                       uint64_t x7 = SMC_GET_GP(handle, CTX_GPREG_X7);
+
+                       return spci_service_request_blocking(handle,
+                                       smc_fid, x1, x2, x3, x4, x5, x6, x7);
+               }
+
                default:
                        break;
                }
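
As a concrete example of the x7 encoding unpacked by the handler above
(values chosen purely for illustration):

    /* x7 = 0x00230001 */
    request_handle = (0x00230001U >> 16U) & 0x0000FFFFU;    /* 0x0023 */
    client_id      =  0x00230001U         & 0x0000FFFFU;    /* 0x0001 */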
diff --git a/services/std_svc/spm/spm_main.c b/services/std_svc/spm/spm_main.c
index ed188d48a42f97caafb353bcea1a7b5dd537780f..b1d5dd86d7acb94cd7f62cf2a69a06ece858c2d6 100644
--- a/services/std_svc/spm/spm_main.c
+++ b/services/std_svc/spm/spm_main.c
@@ -18,6 +18,7 @@
 #include <spinlock.h>
 #include <string.h>
 #include <spm_svc.h>
+#include <sprt_svc.h>
 #include <utils.h>
 #include <xlat_tables_v2.h>
 
@@ -133,7 +134,7 @@ int sp_state_try_switch(sp_context_t *sp_ptr, sp_state_t from, sp_state_t to)
  * This function takes an SP context pointer and performs a synchronous entry
  * into it.
  ******************************************************************************/
-static uint64_t spm_sp_synchronous_entry(sp_context_t *sp_ctx)
+uint64_t spm_sp_synchronous_entry(sp_context_t *sp_ctx)
 {
        uint64_t rc;
        unsigned int linear_id = plat_my_core_pos();
@@ -165,7 +166,7 @@ static uint64_t spm_sp_synchronous_entry(sp_context_t *sp_ctx)
  * This function returns to the place where spm_sp_synchronous_entry() was
  * called originally.
  ******************************************************************************/
-__dead2 static void spm_sp_synchronous_exit(uint64_t rc)
+__dead2 void spm_sp_synchronous_exit(uint64_t rc)
 {
        /* Get context of the SP in use by this CPU. */
        unsigned int linear_id = plat_my_core_pos();
@@ -202,7 +203,10 @@ static int32_t spm_init(void)
                ctx->state = SP_STATE_RESET;
 
                rc = spm_sp_synchronous_entry(ctx);
-               assert(rc == 0);
+               if (rc != SPRT_YIELD_AARCH64) {
+                       ERROR("Unexpected return value 0x%llx\n", rc);
+                       panic();
+               }
 
                ctx->state = SP_STATE_IDLE;
 
diff --git a/services/std_svc/spm/spm_private.h b/services/std_svc/spm/spm_private.h
index 9a4e82b27dcf9625e31c0052132fa97d6d98cad3..a8234c36f08ee6b7a84e34d417f2c12de4bd3ff2 100644
--- a/services/std_svc/spm/spm_private.h
+++ b/services/std_svc/spm/spm_private.h
@@ -61,8 +61,13 @@ typedef struct sp_context {
        /* Base and size of the shared SPM<->SP buffer */
        uintptr_t spm_sp_buffer_base;
        size_t spm_sp_buffer_size;
+       spinlock_t spm_sp_buffer_lock;
 } sp_context_t;
 
+/* Functions used to enter/exit a Secure Partition synchronously */
+uint64_t spm_sp_synchronous_entry(sp_context_t *sp_ctx);
+__dead2 void spm_sp_synchronous_exit(uint64_t rc);
+
 /* Assembly helpers */
 uint64_t spm_secure_partition_enter(uint64_t *c_rt_ctx);
 void __dead2 spm_secure_partition_exit(uint64_t c_rt_ctx, uint64_t ret);
@@ -70,6 +75,11 @@ void __dead2 spm_secure_partition_exit(uint64_t c_rt_ctx, uint64_t ret);
 /* Secure Partition setup */
 void spm_sp_setup(sp_context_t *sp_ctx);
 
+/* Secure Partition state management helpers */
+void sp_state_set(sp_context_t *sp_ptr, sp_state_t state);
+void sp_state_wait_switch(sp_context_t *sp_ptr, sp_state_t from, sp_state_t to);
+int sp_state_try_switch(sp_context_t *sp_ptr, sp_state_t from, sp_state_t to);
+
 /* Functions related to the translation tables management */
 xlat_ctx_t *spm_sp_xlat_context_alloc(void);
 void sp_map_memory_regions(sp_context_t *sp_ctx);
diff --git a/services/std_svc/spm/sprt.c b/services/std_svc/spm/sprt.c
index 8d0c510b933f9bb22642b9281d4b1557c5bd9ae4..5330025e798499b551f1c7f036ad8c6b16614ac1 100644
--- a/services/std_svc/spm/sprt.c
+++ b/services/std_svc/spm/sprt.c
@@ -39,6 +39,20 @@ uint64_t sprt_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
        case SPRT_VERSION:
                SMC_RET1(handle, SPRT_VERSION_COMPILED);
 
+       case SPRT_PUT_RESPONSE_AARCH64:
+               /*
+                * Registers x1-x3 aren't saved by default to the context,
+                * but they are needed after spm_sp_synchronous_exit() because
+                * they hold return values.
+                */
+               SMC_SET_GP(handle, CTX_GPREG_X1, x1);
+               SMC_SET_GP(handle, CTX_GPREG_X2, x2);
+               SMC_SET_GP(handle, CTX_GPREG_X3, x3);
+               spm_sp_synchronous_exit(SPRT_PUT_RESPONSE_AARCH64);
+
+       case SPRT_YIELD_AARCH64:
+               spm_sp_synchronous_exit(SPRT_YIELD_AARCH64);
+
        default:
                break;
        }
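
For completeness, the SP-side convention implied by the handler above. How a
partition runtime actually reaches this handler, and the wrapper names used
here, are assumptions that lie outside this patch:

    /*
     * SPRT_PUT_RESPONSE_AARCH64: x1-x3 carry the response values. The SPM
     * saves them into the context and spci_service_request_blocking()
     * forwards them to the normal world in x1-x3 of the SPCI result.
     *
     * SPRT_YIELD_AARCH64: takes no arguments. Used, for instance, when the
     * Secure Partition finishes initialisation; spm_init() checks for it.
     */

    /* After servicing a blocking request: */
    sprt_put_response(rx1, rx2, rx3);       /* hypothetical wrapper */

    /* When there is nothing left to do (e.g. right after init): */
    sprt_yield();                           /* hypothetical wrapper */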