powerpc/pseries/svm: Force SWIOTLB for secure guests
author: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Tue, 20 Aug 2019 02:13:24 +0000 (23:13 -0300)
committer: Michael Ellerman <mpe@ellerman.id.au>
Thu, 29 Aug 2019 23:55:41 +0000 (09:55 +1000)
SWIOTLB checks range of incoming CPU addresses to be bounced and sees if
the device can access it through its DMA window without requiring bouncing.
In such cases it just chooses to skip bouncing. But for cases like secure
guests on powerpc platform all addresses need to be bounced into the shared
pool of memory because the host cannot access it otherwise. Hence the need
to do the bouncing is not related to device's DMA window and use of bounce
buffers is forced by setting swiotlb_force.

Also, connect the shared memory conversion functions into the
ARCH_HAS_MEM_ENCRYPT hooks and call swiotlb_update_mem_attributes() to
convert SWIOTLB's memory pool to shared memory.

Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
[ bauerman: Use ARCH_HAS_MEM_ENCRYPT hooks to share swiotlb memory pool. ]
Signed-off-by: Thiago Jung Bauermann <bauerman@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190820021326.6884-15-bauerman@linux.ibm.com
arch/powerpc/include/asm/mem_encrypt.h [new file with mode: 0644]
arch/powerpc/platforms/pseries/Kconfig
arch/powerpc/platforms/pseries/svm.c

diff --git a/arch/powerpc/include/asm/mem_encrypt.h b/arch/powerpc/include/asm/mem_encrypt.h
new file mode 100644 (file)
index 0000000..ba9dab0
--- /dev/null
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * SVM helper functions
+ *
+ * Copyright 2018 IBM Corporation
+ */
+
+#ifndef _ASM_POWERPC_MEM_ENCRYPT_H
+#define _ASM_POWERPC_MEM_ENCRYPT_H
+
+#include <asm/svm.h>
+
+/* True when running as a secure guest (Protected Execution Facility). */
+static inline bool mem_encrypt_active(void)
+{
+       return is_secure_guest();
+}
+
+/*
+ * Secure-guest memory is inaccessible to the host, so all DMA for @dev
+ * must go through unencrypted (shared) bounce buffers.  @dev is unused;
+ * the decision depends only on whether we are a secure guest.
+ */
+static inline bool force_dma_unencrypted(struct device *dev)
+{
+       return is_secure_guest();
+}
+
+/* Implemented in arch/powerpc/platforms/pseries/svm.c. */
+int set_memory_encrypted(unsigned long addr, int numpages);
+int set_memory_decrypted(unsigned long addr, int numpages);
+
+#endif /* _ASM_POWERPC_MEM_ENCRYPT_H */
index d09deb05bb6627e19c6732fc857d4cf06761767b..9e35cddddf7307df8c39782dd9cd008ec3aa1638 100644 (file)
@@ -149,6 +149,9 @@ config PAPR_SCM
 config PPC_SVM
        bool "Secure virtual machine (SVM) support for POWER"
        depends on PPC_PSERIES
+       select SWIOTLB
+       select ARCH_HAS_MEM_ENCRYPT
+       select ARCH_HAS_FORCE_DMA_UNENCRYPTED
        help
         There are certain POWER platforms which support secure guests using
         the Protected Execution Facility, with the help of an Ultravisor
index 2b2b1a77ca1e1ca084da31ffa2d77589025ae952..40c0637203d5bf67e013a075c28abf220f7a771e 100644 (file)
@@ -7,8 +7,53 @@
  */
 
 #include <linux/mm.h>
+#include <asm/machdep.h>
+#include <asm/svm.h>
+#include <asm/swiotlb.h>
 #include <asm/ultravisor.h>
 
+/*
+ * Set up DMA bouncing for a secure guest: keep the SWIOTLB buffer,
+ * force all DMA through it, and share its pages with the host.
+ * No-op (returns 0) on non-secure guests.
+ */
+static int __init init_svm(void)
+{
+       if (!is_secure_guest())
+               return 0;
+
+       /* Don't release the SWIOTLB buffer. */
+       ppc_swiotlb_enable = 1;
+
+       /*
+        * Since the guest memory is inaccessible to the host, devices always
+        * need to use the SWIOTLB buffer for DMA even if dma_capable() says
+        * otherwise.
+        */
+       swiotlb_force = SWIOTLB_FORCE;
+
+       /* Share the SWIOTLB buffer with the host. */
+       swiotlb_update_mem_attributes();
+
+       return 0;
+}
+machine_early_initcall(pseries, init_svm);
+
+/*
+ * Make @numpages pages starting at virtual address @addr guest-private
+ * again by asking the Ultravisor to stop sharing them with the
+ * hypervisor.  @addr must be page aligned; returns -EINVAL otherwise,
+ * 0 on success.
+ */
+int set_memory_encrypted(unsigned long addr, int numpages)
+{
+       if (!PAGE_ALIGNED(addr))
+               return -EINVAL;
+
+       /* uv_unshare_page() takes a PFN, hence the __pa() conversion. */
+       uv_unshare_page(PHYS_PFN(__pa(addr)), numpages);
+
+       return 0;
+}
+
+/*
+ * Share @numpages pages starting at virtual address @addr with the
+ * hypervisor via the Ultravisor, making them host-accessible (e.g. for
+ * the SWIOTLB pool).  @addr must be page aligned; returns -EINVAL
+ * otherwise, 0 on success.
+ */
+int set_memory_decrypted(unsigned long addr, int numpages)
+{
+       if (!PAGE_ALIGNED(addr))
+               return -EINVAL;
+
+       /* uv_share_page() takes a PFN, hence the __pa() conversion. */
+       uv_share_page(PHYS_PFN(__pa(addr)), numpages);
+
+       return 0;
+}
+
 /* There's one dispatch log per CPU. */
 #define NR_DTL_PAGE (DISPATCH_LOG_BYTES * CONFIG_NR_CPUS / PAGE_SIZE)