KVM: PPC: Inform the userspace about TCE update failures
Author: Alexey Kardashevskiy <aik@ozlabs.ru>
Mon, 10 Sep 2018 08:29:09 +0000 (18:29 +1000)
Committer: Michael Ellerman <mpe@ellerman.id.au>
Tue, 2 Oct 2018 13:09:27 +0000 (23:09 +1000)
We return H_TOO_HARD from TCE update handlers when we think that
the next handler (realmode -> virtual mode -> user mode) has a chance to
handle the request; H_HARDWARE/H_CLOSED otherwise.

This changes the handlers to return H_TOO_HARD on every error, giving
userspace an opportunity to handle any request, or at least to log
them all.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/kvm/book3s_64_vio.c
arch/powerpc/kvm/book3s_64_vio_hv.c

index 3c17977ffea6b3b63f828627128db1b83bf6ac83..984cec822a983261623f4175717bf7af0a7f1315 100644 (file)
@@ -401,7 +401,7 @@ static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
        long ret;
 
        if (WARN_ON_ONCE(iommu_tce_xchg(tbl, entry, &hpa, &dir)))
-               return H_HARDWARE;
+               return H_TOO_HARD;
 
        if (dir == DMA_NONE)
                return H_SUCCESS;
@@ -449,15 +449,15 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
                return H_TOO_HARD;
 
        if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
-               return H_HARDWARE;
+               return H_TOO_HARD;
 
        if (mm_iommu_mapped_inc(mem))
-               return H_CLOSED;
+               return H_TOO_HARD;
 
        ret = iommu_tce_xchg(tbl, entry, &hpa, &dir);
        if (WARN_ON_ONCE(ret)) {
                mm_iommu_mapped_dec(mem);
-               return H_HARDWARE;
+               return H_TOO_HARD;
        }
 
        if (dir != DMA_NONE)
index c2848e0b1b71a5013c2a27d5858f4a1704ddee93..7388b660e64887cc69bd67d632165b02cd99cbe8 100644 (file)
@@ -300,10 +300,10 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
 
        if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
                        &hpa)))
-               return H_HARDWARE;
+               return H_TOO_HARD;
 
        if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
-               return H_CLOSED;
+               return H_TOO_HARD;
 
        ret = iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
        if (ret) {
@@ -501,7 +501,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 
                rmap = (void *) vmalloc_to_phys(rmap);
                if (WARN_ON_ONCE_RM(!rmap))
-                       return H_HARDWARE;
+                       return H_TOO_HARD;
 
                /*
                 * Synchronize with the MMU notifier callbacks in