 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
 int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
 int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
-int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
-			    struct kvm_vcpu_events *events);
+int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
+			      struct kvm_vcpu_events *events);
 
-int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
-			    struct kvm_vcpu_events *events);
+int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
+			      struct kvm_vcpu_events *events);
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
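
(For reference, not part of this diff: both helpers operate on the arm64
uapi struct kvm_vcpu_events. Its layout at the time looked roughly like
the sketch below; the exception.pad and reserved fields are the ones
zero-checked further down.)

struct kvm_vcpu_events {
	struct {
		__u8 serror_pending;
		__u8 serror_has_esr;
		/* Align it to 8 bytes */
		__u8 pad[6];
		__u64 serror_esr;
	} exception;
	__u32 reserved[12];
};
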
 	return -EINVAL;
 }
 
-int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
-			    struct kvm_vcpu_events *events)
+int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
+			      struct kvm_vcpu_events *events)
 {
-	memset(events, 0, sizeof(*events));
-
 	events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
 	events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
 
 	return 0;
 }
-int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
-			    struct kvm_vcpu_events *events)
+int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
+			      struct kvm_vcpu_events *events)
 {
-	int i;
 	bool serror_pending = events->exception.serror_pending;
 	bool has_esr = events->exception.serror_has_esr;
 
-	/* check whether the reserved field is zero */
-	for (i = 0; i < ARRAY_SIZE(events->reserved); i++)
-		if (events->reserved[i])
-			return -EINVAL;
-
-	/* check whether the pad field is zero */
-	for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++)
-		if (events->exception.pad[i])
-			return -EINVAL;
-
 	if (serror_pending && has_esr) {
 		if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
 			return -EINVAL;
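
(For context: userspace reaches these helpers through the
KVM_GET_VCPU_EVENTS and KVM_SET_VCPU_EVENTS vcpu ioctls. A minimal
sketch of a save/restore round-trip, assuming vcpu_fd is an already
open vcpu file descriptor:)

#include <err.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void save_restore_events(int vcpu_fd)
{
	struct kvm_vcpu_events events;

	/* Read any pending SError state out of the kernel... */
	memset(&events, 0, sizeof(events));
	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
		err(1, "KVM_GET_VCPU_EVENTS");

	/* ...and inject it back, e.g. on the target of a migration. */
	if (ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events) < 0)
		err(1, "KVM_SET_VCPU_EVENTS");
}
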
 	return ret;
 }
 
+#ifdef __KVM_HAVE_VCPU_EVENTS /* temporary: until 32bit is wired up */
+static int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
+				   struct kvm_vcpu_events *events)
+{
+	memset(events, 0, sizeof(*events));
+
+	return __kvm_arm_vcpu_get_events(vcpu, events);
+}
+
+static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
+				   struct kvm_vcpu_events *events)
+{
+	int i;
+
+	/* check whether the reserved field is zero */
+	for (i = 0; i < ARRAY_SIZE(events->reserved); i++)
+		if (events->reserved[i])
+			return -EINVAL;
+
+	/* check whether the pad field is zero */
+	for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++)
+		if (events->exception.pad[i])
+			return -EINVAL;
+
+	return __kvm_arm_vcpu_set_events(vcpu, events);
+}
+#endif /* __KVM_HAVE_VCPU_EVENTS */
+
 long kvm_arch_vcpu_ioctl(struct file *filp,
 			 unsigned int ioctl, unsigned long arg)
 {
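
(The static wrappers above are called from the ioctl dispatch later in
this function; that code is outside this hunk, but it looks roughly
like the following, also under #ifdef __KVM_HAVE_VCPU_EVENTS, with argp
being the usual (void __user *)arg:)

	case KVM_GET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		if (kvm_arm_vcpu_get_events(vcpu, &events))
			return -EINVAL;

		if (copy_to_user(argp, &events, sizeof(events)))
			return -EFAULT;

		return 0;
	}
	case KVM_SET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		if (copy_from_user(&events, argp, sizeof(events)))
			return -EFAULT;

		return kvm_arm_vcpu_set_events(vcpu, &events);
	}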