return rc;
}
-/* Called with kvm_lock held */
+/* Called with xive->lock held */
static int xive_check_provisioning(struct kvm *kvm, u8 prio)
{
struct kvmppc_xive *xive = kvm->arch.xive;
struct kvm_vcpu *vcpu;
int i, rc;
- lockdep_assert_held(&kvm->lock);
+ lockdep_assert_held(&xive->lock);
/* Already provisioned ? */
if (xive->qmap & (1 << prio))
return 0;
pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n",
irq, server, priority);
/* First, check provisioning of queues */
- if (priority != MASKED)
+ if (priority != MASKED) {
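+ /* Queue provisioning is serialized by xive->lock */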
+ mutex_lock(&xive->lock);
rc = xive_check_provisioning(xive->kvm,
xive_prio_from_guest(priority));
+ mutex_unlock(&xive->lock);
+ }
else
rc = 0;
if (rc) {
pr_devel(" provisioning failure %d !\n", rc);
return rc;
}
return -ENOMEM;
/* We need to synchronize with queue provisioning */
- mutex_lock(&vcpu->kvm->lock);
+ mutex_lock(&xive->lock);
vcpu->arch.xive_vcpu = xc;
xc->xive = xive;
xc->vcpu = vcpu;
xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);
bail:
- mutex_unlock(&vcpu->kvm->lock);
+ mutex_unlock(&xive->lock);
if (r) {
kvmppc_xive_cleanup_vcpu(vcpu);
return r;
struct kvmppc_xive_src_block *kvmppc_xive_create_src_block(
struct kvmppc_xive *xive, int irq)
{
- struct kvm *kvm = xive->kvm;
struct kvmppc_xive_src_block *sb;
int i, bid;
bid = irq >> KVMPPC_XICS_ICS_SHIFT;
- mutex_lock(&kvm->lock);
+ mutex_lock(&xive->lock);
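+ /* xive->lock protects the source block array and max_sbid */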
/* block already exists - somebody else got here first */
if (xive->src_blocks[bid])
goto out;
xive->max_sbid = bid;
out:
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&xive->lock);
return xive->src_blocks[bid];
}
/* If we have a priority target the interrupt */
if (act_prio != MASKED) {
/* First, check provisioning of queues */
- mutex_lock(&xive->kvm->lock);
+ mutex_lock(&xive->lock);
rc = xive_check_provisioning(xive->kvm, act_prio);
- mutex_unlock(&xive->kvm->lock);
+ mutex_unlock(&xive->lock);
/* Target interrupt */
if (rc == 0)
rc = xive_target_interrupt(xive->kvm, state,
act_server, act_prio);
dev->private = xive;
xive->dev = dev;
xive->kvm = kvm;
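+ /* xive->lock serializes vCPU connection and queue provisioning */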
+ mutex_init(&xive->lock);
/* Already there ? */
if (kvm->arch.xive)
return -EINVAL;
}
- mutex_lock(&vcpu->kvm->lock);
+ mutex_lock(&xive->lock);
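+ /* Check for duplicate servers under the lock before connecting */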
if (kvmppc_xive_find_server(vcpu->kvm, server_num)) {
pr_devel("Duplicate !\n");
/* TODO: reset all queues to a clean state ? */
bail:
- mutex_unlock(&vcpu->kvm->lock);
+ mutex_unlock(&xive->lock);
if (rc)
kvmppc_xive_native_cleanup_vcpu(vcpu);
pr_devel("%s\n", __func__);
- mutex_lock(&kvm->lock);
+ mutex_lock(&xive->lock);
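+ /* Walk all connected vCPUs under xive->lock */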
kvm_for_each_vcpu(i, vcpu, kvm) {
struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
}
}
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&xive->lock);
return 0;
}
pr_devel("%s\n", __func__);
- mutex_lock(&kvm->lock);
+ mutex_lock(&xive->lock);
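+ /* Sync the sources and the vCPU EQs under xive->lock */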
for (i = 0; i <= xive->max_sbid; i++) {
struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
kvm_for_each_vcpu(i, vcpu, kvm) {
kvmppc_xive_native_vcpu_eq_sync(vcpu);
}
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&xive->lock);
return 0;
}
}
/*
- * Called when device fd is closed
+ * Called when device fd is closed. kvm->lock is held.
*/
static void kvmppc_xive_native_release(struct kvm_device *dev)
{
xive->kvm = kvm;
kvm->arch.xive = xive;
mutex_init(&xive->mapping_lock);
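+ /* xive->lock plays the same role as in the XICS-on-XIVE device */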
+ mutex_init(&xive->lock);
/*
* Allocate a bunch of VPs. KVM_MAX_VCPUS is a large value for