struct mlx5_vxlan {
struct mlx5_core_dev *mdev;
spinlock_t lock; /* protect vxlan table */
- int num_ports;
/* max_num_ports is usually 4, 16 buckets is more than enough */
DECLARE_HASHTABLE(htable, 4);
+ int num_ports;
+ struct mutex sync_lock; /* sync add/del port HW operations */
};
struct mlx5_vxlan_port {
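The struct now splits concurrency control in two: the existing spinlock stays a short, BH-safe guard around the hash table, while the new sync_lock mutex serializes the add/del firmware commands, which sleep and therefore can never run under the spinlock, together with the num_ports accounting. A minimal sketch of that split, with hypothetical names (port_table, port_entry, port_offloaded) that are not part of the patch:

#include <linux/hashtable.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical sketch of the two-lock split; names are not from the patch. */
struct port_table {
	spinlock_t lock;		/* guards htable; takeable from BH context */
	DECLARE_HASHTABLE(htable, 4);	/* 16 buckets, as in mlx5_vxlan */
	int num_ports;			/* written only under sync_lock */
	struct mutex sync_lock;		/* orders sleeping add/del HW commands */
};

struct port_entry {
	struct hlist_node hlist;
	u16 udp_port;
};

/* Datapath lookup: spinlock only, never the mutex. Softirq-context
 * callers may not sleep, so they must never contend on sync_lock.
 */
static bool port_offloaded(struct port_table *t, u16 port)
{
	struct port_entry *e;
	bool found = false;

	spin_lock_bh(&t->lock);
	hash_for_each_possible(t->htable, e, hlist, port) {
		if (e->udp_port == port) {
			found = true;
			break;
		}
	}
	spin_unlock_bh(&t->lock);

	return found;
}

The modification paths always take sync_lock first and only dip into the spinlock for the actual table update; that is exactly the ordering the mlx5_vxlan_add_port hunk below follows, picking up right after the early return for a port that is already offloaded.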
return 0;
}
+ mutex_lock(&vxlan->sync_lock);
if (vxlan->num_ports >= mlx5_vxlan_max_udp_ports(vxlan->mdev)) {
mlx5_core_info(vxlan->mdev,
"UDP port (%d) not offloaded, max number of UDP ports (%d) are already offloaded\n",
port, mlx5_vxlan_max_udp_ports(vxlan->mdev));
ret = -ENOSPC;
- return ret;
+ goto unlock;
}
ret = mlx5_vxlan_core_add_port_cmd(vxlan->mdev, port);
if (ret)
- return ret;
+ goto unlock;
vxlanp = kzalloc(sizeof(*vxlanp), GFP_KERNEL);
if (!vxlanp) {
	ret = -ENOMEM;
	goto err_delete_port;
}

vxlanp->udp_port = port;
atomic_set(&vxlanp->refcount, 1);

spin_lock_bh(&vxlan->lock);
hash_add(vxlan->htable, &vxlanp->hlist, port);
spin_unlock_bh(&vxlan->lock);
vxlan->num_ports++;
+ mutex_unlock(&vxlan->sync_lock);
return 0;
err_delete_port:
mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
+
+unlock:
+ mutex_unlock(&vxlan->sync_lock);
return ret;
}
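Because mlx5_vxlan_add_port now takes sync_lock up front, every early return inside it has to become a goto to a label that drops the mutex. The labels are layered so that the deepest failure, the allocation that fails after the firmware add already succeeded, first undoes the hardware state at err_delete_port and then falls through into unlock. A self-contained sketch of that layered unwind, with hypothetical hw_add()/sw_add()/hw_del() helpers standing in for the firmware command and the software bookkeeping:

#include <linux/errno.h>
#include <linux/mutex.h>

/* Hypothetical names; a sketch of the layered goto unwind, not the mlx5 API. */
struct add_ctx {
	struct mutex sync_lock;
};

static int hw_add(struct add_ctx *c)  { return 0; }	  /* stand-in: FW add cmd */
static void hw_del(struct add_ctx *c) { }		  /* stand-in: FW del cmd */
static int sw_add(struct add_ctx *c)  { return -ENOMEM; } /* e.g. a failed alloc */

static int do_add(struct add_ctx *c)
{
	int ret;

	mutex_lock(&c->sync_lock);

	ret = hw_add(c);		/* step 1: program the hardware */
	if (ret)
		goto unlock;		/* nothing to undo yet */

	ret = sw_add(c);		/* step 2: software bookkeeping */
	if (ret)
		goto err_hw_del;	/* undo step 1, then drop the mutex */

	mutex_unlock(&c->sync_lock);
	return 0;

err_hw_del:
	hw_del(c);			/* deepest failure unwinds first... */
unlock:
	mutex_unlock(&c->sync_lock);	/* ...then falls through to unlock */
	return ret;
}

The next hunk applies the same bracket to the delete path, mlx5_vxlan_del_port.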
bool remove = false;
int ret = 0;
+ mutex_lock(&vxlan->sync_lock);
+
spin_lock_bh(&vxlan->lock);
vxlanp = mlx5_vxlan_lookup_port_locked(vxlan, port);
if (!vxlanp) {
	ret = -ENOENT;
	goto out_unlock;
}

if (atomic_dec_and_test(&vxlanp->refcount)) {
	hash_del(&vxlanp->hlist);
	remove = true;
}

out_unlock:
spin_unlock_bh(&vxlan->lock);

if (remove) {
	mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
	kfree(vxlanp);
	vxlan->num_ports--;
}
+
+ mutex_unlock(&vxlan->sync_lock);
+
return ret;
}
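The delete path shows the other half of the locking discipline: the entry is looked up and unlinked under the BH spinlock, only flagged for removal there, and the sleeping firmware delete command runs after the spinlock is dropped, while sync_lock is held across the whole operation so the deferred cleanup cannot race with a concurrent add of the same port. A sketch of that defer-then-cleanup shape, reusing the hypothetical port_table and port_entry from the sketch above:

#include <linux/errno.h>
#include <linux/slab.h>

/* Hypothetical sketch: unlink under the spinlock, flag the removal, and
 * defer the sleeping firmware call until the lock is dropped.
 */
static void hw_del_cmd(u16 port) { }	/* stand-in for a sleeping FW call */

static int table_del(struct port_table *t, u16 port)
{
	struct port_entry *e = NULL, *iter;
	bool remove = false;
	int ret = 0;

	mutex_lock(&t->sync_lock);	/* no concurrent add/del can run */

	spin_lock_bh(&t->lock);
	hash_for_each_possible(t->htable, iter, hlist, port) {
		if (iter->udp_port == port) {
			e = iter;
			break;
		}
	}
	if (!e) {
		ret = -ENOENT;
		goto out_unlock;
	}
	hash_del(&e->hlist);		/* unlink only: nothing sleeps here */
	remove = true;

out_unlock:
	spin_unlock_bh(&t->lock);

	if (remove) {
		hw_del_cmd(port);	/* sleeping call, spinlock released */
		kfree(e);		/* entry already unlinked above */
	}

	mutex_unlock(&t->sync_lock);
	return ret;
}

Last, mlx5_vxlan_create() wires up the new mutex alongside the locks it already initializes.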
return ERR_PTR(-ENOMEM);
vxlan->mdev = mdev;
+ mutex_init(&vxlan->sync_lock);
spin_lock_init(&vxlan->lock);
hash_init(vxlan->htable);
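Since both locks and the table are initialized before the structure is returned to callers, no path can take an uninitialized lock; mutex_init() simply joins the existing spin_lock_init()/hash_init() pair. A hedged usage sketch, assuming the entry points keep the signatures they have elsewhere in this file (mlx5_vxlan_create(), mlx5_vxlan_add_port(), mlx5_vxlan_destroy()):

#include <linux/err.h>

static int example_setup(struct mlx5_core_dev *mdev)
{
	struct mlx5_vxlan *vxlan;
	int err;

	vxlan = mlx5_vxlan_create(mdev);	/* locks initialized before use */
	if (IS_ERR(vxlan))
		return PTR_ERR(vxlan);

	err = mlx5_vxlan_add_port(vxlan, 4789);	/* IANA VXLAN UDP port */
	if (err)
		mlx5_vxlan_destroy(vxlan);

	return err;
}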