return -1;
}
-static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev)
+static bool __mlx5_lag_is_active(struct mlx5_lag *ldev)
{
return !!(ldev->flags & MLX5_LAG_FLAG_BONDED);
}
do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
- if (do_bond && !mlx5_lag_is_bonded(ldev)) {
+ if (do_bond && !__mlx5_lag_is_active(ldev)) {
if (!sriov_enabled)
for (i = 0; i < MLX5_MAX_PORTS; i++)
mlx5_remove_dev_by_protocol(ldev->pf[i].dev,
			    MLX5_INTERFACE_PROTOCOL_IB);
mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
mlx5_nic_vport_enable_roce(dev1);
}
- } else if (do_bond && mlx5_lag_is_bonded(ldev)) {
+ } else if (do_bond && __mlx5_lag_is_active(ldev)) {
mlx5_modify_lag(ldev, &tracker);
- } else if (!do_bond && mlx5_lag_is_bonded(ldev)) {
+ } else if (!do_bond && __mlx5_lag_is_active(ldev)) {
if (!sriov_enabled) {
mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
mlx5_nic_vport_disable_roce(dev1);
if (!ldev)
return;
- if (mlx5_lag_is_bonded(ldev))
+ if (__mlx5_lag_is_active(ldev))
mlx5_deactivate_lag(ldev);
mlx5_lag_dev_remove_pf(ldev, dev);
mutex_lock(&lag_mutex);
ldev = mlx5_lag_dev_get(dev);
- res = ldev && mlx5_lag_is_bonded(ldev);
+ res = ldev && __mlx5_lag_is_active(ldev);
mutex_unlock(&lag_mutex);
return res;
mutex_lock(&lag_mutex);
ldev = mlx5_lag_dev_get(dev);
- if (!(ldev && mlx5_lag_is_bonded(ldev)))
+ if (!(ldev && __mlx5_lag_is_active(ldev)))
goto unlock;
if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
return true;
ldev = mlx5_lag_dev_get(dev);
- if (!ldev || !mlx5_lag_is_bonded(ldev) || ldev->pf[0].dev == dev)
+ if (!ldev || !__mlx5_lag_is_active(ldev) || ldev->pf[0].dev == dev)
return true;
/* If bonded, we do not add an IB device for PF1. */
mutex_lock(&lag_mutex);
ldev = mlx5_lag_dev_get(dev);
- if (ldev && mlx5_lag_is_bonded(ldev)) {
+ if (ldev && __mlx5_lag_is_active(ldev)) {
num_ports = MLX5_MAX_PORTS;
mdev[0] = ldev->pf[0].dev;
mdev[1] = ldev->pf[1].dev;
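
For reference only (not part of the patch): the context lines above that take lag_mutex, look up the ldev and test __mlx5_lag_is_active() come from the exported, mutex-protected accessor. A minimal sketch of that wrapper, assuming the usual upstream signature taking a struct mlx5_core_dev pointer:

/* Illustrative sketch, not the literal upstream function: the locked,
 * exported query wraps the unlocked __mlx5_lag_is_active() helper, which
 * expects the caller to already hold lag_mutex. */
bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	bool res;

	mutex_lock(&lag_mutex);
	ldev = mlx5_lag_dev_get(dev);
	res = ldev && __mlx5_lag_is_active(ldev);
	mutex_unlock(&lag_mutex);

	return res;
}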