/*
 * NOTE(review): unified-diff fragment of gem_add_flow_filter() — the
 * function signature, the kmalloc of 'newfs', the netdev_dbg call and
 * the list-insertion body are elided context.  '+'/'-' prefixes are
 * patch markers, not C.
 */
struct macb *bp = netdev_priv(netdev);
struct ethtool_rx_flow_spec *fs = &cmd->fs;
struct ethtool_rx_fs_item *item, *newfs;
/* + added: IRQ-save flags for the critical section taken below */
+ unsigned long flags;
int ret = -EINVAL;
bool added = false;
/* trailing arguments of an elided netdev_dbg() call */
htonl(fs->h_u.tcp_ip4_spec.ip4dst),
htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst));
/*
 * + lock taken here instead of in gem_set_rxnfc() (see that hunk),
 * so the elided allocation of 'newfs' above runs before the spinlock
 * is held — presumably to avoid sleeping in atomic context; TODO
 * confirm against the commit message.
 */
+ spin_lock_irqsave(&bp->rx_fs_lock, flags);
+
/* find correct place to add in list */
list_for_each_entry(item, &bp->rx_fs_list.list, list) {
if (item->fs.location > newfs->fs.location) {
if (netdev->features & NETIF_F_NTUPLE)
gem_enable_flow_filters(bp, 1);
/* + success path now drops the lock before returning */
+ spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
return 0;
err:
/* + error path: unlock first, then free the unused 'newfs' */
+ spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
kfree(newfs);
return ret;
}
/*
 * NOTE(review): unified-diff fragment of gem_del_flow_filter().  The
 * function signature and the line assigning 'fs' (presumably
 * 'fs = &item->fs;') are elided context — 'fs' is used below but never
 * visibly set in this hunk.
 */
struct macb *bp = netdev_priv(netdev);
struct ethtool_rx_fs_item *item;
struct ethtool_rx_flow_spec *fs;
+ unsigned long flags;
+
/* + lock moved here from gem_set_rxnfc() (see that hunk) */
+ spin_lock_irqsave(&bp->rx_fs_lock, flags);
list_for_each_entry(item, &bp->rx_fs_list.list, list) {
if (item->fs.location == cmd->fs.location) {
/* clear the screener type 2 register for the matched rule */
gem_writel_n(bp, SCRT2, fs->location, 0);
list_del(&item->list);
/* - kfree() used to run under the lock; moved below the unlock */
- kfree(item);
bp->rx_fs_list.count--;
+ spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
/* + free outside the critical section ('item' is already unlinked) */
+ kfree(item);
return 0;
}
}
/* no matching rule found: unlock and report failure */
+ spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
return -EINVAL;
}
/*
 * gem_set_rxnfc(): ethtool set_rxnfc entry point.  This hunk REMOVES
 * the function-wide rx_fs_lock critical section; locking now lives
 * inside the add/del helpers (see the other hunks), presumably so any
 * sleeping work they do happens outside the spinlock — TODO confirm.
 * NOTE(review): the switch body is elided by the diff; the visible
 * 'if ((cmd->fs.location >= bp->max_tuples)' condition is truncated
 * mid-expression by the elision.
 */
static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
struct macb *bp = netdev_priv(netdev);
/* - 'flags' no longer needed once the lock/unlock pair is removed */
- unsigned long flags;
int ret;
- spin_lock_irqsave(&bp->rx_fs_lock, flags);
-
switch (cmd->cmd) {
case ETHTOOL_SRXCLSRLINS:
if ((cmd->fs.location >= bp->max_tuples)
ret = -EOPNOTSUPP;
}
- spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
return ret;
}