static struct lock_class_key bonding_netdev_xmit_lock_key;
+static void bond_set_lockdep_class_one(struct netdev_queue *txq)
+{
+ lockdep_set_class(&txq->_xmit_lock,
+ &bonding_netdev_xmit_lock_key);
+}
+
+static void bond_set_lockdep_class(struct net_device *dev)
+{
+ bond_set_lockdep_class_one(&dev->tx_queue);
+}
+
/* Create a new bond based on the specified name and bonding parameters.
* If name is NULL, obtain a suitable "bond%d" name for us.
* Caller must NOT hold rtnl_lock; we need to release it here before we
goto out_bond;
}
- lockdep_set_class(&bond_dev->_xmit_lock, &bonding_netdev_xmit_lock_key);
+ bond_set_lockdep_class(bond_dev);
netif_carrier_off(bond_dev);
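
The pattern above repeats for every virtual device in this patch, so it is worth spelling out why it exists. When an upper device transmits, its transmit routine re-enters dev_queue_xmit() for a lower device, so two _xmit_locks can be held at once; if both live in the default class, lockdep reports recursive locking. A minimal sketch of the nesting, with a hypothetical bond_forward_example() helper (not from the patch):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* By the time a bond's transmit routine runs, the core may hold the
 * bond's _xmit_lock; handing the skb to a slave re-enters the stack,
 * which can take the slave's _xmit_lock as well.  Distinct lock
 * classes keep lockdep from calling this recursion. */
static int bond_forward_example(struct sk_buff *skb,
				struct net_device *slave_dev)
{
	skb->dev = slave_dev;
	return dev_queue_xmit(skb);
}
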
*/
static struct lock_class_key bpq_netdev_xmit_lock_key;
+static void bpq_set_lockdep_class_one(struct netdev_queue *txq)
+{
+ lockdep_set_class(&txq->_xmit_lock, &bpq_netdev_xmit_lock_key);
+}
+
+static void bpq_set_lockdep_class(struct net_device *dev)
+{
+ bpq_set_lockdep_class_one(&dev->tx_queue);
+}
+
/* ------------------------------------------------------------------------ */
err = register_netdevice(ndev);
if (err)
goto error;
- lockdep_set_class(&ndev->_xmit_lock, &bpq_netdev_xmit_lock_key);
+ bpq_set_lockdep_class(ndev);
/* List protected by RTNL */
list_add_rcu(&bpq->bpq_list, &bpq_devices);
#define MACVLAN_STATE_MASK \
((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
+static void macvlan_set_lockdep_class_one(struct netdev_queue *txq)
+{
+ lockdep_set_class(&txq->_xmit_lock,
+ &macvlan_netdev_xmit_lock_key);
+}
+
+static void macvlan_set_lockdep_class(struct net_device *dev)
+{
+ macvlan_set_lockdep_class_one(&dev->tx_queue);
+}
+
static int macvlan_init(struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
dev->features = lowerdev->features & MACVLAN_FEATURES;
dev->iflink = lowerdev->ifindex;
- lockdep_set_class(&dev->_xmit_lock, &macvlan_netdev_xmit_lock_key);
+ macvlan_set_lockdep_class(dev);
+
return 0;
}
*/
static struct lock_class_key hostap_netdev_xmit_lock_key;
+static void prism2_set_lockdep_class_one(struct netdev_queue *txq)
+{
+ lockdep_set_class(&txq->_xmit_lock,
+ &hostap_netdev_xmit_lock_key);
+}
+
+static void prism2_set_lockdep_class(struct net_device *dev)
+{
+ prism2_set_lockdep_class_one(&dev->tx_queue);
+}
+
static struct net_device *
prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx,
if (ret >= 0)
ret = register_netdevice(dev);
- lockdep_set_class(&dev->_xmit_lock, &hostap_netdev_xmit_lock_key);
+ prism2_set_lockdep_class(dev);
rtnl_unlock();
if (ret < 0) {
printk(KERN_WARNING "%s: register netdevice failed!\n",
struct net_device *dev;
struct Qdisc *qdisc;
struct sk_buff *gso_skb;
+ spinlock_t _xmit_lock; /* hard_start_xmit synchronizer */
+ int xmit_lock_owner; /* CPU inside hard_start_xmit, or -1 */
struct Qdisc *qdisc_sleeping;
struct list_head qdisc_list;
struct netdev_queue *next_sched;
/*
* One part is mostly used on xmit path (device)
*/
- /* hard_start_xmit synchronizer */
- spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
- /* cpu id of processor entered to hard_start_xmit or -1,
- if nobody entered there.
- */
- int xmit_lock_owner;
void *priv; /* pointer to private data */
int (*hard_start_xmit) (struct sk_buff *skb,
struct net_device *dev);
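
For reference, this is roughly what struct netdev_queue looks like once the two fields land. It is reconstructed from the hunk context above, with fields the diff does not show omitted and the comments carried over from the removed net_device fields:

struct netdev_queue {
	struct net_device	*dev;
	struct Qdisc		*qdisc;
	struct sk_buff		*gso_skb;
	spinlock_t		_xmit_lock;	/* hard_start_xmit synchronizer */
	int			xmit_lock_owner; /* CPU inside hard_start_xmit, or -1 */
	struct Qdisc		*qdisc_sleeping;
	struct list_head	qdisc_list;
	struct netdev_queue	*next_sched;
	/* fields not visible in the hunk above are omitted */
};
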
*
* Get network device transmit lock
*/
-static inline void __netif_tx_lock(struct net_device *dev, int cpu)
+static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
- spin_lock(&dev->_xmit_lock);
- dev->xmit_lock_owner = cpu;
+ spin_lock(&txq->_xmit_lock);
+ txq->xmit_lock_owner = cpu;
}
static inline void netif_tx_lock(struct net_device *dev)
{
- __netif_tx_lock(dev, smp_processor_id());
+ __netif_tx_lock(&dev->tx_queue, smp_processor_id());
+}
+
+static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
+{
+ spin_lock_bh(&txq->_xmit_lock);
+ txq->xmit_lock_owner = smp_processor_id();
}
static inline void netif_tx_lock_bh(struct net_device *dev)
{
- spin_lock_bh(&dev->_xmit_lock);
- dev->xmit_lock_owner = smp_processor_id();
+ __netif_tx_lock_bh(&dev->tx_queue);
}
-static inline int netif_tx_trylock(struct net_device *dev)
+static inline int __netif_tx_trylock(struct netdev_queue *txq)
{
- int ok = spin_trylock(&dev->_xmit_lock);
+ int ok = spin_trylock(&txq->_xmit_lock);
if (likely(ok))
- dev->xmit_lock_owner = smp_processor_id();
+ txq->xmit_lock_owner = smp_processor_id();
return ok;
}
+static inline int netif_tx_trylock(struct net_device *dev)
+{
+ return __netif_tx_trylock(&dev->tx_queue);
+}
+
+static inline void __netif_tx_unlock(struct netdev_queue *txq)
+{
+ txq->xmit_lock_owner = -1;
+ spin_unlock(&txq->_xmit_lock);
+}
+
static inline void netif_tx_unlock(struct net_device *dev)
{
- dev->xmit_lock_owner = -1;
- spin_unlock(&dev->_xmit_lock);
+ __netif_tx_unlock(&dev->tx_queue);
+}
+
+static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
+{
+ txq->xmit_lock_owner = -1;
+ spin_unlock_bh(&txq->_xmit_lock);
}
static inline void netif_tx_unlock_bh(struct net_device *dev)
{
- dev->xmit_lock_owner = -1;
- spin_unlock_bh(&dev->_xmit_lock);
+ __netif_tx_unlock_bh(&dev->tx_queue);
}
-#define HARD_TX_LOCK(dev, cpu) { \
+#define HARD_TX_LOCK(dev, txq, cpu) { \
if ((dev->features & NETIF_F_LLTX) == 0) { \
- __netif_tx_lock(dev, cpu); \
+ __netif_tx_lock(txq, cpu); \
} \
}
-#define HARD_TX_UNLOCK(dev) { \
+#define HARD_TX_UNLOCK(dev, txq) { \
if ((dev->features & NETIF_F_LLTX) == 0) { \
- netif_tx_unlock(dev); \
+ __netif_tx_unlock(txq); \
} \
}
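
Driver-facing usage is unchanged: the netif_tx_lock*() wrappers keep their net_device signatures and simply forward to the new per-queue primitives. A minimal sketch of a typical caller, assuming a hypothetical example_reset_tx_ring():

#include <linux/netdevice.h>

/* Hypothetical driver routine: serialize a hardware reset against the
 * hot transmit path.  netif_tx_lock_bh() now resolves to
 * __netif_tx_lock_bh(&dev->tx_queue); callers need no change. */
static void example_reset_tx_ring(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	/* ... re-initialize the TX ring safely here ... */
	netif_tx_unlock_bh(dev);
}
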
*/
static struct lock_class_key vlan_netdev_xmit_lock_key;
+static void vlan_dev_set_lockdep_one(struct netdev_queue *txq,
+ int subclass)
+{
+ lockdep_set_class_and_subclass(&txq->_xmit_lock,
+ &vlan_netdev_xmit_lock_key, subclass);
+}
+
+static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
+{
+ vlan_dev_set_lockdep_one(&dev->tx_queue, subclass);
+}
+
static const struct header_ops vlan_header_ops = {
.create = vlan_dev_hard_header,
.rebuild = vlan_dev_rebuild_header,
if (is_vlan_dev(real_dev))
subclass = 1;
- lockdep_set_class_and_subclass(&dev->_xmit_lock,
- &vlan_netdev_xmit_lock_key, subclass);
+ vlan_dev_set_lockdep_class(dev, subclass);
return 0;
}
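
The subclass exists for stacked VLANs: when real_dev is itself a VLAN device, both devices' _xmit_locks share vlan_netdev_xmit_lock_key, and the nested acquisition on transmit would otherwise look recursive. lockdep_set_class_and_subclass() gives the nested lock a distinct (key, subclass) identity, so plain spin_lock() calls need no per-site annotation. A standalone sketch of the idea, with hypothetical names:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

static struct lock_class_key demo_key;		/* hypothetical */
static spinlock_t upper_lock, lower_lock;	/* hypothetical */

static void demo_nested_classes(void)
{
	spin_lock_init(&upper_lock);
	spin_lock_init(&lower_lock);
	/* Same key, different subclasses: lockdep treats these as two
	 * distinct classes, so holding both at once is legal without
	 * spin_lock_nested() at each call site. */
	lockdep_set_class_and_subclass(&upper_lock, &demo_key, 0);
	lockdep_set_class_and_subclass(&lower_lock, &demo_key, 1);

	spin_lock(&upper_lock);
	spin_lock(&lower_lock);
	spin_unlock(&lower_lock);
	spin_unlock(&upper_lock);
}
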
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
- * register_netdevice() inits dev->_xmit_lock and sets lockdep class
+ * register_netdevice() inits txq->_xmit_lock and sets lockdep class
* according to dev->type
*/
static const unsigned short netdev_lock_type[] =
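
The tables are cut off here, but the mechanism is: netdev_lock_type[] lists ARPHRD_* device types, a parallel array of lock_class_keys gives each type its own class, and registration keys off dev->type (all under CONFIG_DEBUG_LOCK_ALLOC). A simplified, illustrative reimplementation, since the real bodies are elided from this hunk; the in-tree version also records class names:

#include <linux/if_arp.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>

static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_LOOPBACK /* , ... */};

static struct lock_class_key
	netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];

static void netdev_set_lockdep_class(spinlock_t *lock,
				     unsigned short dev_type)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			break;
	if (i == ARRAY_SIZE(netdev_lock_type))	/* unknown type */
		i = ARRAY_SIZE(netdev_lock_type) - 1;
	lockdep_set_class(lock, &netdev_xmit_lock_key[i]);
}
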
if (dev->flags & IFF_UP) {
int cpu = smp_processor_id(); /* ok because BHs are off */
- if (dev->xmit_lock_owner != cpu) {
- HARD_TX_LOCK(dev, cpu);
+ if (txq->xmit_lock_owner != cpu) {
+ HARD_TX_LOCK(dev, txq, cpu);
if (!netif_queue_stopped(dev) &&
!netif_subqueue_stopped(dev, skb)) {
rc = 0;
if (!dev_hard_start_xmit(skb, dev)) {
- HARD_TX_UNLOCK(dev);
+ HARD_TX_UNLOCK(dev, txq);
goto out;
}
}
- HARD_TX_UNLOCK(dev);
+ HARD_TX_UNLOCK(dev, txq);
if (net_ratelimit())
printk(KERN_CRIT "Virtual device %s asks to "
"queue packet!\n", dev->name);
dev_put(dev);
}
+static void __netdev_init_queue_locks_one(struct netdev_queue *dev_queue,
+ struct net_device *dev)
+{
+ spin_lock_init(&dev_queue->_xmit_lock);
+ netdev_set_lockdep_class(&dev_queue->_xmit_lock, dev->type);
+ dev_queue->xmit_lock_owner = -1;
+}
+
+static void netdev_init_queue_locks(struct net_device *dev)
+{
+ __netdev_init_queue_locks_one(&dev->tx_queue, dev);
+ __netdev_init_queue_locks_one(&dev->rx_queue, dev);
+}
+
/**
* register_netdevice - register a network device
* @dev: device to register
BUG_ON(!dev_net(dev));
net = dev_net(dev);
- spin_lock_init(&dev->_xmit_lock);
- netdev_set_lockdep_class(&dev->_xmit_lock, dev->type);
- dev->xmit_lock_owner = -1;
+ netdev_init_queue_locks(dev);
dev->iflink = -1;
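
Ordering matters here: register_netdevice() installs the per-type default class through netdev_init_queue_locks(), and the virtual drivers above (bonding, bpq, macvlan, hostap, netrom, rose) re-class the queue lock only after registration succeeds. A hedged sketch of that pattern for a hypothetical driver:

#include <linux/netdevice.h>

static struct lock_class_key demo_netdev_xmit_lock_key;	/* hypothetical */

static void demo_set_lockdep_class(struct net_device *dev)
{
	lockdep_set_class(&dev->tx_queue._xmit_lock,
			  &demo_netdev_xmit_lock_key);
}

static int demo_register(struct net_device *dev)
{
	int err;

	err = register_netdevice(dev);	/* installs the per-type class */
	if (err)
		return err;
	demo_set_lockdep_class(dev);	/* then override it */
	return 0;
}
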
*/
static struct lock_class_key nr_netdev_xmit_lock_key;
+static void nr_set_lockdep_one(struct netdev_queue *txq)
+{
+ lockdep_set_class(&txq->_xmit_lock, &nr_netdev_xmit_lock_key);
+}
+
+static void nr_set_lockdep_key(struct net_device *dev)
+{
+ nr_set_lockdep_one(&dev->tx_queue);
+}
+
/*
* Socket removal during an interrupt is now safe.
*/
free_netdev(dev);
goto fail;
}
- lockdep_set_class(&dev->_xmit_lock, &nr_netdev_xmit_lock_key);
+ nr_set_lockdep_key(dev);
dev_nr[i] = dev;
}
*/
static struct lock_class_key rose_netdev_xmit_lock_key;
+static void rose_set_lockdep_one(struct netdev_queue *txq)
+{
+ lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key);
+}
+
+static void rose_set_lockdep_key(struct net_device *dev)
+{
+ rose_set_lockdep_one(&dev->tx_queue);
+}
+
/*
* Convert a ROSE address into text.
*/
free_netdev(dev);
goto fail;
}
- lockdep_set_class(&dev->_xmit_lock, &rose_netdev_xmit_lock_key);
+ rose_set_lockdep_key(dev);
dev_rose[i] = dev;
}
struct netdev_queue *dev_queue,
struct Qdisc *q)
{
- struct net_device *dev = dev_queue->dev;
int ret;
- if (unlikely(dev->xmit_lock_owner == smp_processor_id())) {
+ if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
/*
* Same CPU holding the lock. It may be a transient
* configuration error, when hard_start_xmit() recurses. We
kfree_skb(skb);
if (net_ratelimit())
printk(KERN_WARNING "Dead loop on netdevice %s, "
- "fix it urgently!\n", dev->name);
+ "fix it urgently!\n", dev_queue->dev->name);
ret = qdisc_qlen(q);
} else {
/*
dev = txq->dev;
- HARD_TX_LOCK(dev, smp_processor_id());
+ HARD_TX_LOCK(dev, txq, smp_processor_id());
if (!netif_subqueue_stopped(dev, skb))
ret = dev_hard_start_xmit(skb, dev);
- HARD_TX_UNLOCK(dev);
+ HARD_TX_UNLOCK(dev, txq);
spin_lock(&txq->lock);
q = txq->qdisc;
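
Note that the dead-loop test reads xmit_lock_owner without taking the lock. That is safe: the field can only equal smp_processor_id() if this CPU stored its own id there, i.e. the driver's hard_start_xmit() re-entered the transmit path. Condensed into a standalone form (dead_loop_check() is hypothetical; in the patch this logic is open-coded and the dead-loop branch returns qdisc_qlen(q)):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int dead_loop_check(struct netdev_queue *dev_queue,
			   struct sk_buff *skb)
{
	/* Lockless read is safe: only the CPU holding _xmit_lock ever
	 * stores its own id here, so a match proves this CPU recursed
	 * back into the transmit path. */
	if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
		kfree_skb(skb);
		if (net_ratelimit())
			printk(KERN_WARNING "Dead loop on netdevice %s, "
			       "fix it urgently!\n", dev_queue->dev->name);
		return 1;
	}
	return 0;
}
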