The hunks below rename the rx_queue member of struct net_device to ingress_queue: the queue only ever holds the ingress qdisc, and every user of the old name is updated to match.

rx_handler_func_t *rx_handler;
void *rx_handler_data;
- struct netdev_queue rx_queue; /* use two cache lines */
+ struct netdev_queue ingress_queue; /* use two cache lines */
/*
* Cache lines mostly used on transmit path
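 */

For orientation, a sketch (not part of the patch) of where the renamed field sits in struct net_device; only the members visible in the hunk above are taken from the source, everything else is elided:

struct net_device {
	/* ... receive-path members elided ... */
	rx_handler_func_t	*rx_handler;
	void			*rx_handler_data;

	struct netdev_queue	ingress_queue;	/* was rx_queue */

	/* ... transmit-path members follow ... */
};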
skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
- rxq = &dev->rx_queue;
+ rxq = &dev->ingress_queue;
q = rxq->qdisc;
if (q != &noop_qdisc) {
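		/* Sketched completion of ing_filter() (not in the hunk):
		 * enqueue to the ingress qdisc under its lock and return the
		 * TC_ACT_* verdict declared earlier in the function.  The
		 * qdisc_enqueue_root() helper and the DEACTIVATED test are
		 * recalled from this kernel generation and may differ in
		 * detail. */
		spin_lock(qdisc_lock(q));
		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			result = qdisc_enqueue_root(skb, q);
		spin_unlock(qdisc_lock(q));
	}

	return result;
}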
static struct sk_buff *handle_ing(struct sk_buff *skb,
				  struct packet_type **pt_prev,
int *ret, struct net_device *orig_dev)
{
- if (skb->dev->rx_queue.qdisc == &noop_qdisc)
+ if (skb->dev->ingress_queue.qdisc == &noop_qdisc)
goto out;
if (*pt_prev) {
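		/* Sketched completion of handle_ing() (simplified, not in
		 * the hunk): hand the skb to the pending protocol handler,
		 * then let ing_filter() classify it and drop the skb when
		 * the verdict is SHOT or STOLEN. */
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	switch (ing_filter(skb)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

out:
	skb->tc_verd = 0;
	return skb;
}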
static void netdev_init_queue_locks(struct net_device *dev)
{
netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
- __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
+ __netdev_init_queue_locks_one(dev, &dev->ingress_queue, NULL);
}
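This pattern recurs through the whole patch: netdev_for_each_tx_queue() walks only the TX queues, so the ingress queue needs its own explicit call. A sketch of the iterator as it looked in include/linux/netdevice.h around this time (the _tx array name is recalled, not shown in the hunks):

static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}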
static void netdev_init_queues(struct net_device *dev)
{
- netdev_init_one_queue(dev, &dev->rx_queue, NULL);
+ netdev_init_one_queue(dev, &dev->ingress_queue, NULL);
netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
spin_lock_init(&dev->tx_global_lock);
}
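The per-queue callbacks are tiny; hedged reconstructions of the two used here, with the lockdep class setup elided from the lock-init helper:

static void __netdev_init_queue_locks_one(struct net_device *dev,
					  struct netdev_queue *dev_queue,
					  void *_unused)
{
	spin_lock_init(&dev_queue->_xmit_lock);
	dev_queue->xmit_lock_owner = -1;
}

static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue,
				  void *_unused)
{
	queue->dev = dev;
}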
if (q)
goto out;
- q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
+ q = qdisc_match_from_root(dev->ingress_queue.qdisc_sleeping, handle);
out:
return q;
}
}
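qdisc_lookup() needs this explicit fallback because ingress qdiscs hang off ingress_queue and are not reachable from dev->qdisc. For reference, a reconstruction of qdisc_match_from_root() from this era:

static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
{
	struct Qdisc *q;

	if (!(root->flags & TCQ_F_BUILTIN) && root->handle == handle)
		return root;

	list_for_each_entry(q, &root->list, list) {
		if (q->handle == handle)
			return q;
	}
	return NULL;
}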
for (i = 0; i < num_q; i++) {
- struct netdev_queue *dev_queue = &dev->rx_queue;
+ struct netdev_queue *dev_queue = &dev->ingress_queue;
if (!ingress)
dev_queue = netdev_get_tx_queue(dev, i);
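		/* Sketch of the rest of this qdisc_graft() loop body (not in
		 * the hunk): swap the qdisc on the chosen queue and destroy
		 * the old one; the refcount handling is approximate. */
		old = dev_graft_qdisc(dev_queue, new);
		if (new && i > 0)
			atomic_inc(&new->refcnt);
		qdisc_destroy(old);
	}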
return -ENOENT;
q = qdisc_leaf(p, clid);
} else { /* ingress */
- q = dev->rx_queue.qdisc_sleeping;
+ q = dev->ingress_queue.qdisc_sleeping;
}
} else {
q = dev->qdisc;
return -ENOENT;
q = qdisc_leaf(p, clid);
} else { /* ingress */
- q = dev->rx_queue.qdisc_sleeping;
+ q = dev->ingress_queue.qdisc_sleeping;
}
} else {
q = dev->qdisc;
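tc_get_qdisc() and tc_modify_qdisc() resolve tcm_parent the same way, which is why the two hunks above are twins. Condensed, the branching looks like this (variable names follow the hunks; TC_H_* and qdisc_leaf() are from the real sources):

	if (clid) {
		if (clid != TC_H_ROOT) {
			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else { /* ingress */
				q = dev->ingress_queue.qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}
	}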
if (!(n->nlmsg_flags&NLM_F_CREATE))
return -ENOENT;
if (clid == TC_H_INGRESS)
- q = qdisc_create(dev, &dev->rx_queue, p,
+ q = qdisc_create(dev, &dev->ingress_queue, p,
tcm->tcm_parent, tcm->tcm_parent,
tca, &err);
else {
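			/* Sketched completion of the non-ingress branch (not
			 * in the hunk): other qdiscs are created against a TX
			 * queue; the fallback to queue 0 is an assumption. */
			struct netdev_queue *dev_queue;

			if (p)
				dev_queue = p->dev_queue;
			else
				dev_queue = netdev_get_tx_queue(dev, 0);

			q = qdisc_create(dev, dev_queue, p,
					 tcm->tcm_parent, tcm->tcm_handle,
					 tca, &err);
		}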
if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
goto done;
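	/* the ingress tree is not reachable from dev->qdisc, so it is
	 * dumped separately via the ingress queue's sleeping qdisc */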
- dev_queue = &dev->rx_queue;
+ dev_queue = &dev->ingress_queue;
if (tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, &q_idx, s_q_idx) < 0)
goto done;
if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
goto done;
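	/* same two-step walk for class dumps: TX root tree first, then
	 * the ingress tree */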
- dev_queue = &dev->rx_queue;
+ dev_queue = &dev->ingress_queue;
if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0)
goto done;
need_watchdog = 0;
netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
- transition_one_qdisc(dev, &dev->rx_queue, NULL);
+ transition_one_qdisc(dev, &dev->ingress_queue, NULL);
if (need_watchdog) {
dev->trans_start = jiffies;
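		dev_watchdog_up(dev);	/* sketched completion of dev_activate() */
	}
}

A hedged reconstruction of transition_one_qdisc(), which swaps each queue's sleeping qdisc into the active slot. Note the NULL third argument for the ingress call above: need_watchdog_p stays NULL there, so the ingress queue never arms the TX watchdog.

static void transition_one_qdisc(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_need_watchdog)
{
	struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
	int *need_watchdog_p = _need_watchdog;

	if (!(new_qdisc->flags & TCQ_F_BUILTIN))
		clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);

	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
	if (need_watchdog_p && new_qdisc != &noqueue_qdisc) {
		dev_queue->trans_start = 0;
		*need_watchdog_p = 1;
	}
}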
void dev_deactivate(struct net_device *dev)
{
netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
- dev_deactivate_queue(dev, &dev->rx_queue, &noop_qdisc);
+ dev_deactivate_queue(dev, &dev->ingress_queue, &noop_qdisc);
dev_watchdog_down(dev);
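dev_deactivate_queue() parks the active pointer on the default qdisc passed in, here &noop_qdisc, while keeping the real qdisc in qdisc_sleeping for later reactivation. A reconstruction:

static void dev_deactivate_queue(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_qdisc_default)
{
	struct Qdisc *qdisc = dev_queue->qdisc;
	struct Qdisc *qdisc_default = _qdisc_default;

	if (qdisc) {
		spin_lock_bh(qdisc_lock(qdisc));

		if (!(qdisc->flags & TCQ_F_BUILTIN))
			set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);

		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		dev_queue->qdisc_sleeping = qdisc;

		spin_unlock_bh(qdisc_lock(qdisc));
	}
}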
void dev_init_scheduler(struct net_device *dev)
{
dev->qdisc = &noop_qdisc;
netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
- dev_init_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
+ dev_init_scheduler_queue(dev, &dev->ingress_queue, &noop_qdisc);
setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
}
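For reference, a sketch of dev_init_scheduler_queue(): both the active and sleeping slots start out pointing at the default qdisc passed in (here &noop_qdisc).

static void dev_init_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc)
{
	struct Qdisc *qdisc = _qdisc;

	dev_queue->qdisc = qdisc;
	dev_queue->qdisc_sleeping = qdisc;
}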
void dev_shutdown(struct net_device *dev)
{
netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
- shutdown_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
+ shutdown_scheduler_queue(dev, &dev->ingress_queue, &noop_qdisc);
qdisc_destroy(dev->qdisc);
dev->qdisc = &noop_qdisc;
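shutdown_scheduler_queue() mirrors the deactivate helper but also destroys the old qdisc; a reconstruction:

static void shutdown_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc_default)
{
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
	struct Qdisc *qdisc_default = _qdisc_default;

	if (qdisc) {
		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		dev_queue->qdisc_sleeping = qdisc_default;

		qdisc_destroy(qdisc);
	}
}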