return -EOPNOTSUPP;
}
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0))
-static void ag71xx_oom_timer_handler(unsigned long data)
-{
- struct net_device *dev = (struct net_device *) data;
- struct ag71xx *ag = netdev_priv(dev);
-#else
static void ag71xx_oom_timer_handler(struct timer_list *t)
{
struct ag71xx *ag = from_timer(ag, t, oom_timer);
-#endif
napi_schedule(&ag->napi);
}
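The branch removed above dates from before kernel 4.15, when a timer callback took an unsigned long cookie that had to be cast back to the device pointer. Since 4.15, timer_setup() registers the callback and from_timer() (a container_of() wrapper keyed on the timer field) recovers the enclosing structure, which is exactly what the surviving handler does with ag->oom_timer. A minimal sketch of the pattern; my_dev, my_timer_fn and my_dev_start are illustrative names, not from this driver:

/* Post-4.15 timer API; all names here are illustrative. */
#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_dev {
	struct timer_list my_timer;
};

static void my_timer_fn(struct timer_list *t)
{
	/* from_timer() is container_of() keyed on the my_timer field. */
	struct my_dev *p = from_timer(p, t, my_timer);

	/* ... handle expiry, e.g. reschedule NAPI as above ... */
}

static void my_dev_start(struct my_dev *p)
{
	timer_setup(&p->my_timer, my_timer_fn, 0);
	mod_timer(&p->my_timer, jiffies + HZ);
}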
@@ ... @@
unsigned int offset = ag->rx_buf_offset;
int ring_mask = BIT(ring->order) - 1;
int ring_size = BIT(ring->order);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
struct list_head rx_list;
struct sk_buff *next;
-#else
- struct sk_buff_head queue;
-#endif
struct sk_buff *skb;
int done = 0;
DBG("%s: rx packets, limit=%d, curr=%u, dirty=%u\n",
dev->name, limit, ring->curr, ring->dirty);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
INIT_LIST_HEAD(&rx_list);
-#else
- skb_queue_head_init(&queue);
-#endif
while (done < limit) {
unsigned int i = ring->curr & ring_mask;
@@ ... @@
} else {
skb->dev = dev;
skb->ip_summed = CHECKSUM_NONE;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
list_add_tail(&skb->list, &rx_list);
-#else
- __skb_queue_tail(&queue, skb);
-#endif
}
next:
@@ ... @@
ag71xx_ring_rx_refill(ag);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
list_for_each_entry_safe(skb, next, &rx_list, list)
skb->protocol = eth_type_trans(skb, dev);
netif_receive_skb_list(&rx_list);
-#else
- while ((skb = __skb_dequeue(&queue)) != NULL) {
- skb->protocol = eth_type_trans(skb, dev);
- netif_receive_skb(skb);
- }
-#endif
DBG("%s: rx finish, curr=%u, dirty=%u, done=%d\n",
dev->name, ring->curr, ring->dirty, done);
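netif_receive_skb_list(), added in 4.19, is what makes the list-based path above worthwhile: received skbs are chained through skb->list and handed to the stack in one batch, amortizing per-packet entry overhead that the old per-skb netif_receive_skb() loop paid on every frame. A condensed sketch of the delivery sequence, assuming a hypothetical rx_next_skb() helper in place of the descriptor-ring walk:

/*
 * Batched RX delivery (kernel >= 4.19). rx_next_skb() stands in for the
 * descriptor-ring walk and is hypothetical, not a real helper.
 * Needs <linux/netdevice.h>, <linux/etherdevice.h>, <linux/list.h>.
 */
struct sk_buff *rx_next_skb(void);	/* hypothetical */

static void rx_deliver_batch(struct net_device *dev)
{
	LIST_HEAD(rx_list);
	struct sk_buff *skb, *next;

	while ((skb = rx_next_skb()) != NULL) {
		skb->dev = dev;
		skb->ip_summed = CHECKSUM_NONE;	/* no hw checksum offload */
		list_add_tail(&skb->list, &rx_list);
	}

	list_for_each_entry_safe(skb, next, &rx_list, list)
		skb->protocol = eth_type_trans(skb, dev);
	netif_receive_skb_list(&rx_list);	/* consumes every queued skb */
}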
@@ ... @@
INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func);
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0))
- init_timer(&ag->oom_timer);
- ag->oom_timer.data = (unsigned long) dev;
- ag->oom_timer.function = ag71xx_oom_timer_handler;
-#else
timer_setup(&ag->oom_timer, ag71xx_oom_timer_handler, 0);
-#endif
tx_size = AG71XX_TX_RING_SIZE_DEFAULT;
ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);
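Ring sizes here are stored as a power-of-two order: BIT(order) is the slot count and BIT(order) - 1 a wrap mask, so cursors like ring->curr and ring->dirty can increase monotonically and be masked on use (ring->curr & ring_mask) instead of being reduced modulo the size. A small standalone illustration of the arithmetic, independent of the driver:

/* Power-of-two ring indexing, as in "ring->curr & ring_mask" above. */
#include <stdio.h>

#define BIT(n)	(1U << (n))

int main(void)
{
	unsigned int order = 4;			/* 16-slot ring */
	unsigned int ring_size = BIT(order);
	unsigned int ring_mask = BIT(order) - 1;
	unsigned int curr = 37;			/* free-running cursor */

	/* 37 & 15 == 5, i.e. curr % ring_size without a division. */
	printf("slot %u of %u\n", curr & ring_mask, ring_size);
	return 0;
}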