static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau)
{
int32_t undo;
- undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
+ undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free +
+ MAX_SKB_TO_FREE;
if (undo > 0)
cvmx_fau_atomic_add32(fau, -undo);
- skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE : -skb_to_free;
+ skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE :
+ -skb_to_free;
return skb_to_free;
}
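For reference, a minimal standalone sketch of the claim-and-undo arithmetic this helper performs: the FAU counter is driven negative as the hardware retires packets, the caller optimistically claims up to MAX_SKB_TO_FREE completions with a fetch-and-add, and the helper gives back the excess and clamps how many skbs are actually freed in one pass. The plain int counter and fau_fetch_and_add() below are stand-ins for the Octeon FAU hardware register, not the driver's real API.

/*
 * Standalone sketch of the claim/undo arithmetic above, using a plain
 * counter in place of the Octeon FAU register. fau_fetch_and_add() and
 * this MAX_SKB_TO_FREE are illustrative stand-ins only.
 */
#include <stdio.h>

#define MAX_SKB_TO_FREE 32

static int fau;		/* hardware subtracts 1 per completed packet */

static int fau_fetch_and_add(int *reg, int value)
{
	int old = *reg;

	*reg += value;
	return old;
}

static int adjust_skb_to_free(int skb_to_free, int *reg)
{
	int undo;

	/* Give back the part of MAX_SKB_TO_FREE we claimed but won't free. */
	undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free +
						   MAX_SKB_TO_FREE;
	if (undo > 0)
		fau_fetch_and_add(reg, -undo);
	/* Free at most MAX_SKB_TO_FREE skbs per cleanup pass. */
	return -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE : -skb_to_free;
}

int main(void)
{
	int fetched, to_free;

	fau = -3;	/* pretend the hardware has retired 3 packets */
	fetched = fau_fetch_and_add(&fau, MAX_SKB_TO_FREE);
	to_free = adjust_skb_to_free(fetched, &fau);
	printf("free %d skbs, counter now %d\n", to_free, fau);	/* 3, 0 */
	return 0;
}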
for (qos = 0; qos < queues_per_port; qos++) {
if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
continue;
- skb_to_free = cvmx_fau_fetch_and_add32(priv->fau+qos*4, MAX_SKB_TO_FREE);
- skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau+qos*4);
+ skb_to_free = cvmx_fau_fetch_and_add32(priv->fau+qos*4,
+ MAX_SKB_TO_FREE);
+ skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
+ priv->fau+qos*4);
total_freed += skb_to_free;
struct sk_buff *to_free_list = NULL;
spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
while (skb_to_free > 0) {
- struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);
+ struct sk_buff *t;
+
+ t = __skb_dequeue(&priv->tx_free_list[qos]);
t->next = to_free_list;
to_free_list = t;
skb_to_free--;
}
- spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
+ spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
+ flags);
/* Do the actual freeing outside of the lock. */
while (to_free_list) {
struct sk_buff *t = to_free_list;
if (unlikely(__skb_linearize(skb))) {
queue_type = QUEUE_DROP;
if (USE_ASYNC_IOBDMA) {
- /* Get the number of skbuffs in use by the hardware */
+ /*
+ * Get the number of skbuffs in use
+ * by the hardware
+ */
CVMX_SYNCIOBDMA;
- skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
+ skb_to_free =
+ cvmx_scratch_read64(CVMX_SCR_SCRATCH);
} else {
- /* Get the number of skbuffs in use by the hardware */
- skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
- MAX_SKB_TO_FREE);
+ /*
+ * Get the number of skbuffs in use
+ * by the hardware
+ */
+ skb_to_free = cvmx_fau_fetch_and_add32(
+ priv->fau + qos * 4, MAX_SKB_TO_FREE);
}
- skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau + qos * 4);
+ skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
+ priv->fau + qos * 4);
spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
goto skip_xmit;
}
CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;
- hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)(page_address(fs->page.p) + fs->page_offset));
+ hw_buffer.s.addr = XKPHYS_TO_PHYS(
+ (u64)(page_address(fs->page.p) +
+ fs->page_offset));
hw_buffer.s.size = fs->size;
CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
}
*/
pko_command.s.dontfree = 0;
- hw_buffer.s.back = ((unsigned long)skb->data >> 7) - ((unsigned long)fpa_head >> 7);
+ hw_buffer.s.back = ((unsigned long)skb->data >> 7) -
+ ((unsigned long)fpa_head >> 7);
+
*(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;
/*
queue_type = QUEUE_HW;
}
if (USE_ASYNC_IOBDMA)
- cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH, FAU_TOTAL_TX_TO_CLEAN, 1);
+ cvmx_fau_async_fetch_and_add32(
+ CVMX_SCR_SCRATCH, FAU_TOTAL_TX_TO_CLEAN, 1);
spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
/* Drop this packet if we have too many already queued to the HW */
- if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >= MAX_OUT_QUEUE_DEPTH)) {
+ if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >=
+ MAX_OUT_QUEUE_DEPTH)) {
if (dev->tx_queue_len != 0) {
/* Drop the lock when notifying the core. */
- spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
+ spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
+ flags);
netif_stop_queue(dev);
- spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
+ spin_lock_irqsave(&priv->tx_free_list[qos].lock,
+ flags);
} else {
/* If not using normal queueing. */
queue_type = QUEUE_DROP;
priv->queue + qos,
pko_command, hw_buffer,
CVMX_PKO_LOCK_NONE))) {
- printk_ratelimited("%s: Failed to send the packet\n", dev->name);
+ printk_ratelimited("%s: Failed to send the packet\n",
+ dev->name);
queue_type = QUEUE_DROP;
}
skip_xmit:
cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
} else {
- total_to_clean = cvmx_fau_fetch_and_add32(FAU_TOTAL_TX_TO_CLEAN, 1);
+ total_to_clean = cvmx_fau_fetch_and_add32(
+ FAU_TOTAL_TX_TO_CLEAN, 1);
}
if (total_to_clean & 0x3ff) {