* Get the number of skbuffs in use
* by the hardware
*/
- skb_to_free = cvmx_fau_fetch_and_add32(
- priv->fau + qos * 4, MAX_SKB_TO_FREE);
+ skb_to_free =
+ cvmx_fau_fetch_and_add32(priv->fau +
+ qos * 4,
+ MAX_SKB_TO_FREE);
}
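	/*
	 * cvm_oct_adjust_skb_to_free() clamps the count to the number of
	 * skbuffs actually outstanding and returns any over-reservation
	 * to the FAU register.
	 */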
	skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
						 priv->fau + qos * 4);
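		/*
		 * Build a hardware gather entry for each page fragment:
		 * convert the fragment's kernel virtual address to the
		 * physical address the packet-output (PKO) hardware needs,
		 * record its size, and stash the descriptor in the skb
		 * control block.
		 */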
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;
- hw_buffer.s.addr = XKPHYS_TO_PHYS(
- (u64)(page_address(fs->page.p) +
- fs->page_offset));
+ hw_buffer.s.addr =
+ XKPHYS_TO_PHYS((u64)(page_address(fs->page.p) +
+ fs->page_offset));
hw_buffer.s.size = fs->size;
CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
}
queue_type = QUEUE_HW;
}
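	/*
	 * With IOBDMA the increment of FAU_TOTAL_TX_TO_CLEAN is issued
	 * asynchronously and its result is later read back from the
	 * CVMX_SCR_SCRATCH scratchpad, avoiding a stall on the FAU;
	 * the else branch below does the same update synchronously.
	 */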
if (USE_ASYNC_IOBDMA)
- cvmx_fau_async_fetch_and_add32(
- CVMX_SCR_SCRATCH, FAU_TOTAL_TX_TO_CLEAN, 1);
+ cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
+ FAU_TOTAL_TX_TO_CLEAN, 1);
spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
} else {
- total_to_clean = cvmx_fau_fetch_and_add32(
- FAU_TOTAL_TX_TO_CLEAN, 1);
+ total_to_clean =
+ cvmx_fau_fetch_and_add32(FAU_TOTAL_TX_TO_CLEAN, 1);
}
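	/*
	 * The low bits of the running packet count decide how often to
	 * kick off cleanup of completed transmits.
	 */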
if (total_to_clean & 0x3ff) {