static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
{
struct ravb_private *priv = netdev_priv(ndev);
struct net_device_stats *stats = &priv->stats[q];
+ int num_tx_desc = priv->num_tx_desc;
struct ravb_tx_desc *desc;
int free_num = 0;
int entry;
u32 size;
for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
bool txed;
entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
- NUM_TX_DESC);
+ num_tx_desc);
desc = &priv->tx_ring[q][entry];
txed = desc->die_dt == DT_FEMPTY;
if (free_txed_only && !txed)
break;
/* Descriptor type must be checked before all other reads */
dma_rmb();
size = le16_to_cpu(desc->ds_tagl) & TX_DS;
/* Free the original skb. */
- if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
+ if (priv->tx_skb[q][entry / num_tx_desc]) {
dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
size, DMA_TO_DEVICE);
/* Last packet descriptor? */
- if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
- entry /= NUM_TX_DESC;
+ if (entry % num_tx_desc == num_tx_desc - 1) {
+ entry /= num_tx_desc;
dev_kfree_skb_any(priv->tx_skb[q][entry]);
priv->tx_skb[q][entry] = NULL;
if (txed)
stats->tx_packets++;
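Aside: the parameterised index arithmetic above is easy to misread, so here is a minimal standalone sketch of it (struct tx_index and tx_index() are hypothetical names, not driver code). The ring holds num_tx_ring * num_tx_desc descriptors, each packet owns num_tx_desc consecutive entries, and every entry of a group maps back to one tx_skb slot.

#include <stdbool.h>

struct tx_index {
	int desc;	/* index into the TX descriptor ring */
	int skb;	/* index into the per-packet tx_skb array */
	bool last;	/* final descriptor of its packet's group? */
};

static struct tx_index tx_index(unsigned int seq, int num_tx_ring,
				int num_tx_desc)
{
	struct tx_index idx;

	/* cur_tx/dirty_tx are free-running counters; wrap onto the ring. */
	idx.desc = seq % (num_tx_ring * num_tx_desc);
	/* All descriptors of one packet share a single skb slot. */
	idx.skb = idx.desc / num_tx_desc;
	/* A packet completes at the last descriptor of its group. */
	idx.last = idx.desc % num_tx_desc == num_tx_desc - 1;
	return idx;
}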
static void ravb_ring_free(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
+ int num_tx_desc = priv->num_tx_desc;
int ring_size;
int i;
ravb_tx_free(ndev, q, false);
ring_size = sizeof(struct ravb_tx_desc) *
- (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
+ (priv->num_tx_ring[q] * num_tx_desc + 1);
dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
priv->tx_desc_dma[q]);
priv->tx_ring[q] = NULL;
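The TX ring footprint used here and in ravb_ring_init() follows from the same layout; a standalone sketch (tx_ring_bytes() is a hypothetical name):

#include <stddef.h>

/* The "+ 1" is the trailing DT_LINKFIX descriptor that chains the ring
 * back to its DMA base address (see ravb_ring_format() below).
 */
static size_t tx_ring_bytes(size_t desc_size, int num_tx_ring,
			    int num_tx_desc)
{
	return desc_size * ((size_t)num_tx_ring * num_tx_desc + 1);
}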
static void ravb_ring_format(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
+ int num_tx_desc = priv->num_tx_desc;
struct ravb_ex_rx_desc *rx_desc;
struct ravb_tx_desc *tx_desc;
struct ravb_desc *desc;
int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
- NUM_TX_DESC;
+ num_tx_desc;
dma_addr_t dma_addr;
int i;
for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
i++, tx_desc++) {
tx_desc->die_dt = DT_EEMPTY;
- tx_desc++;
- tx_desc->die_dt = DT_EEMPTY;
+ if (num_tx_desc > 1) {
+ tx_desc++;
+ tx_desc->die_dt = DT_EEMPTY;
+ }
}
tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
tx_desc->die_dt = DT_LINKFIX; /* type */
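The format loop marks every descriptor a packet may use as empty; a condensed, behaviour-equivalent sketch with stand-in types (format_tx_ring() is a hypothetical name, and the real DT_EEMPTY value lives in ravb.h):

#define DT_EEMPTY 0	/* stand-in value */

struct tx_desc {
	unsigned char die_dt;	/* stand-in: descriptor type field */
};

static void format_tx_ring(struct tx_desc *ring, int num_tx_ring,
			   int num_tx_desc)
{
	int i;

	/* One DT_EEMPTY per slot: one slot per packet in single-descriptor
	 * mode, two per packet in two-descriptor mode.
	 */
	for (i = 0; i < num_tx_ring * num_tx_desc; i++)
		ring[i].die_dt = DT_EEMPTY;
}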
static int ravb_ring_init(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
+ int num_tx_desc = priv->num_tx_desc;
struct sk_buff *skb;
int ring_size;
int i;
priv->rx_skb[q][i] = skb;
}
- /* Allocate rings for the aligned buffers */
- priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
- DPTR_ALIGN - 1, GFP_KERNEL);
- if (!priv->tx_align[q])
- goto error;
+ if (num_tx_desc > 1) {
+ /* Allocate rings for the aligned buffers */
+ priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
+ DPTR_ALIGN - 1, GFP_KERNEL);
+ if (!priv->tx_align[q])
+ goto error;
+ }
/* Allocate all RX descriptors. */
ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
/* Allocate all TX descriptors. */
ring_size = sizeof(struct ravb_tx_desc) *
- (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
+ (priv->num_tx_ring[q] * num_tx_desc + 1);
priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
&priv->tx_desc_dma[q],
GFP_KERNEL);
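The aligned bounce area is only needed in two-descriptor mode, where the head of each frame is copied into a DPTR_ALIGN-aligned slot; in single-descriptor mode skb->data is mapped directly. A sketch of the allocation size used above (tx_align_bytes() is a hypothetical name; DPTR_ALIGN is 4, per the comment in ravb_start_xmit()):

#include <stddef.h>

#define DPTR_ALIGN 4	/* required TX buffer alignment (from ravb.h) */

/* One aligned slot per ring entry, plus slack so the kmalloc() result
 * itself can be rounded up to a DPTR_ALIGN boundary with PTR_ALIGN().
 */
static size_t tx_align_bytes(int num_tx_ring)
{
	return (size_t)DPTR_ALIGN * num_tx_ring + DPTR_ALIGN - 1;
}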
static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
+ int num_tx_desc = priv->num_tx_desc;
u16 q = skb_get_queue_mapping(skb);
struct ravb_tstamp_skb *ts_skb;
struct ravb_tx_desc *desc;
spin_lock_irqsave(&priv->lock, flags);
if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
- NUM_TX_DESC) {
+ num_tx_desc) {
netif_err(priv, tx_queued, ndev,
"still transmitting with the full ring!\n");
netif_stop_subqueue(ndev, q);
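The occupancy check treats cur_tx and dirty_tx as free-running counters in descriptor units: the queue is full once fewer than one packet's worth (num_tx_desc) of free descriptors remains. A standalone sketch, assuming unsigned counters as in the driver (tx_ring_full() is a hypothetical name):

#include <stdbool.h>

static bool tx_ring_full(unsigned int cur_tx, unsigned int dirty_tx,
			 unsigned int num_tx_ring, unsigned int num_tx_desc)
{
	/* Unsigned subtraction stays correct across counter wraparound. */
	return cur_tx - dirty_tx > (num_tx_ring - 1) * num_tx_desc;
}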
if (skb_put_padto(skb, ETH_ZLEN))
goto exit;
- entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
- priv->tx_skb[q][entry / NUM_TX_DESC] = skb;
-
- buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
- entry / NUM_TX_DESC * DPTR_ALIGN;
- len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
- /* Zero length DMA descriptors are problematic as they seem to
- * terminate DMA transfers. Avoid them by simply using a length of
- * DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN.
- *
- * As skb is guaranteed to have at least ETH_ZLEN (60) bytes of
- * data by the call to skb_put_padto() above this is safe with
- * respect to both the length of the first DMA descriptor (len)
- * overflowing the available data and the length of the second DMA
- * descriptor (skb->len - len) being negative.
- */
- if (len == 0)
- len = DPTR_ALIGN;
+ entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc);
+ priv->tx_skb[q][entry / num_tx_desc] = skb;
+
+ if (num_tx_desc > 1) {
+ buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
+ entry / num_tx_desc * DPTR_ALIGN;
+ len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
+
+ /* Zero length DMA descriptors are problematic as they seem
+ * to terminate DMA transfers. Avoid them by simply using a
+ * length of DPTR_ALIGN (4) when skb data is aligned to
+ * DPTR_ALIGN.
+ *
+ * As skb is guaranteed to have at least ETH_ZLEN (60)
+ * bytes of data by the call to skb_put_padto() above this
+ * is safe with respect to both the length of the first DMA
+ * descriptor (len) overflowing the available data and the
+ * length of the second DMA descriptor (skb->len - len)
+ * being negative.
+ */
+ if (len == 0)
+ len = DPTR_ALIGN;
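+ /* Illustrative note: with skb->data ending in 0x2, PTR_ALIGN()
+ * rounds up to the next 0x4 boundary, so len = 2 head bytes are
+ * copied into the bounce buffer and the second descriptor starts
+ * at the now-aligned address skb->data + len.
+ */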
- memcpy(buffer, skb->data, len);
- dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
- if (dma_mapping_error(ndev->dev.parent, dma_addr))
- goto drop;
+ memcpy(buffer, skb->data, len);
+ dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
+ goto drop;
- desc = &priv->tx_ring[q][entry];
- desc->ds_tagl = cpu_to_le16(len);
- desc->dptr = cpu_to_le32(dma_addr);
+ desc = &priv->tx_ring[q][entry];
+ desc->ds_tagl = cpu_to_le16(len);
+ desc->dptr = cpu_to_le32(dma_addr);
- buffer = skb->data + len;
- len = skb->len - len;
- dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
- if (dma_mapping_error(ndev->dev.parent, dma_addr))
- goto unmap;
+ buffer = skb->data + len;
+ len = skb->len - len;
+ dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
+ goto unmap;
- desc++;
+ desc++;
+ } else {
+ desc = &priv->tx_ring[q][entry];
+ len = skb->len;
+ dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
+ goto drop;
+ }
desc->ds_tagl = cpu_to_le16(len);
desc->dptr = cpu_to_le32(dma_addr);
if (q == RAVB_NC) {
ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
if (!ts_skb) {
- desc--;
- dma_unmap_single(ndev->dev.parent, dma_addr, len,
- DMA_TO_DEVICE);
+ if (num_tx_desc > 1) {
+ desc--;
+ dma_unmap_single(ndev->dev.parent, dma_addr,
+ len, DMA_TO_DEVICE);
+ }
goto unmap;
}
ts_skb->skb = skb;
skb_tx_timestamp(skb);
/* Descriptor type must be set after all the above writes */
dma_wmb();
- desc->die_dt = DT_FEND;
- desc--;
- desc->die_dt = DT_FSTART;
-
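+ /* Illustrative note: marking DT_FEND before DT_FSTART should mean
+ * the hardware can never observe a started frame whose end
+ * descriptor is not yet valid.
+ */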
+ if (num_tx_desc > 1) {
+ desc->die_dt = DT_FEND;
+ desc--;
+ desc->die_dt = DT_FSTART;
+ } else {
+ desc->die_dt = DT_FSINGLE;
+ }
ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
- priv->cur_tx[q] += NUM_TX_DESC;
+ priv->cur_tx[q] += num_tx_desc;
if (priv->cur_tx[q] - priv->dirty_tx[q] >
- (priv->num_tx_ring[q] - 1) * NUM_TX_DESC &&
+ (priv->num_tx_ring[q] - 1) * num_tx_desc &&
!ravb_tx_free(ndev, q, true))
netif_stop_subqueue(ndev, q);
le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
drop:
dev_kfree_skb_any(skb);
- priv->tx_skb[q][entry / NUM_TX_DESC] = NULL;
+ priv->tx_skb[q][entry / num_tx_desc] = NULL;
goto exit;
}
ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
ndev->min_mtu = ETH_MIN_MTU;
+ priv->num_tx_desc = chip_id == RCAR_GEN2 ?
+ NUM_TX_DESC_GEN2 : NUM_TX_DESC_GEN3;
+
/* Set function */
ndev->netdev_ops = &ravb_netdev_ops;
ndev->ethtool_ops = &ravb_ethtool_ops;
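For context, the companion ravb.h hunk (not shown in this excerpt) is expected to provide the per-generation constants and the new field; a sketch reconstructed from the code above, not a verbatim quote:

+ /* TX descriptors per packet */
+ #define NUM_TX_DESC_GEN2 2 /* R-Car Gen2: aligned head copy + rest of frame */
+ #define NUM_TX_DESC_GEN3 1 /* R-Car Gen3: whole frame in one descriptor */

and, in struct ravb_private:

+ unsigned int num_tx_desc; /* TX descriptors per packet */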