                ring->cur = 0;
                ring->dirty = 0;
-               memset(&ring->rx, 0, sizeof(ring->rx));
                hw_if->rx_desc_init(channel);
        }

        return 0;
}
-static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
-{
-       struct xgbe_prv_data *pdata = channel->pdata;
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
-       struct xgbe_ring *ring = channel->rx_ring;
-       struct xgbe_ring_data *rdata;
-       int i;
-
-       DBGPR("-->xgbe_realloc_rx_buffer: rx_ring->rx.realloc_index = %u\n",
-             ring->rx.realloc_index);
-
-       for (i = 0; i < ring->dirty; i++) {
-               rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index);
-
-               /* Reset rdata values */
-               xgbe_unmap_rdata(pdata, rdata);
-
-               if (xgbe_map_rx_buffer(pdata, ring, rdata))
-                       break;
-
-               hw_if->rx_desc_reset(rdata);
-
-               ring->rx.realloc_index++;
-       }
-       ring->dirty = 0;
-
-       DBGPR("<--xgbe_realloc_rx_buffer\n");
-}
-
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
{
        DBGPR("-->xgbe_init_function_ptrs_desc\n");

        desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
        desc_if->free_ring_resources = xgbe_free_ring_resources;
        desc_if->map_tx_skb = xgbe_map_tx_skb;
-       desc_if->realloc_rx_buffer = xgbe_realloc_rx_buffer;
+       desc_if->map_rx_buffer = xgbe_map_rx_buffer;
        desc_if->unmap_rdata = xgbe_unmap_rdata;
        desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
        desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
        return (ring->rdesc_count - (ring->cur - ring->dirty));
}

+static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring)
+{
+       return (ring->cur - ring->dirty);
+}
+
static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
                                    struct xgbe_ring *ring, unsigned int count)
{
static void xgbe_rx_refresh(struct xgbe_channel *channel)
{
        struct xgbe_prv_data *pdata = channel->pdata;
+       struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_desc_if *desc_if = &pdata->desc_if;
        struct xgbe_ring *ring = channel->rx_ring;
        struct xgbe_ring_data *rdata;

-       desc_if->realloc_rx_buffer(channel);
+       while (ring->dirty != ring->cur) {
+               rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
+
+               /* Reset rdata values */
+               desc_if->unmap_rdata(pdata, rdata);
+
+               if (desc_if->map_rx_buffer(pdata, ring, rdata))
+                       break;
+
+               hw_if->rx_desc_reset(rdata);
+
+               ring->dirty++;
+       }

        /* Update the Rx Tail Pointer Register with address of
         * the last cleaned entry */
-       rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index - 1);
+       rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
                          lower_32_bits(rdata->rdesc_dma));
}
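
An aside for orientation, not part of the patch itself: the reworked refresh path treats cur and dirty as free-running counters. cur advances as packets are consumed in the Rx poll loop, dirty chases it here as each buffer is unmapped, remapped, and its descriptor reset, and the tail pointer is then written for the last refreshed entry. The stand-alone sketch below uses hypothetical names (demo_ring, ring_slot, DESC_CNT) and assumes, as XGBE_GET_DESC_DATA appears to do with a power-of-two rdesc_count, that the real ring slot comes from masking the running index; it only illustrates the indexing scheme, not the driver code.

#include <stdio.h>

#define DESC_CNT 8u    /* stand-in for a power-of-two descriptor count */

struct demo_ring {
        unsigned int cur;    /* next descriptor to examine for a packet */
        unsigned int dirty;  /* next descriptor still needing a fresh buffer */
};

/* Map a free-running index onto an actual ring slot by masking,
 * the same idea as the lookup done through XGBE_GET_DESC_DATA.
 */
static unsigned int ring_slot(unsigned int idx)
{
        return idx & (DESC_CNT - 1);
}

int main(void)
{
        struct demo_ring ring = { .cur = 0, .dirty = 0 };

        /* Pretend five packets were received and consumed: cur moves ahead. */
        ring.cur += 5;

        /* Refresh walks dirty up to cur, re-arming one slot per iteration,
         * mirroring the new while loop in xgbe_rx_refresh() above.
         */
        while (ring.dirty != ring.cur) {
                printf("re-arm ring slot %u\n", ring_slot(ring.dirty));
                ring.dirty++;
        }

        return 0;
}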
read_again:
                rdata = XGBE_GET_DESC_DATA(ring, ring->cur);

-               if (ring->dirty > (XGBE_RX_DESC_CNT >> 3))
+               if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3))
                        xgbe_rx_refresh(channel);
                if (hw_if->dev_read(channel))
                        break;

                received++;
                ring->cur++;
-               ring->dirty++;

                incomplete = XGMAC_GET_BITS(packet->attributes,
                                            RX_PACKET_ATTRIBUTES,
         *  cur   - Tx: index of descriptor to be used for current transfer
         *          Rx: index of descriptor to check for packet availability
         *  dirty - Tx: index of descriptor to check for transfer complete
-        *          Rx: count of descriptors in which a packet has been received
-        *              (used with skb_realloc_index to refresh the ring)
+        *          Rx: index of descriptor to check for buffer reallocation
         */
        unsigned int cur;
        unsigned int dirty;
                        unsigned short cur_mss;
                        unsigned short cur_vlan_ctag;
                } tx;
-
-               struct {
-                       unsigned int realloc_index;
-                       unsigned int realloc_threshold;
-               } rx;
        };
} ____cacheline_aligned;
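
Another aside, not part of the patch: with the rx sub-struct removed, Rx now follows the same convention Tx already used, where cur and dirty are monotonically increasing unsigned counters and the number of outstanding descriptors is a single unsigned subtraction (the Tx availability computation shown earlier, ring->rdesc_count - (ring->cur - ring->dirty), and the new xgbe_rx_dirty_desc() helper). A minimal sketch, assuming nothing beyond standard C unsigned arithmetic, of why that subtraction stays correct even if the counters wrap:

#include <assert.h>
#include <limits.h>

int main(void)
{
        /* Hypothetical snapshot taken just after cur wraps past UINT_MAX
         * while dirty has not caught up yet.
         */
        unsigned int cur = 2u;               /* logically UINT_MAX + 3 */
        unsigned int dirty = UINT_MAX - 1u;  /* still below the wrap point */

        /* Unsigned subtraction is modular, so the outstanding-descriptor
         * count is still recovered exactly.
         */
        assert(cur - dirty == 4u);

        return 0;
}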
        int (*alloc_ring_resources)(struct xgbe_prv_data *);
        void (*free_ring_resources)(struct xgbe_prv_data *);
        int (*map_tx_skb)(struct xgbe_channel *, struct sk_buff *);
-       void (*realloc_rx_buffer)(struct xgbe_channel *);
+       int (*map_rx_buffer)(struct xgbe_prv_data *, struct xgbe_ring *,
+                            struct xgbe_ring_data *);
        void (*unmap_rdata)(struct xgbe_prv_data *, struct xgbe_ring_data *);
        void (*wrapper_tx_desc_init)(struct xgbe_prv_data *);
        void (*wrapper_rx_desc_init)(struct xgbe_prv_data *);