** HISTORY
** $Date $Author $Comment
** 07 JUL 2009 Xu Liang Init Version
+**
+** Copyright 2017 Alexander Couzens <lynxis@fe80.eu>
*******************************************************************************/
#define IFX_ATM_VER_MAJOR 1
/* set htu entry */
set_htu_entry(vpi, vci, conn, vcc->qos.aal == ATM_AAL5 ? 1 : 0, 0);
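+ /* enable the TX completion interrupt for this connection; per-queue TX
+  * bits start at FIRST_QSB_QID + 16 in the MBOX_IGU1 registers */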
+ *MBOX_IGU1_ISRC |= (1 << (conn + FIRST_QSB_QID + 16));
+ *MBOX_IGU1_IER |= (1 << (conn + FIRST_QSB_QID + 16));
+
ret = 0;
PPE_OPEN_EXIT:
int ret;
int conn;
int desc_base;
+ int byteoff;
+ int required;
+ /* payload length, excluding byteoff and the in-band header */
+ int datalen;
+ unsigned long flags;
struct tx_descriptor reg_desc = {0};
+ struct tx_inband_header *header;
struct sk_buff *new_skb;
if ( vcc == NULL || skb == NULL )
return -EINVAL;
- skb_get(skb);
- atm_free_tx_skb_vcc(skb, vcc);
conn = find_vcc(vcc);
if ( conn < 0 ) {
goto PPE_SEND_FAIL;
}
- if ( vcc->qos.aal == ATM_AAL5 ) {
- int byteoff;
- int datalen;
- struct tx_inband_header *header;
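+ /* byteoff: misalignment of skb->data within DATA_BUFFER_ALIGNMENT;
+  * required: headroom needed for the in-band header plus that offset */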
+ byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
+ required = sizeof(*header) + byteoff;
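+ /* if the skb head is cloned or lacks headroom, reallocate it (growing by
+  * the missing headroom) so the header can be written in place */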
+ if (!skb_clone_writable(skb, required)) {
+ int expand_by = 0;
+ int ret;
- byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
- if ( skb_headroom(skb) < byteoff + TX_INBAND_HEADER_LENGTH )
- new_skb = skb_duplicate(skb);
- else
- new_skb = skb_break_away_from_protocol(skb);
- if ( new_skb == NULL ) {
- pr_err("either skb_duplicate or skb_break_away_from_protocol fail\n");
- ret = -ENOMEM;
- goto PPE_SEND_FAIL;
- }
- dev_kfree_skb_any(skb);
- skb = new_skb;
+ if (skb_headroom(skb) < required)
+ expand_by = required - skb_headroom(skb);
- datalen = skb->len;
- byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
+ ret = pskb_expand_head(skb, expand_by, 0, GFP_ATOMIC);
+ if (ret) {
+ pr_err("pskb_expand_head failed.\n");
+ atm_free_tx_skb_vcc(skb, vcc);
+ return ret;
+ }
+ }
- skb_push(skb, byteoff + TX_INBAND_HEADER_LENGTH);
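+ /* record the payload length, then make room for the in-band header plus
+  * byteoff bytes in front of the payload */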
+ datalen = skb->len;
+ header = (void *)skb_push(skb, byteoff + TX_INBAND_HEADER_LENGTH);
- header = (struct tx_inband_header *)skb->data;
+ if ( vcc->qos.aal == ATM_AAL5 ) {
/* setup inband trailer */
header->uu = 0;
header->cpi = 0;
reg_desc.byteoff = byteoff;
reg_desc.iscell = 0;
} else {
- /* if data pointer is not aligned, allocate new sk_buff */
- if ( ((unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1)) != 0 ) {
- pr_err("skb->data not aligned\n");
- new_skb = skb_duplicate(skb);
- } else
- new_skb = skb_break_away_from_protocol(skb);
- if ( new_skb == NULL ) {
- pr_err("either skb_duplicate or skb_break_away_from_protocol fail\n");
- ret = -ENOMEM;
- goto PPE_SEND_FAIL;
- }
- dev_kfree_skb_any(skb);
- skb = new_skb;
-
reg_desc.dataptr = (unsigned int)skb->data >> 2;
reg_desc.datalen = skb->len;
- reg_desc.byteoff = 0;
+ reg_desc.byteoff = byteoff;
reg_desc.iscell = 1;
}
reg_desc.c = 1;
reg_desc.sop = reg_desc.eop = 1;
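+ /* the per-connection lock serialises the TX descriptor/skb ring against
+  * free_tx_ring() running from the mailbox tasklet */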
+ spin_lock_irqsave(&g_atm_priv_data.conn[conn].lock, flags);
desc_base = get_tx_desc(conn);
if ( desc_base < 0 ) {
+ spin_unlock_irqrestore(&g_atm_priv_data.conn[conn].lock, flags);
pr_debug("ALLOC_TX_CONNECTION_FAIL\n");
ret = -EIO;
goto PPE_SEND_FAIL;
}
-
- if ( vcc->stats )
- atomic_inc(&vcc->stats->tx);
- if ( vcc->qos.aal == ATM_AAL5 )
- g_atm_priv_data.wtx_pdu++;
-
/* update descriptor send pointer */
if ( g_atm_priv_data.conn[conn].tx_skb[desc_base] != NULL )
dev_kfree_skb_any(g_atm_priv_data.conn[conn].tx_skb[desc_base]);
g_atm_priv_data.conn[conn].tx_skb[desc_base] = skb;
+ spin_unlock_irqrestore(&g_atm_priv_data.conn[conn].lock, flags);
+
+ if ( vcc->stats )
+ atomic_inc(&vcc->stats->tx);
+ if ( vcc->qos.aal == ATM_AAL5 )
+ g_atm_priv_data.wtx_pdu++;
/* write descriptor to memory and write back cache */
g_atm_priv_data.conn[conn].tx_desc[desc_base] = reg_desc;
dma_cache_wback((unsigned long)skb->data, skb->len);
return 0;
}
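+/*
+ * Free every skb on a TX queue whose descriptor the PPE has handed back
+ * (own bit cleared). Takes the per-connection lock; called from the
+ * mailbox tasklet via mailbox_tx_handler().
+ */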
+static void free_tx_ring(unsigned int queue)
+{
+ unsigned long flags;
+ int i;
+ struct connection *conn = &g_atm_priv_data.conn[queue];
+ struct sk_buff *skb;
+
+ if (!conn)
+ return;
+
+ spin_lock_irqsave(&conn->lock, flags);
+
+ for (i = 0; i < dma_tx_descriptor_length; i++) {
+ if (conn->tx_desc[i].own == 0 && conn->tx_skb[i] != NULL) {
+ skb = conn->tx_skb[i];
+ conn->tx_skb[i] = NULL;
+ atm_free_tx_skb_vcc(skb, ATM_SKB(skb)->vcc);
+ }
+ }
+ spin_unlock_irqrestore(&conn->lock, flags);
+}
+
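+/*
+ * Handle TX completion interrupts. queue_bitmap holds one bit per PVC,
+ * i.e. the IGU1 status bits shifted down by FIRST_QSB_QID + 16.
+ */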
+static void mailbox_tx_handler(unsigned int queue_bitmap)
+{
+ int i;
+ int bit;
+
+ /* only handle queues that belong to an open connection */
+ queue_bitmap &= g_atm_priv_data.conn_table;
+
+ for ( i = 0, bit = 1; i < MAX_PVC_NUMBER; i++, bit <<= 1 ) {
+ if (queue_bitmap & bit)
+ free_tx_ring(i);
+ }
+}
+
static inline void mailbox_oam_rx_handler(void)
{
unsigned int vlddes = WRX_DMA_CHANNEL_CONFIG(RX_DMA_CH_OAM)->vlddes;
static void do_ppe_tasklet(unsigned long data)
{
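+ /* snapshot the pending interrupt bits and acknowledge them */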
+ unsigned int irqs = *MBOX_IGU1_ISR;
*MBOX_IGU1_ISRC = *MBOX_IGU1_ISR;
- mailbox_oam_rx_handler();
- mailbox_aal_rx_handler();
+
+ if (irqs & (1 << RX_DMA_CH_AAL))
+ mailbox_aal_rx_handler();
+ if (irqs & (1 << RX_DMA_CH_OAM))
+ mailbox_oam_rx_handler();
+
+ /* any pending TX queue interrupts for open connections? */
+ if ((irqs >> (FIRST_QSB_QID + 16)) & g_atm_priv_data.conn_table)
+ mailbox_tx_handler(irqs >> (FIRST_QSB_QID + 16));
if ((*MBOX_IGU1_ISR & ((1 << RX_DMA_CH_AAL) | (1 << RX_DMA_CH_OAM))) != 0)
tasklet_schedule(&g_dma_tasklet);
+ else if (*MBOX_IGU1_ISR >> (FIRST_QSB_QID + 16)) /* TX queue */
+ tasklet_schedule(&g_dma_tasklet);
else
enable_irq(PPE_MAILBOX_IGU1_INT);
}
p_tx_desc = (volatile struct tx_descriptor *)((((unsigned int)g_atm_priv_data.tx_desc_base + DESC_ALIGNMENT - 1) & ~(DESC_ALIGNMENT - 1)) | KSEG1);
ppskb = (struct sk_buff **)(((unsigned int)g_atm_priv_data.tx_skb_base + 3) & ~3);
for ( i = 0; i < MAX_PVC_NUMBER; i++ ) {
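+ /* one TX lock per PVC, shared by ppe_send() and free_tx_ring() */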
+ spin_lock_init(&g_atm_priv_data.conn[i].lock);
g_atm_priv_data.conn[i].tx_desc = &p_tx_desc[i * dma_tx_descriptor_length];
g_atm_priv_data.conn[i].tx_skb = &ppskb[i * dma_tx_descriptor_length];
}
int ret;
int port_num;
struct port_cell_info port_cell = {0};
- int i, j;
char ver_str[256];
match = of_match_device(ltq_atm_match, &pdev->dev);