{
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
- int ret;
+ int descs_num, ret;
/* ignore ering->tx_pending - only rx_pending adjustment is supported */
if (ering->rx_mini_pending || ering->rx_jumbo_pending ||
    ering->rx_pending < CPSW_MAX_QUEUES ||
    ering->rx_pending > (cpsw->descs_pool_size - CPSW_MAX_QUEUES))
return -EINVAL;
- if (ering->rx_pending == cpdma_get_num_rx_descs(cpsw->dma))
+ descs_num = cpdma_get_num_rx_descs(cpsw->dma);
+ if (ering->rx_pending == descs_num)
return 0;
cpsw_suspend_data_pass(ndev);
- cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);
+ ret = cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);
+ if (ret) {
+ if (cpsw_resume_data_pass(ndev))
+ goto err;
- if (cpsw->usage_count)
- cpdma_chan_split_pool(cpsw->dma);
+ return ret;
+ }
ret = cpsw_resume_data_pass(ndev);
if (!ret)
return 0;
-
+err:
+ cpdma_set_num_rx_descs(cpsw->dma, descs_num);
dev_err(cpsw->dev, "cannot set ring params, closing device\n");
dev_close(ndev);
return ret;
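
The hunk above reworks the error handling of the driver's ethtool ring-size
callback. For orientation only (not part of this patch, and the wiring is
assumed from the usual cpsw driver layout): cpsw_set_ringparam() is reached
through the ethtool .set_ringparam operation, e.g. via "ethtool -G <iface>
rx <N>", roughly hooked up as in the sketch below; cpsw_get_ringparam is the
matching getter assumed to exist alongside it.

/* Sketch only -- assumed ethtool_ops wiring, not quoted from this patch. */
static const struct ethtool_ops cpsw_ethtool_ops_sketch = {
	.get_ringparam	= cpsw_get_ringparam,
	.set_ringparam	= cpsw_set_ringparam,
};
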
return ctlr->num_tx_desc;
}
-void cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
+int cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
{
+ unsigned long flags;
+ int temp, ret;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+
+ temp = ctlr->num_rx_desc;
ctlr->num_rx_desc = num_rx_desc;
ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
+ ret = cpdma_chan_split_pool(ctlr);
+ if (ret) {
+ ctlr->num_rx_desc = temp;
+ ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
+ }
+
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+
+ return ret;
}
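
With cpdma_set_num_rx_descs() now returning an error code and restoring the
previous descriptor split under ctlr->lock when cpdma_chan_split_pool()
fails, callers only need to check the return value. A minimal caller sketch
follows; the helper name and the cpdma_ctlr_start() follow-up step are
illustrative assumptions, not taken from this patch.

/* Hypothetical caller -- not part of this patch -- mirroring the pattern in
 * cpsw_set_ringparam() above: remember the old RX count so a failure in a
 * later step can restore it with a second cpdma_set_num_rx_descs() call.
 */
static int cpdma_resize_rx_example(struct cpdma_ctlr *ctlr, int rx_descs)
{
	int old_rx = cpdma_get_num_rx_descs(ctlr);
	int ret;

	ret = cpdma_set_num_rx_descs(ctlr, rx_descs);
	if (ret)
		return ret;	/* pool split failed; counts already rolled back */

	ret = cpdma_ctlr_start(ctlr);	/* example follow-up step */
	if (ret)
		cpdma_set_num_rx_descs(ctlr, old_rx);	/* best-effort rollback */

	return ret;
}
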
int cpdma_control_get(struct cpdma_ctlr *ctlr, int control);
int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value);
int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr);
-void cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc);
+int cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc);
int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr);
-int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr);
#endif