static void device_get_options(struct vnt_private *priv)
{
- POPTIONS pOpts = &priv->sOpts;
+ struct vnt_options *pOpts = &priv->sOpts;
- pOpts->nRxDescs0 = RX_DESC_DEF0;
- pOpts->nRxDescs1 = RX_DESC_DEF1;
- pOpts->nTxDescs[0] = TX_DESC_DEF0;
- pOpts->nTxDescs[1] = TX_DESC_DEF1;
+ pOpts->rx_descs0 = RX_DESC_DEF0;
+ pOpts->rx_descs1 = RX_DESC_DEF1;
+ pOpts->tx_descs[0] = TX_DESC_DEF0;
+ pOpts->tx_descs[1] = TX_DESC_DEF1;
pOpts->int_works = INT_WORKS_DEF;
pOpts->short_retry = SHORT_RETRY_DEF;
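/*
 * For orientation only: a sketch of how the renamed option fields used in
 * this patch are presumably declared. The actual struct vnt_options lives in
 * the driver headers and may have more members and different types; every
 * name and type below is inferred from the usage in this file, not taken
 * from the header.
 */
struct vnt_options {
	int rx_descs0;		/* number of RD0 ring descriptors */
	int rx_descs1;		/* number of RD1 ring descriptors */
	int tx_descs[2];	/* number of TD0/TD1 ring descriptors */
	int int_works;		/* interrupt work limit */
	int short_retry;	/* short retry limit */
};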

@@ ... @@ static bool device_init_rings(struct vnt_private *priv)
void *vir_pool;

/* allocate all RD/TD rings in a single pool */
vir_pool = dma_zalloc_coherent(&priv->pcid->dev,
- priv->sOpts.nRxDescs0 * sizeof(struct vnt_rx_desc) +
- priv->sOpts.nRxDescs1 * sizeof(struct vnt_rx_desc) +
- priv->sOpts.nTxDescs[0] * sizeof(struct vnt_tx_desc) +
- priv->sOpts.nTxDescs[1] * sizeof(struct vnt_tx_desc),
+ priv->sOpts.rx_descs0 * sizeof(struct vnt_rx_desc) +
+ priv->sOpts.rx_descs1 * sizeof(struct vnt_rx_desc) +
+ priv->sOpts.tx_descs[0] * sizeof(struct vnt_tx_desc) +
+ priv->sOpts.tx_descs[1] * sizeof(struct vnt_tx_desc),
&priv->pool_dma, GFP_ATOMIC);
if (vir_pool == NULL) {
dev_err(&priv->pcid->dev, "allocate desc dma memory failed\n");
priv->aRD0Ring = vir_pool;
priv->aRD1Ring = vir_pool +
- priv->sOpts.nRxDescs0 * sizeof(struct vnt_rx_desc);
+ priv->sOpts.rx_descs0 * sizeof(struct vnt_rx_desc);
priv->rd0_pool_dma = priv->pool_dma;
priv->rd1_pool_dma = priv->rd0_pool_dma +
- priv->sOpts.nRxDescs0 * sizeof(struct vnt_rx_desc);
+ priv->sOpts.rx_descs0 * sizeof(struct vnt_rx_desc);
priv->tx0_bufs = dma_zalloc_coherent(&priv->pcid->dev,
- priv->sOpts.nTxDescs[0] * PKT_BUF_SZ +
- priv->sOpts.nTxDescs[1] * PKT_BUF_SZ +
+ priv->sOpts.tx_descs[0] * PKT_BUF_SZ +
+ priv->sOpts.tx_descs[1] * PKT_BUF_SZ +
CB_BEACON_BUF_SIZE +
CB_MAX_BUF_SIZE,
&priv->tx_bufs_dma0,
GFP_ATOMIC);
if (priv->tx0_bufs == NULL) {
dev_err(&priv->pcid->dev, "allocate buf dma memory failed\n");
dma_free_coherent(&priv->pcid->dev,
- priv->sOpts.nRxDescs0 * sizeof(struct vnt_rx_desc) +
- priv->sOpts.nRxDescs1 * sizeof(struct vnt_rx_desc) +
- priv->sOpts.nTxDescs[0] * sizeof(struct vnt_tx_desc) +
- priv->sOpts.nTxDescs[1] * sizeof(struct vnt_tx_desc),
+ priv->sOpts.rx_descs0 * sizeof(struct vnt_rx_desc) +
+ priv->sOpts.rx_descs1 * sizeof(struct vnt_rx_desc) +
+ priv->sOpts.tx_descs[0] * sizeof(struct vnt_tx_desc) +
+ priv->sOpts.tx_descs[1] * sizeof(struct vnt_tx_desc),
vir_pool, priv->pool_dma);
return false;
}
priv->td0_pool_dma = priv->rd1_pool_dma +
- priv->sOpts.nRxDescs1 * sizeof(struct vnt_rx_desc);
+ priv->sOpts.rx_descs1 * sizeof(struct vnt_rx_desc);
priv->td1_pool_dma = priv->td0_pool_dma +
- priv->sOpts.nTxDescs[0] * sizeof(struct vnt_tx_desc);
+ priv->sOpts.tx_descs[0] * sizeof(struct vnt_tx_desc);
/* vir_pool is a void pointer, so the offsets below are byte counts */
priv->apTD0Rings = vir_pool
- + priv->sOpts.nRxDescs0 * sizeof(struct vnt_rx_desc)
- + priv->sOpts.nRxDescs1 * sizeof(struct vnt_rx_desc);
+ + priv->sOpts.rx_descs0 * sizeof(struct vnt_rx_desc)
+ + priv->sOpts.rx_descs1 * sizeof(struct vnt_rx_desc);
priv->apTD1Rings = vir_pool
- + priv->sOpts.nRxDescs0 * sizeof(struct vnt_rx_desc)
- + priv->sOpts.nRxDescs1 * sizeof(struct vnt_rx_desc)
- + priv->sOpts.nTxDescs[0] * sizeof(struct vnt_tx_desc);
+ + priv->sOpts.rx_descs0 * sizeof(struct vnt_rx_desc)
+ + priv->sOpts.rx_descs1 * sizeof(struct vnt_rx_desc)
+ + priv->sOpts.tx_descs[0] * sizeof(struct vnt_tx_desc);
priv->tx1_bufs = priv->tx0_bufs +
- priv->sOpts.nTxDescs[0] * PKT_BUF_SZ;
+ priv->sOpts.tx_descs[0] * PKT_BUF_SZ;
priv->tx_beacon_bufs = priv->tx1_bufs +
- priv->sOpts.nTxDescs[1] * PKT_BUF_SZ;
+ priv->sOpts.tx_descs[1] * PKT_BUF_SZ;
priv->pbyTmpBuff = priv->tx_beacon_bufs +
CB_BEACON_BUF_SIZE;
priv->tx_bufs_dma1 = priv->tx_bufs_dma0 +
- priv->sOpts.nTxDescs[0] * PKT_BUF_SZ;
+ priv->sOpts.tx_descs[0] * PKT_BUF_SZ;
priv->tx_beacon_dma = priv->tx_bufs_dma1 +
- priv->sOpts.nTxDescs[1] * PKT_BUF_SZ;
+ priv->sOpts.tx_descs[1] * PKT_BUF_SZ;
return true;
}
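
/*
 * Resulting layout of the two coherent allocations set up above, derived
 * from the offset arithmetic in device_init_rings():
 *
 *   vir_pool / pool_dma:
 *     [ RD0 ring | RD1 ring | TD0 ring | TD1 ring ]
 *
 *   tx0_bufs / tx_bufs_dma0:
 *     [ TD0 buffers | TD1 buffers | beacon buffer | tmp buffer ]
 */
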
static void device_free_rings(struct vnt_private *priv)
{
dma_free_coherent(&priv->pcid->dev,
- priv->sOpts.nRxDescs0 * sizeof(struct vnt_rx_desc) +
- priv->sOpts.nRxDescs1 * sizeof(struct vnt_rx_desc) +
- priv->sOpts.nTxDescs[0] * sizeof(struct vnt_tx_desc) +
- priv->sOpts.nTxDescs[1] * sizeof(struct vnt_tx_desc),
+ priv->sOpts.rx_descs0 * sizeof(struct vnt_rx_desc) +
+ priv->sOpts.rx_descs1 * sizeof(struct vnt_rx_desc) +
+ priv->sOpts.tx_descs[0] * sizeof(struct vnt_tx_desc) +
+ priv->sOpts.tx_descs[1] * sizeof(struct vnt_tx_desc),
priv->aRD0Ring, priv->pool_dma);
if (priv->tx0_bufs)
dma_free_coherent(&priv->pcid->dev,
- priv->sOpts.nTxDescs[0] * PKT_BUF_SZ +
- priv->sOpts.nTxDescs[1] * PKT_BUF_SZ +
+ priv->sOpts.tx_descs[0] * PKT_BUF_SZ +
+ priv->sOpts.tx_descs[1] * PKT_BUF_SZ +
CB_BEACON_BUF_SIZE +
CB_MAX_BUF_SIZE,
priv->tx0_bufs, priv->tx_bufs_dma0);
}
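
/*
 * Editorial note: the RD/TD pool size expression above appears three times
 * (allocation, allocation error path, and device_free_rings). A hypothetical
 * helper such as the following would compute it once; the name
 * vnt_desc_pool_size() is illustrative only and is not part of this patch.
 */
static inline size_t vnt_desc_pool_size(struct vnt_private *priv)
{
	return priv->sOpts.rx_descs0 * sizeof(struct vnt_rx_desc) +
	       priv->sOpts.rx_descs1 * sizeof(struct vnt_rx_desc) +
	       priv->sOpts.tx_descs[0] * sizeof(struct vnt_tx_desc) +
	       priv->sOpts.tx_descs[1] * sizeof(struct vnt_tx_desc);
}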

@@ ... @@ static void device_init_rd0_ring(struct vnt_private *priv)
int i;
dma_addr_t curr = priv->rd0_pool_dma;
struct vnt_rx_desc *desc;
/* Init the RD0 ring entries */
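/* Each descriptor's ->next wraps back to the start via the modulo below, so
 * the ring is circular in virtual memory; ->next_desc carries the
 * little-endian DMA address of the following descriptor in the pool. */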
- for (i = 0; i < priv->sOpts.nRxDescs0;
+ for (i = 0; i < priv->sOpts.rx_descs0;
i ++, curr += sizeof(struct vnt_rx_desc)) {
desc = &priv->aRD0Ring[i];
desc->rd_info = alloc_rd_info();
if (!device_alloc_rx_buf(priv, desc))
dev_err(&priv->pcid->dev, "can not alloc rx bufs\n");
- desc->next = &(priv->aRD0Ring[(i+1) % priv->sOpts.nRxDescs0]);
+ desc->next = &(priv->aRD0Ring[(i+1) % priv->sOpts.rx_descs0]);
desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_rx_desc));
}

@@ ... @@ static void device_init_rd1_ring(struct vnt_private *priv)
int i;
dma_addr_t curr = priv->rd1_pool_dma;
struct vnt_rx_desc *desc;
/* Init the RD1 ring entries */
- for (i = 0; i < priv->sOpts.nRxDescs1;
+ for (i = 0; i < priv->sOpts.rx_descs1;
i ++, curr += sizeof(struct vnt_rx_desc)) {
desc = &priv->aRD1Ring[i];
desc->rd_info = alloc_rd_info();
if (!device_alloc_rx_buf(priv, desc))
dev_err(&priv->pcid->dev, "can not alloc rx bufs\n");
- desc->next = &(priv->aRD1Ring[(i+1) % priv->sOpts.nRxDescs1]);
+ desc->next = &(priv->aRD1Ring[(i+1) % priv->sOpts.rx_descs1]);
desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_rx_desc));
}

@@ ... @@ static void device_free_rd0_ring(struct vnt_private *priv)
{
int i;
- for (i = 0; i < priv->sOpts.nRxDescs0; i++) {
+ for (i = 0; i < priv->sOpts.rx_descs0; i++) {
struct vnt_rx_desc *desc = &(priv->aRD0Ring[i]);
struct vnt_rd_info *rd_info = desc->rd_info;

@@ ... @@ static void device_free_rd1_ring(struct vnt_private *priv)
{
int i;
- for (i = 0; i < priv->sOpts.nRxDescs1; i++) {
+ for (i = 0; i < priv->sOpts.rx_descs1; i++) {
struct vnt_rx_desc *desc = &priv->aRD1Ring[i];
struct vnt_rd_info *rd_info = desc->rd_info;

@@ ... @@ static void device_init_td0_ring(struct vnt_private *priv)
int i;
dma_addr_t curr;
struct vnt_tx_desc *desc;
curr = priv->td0_pool_dma;
- for (i = 0; i < priv->sOpts.nTxDescs[0];
+ for (i = 0; i < priv->sOpts.tx_descs[0];
i++, curr += sizeof(struct vnt_tx_desc)) {
desc = &priv->apTD0Rings[i];
desc->td_info = alloc_td_info();
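/* Each TD0 descriptor gets its own PKT_BUF_SZ slice of the tx0_bufs area,
 * in both the virtual and the DMA address space. */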
desc->td_info->buf = priv->tx0_bufs + i * PKT_BUF_SZ;
desc->td_info->buf_dma = priv->tx_bufs_dma0 + i * PKT_BUF_SZ;
- desc->next = &(priv->apTD0Rings[(i+1) % priv->sOpts.nTxDescs[0]]);
+ desc->next = &(priv->apTD0Rings[(i+1) % priv->sOpts.tx_descs[0]]);
desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_tx_desc));
}

@@ ... @@ static void device_init_td1_ring(struct vnt_private *priv)
int i;
dma_addr_t curr;
struct vnt_tx_desc *desc;

/* Init the TD1 ring entries */
curr = priv->td1_pool_dma;
- for (i = 0; i < priv->sOpts.nTxDescs[1];
+ for (i = 0; i < priv->sOpts.tx_descs[1];
i++, curr += sizeof(struct vnt_tx_desc)) {
desc = &priv->apTD1Rings[i];
desc->td_info = alloc_td_info();
desc->td_info->buf = priv->tx1_bufs + i * PKT_BUF_SZ;
desc->td_info->buf_dma = priv->tx_bufs_dma1 + i * PKT_BUF_SZ;
- desc->next = &(priv->apTD1Rings[(i + 1) % priv->sOpts.nTxDescs[1]]);
+ desc->next = &(priv->apTD1Rings[(i + 1) % priv->sOpts.tx_descs[1]]);
desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_tx_desc));
}

@@ ... @@ static void device_free_td0_ring(struct vnt_private *priv)
{
int i;
- for (i = 0; i < priv->sOpts.nTxDescs[0]; i++) {
+ for (i = 0; i < priv->sOpts.tx_descs[0]; i++) {
struct vnt_tx_desc *desc = &priv->apTD0Rings[i];
struct vnt_td_info *td_info = desc->td_info;

@@ ... @@ static void device_free_td1_ring(struct vnt_private *priv)
{
int i;
- for (i = 0; i < priv->sOpts.nTxDescs[1]; i++) {
+ for (i = 0; i < priv->sOpts.tx_descs[1]; i++) {
struct vnt_tx_desc *desc = &priv->apTD1Rings[i];
struct vnt_td_info *td_info = desc->td_info;