q->len = len;
q->entry_size = entry_size;
q->size = len * entry_size;
- q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size,
- &q->dma, GFP_KERNEL);
+ q->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, q->size,
+ &q->dma, GFP_KERNEL);
if (!q->va)
return -ENOMEM;
- memset(q->va, 0, q->size);
return 0;
}
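
The conversion is behavior-preserving: dma_zalloc_coherent() is a thin inline wrapper that ORs __GFP_ZERO into the allocation flags, so the buffer comes back already zeroed and the follow-up memset() was dead weight. Roughly as defined in include/linux/dma-mapping.h:

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	/* Same allocation, but with zero-initialization requested. */
	return dma_alloc_coherent(dev, size, dma_handle,
				  flag | __GFP_ZERO);
}
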
return -ENOMEM;
ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
- cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
+ cq->va = dma_zalloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
if (!cq->va) {
status = -ENOMEM;
goto mem_err;
}
- memset(cq->va, 0, cq->len);
page_size = cq->len / hw_pages;
cmd->cmd.pgsz_pgcnt = (page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
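
The teardown side is unchanged by this patch: dma_free_coherent() takes the same size plus both the CPU and bus addresses handed back by the allocator. Sketched for the CQ above (same fields assumed on the destroy path):

	/* Destroy path: release the coherent CQ buffer. */
	dma_free_coherent(&pdev->dev, cq->len, cq->va, cq->pa);
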
qp->sq.max_cnt = max_wqe_allocated;
len = (hw_pages * hw_page_size);
- qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+ qp->sq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
if (!qp->sq.va)
return -EINVAL;
- memset(qp->sq.va, 0, len);
qp->sq.len = len;
qp->sq.pa = pa;
qp->sq.entry_size = dev->attr.wqe_size;
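
One pre-existing wart, untouched here: this SQ leg returns -EINVAL on an allocation failure, while the RQ leg just below returns the conventional -ENOMEM. Since both legs are now the same two-line pattern, they could in principle share a helper; a hypothetical sketch, not part of this patch (name and signature invented):

static int ocrdma_qp_buf_alloc(struct pci_dev *pdev, u32 len,
			       void **va, dma_addr_t *pa)
{
	/* Zeroed, DMA-coherent queue buffer; caller records len and pa. */
	*va = dma_zalloc_coherent(&pdev->dev, len, pa, GFP_KERNEL);
	return *va ? 0 : -ENOMEM;
}
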
qp->rq.max_cnt = max_rqe_allocated;
len = (hw_pages * hw_page_size);
- qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+ qp->rq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
if (!qp->rq.va)
return -ENOMEM;
- memset(qp->rq.va, 0, len);
qp->rq.pa = pa;
qp->rq.len = len;
qp->rq.entry_size = dev->attr.rqe_size;
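
For scale, a worked example of the sizing arithmetic with made-up numbers (the real values come from dev->attr and the CREATE_QP request):

	/* Hypothetical: 256 RQEs of 128 bytes each need 32 KiB, i.e.
	 * eight 4 KiB hardware pages; len is the page-rounded size. */
	u32 hw_page_size = 4096;
	u32 hw_pages = DIV_ROUND_UP(256 * 128, hw_page_size);	/* 8 */
	u32 len = hw_pages * hw_page_size;			/* 32768 */
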
if (dev->attr.ird == 0)
return 0;
- qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len,
- &pa, GFP_KERNEL);
+ qp->ird_q_va = dma_zalloc_coherent(&pdev->dev, ird_q_len, &pa,
+ GFP_KERNEL);
if (!qp->ird_q_va)
return -ENOMEM;
- memset(qp->ird_q_va, 0, ird_q_len);
ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
pa, ird_page_size);
for (; i < ird_q_len / dev->attr.rqe_size; i++) {
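
ocrdma_build_q_pages() then advertises the single coherent allocation to the device as a list of hardware pages. A hedged sketch of what it does (lo/hi field names per the ocrdma SLI header; treat the details as approximate):

static void build_q_pages(struct ocrdma_pa *q_pa, int cnt,
			  dma_addr_t host_pa, int hw_page_size)
{
	int i;

	/* Record each hw-page-sized chunk's bus address in the command. */
	for (i = 0; i < cnt; i++) {
		q_pa[i].lo = (u32)(host_pa & 0xffffffff);
		q_pa[i].hi = (u32)upper_32_bits(host_pa);
		host_pa += hw_page_size;
	}
}
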
mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req),
sizeof(struct ocrdma_rdma_stats_resp));
- mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size,
- &mem->pa, GFP_KERNEL);
+ mem->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, mem->size,
+ &mem->pa, GFP_KERNEL);
if (!mem->va) {
pr_err("%s: stats mbox allocation failed\n", __func__);
return false;
}
- memset(mem->va, 0, mem->size);
-
/* Alloc debugfs mem */
mem->debugfs_mem = kzalloc(OCRDMA_MAX_DBGFS_MEM, GFP_KERNEL);
if (!mem->debugfs_mem)
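
This function mixes two allocators, so the release side must pair each with its own free: dma_free_coherent() for the mailbox buffer, kfree() for the debugfs scratch area. A hedged sketch of the matching cleanup (struct and field names assumed from the allocation above):

static void release_stats_resources(struct ocrdma_dev *dev,
				    struct ocrdma_stats_mem *mem)
{
	if (mem->va)
		dma_free_coherent(&dev->nic_info.pdev->dev, mem->size,
				  mem->va, mem->pa);
	mem->va = NULL;
	kfree(mem->debugfs_mem);
}
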
INIT_LIST_HEAD(&ctx->mm_head);
mutex_init(&ctx->mm_list_lock);
- ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
- &ctx->ah_tbl.pa, GFP_KERNEL);
+ ctx->ah_tbl.va = dma_zalloc_coherent(&pdev->dev, map_len,
+ &ctx->ah_tbl.pa, GFP_KERNEL);
if (!ctx->ah_tbl.va) {
kfree(ctx);
return ERR_PTR(-ENOMEM);
}
- memset(ctx->ah_tbl.va, 0, map_len);
ctx->ah_tbl.len = map_len;
memset(&resp, 0, sizeof(resp));
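
The zeroing is load-bearing here: the AH table page is later exposed to userspace through the driver's mmap path, so handing out uninitialized kernel memory would be an information leak. For reference, the generic kernel helper for mapping a coherent buffer into a VMA is dma_mmap_coherent(); a sketch of that generic route, not a claim about ocrdma's own mmap implementation:

	/* In an mmap handler: expose the zeroed coherent buffer to
	 * userspace; va/pa/len are the values recorded at allocation. */
	err = dma_mmap_coherent(&pdev->dev, vma, ctx->ah_tbl.va,
				ctx->ah_tbl.pa, ctx->ah_tbl.len);
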
return -ENOMEM;
for (i = 0; i < mr->num_pbls; i++) {
- va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
+ va = dma_zalloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
if (!va) {
ocrdma_free_mr_pbl_tbl(dev, mr);
status = -ENOMEM;
break;
}
- memset(va, 0, dma_len);
mr->pbl_table[i].va = va;
mr->pbl_table[i].pa = pa;
}
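
Because this loop can fail partway through, ocrdma_free_mr_pbl_tbl() has to cope with a partially populated table; the entries start zeroed, so unreached slots carry a NULL va. A hedged sketch of such a cleanup (per-PBL length assumed to be kept in mr->pbl_size):

static void free_mr_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
{
	struct pci_dev *pdev = dev->nic_info.pdev;
	int i;

	for (i = 0; i < mr->num_pbls; i++) {
		if (!mr->pbl_table[i].va)
			continue;	/* slot never reached by the alloc loop */
		dma_free_coherent(&pdev->dev, mr->pbl_size,
				  mr->pbl_table[i].va, mr->pbl_table[i].pa);
	}
	kfree(mr->pbl_table);
	mr->pbl_table = NULL;
}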