				       struct rxe_pkt_info *pkt,
				       struct rxe_send_wqe *wqe)
{
-	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	int ret;

-	ret = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE,
+	ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
			&wqe->dma, payload_addr(pkt),
			payload_size(pkt), to_mem_obj, NULL);
	if (ret)
				       struct rxe_pkt_info *pkt,
				       struct rxe_send_wqe *wqe)
{
-	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	int ret;
	u64 atomic_orig = atmack_orig(pkt);

-	ret = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE,
+	ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
			&wqe->dma, &atomic_orig,
			sizeof(u64), to_mem_obj, NULL);
	if (ret)
	from_mem_obj,
};

-int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd,
+int rxe_mem_init_dma(struct rxe_pd *pd,
		     int access, struct rxe_mem *mem);

-int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
+int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
		     u64 length, u64 iova, int access, struct ib_udata *udata,
		     struct rxe_mem *mr);

-int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd,
+int rxe_mem_init_fast(struct rxe_pd *pd,
		     int max_pages, struct rxe_mem *mem);

int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr,
		 int length, enum copy_direction dir, u32 *crcp);

-int copy_data(struct rxe_dev *rxe, struct rxe_pd *pd, int access,
+int copy_data(struct rxe_pd *pd, int access,
	      struct rxe_dma_info *dma, void *addr, int length,
	      enum copy_direction dir, u32 *crcp);
}
}
-static int rxe_mem_alloc(struct rxe_dev *rxe, struct rxe_mem *mem, int num_buf)
+static int rxe_mem_alloc(struct rxe_mem *mem, int num_buf)
{
	int i;
	int num_map;
	return -ENOMEM;
}
-int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd,
+int rxe_mem_init_dma(struct rxe_pd *pd,
		     int access, struct rxe_mem *mem)
{
	rxe_mem_init(access, mem);
	return 0;
}
-int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
+int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
		      u64 length, u64 iova, int access, struct ib_udata *udata,
		      struct rxe_mem *mem)
{
	rxe_mem_init(access, mem);

-	err = rxe_mem_alloc(rxe, mem, num_buf);
+	err = rxe_mem_alloc(mem, num_buf);
	if (err) {
		pr_warn("err %d from rxe_mem_alloc\n", err);
		ib_umem_release(umem);
		return err;
	}
-int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd,
+int rxe_mem_init_fast(struct rxe_pd *pd,
		      int max_pages, struct rxe_mem *mem)
{
	int err;

	/* In fastreg, we also set the rkey */
	mem->ibmr.rkey = mem->ibmr.lkey;

-	err = rxe_mem_alloc(rxe, mem, max_pages);
+	err = rxe_mem_alloc(mem, max_pages);
	if (err)
		goto err1;
 * under the control of a dma descriptor
 */
int copy_data(
-	struct rxe_dev *rxe,
	struct rxe_pd *pd,
	int access,
	struct rxe_dma_info *dma,
			wqe->dma.resid -= paylen;
			wqe->dma.sge_offset += paylen;
		} else {
-			err = copy_data(rxe, qp->pd, 0, &wqe->dma,
+			err = copy_data(qp->pd, 0, &wqe->dma,
					payload_addr(pkt), paylen,
					from_mem_obj,
					&crc);
				     int data_len)
{
	int err;
-	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

-	err = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
+	err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
			data_addr, data_len, to_mem_obj, NULL);
	if (unlikely(err))
		return (err == -ENOSPC) ? RESPST_ERR_LENGTH
	rxe_add_ref(pd);

-	err = rxe_mem_init_dma(rxe, pd, access, mr);
+	err = rxe_mem_init_dma(pd, access, mr);
	if (err)
		goto err2;
	rxe_add_ref(pd);

-	err = rxe_mem_init_user(rxe, pd, start, length, iova,
+	err = rxe_mem_init_user(pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;
	rxe_add_ref(pd);

-	err = rxe_mem_init_fast(rxe, pd, max_num_sg, mr);
+	err = rxe_mem_init_fast(pd, max_num_sg, mr);
	if (err)
		goto err2;
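
For reference, a minimal sketch (not part of the patch) of how one of these helpers could still reach the device without the extra argument, by deriving it from the protection domain. It assumes the rxe driver's to_rdev() accessor and the ibpd member of struct rxe_pd; the helper name rxe_from_pd is hypothetical.

/* Illustrative sketch only -- not part of this patch.
 * Assumes struct rxe_pd embeds its struct ib_pd as "ibpd" and that
 * to_rdev() (from rxe_verbs.h) maps an ib_device back to its rxe_dev.
 */
static inline struct rxe_dev *rxe_from_pd(struct rxe_pd *pd)
{
	/* The device is reachable through the PD, so these helpers do not
	 * need a separate struct rxe_dev * parameter.
	 */
	return to_rdev(pd->ibpd.device);
}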