), \
TP_ARGS(wc, frwr))
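+
+/*
+ * A class for events that record the state of an MR: the MR's
+ * kernel address, plus the handle (rkey), length, and offset of
+ * the memory region it maps.
+ */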
+DECLARE_EVENT_CLASS(xprtrdma_mr,
+ TP_PROTO(
+ const struct rpcrdma_mr *mr
+ ),
+
+ TP_ARGS(mr),
+
+ TP_STRUCT__entry(
+ __field(const void *, mr)
+ __field(u32, handle)
+ __field(u32, length)
+ __field(u64, offset)
+ ),
+
+ TP_fast_assign(
+ __entry->mr = mr;
+ __entry->handle = mr->mr_handle;
+ __entry->length = mr->mr_length;
+ __entry->offset = mr->mr_offset;
+ ),
+
+ TP_printk("mr=%p %u@0x%016llx:0x%08x",
+ __entry->mr, __entry->length,
+ (unsigned long long)__entry->offset,
+ __entry->handle
+ )
+);
+
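+/* Stamp out individual MR events, all sharing the xprtrdma_mr
+ * event class above.
+ */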
+#define DEFINE_MR_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_mr, name, \
+ TP_PROTO( \
+ const struct rpcrdma_mr *mr \
+ ), \
+ TP_ARGS(mr))
+
/**
** Call events
**/
);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
+DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
+DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
+
+DEFINE_MR_EVENT(xprtrdma_localinv);
+DEFINE_MR_EVENT(xprtrdma_dma_unmap);
+DEFINE_MR_EVENT(xprtrdma_remoteinv);
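+
+/*
+ * These events can be enabled at runtime via tracefs. A minimal
+ * sketch, assuming tracefs is mounted at /sys/kernel/tracing and
+ * this header's TRACE_SYSTEM is "rpcrdma":
+ *
+ *   echo 1 > /sys/kernel/tracing/events/rpcrdma/xprtrdma_localinv/enable
+ *
+ * Given the TP_printk() format above, a record then looks like
+ * (hypothetical values):
+ *
+ *   xprtrdma_localinv: mr=ffff8880361fe400 4096@0x000000009a45bd00:0x00001234
+ */
+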
/**
** Reply events
pr_err("rpcrdma: FMR reset failed (%d), %p released\n", rc, mr);
r_xprt->rx_stats.mrs_orphaned++;
+ trace_xprtrdma_dma_unmap(mr);
ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
mr->mr_sg, mr->mr_nents, mr->mr_dir);
list_for_each_entry(mr, mrs, mr_list) {
dprintk("RPC: %s: unmapping fmr %p\n",
__func__, &mr->fmr);
+ trace_xprtrdma_localinv(mr);
list_add_tail(&mr->fmr.fm_mr->list, &unmap_list);
}
r_xprt->rx_stats.local_inv_needed++;
*/
while (!list_empty(mrs)) {
mr = rpcrdma_mr_pop(mrs);
- dprintk("RPC: %s: DMA unmapping fmr %p\n",
- __func__, &mr->fmr);
list_del(&mr->fmr.fm_mr->list);
rpcrdma_mr_unmap_and_put(mr);
}
int rc;
rc = __frwr_mr_reset(ia, mr);
- if (state != FRWR_FLUSHED_LI)
+ if (state != FRWR_FLUSHED_LI) {
+ trace_xprtrdma_dma_unmap(mr);
ib_dma_unmap_sg(ia->ri_device,
mr->mr_sg, mr->mr_nents, mr->mr_dir);
+ }
if (rc)
goto out_release;
static void
frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
- struct rpcrdma_frwr *frwr;
- struct ib_cqe *cqe;
+ struct ib_cqe *cqe = wc->wr_cqe;
+ struct rpcrdma_frwr *frwr = container_of(cqe, struct rpcrdma_frwr,
+ fr_cqe);
/* WARNING: Only wr_cqe and status are reliable at this point */
if (wc->status != IB_WC_SUCCESS) {
- cqe = wc->wr_cqe;
- frwr = container_of(cqe, struct rpcrdma_frwr, fr_cqe);
frwr->fr_state = FRWR_FLUSHED_LI;
__frwr_sendcompletion_flush(wc, "localinv");
}
+ trace_xprtrdma_wc_li(wc, frwr);
}
/**
static void
frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
- struct rpcrdma_frwr *frwr;
- struct ib_cqe *cqe;
+ struct ib_cqe *cqe = wc->wr_cqe;
+ struct rpcrdma_frwr *frwr = container_of(cqe, struct rpcrdma_frwr,
+ fr_cqe);
/* WARNING: Only wr_cqe and status are reliable at this point */
- cqe = wc->wr_cqe;
- frwr = container_of(cqe, struct rpcrdma_frwr, fr_cqe);
if (wc->status != IB_WC_SUCCESS) {
frwr->fr_state = FRWR_FLUSHED_LI;
__frwr_sendcompletion_flush(wc, "localinv");
}
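+ /* Trace before complete(): the awoken waiter may reuse or
+ * release the MR that embeds this frwr.
+ */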
+ trace_xprtrdma_wc_li_wake(wc, frwr);
complete(&frwr->fr_linv_done);
}
/* Post a REG_MR Work Request to register a memory region
list_for_each_entry(mr, mrs, mr_list)
if (mr->mr_handle == rep->rr_inv_rkey) {
list_del(&mr->mr_list);
+ trace_xprtrdma_remoteinv(mr);
mr->frwr.fr_state = FRWR_IS_INVALID;
rpcrdma_mr_unmap_and_put(mr);
break; /* only one invalidated MR per RPC */
mr->frwr.fr_state = FRWR_IS_INVALID;
frwr = &mr->frwr;
-
- dprintk("RPC: %s: invalidating frwr %p\n",
- __func__, frwr);
+ trace_xprtrdma_localinv(mr);
frwr->fr_cqe.done = frwr_wc_localinv;
last = &frwr->fr_invwr;
unmap:
while (!list_empty(mrs)) {
mr = rpcrdma_mr_pop(mrs);
- dprintk("RPC: %s: DMA unmapping frwr %p\n",
- __func__, &mr->frwr);
rpcrdma_mr_unmap_and_put(mr);
}
return;