xprtrdma: Don't clear RPC_BC_PA_IN_USE on pre-allocated rpc_rqst's
author     Chuck Lever <chuck.lever@oracle.com>
Fri, 15 Dec 2017 01:57:39 +0000 (20:57 -0500)
committer  Anna Schumaker <Anna.Schumaker@Netapp.com>
Tue, 16 Jan 2018 16:19:49 +0000 (11:19 -0500)
There is no need to pay the overhead of atomically setting and clearing
this bit flag for every use of a pre-allocated backchannel rpc_rqst.
These rpc_rqsts form a distinct pool that is used only for callback
operations, so it is safe to simply leave the bit set.
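
For illustration only (not part of the patch), here is a minimal
userspace sketch of the pattern: the struct, flag name, and helpers
below are simplified stand-ins for the kernel's rpc_rqst and bit-op
API, not the real thing.

/*
 * Minimal userspace sketch (illustrative names only): when request
 * objects live in a dedicated, pre-allocated pool that is used for
 * exactly one purpose, an "in use" flag can be set once at setup
 * time instead of being atomically toggled on every use.
 */
#include <stdatomic.h>
#include <stdio.h>

#define BC_PA_IN_USE 0x1UL

struct bc_rqst {
	atomic_ulong state;
	/* ...request payload fields elided... */
};

/* Old pattern: set and clear the flag around every use (two atomics). */
static void handle_callback_toggling(struct bc_rqst *rqst)
{
	atomic_fetch_or(&rqst->state, BC_PA_IN_USE);    /* like set_bit()   */
	/* ...process one backchannel callback... */
	atomic_fetch_and(&rqst->state, ~BC_PA_IN_USE);  /* like clear_bit() */
}

/* New pattern: set the flag once when the pool entry is created... */
static void setup_rqst(struct bc_rqst *rqst)
{
	atomic_init(&rqst->state, BC_PA_IN_USE);
}

/* ...and leave it alone on every subsequent use. */
static void handle_callback_preset(struct bc_rqst *rqst)
{
	/* ...process one backchannel callback; no per-use atomics... */
	(void)rqst;
}

int main(void)
{
	struct bc_rqst old_style, new_style;

	atomic_init(&old_style.state, 0);
	handle_callback_toggling(&old_style);

	setup_rqst(&new_style);
	handle_callback_preset(&new_style);

	printf("old-style rqst in use after call: %s\n",
	       (atomic_load(&old_style.state) & BC_PA_IN_USE) ? "yes" : "no");
	printf("new-style rqst in use after call: %s\n",
	       (atomic_load(&new_style.state) & BC_PA_IN_USE) ? "yes" : "no");
	return 0;
}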

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index 6c66a4f855075062648b8cdbc7014b6cfbc12ddd..3c7998a72191840dfe32a2fcfd2d75748f601314 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -120,6 +120,7 @@ int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
                rqst->rq_xprt = &r_xprt->rx_xprt;
                INIT_LIST_HEAD(&rqst->rq_list);
                INIT_LIST_HEAD(&rqst->rq_bc_list);
+               __set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
 
                if (rpcrdma_bc_setup_rqst(r_xprt, rqst))
                        goto out_free;
@@ -284,11 +285,6 @@ void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
        dprintk("RPC:       %s: freeing rqst %p (req %p)\n",
                __func__, rqst, rpcr_to_rdmar(rqst));
 
-       smp_mb__before_atomic();
-       WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state));
-       clear_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
-       smp_mb__after_atomic();
-
        spin_lock_bh(&xprt->bc_pa_lock);
        list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
        spin_unlock_bh(&xprt->bc_pa_lock);
@@ -343,7 +339,6 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
        rqst->rq_xid = *p;
 
        rqst->rq_private_buf.len = size;
-       set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
 
        buf = &rqst->rq_rcv_buf;
        memset(buf, 0, sizeof(*buf));