staging/rdma/hfi1: add common routine for queuing acks
authorMike Marciniszyn <mike.marciniszyn@intel.com>
Tue, 10 Nov 2015 14:13:55 +0000 (09:13 -0500)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 20 Nov 2015 00:58:18 +0000 (16:58 -0800)
This patch is a preliminary patch required to
coalesce acks.

The routine to "schedule" a QP for sending a NAK is
now centralized in rc_defered_ack().  The flag is changed
for clarity since all acks will potentially use
the deferral mechanism.

Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/staging/rdma/hfi1/driver.c
drivers/staging/rdma/hfi1/rc.c
drivers/staging/rdma/hfi1/verbs.h

index 72ab5e145f495f4e4ca908e7fec7852396430728..487d58778d700086bfba63e158cdbb0fbe8a79e5 100644 (file)
@@ -714,8 +714,8 @@ static inline void process_rcv_qp_work(struct hfi1_packet *packet)
         */
        list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
                list_del_init(&qp->rspwait);
-               if (qp->r_flags & HFI1_R_RSP_NAK) {
-                       qp->r_flags &= ~HFI1_R_RSP_NAK;
+               if (qp->r_flags & HFI1_R_RSP_DEFERED_ACK) {
+                       qp->r_flags &= ~HFI1_R_RSP_DEFERED_ACK;
                        hfi1_send_rc_ack(rcd, qp, 0);
                }
                if (qp->r_flags & HFI1_R_RSP_SEND) {
index fd23907f18fed23de1a1b1f11c2f4ae611af4478..0c10012cc397f0425979bf936f22f442f38ae10e 100644 (file)
@@ -1608,6 +1608,16 @@ bail:
        return;
 }
 
+static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
+                                 struct hfi1_qp *qp)
+{
+       if (list_empty(&qp->rspwait)) {
+               qp->r_flags |= HFI1_R_RSP_DEFERED_ACK;
+               atomic_inc(&qp->refcount);
+               list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+       }
+}
+
 /**
  * rc_rcv_error - process an incoming duplicate or error RC packet
  * @ohdr: the other headers for this packet
@@ -1650,11 +1660,7 @@ static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data,
                         * in the receive queue have been processed.
                         * Otherwise, we end up propagating congestion.
                         */
-                       if (list_empty(&qp->rspwait)) {
-                               qp->r_flags |= HFI1_R_RSP_NAK;
-                               atomic_inc(&qp->refcount);
-                               list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
-                       }
+                       rc_defered_ack(rcd, qp);
                }
                goto done;
        }
@@ -2337,11 +2343,7 @@ rnr_nak:
        qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
        qp->r_ack_psn = qp->r_psn;
        /* Queue RNR NAK for later */
-       if (list_empty(&qp->rspwait)) {
-               qp->r_flags |= HFI1_R_RSP_NAK;
-               atomic_inc(&qp->refcount);
-               list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
-       }
+       rc_defered_ack(rcd, qp);
        return;
 
 nack_op_err:
@@ -2349,11 +2351,7 @@ nack_op_err:
        qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
        qp->r_ack_psn = qp->r_psn;
        /* Queue NAK for later */
-       if (list_empty(&qp->rspwait)) {
-               qp->r_flags |= HFI1_R_RSP_NAK;
-               atomic_inc(&qp->refcount);
-               list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
-       }
+       rc_defered_ack(rcd, qp);
        return;
 
 nack_inv_unlck:
@@ -2363,11 +2361,7 @@ nack_inv:
        qp->r_nak_state = IB_NAK_INVALID_REQUEST;
        qp->r_ack_psn = qp->r_psn;
        /* Queue NAK for later */
-       if (list_empty(&qp->rspwait)) {
-               qp->r_flags |= HFI1_R_RSP_NAK;
-               atomic_inc(&qp->refcount);
-               list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
-       }
+       rc_defered_ack(rcd, qp);
        return;
 
 nack_acc_unlck:
@@ -2421,13 +2415,7 @@ void hfi1_rc_hdrerr(
                         * Otherwise, we end up
                         * propagating congestion.
                         */
-                       if (list_empty(&qp->rspwait)) {
-                               qp->r_flags |= HFI1_R_RSP_NAK;
-                               atomic_inc(&qp->refcount);
-                               list_add_tail(
-                                       &qp->rspwait,
-                                       &rcd->qp_wait_list);
-                               }
+                       rc_defered_ack(rcd, qp);
                } /* Out of sequence NAK */
        } /* QP Request NAKs */
 }
index fdbe0f9d5f3121831be43439b16a83e21076519e..6a49a3ca96b457bbd445a751893f645ac53f4102 100644 (file)
@@ -553,11 +553,13 @@ struct hfi1_qp {
 /*
  * Bit definitions for r_flags.
  */
-#define HFI1_R_REUSE_SGE 0x01
-#define HFI1_R_RDMAR_SEQ 0x02
-#define HFI1_R_RSP_NAK   0x04
-#define HFI1_R_RSP_SEND  0x08
-#define HFI1_R_COMM_EST  0x10
+#define HFI1_R_REUSE_SGE       0x01
+#define HFI1_R_RDMAR_SEQ       0x02
+/* defer ack until end of interrupt session */
+#define HFI1_R_RSP_DEFERED_ACK 0x04
+/* relay ack to send engine */
+#define HFI1_R_RSP_SEND        0x08
+#define HFI1_R_COMM_EST        0x10
 
 /*
  * Bit definitions for s_flags.