rds: rdma: update rdma transport for tos
author    Santosh Shilimkar <santosh.shilimkar@oracle.com>
Sat, 13 Oct 2018 14:13:23 +0000 (22:13 +0800)
committer Santosh Shilimkar <santosh.shilimkar@oracle.com>
Mon, 4 Feb 2019 22:59:13 +0000 (14:59 -0800)
For RDMA transports, RDS TOS is an extension of IB QoS (Annex A13)
that gives clients the ability to segregate traffic flows for
different types of data. RDMA CM abstracts it for ULPs via
rdma_set_service_type(). Internally, each traffic flow is
represented by a connection with all of its independent resources,
just like a normal connection, and is differentiated by the
service type. In other words, there can be multiple QP connections
between an IP pair, and each supports a unique service type.
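
A minimal sketch, assuming a ULP that drives its own rdma_cm_id (the
helper name and its timeout parameter are made up for illustration);
the rdma_transport.c hunk below applies the same pattern from its
RDMA_CM_EVENT_ADDR_RESOLVED handler:

    #include <rdma/rdma_cm.h>

    /* Tag the cm_id with the connection's TOS before resolving the
     * route, so the resulting path (and hence the SL) matches the
     * requested service type.
     */
    static int example_resolve_route_with_tos(struct rdma_cm_id *cm_id,
                                              u8 tos,
                                              unsigned long timeout_ms)
    {
            rdma_set_service_type(cm_id, tos);
            return rdma_resolve_route(cm_id, timeout_ms);
    }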

The feature has been added from RDSv4.1 onwards and supports
rolling upgrades. The RDMA connection metadata also carries the
tos information so that the SL can be set up end to end. The
original code was developed by Bang Nguyen in the downstream
kernel back in the 2.6.32 days, and it has evolved over time.

Reviewed-by: Sowmini Varadhan <sowmini.varadhan@oracle.com>
Signed-off-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
[yanjun.zhu@oracle.com: Adapted original patch with ipv6 changes]
Signed-off-by: Zhu Yanjun <yanjun.zhu@oracle.com>
net/rds/ib.h
net/rds/ib_cm.c
net/rds/ib_recv.c
net/rds/ib_send.c
net/rds/rdma_transport.c
net/rds/send.c

diff --git a/net/rds/ib.h b/net/rds/ib.h
index 71ff356ee7020d881b35d71747076f28530cb5e7..752f92235a3800c1b5d89ffa3ae678d5e744cb77 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -67,7 +67,9 @@ struct rds_ib_conn_priv_cmn {
        u8                      ricpc_protocol_major;
        u8                      ricpc_protocol_minor;
        __be16                  ricpc_protocol_minor_mask;      /* bitmask */
-       __be32                  ricpc_reserved1;
+       u8                      ricpc_dp_toss;
+       u8                      ripc_reserved1;
+       __be16                  ripc_reserved2;
        __be64                  ricpc_ack_seq;
        __be32                  ricpc_credit;   /* non-zero enables flow ctl */
 };
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 70518e329a9e583d275323b81010c0d245a2e4f6..66c6eb56072b712015fdaebb3dfc8af17408d91e 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -144,9 +144,9 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
                }
        }
 
-       pr_notice("RDS/IB: %s conn connected <%pI6c,%pI6c> version %u.%u%s\n",
+       pr_notice("RDS/IB: %s conn connected <%pI6c,%pI6c,%d> version %u.%u%s\n",
                  ic->i_active_side ? "Active" : "Passive",
-                 &conn->c_laddr, &conn->c_faddr,
+                 &conn->c_laddr, &conn->c_faddr, conn->c_tos,
                  RDS_PROTOCOL_MAJOR(conn->c_version),
                  RDS_PROTOCOL_MINOR(conn->c_version),
                  ic->i_flowctl ? ", flow control" : "");
@@ -222,6 +222,7 @@ static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
                            cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
                        dp->ricp_v6.dp_ack_seq =
                            cpu_to_be64(rds_ib_piggyb_ack(ic));
+                       dp->ricp_v6.dp_cmn.ricpc_dp_toss = conn->c_tos;
 
                        conn_param->private_data = &dp->ricp_v6;
                        conn_param->private_data_len = sizeof(dp->ricp_v6);
@@ -236,6 +237,7 @@ static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
                            cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
                        dp->ricp_v4.dp_ack_seq =
                            cpu_to_be64(rds_ib_piggyb_ack(ic));
+                       dp->ricp_v4.dp_cmn.ricpc_dp_toss = conn->c_tos;
 
                        conn_param->private_data = &dp->ricp_v4;
                        conn_param->private_data_len = sizeof(dp->ricp_v4);
@@ -391,10 +393,9 @@ static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
                rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
                break;
        default:
-               rdsdebug("Fatal QP Event %u (%s) "
-                       "- connection %pI6c->%pI6c, reconnecting\n",
-                       event->event, ib_event_msg(event->event),
-                       &conn->c_laddr, &conn->c_faddr);
+               rdsdebug("Fatal QP Event %u (%s) - connection %pI6c->%pI6c, reconnecting\n",
+                        event->event, ib_event_msg(event->event),
+                        &conn->c_laddr, &conn->c_faddr);
                rds_conn_drop(conn);
                break;
        }
@@ -662,11 +663,11 @@ static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event, bool isv6)
 
        /* Even if len is crap *now* I still want to check it. -ASG */
        if (event->param.conn.private_data_len < data_len || major == 0)
-               return RDS_PROTOCOL_3_0;
+               return RDS_PROTOCOL_4_0;
 
        common = be16_to_cpu(mask) & RDS_IB_SUPPORTED_PROTOCOLS;
-       if (major == 3 && common) {
-               version = RDS_PROTOCOL_3_0;
+       if (major == 4 && common) {
+               version = RDS_PROTOCOL_4_0;
                while ((common >>= 1) != 0)
                        version++;
        } else if (RDS_PROTOCOL_COMPAT_VERSION ==
@@ -778,15 +779,16 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
                daddr6 = &d_mapped_addr;
        }
 
-       rdsdebug("saddr %pI6c daddr %pI6c RDSv%u.%u lguid 0x%llx fguid "
-                "0x%llx\n", saddr6, daddr6,
-                RDS_PROTOCOL_MAJOR(version), RDS_PROTOCOL_MINOR(version),
+       rdsdebug("saddr %pI6c daddr %pI6c RDSv%u.%u lguid 0x%llx fguid 0x%llx, tos:%d\n",
+                saddr6, daddr6, RDS_PROTOCOL_MAJOR(version),
+                RDS_PROTOCOL_MINOR(version),
                 (unsigned long long)be64_to_cpu(lguid),
-                (unsigned long long)be64_to_cpu(fguid));
+                (unsigned long long)be64_to_cpu(fguid), dp_cmn->ricpc_dp_toss);
 
        /* RDS/IB is not currently netns aware, thus init_net */
        conn = rds_conn_create(&init_net, daddr6, saddr6,
-                              &rds_ib_transport, 0, GFP_KERNEL, ifindex);
+                              &rds_ib_transport, dp_cmn->ricpc_dp_toss,
+                              GFP_KERNEL, ifindex);
        if (IS_ERR(conn)) {
                rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
                conn = NULL;
@@ -868,7 +870,7 @@ int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id, bool isv6)
 
        /* If the peer doesn't do protocol negotiation, we must
         * default to RDSv3.0 */
-       rds_ib_set_protocol(conn, RDS_PROTOCOL_VERSION);
+       rds_ib_set_protocol(conn, RDS_PROTOCOL_4_1);
        ic->i_flowctl = rds_ib_sysctl_flow_control;     /* advertise flow control */
 
        ret = rds_ib_setup_qp(conn);
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 2f16146e4ec94e580b26bf6e4e12395a6d882a4b..d395eec98959e1a6c6d16d8388e629bda0a02b17 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -986,9 +986,9 @@ void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
        } else {
                /* We expect errors as the qp is drained during shutdown */
                if (rds_conn_up(conn) || rds_conn_connecting(conn))
-                       rds_ib_conn_error(conn, "recv completion on <%pI6c,%pI6c> had status %u (%s), disconnecting and reconnecting\n",
+                       rds_ib_conn_error(conn, "recv completion on <%pI6c,%pI6c, %d> had status %u (%s), disconnecting and reconnecting\n",
                                          &conn->c_laddr, &conn->c_faddr,
-                                         wc->status,
+                                         conn->c_tos, wc->status,
                                          ib_wc_status_msg(wc->status));
        }
 
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 4e0c36acf86604dd793218e3053076d16a36f6c7..09c46f2e97fad8f7139348b5f8c2ec1ad526968a 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -305,8 +305,9 @@ void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
 
        /* We expect errors as the qp is drained during shutdown */
        if (wc->status != IB_WC_SUCCESS && rds_conn_up(conn)) {
-               rds_ib_conn_error(conn, "send completion on <%pI6c,%pI6c> had status %u (%s), disconnecting and reconnecting\n",
-                                 &conn->c_laddr, &conn->c_faddr, wc->status,
+               rds_ib_conn_error(conn, "send completion on <%pI6c,%pI6c,%d> had status %u (%s), disconnecting and reconnecting\n",
+                                 &conn->c_laddr, &conn->c_faddr,
+                                 conn->c_tos, wc->status,
                                  ib_wc_status_msg(wc->status));
        }
 }
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index e37f91537d297c1c5ba7425a11888fab7f827e37..46bce8389066017836cdc71a9a26a69345bd880f 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -83,6 +83,7 @@ static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
                break;
 
        case RDMA_CM_EVENT_ADDR_RESOLVED:
+               rdma_set_service_type(cm_id, conn->c_tos);
                /* XXX do we need to clean up if this fails? */
                ret = rdma_resolve_route(cm_id,
                                         RDS_RDMA_RESOLVE_TIMEOUT_MS);
diff --git a/net/rds/send.c b/net/rds/send.c
index c555e121b908bad328263abba739e80a096e5229..166dd578c1cc9ee2cfb90776ceca593cdeaf40d7 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -1277,12 +1277,13 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 
        /* rds_conn_create has a spinlock that runs with IRQ off.
         * Caching the conn in the socket helps a lot. */
-       if (rs->rs_conn && ipv6_addr_equal(&rs->rs_conn->c_faddr, &daddr)) {
+       if (rs->rs_conn && ipv6_addr_equal(&rs->rs_conn->c_faddr, &daddr) &&
+           rs->rs_tos == rs->rs_conn->c_tos) {
                conn = rs->rs_conn;
        } else {
                conn = rds_conn_create_outgoing(sock_net(sock->sk),
                                                &rs->rs_bound_addr, &daddr,
-                                               rs->rs_transport, 0,
+                                               rs->rs_transport, rs->rs_tos,
                                                sock->sk->sk_allocation,
                                                scope_id);
                if (IS_ERR(conn)) {