__u64 indications[IEEE_8021QAZ_MAX_TCS];
};
+/* CEE DCBX std supported values */
+#define CEE_DCBX_MAX_PGS 8
+#define CEE_DCBX_MAX_PRIO 8
+
+/**
+ * struct cee_pg - CEE Priority-Group managed object
+ *
+ * @willing: willing bit in the PG tlv
+ * @error: error bit in the PG tlv
+ * @pg_en: enable bit of the PG feature
+ * @tcs_supported: number of traffic classes supported
+ * @pg_bw: bandwidth percentage for each priority group
+ * @prio_pg: priority to PG mapping indexed by priority
+ */
+struct cee_pg {
+ __u8 willing;
+ __u8 error;
+ __u8 pg_en;
+ __u8 tcs_supported;
+ __u8 pg_bw[CEE_DCBX_MAX_PGS];
+ __u8 prio_pg[CEE_DCBX_MAX_PGS];
+};
+
+/**
+ * struct cee_pfc - CEE PFC managed object
+ *
+ * @willing: willing bit in the PFC tlv
+ * @error: error bit in the PFC tlv
+ * @pfc_en: bitmap indicating pfc enabled traffic classes
+ * @tcs_supported: number of traffic classes supported
+ */
+struct cee_pfc {
+ __u8 willing;
+ __u8 error;
+ __u8 pfc_en;
+ __u8 tcs_supported;
+};
+
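For orientation only (not part of this patch): a driver that advertises cee_peer_getpg/cee_peer_getpfc in its dcbnl_rtnl_ops is expected to fill these objects with the peer's negotiated CEE state, which dcbnl_cee_get() below copies into the netlink reply. A minimal sketch, assuming a hypothetical mydrv driver that already caches the peer PG TLV in its private data (all mydrv_* names are illustrative):

	/* Hypothetical driver state caching the peer's CEE PG TLV. */
	struct mydrv_priv {
		u8 peer_pg_willing;
		u8 peer_pg_error;
		u8 peer_pg_enabled;
		u8 peer_num_tcs;
		u8 peer_pg_bw[CEE_DCBX_MAX_PGS];
		u8 peer_prio_pg[CEE_DCBX_MAX_PGS];
	};

	static int mydrv_cee_peer_getpg(struct net_device *dev, struct cee_pg *pg)
	{
		struct mydrv_priv *priv = netdev_priv(dev);
		int i;

		memset(pg, 0, sizeof(*pg));
		pg->willing = priv->peer_pg_willing;
		pg->error = priv->peer_pg_error;
		pg->pg_en = priv->peer_pg_enabled;
		pg->tcs_supported = priv->peer_num_tcs;
		for (i = 0; i < CEE_DCBX_MAX_PGS; i++) {
			pg->pg_bw[i] = priv->peer_pg_bw[i];	/* % of link bandwidth */
			pg->prio_pg[i] = priv->peer_prio_pg[i];	/* priority -> PG */
		}
		return 0;	/* non-zero suppresses the attribute in the reply */
	}

The matching cee_peer_getpfc callback would fill struct cee_pfc the same way; dcbnl_cee_get() checks each hook individually, so a driver may implement either or both.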
/* This structure contains the IEEE 802.1Qaz APP managed object. The
 * same object is also used for the CEE std; there is no difference
 * between the two.
* @DCB_CMD_SDCBX: set DCBX engine configuration
* @DCB_CMD_GFEATCFG: get DCBX features flags
* @DCB_CMD_SFEATCFG: set DCBX features negotiation flags
+ * @DCB_CMD_CEE_GET: get CEE aggregated configuration
*/
enum dcbnl_commands {
DCB_CMD_UNDEFINED,
DCB_CMD_GFEATCFG,
DCB_CMD_SFEATCFG,
+ DCB_CMD_CEE_GET,
+
__DCB_CMD_ENUM_MAX,
DCB_CMD_MAX = __DCB_CMD_ENUM_MAX - 1,
};
* @DCB_ATTR_IEEE: IEEE 802.1Qaz supported attributes (NLA_NESTED)
* @DCB_ATTR_DCBX: DCBX engine configuration in the device (NLA_U8)
* @DCB_ATTR_FEATCFG: DCBX features flags (NLA_NESTED)
+ * @DCB_ATTR_CEE: CEE std supported attributes (NLA_NESTED)
*/
enum dcbnl_attrs {
DCB_ATTR_UNDEFINED,
DCB_ATTR_DCBX,
DCB_ATTR_FEATCFG,
+ /* CEE nested attributes */
+ DCB_ATTR_CEE,
+
__DCB_ATTR_ENUM_MAX,
DCB_ATTR_MAX = __DCB_ATTR_ENUM_MAX - 1,
};
};
#define DCB_ATTR_IEEE_APP_MAX (__DCB_ATTR_IEEE_APP_MAX - 1)
+/**
+ * enum cee_attrs - CEE DCBX get attributes
+ *
+ * @DCB_ATTR_CEE_UNSPEC: unspecified
+ * @DCB_ATTR_CEE_PEER_PG: peer PG configuration - get only
+ * @DCB_ATTR_CEE_PEER_PFC: peer PFC configuration - get only
+ * @DCB_ATTR_CEE_PEER_APP_TABLE: nested peer APP tlvs - get only
+ */
+enum cee_attrs {
+ DCB_ATTR_CEE_UNSPEC,
+ DCB_ATTR_CEE_PEER_PG,
+ DCB_ATTR_CEE_PEER_PFC,
+ DCB_ATTR_CEE_PEER_APP_TABLE,
+ __DCB_ATTR_CEE_MAX
+};
+#define DCB_ATTR_CEE_MAX (__DCB_ATTR_CEE_MAX - 1)
+
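+/**
+ * enum peer_app_attr - CEE peer APP table nested attributes
+ *
+ * @DCB_ATTR_CEE_PEER_APP_UNSPEC: unspecified
+ * @DCB_ATTR_CEE_PEER_APP_INFO: peer APP table info (struct dcb_peer_app_info)
+ * @DCB_ATTR_CEE_PEER_APP: a single peer APP entry (struct dcb_app)
+ */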
+enum peer_app_attr {
+ DCB_ATTR_CEE_PEER_APP_UNSPEC,
+ DCB_ATTR_CEE_PEER_APP_INFO,
+ DCB_ATTR_CEE_PEER_APP,
+ __DCB_ATTR_CEE_PEER_APP_MAX
+};
+#define DCB_ATTR_CEE_PEER_APP_MAX (__DCB_ATTR_CEE_PEER_APP_MAX - 1)
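
Inside DCB_ATTR_CEE_PEER_APP_TABLE, dcbnl_build_peer_app() (below) emits one DCB_ATTR_CEE_PEER_APP_INFO attribute carrying a struct dcb_peer_app_info, followed by one DCB_ATTR_CEE_PEER_APP attribute per struct dcb_app entry. A kernel-style sketch of walking such a nest, for illustration only (example_walk_peer_app is not part of this patch; length and error checks are omitted):

	static void example_walk_peer_app(const struct nlattr *table)
	{
		struct nlattr *attr;
		int rem;

		nla_for_each_nested(attr, table, rem) {
			switch (nla_type(attr)) {
			case DCB_ATTR_CEE_PEER_APP_INFO: {
				struct dcb_peer_app_info *info = nla_data(attr);

				pr_debug("peer APP table: willing %u error %u\n",
					 info->willing, info->error);
				break;
			}
			case DCB_ATTR_CEE_PEER_APP: {
				struct dcb_app *app = nla_data(attr);

				pr_debug("peer APP: sel %u proto %u prio %u\n",
					 app->selector, app->protocol,
					 app->priority);
				break;
			}
			}
		}
	}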
+
/**
* enum dcbnl_pfc_attrs - DCB Priority Flow Control user priority nested attrs
*
return err;
}
-static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb)
+static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb,
+ int app_nested_type, int app_info_type,
+ int app_entry_type)
{
struct dcb_peer_app_info info;
struct dcb_app *table = NULL;
*/
err = -EMSGSIZE;
- app = nla_nest_start(skb, DCB_ATTR_IEEE_PEER_APP);
+ app = nla_nest_start(skb, app_nested_type);
if (!app)
goto nla_put_failure;
+ if (app_info_type)
+ NLA_PUT(skb, app_info_type, sizeof(info), &info);
+
for (i = 0; i < app_count; i++)
- NLA_PUT(skb, DCB_ATTR_IEEE_APP, sizeof(struct dcb_app),
+ NLA_PUT(skb, app_entry_type, sizeof(struct dcb_app),
&table[i]);
nla_nest_end(skb, app);
}
if (ops->peer_getappinfo && ops->peer_getapptable) {
- err = dcbnl_build_peer_app(netdev, skb);
+ err = dcbnl_build_peer_app(netdev, skb,
+ DCB_ATTR_IEEE_PEER_APP,
+ DCB_ATTR_IEEE_APP_UNSPEC,
+ DCB_ATTR_IEEE_APP);
if (err)
goto nla_put_failure;
}
return ret;
}
+/* Handle CEE DCBX GET commands. */
+static int dcbnl_cee_get(struct net_device *netdev, struct nlattr **tb,
+ u32 pid, u32 seq, u16 flags)
+{
+ struct sk_buff *skb;
+ struct nlmsghdr *nlh;
+ struct dcbmsg *dcb;
+ struct nlattr *cee;
+ const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
+ int err;
+
+ if (!ops)
+ return -EOPNOTSUPP;
+
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOBUFS;
+
+ nlh = NLMSG_NEW(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
+
+ dcb = NLMSG_DATA(nlh);
+ dcb->dcb_family = AF_UNSPEC;
+ dcb->cmd = DCB_CMD_CEE_GET;
+
+ NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);
+
+ cee = nla_nest_start(skb, DCB_ATTR_CEE);
+ if (!cee)
+ goto nla_put_failure;
+
+ /* get peer info if available */
+ if (ops->cee_peer_getpg) {
+ struct cee_pg pg;
+ err = ops->cee_peer_getpg(netdev, &pg);
+ if (!err)
+ NLA_PUT(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg);
+ }
+
+ if (ops->cee_peer_getpfc) {
+ struct cee_pfc pfc;
+ err = ops->cee_peer_getpfc(netdev, &pfc);
+ if (!err)
+ NLA_PUT(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc);
+ }
+
+ if (ops->peer_getappinfo && ops->peer_getapptable) {
+ err = dcbnl_build_peer_app(netdev, skb,
+ DCB_ATTR_CEE_PEER_APP_TABLE,
+ DCB_ATTR_CEE_PEER_APP_INFO,
+ DCB_ATTR_CEE_PEER_APP);
+ if (err)
+ goto nla_put_failure;
+ }
+
+ nla_nest_end(skb, cee);
+ nlmsg_end(skb, nlh);
+
+ return rtnl_unicast(skb, &init_net, pid);
+nla_put_failure:
+ nlmsg_cancel(skb, nlh);
+nlmsg_failure:
+ kfree_skb(skb);
+ return -1;
+}
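
For completeness (illustrative only, not part of the patch): userspace reaches this path with an RTM_GETDCB message whose struct dcbmsg cmd is DCB_CMD_CEE_GET, carrying a DCB_ATTR_IFNAME attribute that names the device. A minimal raw-netlink sketch; cee_get_request() is just an example name and receiving/parsing the DCB_ATTR_CEE reply is omitted:

	#include <string.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>
	#include <linux/rtnetlink.h>
	#include <linux/dcbnl.h>

	static int cee_get_request(const char *ifname)
	{
		struct {
			struct nlmsghdr nlh;
			struct dcbmsg dcb;
			char buf[64];	/* room for the DCB_ATTR_IFNAME attribute */
		} req;
		struct rtattr *rta;
		int fd, ret;

		memset(&req, 0, sizeof(req));
		req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct dcbmsg));
		req.nlh.nlmsg_type = RTM_GETDCB;
		req.nlh.nlmsg_flags = NLM_F_REQUEST;
		req.dcb.dcb_family = AF_UNSPEC;
		req.dcb.cmd = DCB_CMD_CEE_GET;

		/* append DCB_ATTR_IFNAME so the kernel knows which device to report */
		rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
		rta->rta_type = DCB_ATTR_IFNAME;
		rta->rta_len = RTA_LENGTH(strlen(ifname) + 1);
		memcpy(RTA_DATA(rta), ifname, strlen(ifname) + 1);
		req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) +
				    RTA_ALIGN(rta->rta_len);

		fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
		if (fd < 0)
			return -1;
		ret = send(fd, &req, req.nlh.nlmsg_len, 0);
		close(fd);
		/* the unicast reply (a DCB_ATTR_CEE nest) would be recv()'d here */
		return ret < 0 ? -1 : 0;
	}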
+
static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
struct net *net = sock_net(skb->sk);
ret = dcbnl_setfeatcfg(netdev, tb, pid, nlh->nlmsg_seq,
nlh->nlmsg_flags);
goto out;
+ case DCB_CMD_CEE_GET:
+ ret = dcbnl_cee_get(netdev, tb, pid, nlh->nlmsg_seq,
+ nlh->nlmsg_flags);
+ goto out;
default:
goto errout;
}