2 * Copyright (c) 2008-2011, Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 * Author: Lucy Liu <lucy.liu@intel.com>
20 #include <linux/netdevice.h>
21 #include <linux/netlink.h>
22 #include <linux/slab.h>
23 #include <net/netlink.h>
24 #include <net/rtnetlink.h>
25 #include <linux/dcbnl.h>
26 #include <net/dcbevent.h>
27 #include <linux/rtnetlink.h>
28 #include <linux/module.h>
32 * Data Center Bridging (DCB) is a collection of Ethernet enhancements
33 * intended to allow network traffic with differing requirements
34 * (highly reliable, no drops vs. best effort vs. low latency) to operate
35 * and co-exist on Ethernet. Current DCB features are:
37 * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
38 * framework for assigning bandwidth guarantees to traffic classes.
40 * Priority-based Flow Control (PFC) - provides a flow control mechanism which
41 * can work independently for each 802.1p priority.
43 * Congestion Notification - provides a mechanism for end-to-end congestion
44 * control for protocols which do not have built-in congestion management.
46 * More information about the emerging standards for these Ethernet features
47 * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
49 * This file implements an rtnetlink interface to allow configuration of DCB
50 * features for capable devices.
53 MODULE_AUTHOR("Lucy Liu, <lucy.liu@intel.com>");
54 MODULE_DESCRIPTION("Data Center Bridging netlink interface");
55 MODULE_LICENSE("GPL");
57 /**************** DCB attribute policies *************************************/
59 /* DCB netlink attributes policy */
60 static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
61 [DCB_ATTR_IFNAME] = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
62 [DCB_ATTR_STATE] = {.type = NLA_U8},
63 [DCB_ATTR_PFC_CFG] = {.type = NLA_NESTED},
64 [DCB_ATTR_PG_CFG] = {.type = NLA_NESTED},
65 [DCB_ATTR_SET_ALL] = {.type = NLA_U8},
66 [DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
67 [DCB_ATTR_CAP] = {.type = NLA_NESTED},
68 [DCB_ATTR_PFC_STATE] = {.type = NLA_U8},
69 [DCB_ATTR_BCN] = {.type = NLA_NESTED},
70 [DCB_ATTR_APP] = {.type = NLA_NESTED},
71 [DCB_ATTR_IEEE] = {.type = NLA_NESTED},
72 [DCB_ATTR_DCBX] = {.type = NLA_U8},
73 [DCB_ATTR_FEATCFG] = {.type = NLA_NESTED},
76 /* DCB priority flow control to User Priority nested attributes */
77 static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
78 [DCB_PFC_UP_ATTR_0] = {.type = NLA_U8},
79 [DCB_PFC_UP_ATTR_1] = {.type = NLA_U8},
80 [DCB_PFC_UP_ATTR_2] = {.type = NLA_U8},
81 [DCB_PFC_UP_ATTR_3] = {.type = NLA_U8},
82 [DCB_PFC_UP_ATTR_4] = {.type = NLA_U8},
83 [DCB_PFC_UP_ATTR_5] = {.type = NLA_U8},
84 [DCB_PFC_UP_ATTR_6] = {.type = NLA_U8},
85 [DCB_PFC_UP_ATTR_7] = {.type = NLA_U8},
86 [DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
89 /* DCB priority grouping nested attributes */
90 static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
91 [DCB_PG_ATTR_TC_0] = {.type = NLA_NESTED},
92 [DCB_PG_ATTR_TC_1] = {.type = NLA_NESTED},
93 [DCB_PG_ATTR_TC_2] = {.type = NLA_NESTED},
94 [DCB_PG_ATTR_TC_3] = {.type = NLA_NESTED},
95 [DCB_PG_ATTR_TC_4] = {.type = NLA_NESTED},
96 [DCB_PG_ATTR_TC_5] = {.type = NLA_NESTED},
97 [DCB_PG_ATTR_TC_6] = {.type = NLA_NESTED},
98 [DCB_PG_ATTR_TC_7] = {.type = NLA_NESTED},
99 [DCB_PG_ATTR_TC_ALL] = {.type = NLA_NESTED},
100 [DCB_PG_ATTR_BW_ID_0] = {.type = NLA_U8},
101 [DCB_PG_ATTR_BW_ID_1] = {.type = NLA_U8},
102 [DCB_PG_ATTR_BW_ID_2] = {.type = NLA_U8},
103 [DCB_PG_ATTR_BW_ID_3] = {.type = NLA_U8},
104 [DCB_PG_ATTR_BW_ID_4] = {.type = NLA_U8},
105 [DCB_PG_ATTR_BW_ID_5] = {.type = NLA_U8},
106 [DCB_PG_ATTR_BW_ID_6] = {.type = NLA_U8},
107 [DCB_PG_ATTR_BW_ID_7] = {.type = NLA_U8},
108 [DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
111 /* DCB traffic class nested attributes. */
112 static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
113 [DCB_TC_ATTR_PARAM_PGID] = {.type = NLA_U8},
114 [DCB_TC_ATTR_PARAM_UP_MAPPING] = {.type = NLA_U8},
115 [DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
116 [DCB_TC_ATTR_PARAM_BW_PCT] = {.type = NLA_U8},
117 [DCB_TC_ATTR_PARAM_ALL] = {.type = NLA_FLAG},
120 /* DCB capabilities nested attributes. */
121 static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
122 [DCB_CAP_ATTR_ALL] = {.type = NLA_FLAG},
123 [DCB_CAP_ATTR_PG] = {.type = NLA_U8},
124 [DCB_CAP_ATTR_PFC] = {.type = NLA_U8},
125 [DCB_CAP_ATTR_UP2TC] = {.type = NLA_U8},
126 [DCB_CAP_ATTR_PG_TCS] = {.type = NLA_U8},
127 [DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
128 [DCB_CAP_ATTR_GSP] = {.type = NLA_U8},
129 [DCB_CAP_ATTR_BCN] = {.type = NLA_U8},
130 [DCB_CAP_ATTR_DCBX] = {.type = NLA_U8},
133 /* DCB number of traffic classes nested attributes. */
134 static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
135 [DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG},
136 [DCB_NUMTCS_ATTR_PG] = {.type = NLA_U8},
137 [DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8},
140 /* DCB BCN nested attributes. */
141 static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
142 [DCB_BCN_ATTR_RP_0] = {.type = NLA_U8},
143 [DCB_BCN_ATTR_RP_1] = {.type = NLA_U8},
144 [DCB_BCN_ATTR_RP_2] = {.type = NLA_U8},
145 [DCB_BCN_ATTR_RP_3] = {.type = NLA_U8},
146 [DCB_BCN_ATTR_RP_4] = {.type = NLA_U8},
147 [DCB_BCN_ATTR_RP_5] = {.type = NLA_U8},
148 [DCB_BCN_ATTR_RP_6] = {.type = NLA_U8},
149 [DCB_BCN_ATTR_RP_7] = {.type = NLA_U8},
150 [DCB_BCN_ATTR_RP_ALL] = {.type = NLA_FLAG},
151 [DCB_BCN_ATTR_BCNA_0] = {.type = NLA_U32},
152 [DCB_BCN_ATTR_BCNA_1] = {.type = NLA_U32},
153 [DCB_BCN_ATTR_ALPHA] = {.type = NLA_U32},
154 [DCB_BCN_ATTR_BETA] = {.type = NLA_U32},
155 [DCB_BCN_ATTR_GD] = {.type = NLA_U32},
156 [DCB_BCN_ATTR_GI] = {.type = NLA_U32},
157 [DCB_BCN_ATTR_TMAX] = {.type = NLA_U32},
158 [DCB_BCN_ATTR_TD] = {.type = NLA_U32},
159 [DCB_BCN_ATTR_RMIN] = {.type = NLA_U32},
160 [DCB_BCN_ATTR_W] = {.type = NLA_U32},
161 [DCB_BCN_ATTR_RD] = {.type = NLA_U32},
162 [DCB_BCN_ATTR_RU] = {.type = NLA_U32},
163 [DCB_BCN_ATTR_WRTT] = {.type = NLA_U32},
164 [DCB_BCN_ATTR_RI] = {.type = NLA_U32},
165 [DCB_BCN_ATTR_C] = {.type = NLA_U32},
166 [DCB_BCN_ATTR_ALL] = {.type = NLA_FLAG},
169 /* DCB APP nested attributes. */
170 static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
171 [DCB_APP_ATTR_IDTYPE] = {.type = NLA_U8},
172 [DCB_APP_ATTR_ID] = {.type = NLA_U16},
173 [DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
176 /* IEEE 802.1Qaz nested attributes. */
177 static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
178 [DCB_ATTR_IEEE_ETS] = {.len = sizeof(struct ieee_ets)},
179 [DCB_ATTR_IEEE_PFC] = {.len = sizeof(struct ieee_pfc)},
180 [DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED},
183 static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = {
184 [DCB_ATTR_IEEE_APP] = {.len = sizeof(struct dcb_app)},
187 /* DCB feature configuration nested attributes. */
188 static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
189 [DCB_FEATCFG_ATTR_ALL] = {.type = NLA_FLAG},
190 [DCB_FEATCFG_ATTR_PG] = {.type = NLA_U8},
191 [DCB_FEATCFG_ATTR_PFC] = {.type = NLA_U8},
192 [DCB_FEATCFG_ATTR_APP] = {.type = NLA_U8},
/* Registered (ifindex, dcb_app) entries; traversals take dcb_lock. */
static LIST_HEAD(dcb_app_list);
static DEFINE_SPINLOCK(dcb_lock);
198 /* standard netlink reply call */
/*
 * dcbnl_reply - build and unicast a minimal DCB reply carrying a single
 * u8 attribute @attr = @value for command @cmd under netlink message @event.
 *
 * NOTE(review): interior lines of this function (local declarations, error
 * checks and cleanup labels) appear to have been lost from this copy of the
 * file; the comments below annotate only what is visible.
 */
199 static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid,
202 struct sk_buff *dcbnl_skb;
204 struct nlmsghdr *nlh;
/* allocate a default-sized skb for the unicast reply */
207 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
211 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, event, sizeof(*dcb), flags);
213 dcb = NLMSG_DATA(nlh);
214 dcb->dcb_family = AF_UNSPEC;
218 ret = nla_put_u8(dcbnl_skb, attr, value);
222 /* end the message, assign the nlmsg_len. */
223 nlmsg_end(dcbnl_skb, nlh);
/* send back to the requesting pid in init_net */
224 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
/* presumably an error path: free the skb on failure — TODO confirm */
231 kfree_skb(dcbnl_skb);
235 static int dcbnl_getstate(struct net_device *netdev, struct nlattr **tb,
236 u32 pid, u32 seq, u16 flags)
240 /* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
241 if (!netdev->dcbnl_ops->getstate)
244 ret = dcbnl_reply(netdev->dcbnl_ops->getstate(netdev), RTM_GETDCB,
245 DCB_CMD_GSTATE, DCB_ATTR_STATE, pid, seq, flags);
/*
 * dcbnl_getpfccfg - RTM_GETDCB DCB_CMD_PFC_GCFG: report per-user-priority
 * PFC configuration, either for the priorities requested in the nested
 * DCB_ATTR_PFC_CFG attribute or for all of them when DCB_PFC_UP_ATTR_ALL
 * is present.
 *
 * NOTE(review): interior lines (declarations, error checks, goto labels)
 * are missing from this copy; comments annotate only visible code.
 */
250 static int dcbnl_getpfccfg(struct net_device *netdev, struct nlattr **tb,
251 u32 pid, u32 seq, u16 flags)
253 struct sk_buff *dcbnl_skb;
254 struct nlmsghdr *nlh;
256 struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
/* both the request attribute and the driver hook are required */
262 if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->getpfccfg)
265 ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
266 tb[DCB_ATTR_PFC_CFG],
271 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
275 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
277 dcb = NLMSG_DATA(nlh);
278 dcb->dcb_family = AF_UNSPEC;
279 dcb->cmd = DCB_CMD_PFC_GCFG;
281 nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PFC_CFG);
/* DCB_PFC_UP_ATTR_ALL selects every priority */
285 if (data[DCB_PFC_UP_ATTR_ALL])
288 for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
289 if (!getall && !data[i])
292 netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
294 ret = nla_put_u8(dcbnl_skb, i, value)
297 nla_nest_cancel(dcbnl_skb, nest);
301 nla_nest_end(dcbnl_skb, nest);
303 nlmsg_end(dcbnl_skb, nlh);
305 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
312 kfree_skb(dcbnl_skb);
/*
 * dcbnl_getperm_hwaddr - RTM_GETDCB DCB_CMD_GPERM_HWADDR: report the
 * device's permanent hardware address via DCB_ATTR_PERM_HWADDR.
 *
 * NOTE(review): interior lines are missing from this copy; comments
 * annotate only visible code.
 */
317 static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlattr **tb,
318 u32 pid, u32 seq, u16 flags)
320 struct sk_buff *dcbnl_skb;
321 struct nlmsghdr *nlh;
323 u8 perm_addr[MAX_ADDR_LEN];
326 if (!netdev->dcbnl_ops->getpermhwaddr)
329 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
333 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
335 dcb = NLMSG_DATA(nlh);
336 dcb->dcb_family = AF_UNSPEC;
337 dcb->cmd = DCB_CMD_GPERM_HWADDR;
/* zero first so short addresses are padded deterministically */
339 memset(perm_addr, 0, sizeof(perm_addr));
340 netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);
342 ret = nla_put(dcbnl_skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr),
345 nlmsg_end(dcbnl_skb, nlh);
347 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
354 kfree_skb(dcbnl_skb);
/*
 * dcbnl_getcap - RTM_GETDCB DCB_CMD_GCAP: report the DCB capabilities
 * requested in the nested DCB_ATTR_CAP attribute (or all of them when
 * DCB_CAP_ATTR_ALL is present).
 *
 * NOTE(review): interior lines are missing from this copy; comments
 * annotate only visible code.
 */
359 static int dcbnl_getcap(struct net_device *netdev, struct nlattr **tb,
360 u32 pid, u32 seq, u16 flags)
362 struct sk_buff *dcbnl_skb;
363 struct nlmsghdr *nlh;
365 struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
371 if (!tb[DCB_ATTR_CAP] || !netdev->dcbnl_ops->getcap)
374 ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP],
379 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
383 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
385 dcb = NLMSG_DATA(nlh);
386 dcb->dcb_family = AF_UNSPEC;
387 dcb->cmd = DCB_CMD_GCAP;
389 nest = nla_nest_start(dcbnl_skb, DCB_ATTR_CAP);
393 if (data[DCB_CAP_ATTR_ALL])
396 for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) {
397 if (!getall && !data[i])
/* only emit capabilities the driver reports successfully */
400 if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
401 ret = nla_put_u8(dcbnl_skb, i, value);
404 nla_nest_cancel(dcbnl_skb, nest);
409 nla_nest_end(dcbnl_skb, nest);
411 nlmsg_end(dcbnl_skb, nlh);
413 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
420 kfree_skb(dcbnl_skb);
/*
 * dcbnl_getnumtcs - RTM_GETDCB DCB_CMD_GNUMTCS: report the number of
 * traffic classes supported for PG and/or PFC as requested in the nested
 * DCB_ATTR_NUMTCS attribute.
 *
 * NOTE(review): interior lines are missing from this copy; comments
 * annotate only visible code.
 */
425 static int dcbnl_getnumtcs(struct net_device *netdev, struct nlattr **tb,
426 u32 pid, u32 seq, u16 flags)
428 struct sk_buff *dcbnl_skb;
429 struct nlmsghdr *nlh;
431 struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
437 if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->getnumtcs)
440 ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
447 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
453 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
455 dcb = NLMSG_DATA(nlh);
456 dcb->dcb_family = AF_UNSPEC;
457 dcb->cmd = DCB_CMD_GNUMTCS;
459 nest = nla_nest_start(dcbnl_skb, DCB_ATTR_NUMTCS);
465 if (data[DCB_NUMTCS_ATTR_ALL])
468 for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
469 if (!getall && !data[i])
472 ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
474 ret = nla_put_u8(dcbnl_skb, i, value);
477 nla_nest_cancel(dcbnl_skb, nest);
485 nla_nest_end(dcbnl_skb, nest);
487 nlmsg_end(dcbnl_skb, nlh);
489 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
498 kfree_skb(dcbnl_skb);
/*
 * dcbnl_setnumtcs - RTM_SETDCB DCB_CMD_SNUMTCS: program the number of
 * traffic classes per feature from the nested DCB_ATTR_NUMTCS attribute,
 * then reply with a single status byte (!!ret).
 *
 * NOTE(review): interior lines (declarations, continue/break logic) are
 * missing from this copy; comments annotate only visible code.
 */
503 static int dcbnl_setnumtcs(struct net_device *netdev, struct nlattr **tb,
504 u32 pid, u32 seq, u16 flags)
506 struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
511 if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->setnumtcs)
514 ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
522 for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
526 value = nla_get_u8(data[i]);
528 ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);
/* reply carries 0 on success, 1 on any driver failure */
535 ret = dcbnl_reply(!!ret, RTM_SETDCB, DCB_CMD_SNUMTCS,
536 DCB_ATTR_NUMTCS, pid, seq, flags);
542 static int dcbnl_getpfcstate(struct net_device *netdev, struct nlattr **tb,
543 u32 pid, u32 seq, u16 flags)
547 if (!netdev->dcbnl_ops->getpfcstate)
550 ret = dcbnl_reply(netdev->dcbnl_ops->getpfcstate(netdev), RTM_GETDCB,
551 DCB_CMD_PFC_GSTATE, DCB_ATTR_PFC_STATE,
557 static int dcbnl_setpfcstate(struct net_device *netdev, struct nlattr **tb,
558 u32 pid, u32 seq, u16 flags)
563 if (!tb[DCB_ATTR_PFC_STATE] || !netdev->dcbnl_ops->setpfcstate)
566 value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);
568 netdev->dcbnl_ops->setpfcstate(netdev, value);
570 ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SSTATE, DCB_ATTR_PFC_STATE,
/*
 * dcbnl_getapp - RTM_GETDCB DCB_CMD_GAPP: look up the 802.1p priority for
 * an (idtype, id) application entry, via the driver's getapp hook when
 * present, otherwise via the core dcb_getapp() table.
 *
 * NOTE(review): interior lines are missing from this copy; comments
 * annotate only visible code.
 */
576 static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
577 u32 pid, u32 seq, u16 flags)
579 struct sk_buff *dcbnl_skb;
580 struct nlmsghdr *nlh;
582 struct nlattr *app_nest;
583 struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
588 if (!tb[DCB_ATTR_APP])
591 ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
597 /* all must be non-null */
598 if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
599 (!app_tb[DCB_APP_ATTR_ID]))
602 /* either by eth type or by socket number */
603 idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
604 if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
605 (idtype != DCB_APP_IDTYPE_PORTNUM))
608 id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
/* prefer the driver hook; fall back to the core app table */
610 if (netdev->dcbnl_ops->getapp) {
611 up = netdev->dcbnl_ops->getapp(netdev, idtype, id);
613 struct dcb_app app = {
617 up = dcb_getapp(netdev, &app);
621 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
625 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
626 dcb = NLMSG_DATA(nlh);
627 dcb->dcb_family = AF_UNSPEC;
628 dcb->cmd = DCB_CMD_GAPP;
630 app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP);
/* echo the query keys back alongside the resolved priority */
634 ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype);
638 ret = nla_put_u16(dcbnl_skb, DCB_APP_ATTR_ID, id);
642 ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_PRIORITY, up);
646 nla_nest_end(dcbnl_skb, app_nest);
647 nlmsg_end(dcbnl_skb, nlh);
649 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
656 nla_nest_cancel(dcbnl_skb, app_nest);
658 kfree_skb(dcbnl_skb);
/*
 * dcbnl_setapp - RTM_SETDCB DCB_CMD_SAPP: set the 802.1p priority for an
 * (idtype, id) application entry, via the driver's setapp hook when
 * present, otherwise via the core dcb_setapp() table.
 *
 * NOTE(review): interior lines are missing from this copy; comments
 * annotate only visible code.
 */
663 static int dcbnl_setapp(struct net_device *netdev, struct nlattr **tb,
664 u32 pid, u32 seq, u16 flags)
666 int err, ret = -EINVAL;
669 struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
671 if (!tb[DCB_ATTR_APP])
674 ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
680 /* all must be non-null */
681 if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
682 (!app_tb[DCB_APP_ATTR_ID]) ||
683 (!app_tb[DCB_APP_ATTR_PRIORITY]))
686 /* either by eth type or by socket number */
687 idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
688 if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
689 (idtype != DCB_APP_IDTYPE_PORTNUM))
692 id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
693 up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);
/* prefer the driver hook; fall back to the core app table */
695 if (netdev->dcbnl_ops->setapp) {
696 err = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
699 app.selector = idtype;
702 err = dcb_setapp(netdev, &app);
705 ret = dcbnl_reply(err, RTM_SETDCB, DCB_CMD_SAPP, DCB_ATTR_APP,
/*
 * __dcbnl_pg_getcfg - shared worker for DCB_CMD_PGTX_GCFG / DCB_CMD_PGRX_GCFG.
 * @dir: 0 = Tx, 1 = Rx. Reports per-traffic-class PG parameters (pgid,
 * strict priority, bandwidth percent, up->tc mapping) and per-bandwidth-group
 * percentages, for the entries requested in the nested DCB_ATTR_PG_CFG
 * attribute (or all of them via the *_ALL attributes).
 *
 * NOTE(review): interior lines (declarations, error branches, labels) are
 * missing from this copy; comments annotate only visible code.
 */
711 static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
712 u32 pid, u32 seq, u16 flags, int dir)
714 struct sk_buff *dcbnl_skb;
715 struct nlmsghdr *nlh;
717 struct nlattr *pg_nest, *param_nest, *data;
718 struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
719 struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
720 u8 prio, pgid, tc_pct, up_map;
/* all four PG driver hooks must be implemented */
725 if (!tb[DCB_ATTR_PG_CFG] ||
726 !netdev->dcbnl_ops->getpgtccfgtx ||
727 !netdev->dcbnl_ops->getpgtccfgrx ||
728 !netdev->dcbnl_ops->getpgbwgcfgtx ||
729 !netdev->dcbnl_ops->getpgbwgcfgrx)
732 ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
733 tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
738 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
742 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
744 dcb = NLMSG_DATA(nlh);
745 dcb->dcb_family = AF_UNSPEC;
746 dcb->cmd = (dir) ? DCB_CMD_PGRX_GCFG : DCB_CMD_PGTX_GCFG;
748 pg_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PG_CFG);
752 if (pg_tb[DCB_PG_ATTR_TC_ALL])
/* per-traffic-class parameter nests */
755 for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
756 if (!getall && !pg_tb[i])
/* TC_ALL supplies the parameter filter for every class */
759 if (pg_tb[DCB_PG_ATTR_TC_ALL])
760 data = pg_tb[DCB_PG_ATTR_TC_ALL];
763 ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
764 data, dcbnl_tc_param_nest);
768 param_nest = nla_nest_start(dcbnl_skb, i);
772 pgid = DCB_ATTR_VALUE_UNDEFINED;
773 prio = DCB_ATTR_VALUE_UNDEFINED;
774 tc_pct = DCB_ATTR_VALUE_UNDEFINED;
775 up_map = DCB_ATTR_VALUE_UNDEFINED;
779 netdev->dcbnl_ops->getpgtccfgrx(netdev,
780 i - DCB_PG_ATTR_TC_0, &prio,
781 &pgid, &tc_pct, &up_map);
784 netdev->dcbnl_ops->getpgtccfgtx(netdev,
785 i - DCB_PG_ATTR_TC_0, &prio,
786 &pgid, &tc_pct, &up_map);
/* emit only the parameters the request asked for */
789 if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
790 param_tb[DCB_TC_ATTR_PARAM_ALL]) {
791 ret = nla_put_u8(dcbnl_skb,
792 DCB_TC_ATTR_PARAM_PGID, pgid);
796 if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
797 param_tb[DCB_TC_ATTR_PARAM_ALL]) {
798 ret = nla_put_u8(dcbnl_skb,
799 DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
803 if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
804 param_tb[DCB_TC_ATTR_PARAM_ALL]) {
805 ret = nla_put_u8(dcbnl_skb,
806 DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
810 if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
811 param_tb[DCB_TC_ATTR_PARAM_ALL]) {
812 ret = nla_put_u8(dcbnl_skb, DCB_TC_ATTR_PARAM_BW_PCT,
817 nla_nest_end(dcbnl_skb, param_nest);
820 if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
/* per-bandwidth-group percentages */
825 for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
826 if (!getall && !pg_tb[i])
829 tc_pct = DCB_ATTR_VALUE_UNDEFINED;
833 netdev->dcbnl_ops->getpgbwgcfgrx(netdev,
834 i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
837 netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
838 i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
840 ret = nla_put_u8(dcbnl_skb, i, tc_pct);
846 nla_nest_end(dcbnl_skb, pg_nest);
848 nlmsg_end(dcbnl_skb, nlh);
850 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
857 nla_nest_cancel(dcbnl_skb, param_nest);
859 nla_nest_cancel(dcbnl_skb, pg_nest);
862 kfree_skb(dcbnl_skb);
868 static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlattr **tb,
869 u32 pid, u32 seq, u16 flags)
871 return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 0);
874 static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlattr **tb,
875 u32 pid, u32 seq, u16 flags)
877 return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 1);
880 static int dcbnl_setstate(struct net_device *netdev, struct nlattr **tb,
881 u32 pid, u32 seq, u16 flags)
886 if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->setstate)
889 value = nla_get_u8(tb[DCB_ATTR_STATE]);
891 ret = dcbnl_reply(netdev->dcbnl_ops->setstate(netdev, value),
892 RTM_SETDCB, DCB_CMD_SSTATE, DCB_ATTR_STATE,
/*
 * dcbnl_setpfccfg - RTM_SETDCB DCB_CMD_PFC_SCFG: program per-user-priority
 * PFC settings from the nested DCB_ATTR_PFC_CFG attribute, then always
 * acknowledge with 0 (the driver hook returns void).
 *
 * NOTE(review): interior lines (declarations, continue-on-missing-attr)
 * are missing from this copy; comments annotate only visible code.
 */
898 static int dcbnl_setpfccfg(struct net_device *netdev, struct nlattr **tb,
899 u32 pid, u32 seq, u16 flags)
901 struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
906 if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->setpfccfg)
909 ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
910 tb[DCB_ATTR_PFC_CFG],
915 for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
918 value = nla_get_u8(data[i]);
/* translate the attribute index back to a 0-based priority */
919 netdev->dcbnl_ops->setpfccfg(netdev,
920 data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
923 ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SCFG, DCB_ATTR_PFC_CFG,
929 static int dcbnl_setall(struct net_device *netdev, struct nlattr **tb,
930 u32 pid, u32 seq, u16 flags)
934 if (!tb[DCB_ATTR_SET_ALL] || !netdev->dcbnl_ops->setall)
937 ret = dcbnl_reply(netdev->dcbnl_ops->setall(netdev), RTM_SETDCB,
938 DCB_CMD_SET_ALL, DCB_ATTR_SET_ALL, pid, seq, flags);
/*
 * __dcbnl_pg_setcfg - shared worker for DCB_CMD_PGTX_SCFG / DCB_CMD_PGRX_SCFG.
 * @dir: 0 = Tx, 1 = Rx. Programs per-traffic-class PG parameters and
 * per-bandwidth-group percentages from the nested DCB_ATTR_PG_CFG
 * attribute; unspecified parameters are passed to the driver as
 * DCB_ATTR_VALUE_UNDEFINED.
 *
 * NOTE(review): interior lines (declarations, continue-on-missing-attr,
 * dir branches) are missing from this copy; comments annotate only
 * visible code.
 */
943 static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlattr **tb,
944 u32 pid, u32 seq, u16 flags, int dir)
946 struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
947 struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
/* all four PG driver hooks must be implemented */
955 if (!tb[DCB_ATTR_PG_CFG] ||
956 !netdev->dcbnl_ops->setpgtccfgtx ||
957 !netdev->dcbnl_ops->setpgtccfgrx ||
958 !netdev->dcbnl_ops->setpgbwgcfgtx ||
959 !netdev->dcbnl_ops->setpgbwgcfgrx)
962 ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
963 tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
967 for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
971 ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
972 pg_tb[i], dcbnl_tc_param_nest);
/* defaults: leave unspecified parameters undefined */
976 pgid = DCB_ATTR_VALUE_UNDEFINED;
977 prio = DCB_ATTR_VALUE_UNDEFINED;
978 tc_pct = DCB_ATTR_VALUE_UNDEFINED;
979 up_map = DCB_ATTR_VALUE_UNDEFINED;
981 if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO])
983 nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]);
985 if (param_tb[DCB_TC_ATTR_PARAM_PGID])
986 pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]);
988 if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT])
989 tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]);
991 if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING])
993 nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]);
995 /* dir: Tx = 0, Rx = 1 */
998 netdev->dcbnl_ops->setpgtccfgrx(netdev,
999 i - DCB_PG_ATTR_TC_0,
1000 prio, pgid, tc_pct, up_map);
1003 netdev->dcbnl_ops->setpgtccfgtx(netdev,
1004 i - DCB_PG_ATTR_TC_0,
1005 prio, pgid, tc_pct, up_map);
1009 for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
1013 tc_pct = nla_get_u8(pg_tb[i]);
1015 /* dir: Tx = 0, Rx = 1 */
1018 netdev->dcbnl_ops->setpgbwgcfgrx(netdev,
1019 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
1022 netdev->dcbnl_ops->setpgbwgcfgtx(netdev,
1023 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
/* driver hooks return void, so always acknowledge with 0 */
1027 ret = dcbnl_reply(0, RTM_SETDCB,
1028 (dir ? DCB_CMD_PGRX_SCFG : DCB_CMD_PGTX_SCFG),
1029 DCB_ATTR_PG_CFG, pid, seq, flags);
1035 static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlattr **tb,
1036 u32 pid, u32 seq, u16 flags)
1038 return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 0);
1041 static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlattr **tb,
1042 u32 pid, u32 seq, u16 flags)
1044 return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 1);
/*
 * dcbnl_bcn_getcfg - RTM_GETDCB DCB_CMD_BCN_GCFG: report Backward
 * Congestion Notification state — per-priority RP enables (u8) and the
 * BCN config parameters (u32) — for the entries requested in the nested
 * DCB_ATTR_BCN attribute (or all via DCB_BCN_ATTR_ALL).
 *
 * NOTE(review): interior lines are missing from this copy; comments
 * annotate only visible code.
 */
1047 static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlattr **tb,
1048 u32 pid, u32 seq, u16 flags)
1050 struct sk_buff *dcbnl_skb;
1051 struct nlmsghdr *nlh;
1053 struct nlattr *bcn_nest;
1054 struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
1058 bool getall = false;
1061 if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->getbcnrp ||
1062 !netdev->dcbnl_ops->getbcncfg)
1065 ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX,
1066 tb[DCB_ATTR_BCN], dcbnl_bcn_nest);
1071 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1075 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
1077 dcb = NLMSG_DATA(nlh);
1078 dcb->dcb_family = AF_UNSPEC;
1079 dcb->cmd = DCB_CMD_BCN_GCFG;
1081 bcn_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_BCN);
1085 if (bcn_tb[DCB_BCN_ATTR_ALL])
/* per-priority rate-limiter enable flags (u8) */
1088 for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
1089 if (!getall && !bcn_tb[i])
1092 netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
1094 ret = nla_put_u8(dcbnl_skb, i, value_byte);
/* BCN config parameters (u32) */
1099 for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
1100 if (!getall && !bcn_tb[i])
1103 netdev->dcbnl_ops->getbcncfg(netdev, i,
1105 ret = nla_put_u32(dcbnl_skb, i, value_integer);
1110 nla_nest_end(dcbnl_skb, bcn_nest);
1112 nlmsg_end(dcbnl_skb, nlh);
1114 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
1121 nla_nest_cancel(dcbnl_skb, bcn_nest);
1124 kfree_skb(dcbnl_skb);
/*
 * dcbnl_bcn_setcfg - RTM_SETDCB DCB_CMD_BCN_SCFG: program BCN per-priority
 * RP enables (u8) and config parameters (u32) from the nested DCB_ATTR_BCN
 * attribute, then always acknowledge with 0 (driver hooks return void).
 *
 * NOTE(review): interior lines are missing from this copy; comments
 * annotate only visible code.
 */
1130 static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlattr **tb,
1131 u32 pid, u32 seq, u16 flags)
1133 struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
1139 if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->setbcncfg ||
1140 !netdev->dcbnl_ops->setbcnrp)
1143 ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX,
1149 for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
1150 if (data[i] == NULL)
1152 value_byte = nla_get_u8(data[i]);
/* translate attribute index back to a 0-based priority */
1153 netdev->dcbnl_ops->setbcnrp(netdev,
1154 data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte);
1157 for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
1158 if (data[i] == NULL)
1160 value_int = nla_get_u32(data[i]);
1161 netdev->dcbnl_ops->setbcncfg(netdev,
1165 ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_BCN_SCFG, DCB_ATTR_BCN,
/*
 * dcbnl_build_peer_app - query the driver for the peer's APP table and
 * append it to @skb as a nest of @app_nested_type containing one
 * @app_info_type header plus @app_count entries.
 *
 * NOTE(review): interior lines (declarations, error/exit paths, kfree of
 * @table) are missing from this copy; comments annotate only visible code.
 */
1171 static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb,
1172 int app_nested_type, int app_info_type,
1175 struct dcb_peer_app_info info;
1176 struct dcb_app *table = NULL;
1177 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1183 * retrieve the peer app configuration from the driver. If the driver
1184 * handlers fail exit without doing anything
1186 err = ops->peer_getappinfo(netdev, &info, &app_count);
1187 if (!err && app_count) {
1188 table = kmalloc(sizeof(struct dcb_app) * app_count, GFP_KERNEL);
1192 err = ops->peer_getapptable(netdev, table);
1200 * build the message, from here on the only possible failure
1201 * is due to the skb size
1205 app = nla_nest_start(skb, app_nested_type);
1207 goto nla_put_failure;
1210 NLA_PUT(skb, app_info_type, sizeof(info), &info);
1212 for (i = 0; i < app_count; i++)
1213 NLA_PUT(skb, app_entry_type, sizeof(struct dcb_app),
1216 nla_nest_end(skb, app);
1225 /* Handle IEEE 802.1Qaz GET commands. */
/*
 * dcbnl_ieee_fill - append the full IEEE 802.1Qaz state (ETS, PFC, APP
 * table, peer info) plus the DCBX mode to @skb. Returns 0 on success,
 * negative on failure (falls through to nla_put_failure).
 *
 * NOTE(review): interior lines (error branches, labels) are missing from
 * this copy; comments annotate only visible code.
 */
1226 static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
1228 struct nlattr *ieee, *app;
1229 struct dcb_app_type *itr;
1230 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1232 int err = -EMSGSIZE;
1234 NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);
1236 ieee = nla_nest_start(skb, DCB_ATTR_IEEE);
1238 goto nla_put_failure;
1240 if (ops->ieee_getets) {
1241 struct ieee_ets ets;
1242 memset(&ets, 0, sizeof(ets));
1243 err = ops->ieee_getets(netdev, &ets);
1245 NLA_PUT(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets);
1248 if (ops->ieee_getpfc) {
1249 struct ieee_pfc pfc;
1250 memset(&pfc, 0, sizeof(pfc));
1251 err = ops->ieee_getpfc(netdev, &pfc);
1253 NLA_PUT(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc);
1256 app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE);
1258 goto nla_put_failure;
/* walk the shared app list under dcb_lock; emit this device's entries */
1260 spin_lock(&dcb_lock);
1261 list_for_each_entry(itr, &dcb_app_list, list) {
1262 if (itr->ifindex == netdev->ifindex) {
1263 err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
/* drop the lock before bailing out on a put failure */
1266 spin_unlock(&dcb_lock);
1267 goto nla_put_failure;
1272 if (netdev->dcbnl_ops->getdcbx)
1273 dcbx = netdev->dcbnl_ops->getdcbx(netdev);
1277 spin_unlock(&dcb_lock);
1278 nla_nest_end(skb, app);
1280 /* get peer info if available */
1281 if (ops->ieee_peer_getets) {
1282 struct ieee_ets ets;
1283 memset(&ets, 0, sizeof(ets));
1284 err = ops->ieee_peer_getets(netdev, &ets);
1286 NLA_PUT(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets);
1289 if (ops->ieee_peer_getpfc) {
1290 struct ieee_pfc pfc;
1291 memset(&pfc, 0, sizeof(pfc));
1292 err = ops->ieee_peer_getpfc(netdev, &pfc);
1294 NLA_PUT(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc);
1297 if (ops->peer_getappinfo && ops->peer_getapptable) {
1298 err = dcbnl_build_peer_app(netdev, skb,
1299 DCB_ATTR_IEEE_PEER_APP,
1300 DCB_ATTR_IEEE_APP_UNSPEC,
1303 goto nla_put_failure;
1306 nla_nest_end(skb, ieee);
1308 err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
1310 goto nla_put_failure;
/*
 * dcbnl_cee_pg_fill - append CEE priority-group state to @skb under
 * DCB_ATTR_CEE_TX_PG or DCB_ATTR_CEE_RX_PG depending on @dir: one nest
 * per traffic class plus per-bandwidth-group percentages.
 *
 * NOTE(review): interior lines (dir branches, error paths) are missing
 * from this copy; comments annotate only visible code.
 */
1319 static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
1322 u8 pgid, up_map, prio, tc_pct;
1323 const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
1324 int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG;
1325 struct nlattr *pg = nla_nest_start(skb, i);
1328 goto nla_put_failure;
1330 for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
1331 struct nlattr *tc_nest = nla_nest_start(skb, i);
1334 goto nla_put_failure;
1336 pgid = DCB_ATTR_VALUE_UNDEFINED;
1337 prio = DCB_ATTR_VALUE_UNDEFINED;
1338 tc_pct = DCB_ATTR_VALUE_UNDEFINED;
1339 up_map = DCB_ATTR_VALUE_UNDEFINED;
1342 ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0,
1343 &prio, &pgid, &tc_pct, &up_map);
1345 ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
1346 &prio, &pgid, &tc_pct, &up_map);
1348 NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_PGID, pgid);
1349 NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
1350 NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
1351 NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct);
1352 nla_nest_end(skb, tc_nest);
/* per-bandwidth-group percentages */
1355 for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
1356 tc_pct = DCB_ATTR_VALUE_UNDEFINED;
1359 ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0,
1362 ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
1364 NLA_PUT_U8(skb, i, tc_pct);
1366 nla_nest_end(skb, pg);
/*
 * dcbnl_cee_fill - append the full CEE state (Tx/Rx PG, PFC, APP table,
 * feature flags, peer info) plus the DCBX mode to @skb.
 *
 * NOTE(review): interior lines (error branches, labels, some declarations)
 * are missing from this copy; comments annotate only visible code.
 */
1373 static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
1375 struct nlattr *cee, *app;
1376 struct dcb_app_type *itr;
1377 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1378 int dcbx, i, err = -EMSGSIZE;
1381 NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);
1383 cee = nla_nest_start(skb, DCB_ATTR_CEE);
1385 goto nla_put_failure;
1388 if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) {
1389 err = dcbnl_cee_pg_fill(skb, netdev, 1);
1391 goto nla_put_failure;
1394 if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) {
1395 err = dcbnl_cee_pg_fill(skb, netdev, 0);
1397 goto nla_put_failure;
1401 if (ops->getpfccfg) {
1402 struct nlattr *pfc_nest = nla_nest_start(skb, DCB_ATTR_CEE_PFC);
1405 goto nla_put_failure;
1407 for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
1408 ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
1409 NLA_PUT_U8(skb, i, value);
1411 nla_nest_end(skb, pfc_nest);
/* local app table: walk the shared list under dcb_lock */
1415 spin_lock(&dcb_lock);
1416 app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE);
1420 list_for_each_entry(itr, &dcb_app_list, list) {
1421 if (itr->ifindex == netdev->ifindex) {
1422 struct nlattr *app_nest = nla_nest_start(skb,
1427 err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE,
1432 err = nla_put_u16(skb, DCB_APP_ATTR_ID,
1437 err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY,
1442 nla_nest_end(skb, app_nest);
1445 nla_nest_end(skb, app);
1447 if (netdev->dcbnl_ops->getdcbx)
1448 dcbx = netdev->dcbnl_ops->getdcbx(netdev);
1452 spin_unlock(&dcb_lock);
1454 /* features flags */
1455 if (ops->getfeatcfg) {
1456 struct nlattr *feat = nla_nest_start(skb, DCB_ATTR_CEE_FEAT);
1458 goto nla_put_failure;
1460 for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
/* emit only features the driver reports successfully */
1462 if (!ops->getfeatcfg(netdev, i, &value))
1463 NLA_PUT_U8(skb, i, value);
1465 nla_nest_end(skb, feat);
1468 /* peer info if available */
1469 if (ops->cee_peer_getpg) {
1471 memset(&pg, 0, sizeof(pg));
1472 err = ops->cee_peer_getpg(netdev, &pg);
1474 NLA_PUT(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg);
1477 if (ops->cee_peer_getpfc) {
1479 memset(&pfc, 0, sizeof(pfc));
1480 err = ops->cee_peer_getpfc(netdev, &pfc);
1482 NLA_PUT(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc);
1485 if (ops->peer_getappinfo && ops->peer_getapptable) {
1486 err = dcbnl_build_peer_app(netdev, skb,
1487 DCB_ATTR_CEE_PEER_APP_TABLE,
1488 DCB_ATTR_CEE_PEER_APP_INFO,
1489 DCB_ATTR_CEE_PEER_APP);
1491 goto nla_put_failure;
1493 nla_nest_end(skb, cee);
1497 err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
1499 goto nla_put_failure;
/* presumably an error label that releases dcb_lock — TODO confirm */
1504 spin_unlock(&dcb_lock);
/*
 * dcbnl_notify - build a DCB netlink message for @dev and broadcast it
 * to RTNLGRP_DCB listeners.
 *
 * @dcbx_ver selects which fill routine runs: DCB_CAP_DCBX_VER_IEEE uses
 * dcbnl_ieee_fill(), otherwise dcbnl_cee_fill() is used.  On a fill
 * error the partially built message is cancelled and the error is
 * reported to the broadcast group via rtnl_set_sk_err().
 * (NOTE: several lines are elided in this excerpt — the allocation /
 * nlmsg_put error paths and the final return are not visible.)
 */
1509 static int dcbnl_notify(struct net_device *dev, int event, int cmd,
1510 u32 seq, u32 pid, int dcbx_ver)
1512 struct net *net = dev_net(dev);
1513 struct sk_buff *skb;
1514 struct nlmsghdr *nlh;
1516 const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
/* GFP_KERNEL: presumably only called from process context — TODO confirm */
1522 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1526 nlh = nlmsg_put(skb, pid, 0, event, sizeof(*dcb), 0);
1532 dcb = NLMSG_DATA(nlh);
1533 dcb->dcb_family = AF_UNSPEC;
1536 if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
1537 err = dcbnl_ieee_fill(skb, dev);
1539 err = dcbnl_cee_fill(skb, dev);
1542 /* Report error to broadcast listeners */
1543 nlmsg_cancel(skb, nlh);
1545 rtnl_set_sk_err(net, RTNLGRP_DCB, err);
1547 /* End nlmsg and notify broadcast listeners */
1548 nlmsg_end(skb, nlh);
1549 rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL);
/*
 * dcbnl_ieee_notify - exported wrapper: broadcast an IEEE-DCBX
 * notification for @dev (delegates to dcbnl_notify with
 * DCB_CAP_DCBX_VER_IEEE).
 */
1555 int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
1558 return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_IEEE);
1560 EXPORT_SYMBOL(dcbnl_ieee_notify);
/*
 * dcbnl_cee_notify - exported wrapper: broadcast a CEE-DCBX
 * notification for @dev (delegates to dcbnl_notify with
 * DCB_CAP_DCBX_VER_CEE).
 */
1562 int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
1565 return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_CEE);
1567 EXPORT_SYMBOL(dcbnl_cee_notify);
1569 /* Handle IEEE 802.1Qaz SET commands. If any requested operation can not
1570 * be completed the entire msg is aborted and error value is returned.
1571 * No attempt is made to reconcile the case where only part of the
1572 * cmd can be completed.
*
* Parses the nested DCB_ATTR_IEEE attribute, then applies ETS, PFC and
* APP-table entries through the driver's dcbnl ops.  APP entries fall
* back to the in-kernel dcb_ieee_setapp() when the driver does not
* implement ieee_setapp.  On success a reply is sent to the requester
* and an RTNLGRP_DCB notification is broadcast.
*/
1574 static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
1575 u32 pid, u32 seq, u16 flags)
1577 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1578 struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
/* default: device exposes no usable op for the request */
1579 int err = -EOPNOTSUPP;
1584 if (!tb[DCB_ATTR_IEEE])
1587 err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
1588 tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
1592 if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
1593 struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);
1594 err = ops->ieee_setets(netdev, ets);
1599 if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
1600 struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
1601 err = ops->ieee_setpfc(netdev, pfc);
1606 if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
1607 struct nlattr *attr;
1610 nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
1611 struct dcb_app *app_data;
/* skip unknown nested attribute types */
1612 if (nla_type(attr) != DCB_ATTR_IEEE_APP)
1614 app_data = nla_data(attr);
1615 if (ops->ieee_setapp)
1616 err = ops->ieee_setapp(netdev, app_data);
1618 err = dcb_ieee_setapp(netdev, app_data);
1625 dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_SET, DCB_ATTR_IEEE,
1627 dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
/*
 * dcbnl_ieee_get - handle DCB_CMD_IEEE_GET: build a unicast reply
 * containing the device's complete IEEE 802.1Qaz state (filled in by
 * dcbnl_ieee_fill) and send it back to the requesting pid.
 * (NOTE: the skb/nlmsg_put failure branches are elided in this excerpt.)
 */
1631 static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb,
1632 u32 pid, u32 seq, u16 flags)
1634 struct net *net = dev_net(netdev);
1635 struct sk_buff *skb;
1636 struct nlmsghdr *nlh;
1638 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1644 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1648 nlh = nlmsg_put(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
1654 dcb = NLMSG_DATA(nlh);
1655 dcb->dcb_family = AF_UNSPEC;
1656 dcb->cmd = DCB_CMD_IEEE_GET;
1658 err = dcbnl_ieee_fill(skb, netdev);
/* fill failed: drop the partial message */
1661 nlmsg_cancel(skb, nlh);
1664 nlmsg_end(skb, nlh);
1665 err = rtnl_unicast(skb, net, pid);
/*
 * dcbnl_ieee_del - handle DCB_CMD_IEEE_DEL: remove IEEE APP-table
 * entries listed in the nested DCB_ATTR_IEEE attribute.  Uses the
 * driver's ieee_delapp op when present, otherwise the in-kernel
 * dcb_ieee_delapp().  Replies to the requester and broadcasts an
 * RTNLGRP_DCB notification afterwards.
 */
1671 static int dcbnl_ieee_del(struct net_device *netdev, struct nlattr **tb,
1672 u32 pid, u32 seq, u16 flags)
1674 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1675 struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
1676 int err = -EOPNOTSUPP;
1681 if (!tb[DCB_ATTR_IEEE])
1684 err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
1685 tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
1689 if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
1690 struct nlattr *attr;
1693 nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
1694 struct dcb_app *app_data;
/* ignore nested attributes that are not APP entries */
1696 if (nla_type(attr) != DCB_ATTR_IEEE_APP)
1698 app_data = nla_data(attr);
1699 if (ops->ieee_delapp)
1700 err = ops->ieee_delapp(netdev, app_data);
1702 err = dcb_ieee_delapp(netdev, app_data);
1709 dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_DEL, DCB_ATTR_IEEE,
1711 dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
1716 /* DCBX configuration */
/*
 * dcbnl_getdcbx - handle DCB_CMD_GDCBX: reply with the DCBX mode flags
 * reported by the driver's getdcbx op (u8 bitmask of DCB_CAP_DCBX_*).
 */
1717 static int dcbnl_getdcbx(struct net_device *netdev, struct nlattr **tb,
1718 u32 pid, u32 seq, u16 flags)
1722 if (!netdev->dcbnl_ops->getdcbx)
1725 ret = dcbnl_reply(netdev->dcbnl_ops->getdcbx(netdev), RTM_GETDCB,
1726 DCB_CMD_GDCBX, DCB_ATTR_DCBX, pid, seq, flags);
/*
 * dcbnl_setdcbx - handle DCB_CMD_SDCBX: pass the requested DCBX mode
 * (u8 from DCB_ATTR_DCBX) to the driver's setdcbx op and reply with
 * the op's status.
 */
1731 static int dcbnl_setdcbx(struct net_device *netdev, struct nlattr **tb,
1732 u32 pid, u32 seq, u16 flags)
1737 if (!netdev->dcbnl_ops->setdcbx)
1740 if (!tb[DCB_ATTR_DCBX])
1743 value = nla_get_u8(tb[DCB_ATTR_DCBX]);
1745 ret = dcbnl_reply(netdev->dcbnl_ops->setdcbx(netdev, value),
1746 RTM_SETDCB, DCB_CMD_SDCBX, DCB_ATTR_DCBX,
/*
 * dcbnl_getfeatcfg - handle DCB_CMD_GFEATCFG: report per-feature
 * configuration flags.  The nested DCB_ATTR_FEATCFG request selects
 * which features to report; DCB_FEATCFG_ATTR_ALL requests every
 * feature.  Each value is read via the driver's getfeatcfg op and
 * returned as a u8 attribute in a unicast reply.
 */
1752 static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlattr **tb,
1753 u32 pid, u32 seq, u16 flags)
1755 struct sk_buff *dcbnl_skb;
1756 struct nlmsghdr *nlh;
1758 struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest;
1763 if (!netdev->dcbnl_ops->getfeatcfg)
1766 if (!tb[DCB_ATTR_FEATCFG])
1769 ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
1770 dcbnl_featcfg_nest)
1774 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1780 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
1782 dcb = NLMSG_DATA(nlh);
1783 dcb->dcb_family = AF_UNSPEC;
1784 dcb->cmd = DCB_CMD_GFEATCFG;
1786 nest = nla_nest_start(dcbnl_skb, DCB_ATTR_FEATCFG);
1789 goto nla_put_failure;
/* ALL present: report every feature regardless of per-feature attrs */
1792 if (data[DCB_FEATCFG_ATTR_ALL])
1795 for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
1796 if (!getall && !data[i])
1799 ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value);
1801 ret = nla_put_u8(dcbnl_skb, i, value);
1804 nla_nest_cancel(dcbnl_skb, nest);
1805 goto nla_put_failure;
1808 nla_nest_end(dcbnl_skb, nest);
1810 nlmsg_end(dcbnl_skb, nlh);
1812 return rtnl_unicast(dcbnl_skb, &init_net, pid);
/* error path: cancel the message and free the skb */
1814 nlmsg_cancel(dcbnl_skb, nlh);
1816 kfree_skb(dcbnl_skb);
/*
 * dcbnl_setfeatcfg - handle DCB_CMD_SFEATCFG: for every per-feature u8
 * attribute present in the nested DCB_ATTR_FEATCFG request, push the
 * value to the driver via the setfeatcfg op, then send a status reply.
 */
1821 static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlattr **tb,
1822 u32 pid, u32 seq, u16 flags)
1824 struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1];
1828 if (!netdev->dcbnl_ops->setfeatcfg)
1831 if (!tb[DCB_ATTR_FEATCFG])
1834 ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
1835 dcbnl_featcfg_nest);
1840 for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
/* only features explicitly present in the request are changed */
1841 if (data[i] == NULL)
1844 value = nla_get_u8(data[i]);
1846 ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value);
1852 dcbnl_reply(ret, RTM_SETDCB, DCB_CMD_SFEATCFG, DCB_ATTR_FEATCFG,
1858 /* Handle CEE DCBX GET commands. */
/*
 * dcbnl_cee_get - handle DCB_CMD_CEE_GET: build a unicast reply with
 * the full CEE state (filled in by dcbnl_cee_fill) for the requester.
 * (NOTE: the skb/nlmsg_put failure branches are elided in this excerpt.)
 */
1859 static int dcbnl_cee_get(struct net_device *netdev, struct nlattr **tb,
1860 u32 pid, u32 seq, u16 flags)
1862 struct net *net = dev_net(netdev);
1863 struct sk_buff *skb;
1864 struct nlmsghdr *nlh;
1866 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1872 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1876 nlh = nlmsg_put(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
1882 dcb = NLMSG_DATA(nlh);
1883 dcb->dcb_family = AF_UNSPEC;
1884 dcb->cmd = DCB_CMD_CEE_GET;
1886 err = dcbnl_cee_fill(skb, netdev);
/* fill failed: drop the partial message */
1889 nlmsg_cancel(skb, nlh);
1892 nlmsg_end(skb, nlh);
1893 err = rtnl_unicast(skb, net, pid);
/*
 * dcb_doit - top-level rtnetlink handler for RTM_GETDCB / RTM_SETDCB.
 *
 * Validates the request (init_net only, DCB_ATTR_IFNAME required,
 * device must have dcbnl_ops), looks up the target device by name and
 * dispatches on dcb->cmd to the per-command handlers above.
 * (NOTE: the dev_put() / error-return lines are elided in this excerpt.)
 */
1898 static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1900 struct net *net = sock_net(skb->sk);
1901 struct net_device *netdev;
1902 struct dcbmsg *dcb = (struct dcbmsg *)NLMSG_DATA(nlh);
1903 struct nlattr *tb[DCB_ATTR_MAX + 1];
1904 u32 pid = skb ? NETLINK_CB(skb).pid : 0;
/* DCB is only supported in the initial network namespace here */
1907 if (!net_eq(net, &init_net))
1910 ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
1915 if (!tb[DCB_ATTR_IFNAME])
/* dev_get_by_name takes a reference — presumably dropped on an elided
 * exit path; TODO confirm against the full source */
1918 netdev = dev_get_by_name(&init_net, nla_data(tb[DCB_ATTR_IFNAME]));
1922 if (!netdev->dcbnl_ops)
1926 case DCB_CMD_GSTATE:
1927 ret = dcbnl_getstate(netdev, tb, pid, nlh->nlmsg_seq,
1930 case DCB_CMD_PFC_GCFG:
1931 ret = dcbnl_getpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
1934 case DCB_CMD_GPERM_HWADDR:
1935 ret = dcbnl_getperm_hwaddr(netdev, tb, pid, nlh->nlmsg_seq,
1938 case DCB_CMD_PGTX_GCFG:
1939 ret = dcbnl_pgtx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
1942 case DCB_CMD_PGRX_GCFG:
1943 ret = dcbnl_pgrx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
1946 case DCB_CMD_BCN_GCFG:
1947 ret = dcbnl_bcn_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
1950 case DCB_CMD_SSTATE:
1951 ret = dcbnl_setstate(netdev, tb, pid, nlh->nlmsg_seq,
1954 case DCB_CMD_PFC_SCFG:
1955 ret = dcbnl_setpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
1959 case DCB_CMD_SET_ALL:
1960 ret = dcbnl_setall(netdev, tb, pid, nlh->nlmsg_seq,
1963 case DCB_CMD_PGTX_SCFG:
1964 ret = dcbnl_pgtx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
1967 case DCB_CMD_PGRX_SCFG:
1968 ret = dcbnl_pgrx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
1972 ret = dcbnl_getcap(netdev, tb, pid, nlh->nlmsg_seq,
1975 case DCB_CMD_GNUMTCS:
1976 ret = dcbnl_getnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
1979 case DCB_CMD_SNUMTCS:
1980 ret = dcbnl_setnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
1983 case DCB_CMD_PFC_GSTATE:
1984 ret = dcbnl_getpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
1987 case DCB_CMD_PFC_SSTATE:
1988 ret = dcbnl_setpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
1991 case DCB_CMD_BCN_SCFG:
1992 ret = dcbnl_bcn_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
1996 ret = dcbnl_getapp(netdev, tb, pid, nlh->nlmsg_seq,
2000 ret = dcbnl_setapp(netdev, tb, pid, nlh->nlmsg_seq,
2003 case DCB_CMD_IEEE_SET:
2004 ret = dcbnl_ieee_set(netdev, tb, pid, nlh->nlmsg_seq,
2007 case DCB_CMD_IEEE_GET:
2008 ret = dcbnl_ieee_get(netdev, tb, pid, nlh->nlmsg_seq,
2011 case DCB_CMD_IEEE_DEL:
2012 ret = dcbnl_ieee_del(netdev, tb, pid, nlh->nlmsg_seq,
2016 ret = dcbnl_getdcbx(netdev, tb, pid, nlh->nlmsg_seq,
2020 ret = dcbnl_setdcbx(netdev, tb, pid, nlh->nlmsg_seq,
2023 case DCB_CMD_GFEATCFG:
2024 ret = dcbnl_getfeatcfg(netdev, tb, pid, nlh->nlmsg_seq,
2027 case DCB_CMD_SFEATCFG:
2028 ret = dcbnl_setfeatcfg(netdev, tb, pid, nlh->nlmsg_seq,
2031 case DCB_CMD_CEE_GET:
2032 ret = dcbnl_cee_get(netdev, tb, pid, nlh->nlmsg_seq,
2046 * dcb_getapp - retrieve the DCBX application user priority
2048 * On success returns a non-zero 802.1p user priority bitmap
2049 * otherwise returns 0 as the invalid user priority bitmap to
2050 * indicate an error.
2052 u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
2054 struct dcb_app_type *itr;
/* dcb_lock serializes access to the global dcb_app_list */
2057 spin_lock(&dcb_lock);
/* linear scan for an entry matching selector, protocol and device */
2058 list_for_each_entry(itr, &dcb_app_list, list) {
2059 if (itr->app.selector == app->selector &&
2060 itr->app.protocol == app->protocol &&
2061 itr->ifindex == dev->ifindex) {
2062 prio = itr->app.priority;
2066 spin_unlock(&dcb_lock);
2070 EXPORT_SYMBOL(dcb_getapp);
2073 * dcb_setapp - add CEE dcb application data to app list
2075 * Priority 0 is an invalid priority in CEE spec. This routine
2076 * removes applications from the app list if the priority is
* set to zero; a non-zero priority replaces the existing entry or
* appends a new one.  A DCB_APP_EVENT notification is raised after
* the list is updated.
2079 int dcb_setapp(struct net_device *dev, struct dcb_app *new)
2081 struct dcb_app_type *itr;
2082 struct dcb_app_type event;
2084 event.ifindex = dev->ifindex;
2085 memcpy(&event.app, new, sizeof(event.app));
2086 if (dev->dcbnl_ops->getdcbx)
2087 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
2089 spin_lock(&dcb_lock);
2090 /* Search for existing match and replace */
2091 list_for_each_entry(itr, &dcb_app_list, list) {
2092 if (itr->app.selector == new->selector &&
2093 itr->app.protocol == new->protocol &&
2094 itr->ifindex == dev->ifindex) {
2096 itr->app.priority = new->priority;
/* priority 0 (elided else-branch): remove the stale entry */
2098 list_del(&itr->list);
2104 /* App type does not exist add new application type */
2105 if (new->priority) {
2106 struct dcb_app_type *entry;
/* GFP_ATOMIC: allocation happens under the dcb_lock spinlock */
2107 entry = kmalloc(sizeof(struct dcb_app_type), GFP_ATOMIC);
2109 spin_unlock(&dcb_lock);
2113 memcpy(&entry->app, new, sizeof(*new));
2114 entry->ifindex = dev->ifindex;
2115 list_add(&entry->list, &dcb_app_list);
2118 spin_unlock(&dcb_lock);
2119 call_dcbevent_notifiers(DCB_APP_EVENT, &event);
2122 EXPORT_SYMBOL(dcb_setapp);
2125 * dcb_ieee_getapp_mask - retrieve the IEEE DCB application priority
2127 * Helper routine which on success returns a non-zero 802.1Qaz user
2128 * priority bitmap otherwise returns 0 to indicate the dcb_app was
2129 * not found in APP list.
2131 u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
2133 struct dcb_app_type *itr;
2136 spin_lock(&dcb_lock);
2137 list_for_each_entry(itr, &dcb_app_list, list) {
2138 if (itr->app.selector == app->selector &&
2139 itr->app.protocol == app->protocol &&
2140 itr->ifindex == dev->ifindex) {
/* IEEE allows multiple entries per selector/protocol:
 * accumulate every matching priority as a bit in the mask */
2141 prio |= 1 << itr->app.priority;
2144 spin_unlock(&dcb_lock);
2148 EXPORT_SYMBOL(dcb_ieee_getapp_mask);
2151 * dcb_ieee_setapp - add IEEE dcb application data to app list
2153 * This adds Application data to the list. Multiple application
2154 * entries may exists for the same selector and protocol as long
2155 * as the priorities are different.
* A DCB_APP_EVENT notification is raised after a successful insert.
2157 int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
2159 struct dcb_app_type *itr, *entry;
2160 struct dcb_app_type event;
2163 event.ifindex = dev->ifindex;
2164 memcpy(&event.app, new, sizeof(event.app));
2165 if (dev->dcbnl_ops->getdcbx)
2166 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
2168 spin_lock(&dcb_lock);
2169 /* Search for existing match and abort if found */
2170 list_for_each_entry(itr, &dcb_app_list, list) {
2171 if (itr->app.selector == new->selector &&
2172 itr->app.protocol == new->protocol &&
2173 itr->app.priority == new->priority &&
2174 itr->ifindex == dev->ifindex) {
2180 /* App entry does not exist add new entry */
/* GFP_ATOMIC: allocation happens under the dcb_lock spinlock */
2181 entry = kmalloc(sizeof(struct dcb_app_type), GFP_ATOMIC);
2187 memcpy(&entry->app, new, sizeof(*new));
2188 entry->ifindex = dev->ifindex;
2189 list_add(&entry->list, &dcb_app_list);
2191 spin_unlock(&dcb_lock);
2193 call_dcbevent_notifiers(DCB_APP_EVENT, &event);
2196 EXPORT_SYMBOL(dcb_ieee_setapp);
2199 * dcb_ieee_delapp - delete IEEE dcb application data from list
2201 * This removes a matching APP data from the APP list
* (selector, protocol, priority and ifindex must all match) and
* raises a DCB_APP_EVENT notification afterwards.
2203 int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
2205 struct dcb_app_type *itr;
2206 struct dcb_app_type event;
2209 event.ifindex = dev->ifindex;
2210 memcpy(&event.app, del, sizeof(event.app));
2211 if (dev->dcbnl_ops->getdcbx)
2212 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
2214 spin_lock(&dcb_lock);
2215 /* Search for existing match and remove it. */
2216 list_for_each_entry(itr, &dcb_app_list, list) {
2217 if (itr->app.selector == del->selector &&
2218 itr->app.protocol == del->protocol &&
2219 itr->app.priority == del->priority &&
2220 itr->ifindex == dev->ifindex) {
2221 list_del(&itr->list);
2229 spin_unlock(&dcb_lock);
2231 call_dcbevent_notifiers(DCB_APP_EVENT, &event);
2234 EXPORT_SYMBOL(dcb_ieee_delapp);
/*
 * dcb_flushapp - remove every entry from the global dcb_app_list,
 * holding dcb_lock for the full traversal.  _safe iteration is
 * required because entries are deleted while walking.
 * (NOTE: the kfree() of each entry is elided in this excerpt —
 * presumably present in the full source; verify there.)
 */
2236 static void dcb_flushapp(void)
2238 struct dcb_app_type *app;
2239 struct dcb_app_type *tmp;
2241 spin_lock(&dcb_lock);
2242 list_for_each_entry_safe(app, tmp, &dcb_app_list, list) {
2243 list_del(&app->list);
2246 spin_unlock(&dcb_lock);
/*
 * dcbnl_init - module init: initialise the APP list head and register
 * dcb_doit as the rtnetlink doit handler for both RTM_GETDCB and
 * RTM_SETDCB on PF_UNSPEC.
 */
2249 static int __init dcbnl_init(void)
2251 INIT_LIST_HEAD(&dcb_app_list);
2253 rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, NULL);
2254 rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, NULL);
2258 module_init(dcbnl_init);
/*
 * dcbnl_exit - module teardown: unregister both rtnetlink handlers.
 * (The dcb_flushapp() call that frees the APP list is elided in this
 * excerpt — presumably invoked here; verify against the full source.)
 */
2260 static void __exit dcbnl_exit(void)
2262 rtnl_unregister(PF_UNSPEC, RTM_GETDCB);
2263 rtnl_unregister(PF_UNSPEC, RTM_SETDCB);
2266 module_exit(dcbnl_exit);