aboutsummaryrefslogtreecommitdiffstats
path: root/net/dcb/dcbnl.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/dcb/dcbnl.c')
-rw-r--r--net/dcb/dcbnl.c435
1 files changed, 428 insertions, 7 deletions
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 19ac2b985485..d900ab99814a 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -23,6 +23,7 @@
23#include <net/netlink.h> 23#include <net/netlink.h>
24#include <net/rtnetlink.h> 24#include <net/rtnetlink.h>
25#include <linux/dcbnl.h> 25#include <linux/dcbnl.h>
26#include <net/dcbevent.h>
26#include <linux/rtnetlink.h> 27#include <linux/rtnetlink.h>
27#include <net/sock.h> 28#include <net/sock.h>
28 29
@@ -66,6 +67,9 @@ static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
66 [DCB_ATTR_PFC_STATE] = {.type = NLA_U8}, 67 [DCB_ATTR_PFC_STATE] = {.type = NLA_U8},
67 [DCB_ATTR_BCN] = {.type = NLA_NESTED}, 68 [DCB_ATTR_BCN] = {.type = NLA_NESTED},
68 [DCB_ATTR_APP] = {.type = NLA_NESTED}, 69 [DCB_ATTR_APP] = {.type = NLA_NESTED},
70 [DCB_ATTR_IEEE] = {.type = NLA_NESTED},
71 [DCB_ATTR_DCBX] = {.type = NLA_U8},
72 [DCB_ATTR_FEATCFG] = {.type = NLA_NESTED},
69}; 73};
70 74
71/* DCB priority flow control to User Priority nested attributes */ 75/* DCB priority flow control to User Priority nested attributes */
@@ -122,6 +126,7 @@ static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
122 [DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8}, 126 [DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
123 [DCB_CAP_ATTR_GSP] = {.type = NLA_U8}, 127 [DCB_CAP_ATTR_GSP] = {.type = NLA_U8},
124 [DCB_CAP_ATTR_BCN] = {.type = NLA_U8}, 128 [DCB_CAP_ATTR_BCN] = {.type = NLA_U8},
129 [DCB_CAP_ATTR_DCBX] = {.type = NLA_U8},
125}; 130};
126 131
127/* DCB capabilities nested attributes. */ 132/* DCB capabilities nested attributes. */
@@ -167,6 +172,28 @@ static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
167 [DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8}, 172 [DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
168}; 173};
169 174
/* IEEE 802.1Qaz nested attributes.
 * ETS/PFC are fixed-size binary blobs (struct copied straight from the
 * netlink payload); the APP table is a nested list of DCB_ATTR_IEEE_APP.
 */
static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
	[DCB_ATTR_IEEE_ETS]	    = {.len = sizeof(struct ieee_ets)},
	[DCB_ATTR_IEEE_PFC]	    = {.len = sizeof(struct ieee_pfc)},
	[DCB_ATTR_IEEE_APP_TABLE]   = {.type = NLA_NESTED},
};
181
/* Policy for one entry of the IEEE APP table: a raw struct dcb_app. */
static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = {
	[DCB_ATTR_IEEE_APP]	    = {.len = sizeof(struct dcb_app)},
};
185
/* DCB feature-configuration nested attributes (GFEATCFG/SFEATCFG).
 * (The original comment said "number of traffic classes" — copy-paste
 * from another policy table; these are the per-feature enable flags.)
 */
static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
	[DCB_FEATCFG_ATTR_ALL]      = {.type = NLA_FLAG},
	[DCB_FEATCFG_ATTR_PG]       = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_PFC]      = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_APP]      = {.type = NLA_U8},
};
193
/* Global application-priority table for devices without an ieee_setapp
 * driver op, keyed by interface name; all access is under dcb_lock.
 */
static LIST_HEAD(dcb_app_list);
static DEFINE_SPINLOCK(dcb_lock);
196
170/* standard netlink reply call */ 197/* standard netlink reply call */
171static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid, 198static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid,
172 u32 seq, u16 flags) 199 u32 seq, u16 flags)
@@ -622,12 +649,12 @@ out:
622static int dcbnl_setapp(struct net_device *netdev, struct nlattr **tb, 649static int dcbnl_setapp(struct net_device *netdev, struct nlattr **tb,
623 u32 pid, u32 seq, u16 flags) 650 u32 pid, u32 seq, u16 flags)
624{ 651{
625 int ret = -EINVAL; 652 int err, ret = -EINVAL;
626 u16 id; 653 u16 id;
627 u8 up, idtype; 654 u8 up, idtype;
628 struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1]; 655 struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
629 656
630 if (!tb[DCB_ATTR_APP] || !netdev->dcbnl_ops->setapp) 657 if (!tb[DCB_ATTR_APP])
631 goto out; 658 goto out;
632 659
633 ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP], 660 ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
@@ -651,9 +678,18 @@ static int dcbnl_setapp(struct net_device *netdev, struct nlattr **tb,
651 id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]); 678 id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
652 up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]); 679 up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);
653 680
654 ret = dcbnl_reply(netdev->dcbnl_ops->setapp(netdev, idtype, id, up), 681 if (netdev->dcbnl_ops->setapp) {
655 RTM_SETDCB, DCB_CMD_SAPP, DCB_ATTR_APP, 682 err = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
656 pid, seq, flags); 683 } else {
684 struct dcb_app app;
685 app.selector = idtype;
686 app.protocol = id;
687 app.priority = up;
688 err = dcb_setapp(netdev, &app);
689 }
690
691 ret = dcbnl_reply(err, RTM_SETDCB, DCB_CMD_SAPP, DCB_ATTR_APP,
692 pid, seq, flags);
657out: 693out:
658 return ret; 694 return ret;
659} 695}
@@ -1118,6 +1154,281 @@ err:
1118 return ret; 1154 return ret;
1119} 1155}
1120 1156
1157/* Handle IEEE 802.1Qaz SET commands. If any requested operation can not
1158 * be completed the entire msg is aborted and error value is returned.
1159 * No attempt is made to reconcile the case where only part of the
1160 * cmd can be completed.
1161 */
1162static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
1163 u32 pid, u32 seq, u16 flags)
1164{
1165 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1166 struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
1167 int err = -EOPNOTSUPP;
1168
1169 if (!ops)
1170 goto err;
1171
1172 err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
1173 tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
1174 if (err)
1175 goto err;
1176
1177 if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
1178 struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);
1179 err = ops->ieee_setets(netdev, ets);
1180 if (err)
1181 goto err;
1182 }
1183
1184 if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setets) {
1185 struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
1186 err = ops->ieee_setpfc(netdev, pfc);
1187 if (err)
1188 goto err;
1189 }
1190
1191 if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
1192 struct nlattr *attr;
1193 int rem;
1194
1195 nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
1196 struct dcb_app *app_data;
1197 if (nla_type(attr) != DCB_ATTR_IEEE_APP)
1198 continue;
1199 app_data = nla_data(attr);
1200 if (ops->ieee_setapp)
1201 err = ops->ieee_setapp(netdev, app_data);
1202 else
1203 err = dcb_setapp(netdev, app_data);
1204 if (err)
1205 goto err;
1206 }
1207 }
1208
1209err:
1210 dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_SET, DCB_ATTR_IEEE,
1211 pid, seq, flags);
1212 return err;
1213}
1214
1215
/* Handle IEEE 802.1Qaz GET commands.
 * Builds a fresh skb containing the interface name plus nested ETS, PFC
 * and APP-table attributes, then unicasts it to the requesting pid.
 * Returns 0/positive from rtnl_unicast on success, negative errno or -1
 * on failure (message construction errors collapse to -1).
 */
static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb,
			  u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *ieee, *app;
	struct dcb_app_type *itr;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	/* NLMSG_NEW() hides a goto to the nlmsg_failure label below when
	 * the skb has no room for the header. */
	nlh = NLMSG_NEW(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_IEEE_GET;

	/* NLA_PUT_* macros likewise jump to nla_put_failure on overflow. */
	NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);

	ieee = nla_nest_start(skb, DCB_ATTR_IEEE);
	if (!ieee)
		goto nla_put_failure;

	/* ETS/PFC are best-effort: a driver error just omits the attribute
	 * rather than failing the whole dump. */
	if (ops->ieee_getets) {
		struct ieee_ets ets;
		err = ops->ieee_getets(netdev, &ets);
		if (!err)
			NLA_PUT(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets);
	}

	if (ops->ieee_getpfc) {
		struct ieee_pfc pfc;
		err = ops->ieee_getpfc(netdev, &pfc);
		if (!err)
			NLA_PUT(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc);
	}

	app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE);
	if (!app)
		goto nla_put_failure;

	/* Dump every software app-table entry belonging to this device;
	 * nla_put() is called (not the macro) so the lock can be dropped
	 * before bailing out. */
	spin_lock(&dcb_lock);
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (strncmp(itr->name, netdev->name, IFNAMSIZ) == 0) {
			err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
				      &itr->app);
			if (err) {
				spin_unlock(&dcb_lock);
				goto nla_put_failure;
			}
		}
	}
	spin_unlock(&dcb_lock);
	nla_nest_end(skb, app);

	nla_nest_end(skb, ieee);
	nlmsg_end(skb, nlh);

	return rtnl_unicast(skb, &init_net, pid);
nla_put_failure:
	nlmsg_cancel(skb, nlh);
nlmsg_failure:
	kfree_skb(skb);
	return -1;
}
1289
1290/* DCBX configuration */
1291static int dcbnl_getdcbx(struct net_device *netdev, struct nlattr **tb,
1292 u32 pid, u32 seq, u16 flags)
1293{
1294 int ret;
1295
1296 if (!netdev->dcbnl_ops->getdcbx)
1297 return -EOPNOTSUPP;
1298
1299 ret = dcbnl_reply(netdev->dcbnl_ops->getdcbx(netdev), RTM_GETDCB,
1300 DCB_CMD_GDCBX, DCB_ATTR_DCBX, pid, seq, flags);
1301
1302 return ret;
1303}
1304
1305static int dcbnl_setdcbx(struct net_device *netdev, struct nlattr **tb,
1306 u32 pid, u32 seq, u16 flags)
1307{
1308 int ret;
1309 u8 value;
1310
1311 if (!netdev->dcbnl_ops->setdcbx)
1312 return -EOPNOTSUPP;
1313
1314 if (!tb[DCB_ATTR_DCBX])
1315 return -EINVAL;
1316
1317 value = nla_get_u8(tb[DCB_ATTR_DCBX]);
1318
1319 ret = dcbnl_reply(netdev->dcbnl_ops->setdcbx(netdev, value),
1320 RTM_SETDCB, DCB_CMD_SDCBX, DCB_ATTR_DCBX,
1321 pid, seq, flags);
1322
1323 return ret;
1324}
1325
/* Handle DCB_CMD_GFEATCFG: query per-feature (PG/PFC/APP) enable state
 * from the driver and unicast the nested results back to the sender.
 * DCB_FEATCFG_ATTR_ALL requests every feature; otherwise only the
 * attributes present in the request are queried.
 */
static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlattr **tb,
			    u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest;
	u8 value;
	int ret, i;
	int getall = 0;

	if (!netdev->dcbnl_ops->getfeatcfg)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_FEATCFG])
		return -EINVAL;

	ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
			       dcbnl_featcfg_nest);
	if (ret)
		goto err_out;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb) {
		ret = -ENOBUFS;
		goto err_out;
	}

	/* NLMSG_NEW() hides a goto to nlmsg_failure on overflow. */
	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_GFEATCFG;

	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_FEATCFG);
	if (!nest) {
		ret = -EMSGSIZE;
		goto nla_put_failure;
	}

	if (data[DCB_FEATCFG_ATTR_ALL])
		getall = 1;

	/* Iterate the real feature attributes (everything after _ALL). */
	for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value);
		if (!ret)
			ret = nla_put_u8(dcbnl_skb, i, value);

		/* Either the driver query or the put failed: unwind the
		 * partially-built nest before cancelling the message. */
		if (ret) {
			nla_nest_cancel(dcbnl_skb, nest);
			goto nla_put_failure;
		}
	}
	nla_nest_end(dcbnl_skb, nest);

	nlmsg_end(dcbnl_skb, nlh);

	return rtnl_unicast(dcbnl_skb, &init_net, pid);
nla_put_failure:
	nlmsg_cancel(dcbnl_skb, nlh);
nlmsg_failure:
	kfree_skb(dcbnl_skb);
err_out:
	return ret;
}
1394
1395static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlattr **tb,
1396 u32 pid, u32 seq, u16 flags)
1397{
1398 struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1];
1399 int ret, i;
1400 u8 value;
1401
1402 if (!netdev->dcbnl_ops->setfeatcfg)
1403 return -ENOTSUPP;
1404
1405 if (!tb[DCB_ATTR_FEATCFG])
1406 return -EINVAL;
1407
1408 ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
1409 dcbnl_featcfg_nest);
1410
1411 if (ret)
1412 goto err;
1413
1414 for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
1415 if (data[i] == NULL)
1416 continue;
1417
1418 value = nla_get_u8(data[i]);
1419
1420 ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value);
1421
1422 if (ret)
1423 goto err;
1424 }
1425err:
1426 dcbnl_reply(ret, RTM_SETDCB, DCB_CMD_SFEATCFG, DCB_ATTR_FEATCFG,
1427 pid, seq, flags);
1428
1429 return ret;
1430}
1431
1121static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 1432static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1122{ 1433{
1123 struct net *net = sock_net(skb->sk); 1434 struct net *net = sock_net(skb->sk);
@@ -1223,6 +1534,30 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1223 ret = dcbnl_setapp(netdev, tb, pid, nlh->nlmsg_seq, 1534 ret = dcbnl_setapp(netdev, tb, pid, nlh->nlmsg_seq,
1224 nlh->nlmsg_flags); 1535 nlh->nlmsg_flags);
1225 goto out; 1536 goto out;
1537 case DCB_CMD_IEEE_SET:
1538 ret = dcbnl_ieee_set(netdev, tb, pid, nlh->nlmsg_seq,
1539 nlh->nlmsg_flags);
1540 goto out;
1541 case DCB_CMD_IEEE_GET:
1542 ret = dcbnl_ieee_get(netdev, tb, pid, nlh->nlmsg_seq,
1543 nlh->nlmsg_flags);
1544 goto out;
1545 case DCB_CMD_GDCBX:
1546 ret = dcbnl_getdcbx(netdev, tb, pid, nlh->nlmsg_seq,
1547 nlh->nlmsg_flags);
1548 goto out;
1549 case DCB_CMD_SDCBX:
1550 ret = dcbnl_setdcbx(netdev, tb, pid, nlh->nlmsg_seq,
1551 nlh->nlmsg_flags);
1552 goto out;
1553 case DCB_CMD_GFEATCFG:
1554 ret = dcbnl_getfeatcfg(netdev, tb, pid, nlh->nlmsg_seq,
1555 nlh->nlmsg_flags);
1556 goto out;
1557 case DCB_CMD_SFEATCFG:
1558 ret = dcbnl_setfeatcfg(netdev, tb, pid, nlh->nlmsg_seq,
1559 nlh->nlmsg_flags);
1560 goto out;
1226 default: 1561 default:
1227 goto errout; 1562 goto errout;
1228 } 1563 }
@@ -1233,8 +1568,95 @@ out:
1233 return ret; 1568 return ret;
1234} 1569}
1235 1570
1571/**
1572 * dcb_getapp - retrieve the DCBX application user priority
1573 *
1574 * On success returns a non-zero 802.1p user priority bitmap
1575 * otherwise returns 0 as the invalid user priority bitmap to
1576 * indicate an error.
1577 */
1578u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
1579{
1580 struct dcb_app_type *itr;
1581 u8 prio = 0;
1582
1583 spin_lock(&dcb_lock);
1584 list_for_each_entry(itr, &dcb_app_list, list) {
1585 if (itr->app.selector == app->selector &&
1586 itr->app.protocol == app->protocol &&
1587 (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) {
1588 prio = itr->app.priority;
1589 break;
1590 }
1591 }
1592 spin_unlock(&dcb_lock);
1593
1594 return prio;
1595}
1596EXPORT_SYMBOL(dcb_getapp);
1597
/**
 * dcb_setapp - add dcb application data to app list
 * @dev: device whose software app table is updated
 * @new: selector/protocol/priority triple to store
 *
 * (Fixed kernel-doc name: the comment previously said
 * "ixgbe_dcbnl_setapp", a copy-paste from the ixgbe driver.)
 *
 * Priority 0 is the default priority this removes applications
 * from the app list if the priority is set to zero.
 */
u8 dcb_setapp(struct net_device *dev, struct dcb_app *new)
{
	struct dcb_app_type *itr;

	spin_lock(&dcb_lock);
	/* Search for existing match and replace */
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->app.selector == new->selector &&
		    itr->app.protocol == new->protocol &&
		    (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) {
			if (new->priority)
				itr->app.priority = new->priority;
			else {
				/* Priority 0 deletes the entry. */
				list_del(&itr->list);
				kfree(itr);
			}
			goto out;
		}
	}
	/* App type does not exist add new application type */
	if (new->priority) {
		struct dcb_app_type *entry;
		/* GFP_ATOMIC: allocating under the spinlock. */
		entry = kmalloc(sizeof(struct dcb_app_type), GFP_ATOMIC);
		if (!entry) {
			spin_unlock(&dcb_lock);
			/* NOTE(review): the return type is u8, so -ENOMEM is
			 * truncated to a positive value here — callers cannot
			 * distinguish it from a success code; verify intent. */
			return -ENOMEM;
		}

		memcpy(&entry->app, new, sizeof(*new));
		strncpy(entry->name, dev->name, IFNAMSIZ);
		list_add(&entry->list, &dcb_app_list);
	}
out:
	spin_unlock(&dcb_lock);
	/* Notify listeners (e.g. drivers) that the app table changed. */
	call_dcbevent_notifiers(DCB_APP_EVENT, new);
	return 0;
}
EXPORT_SYMBOL(dcb_setapp);
1642
1643static void dcb_flushapp(void)
1644{
1645 struct dcb_app_type *app;
1646 struct dcb_app_type *tmp;
1647
1648 spin_lock(&dcb_lock);
1649 list_for_each_entry_safe(app, tmp, &dcb_app_list, list) {
1650 list_del(&app->list);
1651 kfree(app);
1652 }
1653 spin_unlock(&dcb_lock);
1654}
1655
1236static int __init dcbnl_init(void) 1656static int __init dcbnl_init(void)
1237{ 1657{
1658 INIT_LIST_HEAD(&dcb_app_list);
1659
1238 rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL); 1660 rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL);
1239 rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL); 1661 rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL);
1240 1662
@@ -1246,7 +1668,6 @@ static void __exit dcbnl_exit(void)
1246{ 1668{
1247 rtnl_unregister(PF_UNSPEC, RTM_GETDCB); 1669 rtnl_unregister(PF_UNSPEC, RTM_GETDCB);
1248 rtnl_unregister(PF_UNSPEC, RTM_SETDCB); 1670 rtnl_unregister(PF_UNSPEC, RTM_SETDCB);
1671 dcb_flushapp();
1249} 1672}
1250module_exit(dcbnl_exit); 1673module_exit(dcbnl_exit);
1251
1252