author	Shmulik Ravid <shmulikr@broadcom.com>	2011-07-05 02:16:25 -0400
committer	David S. Miller <davem@davemloft.net>	2011-07-06 02:42:17 -0400
commit	5b7f7626743e0912958981343b47ac0ab2206b1c (patch)
tree	1530285d4304e1c5cbad44214f2a19a9dfa8f518 /net/dcb/dcbnl.c
parent	37cf4d1a9b0903b874a638d0f8649873ddde8a12 (diff)
dcbnl: Add CEE notification
This patch adds an unsolicited notification of the DCBX negotiated parameters for the CEE flavor of the DCBX protocol. The notification message is identical to the aggregated CEE get operation and holds all the pertinent local and peer information. The notification routine is exported so it can be invoked by drivers supporting an embedded DCBX stack.

Signed-off-by: Shmulik Ravid <shmulikr@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/dcb/dcbnl.c')
-rw-r--r--	net/dcb/dcbnl.c	415
1 files changed, 225 insertions, 190 deletions
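Usage note: the commit message states that the notification routine is exported so drivers carrying an embedded DCBX stack can invoke it. The sketch below shows roughly how such a driver-side call might look once a CEE negotiation completes; it is illustrative only. my_drv_dcbx_negotiation_done() is a hypothetical driver hook, and the choice of RTM_GETDCB as the event argument is an assumption -- only dcbnl_cee_notify(), DCB_CMD_CEE_GET and the rtnetlink/dcbnl headers come from the kernel tree.

#include <linux/rtnetlink.h>
#include <net/dcbnl.h>

/* Hypothetical driver hook, called when the embedded (firmware) DCBX stack
 * has finished a CEE negotiation with the link peer. It broadcasts the
 * aggregated CEE state via the routine exported by this patch; seq and pid
 * are 0 because the message is unsolicited.
 */
static void my_drv_dcbx_negotiation_done(struct net_device *dev)
{
	dcbnl_cee_notify(dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0);
}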
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index d5b45a201c1b..6a015f211fee 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -1310,8 +1310,196 @@ nla_put_failure:
 	return err;
 }
 
-int dcbnl_notify(struct net_device *dev, int event, int cmd,
-		 u32 seq, u32 pid)
+static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
+			     int dir)
+{
+	u8 pgid, up_map, prio, tc_pct;
+	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
+	int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG;
+	struct nlattr *pg = nla_nest_start(skb, i);
+
+	if (!pg)
+		goto nla_put_failure;
+
+	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
+		struct nlattr *tc_nest = nla_nest_start(skb, i);
+
+		if (!tc_nest)
+			goto nla_put_failure;
+
+		pgid = DCB_ATTR_VALUE_UNDEFINED;
+		prio = DCB_ATTR_VALUE_UNDEFINED;
+		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
+		up_map = DCB_ATTR_VALUE_UNDEFINED;
+
+		if (!dir)
+			ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0,
+					  &prio, &pgid, &tc_pct, &up_map);
+		else
+			ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
+					  &prio, &pgid, &tc_pct, &up_map);
+
+		NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_PGID, pgid);
+		NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
+		NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
+		NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct);
+		nla_nest_end(skb, tc_nest);
+	}
+
+	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
+		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
+
+		if (!dir)
+			ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0,
+					   &tc_pct);
+		else
+			ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
+					   &tc_pct);
+		NLA_PUT_U8(skb, i, tc_pct);
+	}
+	nla_nest_end(skb, pg);
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct nlattr *cee, *app;
+	struct dcb_app_type *itr;
+	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
+	int dcbx, i, err = -EMSGSIZE;
+	u8 value;
+
+	NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);
+
+	cee = nla_nest_start(skb, DCB_ATTR_CEE);
+	if (!cee)
+		goto nla_put_failure;
+
+	/* local pg */
+	if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) {
+		err = dcbnl_cee_pg_fill(skb, netdev, 1);
+		if (err)
+			goto nla_put_failure;
+	}
+
+	if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) {
+		err = dcbnl_cee_pg_fill(skb, netdev, 0);
+		if (err)
+			goto nla_put_failure;
+	}
+
+	/* local pfc */
+	if (ops->getpfccfg) {
+		struct nlattr *pfc_nest = nla_nest_start(skb, DCB_ATTR_CEE_PFC);
+
+		if (!pfc_nest)
+			goto nla_put_failure;
+
+		for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
+			ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
+			NLA_PUT_U8(skb, i, value);
+		}
+		nla_nest_end(skb, pfc_nest);
+	}
+
+	/* local app */
+	spin_lock(&dcb_lock);
+	app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE);
+	if (!app)
+		goto nla_put_failure;
+
+	list_for_each_entry(itr, &dcb_app_list, list) {
+		if (strncmp(itr->name, netdev->name, IFNAMSIZ) == 0) {
+			struct nlattr *app_nest = nla_nest_start(skb,
+								 DCB_ATTR_APP);
+			if (!app_nest)
+				goto dcb_unlock;
+
+			err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE,
+					 itr->app.selector);
+			if (err)
+				goto dcb_unlock;
+
+			err = nla_put_u16(skb, DCB_APP_ATTR_ID,
+					  itr->app.protocol);
+			if (err)
+				goto dcb_unlock;
+
+			err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY,
+					 itr->app.priority);
+			if (err)
+				goto dcb_unlock;
+
+			nla_nest_end(skb, app_nest);
+		}
+	}
+	nla_nest_end(skb, app);
+
+	if (netdev->dcbnl_ops->getdcbx)
+		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
+	else
+		dcbx = -EOPNOTSUPP;
+
+	spin_unlock(&dcb_lock);
+
+	/* features flags */
+	if (ops->getfeatcfg) {
+		struct nlattr *feat = nla_nest_start(skb, DCB_ATTR_CEE_FEAT);
+		if (!feat)
+			goto nla_put_failure;
+
+		for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
+		     i++)
+			if (!ops->getfeatcfg(netdev, i, &value))
+				NLA_PUT_U8(skb, i, value);
+
+		nla_nest_end(skb, feat);
+	}
+
+	/* peer info if available */
+	if (ops->cee_peer_getpg) {
+		struct cee_pg pg;
+		err = ops->cee_peer_getpg(netdev, &pg);
+		if (!err)
+			NLA_PUT(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg);
+	}
+
+	if (ops->cee_peer_getpfc) {
+		struct cee_pfc pfc;
+		err = ops->cee_peer_getpfc(netdev, &pfc);
+		if (!err)
+			NLA_PUT(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc);
+	}
+
+	if (ops->peer_getappinfo && ops->peer_getapptable) {
+		err = dcbnl_build_peer_app(netdev, skb,
+					   DCB_ATTR_CEE_PEER_APP_TABLE,
+					   DCB_ATTR_CEE_PEER_APP_INFO,
+					   DCB_ATTR_CEE_PEER_APP);
+		if (err)
+			goto nla_put_failure;
+	}
+	nla_nest_end(skb, cee);
+
+	/* DCBX state */
+	if (dcbx >= 0) {
+		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
+		if (err)
+			goto nla_put_failure;
+	}
+	return 0;
+
+dcb_unlock:
+	spin_unlock(&dcb_lock);
+nla_put_failure:
+	return err;
+}
+
+static int dcbnl_notify(struct net_device *dev, int event, int cmd,
+			u32 seq, u32 pid, int dcbx_ver)
 {
 	struct net *net = dev_net(dev);
 	struct sk_buff *skb;
@@ -1337,7 +1525,11 @@ int dcbnl_notify(struct net_device *dev, int event, int cmd,
 	dcb->dcb_family = AF_UNSPEC;
 	dcb->cmd = cmd;
 
-	err = dcbnl_ieee_fill(skb, dev);
+	if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
+		err = dcbnl_ieee_fill(skb, dev);
+	else
+		err = dcbnl_cee_fill(skb, dev);
+
 	if (err < 0) {
 		/* Report error to broadcast listeners */
 		nlmsg_cancel(skb, nlh);
@@ -1351,7 +1543,20 @@ int dcbnl_notify(struct net_device *dev, int event, int cmd,
 
 	return err;
 }
-EXPORT_SYMBOL(dcbnl_notify);
+
+int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
+		      u32 seq, u32 pid)
+{
+	return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_IEEE);
+}
+EXPORT_SYMBOL(dcbnl_ieee_notify);
+
+int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
+		     u32 seq, u32 pid)
+{
+	return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_CEE);
+}
+EXPORT_SYMBOL(dcbnl_cee_notify);
 
 /* Handle IEEE 802.1Qaz SET commands. If any requested operation can not
  * be completed the entire msg is aborted and error value is returned.
@@ -1411,7 +1616,7 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
 err:
 	dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_SET, DCB_ATTR_IEEE,
 		    pid, seq, flags);
-	dcbnl_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
+	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
 	return err;
 }
 
@@ -1495,7 +1700,7 @@ static int dcbnl_ieee_del(struct net_device *netdev, struct nlattr **tb,
 err:
 	dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_DEL, DCB_ATTR_IEEE,
 		    pid, seq, flags);
-	dcbnl_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
+	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
 	return err;
 }
 
@@ -1642,72 +1847,16 @@ err:
 	return ret;
 }
 
-static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
-			     int dir)
-{
-	u8 pgid, up_map, prio, tc_pct;
-	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
-	int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG;
-	struct nlattr *pg = nla_nest_start(skb, i);
-
-	if (!pg)
-		goto nla_put_failure;
-
-	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
-		struct nlattr *tc_nest = nla_nest_start(skb, i);
-
-		if (!tc_nest)
-			goto nla_put_failure;
-
-		pgid = DCB_ATTR_VALUE_UNDEFINED;
-		prio = DCB_ATTR_VALUE_UNDEFINED;
-		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
-		up_map = DCB_ATTR_VALUE_UNDEFINED;
-
-		if (!dir)
-			ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0,
-					  &prio, &pgid, &tc_pct, &up_map);
-		else
-			ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
-					  &prio, &pgid, &tc_pct, &up_map);
-
-		NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_PGID, pgid);
-		NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
-		NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
-		NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct);
-		nla_nest_end(skb, tc_nest);
-	}
-
-	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
-		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
-
-		if (!dir)
-			ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0,
-					   &tc_pct);
-		else
-			ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
-					   &tc_pct);
-		NLA_PUT_U8(skb, i, tc_pct);
-	}
-	nla_nest_end(skb, pg);
-	return 0;
-
-nla_put_failure:
-	return -EMSGSIZE;
-}
-
 /* Handle CEE DCBX GET commands. */
 static int dcbnl_cee_get(struct net_device *netdev, struct nlattr **tb,
 			 u32 pid, u32 seq, u16 flags)
 {
+	struct net *net = dev_net(netdev);
 	struct sk_buff *skb;
 	struct nlmsghdr *nlh;
 	struct dcbmsg *dcb;
-	struct nlattr *cee, *app;
-	struct dcb_app_type *itr;
 	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
-	int dcbx, i, err = -EMSGSIZE;
-	u8 value;
+	int err;
 
 	if (!ops)
 		return -EOPNOTSUPP;
@@ -1716,139 +1865,25 @@ static int dcbnl_cee_get(struct net_device *netdev, struct nlattr **tb,
 	if (!skb)
 		return -ENOBUFS;
 
-	nlh = NLMSG_NEW(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
+	nlh = nlmsg_put(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
+	if (nlh == NULL) {
+		nlmsg_free(skb);
+		return -EMSGSIZE;
+	}
 
 	dcb = NLMSG_DATA(nlh);
 	dcb->dcb_family = AF_UNSPEC;
 	dcb->cmd = DCB_CMD_CEE_GET;
 
-	NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);
-
-	cee = nla_nest_start(skb, DCB_ATTR_CEE);
-	if (!cee)
-		goto nla_put_failure;
-
-	/* local pg */
-	if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) {
-		err = dcbnl_cee_pg_fill(skb, netdev, 1);
-		if (err)
-			goto nla_put_failure;
-	}
-
-	if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) {
-		err = dcbnl_cee_pg_fill(skb, netdev, 0);
-		if (err)
-			goto nla_put_failure;
-	}
-
-	/* local pfc */
-	if (ops->getpfccfg) {
-		struct nlattr *pfc_nest = nla_nest_start(skb, DCB_ATTR_CEE_PFC);
-
-		if (!pfc_nest)
-			goto nla_put_failure;
+	err = dcbnl_cee_fill(skb, netdev);
 
-		for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
-			ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
-			NLA_PUT_U8(skb, i, value);
-		}
-		nla_nest_end(skb, pfc_nest);
-	}
-
-	/* local app */
-	spin_lock(&dcb_lock);
-	app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE);
-	if (!app)
-		goto nla_put_failure;
-
-	list_for_each_entry(itr, &dcb_app_list, list) {
-		if (strncmp(itr->name, netdev->name, IFNAMSIZ) == 0) {
-			struct nlattr *app_nest = nla_nest_start(skb,
-								 DCB_ATTR_APP);
-			if (!app_nest)
-				goto dcb_unlock;
-
-			err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE,
-					 itr->app.selector);
-			if (err)
-				goto dcb_unlock;
-
-			err = nla_put_u16(skb, DCB_APP_ATTR_ID,
-					  itr->app.protocol);
-			if (err)
-				goto dcb_unlock;
-
-			err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY,
-					 itr->app.priority);
-			if (err)
-				goto dcb_unlock;
-
-			nla_nest_end(skb, app_nest);
-		}
-	}
-	nla_nest_end(skb, app);
-
-	if (netdev->dcbnl_ops->getdcbx)
-		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
-	else
-		dcbx = -EOPNOTSUPP;
-
-	spin_unlock(&dcb_lock);
-
-	/* features flags */
-	if (ops->getfeatcfg) {
-		struct nlattr *feat = nla_nest_start(skb, DCB_ATTR_CEE_FEAT);
-		if (!feat)
-			goto nla_put_failure;
-
-		for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
-		     i++)
-			if (!ops->getfeatcfg(netdev, i, &value))
-				NLA_PUT_U8(skb, i, value);
-
-		nla_nest_end(skb, feat);
-	}
-
-	/* peer info if available */
-	if (ops->cee_peer_getpg) {
-		struct cee_pg pg;
-		err = ops->cee_peer_getpg(netdev, &pg);
-		if (!err)
-			NLA_PUT(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg);
-	}
-
-	if (ops->cee_peer_getpfc) {
-		struct cee_pfc pfc;
-		err = ops->cee_peer_getpfc(netdev, &pfc);
-		if (!err)
-			NLA_PUT(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc);
-	}
-
-	if (ops->peer_getappinfo && ops->peer_getapptable) {
-		err = dcbnl_build_peer_app(netdev, skb,
-					   DCB_ATTR_CEE_PEER_APP_TABLE,
-					   DCB_ATTR_CEE_PEER_APP_INFO,
-					   DCB_ATTR_CEE_PEER_APP);
-		if (err)
-			goto nla_put_failure;
-	}
-	nla_nest_end(skb, cee);
-
-	/* DCBX state */
-	if (dcbx >= 0) {
-		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
-		if (err)
-			goto nla_put_failure;
+	if (err < 0) {
+		nlmsg_cancel(skb, nlh);
+		nlmsg_free(skb);
+	} else {
+		nlmsg_end(skb, nlh);
+		err = rtnl_unicast(skb, net, pid);
 	}
-	nlmsg_end(skb, nlh);
-	return rtnl_unicast(skb, &init_net, pid);
-
-dcb_unlock:
-	spin_unlock(&dcb_lock);
-nla_put_failure:
-	nlmsg_cancel(skb, nlh);
-nlmsg_failure:
-	nlmsg_free(skb);
 	return err;
 }
 