diff options
| author | Jiri Pirko <jiri@mellanox.com> | 2015-10-01 05:03:46 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2015-10-03 07:49:40 -0400 |
| commit | 9e8f4a548ab4710002c23c94c4b1bbde91b5e335 (patch) | |
| tree | c2fe29079ccd3dd3a5218ad966ed0bab047d5e69 /net/switchdev | |
| parent | 648b4a995a057187ddd77cdb181e6a0b24ab2959 (diff) | |
switchdev: push object ID back to object structure
Suggested-by: Scott Feldman <sfeldma@gmail.com>
Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Acked-by: Scott Feldman <sfeldma@gmail.com>
Reviewed-by: Vivien Didelot <vivien.didelot@savoirfairelinux.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/switchdev')
| -rw-r--r-- | net/switchdev/switchdev.c | 57 |
1 file changed, 29 insertions, 28 deletions
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index 0402b3633100..6e4a4f9ad927 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c | |||
| @@ -270,7 +270,6 @@ int switchdev_port_attr_set(struct net_device *dev, struct switchdev_attr *attr) | |||
| 270 | EXPORT_SYMBOL_GPL(switchdev_port_attr_set); | 270 | EXPORT_SYMBOL_GPL(switchdev_port_attr_set); |
| 271 | 271 | ||
| 272 | static int __switchdev_port_obj_add(struct net_device *dev, | 272 | static int __switchdev_port_obj_add(struct net_device *dev, |
| 273 | enum switchdev_obj_id id, | ||
| 274 | const struct switchdev_obj *obj, | 273 | const struct switchdev_obj *obj, |
| 275 | struct switchdev_trans *trans) | 274 | struct switchdev_trans *trans) |
| 276 | { | 275 | { |
| @@ -280,7 +279,7 @@ static int __switchdev_port_obj_add(struct net_device *dev, | |||
| 280 | int err = -EOPNOTSUPP; | 279 | int err = -EOPNOTSUPP; |
| 281 | 280 | ||
| 282 | if (ops && ops->switchdev_port_obj_add) | 281 | if (ops && ops->switchdev_port_obj_add) |
| 283 | return ops->switchdev_port_obj_add(dev, id, obj, trans); | 282 | return ops->switchdev_port_obj_add(dev, obj, trans); |
| 284 | 283 | ||
| 285 | /* Switch device port(s) may be stacked under | 284 | /* Switch device port(s) may be stacked under |
| 286 | * bond/team/vlan dev, so recurse down to add object on | 285 | * bond/team/vlan dev, so recurse down to add object on |
| @@ -288,7 +287,7 @@ static int __switchdev_port_obj_add(struct net_device *dev, | |||
| 288 | */ | 287 | */ |
| 289 | 288 | ||
| 290 | netdev_for_each_lower_dev(dev, lower_dev, iter) { | 289 | netdev_for_each_lower_dev(dev, lower_dev, iter) { |
| 291 | err = __switchdev_port_obj_add(lower_dev, id, obj, trans); | 290 | err = __switchdev_port_obj_add(lower_dev, obj, trans); |
| 292 | if (err) | 291 | if (err) |
| 293 | break; | 292 | break; |
| 294 | } | 293 | } |
| @@ -309,7 +308,7 @@ static int __switchdev_port_obj_add(struct net_device *dev, | |||
| 309 | * | 308 | * |
| 310 | * rtnl_lock must be held. | 309 | * rtnl_lock must be held. |
| 311 | */ | 310 | */ |
| 312 | int switchdev_port_obj_add(struct net_device *dev, enum switchdev_obj_id id, | 311 | int switchdev_port_obj_add(struct net_device *dev, |
| 313 | const struct switchdev_obj *obj) | 312 | const struct switchdev_obj *obj) |
| 314 | { | 313 | { |
| 315 | struct switchdev_trans trans; | 314 | struct switchdev_trans trans; |
| @@ -327,7 +326,7 @@ int switchdev_port_obj_add(struct net_device *dev, enum switchdev_obj_id id, | |||
| 327 | */ | 326 | */ |
| 328 | 327 | ||
| 329 | trans.ph_prepare = true; | 328 | trans.ph_prepare = true; |
| 330 | err = __switchdev_port_obj_add(dev, id, obj, &trans); | 329 | err = __switchdev_port_obj_add(dev, obj, &trans); |
| 331 | if (err) { | 330 | if (err) { |
| 332 | /* Prepare phase failed: abort the transaction. Any | 331 | /* Prepare phase failed: abort the transaction. Any |
| 333 | * resources reserved in the prepare phase are | 332 | * resources reserved in the prepare phase are |
| @@ -346,8 +345,8 @@ int switchdev_port_obj_add(struct net_device *dev, enum switchdev_obj_id id, | |||
| 346 | */ | 345 | */ |
| 347 | 346 | ||
| 348 | trans.ph_prepare = false; | 347 | trans.ph_prepare = false; |
| 349 | err = __switchdev_port_obj_add(dev, id, obj, &trans); | 348 | err = __switchdev_port_obj_add(dev, obj, &trans); |
| 350 | WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, id); | 349 | WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id); |
| 351 | switchdev_trans_items_warn_destroy(dev, &trans); | 350 | switchdev_trans_items_warn_destroy(dev, &trans); |
| 352 | 351 | ||
| 353 | return err; | 352 | return err; |
| @@ -361,7 +360,7 @@ EXPORT_SYMBOL_GPL(switchdev_port_obj_add); | |||
| 361 | * @id: object ID | 360 | * @id: object ID |
| 362 | * @obj: object to delete | 361 | * @obj: object to delete |
| 363 | */ | 362 | */ |
| 364 | int switchdev_port_obj_del(struct net_device *dev, enum switchdev_obj_id id, | 363 | int switchdev_port_obj_del(struct net_device *dev, |
| 365 | const struct switchdev_obj *obj) | 364 | const struct switchdev_obj *obj) |
| 366 | { | 365 | { |
| 367 | const struct switchdev_ops *ops = dev->switchdev_ops; | 366 | const struct switchdev_ops *ops = dev->switchdev_ops; |
| @@ -370,7 +369,7 @@ int switchdev_port_obj_del(struct net_device *dev, enum switchdev_obj_id id, | |||
| 370 | int err = -EOPNOTSUPP; | 369 | int err = -EOPNOTSUPP; |
| 371 | 370 | ||
| 372 | if (ops && ops->switchdev_port_obj_del) | 371 | if (ops && ops->switchdev_port_obj_del) |
| 373 | return ops->switchdev_port_obj_del(dev, id, obj); | 372 | return ops->switchdev_port_obj_del(dev, obj); |
| 374 | 373 | ||
| 375 | /* Switch device port(s) may be stacked under | 374 | /* Switch device port(s) may be stacked under |
| 376 | * bond/team/vlan dev, so recurse down to delete object on | 375 | * bond/team/vlan dev, so recurse down to delete object on |
| @@ -378,7 +377,7 @@ int switchdev_port_obj_del(struct net_device *dev, enum switchdev_obj_id id, | |||
| 378 | */ | 377 | */ |
| 379 | 378 | ||
| 380 | netdev_for_each_lower_dev(dev, lower_dev, iter) { | 379 | netdev_for_each_lower_dev(dev, lower_dev, iter) { |
| 381 | err = switchdev_port_obj_del(lower_dev, id, obj); | 380 | err = switchdev_port_obj_del(lower_dev, obj); |
| 382 | if (err) | 381 | if (err) |
| 383 | break; | 382 | break; |
| 384 | } | 383 | } |
| @@ -395,8 +394,7 @@ EXPORT_SYMBOL_GPL(switchdev_port_obj_del); | |||
| 395 | * @obj: object to dump | 394 | * @obj: object to dump |
| 396 | * @cb: function to call with a filled object | 395 | * @cb: function to call with a filled object |
| 397 | */ | 396 | */ |
| 398 | int switchdev_port_obj_dump(struct net_device *dev, enum switchdev_obj_id id, | 397 | int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj, |
| 399 | struct switchdev_obj *obj, | ||
| 400 | switchdev_obj_dump_cb_t *cb) | 398 | switchdev_obj_dump_cb_t *cb) |
| 401 | { | 399 | { |
| 402 | const struct switchdev_ops *ops = dev->switchdev_ops; | 400 | const struct switchdev_ops *ops = dev->switchdev_ops; |
| @@ -405,7 +403,7 @@ int switchdev_port_obj_dump(struct net_device *dev, enum switchdev_obj_id id, | |||
| 405 | int err = -EOPNOTSUPP; | 403 | int err = -EOPNOTSUPP; |
| 406 | 404 | ||
| 407 | if (ops && ops->switchdev_port_obj_dump) | 405 | if (ops && ops->switchdev_port_obj_dump) |
| 408 | return ops->switchdev_port_obj_dump(dev, id, obj, cb); | 406 | return ops->switchdev_port_obj_dump(dev, obj, cb); |
| 409 | 407 | ||
| 410 | /* Switch device port(s) may be stacked under | 408 | /* Switch device port(s) may be stacked under |
| 411 | * bond/team/vlan dev, so recurse down to dump objects on | 409 | * bond/team/vlan dev, so recurse down to dump objects on |
| @@ -413,7 +411,7 @@ int switchdev_port_obj_dump(struct net_device *dev, enum switchdev_obj_id id, | |||
| 413 | */ | 411 | */ |
| 414 | 412 | ||
| 415 | netdev_for_each_lower_dev(dev, lower_dev, iter) { | 413 | netdev_for_each_lower_dev(dev, lower_dev, iter) { |
| 416 | err = switchdev_port_obj_dump(lower_dev, id, obj, cb); | 414 | err = switchdev_port_obj_dump(lower_dev, obj, cb); |
| 417 | break; | 415 | break; |
| 418 | } | 416 | } |
| 419 | 417 | ||
| @@ -579,6 +577,7 @@ static int switchdev_port_vlan_fill(struct sk_buff *skb, struct net_device *dev, | |||
| 579 | u32 filter_mask) | 577 | u32 filter_mask) |
| 580 | { | 578 | { |
| 581 | struct switchdev_vlan_dump dump = { | 579 | struct switchdev_vlan_dump dump = { |
| 580 | .vlan.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, | ||
| 582 | .skb = skb, | 581 | .skb = skb, |
| 583 | .filter_mask = filter_mask, | 582 | .filter_mask = filter_mask, |
| 584 | }; | 583 | }; |
| @@ -586,8 +585,7 @@ static int switchdev_port_vlan_fill(struct sk_buff *skb, struct net_device *dev, | |||
| 586 | 585 | ||
| 587 | if ((filter_mask & RTEXT_FILTER_BRVLAN) || | 586 | if ((filter_mask & RTEXT_FILTER_BRVLAN) || |
| 588 | (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) { | 587 | (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) { |
| 589 | err = switchdev_port_obj_dump(dev, SWITCHDEV_OBJ_ID_PORT_VLAN, | 588 | err = switchdev_port_obj_dump(dev, &dump.vlan.obj, |
| 590 | &dump.vlan.obj, | ||
| 591 | switchdev_port_vlan_dump_cb); | 589 | switchdev_port_vlan_dump_cb); |
| 592 | if (err) | 590 | if (err) |
| 593 | goto err_out; | 591 | goto err_out; |
| @@ -701,12 +699,13 @@ static int switchdev_port_br_setlink_protinfo(struct net_device *dev, | |||
| 701 | static int switchdev_port_br_afspec(struct net_device *dev, | 699 | static int switchdev_port_br_afspec(struct net_device *dev, |
| 702 | struct nlattr *afspec, | 700 | struct nlattr *afspec, |
| 703 | int (*f)(struct net_device *dev, | 701 | int (*f)(struct net_device *dev, |
| 704 | enum switchdev_obj_id id, | ||
| 705 | const struct switchdev_obj *obj)) | 702 | const struct switchdev_obj *obj)) |
| 706 | { | 703 | { |
| 707 | struct nlattr *attr; | 704 | struct nlattr *attr; |
| 708 | struct bridge_vlan_info *vinfo; | 705 | struct bridge_vlan_info *vinfo; |
| 709 | struct switchdev_obj_port_vlan vlan = { {}, 0 }; | 706 | struct switchdev_obj_port_vlan vlan = { |
| 707 | .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, | ||
| 708 | }; | ||
| 710 | int rem; | 709 | int rem; |
| 711 | int err; | 710 | int err; |
| 712 | 711 | ||
| @@ -727,7 +726,7 @@ static int switchdev_port_br_afspec(struct net_device *dev, | |||
| 727 | vlan.vid_end = vinfo->vid; | 726 | vlan.vid_end = vinfo->vid; |
| 728 | if (vlan.vid_end <= vlan.vid_begin) | 727 | if (vlan.vid_end <= vlan.vid_begin) |
| 729 | return -EINVAL; | 728 | return -EINVAL; |
| 730 | err = f(dev, SWITCHDEV_OBJ_ID_PORT_VLAN, &vlan.obj); | 729 | err = f(dev, &vlan.obj); |
| 731 | if (err) | 730 | if (err) |
| 732 | return err; | 731 | return err; |
| 733 | memset(&vlan, 0, sizeof(vlan)); | 732 | memset(&vlan, 0, sizeof(vlan)); |
| @@ -736,7 +735,7 @@ static int switchdev_port_br_afspec(struct net_device *dev, | |||
| 736 | return -EINVAL; | 735 | return -EINVAL; |
| 737 | vlan.vid_begin = vinfo->vid; | 736 | vlan.vid_begin = vinfo->vid; |
| 738 | vlan.vid_end = vinfo->vid; | 737 | vlan.vid_end = vinfo->vid; |
| 739 | err = f(dev, SWITCHDEV_OBJ_ID_PORT_VLAN, &vlan.obj); | 738 | err = f(dev, &vlan.obj); |
| 740 | if (err) | 739 | if (err) |
| 741 | return err; | 740 | return err; |
| 742 | memset(&vlan, 0, sizeof(vlan)); | 741 | memset(&vlan, 0, sizeof(vlan)); |
| @@ -822,11 +821,12 @@ int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], | |||
| 822 | u16 vid, u16 nlm_flags) | 821 | u16 vid, u16 nlm_flags) |
| 823 | { | 822 | { |
| 824 | struct switchdev_obj_port_fdb fdb = { | 823 | struct switchdev_obj_port_fdb fdb = { |
| 824 | .obj.id = SWITCHDEV_OBJ_ID_PORT_FDB, | ||
| 825 | .addr = addr, | 825 | .addr = addr, |
| 826 | .vid = vid, | 826 | .vid = vid, |
| 827 | }; | 827 | }; |
| 828 | 828 | ||
| 829 | return switchdev_port_obj_add(dev, SWITCHDEV_OBJ_ID_PORT_FDB, &fdb.obj); | 829 | return switchdev_port_obj_add(dev, &fdb.obj); |
| 830 | } | 830 | } |
| 831 | EXPORT_SYMBOL_GPL(switchdev_port_fdb_add); | 831 | EXPORT_SYMBOL_GPL(switchdev_port_fdb_add); |
| 832 | 832 | ||
| @@ -846,11 +846,12 @@ int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], | |||
| 846 | u16 vid) | 846 | u16 vid) |
| 847 | { | 847 | { |
| 848 | struct switchdev_obj_port_fdb fdb = { | 848 | struct switchdev_obj_port_fdb fdb = { |
| 849 | .obj.id = SWITCHDEV_OBJ_ID_PORT_FDB, | ||
| 849 | .addr = addr, | 850 | .addr = addr, |
| 850 | .vid = vid, | 851 | .vid = vid, |
| 851 | }; | 852 | }; |
| 852 | 853 | ||
| 853 | return switchdev_port_obj_del(dev, SWITCHDEV_OBJ_ID_PORT_FDB, &fdb.obj); | 854 | return switchdev_port_obj_del(dev, &fdb.obj); |
| 854 | } | 855 | } |
| 855 | EXPORT_SYMBOL_GPL(switchdev_port_fdb_del); | 856 | EXPORT_SYMBOL_GPL(switchdev_port_fdb_del); |
| 856 | 857 | ||
| @@ -922,14 +923,14 @@ int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, | |||
| 922 | struct net_device *filter_dev, int idx) | 923 | struct net_device *filter_dev, int idx) |
| 923 | { | 924 | { |
| 924 | struct switchdev_fdb_dump dump = { | 925 | struct switchdev_fdb_dump dump = { |
| 926 | .fdb.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB, | ||
| 925 | .dev = dev, | 927 | .dev = dev, |
| 926 | .skb = skb, | 928 | .skb = skb, |
| 927 | .cb = cb, | 929 | .cb = cb, |
| 928 | .idx = idx, | 930 | .idx = idx, |
| 929 | }; | 931 | }; |
| 930 | 932 | ||
| 931 | switchdev_port_obj_dump(dev, SWITCHDEV_OBJ_ID_PORT_FDB, &dump.fdb.obj, | 933 | switchdev_port_obj_dump(dev, &dump.fdb.obj, switchdev_port_fdb_dump_cb); |
| 932 | switchdev_port_fdb_dump_cb); | ||
| 933 | return dump.idx; | 934 | return dump.idx; |
| 934 | } | 935 | } |
| 935 | EXPORT_SYMBOL_GPL(switchdev_port_fdb_dump); | 936 | EXPORT_SYMBOL_GPL(switchdev_port_fdb_dump); |
| @@ -1008,6 +1009,7 @@ int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi, | |||
| 1008 | u8 tos, u8 type, u32 nlflags, u32 tb_id) | 1009 | u8 tos, u8 type, u32 nlflags, u32 tb_id) |
| 1009 | { | 1010 | { |
| 1010 | struct switchdev_obj_ipv4_fib ipv4_fib = { | 1011 | struct switchdev_obj_ipv4_fib ipv4_fib = { |
| 1012 | .obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB, | ||
| 1011 | .dst = dst, | 1013 | .dst = dst, |
| 1012 | .dst_len = dst_len, | 1014 | .dst_len = dst_len, |
| 1013 | .fi = fi, | 1015 | .fi = fi, |
| @@ -1035,8 +1037,7 @@ int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi, | |||
| 1035 | if (!dev) | 1037 | if (!dev) |
| 1036 | return 0; | 1038 | return 0; |
| 1037 | 1039 | ||
| 1038 | err = switchdev_port_obj_add(dev, SWITCHDEV_OBJ_ID_IPV4_FIB, | 1040 | err = switchdev_port_obj_add(dev, &ipv4_fib.obj); |
| 1039 | &ipv4_fib.obj); | ||
| 1040 | if (!err) | 1041 | if (!err) |
| 1041 | fi->fib_flags |= RTNH_F_OFFLOAD; | 1042 | fi->fib_flags |= RTNH_F_OFFLOAD; |
| 1042 | 1043 | ||
| @@ -1060,6 +1061,7 @@ int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi, | |||
| 1060 | u8 tos, u8 type, u32 tb_id) | 1061 | u8 tos, u8 type, u32 tb_id) |
| 1061 | { | 1062 | { |
| 1062 | struct switchdev_obj_ipv4_fib ipv4_fib = { | 1063 | struct switchdev_obj_ipv4_fib ipv4_fib = { |
| 1064 | .obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB, | ||
| 1063 | .dst = dst, | 1065 | .dst = dst, |
| 1064 | .dst_len = dst_len, | 1066 | .dst_len = dst_len, |
| 1065 | .fi = fi, | 1067 | .fi = fi, |
| @@ -1078,8 +1080,7 @@ int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi, | |||
| 1078 | if (!dev) | 1080 | if (!dev) |
| 1079 | return 0; | 1081 | return 0; |
| 1080 | 1082 | ||
| 1081 | err = switchdev_port_obj_del(dev, SWITCHDEV_OBJ_ID_IPV4_FIB, | 1083 | err = switchdev_port_obj_del(dev, &ipv4_fib.obj); |
| 1082 | &ipv4_fib.obj); | ||
| 1083 | if (!err) | 1084 | if (!err) |
| 1084 | fi->fib_flags &= ~RTNH_F_OFFLOAD; | 1085 | fi->fib_flags &= ~RTNH_F_OFFLOAD; |
| 1085 | 1086 | ||
