Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c              10
-rw-r--r--  net/core/ethtool.c           2
-rw-r--r--  net/core/neighbour.c         2
-rw-r--r--  net/core/net_namespace.c    31
-rw-r--r--  net/core/netpoll.c           2
-rw-r--r--  net/core/netprio_cgroup.c   15
-rw-r--r--  net/core/rtnetlink.c        79
-rw-r--r--  net/core/sock.c              7
8 files changed, 96 insertions(+), 52 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 115dee1d985d..6ca32f6b3105 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3500,14 +3500,20 @@ static inline gro_result_t
 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	struct sk_buff *p;
+	unsigned int maclen = skb->dev->hard_header_len;
 
 	for (p = napi->gro_list; p; p = p->next) {
 		unsigned long diffs;
 
 		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
 		diffs |= p->vlan_tci ^ skb->vlan_tci;
-		diffs |= compare_ether_header(skb_mac_header(p),
-					      skb_gro_mac_header(skb));
+		if (maclen == ETH_HLEN)
+			diffs |= compare_ether_header(skb_mac_header(p),
+						      skb_gro_mac_header(skb));
+		else if (!diffs)
+			diffs = memcmp(skb_mac_header(p),
+				       skb_gro_mac_header(skb),
+				       maclen);
 		NAPI_GRO_CB(p)->same_flow = !diffs;
 		NAPI_GRO_CB(p)->flush = 0;
 	}
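The dev.c hunk generalizes GRO's L2 flow match beyond Ethernet: previously only compare_ether_header(), a fixed ETH_HLEN-byte comparison, was used, which is too short for devices whose hard_header_len exceeds 14 bytes. Restated as a standalone predicate, with simplified, invented names (a sketch for illustration, not the kernel code itself, assuming <linux/etherdevice.h> and <linux/if_ether.h>):

	/* Sketch: do two queued packets share a GRO flow, given the
	 * device's link-layer header length 'maclen'? */
	static bool gro_l2_headers_match(const u8 *p_mac, const u8 *skb_mac,
					 unsigned int maclen)
	{
		if (maclen == ETH_HLEN)
			/* common case: plain Ethernet header */
			return compare_ether_header(p_mac, skb_mac) == 0;
		/* longer link-layer headers: compare the whole thing */
		return memcmp(p_mac, skb_mac, maclen) == 0;
	}

In the actual hunk the memcmp() branch is additionally gated on "else if (!diffs)", so the full comparison is only paid when the device and VLAN tag already matched.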
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 369b41894527..3f79db1b612a 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1190,6 +1190,8 @@ static noinline_for_stack int ethtool_flash_device(struct net_device *dev,
 	if (!dev->ethtool_ops->flash_device)
 		return -EOPNOTSUPP;
 
+	efl.data[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
+
 	return dev->ethtool_ops->flash_device(dev, &efl);
 }
 
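The ethtool.c hunk forces a terminating NUL onto the user-supplied firmware file name before it reaches the driver's flash_device() hook, since nothing guarantees that userspace terminated efl.data. For context, a sketch of the userspace request this path serves (flash_device_request is an invented helper name; error handling omitted):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	/* Sketch: ask the kernel to flash firmware file 'fw' on interface
	 * 'ifname'. 'fd' is any open AF_INET datagram socket. */
	static int flash_device_request(int fd, const char *ifname, const char *fw)
	{
		struct ethtool_flash efl;
		struct ifreq ifr;

		memset(&efl, 0, sizeof(efl));
		memset(&ifr, 0, sizeof(ifr));
		efl.cmd = ETHTOOL_FLASHDEV;
		efl.region = ETHTOOL_FLASH_ALL_REGIONS;
		/* strncpy() may fill the buffer without a trailing NUL;
		 * the kernel-side fix above caps the string regardless of
		 * what arrives here. */
		strncpy(efl.data, fw, ETHTOOL_FLASH_MAX_FILENAME);
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&efl;

		return ioctl(fd, SIOCETHTOOL, &ifr);
	}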
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index e287346e0934..2a83914b0277 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -826,6 +826,8 @@ next_elt:
 		write_unlock_bh(&tbl->lock);
 		cond_resched();
 		write_lock_bh(&tbl->lock);
+		nht = rcu_dereference_protected(tbl->nht,
+						lockdep_is_held(&tbl->lock));
 	}
 	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
 	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
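The neighbour.c hunk matters because the periodic garbage collector caches tbl->nht near the top of the function and then briefly drops tbl->lock around cond_resched(); while the lock is released the hash table can be grown and tbl->nht replaced, so the cached pointer has to be re-read once the lock is re-taken. The general pattern, sketched out of context (illustrative only):

	/* Sketch: a pointer protected by tbl->lock must be re-fetched after
	 * the lock has been dropped and re-acquired, because the protected
	 * object may have been replaced in between. */
	nht = rcu_dereference_protected(tbl->nht, lockdep_is_held(&tbl->lock));
	/* ... walk part of the table ... */
	write_unlock_bh(&tbl->lock);
	cond_resched();			/* tbl->nht may be swapped here */
	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,	/* so fetch it again */
					lockdep_is_held(&tbl->lock));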
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index aefcd7acbffa..0e950fda9a0a 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -30,6 +30,20 @@ EXPORT_SYMBOL(init_net);
 
 #define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */
 
+static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;
+
+static struct net_generic *net_alloc_generic(void)
+{
+	struct net_generic *ng;
+	size_t generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);
+
+	ng = kzalloc(generic_size, GFP_KERNEL);
+	if (ng)
+		ng->len = max_gen_ptrs;
+
+	return ng;
+}
+
 static int net_assign_generic(struct net *net, int id, void *data)
 {
 	struct net_generic *ng, *old_ng;
@@ -43,8 +57,7 @@ static int net_assign_generic(struct net *net, int id, void *data)
 	if (old_ng->len >= id)
 		goto assign;
 
-	ng = kzalloc(sizeof(struct net_generic) +
-			id * sizeof(void *), GFP_KERNEL);
+	ng = net_alloc_generic();
 	if (ng == NULL)
 		return -ENOMEM;
 
@@ -59,7 +72,6 @@ static int net_assign_generic(struct net *net, int id, void *data)
 	 * the old copy for kfree after a grace period.
 	 */
 
-	ng->len = id;
 	memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));
 
 	rcu_assign_pointer(net->gen, ng);
@@ -161,18 +173,6 @@ out_undo:
 	goto out;
 }
 
-static struct net_generic *net_alloc_generic(void)
-{
-	struct net_generic *ng;
-	size_t generic_size = sizeof(struct net_generic) +
-		INITIAL_NET_GEN_PTRS * sizeof(void *);
-
-	ng = kzalloc(generic_size, GFP_KERNEL);
-	if (ng)
-		ng->len = INITIAL_NET_GEN_PTRS;
-
-	return ng;
-}
 
 #ifdef CONFIG_NET_NS
 static struct kmem_cache *net_cachep;
@@ -483,6 +483,7 @@ again:
 			}
 			return error;
 		}
+		max_gen_ptrs = max_t(unsigned int, max_gen_ptrs, *ops->id);
 	}
 	error = __register_pernet_operations(list, ops);
 	if (error) {
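Taken together, the net_namespace.c hunks size each namespace's generic-pointer array from max_gen_ptrs, the largest id handed out by register_pernet_operations(), instead of growing it ad hoc inside net_assign_generic(), and the relocated net_alloc_generic() computes the allocation with offsetof() over the flexible ptr[] array. A hypothetical userspace mirror of that sizing idiom (names invented for illustration; relies on the GCC/Clang offsetof extension for a variable array index, as the kernel does):

	#include <stddef.h>
	#include <stdlib.h>

	struct ptr_table {
		unsigned int len;
		void *ptr[];		/* flexible array member */
	};

	static struct ptr_table *ptr_table_alloc(unsigned int nr)
	{
		/* offsetof(struct ptr_table, ptr[nr]) counts the header plus
		 * exactly nr pointer slots, with no guesswork about padding. */
		struct ptr_table *t = calloc(1, offsetof(struct ptr_table, ptr[nr]));

		if (t)
			t->len = nr;
		return t;
	}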
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 556b08298669..ddefc513b44a 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -194,7 +194,7 @@ static void netpoll_poll_dev(struct net_device *dev)
 
 	poll_napi(dev);
 
-	if (dev->priv_flags & IFF_SLAVE) {
+	if (dev->flags & IFF_SLAVE) {
 		if (dev->npinfo) {
 			struct net_device *bond_dev = dev->master;
 			struct sk_buff *skb;
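The netpoll.c fix is a one-bit change with a simple cause: IFF_SLAVE is one of the public interface flags kept in dev->flags (the set reported through SIOCGIFFLAGS), while dev->priv_flags carries a separate group of kernel-internal IFF_* bits, so the old test examined an unrelated private-flag bit. Spelled out as a hypothetical helper (netdev_is_slave is an invented name):

	#include <linux/if.h>	/* IFF_SLAVE belongs to the public flag set */

	/* Sketch: is 'dev' currently enslaved, e.g. a bonding slave?
	 * The bit must be tested in dev->flags, not dev->priv_flags. */
	static inline bool netdev_is_slave(const struct net_device *dev)
	{
		return dev->flags & IFF_SLAVE;
	}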
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 3a9fd4826b75..4dacc44637ef 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -58,11 +58,12 @@ static int get_prioidx(u32 *prio)
 
 	spin_lock_irqsave(&prioidx_map_lock, flags);
 	prioidx = find_first_zero_bit(prioidx_map, sizeof(unsigned long) * PRIOIDX_SZ);
+	if (prioidx == sizeof(unsigned long) * PRIOIDX_SZ) {
+		spin_unlock_irqrestore(&prioidx_map_lock, flags);
+		return -ENOSPC;
+	}
 	set_bit(prioidx, prioidx_map);
 	spin_unlock_irqrestore(&prioidx_map_lock, flags);
-	if (prioidx == sizeof(unsigned long) * PRIOIDX_SZ)
-		return -ENOSPC;
-
 	atomic_set(&max_prioidx, prioidx);
 	*prio = prioidx;
 	return 0;
@@ -107,7 +108,7 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len)
 static void update_netdev_tables(void)
 {
 	struct net_device *dev;
-	u32 max_len = atomic_read(&max_prioidx);
+	u32 max_len = atomic_read(&max_prioidx) + 1;
 	struct netprio_map *map;
 
 	rtnl_lock();
@@ -270,7 +271,6 @@ static int netprio_device_event(struct notifier_block *unused,
 {
 	struct net_device *dev = ptr;
 	struct netprio_map *old;
-	u32 max_len = atomic_read(&max_prioidx);
 
 	/*
 	 * Note this is called with rtnl_lock held so we have update side
@@ -278,11 +278,6 @@ static int netprio_device_event(struct notifier_block *unused,
 	 */
 
 	switch (event) {
-
-	case NETDEV_REGISTER:
-		if (max_len)
-			extend_netdev_table(dev, max_len);
-		break;
 	case NETDEV_UNREGISTER:
 		old = rtnl_dereference(dev->priomap);
 		RCU_INIT_POINTER(dev->priomap, NULL);
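Two off-by-one style problems are addressed in netprio_cgroup.c: find_first_zero_bit() returns the bitmap size when no free bit exists, so that sentinel has to be rejected before set_bit() touches the map, with the spinlock released on that error path; and a table that must be indexable up to max_prioidx needs max_prioidx + 1 entries, hence the "+ 1" in update_netdev_tables(). The corrected allocation pattern, restated generically as a sketch:

	/* Sketch of the index-allocation pattern after the fix. */
	spin_lock_irqsave(&lock, flags);
	idx = find_first_zero_bit(map, size);
	if (idx == size) {			/* bitmap is full */
		spin_unlock_irqrestore(&lock, flags);
		return -ENOSPC;
	}
	set_bit(idx, map);			/* safe: idx < size */
	spin_unlock_irqrestore(&lock, flags);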
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index f16444bc6cbb..f965dce6f20f 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -60,7 +60,6 @@ struct rtnl_link {
 };
 
 static DEFINE_MUTEX(rtnl_mutex);
-static u16 min_ifinfo_dump_size;
 
 void rtnl_lock(void)
 {
@@ -724,10 +723,11 @@ static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b)
 }
 
 /* All VF info */
-static inline int rtnl_vfinfo_size(const struct net_device *dev)
+static inline int rtnl_vfinfo_size(const struct net_device *dev,
+				   u32 ext_filter_mask)
 {
-	if (dev->dev.parent && dev_is_pci(dev->dev.parent)) {
-
+	if (dev->dev.parent && dev_is_pci(dev->dev.parent) &&
+	    (ext_filter_mask & RTEXT_FILTER_VF)) {
 		int num_vfs = dev_num_vf(dev->dev.parent);
 		size_t size = nla_total_size(sizeof(struct nlattr));
 		size += nla_total_size(num_vfs * sizeof(struct nlattr));
@@ -766,7 +766,8 @@ static size_t rtnl_port_size(const struct net_device *dev)
 	return port_self_size;
 }
 
-static noinline size_t if_nlmsg_size(const struct net_device *dev)
+static noinline size_t if_nlmsg_size(const struct net_device *dev,
+				     u32 ext_filter_mask)
 {
 	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
 	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
@@ -784,8 +785,9 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev)
 	       + nla_total_size(4) /* IFLA_MASTER */
 	       + nla_total_size(1) /* IFLA_OPERSTATE */
 	       + nla_total_size(1) /* IFLA_LINKMODE */
-	       + nla_total_size(4) /* IFLA_NUM_VF */
-	       + rtnl_vfinfo_size(dev) /* IFLA_VFINFO_LIST */
+	       + nla_total_size(ext_filter_mask
+			        & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
+	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
 	       + rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
 	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
 	       + rtnl_link_get_af_size(dev); /* IFLA_AF_SPEC */
@@ -868,7 +870,7 @@ static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev)
 
 static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
 			    int type, u32 pid, u32 seq, u32 change,
-			    unsigned int flags)
+			    unsigned int flags, u32 ext_filter_mask)
 {
 	struct ifinfomsg *ifm;
 	struct nlmsghdr *nlh;
@@ -941,10 +943,11 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
 		goto nla_put_failure;
 	copy_rtnl_link_stats64(nla_data(attr), stats);
 
-	if (dev->dev.parent)
+	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF))
 		NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent));
 
-	if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) {
+	if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent
+	    && (ext_filter_mask & RTEXT_FILTER_VF)) {
 		int i;
 
 		struct nlattr *vfinfo, *vf;
@@ -1048,6 +1051,8 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 	struct net_device *dev;
 	struct hlist_head *head;
 	struct hlist_node *node;
+	struct nlattr *tb[IFLA_MAX+1];
+	u32 ext_filter_mask = 0;
 
 	s_h = cb->args[0];
 	s_idx = cb->args[1];
@@ -1055,6 +1060,13 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 	rcu_read_lock();
 	cb->seq = net->dev_base_seq;
 
+	if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
+			ifla_policy) >= 0) {
+
+		if (tb[IFLA_EXT_MASK])
+			ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+	}
+
 	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
 		idx = 0;
 		head = &net->dev_index_head[h];
@@ -1064,7 +1076,8 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 			if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
 					     NETLINK_CB(cb->skb).pid,
 					     cb->nlh->nlmsg_seq, 0,
-					     NLM_F_MULTI) <= 0)
+					     NLM_F_MULTI,
+					     ext_filter_mask) <= 0)
 				goto out;
 
 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
@@ -1100,6 +1113,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
 	[IFLA_VF_PORTS]		= { .type = NLA_NESTED },
 	[IFLA_PORT_SELF]	= { .type = NLA_NESTED },
 	[IFLA_AF_SPEC]		= { .type = NLA_NESTED },
+	[IFLA_EXT_MASK]		= { .type = NLA_U32 },
 };
 EXPORT_SYMBOL(ifla_policy);
 
@@ -1509,6 +1523,7 @@ errout:
 
 	if (send_addr_notify)
 		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+
 	return err;
 }
 
@@ -1839,6 +1854,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 	struct net_device *dev = NULL;
 	struct sk_buff *nskb;
 	int err;
+	u32 ext_filter_mask = 0;
 
 	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
 	if (err < 0)
@@ -1847,6 +1863,9 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 	if (tb[IFLA_IFNAME])
 		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
 
+	if (tb[IFLA_EXT_MASK])
+		ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+
 	ifm = nlmsg_data(nlh);
 	if (ifm->ifi_index > 0)
 		dev = __dev_get_by_index(net, ifm->ifi_index);
@@ -1858,12 +1877,12 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 	if (dev == NULL)
 		return -ENODEV;
 
-	nskb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL);
+	nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
 	if (nskb == NULL)
 		return -ENOBUFS;
 
 	err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).pid,
-			       nlh->nlmsg_seq, 0, 0);
+			       nlh->nlmsg_seq, 0, 0, ext_filter_mask);
 	if (err < 0) {
 		/* -EMSGSIZE implies BUG in if_nlmsg_size */
 		WARN_ON(err == -EMSGSIZE);
@@ -1874,8 +1893,32 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 	return err;
 }
 
-static u16 rtnl_calcit(struct sk_buff *skb)
+static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
+	struct net *net = sock_net(skb->sk);
+	struct net_device *dev;
+	struct nlattr *tb[IFLA_MAX+1];
+	u32 ext_filter_mask = 0;
+	u16 min_ifinfo_dump_size = 0;
+
+	if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
+			ifla_policy) >= 0) {
+		if (tb[IFLA_EXT_MASK])
+			ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+	}
+
+	if (!ext_filter_mask)
+		return NLMSG_GOODSIZE;
+	/*
+	 * traverse the list of net devices and compute the minimum
+	 * buffer size based upon the filter mask.
+	 */
+	list_for_each_entry(dev, &net->dev_base_head, dev_list) {
+		min_ifinfo_dump_size = max_t(u16, min_ifinfo_dump_size,
+					     if_nlmsg_size(dev,
+							   ext_filter_mask));
+	}
+
 	return min_ifinfo_dump_size;
 }
 
@@ -1910,13 +1953,11 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
 	int err = -ENOBUFS;
 	size_t if_info_size;
 
-	skb = nlmsg_new((if_info_size = if_nlmsg_size(dev)), GFP_KERNEL);
+	skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), GFP_KERNEL);
 	if (skb == NULL)
 		goto errout;
 
-	min_ifinfo_dump_size = max_t(u16, if_info_size, min_ifinfo_dump_size);
-
-	err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0);
+	err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0, 0);
 	if (err < 0) {
 		/* -EMSGSIZE implies BUG in if_nlmsg_size() */
 		WARN_ON(err == -EMSGSIZE);
@@ -1974,7 +2015,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 			return -EOPNOTSUPP;
 		calcit = rtnl_get_calcit(family, type);
 		if (calcit)
-			min_dump_alloc = calcit(skb);
+			min_dump_alloc = calcit(skb, nlh);
 
 		__rtnl_unlock();
 		rtnl = net->rtnl;
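The rtnetlink.c changes make SR-IOV VF details in RTM_GETLINK replies opt-in: IFLA_NUM_VF and IFLA_VFINFO_LIST are only counted by if_nlmsg_size() and emitted by rtnl_fill_ifinfo() when the request carried an IFLA_EXT_MASK attribute with RTEXT_FILTER_VF set, and rtnl_calcit() now derives the dump allocation from the request itself rather than from the removed global min_ifinfo_dump_size. A sketch of the userspace side of such a request over a bound NETLINK_ROUTE socket (the helper name is invented and error handling is omitted; this mirrors the kind of request iproute2 sends):

	#include <string.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>
	#include <linux/rtnetlink.h>
	#include <linux/if_link.h>

	/* Sketch: issue an RTM_GETLINK dump that opts in to VF info by
	 * attaching IFLA_EXT_MASK = RTEXT_FILTER_VF. */
	static int request_link_dump_with_vf(int nlsk)
	{
		struct {
			struct nlmsghdr  nlh;
			struct ifinfomsg ifm;
			/* attribute must stay NLMSG-aligned after ifinfomsg */
			struct rtattr    ext_req __attribute__((aligned(NLMSG_ALIGNTO)));
			__u32            ext_filter_mask;
		} req;

		memset(&req, 0, sizeof(req));
		req.nlh.nlmsg_len = sizeof(req);
		req.nlh.nlmsg_type = RTM_GETLINK;
		req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
		req.ifm.ifi_family = AF_UNSPEC;
		req.ext_req.rta_type = IFLA_EXT_MASK;
		req.ext_req.rta_len = RTA_LENGTH(sizeof(__u32));
		req.ext_filter_mask = RTEXT_FILTER_VF;

		return send(nlsk, &req, sizeof(req), 0);
	}

Requests that omit the attribute simply get link dumps without the per-VF nest, which keeps messages small on hosts with many virtual functions.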
diff --git a/net/core/sock.c b/net/core/sock.c
index 3e81fd2e3c75..02f8dfe320b7 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1171,13 +1171,10 @@ EXPORT_SYMBOL(sock_update_classid);
 
 void sock_update_netprioidx(struct sock *sk)
 {
-	struct cgroup_netprio_state *state;
 	if (in_interrupt())
 		return;
-	rcu_read_lock();
-	state = task_netprio_state(current);
-	sk->sk_cgrp_prioidx = state ? state->prioidx : 0;
-	rcu_read_unlock();
+
+	sk->sk_cgrp_prioidx = task_netprioidx(current);
 }
 EXPORT_SYMBOL_GPL(sock_update_netprioidx);
 #endif
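The sock.c hunk replaces the open-coded RCU lookup of the task's netprio cgroup state with the task_netprioidx() helper, so the rcu_read_lock()/unlock() and the index lookup live in one place. Roughly, such a helper has the shape sketched below; the authoritative definition lives in include/net/netprio_cgroup.h and also covers the case where the controller is compiled out.

	/* Sketch only, not the verbatim kernel definition. */
	static inline u32 task_netprioidx(struct task_struct *p)
	{
		struct cgroup_netprio_state *state;
		u32 idx;

		rcu_read_lock();
		state = container_of(task_subsys_state(p, net_prio_subsys_id),
				     struct cgroup_netprio_state, css);
		idx = state->prioidx;
		rcu_read_unlock();
		return idx;
	}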