author	Ingo Molnar <mingo@elte.hu>	2012-03-05 03:20:08 -0500
committer	Ingo Molnar <mingo@elte.hu>	2012-03-05 03:20:08 -0500
commit	737f24bda723fdf89ecaacb99fa2bf5683c32799 (patch)
tree	35495fff3e9956679cb5468e74e6814c8e44ee66 /net/core
parent	8eedce996556d7d06522cd3a0e6069141c8dffe0 (diff)
parent	b7c924274c456499264d1cfa3d44063bb11eb5db (diff)
Merge branch 'perf/urgent' into perf/core
Conflicts:
	tools/perf/builtin-record.c
	tools/perf/builtin-top.c
	tools/perf/perf.h
	tools/perf/util/top.h

Merge reason: resolve these cherry-picking conflicts.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/dev.c	10
-rw-r--r--	net/core/ethtool.c	2
-rw-r--r--	net/core/neighbour.c	2
-rw-r--r--	net/core/netpoll.c	2
-rw-r--r--	net/core/netprio_cgroup.c	15
-rw-r--r--	net/core/rtnetlink.c	78
-rw-r--r--	net/core/sock.c	7
7 files changed, 77 insertions(+), 39 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index da7ce7f0e566..6982bfd6a781 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3500,14 +3500,20 @@ static inline gro_result_t
 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	struct sk_buff *p;
+	unsigned int maclen = skb->dev->hard_header_len;
 
 	for (p = napi->gro_list; p; p = p->next) {
 		unsigned long diffs;
 
 		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
 		diffs |= p->vlan_tci ^ skb->vlan_tci;
-		diffs |= compare_ether_header(skb_mac_header(p),
-					      skb_gro_mac_header(skb));
+		if (maclen == ETH_HLEN)
+			diffs |= compare_ether_header(skb_mac_header(p),
+						      skb_gro_mac_header(skb));
+		else if (!diffs)
+			diffs = memcmp(skb_mac_header(p),
+				       skb_gro_mac_header(skb),
+				       maclen);
 		NAPI_GRO_CB(p)->same_flow = !diffs;
 		NAPI_GRO_CB(p)->flush = 0;
 	}
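The dev.c hunk keys GRO flow matching off the device's hard_header_len: plain Ethernet keeps the compare_ether_header() fast path, while any other link-layer header length falls back to a full memcmp() of maclen bytes. Below is a standalone userspace sketch of that two-path comparison; compare_l2() and its memcmp()-based fast path are illustrative stand-ins, not the kernel helpers.

/*
 * Standalone sketch of the two-path L2 comparison added above.  ETH_HLEN
 * and the "diffs" accumulator mirror the kernel logic; compare_l2() and
 * the memcmp()-based fast path are userspace stand-ins, not kernel API.
 */
#include <stdio.h>
#include <string.h>

#define ETH_HLEN 14	/* dst MAC + src MAC + ethertype */

static unsigned long compare_l2(const unsigned char *a, const unsigned char *b,
				unsigned int maclen, unsigned long diffs)
{
	if (maclen == ETH_HLEN)
		/* fast path, stand-in for compare_ether_header() */
		diffs |= memcmp(a, b, ETH_HLEN) != 0;
	else if (!diffs)
		/* generic path for devices with longer link-layer headers */
		diffs = memcmp(a, b, maclen);
	return diffs;
}

int main(void)
{
	unsigned char h1[16] = { 0xaa, 0xbb };
	unsigned char h2[16] = { 0xaa, 0xbb };

	printf("same flow (Ethernet): %d\n", !compare_l2(h1, h2, ETH_HLEN, 0));

	h2[15] = 1;	/* differ only beyond the first 14 bytes */
	printf("same flow (maclen=16): %d\n", !compare_l2(h1, h2, 16, 0));
	return 0;
}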
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 369b41894527..3f79db1b612a 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1190,6 +1190,8 @@ static noinline_for_stack int ethtool_flash_device(struct net_device *dev,
 	if (!dev->ethtool_ops->flash_device)
 		return -EOPNOTSUPP;
 
+	efl.data[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
+
 	return dev->ethtool_ops->flash_device(dev, &efl);
 }
 
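The ethtool hunk NUL-terminates the user-supplied firmware file name before handing it to the driver. A small userspace sketch of the same defensive idiom follows; struct flash_req and FLASH_MAX_FILENAME are illustrative stand-ins for struct ethtool_flash and ETHTOOL_FLASH_MAX_FILENAME.

/*
 * Clamp the last byte of a fixed-size, possibly unterminated buffer before
 * any string function touches it.
 */
#include <stdio.h>
#include <string.h>

#define FLASH_MAX_FILENAME 128

struct flash_req {
	char data[FLASH_MAX_FILENAME];
};

int main(void)
{
	struct flash_req efl;

	/* Simulate a copy from an untrusted source that omits the NUL. */
	memset(efl.data, 'A', sizeof(efl.data));

	/* The fix: force termination so strlen()/printf() stay in bounds. */
	efl.data[FLASH_MAX_FILENAME - 1] = 0;

	printf("filename length now bounded to %zu\n", strlen(efl.data));
	return 0;
}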
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index e287346e0934..2a83914b0277 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -826,6 +826,8 @@ next_elt:
 		write_unlock_bh(&tbl->lock);
 		cond_resched();
 		write_lock_bh(&tbl->lock);
+		nht = rcu_dereference_protected(tbl->nht,
+						lockdep_is_held(&tbl->lock));
 	}
 	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
 	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
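The neighbour.c hunk re-reads tbl->nht after the lock is dropped around cond_resched() and re-acquired, since the hash table may have been resized in between. Below is a userspace sketch of that "re-fetch after re-locking" rule (builds with cc -pthread); the table type and resizer are illustrative stand-ins.

/*
 * A pointer that is only stable while the lock is held must be reloaded
 * once the lock has been dropped and re-acquired.
 */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

struct hash_table { int nbuckets; };

static pthread_mutex_t tbl_lock = PTHREAD_MUTEX_INITIALIZER;
static struct hash_table *current_tbl;	/* may be swapped by a resizer */

static void scan_buckets(void)
{
	pthread_mutex_lock(&tbl_lock);
	struct hash_table *nht = current_tbl;

	for (int i = 0; i < nht->nbuckets; i++) {
		/* ... work on bucket i under the lock ... */

		/* Let others run, like the cond_resched() in the loop above. */
		pthread_mutex_unlock(&tbl_lock);
		sched_yield();
		pthread_mutex_lock(&tbl_lock);

		/* The fix: the table may have been replaced while the lock
		 * was dropped, so reload the pointer before continuing. */
		nht = current_tbl;
	}
	pthread_mutex_unlock(&tbl_lock);
}

int main(void)
{
	current_tbl = malloc(sizeof(*current_tbl));
	current_tbl->nbuckets = 4;
	scan_buckets();
	puts("scan done");
	free(current_tbl);
	return 0;
}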
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 556b08298669..ddefc513b44a 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -194,7 +194,7 @@ static void netpoll_poll_dev(struct net_device *dev)
 
 	poll_napi(dev);
 
-	if (dev->priv_flags & IFF_SLAVE) {
+	if (dev->flags & IFF_SLAVE) {
 		if (dev->npinfo) {
 			struct net_device *bond_dev = dev->master;
 			struct sk_buff *skb;
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 3a9fd4826b75..4dacc44637ef 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -58,11 +58,12 @@ static int get_prioidx(u32 *prio)
 
 	spin_lock_irqsave(&prioidx_map_lock, flags);
 	prioidx = find_first_zero_bit(prioidx_map, sizeof(unsigned long) * PRIOIDX_SZ);
+	if (prioidx == sizeof(unsigned long) * PRIOIDX_SZ) {
+		spin_unlock_irqrestore(&prioidx_map_lock, flags);
+		return -ENOSPC;
+	}
 	set_bit(prioidx, prioidx_map);
 	spin_unlock_irqrestore(&prioidx_map_lock, flags);
-	if (prioidx == sizeof(unsigned long) * PRIOIDX_SZ)
-		return -ENOSPC;
-
 	atomic_set(&max_prioidx, prioidx);
 	*prio = prioidx;
 	return 0;
@@ -107,7 +108,7 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len)
 static void update_netdev_tables(void)
 {
 	struct net_device *dev;
-	u32 max_len = atomic_read(&max_prioidx);
+	u32 max_len = atomic_read(&max_prioidx) + 1;
 	struct netprio_map *map;
 
 	rtnl_lock();
@@ -270,7 +271,6 @@ static int netprio_device_event(struct notifier_block *unused,
 {
 	struct net_device *dev = ptr;
 	struct netprio_map *old;
-	u32 max_len = atomic_read(&max_prioidx);
 
 	/*
 	 * Note this is called with rtnl_lock held so we have update side
@@ -278,11 +278,6 @@ static int netprio_device_event(struct notifier_block *unused,
 	 */
 
 	switch (event) {
-
-	case NETDEV_REGISTER:
-		if (max_len)
-			extend_netdev_table(dev, max_len);
-		break;
 	case NETDEV_UNREGISTER:
 		old = rtnl_dereference(dev->priomap);
 		RCU_INIT_POINTER(dev->priomap, NULL);
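The first netprio_cgroup.c hunk moves the "bitmap full" check inside the locked region and ahead of set_bit(), so exhaustion returns -ENOSPC cleanly instead of setting an out-of-range bit. A userspace sketch of the reordered allocator follows (builds with cc -pthread); prio_map and MAP_BITS are illustrative stand-ins for the kernel bitmap.

/*
 * The "map full" test happens under the lock and before the bit is set,
 * so exhaustion can never set an out-of-range bit or leak a half-claimed
 * index.
 */
#include <pthread.h>
#include <stdio.h>

#define MAP_BITS 64

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long long prio_map;	/* bit i set => index i in use */

static int get_prioidx(unsigned int *prio)
{
	pthread_mutex_lock(&map_lock);

	unsigned int idx = 0;
	while (idx < MAP_BITS && (prio_map & (1ULL << idx)))
		idx++;			/* find_first_zero_bit() stand-in */

	if (idx == MAP_BITS) {		/* full: bail out while still locked */
		pthread_mutex_unlock(&map_lock);
		return -1;		/* -ENOSPC in the kernel */
	}

	prio_map |= 1ULL << idx;	/* set_bit() under the same lock */
	pthread_mutex_unlock(&map_lock);

	*prio = idx;
	return 0;
}

int main(void)
{
	unsigned int prio;
	int n = 0;

	while (get_prioidx(&prio) == 0)
		n++;			/* exhaust all indices */
	printf("allocated %d slots, then got a clean exhaustion error\n", n);
	return 0;
}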
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 65aebd450027..606a6e8f3671 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -60,7 +60,6 @@ struct rtnl_link {
 };
 
 static DEFINE_MUTEX(rtnl_mutex);
-static u16 min_ifinfo_dump_size;
 
 void rtnl_lock(void)
 {
@@ -724,10 +723,11 @@ static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b)
 }
 
 /* All VF info */
-static inline int rtnl_vfinfo_size(const struct net_device *dev)
+static inline int rtnl_vfinfo_size(const struct net_device *dev,
+				   u32 ext_filter_mask)
 {
-	if (dev->dev.parent && dev_is_pci(dev->dev.parent)) {
-
+	if (dev->dev.parent && dev_is_pci(dev->dev.parent) &&
+	    (ext_filter_mask & RTEXT_FILTER_VF)) {
 		int num_vfs = dev_num_vf(dev->dev.parent);
 		size_t size = nla_total_size(sizeof(struct nlattr));
 		size += nla_total_size(num_vfs * sizeof(struct nlattr));
@@ -766,7 +766,8 @@ static size_t rtnl_port_size(const struct net_device *dev)
 	return port_self_size;
 }
 
-static noinline size_t if_nlmsg_size(const struct net_device *dev)
+static noinline size_t if_nlmsg_size(const struct net_device *dev,
+				     u32 ext_filter_mask)
 {
 	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
 	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
@@ -784,8 +785,9 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev)
 	       + nla_total_size(4) /* IFLA_MASTER */
 	       + nla_total_size(1) /* IFLA_OPERSTATE */
 	       + nla_total_size(1) /* IFLA_LINKMODE */
-	       + nla_total_size(4) /* IFLA_NUM_VF */
-	       + rtnl_vfinfo_size(dev) /* IFLA_VFINFO_LIST */
+	       + nla_total_size(ext_filter_mask
+			        & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
+	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
 	       + rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
 	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
 	       + rtnl_link_get_af_size(dev); /* IFLA_AF_SPEC */
@@ -868,7 +870,7 @@ static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev)
 
 static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
 			    int type, u32 pid, u32 seq, u32 change,
-			    unsigned int flags)
+			    unsigned int flags, u32 ext_filter_mask)
 {
 	struct ifinfomsg *ifm;
 	struct nlmsghdr *nlh;
@@ -941,10 +943,11 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
 		goto nla_put_failure;
 	copy_rtnl_link_stats64(nla_data(attr), stats);
 
-	if (dev->dev.parent)
+	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF))
 		NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent));
 
-	if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) {
+	if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent
+	    && (ext_filter_mask & RTEXT_FILTER_VF)) {
 		int i;
 
 		struct nlattr *vfinfo, *vf;
@@ -1048,6 +1051,8 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 	struct net_device *dev;
 	struct hlist_head *head;
 	struct hlist_node *node;
+	struct nlattr *tb[IFLA_MAX+1];
+	u32 ext_filter_mask = 0;
 
 	s_h = cb->args[0];
 	s_idx = cb->args[1];
@@ -1055,6 +1060,12 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 	rcu_read_lock();
 	cb->seq = net->dev_base_seq;
 
+	nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
+		    ifla_policy);
+
+	if (tb[IFLA_EXT_MASK])
+		ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+
 	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
 		idx = 0;
 		head = &net->dev_index_head[h];
@@ -1064,7 +1075,8 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 			if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
 					     NETLINK_CB(cb->skb).pid,
 					     cb->nlh->nlmsg_seq, 0,
-					     NLM_F_MULTI) <= 0)
+					     NLM_F_MULTI,
+					     ext_filter_mask) <= 0)
 				goto out;
 
 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
@@ -1100,6 +1112,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
 	[IFLA_VF_PORTS]		= { .type = NLA_NESTED },
 	[IFLA_PORT_SELF]	= { .type = NLA_NESTED },
 	[IFLA_AF_SPEC]		= { .type = NLA_NESTED },
+	[IFLA_EXT_MASK]		= { .type = NLA_U32 },
 };
 EXPORT_SYMBOL(ifla_policy);
 
@@ -1509,8 +1522,6 @@ errout:
 
 	if (send_addr_notify)
 		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
-	min_ifinfo_dump_size = max_t(u16, if_nlmsg_size(dev),
-				     min_ifinfo_dump_size);
 
 	return err;
 }
@@ -1842,6 +1853,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 	struct net_device *dev = NULL;
 	struct sk_buff *nskb;
 	int err;
+	u32 ext_filter_mask = 0;
 
 	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
 	if (err < 0)
@@ -1850,6 +1862,9 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 	if (tb[IFLA_IFNAME])
 		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
 
+	if (tb[IFLA_EXT_MASK])
+		ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+
 	ifm = nlmsg_data(nlh);
 	if (ifm->ifi_index > 0)
 		dev = __dev_get_by_index(net, ifm->ifi_index);
@@ -1861,12 +1876,12 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 	if (dev == NULL)
 		return -ENODEV;
 
-	nskb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL);
+	nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
 	if (nskb == NULL)
 		return -ENOBUFS;
 
 	err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).pid,
-			       nlh->nlmsg_seq, 0, 0);
+			       nlh->nlmsg_seq, 0, 0, ext_filter_mask);
 	if (err < 0) {
 		/* -EMSGSIZE implies BUG in if_nlmsg_size */
 		WARN_ON(err == -EMSGSIZE);
@@ -1877,8 +1892,31 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 	return err;
 }
 
-static u16 rtnl_calcit(struct sk_buff *skb)
+static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
+	struct net *net = sock_net(skb->sk);
+	struct net_device *dev;
+	struct nlattr *tb[IFLA_MAX+1];
+	u32 ext_filter_mask = 0;
+	u16 min_ifinfo_dump_size = 0;
+
+	nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, ifla_policy);
+
+	if (tb[IFLA_EXT_MASK])
+		ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+
+	if (!ext_filter_mask)
+		return NLMSG_GOODSIZE;
+	/*
+	 * traverse the list of net devices and compute the minimum
+	 * buffer size based upon the filter mask.
+	 */
+	list_for_each_entry(dev, &net->dev_base_head, dev_list) {
+		min_ifinfo_dump_size = max_t(u16, min_ifinfo_dump_size,
+					     if_nlmsg_size(dev,
+							   ext_filter_mask));
+	}
+
 	return min_ifinfo_dump_size;
 }
 
@@ -1913,13 +1951,11 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
 	int err = -ENOBUFS;
 	size_t if_info_size;
 
-	skb = nlmsg_new((if_info_size = if_nlmsg_size(dev)), GFP_KERNEL);
+	skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), GFP_KERNEL);
 	if (skb == NULL)
 		goto errout;
 
-	min_ifinfo_dump_size = max_t(u16, if_info_size, min_ifinfo_dump_size);
-
-	err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0);
+	err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0, 0);
 	if (err < 0) {
 		/* -EMSGSIZE implies BUG in if_nlmsg_size() */
 		WARN_ON(err == -EMSGSIZE);
@@ -1977,7 +2013,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 			return -EOPNOTSUPP;
 		calcit = rtnl_get_calcit(family, type);
 		if (calcit)
-			min_dump_alloc = calcit(skb);
+			min_dump_alloc = calcit(skb, nlh);
 
 		__rtnl_unlock();
 		rtnl = net->rtnl;
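The rtnetlink changes make the potentially large per-VF attributes opt-in: a dump only carries IFLA_NUM_VF and IFLA_VFINFO_LIST when the request sets IFLA_EXT_MASK with RTEXT_FILTER_VF, and rtnl_calcit() now sizes the dump buffer from that mask and per-request nlmsghdr instead of a global. A minimal request sketch follows, assuming headers that already export IFLA_EXT_MASK and RTEXT_FILTER_VF; the payload layout (struct rtgenmsg plus one attribute) mirrors the nlmsg_parse(nlh, sizeof(struct rtgenmsg), ...) calls above, and the reply receive loop is omitted.

/*
 * Issue an RTM_GETLINK dump that opts back in to VF info by setting
 * IFLA_EXT_MASK = RTEXT_FILTER_VF.
 */
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_link.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct rtgenmsg gen;
		char attrbuf[64];
	} req;
	struct sockaddr_nl sa;
	struct rtattr *rta;
	unsigned int mask = RTEXT_FILTER_VF;

	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(req.gen));
	req.nlh.nlmsg_type = RTM_GETLINK;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.gen.rtgen_family = AF_UNSPEC;

	/* Append IFLA_EXT_MASK (u32) right after the aligned header. */
	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
	rta->rta_type = IFLA_EXT_MASK;
	rta->rta_len = RTA_LENGTH(sizeof(mask));
	memcpy(RTA_DATA(rta), &mask, sizeof(mask));
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + RTA_LENGTH(sizeof(mask));

	memset(&sa, 0, sizeof(sa));
	sa.nl_family = AF_NETLINK;	/* destination: the kernel (pid 0) */

	if (sendto(fd, &req, req.nlh.nlmsg_len, 0,
		   (struct sockaddr *)&sa, sizeof(sa)) < 0)
		perror("sendto");
	else
		printf("dump request with RTEXT_FILTER_VF sent (%u bytes)\n",
		       req.nlh.nlmsg_len);

	close(fd);
	return 0;
}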
diff --git a/net/core/sock.c b/net/core/sock.c
index 3a4e5817a2a7..95aff9c7419b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1171,13 +1171,10 @@ EXPORT_SYMBOL(sock_update_classid);
 
 void sock_update_netprioidx(struct sock *sk)
 {
-	struct cgroup_netprio_state *state;
 	if (in_interrupt())
 		return;
-	rcu_read_lock();
-	state = task_netprio_state(current);
-	sk->sk_cgrp_prioidx = state ? state->prioidx : 0;
-	rcu_read_unlock();
+
+	sk->sk_cgrp_prioidx = task_netprioidx(current);
 }
 EXPORT_SYMBOL_GPL(sock_update_netprioidx);
 #endif
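The sock.c hunk folds the open-coded rcu_read_lock()/task_netprio_state()/fallback sequence into a single task_netprioidx() call, so the caller becomes one assignment. A plain-C sketch of the same refactor, with userspace stand-ins for the RCU-protected cgroup state (task_state, reader_lock()/reader_unlock() and the structs are illustrative, not kernel API):

#include <stdio.h>

struct netprio_state { unsigned int prioidx; };
struct sock_stub { unsigned int sk_cgrp_prioidx; };

static struct netprio_state *task_state;	/* may be NULL */

static void reader_lock(void)   { /* rcu_read_lock() stand-in */ }
static void reader_unlock(void) { /* rcu_read_unlock() stand-in */ }

/* Helper in the spirit of task_netprioidx(): hide locking and the NULL case. */
static unsigned int task_prioidx(void)
{
	unsigned int idx;

	reader_lock();
	idx = task_state ? task_state->prioidx : 0;
	reader_unlock();
	return idx;
}

/* After the patch the caller shrinks to a single assignment. */
static void sock_update_prioidx(struct sock_stub *sk)
{
	sk->sk_cgrp_prioidx = task_prioidx();
}

int main(void)
{
	struct sock_stub sk = { 0 };
	struct netprio_state state = { .prioidx = 3 };

	task_state = &state;
	sock_update_prioidx(&sk);
	printf("prioidx = %u\n", sk.sk_cgrp_prioidx);
	return 0;
}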