author		Linus Torvalds <torvalds@linux-foundation.org>	2008-08-13 23:48:46 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-08-13 23:48:46 -0400
commit		7a49efae71397cf7e9299bbb22b2d12f7cf12428 (patch)
tree		d4e47542448b0018f4f684e4097efb05db6bd355 /net
parent		0ff8285075a1242dbc969b6b4b1719d692931a02 (diff)
parent		877acedc0d3ea07f7b36573ed2f1f479c2c1eefd (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (56 commits)
netns: Fix crash by making igmp per namespace
bnx2x: Version update
bnx2x: Checkpatch compliance
bnx2x: Spelling mistakes
bnx2x: Minor code improvements
bnx2x: Driver info
bnx2x: 1G LED does not turn off
bnx2x: 8073 PHY changes
bnx2x: Change GPIO for any port
bnx2x: Pause settings
bnx2x: Link order with external PHY
bnx2x: No LRO without Rx checksum
bnx2x: Wrong structure size
bnx2x: WoL capability
bnx2x: Clearing MAC addresses filters
bnx2x: Delay in while loops
bnx2x: PBA Table Page Alignment Workaround
bnx2x: Self-test false positive
bnx2x: Memory allocation
bnx2x: HW attention lock
...
Diffstat (limited to 'net')
32 files changed, 196 insertions, 199 deletions
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 57abe8266be1..a89f32fa94f6 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -99,7 +99,7 @@ struct gen_estimator_head
 
 static struct gen_estimator_head elist[EST_MAX_INTERVAL+1];
 
-/* Protects against NULL dereference */
+/* Protects against NULL dereference and RCU write-side */
 static DEFINE_RWLOCK(est_lock);
 
 static void est_timer(unsigned long arg)
@@ -185,6 +185,7 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
 	est->last_packets = bstats->packets;
 	est->avpps = rate_est->pps<<10;
 
+	write_lock_bh(&est_lock);
 	if (!elist[idx].timer.function) {
 		INIT_LIST_HEAD(&elist[idx].list);
 		setup_timer(&elist[idx].timer, est_timer, idx);
@@ -194,6 +195,7 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
 		mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));
 
 	list_add_rcu(&est->list, &elist[idx].list);
+	write_unlock_bh(&est_lock);
 	return 0;
 }
 
@@ -212,7 +214,6 @@ static void __gen_kill_estimator(struct rcu_head *head)
  * Removes the rate estimator specified by &bstats and &rate_est
  * and deletes the timer.
  *
- * NOTE: Called under rtnl_mutex
  */
 void gen_kill_estimator(struct gnet_stats_basic *bstats,
 			struct gnet_stats_rate_est *rate_est)
@@ -226,17 +227,17 @@ void gen_kill_estimator(struct gnet_stats_basic *bstats,
 		if (!elist[idx].timer.function)
 			continue;
 
+		write_lock_bh(&est_lock);
 		list_for_each_entry_safe(e, n, &elist[idx].list, list) {
 			if (e->rate_est != rate_est || e->bstats != bstats)
 				continue;
 
-			write_lock_bh(&est_lock);
 			e->bstats = NULL;
-			write_unlock_bh(&est_lock);
 
 			list_del_rcu(&e->list);
 			call_rcu(&e->e_rcu, __gen_kill_estimator);
 		}
+		write_unlock_bh(&est_lock);
 	}
 }
 
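The point of the gen_estimator hunks: list_add_rcu() and list_del_rcu() only protect against concurrent RCU readers (the est_timer traversal), not against concurrent writers, so est_lock must now span the whole insert/remove instead of just the e->bstats update. A minimal sketch of the pattern, with illustrative identifiers rather than the kernel's:

	/* reader side: lockless traversal under RCU */
	rcu_read_lock();
	list_for_each_entry_rcu(e, &head, list)
		consume(e);			/* may race with add/del, never sees a torn list */
	rcu_read_unlock();

	/* writer side: writers still have to exclude one another */
	write_lock_bh(&writer_lock);
	list_add_rcu(&new_entry->list, &head);	/* publish to readers */
	write_unlock_bh(&writer_lock);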
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 526236453908..a756847e3814 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1961,6 +1961,8 @@ static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname)
  */
 static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
 {
+	int ntxq;
+
 	if (!pkt_dev->odev) {
 		printk(KERN_ERR "pktgen: ERROR: pkt_dev->odev == NULL in "
 		       "setup_inject.\n");
@@ -1969,6 +1971,33 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
 		return;
 	}
 
+	/* make sure that we don't pick a non-existing transmit queue */
+	ntxq = pkt_dev->odev->real_num_tx_queues;
+	if (ntxq <= num_online_cpus() && (pkt_dev->flags & F_QUEUE_MAP_CPU)) {
+		printk(KERN_WARNING "pktgen: WARNING: QUEUE_MAP_CPU "
+		       "disabled because CPU count (%d) exceeds number ",
+		       num_online_cpus());
+		printk(KERN_WARNING "pktgen: WARNING: of tx queues "
+		       "(%d) on %s \n", ntxq, pkt_dev->odev->name);
+		pkt_dev->flags &= ~F_QUEUE_MAP_CPU;
+	}
+	if (ntxq <= pkt_dev->queue_map_min) {
+		printk(KERN_WARNING "pktgen: WARNING: Requested "
+		       "queue_map_min (%d) exceeds number of tx\n",
+		       pkt_dev->queue_map_min);
+		printk(KERN_WARNING "pktgen: WARNING: queues (%d) on "
+		       "%s, resetting\n", ntxq, pkt_dev->odev->name);
+		pkt_dev->queue_map_min = ntxq - 1;
+	}
+	if (ntxq <= pkt_dev->queue_map_max) {
+		printk(KERN_WARNING "pktgen: WARNING: Requested "
+		       "queue_map_max (%d) exceeds number of tx\n",
+		       pkt_dev->queue_map_max);
+		printk(KERN_WARNING "pktgen: WARNING: queues (%d) on "
+		       "%s, resetting\n", ntxq, pkt_dev->odev->name);
+		pkt_dev->queue_map_max = ntxq - 1;
+	}
+
 	/* Default to the interface's mac if not explicitly set. */
 
 	if (is_zero_ether_addr(pkt_dev->src_mac))
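All three new checks in pktgen_setup_inject() enforce the same rule: a queue index chosen by CPU mapping or by queue_map_min/max must stay below real_num_tx_queues. Distilled into a standalone sketch (clamp_txq is a hypothetical helper, not part of the patch):

	/* keep a requested tx queue index inside [0, ntxq - 1] */
	static u16 clamp_txq(u16 requested, u16 ntxq)
	{
		return (requested >= ntxq) ? ntxq - 1 : requested;
	}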
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index b622d9744856..1ca3b26eed0f 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -474,6 +474,11 @@ static int dccp_setsockopt_change(struct sock *sk, int type,
 
 	if (copy_from_user(&opt, optval, sizeof(opt)))
 		return -EFAULT;
+	/*
+	 * rfc4340: 6.1. Change Options
+	 */
+	if (opt.dccpsf_len < 1)
+		return -EINVAL;
 
 	val = kmalloc(opt.dccpsf_len, GFP_KERNEL);
 	if (!val)
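The check the comment attributes to RFC 4340, section 6.1 (Change Options) rejects an empty preference list; it is the usual validate-before-allocate pattern for user-controlled sizes and also keeps the subsequent kmalloc() from being asked for zero bytes. The general shape, with illustrative bounds:

	/* validate a user-supplied length before trusting it */
	if (len < MIN_LEN)			/* MIN_LEN is illustrative */
		return -EINVAL;
	buf = kmalloc(len, GFP_KERNEL);		/* length now known to be sane */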
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 6203ece53606..f70fac612596 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -289,6 +289,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
 	struct rtable *rt;
 	struct iphdr *pip;
 	struct igmpv3_report *pig;
+	struct net *net = dev_net(dev);
 
 	skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
 	if (skb == NULL)
@@ -299,7 +300,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
 				    .nl_u = { .ip4_u = {
 				    .daddr = IGMPV3_ALL_MCR } },
 				    .proto = IPPROTO_IGMP };
-		if (ip_route_output_key(&init_net, &rt, &fl)) {
+		if (ip_route_output_key(net, &rt, &fl)) {
 			kfree_skb(skb);
 			return NULL;
 		}
@@ -629,6 +630,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
 	struct igmphdr *ih;
 	struct rtable *rt;
 	struct net_device *dev = in_dev->dev;
+	struct net *net = dev_net(dev);
 	__be32 group = pmc ? pmc->multiaddr : 0;
 	__be32 dst;
 
@@ -643,7 +645,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
 		struct flowi fl = { .oif = dev->ifindex,
 				    .nl_u = { .ip4_u = { .daddr = dst } },
 				    .proto = IPPROTO_IGMP };
-		if (ip_route_output_key(&init_net, &rt, &fl))
+		if (ip_route_output_key(net, &rt, &fl))
 			return -1;
 	}
 	if (rt->rt_src == 0) {
@@ -1196,9 +1198,6 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
 
 	ASSERT_RTNL();
 
-	if (!net_eq(dev_net(in_dev->dev), &init_net))
-		return;
-
 	for (im=in_dev->mc_list; im; im=im->next) {
 		if (im->multiaddr == addr) {
 			im->users++;
@@ -1278,9 +1277,6 @@ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
 
 	ASSERT_RTNL();
 
-	if (!net_eq(dev_net(in_dev->dev), &init_net))
-		return;
-
 	for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) {
 		if (i->multiaddr==addr) {
 			if (--i->users == 0) {
@@ -1308,9 +1304,6 @@ void ip_mc_down(struct in_device *in_dev)
 
 	ASSERT_RTNL();
 
-	if (!net_eq(dev_net(in_dev->dev), &init_net))
-		return;
-
 	for (i=in_dev->mc_list; i; i=i->next)
 		igmp_group_dropped(i);
 
@@ -1331,9 +1324,6 @@ void ip_mc_init_dev(struct in_device *in_dev)
 {
 	ASSERT_RTNL();
 
-	if (!net_eq(dev_net(in_dev->dev), &init_net))
-		return;
-
 	in_dev->mc_tomb = NULL;
 #ifdef CONFIG_IP_MULTICAST
 	in_dev->mr_gq_running = 0;
@@ -1357,9 +1347,6 @@ void ip_mc_up(struct in_device *in_dev)
 
 	ASSERT_RTNL();
 
-	if (!net_eq(dev_net(in_dev->dev), &init_net))
-		return;
-
 	ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
 
 	for (i=in_dev->mc_list; i; i=i->next)
@@ -1376,9 +1363,6 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
 
 	ASSERT_RTNL();
 
-	if (!net_eq(dev_net(in_dev->dev), &init_net))
-		return;
-
 	/* Deactivate timers */
 	ip_mc_down(in_dev);
 
@@ -1395,7 +1379,7 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
 	write_unlock_bh(&in_dev->mc_list_lock);
 }
 
-static struct in_device * ip_mc_find_dev(struct ip_mreqn *imr)
+static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
 {
 	struct flowi fl = { .nl_u = { .ip4_u =
 				      { .daddr = imr->imr_multiaddr.s_addr } } };
@@ -1404,19 +1388,19 @@ static struct in_device * ip_mc_find_dev(struct ip_mreqn *imr)
 	struct in_device *idev = NULL;
 
 	if (imr->imr_ifindex) {
-		idev = inetdev_by_index(&init_net, imr->imr_ifindex);
+		idev = inetdev_by_index(net, imr->imr_ifindex);
 		if (idev)
 			__in_dev_put(idev);
 		return idev;
 	}
 	if (imr->imr_address.s_addr) {
-		dev = ip_dev_find(&init_net, imr->imr_address.s_addr);
+		dev = ip_dev_find(net, imr->imr_address.s_addr);
 		if (!dev)
 			return NULL;
 		dev_put(dev);
 	}
 
-	if (!dev && !ip_route_output_key(&init_net, &rt, &fl)) {
+	if (!dev && !ip_route_output_key(net, &rt, &fl)) {
 		dev = rt->u.dst.dev;
 		ip_rt_put(rt);
 	}
@@ -1754,18 +1738,16 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
 	struct ip_mc_socklist *iml=NULL, *i;
 	struct in_device *in_dev;
 	struct inet_sock *inet = inet_sk(sk);
+	struct net *net = sock_net(sk);
 	int ifindex;
 	int count = 0;
 
 	if (!ipv4_is_multicast(addr))
 		return -EINVAL;
 
-	if (!net_eq(sock_net(sk), &init_net))
-		return -EPROTONOSUPPORT;
-
 	rtnl_lock();
 
-	in_dev = ip_mc_find_dev(imr);
+	in_dev = ip_mc_find_dev(net, imr);
 
 	if (!in_dev) {
 		iml = NULL;
@@ -1827,15 +1809,13 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
 	struct inet_sock *inet = inet_sk(sk);
 	struct ip_mc_socklist *iml, **imlp;
 	struct in_device *in_dev;
+	struct net *net = sock_net(sk);
 	__be32 group = imr->imr_multiaddr.s_addr;
 	u32 ifindex;
 	int ret = -EADDRNOTAVAIL;
 
-	if (!net_eq(sock_net(sk), &init_net))
-		return -EPROTONOSUPPORT;
-
 	rtnl_lock();
-	in_dev = ip_mc_find_dev(imr);
+	in_dev = ip_mc_find_dev(net, imr);
 	ifindex = imr->imr_ifindex;
 	for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) {
 		if (iml->multi.imr_multiaddr.s_addr != group)
@@ -1873,21 +1853,19 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
 	struct in_device *in_dev = NULL;
 	struct inet_sock *inet = inet_sk(sk);
 	struct ip_sf_socklist *psl;
+	struct net *net = sock_net(sk);
 	int leavegroup = 0;
 	int i, j, rv;
 
 	if (!ipv4_is_multicast(addr))
 		return -EINVAL;
 
-	if (!net_eq(sock_net(sk), &init_net))
-		return -EPROTONOSUPPORT;
-
 	rtnl_lock();
 
 	imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr;
 	imr.imr_address.s_addr = mreqs->imr_interface;
 	imr.imr_ifindex = ifindex;
-	in_dev = ip_mc_find_dev(&imr);
+	in_dev = ip_mc_find_dev(net, &imr);
 
 	if (!in_dev) {
 		err = -ENODEV;
@@ -2007,6 +1985,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
 	struct in_device *in_dev;
 	struct inet_sock *inet = inet_sk(sk);
 	struct ip_sf_socklist *newpsl, *psl;
+	struct net *net = sock_net(sk);
 	int leavegroup = 0;
 
 	if (!ipv4_is_multicast(addr))
@@ -2015,15 +1994,12 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
 	    msf->imsf_fmode != MCAST_EXCLUDE)
 		return -EINVAL;
 
-	if (!net_eq(sock_net(sk), &init_net))
-		return -EPROTONOSUPPORT;
-
 	rtnl_lock();
 
 	imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
 	imr.imr_address.s_addr = msf->imsf_interface;
 	imr.imr_ifindex = ifindex;
-	in_dev = ip_mc_find_dev(&imr);
+	in_dev = ip_mc_find_dev(net, &imr);
 
 	if (!in_dev) {
 		err = -ENODEV;
@@ -2094,19 +2070,17 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
 	struct in_device *in_dev;
 	struct inet_sock *inet = inet_sk(sk);
 	struct ip_sf_socklist *psl;
+	struct net *net = sock_net(sk);
 
 	if (!ipv4_is_multicast(addr))
 		return -EINVAL;
 
-	if (!net_eq(sock_net(sk), &init_net))
-		return -EPROTONOSUPPORT;
-
 	rtnl_lock();
 
 	imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
 	imr.imr_address.s_addr = msf->imsf_interface;
 	imr.imr_ifindex = 0;
-	in_dev = ip_mc_find_dev(&imr);
+	in_dev = ip_mc_find_dev(net, &imr);
 
 	if (!in_dev) {
 		err = -ENODEV;
@@ -2163,9 +2137,6 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
 	if (!ipv4_is_multicast(addr))
 		return -EINVAL;
 
-	if (!net_eq(sock_net(sk), &init_net))
-		return -EPROTONOSUPPORT;
-
 	rtnl_lock();
 
 	err = -EADDRNOTAVAIL;
@@ -2246,19 +2217,17 @@ void ip_mc_drop_socket(struct sock *sk)
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct ip_mc_socklist *iml;
+	struct net *net = sock_net(sk);
 
 	if (inet->mc_list == NULL)
 		return;
 
-	if (!net_eq(sock_net(sk), &init_net))
-		return;
-
 	rtnl_lock();
 	while ((iml = inet->mc_list) != NULL) {
 		struct in_device *in_dev;
 		inet->mc_list = iml->next;
 
-		in_dev = inetdev_by_index(&init_net, iml->multi.imr_ifindex);
+		in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
 		(void) ip_mc_leave_src(sk, iml, in_dev);
 		if (in_dev != NULL) {
 			ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
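Every igmp.c hunk applies one conversion: obtain the namespace from the object in hand (dev_net() for a device, sock_net() for a socket), thread it through ip_mc_find_dev() and the routing calls, and delete the guards that refused any namespace other than init_net. The device-side shape, as an illustrative sketch rather than the patched function:

	static struct rtable *mcast_route(struct net_device *dev, struct flowi *fl)
	{
		struct net *net = dev_net(dev);		/* this device's namespace */
		struct rtable *rt;

		if (ip_route_output_key(net, &rt, fl))
			return NULL;			/* no route in that namespace */
		return rt;
	}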
diff --git a/net/ipv4/ipvs/ip_vs_app.c b/net/ipv4/ipvs/ip_vs_app.c
index 1f1897a1a702..201b8ea3020d 100644
--- a/net/ipv4/ipvs/ip_vs_app.c
+++ b/net/ipv4/ipvs/ip_vs_app.c
@@ -608,7 +608,7 @@ int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri,
 }
 
 
-int ip_vs_app_init(void)
+int __init ip_vs_app_init(void)
 {
 	/* we will replace it with proc_net_ipvs_create() soon */
 	proc_net_fops_create(&init_net, "ip_vs_app", 0, &ip_vs_app_fops);
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c
index f8bdae47a77f..44a6872dc245 100644
--- a/net/ipv4/ipvs/ip_vs_conn.c
+++ b/net/ipv4/ipvs/ip_vs_conn.c
@@ -965,7 +965,7 @@ static void ip_vs_conn_flush(void)
 }
 
 
-int ip_vs_conn_init(void)
+int __init ip_vs_conn_init(void)
 {
 	int idx;
 
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 9a5ace0b4dd6..6379705a8dcb 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -683,9 +683,22 @@ static void
 ip_vs_zero_stats(struct ip_vs_stats *stats)
 {
 	spin_lock_bh(&stats->lock);
-	memset(stats, 0, (char *)&stats->lock - (char *)stats);
-	spin_unlock_bh(&stats->lock);
+
+	stats->conns = 0;
+	stats->inpkts = 0;
+	stats->outpkts = 0;
+	stats->inbytes = 0;
+	stats->outbytes = 0;
+
+	stats->cps = 0;
+	stats->inpps = 0;
+	stats->outpps = 0;
+	stats->inbps = 0;
+	stats->outbps = 0;
+
 	ip_vs_zero_estimator(stats);
+
+	spin_unlock_bh(&stats->lock);
 }
 
 /*
@@ -1589,7 +1602,7 @@ static struct ctl_table vs_vars[] = {
 	{ .ctl_name = 0 }
 };
 
-struct ctl_path net_vs_ctl_path[] = {
+const struct ctl_path net_vs_ctl_path[] = {
 	{ .procname = "net", .ctl_name = CTL_NET, },
 	{ .procname = "ipv4", .ctl_name = NET_IPV4, },
 	{ .procname = "vs", },
@@ -1784,7 +1797,9 @@ static const struct file_operations ip_vs_info_fops = {
 
 #endif
 
-struct ip_vs_stats ip_vs_stats;
+struct ip_vs_stats ip_vs_stats = {
+	.lock = __SPIN_LOCK_UNLOCKED(ip_vs_stats.lock),
+};
 
 #ifdef CONFIG_PROC_FS
 static int ip_vs_stats_show(struct seq_file *seq, void *v)
@@ -2306,7 +2321,7 @@ static struct nf_sockopt_ops ip_vs_sockopts = {
 };
 
 
-int ip_vs_control_init(void)
+int __init ip_vs_control_init(void)
 {
 	int ret;
 	int idx;
@@ -2333,8 +2348,6 @@ int ip_vs_control_init(void)
 		INIT_LIST_HEAD(&ip_vs_rtable[idx]);
 	}
 
-	memset(&ip_vs_stats, 0, sizeof(ip_vs_stats));
-	spin_lock_init(&ip_vs_stats.lock);
 	ip_vs_new_estimator(&ip_vs_stats);
 
 	/* Hook the defense timer */
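Two things happen in the ip_vs_ctl.c hunks: ip_vs_zero_stats() zeroes the counters field by field instead of memset()ing everything up to the lock (the estimator state now lives inside the struct and is reset under the same lock), and ip_vs_stats gets its lock from a static initializer. The initializer matters because it makes the lock valid from link time, before ip_vs_control_init() runs; in miniature, with illustrative names:

	/* usable even before any init function has executed */
	static struct my_stats stats = {
		.lock = __SPIN_LOCK_UNLOCKED(stats.lock),
	};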
diff --git a/net/ipv4/ipvs/ip_vs_dh.c b/net/ipv4/ipvs/ip_vs_dh.c
index 8afc1503ed20..fa66824d264f 100644
--- a/net/ipv4/ipvs/ip_vs_dh.c
+++ b/net/ipv4/ipvs/ip_vs_dh.c
@@ -233,6 +233,7 @@ static struct ip_vs_scheduler ip_vs_dh_scheduler =
 	.name = "dh",
 	.refcnt = ATOMIC_INIT(0),
 	.module = THIS_MODULE,
+	.n_list = LIST_HEAD_INIT(ip_vs_dh_scheduler.n_list),
 	.init_service = ip_vs_dh_init_svc,
 	.done_service = ip_vs_dh_done_svc,
 	.update_service = ip_vs_dh_update_svc,
@@ -242,7 +243,6 @@ static struct ip_vs_scheduler ip_vs_dh_scheduler =
 
 static int __init ip_vs_dh_init(void)
 {
-	INIT_LIST_HEAD(&ip_vs_dh_scheduler.n_list);
 	return register_ip_vs_scheduler(&ip_vs_dh_scheduler);
 }
 
diff --git a/net/ipv4/ipvs/ip_vs_est.c b/net/ipv4/ipvs/ip_vs_est.c
index bc04eedd6dbb..5a20f93bd7f9 100644
--- a/net/ipv4/ipvs/ip_vs_est.c
+++ b/net/ipv4/ipvs/ip_vs_est.c
@@ -17,6 +17,7 @@
 #include <linux/types.h>
 #include <linux/interrupt.h>
 #include <linux/sysctl.h>
+#include <linux/list.h>
 
 #include <net/ip_vs.h>
 
@@ -44,28 +45,11 @@
  */
 
 
-struct ip_vs_estimator
-{
-	struct ip_vs_estimator *next;
-	struct ip_vs_stats *stats;
-
-	u32 last_conns;
-	u32 last_inpkts;
-	u32 last_outpkts;
-	u64 last_inbytes;
-	u64 last_outbytes;
-
-	u32 cps;
-	u32 inpps;
-	u32 outpps;
-	u32 inbps;
-	u32 outbps;
-};
-
+static void estimation_timer(unsigned long arg);
 
-static struct ip_vs_estimator *est_list = NULL;
-static DEFINE_RWLOCK(est_lock);
-static struct timer_list est_timer;
+static LIST_HEAD(est_list);
+static DEFINE_SPINLOCK(est_lock);
+static DEFINE_TIMER(est_timer, estimation_timer, 0, 0);
 
 static void estimation_timer(unsigned long arg)
 {
@@ -76,9 +60,9 @@ static void estimation_timer(unsigned long arg)
 	u64 n_inbytes, n_outbytes;
 	u32 rate;
 
-	read_lock(&est_lock);
-	for (e = est_list; e; e = e->next) {
-		s = e->stats;
+	spin_lock(&est_lock);
+	list_for_each_entry(e, &est_list, list) {
+		s = container_of(e, struct ip_vs_stats, est);
 
 		spin_lock(&s->lock);
 		n_conns = s->conns;
@@ -114,19 +98,16 @@ static void estimation_timer(unsigned long arg)
 		s->outbps = (e->outbps+0xF)>>5;
 		spin_unlock(&s->lock);
 	}
-	read_unlock(&est_lock);
+	spin_unlock(&est_lock);
 	mod_timer(&est_timer, jiffies + 2*HZ);
 }
 
-int ip_vs_new_estimator(struct ip_vs_stats *stats)
+void ip_vs_new_estimator(struct ip_vs_stats *stats)
 {
-	struct ip_vs_estimator *est;
+	struct ip_vs_estimator *est = &stats->est;
 
-	est = kzalloc(sizeof(*est), GFP_KERNEL);
-	if (est == NULL)
-		return -ENOMEM;
+	INIT_LIST_HEAD(&est->list);
 
-	est->stats = stats;
 	est->last_conns = stats->conns;
 	est->cps = stats->cps<<10;
 
@@ -142,59 +123,40 @@ int ip_vs_new_estimator(struct ip_vs_stats *stats)
 	est->last_outbytes = stats->outbytes;
 	est->outbps = stats->outbps<<5;
 
-	write_lock_bh(&est_lock);
-	est->next = est_list;
-	if (est->next == NULL) {
-		setup_timer(&est_timer, estimation_timer, 0);
-		est_timer.expires = jiffies + 2*HZ;
-		add_timer(&est_timer);
-	}
-	est_list = est;
-	write_unlock_bh(&est_lock);
-	return 0;
+	spin_lock_bh(&est_lock);
+	if (list_empty(&est_list))
+		mod_timer(&est_timer, jiffies + 2 * HZ);
+	list_add(&est->list, &est_list);
+	spin_unlock_bh(&est_lock);
 }
 
 void ip_vs_kill_estimator(struct ip_vs_stats *stats)
 {
-	struct ip_vs_estimator *est, **pest;
-	int killed = 0;
-
-	write_lock_bh(&est_lock);
-	pest = &est_list;
-	while ((est=*pest) != NULL) {
-		if (est->stats != stats) {
-			pest = &est->next;
-			continue;
-		}
-		*pest = est->next;
-		kfree(est);
-		killed++;
+	struct ip_vs_estimator *est = &stats->est;
+
+	spin_lock_bh(&est_lock);
+	list_del(&est->list);
+	while (list_empty(&est_list) && try_to_del_timer_sync(&est_timer) < 0) {
+		spin_unlock_bh(&est_lock);
+		cpu_relax();
+		spin_lock_bh(&est_lock);
 	}
-	if (killed && est_list == NULL)
-		del_timer_sync(&est_timer);
-	write_unlock_bh(&est_lock);
+	spin_unlock_bh(&est_lock);
 }
 
 void ip_vs_zero_estimator(struct ip_vs_stats *stats)
 {
-	struct ip_vs_estimator *e;
+	struct ip_vs_estimator *est = &stats->est;
 
-	write_lock_bh(&est_lock);
-	for (e = est_list; e; e = e->next) {
-		if (e->stats != stats)
-			continue;
-
-		/* set counters zero */
-		e->last_conns = 0;
-		e->last_inpkts = 0;
-		e->last_outpkts = 0;
-		e->last_inbytes = 0;
-		e->last_outbytes = 0;
-		e->cps = 0;
-		e->inpps = 0;
-		e->outpps = 0;
-		e->inbps = 0;
-		e->outbps = 0;
-	}
-	write_unlock_bh(&est_lock);
+	/* set counters zero, caller must hold the stats->lock lock */
+	est->last_inbytes = 0;
+	est->last_outbytes = 0;
+	est->last_conns = 0;
+	est->last_inpkts = 0;
+	est->last_outpkts = 0;
+	est->cps = 0;
+	est->inpps = 0;
+	est->outpps = 0;
+	est->inbps = 0;
+	est->outbps = 0;
 }
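The estimator rewrite swaps a hand-rolled singly linked list plus stats back-pointer for a struct ip_vs_estimator embedded in struct ip_vs_stats; the timer then recovers the owner with container_of() and nothing is kmalloc()ed or kfree()d anymore. The idiom, with the fields abbreviated:

	struct ip_vs_estimator {
		struct list_head list;		/* linkage on est_list */
		/* ... last_* and rate fields ... */
	};

	struct ip_vs_stats {
		/* ... counters and lock ... */
		struct ip_vs_estimator est;	/* embedded, no separate allocation */
	};

	/* from a list node back to the stats that contain it */
	struct ip_vs_stats *s = container_of(e, struct ip_vs_stats, est);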
diff --git a/net/ipv4/ipvs/ip_vs_lblc.c b/net/ipv4/ipvs/ip_vs_lblc.c
index 0efa3db4b180..7a6a319f544a 100644
--- a/net/ipv4/ipvs/ip_vs_lblc.c
+++ b/net/ipv4/ipvs/ip_vs_lblc.c
@@ -539,6 +539,7 @@ static struct ip_vs_scheduler ip_vs_lblc_scheduler =
 	.name = "lblc",
 	.refcnt = ATOMIC_INIT(0),
 	.module = THIS_MODULE,
+	.n_list = LIST_HEAD_INIT(ip_vs_lblc_scheduler.n_list),
 	.init_service = ip_vs_lblc_init_svc,
 	.done_service = ip_vs_lblc_done_svc,
 	.update_service = ip_vs_lblc_update_svc,
@@ -550,7 +551,6 @@ static int __init ip_vs_lblc_init(void)
 {
 	int ret;
 
-	INIT_LIST_HEAD(&ip_vs_lblc_scheduler.n_list);
 	sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
 	ret = register_ip_vs_scheduler(&ip_vs_lblc_scheduler);
 	if (ret)
diff --git a/net/ipv4/ipvs/ip_vs_lblcr.c b/net/ipv4/ipvs/ip_vs_lblcr.c
index 8e3bbeb45138..c234e73968a6 100644
--- a/net/ipv4/ipvs/ip_vs_lblcr.c
+++ b/net/ipv4/ipvs/ip_vs_lblcr.c
@@ -728,6 +728,7 @@ static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
 	.name = "lblcr",
 	.refcnt = ATOMIC_INIT(0),
 	.module = THIS_MODULE,
+	.n_list = LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list),
 	.init_service = ip_vs_lblcr_init_svc,
 	.done_service = ip_vs_lblcr_done_svc,
 	.update_service = ip_vs_lblcr_update_svc,
@@ -739,7 +740,6 @@ static int __init ip_vs_lblcr_init(void)
 {
 	int ret;
 
-	INIT_LIST_HEAD(&ip_vs_lblcr_scheduler.n_list);
 	sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
 	ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
 	if (ret)
diff --git a/net/ipv4/ipvs/ip_vs_lc.c b/net/ipv4/ipvs/ip_vs_lc.c
index ac9f08e065d5..ebcdbf75ac65 100644
--- a/net/ipv4/ipvs/ip_vs_lc.c
+++ b/net/ipv4/ipvs/ip_vs_lc.c
@@ -98,6 +98,7 @@ static struct ip_vs_scheduler ip_vs_lc_scheduler = {
 	.name = "lc",
 	.refcnt = ATOMIC_INIT(0),
 	.module = THIS_MODULE,
+	.n_list = LIST_HEAD_INIT(ip_vs_lc_scheduler.n_list),
 	.init_service = ip_vs_lc_init_svc,
 	.done_service = ip_vs_lc_done_svc,
 	.update_service = ip_vs_lc_update_svc,
@@ -107,7 +108,6 @@ static struct ip_vs_scheduler ip_vs_lc_scheduler = {
 
 static int __init ip_vs_lc_init(void)
 {
-	INIT_LIST_HEAD(&ip_vs_lc_scheduler.n_list);
 	return register_ip_vs_scheduler(&ip_vs_lc_scheduler) ;
 }
 
diff --git a/net/ipv4/ipvs/ip_vs_nq.c b/net/ipv4/ipvs/ip_vs_nq.c
index a46bf258d420..92f3a6770031 100644
--- a/net/ipv4/ipvs/ip_vs_nq.c
+++ b/net/ipv4/ipvs/ip_vs_nq.c
@@ -136,6 +136,7 @@ static struct ip_vs_scheduler ip_vs_nq_scheduler =
 	.name = "nq",
 	.refcnt = ATOMIC_INIT(0),
 	.module = THIS_MODULE,
+	.n_list = LIST_HEAD_INIT(ip_vs_nq_scheduler.n_list),
 	.init_service = ip_vs_nq_init_svc,
 	.done_service = ip_vs_nq_done_svc,
 	.update_service = ip_vs_nq_update_svc,
@@ -145,7 +146,6 @@ static struct ip_vs_scheduler ip_vs_nq_scheduler =
 
 static int __init ip_vs_nq_init(void)
 {
-	INIT_LIST_HEAD(&ip_vs_nq_scheduler.n_list);
 	return register_ip_vs_scheduler(&ip_vs_nq_scheduler);
 }
 
diff --git a/net/ipv4/ipvs/ip_vs_proto.c b/net/ipv4/ipvs/ip_vs_proto.c
index 876714f23d65..6099a88fc200 100644
--- a/net/ipv4/ipvs/ip_vs_proto.c
+++ b/net/ipv4/ipvs/ip_vs_proto.c
@@ -43,7 +43,7 @@ static struct ip_vs_protocol *ip_vs_proto_table[IP_VS_PROTO_TAB_SIZE];
 /*
  *	register an ipvs protocol
  */
-static int __used register_ip_vs_protocol(struct ip_vs_protocol *pp)
+static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp)
 {
 	unsigned hash = IP_VS_PROTO_HASH(pp->protocol);
 
@@ -190,7 +190,7 @@ ip_vs_tcpudp_debug_packet(struct ip_vs_protocol *pp,
 }
 
 
-int ip_vs_protocol_init(void)
+int __init ip_vs_protocol_init(void)
 {
 	char protocols[64];
 #define REGISTER_PROTOCOL(p)	\
diff --git a/net/ipv4/ipvs/ip_vs_rr.c b/net/ipv4/ipvs/ip_vs_rr.c
index c8db12d39e61..358110d17e59 100644
--- a/net/ipv4/ipvs/ip_vs_rr.c
+++ b/net/ipv4/ipvs/ip_vs_rr.c
@@ -94,6 +94,7 @@ static struct ip_vs_scheduler ip_vs_rr_scheduler = {
 	.name = "rr",			/* name */
 	.refcnt = ATOMIC_INIT(0),
 	.module = THIS_MODULE,
+	.n_list = LIST_HEAD_INIT(ip_vs_rr_scheduler.n_list),
 	.init_service = ip_vs_rr_init_svc,
 	.done_service = ip_vs_rr_done_svc,
 	.update_service = ip_vs_rr_update_svc,
@@ -102,7 +103,6 @@ static struct ip_vs_scheduler ip_vs_rr_scheduler = {
 
 static int __init ip_vs_rr_init(void)
 {
-	INIT_LIST_HEAD(&ip_vs_rr_scheduler.n_list);
 	return register_ip_vs_scheduler(&ip_vs_rr_scheduler);
 }
 
diff --git a/net/ipv4/ipvs/ip_vs_sched.c b/net/ipv4/ipvs/ip_vs_sched.c
index b64767309855..a46ad9e35016 100644
--- a/net/ipv4/ipvs/ip_vs_sched.c
+++ b/net/ipv4/ipvs/ip_vs_sched.c
@@ -184,7 +184,7 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
 
 	write_lock_bh(&__ip_vs_sched_lock);
 
-	if (scheduler->n_list.next != &scheduler->n_list) {
+	if (!list_empty(&scheduler->n_list)) {
 		write_unlock_bh(&__ip_vs_sched_lock);
 		ip_vs_use_count_dec();
 		IP_VS_ERR("register_ip_vs_scheduler(): [%s] scheduler "
@@ -229,7 +229,7 @@ int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
 	}
 
 	write_lock_bh(&__ip_vs_sched_lock);
-	if (scheduler->n_list.next == &scheduler->n_list) {
+	if (list_empty(&scheduler->n_list)) {
 		write_unlock_bh(&__ip_vs_sched_lock);
 		IP_VS_ERR("unregister_ip_vs_scheduler(): [%s] scheduler "
 			  "is not in the list. failed\n", scheduler->name);
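list_empty() is only meaningful on a list head that has been initialized, which is why the scheduler patches in this series move n_list setup from each module's init function into the static definition. With LIST_HEAD_INIT the node is a valid empty list from link time on, so the registered/unregistered tests above can never see garbage; sketched with an illustrative name:

	static struct ip_vs_scheduler sched = {
		.n_list = LIST_HEAD_INIT(sched.n_list),	/* empty, never garbage */
	};

	if (!list_empty(&sched.n_list))
		/* linked in, i.e. already registered */;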
diff --git a/net/ipv4/ipvs/ip_vs_sed.c b/net/ipv4/ipvs/ip_vs_sed.c
index 2a7d31358181..77663d84cbd1 100644
--- a/net/ipv4/ipvs/ip_vs_sed.c
+++ b/net/ipv4/ipvs/ip_vs_sed.c
@@ -138,6 +138,7 @@ static struct ip_vs_scheduler ip_vs_sed_scheduler =
 	.name = "sed",
 	.refcnt = ATOMIC_INIT(0),
 	.module = THIS_MODULE,
+	.n_list = LIST_HEAD_INIT(ip_vs_sed_scheduler.n_list),
 	.init_service = ip_vs_sed_init_svc,
 	.done_service = ip_vs_sed_done_svc,
 	.update_service = ip_vs_sed_update_svc,
@@ -147,7 +148,6 @@ static struct ip_vs_scheduler ip_vs_sed_scheduler =
 
 static int __init ip_vs_sed_init(void)
 {
-	INIT_LIST_HEAD(&ip_vs_sed_scheduler.n_list);
 	return register_ip_vs_scheduler(&ip_vs_sed_scheduler);
 }
 
diff --git a/net/ipv4/ipvs/ip_vs_sh.c b/net/ipv4/ipvs/ip_vs_sh.c
index b8fdfac65001..7b979e228056 100644
--- a/net/ipv4/ipvs/ip_vs_sh.c
+++ b/net/ipv4/ipvs/ip_vs_sh.c
@@ -230,6 +230,7 @@ static struct ip_vs_scheduler ip_vs_sh_scheduler =
 	.name = "sh",
 	.refcnt = ATOMIC_INIT(0),
 	.module = THIS_MODULE,
+	.n_list = LIST_HEAD_INIT(ip_vs_sh_scheduler.n_list),
 	.init_service = ip_vs_sh_init_svc,
 	.done_service = ip_vs_sh_done_svc,
 	.update_service = ip_vs_sh_update_svc,
@@ -239,7 +240,6 @@ static struct ip_vs_scheduler ip_vs_sh_scheduler =
 
 static int __init ip_vs_sh_init(void)
 {
-	INIT_LIST_HEAD(&ip_vs_sh_scheduler.n_list);
 	return register_ip_vs_scheduler(&ip_vs_sh_scheduler);
 }
 
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c
index 45e9bd96c286..a652da2c3200 100644
--- a/net/ipv4/ipvs/ip_vs_sync.c
+++ b/net/ipv4/ipvs/ip_vs_sync.c
@@ -904,9 +904,9 @@ int stop_sync_thread(int state)
 		 * progress of stopping the master sync daemon.
 		 */
 
-		spin_lock(&ip_vs_sync_lock);
+		spin_lock_bh(&ip_vs_sync_lock);
 		ip_vs_sync_state &= ~IP_VS_STATE_MASTER;
-		spin_unlock(&ip_vs_sync_lock);
+		spin_unlock_bh(&ip_vs_sync_lock);
 		kthread_stop(sync_master_thread);
 		sync_master_thread = NULL;
 	} else if (state == IP_VS_STATE_BACKUP) {
diff --git a/net/ipv4/ipvs/ip_vs_wlc.c b/net/ipv4/ipvs/ip_vs_wlc.c
index 772c3cb4eca1..9b0ef86bb1f7 100644
--- a/net/ipv4/ipvs/ip_vs_wlc.c
+++ b/net/ipv4/ipvs/ip_vs_wlc.c
@@ -126,6 +126,7 @@ static struct ip_vs_scheduler ip_vs_wlc_scheduler =
 	.name = "wlc",
 	.refcnt = ATOMIC_INIT(0),
 	.module = THIS_MODULE,
+	.n_list = LIST_HEAD_INIT(ip_vs_wlc_scheduler.n_list),
 	.init_service = ip_vs_wlc_init_svc,
 	.done_service = ip_vs_wlc_done_svc,
 	.update_service = ip_vs_wlc_update_svc,
@@ -135,7 +136,6 @@ static struct ip_vs_scheduler ip_vs_wlc_scheduler =
 
 static int __init ip_vs_wlc_init(void)
 {
-	INIT_LIST_HEAD(&ip_vs_wlc_scheduler.n_list);
 	return register_ip_vs_scheduler(&ip_vs_wlc_scheduler);
 }
 
diff --git a/net/ipv4/ipvs/ip_vs_wrr.c b/net/ipv4/ipvs/ip_vs_wrr.c
index 1d6932d7dc97..0d86a79b87b5 100644
--- a/net/ipv4/ipvs/ip_vs_wrr.c
+++ b/net/ipv4/ipvs/ip_vs_wrr.c
@@ -212,6 +212,7 @@ static struct ip_vs_scheduler ip_vs_wrr_scheduler = {
 	.name = "wrr",
 	.refcnt = ATOMIC_INIT(0),
 	.module = THIS_MODULE,
+	.n_list = LIST_HEAD_INIT(ip_vs_wrr_scheduler.n_list),
 	.init_service = ip_vs_wrr_init_svc,
 	.done_service = ip_vs_wrr_done_svc,
 	.update_service = ip_vs_wrr_update_svc,
@@ -220,7 +221,6 @@ static struct ip_vs_scheduler ip_vs_wrr_scheduler = {
 
 static int __init ip_vs_wrr_init(void)
 {
-	INIT_LIST_HEAD(&ip_vs_wrr_scheduler.n_list);
 	return register_ip_vs_scheduler(&ip_vs_wrr_scheduler) ;
 }
 
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 383d17359d01..8e42fbbd5761 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -989,7 +989,9 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
 		    up->encap_rcv != NULL) {
 			int ret;
 
+			bh_unlock_sock(sk);
 			ret = (*up->encap_rcv)(sk, skb);
+			bh_lock_sock(sk);
 			if (ret <= 0) {
 				UDP_INC_STATS_BH(sock_net(sk),
 						 UDP_MIB_INDATAGRAMS,
@@ -1092,7 +1094,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 		if (skb1) {
 			int ret = 0;
 
-			bh_lock_sock_nested(sk);
+			bh_lock_sock(sk);
 			if (!sock_owned_by_user(sk))
 				ret = udp_queue_rcv_skb(sk, skb1);
 			else
@@ -1194,7 +1196,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
 
 	if (sk != NULL) {
 		int ret = 0;
-		bh_lock_sock_nested(sk);
+		bh_lock_sock(sk);
 		if (!sock_owned_by_user(sk))
 			ret = udp_queue_rcv_skb(sk, skb);
 		else
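Holding the socket lock across the encap_rcv callback risks recursive locking if the handler feeds the packet back into the UDP receive path, so the first hunk drops and re-takes it around the call; with that recursion gone, the _nested lock variants elsewhere can revert to plain bh_lock_sock(). The callback pattern in isolation (illustrative):

	bh_unlock_sock(sk);		/* never hold the lock across a re-entrant callback */
	ret = (*handler)(sk, skb);	/* may itself lock sk */
	bh_lock_sock(sk);		/* re-acquire before touching socket state */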
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 5a3e87e4b18f..41b165ffb369 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2187,8 +2187,9 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
 #endif
 		NLA_PUT_U32(skb, RTA_IIF, iif);
 	} else if (dst) {
+		struct inet6_dev *idev = ip6_dst_idev(&rt->u.dst);
 		struct in6_addr saddr_buf;
-		if (ipv6_dev_get_saddr(ip6_dst_idev(&rt->u.dst)->dev,
+		if (ipv6_dev_get_saddr(idev ? idev->dev : NULL,
 				       dst, 0, &saddr_buf) == 0)
 			NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
 	}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index d1477b350f76..a6aecf76a71b 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -379,7 +379,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 					uh->source, saddr, dif))) {
 		struct sk_buff *buff = skb_clone(skb, GFP_ATOMIC);
 		if (buff) {
-			bh_lock_sock_nested(sk2);
+			bh_lock_sock(sk2);
 			if (!sock_owned_by_user(sk2))
 				udpv6_queue_rcv_skb(sk2, buff);
 			else
@@ -387,7 +387,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 			bh_unlock_sock(sk2);
 		}
 	}
-	bh_lock_sock_nested(sk);
+	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		udpv6_queue_rcv_skb(sk, skb);
 	else
@@ -508,7 +508,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
 
 	/* deliver */
 
-	bh_lock_sock_nested(sk);
+	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		udpv6_queue_rcv_skb(sk, skb);
 	else
diff --git a/net/rxrpc/ar-accept.c b/net/rxrpc/ar-accept.c
index bdfb77417794..77228f28fa36 100644
--- a/net/rxrpc/ar-accept.c
+++ b/net/rxrpc/ar-accept.c
@@ -100,7 +100,7 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
 
 	trans = rxrpc_get_transport(local, peer, GFP_NOIO);
 	rxrpc_put_peer(peer);
-	if (!trans) {
+	if (IS_ERR(trans)) {
 		_debug("no trans");
 		ret = -EBUSY;
 		goto error;
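rxrpc_get_transport() reports failure as an ERR_PTR()-encoded pointer, never as NULL, so the old !trans test could never fire and an error pointer could be used as a live transport. The kernel convention in brief (illustrative call):

	ptr = get_object();		/* returns ERR_PTR(-errno) on failure */
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);	/* recover the negative errno */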
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 26c7e1f9a350..9974b3f04f05 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -751,7 +751,7 @@ static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
 	struct nlattr *tb[TCA_ACT_MAX+1];
 	struct nlattr *kind;
 	struct tc_action *a = create_a(0);
-	int err = -EINVAL;
+	int err = -ENOMEM;
 
 	if (a == NULL) {
 		printk("tca_action_flush: couldnt create tc_action\n");
@@ -762,7 +762,7 @@ static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
 	if (!skb) {
 		printk("tca_action_flush: failed skb alloc\n");
 		kfree(a);
-		return -ENOBUFS;
+		return err;
 	}
 
 	b = skb_tail_pointer(skb);
@@ -790,6 +790,8 @@ static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
 	err = a->ops->walk(skb, &dcb, RTM_DELACTION, a);
 	if (err < 0)
 		goto nla_put_failure;
+	if (err == 0)
+		goto noflush_out;
 
 	nla_nest_end(skb, nest);
 
@@ -807,6 +809,7 @@ nla_put_failure:
 nlmsg_failure:
 	module_put(a->ops->owner);
 err_out:
+noflush_out:
 	kfree_skb(skb);
 	kfree(a);
 	return err;
@@ -824,8 +827,10 @@ tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event)
 		return ret;
 
 	if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) {
-		if (tb[0] != NULL && tb[1] == NULL)
-			return tca_action_flush(tb[0], n, pid);
+		if (tb[1] != NULL)
+			return tca_action_flush(tb[1], n, pid);
+		else
+			return -EINVAL;
 	}
 
 	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index ba1d121f3127..c25465e5607a 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -183,6 +183,21 @@ EXPORT_SYMBOL(unregister_qdisc); | |||
183 | (root qdisc, all its children, children of children etc.) | 183 | (root qdisc, all its children, children of children etc.) |
184 | */ | 184 | */ |
185 | 185 | ||
186 | struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle) | ||
187 | { | ||
188 | struct Qdisc *q; | ||
189 | |||
190 | if (!(root->flags & TCQ_F_BUILTIN) && | ||
191 | root->handle == handle) | ||
192 | return root; | ||
193 | |||
194 | list_for_each_entry(q, &root->list, list) { | ||
195 | if (q->handle == handle) | ||
196 | return q; | ||
197 | } | ||
198 | return NULL; | ||
199 | } | ||
200 | |||
186 | struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) | 201 | struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) |
187 | { | 202 | { |
188 | unsigned int i; | 203 | unsigned int i; |
@@ -191,16 +206,11 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) | |||
191 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | 206 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); |
192 | struct Qdisc *q, *txq_root = txq->qdisc_sleeping; | 207 | struct Qdisc *q, *txq_root = txq->qdisc_sleeping; |
193 | 208 | ||
194 | if (!(txq_root->flags & TCQ_F_BUILTIN) && | 209 | q = qdisc_match_from_root(txq_root, handle); |
195 | txq_root->handle == handle) | 210 | if (q) |
196 | return txq_root; | 211 | return q; |
197 | |||
198 | list_for_each_entry(q, &txq_root->list, list) { | ||
199 | if (q->handle == handle) | ||
200 | return q; | ||
201 | } | ||
202 | } | 212 | } |
203 | return NULL; | 213 | return qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle); |
204 | } | 214 | } |
205 | 215 | ||
206 | static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid) | 216 | static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid) |
@@ -321,7 +331,7 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt) | |||
321 | if (!s || tsize != s->tsize || (!tab && tsize > 0)) | 331 | if (!s || tsize != s->tsize || (!tab && tsize > 0)) |
322 | return ERR_PTR(-EINVAL); | 332 | return ERR_PTR(-EINVAL); |
323 | 333 | ||
324 | spin_lock(&qdisc_stab_lock); | 334 | spin_lock_bh(&qdisc_stab_lock); |
325 | 335 | ||
326 | list_for_each_entry(stab, &qdisc_stab_list, list) { | 336 | list_for_each_entry(stab, &qdisc_stab_list, list) { |
327 | if (memcmp(&stab->szopts, s, sizeof(*s))) | 337 | if (memcmp(&stab->szopts, s, sizeof(*s))) |
@@ -329,11 +339,11 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt) | |||
329 | if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16))) | 339 | if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16))) |
330 | continue; | 340 | continue; |
331 | stab->refcnt++; | 341 | stab->refcnt++; |
332 | spin_unlock(&qdisc_stab_lock); | 342 | spin_unlock_bh(&qdisc_stab_lock); |
333 | return stab; | 343 | return stab; |
334 | } | 344 | } |
335 | 345 | ||
336 | spin_unlock(&qdisc_stab_lock); | 346 | spin_unlock_bh(&qdisc_stab_lock); |
337 | 347 | ||
338 | stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL); | 348 | stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL); |
339 | if (!stab) | 349 | if (!stab) |
@@ -344,9 +354,9 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt) | |||
344 | if (tsize > 0) | 354 | if (tsize > 0) |
345 | memcpy(stab->data, tab, tsize * sizeof(u16)); | 355 | memcpy(stab->data, tab, tsize * sizeof(u16)); |
346 | 356 | ||
347 | spin_lock(&qdisc_stab_lock); | 357 | spin_lock_bh(&qdisc_stab_lock); |
348 | list_add_tail(&stab->list, &qdisc_stab_list); | 358 | list_add_tail(&stab->list, &qdisc_stab_list); |
349 | spin_unlock(&qdisc_stab_lock); | 359 | spin_unlock_bh(&qdisc_stab_lock); |
350 | 360 | ||
351 | return stab; | 361 | return stab; |
352 | } | 362 | } |
@@ -356,14 +366,14 @@ void qdisc_put_stab(struct qdisc_size_table *tab) | |||
356 | if (!tab) | 366 | if (!tab) |
357 | return; | 367 | return; |
358 | 368 | ||
359 | spin_lock(&qdisc_stab_lock); | 369 | spin_lock_bh(&qdisc_stab_lock); |
360 | 370 | ||
361 | if (--tab->refcnt == 0) { | 371 | if (--tab->refcnt == 0) { |
362 | list_del(&tab->list); | 372 | list_del(&tab->list); |
363 | kfree(tab); | 373 | kfree(tab); |
364 | } | 374 | } |
365 | 375 | ||
366 | spin_unlock(&qdisc_stab_lock); | 376 | spin_unlock_bh(&qdisc_stab_lock); |
367 | } | 377 | } |
368 | EXPORT_SYMBOL(qdisc_put_stab); | 378 | EXPORT_SYMBOL(qdisc_put_stab); |
369 | 379 | ||
@@ -908,7 +918,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
908 | return -ENOENT; | 918 | return -ENOENT; |
909 | q = qdisc_leaf(p, clid); | 919 | q = qdisc_leaf(p, clid); |
910 | } else { /* ingress */ | 920 | } else { /* ingress */ |
911 | q = dev->rx_queue.qdisc; | 921 | q = dev->rx_queue.qdisc_sleeping; |
912 | } | 922 | } |
913 | } else { | 923 | } else { |
914 | struct netdev_queue *dev_queue; | 924 | struct netdev_queue *dev_queue; |
@@ -978,7 +988,7 @@ replay: | |||
978 | return -ENOENT; | 988 | return -ENOENT; |
979 | q = qdisc_leaf(p, clid); | 989 | q = qdisc_leaf(p, clid); |
980 | } else { /*ingress */ | 990 | } else { /*ingress */ |
981 | q = dev->rx_queue.qdisc; | 991 | q = dev->rx_queue.qdisc_sleeping; |
982 | } | 992 | } |
983 | } else { | 993 | } else { |
984 | struct netdev_queue *dev_queue; | 994 | struct netdev_queue *dev_queue; |
@@ -1529,11 +1539,11 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) | |||
1529 | t = 0; | 1539 | t = 0; |
1530 | 1540 | ||
1531 | dev_queue = netdev_get_tx_queue(dev, 0); | 1541 | dev_queue = netdev_get_tx_queue(dev, 0); |
1532 | if (tc_dump_tclass_root(dev_queue->qdisc, skb, tcm, cb, &t, s_t) < 0) | 1542 | if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0) |
1533 | goto done; | 1543 | goto done; |
1534 | 1544 | ||
1535 | dev_queue = &dev->rx_queue; | 1545 | dev_queue = &dev->rx_queue; |
1536 | if (tc_dump_tclass_root(dev_queue->qdisc, skb, tcm, cb, &t, s_t) < 0) | 1546 | if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0) |
1537 | goto done; | 1547 | goto done; |
1538 | 1548 | ||
1539 | done: | 1549 | done: |
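[Note: the remaining sch_api.c hunks make the control path read qdisc_sleeping instead of qdisc. The distinction: txq->qdisc is what the datapath runs right now and is swapped to the noop qdisc while a device is deactivated, whereas qdisc_sleeping keeps the configured discipline, so get/dump operations against a downed device were reporting the wrong qdisc. A toy illustration of the two-pointer scheme, with the field types reduced to strings:]

    #include <stdio.h>

    struct queue {
        const char *qdisc;           /* what the datapath uses right now */
        const char *qdisc_sleeping;  /* what the admin actually configured */
    };

    static void deactivate(struct queue *q)
    {
        q->qdisc = "noop";           /* datapath parked; config retained */
    }

    int main(void)
    {
        struct queue q = { .qdisc = "htb", .qdisc_sleeping = "htb" };

        deactivate(&q);
        /* control path must report the sleeping pointer */
        printf("active=%s configured=%s\n", q.qdisc, q.qdisc_sleeping);
        return 0;
    }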
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 7cf83b37459d..468574682caa 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -647,7 +647,7 @@ static void dev_deactivate_queue(struct net_device *dev, | |||
647 | } | 647 | } |
648 | } | 648 | } |
649 | 649 | ||
650 | static bool some_qdisc_is_running(struct net_device *dev, int lock) | 650 | static bool some_qdisc_is_busy(struct net_device *dev, int lock) |
651 | { | 651 | { |
652 | unsigned int i; | 652 | unsigned int i; |
653 | 653 | ||
@@ -658,13 +658,14 @@ static bool some_qdisc_is_running(struct net_device *dev, int lock) | |||
658 | int val; | 658 | int val; |
659 | 659 | ||
660 | dev_queue = netdev_get_tx_queue(dev, i); | 660 | dev_queue = netdev_get_tx_queue(dev, i); |
661 | q = dev_queue->qdisc; | 661 | q = dev_queue->qdisc_sleeping; |
662 | root_lock = qdisc_lock(q); | 662 | root_lock = qdisc_lock(q); |
663 | 663 | ||
664 | if (lock) | 664 | if (lock) |
665 | spin_lock_bh(root_lock); | 665 | spin_lock_bh(root_lock); |
666 | 666 | ||
667 | val = test_bit(__QDISC_STATE_RUNNING, &q->state); | 667 | val = (test_bit(__QDISC_STATE_RUNNING, &q->state) || |
668 | test_bit(__QDISC_STATE_SCHED, &q->state)); | ||
668 | 669 | ||
669 | if (lock) | 670 | if (lock) |
670 | spin_unlock_bh(root_lock); | 671 | spin_unlock_bh(root_lock); |
@@ -689,14 +690,14 @@ void dev_deactivate(struct net_device *dev) | |||
689 | 690 | ||
690 | /* Wait for outstanding qdisc_run calls. */ | 691 | /* Wait for outstanding qdisc_run calls. */ |
691 | do { | 692 | do { |
692 | while (some_qdisc_is_running(dev, 0)) | 693 | while (some_qdisc_is_busy(dev, 0)) |
693 | yield(); | 694 | yield(); |
694 | 695 | ||
695 | /* | 696 | /* |
696 | * Double-check inside queue lock to ensure that all effects | 697 | * Double-check inside queue lock to ensure that all effects |
697 | * of the queue run are visible when we return. | 698 | * of the queue run are visible when we return. |
698 | */ | 699 | */ |
699 | running = some_qdisc_is_running(dev, 1); | 700 | running = some_qdisc_is_busy(dev, 1); |
700 | 701 | ||
701 | /* | 702 | /* |
702 | * The running flag should never be set at this point because | 703 | * The running flag should never be set at this point because |
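[Note: renaming some_qdisc_is_running() to some_qdisc_is_busy() matches the widened test above: a qdisc that is not __QDISC_STATE_RUNNING may still have __QDISC_STATE_SCHED set, i.e. be queued for the NET_TX softirq, and dev_deactivate() has to wait for those too or a scheduled qdisc could start running after deactivation returns. The predicate, reduced to plain C with illustrative bit values:]

    #include <stdbool.h>
    #include <stdio.h>

    enum {
        QDISC_STATE_RUNNING = 1 << 0,   /* currently being run */
        QDISC_STATE_SCHED   = 1 << 1,   /* queued for the TX softirq */
    };

    /* "busy" now means running *or* scheduled to run soon */
    static bool qdisc_busy(unsigned long state)
    {
        return (state & QDISC_STATE_RUNNING) || (state & QDISC_STATE_SCHED);
    }

    int main(void)
    {
        printf("%d\n", qdisc_busy(QDISC_STATE_SCHED));  /* 1: must wait */
        return 0;
    }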
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index be35422711a3..6febd245e62b 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
@@ -1279,7 +1279,8 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg) | |||
1279 | 1279 | ||
1280 | /* delete from hash and active; remainder in destroy_class */ | 1280 | /* delete from hash and active; remainder in destroy_class */ |
1281 | qdisc_class_hash_remove(&q->clhash, &cl->common); | 1281 | qdisc_class_hash_remove(&q->clhash, &cl->common); |
1282 | cl->parent->children--; | 1282 | if (cl->parent) |
1283 | cl->parent->children--; | ||
1283 | 1284 | ||
1284 | if (cl->prio_activity) | 1285 | if (cl->prio_activity) |
1285 | htb_deactivate(q, cl); | 1286 | htb_deactivate(q, cl); |
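[Note: the htb_delete() fix guards the parent bookkeeping. Classes attached directly under the HTB root have cl->parent == NULL, so the unconditional cl->parent->children-- dereferenced NULL when deleting a top-level class. The guard in isolation, with a minimal stand-in type:]

    #include <stddef.h>

    struct htb_class {
        struct htb_class *parent;   /* NULL for classes at the root level */
        int children;
    };

    static void htb_unlink_from_parent(struct htb_class *cl)
    {
        /* root-level classes have no parent to account against */
        if (cl->parent)
            cl->parent->children--;
    }

    int main(void)
    {
        struct htb_class root_level = { .parent = NULL, .children = 0 };

        htb_unlink_from_parent(&root_level);   /* no NULL dereference */
        return 0;
    }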
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index 0326d3060bc7..0747d8a9232f 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c | |||
@@ -85,7 +85,7 @@ static struct top_srv topsrv = { 0 }; | |||
85 | 85 | ||
86 | static u32 htohl(u32 in, int swap) | 86 | static u32 htohl(u32 in, int swap) |
87 | { | 87 | { |
88 | return swap ? (u32)___constant_swab32(in) : in; | 88 | return swap ? swab32(in) : in; |
89 | } | 89 | } |
90 | 90 | ||
91 | /** | 91 | /** |
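[Note: the TIPC change replaces the double-underscore internal ___constant_swab32(), a helper intended for compile-time constant folding, with the public swab32() interface, which is the supported way to byte-swap runtime values. For reference, an unconditional 32-bit byte swap in plain userspace C:]

    #include <stdio.h>
    #include <stdint.h>

    /* userspace stand-in for the kernel's swab32() */
    static uint32_t swab32(uint32_t x)
    {
        return (x >> 24) | ((x >> 8) & 0x0000ff00u) |
               ((x << 8) & 0x00ff0000u) | (x << 24);
    }

    /* mirrors net/tipc/subscr.c: swap only when peer endianness differs */
    static uint32_t htohl(uint32_t in, int swap)
    {
        return swap ? swab32(in) : in;
    }

    int main(void)
    {
        printf("0x%08x\n", htohl(0x12345678u, 1));  /* prints 0x78563412 */
        return 0;
    }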
diff --git a/net/wireless/wext.c b/net/wireless/wext.c index df5b3886c36b..d98ffb75119a 100644 --- a/net/wireless/wext.c +++ b/net/wireless/wext.c | |||
@@ -1277,6 +1277,7 @@ static int rtnetlink_fill_iwinfo(struct sk_buff *skb, struct net_device *dev, | |||
1277 | r->ifi_flags = dev_get_flags(dev); | 1277 | r->ifi_flags = dev_get_flags(dev); |
1278 | r->ifi_change = 0; /* Wireless changes don't affect those flags */ | 1278 | r->ifi_change = 0; /* Wireless changes don't affect those flags */ |
1279 | 1279 | ||
1280 | NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name); | ||
1280 | /* Add the wireless events in the netlink packet */ | 1281 | /* Add the wireless events in the netlink packet */ |
1281 | NLA_PUT(skb, IFLA_WIRELESS, event_len, event); | 1282 | NLA_PUT(skb, IFLA_WIRELESS, event_len, event); |
1282 | 1283 | ||
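[Note: the wext.c hunk adds the interface name to the rtnetlink wireless-event message, so listeners receive IFLA_IFNAME alongside the IFLA_WIRELESS payload and need not resolve the ifindex themselves. A kernel-context sketch of the put-or-bail pattern these macros expand to, assuming the usual nla_put_failure label convention; not a standalone program:]

    /* NLA_PUT_STRING()/NLA_PUT() append an attribute to the skb and
     * jump to nla_put_failure when the message buffer runs out. */
    static int fill_iwinfo_attrs(struct sk_buff *skb, struct net_device *dev,
                                 int event_len, void *event)
    {
            NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);  /* new attribute */
            NLA_PUT(skb, IFLA_WIRELESS, event_len, event);
            return 0;

    nla_put_failure:
            return -EMSGSIZE;
    }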
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index 3f964db908a7..ac25b4c0e982 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c | |||
@@ -112,16 +112,13 @@ error_nolock: | |||
112 | int xfrm_output_resume(struct sk_buff *skb, int err) | 112 | int xfrm_output_resume(struct sk_buff *skb, int err) |
113 | { | 113 | { |
114 | while (likely((err = xfrm_output_one(skb, err)) == 0)) { | 114 | while (likely((err = xfrm_output_one(skb, err)) == 0)) { |
115 | struct xfrm_state *x; | ||
116 | |||
117 | nf_reset(skb); | 115 | nf_reset(skb); |
118 | 116 | ||
119 | err = skb->dst->ops->local_out(skb); | 117 | err = skb->dst->ops->local_out(skb); |
120 | if (unlikely(err != 1)) | 118 | if (unlikely(err != 1)) |
121 | goto out; | 119 | goto out; |
122 | 120 | ||
123 | x = skb->dst->xfrm; | 121 | if (!skb->dst->xfrm) |
124 | if (!x) | ||
125 | return dst_output(skb); | 122 | return dst_output(skb); |
126 | 123 | ||
127 | err = nf_hook(skb->dst->ops->family, | 124 | err = nf_hook(skb->dst->ops->family, |