author     David S. Miller <davem@davemloft.net>   2008-11-11 18:43:02 -0500
committer  David S. Miller <davem@davemloft.net>   2008-11-11 18:43:02 -0500
commit     7e452baf6b96b5aeba097afd91501d33d390cc97 (patch)
tree       9b0e062d3677d50d731ffd0fba47423bfdee9253 /net
parent     3ac38c3a2e7dac3f8f35a56eb85c27881a4c3833 (diff)
parent     f21f237cf55494c3a4209de323281a3b0528da10 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
drivers/message/fusion/mptlan.c
drivers/net/sfc/ethtool.c
net/mac80211/debugfs_sta.c
Diffstat (limited to 'net')
 net/8021q/vlanproc.c            |  2
 net/core/skbuff.c               |  2
 net/dsa/slave.c                 | 72
 net/dsa/tag_dsa.c               |  1
 net/dsa/tag_edsa.c              |  1
 net/dsa/tag_trailer.c           |  1
 net/ipv4/proc.c                 | 58
 net/ipv6/ip6mr.c                |  9
 net/key/af_key.c                |  1
 net/netfilter/ipvs/ip_vs_xmit.c |  3
 net/unix/af_unix.c              | 31
 net/unix/garbage.c              | 49
 12 files changed, 174 insertions, 56 deletions
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index 0feefa4e1a4b..3628e0a81b40 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -314,7 +314,7 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
 		   dev_info->ingress_priority_map[6],
 		   dev_info->ingress_priority_map[7]);

-	seq_printf(seq, "EGRESSS priority Mappings: ");
+	seq_printf(seq, " EGRESS priority mappings: ");
 	for (i = 0; i < 16; i++) {
 		const struct vlan_priority_tci_mapping *mp
 			= dev_info->egress_priority_map[i];
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f24a4951008b..267185a848f6 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -486,8 +486,8 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
 	shinfo->frag_list = NULL;

 	memset(skb, 0, offsetof(struct sk_buff, tail));
-	skb_reset_tail_pointer(skb);
 	skb->data = skb->head + NET_SKB_PAD;
+	skb_reset_tail_pointer(skb);

 	return 1;
 }
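The reordering matters because skb_reset_tail_pointer() derives the tail pointer from skb->data, so resetting the tail before skb->data has been put back at head + NET_SKB_PAD would leave the tail at a stale offset. A minimal userspace sketch of that dependency, using a simplified buffer struct and a hypothetical RESERVED_HEADROOM standing in for NET_SKB_PAD (not the kernel's struct sk_buff):

#include <assert.h>

#define RESERVED_HEADROOM 32	/* stand-in for NET_SKB_PAD */

struct buf {
	unsigned char *head;	/* start of the allocation */
	unsigned char *data;	/* start of valid payload */
	unsigned char *tail;	/* end of valid payload */
};

/* Analogue of skb_reset_tail_pointer(): tail is defined in terms of data. */
static void buf_reset_tail(struct buf *b)
{
	b->tail = b->data;
}

/* Recycle the buffer: data must be restored before tail is derived from it. */
static void buf_recycle(struct buf *b)
{
	b->data = b->head + RESERVED_HEADROOM;
	buf_reset_tail(b);	/* now tail == head + RESERVED_HEADROOM */
}

int main(void)
{
	unsigned char storage[256];
	struct buf b = { storage, storage + 100, storage + 180 };

	buf_recycle(&b);
	assert(b.data == b.head + RESERVED_HEADROOM);
	assert(b.tail == b.data);	/* would fail if tail were reset first */
	return 0;
}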
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 7384bad81652..a3a410d20da0 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -10,6 +10,7 @@

 #include <linux/list.h>
 #include <linux/netdevice.h>
+#include <linux/etherdevice.h>
 #include <linux/phy.h>
 #include "dsa_priv.h"

@@ -49,11 +50,57 @@ void dsa_slave_mii_bus_init(struct dsa_switch *ds)
 /* slave device handling ****************************************************/
 static int dsa_slave_open(struct net_device *dev)
 {
+	struct dsa_slave_priv *p = netdev_priv(dev);
+	struct net_device *master = p->parent->master_netdev;
+	int err;
+
+	if (!(master->flags & IFF_UP))
+		return -ENETDOWN;
+
+	if (compare_ether_addr(dev->dev_addr, master->dev_addr)) {
+		err = dev_unicast_add(master, dev->dev_addr, ETH_ALEN);
+		if (err < 0)
+			goto out;
+	}
+
+	if (dev->flags & IFF_ALLMULTI) {
+		err = dev_set_allmulti(master, 1);
+		if (err < 0)
+			goto del_unicast;
+	}
+	if (dev->flags & IFF_PROMISC) {
+		err = dev_set_promiscuity(master, 1);
+		if (err < 0)
+			goto clear_allmulti;
+	}
+
 	return 0;
+
+clear_allmulti:
+	if (dev->flags & IFF_ALLMULTI)
+		dev_set_allmulti(master, -1);
+del_unicast:
+	if (compare_ether_addr(dev->dev_addr, master->dev_addr))
+		dev_unicast_delete(master, dev->dev_addr, ETH_ALEN);
+out:
+	return err;
 }

 static int dsa_slave_close(struct net_device *dev)
 {
+	struct dsa_slave_priv *p = netdev_priv(dev);
+	struct net_device *master = p->parent->master_netdev;
+
+	dev_mc_unsync(master, dev);
+	dev_unicast_unsync(master, dev);
+	if (dev->flags & IFF_ALLMULTI)
+		dev_set_allmulti(master, -1);
+	if (dev->flags & IFF_PROMISC)
+		dev_set_promiscuity(master, -1);
+
+	if (compare_ether_addr(dev->dev_addr, master->dev_addr))
+		dev_unicast_delete(master, dev->dev_addr, ETH_ALEN);
+
 	return 0;
 }

@@ -77,9 +124,30 @@ static void dsa_slave_set_rx_mode(struct net_device *dev)
 	dev_unicast_sync(master, dev);
 }

-static int dsa_slave_set_mac_address(struct net_device *dev, void *addr)
+static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
 {
-	memcpy(dev->dev_addr, addr + 2, 6);
+	struct dsa_slave_priv *p = netdev_priv(dev);
+	struct net_device *master = p->parent->master_netdev;
+	struct sockaddr *addr = a;
+	int err;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	if (!(dev->flags & IFF_UP))
+		goto out;
+
+	if (compare_ether_addr(addr->sa_data, master->dev_addr)) {
+		err = dev_unicast_add(master, addr->sa_data, ETH_ALEN);
+		if (err < 0)
+			return err;
+	}
+
+	if (compare_ether_addr(dev->dev_addr, master->dev_addr))
+		dev_unicast_delete(master, dev->dev_addr, ETH_ALEN);
+
+out:
+	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

 	return 0;
 }
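In the new set_mac_address path, the replacement address is added to the master interface's unicast filter before the old address is removed, so there is no window in which neither address is accepted. A rough sketch of that add-before-delete ordering, with hypothetical filter_add()/filter_del() helpers rather than the dev_unicast_* API:

#include <stdio.h>
#include <string.h>

#define ADDR_LEN 6

/* Hypothetical hardware-filter helpers, stand-ins for dev_unicast_add/delete. */
static int filter_add(const unsigned char *addr)
{
	printf("filter add    %02x:..:%02x\n", addr[0], addr[ADDR_LEN - 1]);
	return 0;	/* pretend programming always succeeds */
}

static void filter_del(const unsigned char *addr)
{
	printf("filter delete %02x:..:%02x\n", addr[0], addr[ADDR_LEN - 1]);
}

/*
 * Change the address of a live port: program the new address first, and only
 * then drop the old one, so frames are never filtered out in between.
 */
static int change_addr(unsigned char *cur, const unsigned char *new_addr)
{
	int err = filter_add(new_addr);

	if (err < 0)
		return err;	/* old address is still programmed */

	filter_del(cur);
	memcpy(cur, new_addr, ADDR_LEN);
	return 0;
}

int main(void)
{
	unsigned char cur[ADDR_LEN]      = { 0x02, 0, 0, 0, 0, 0x01 };
	unsigned char new_addr[ADDR_LEN] = { 0x02, 0, 0, 0, 0, 0x02 };

	return change_addr(cur, new_addr);
}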
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index ff55823a6534..f99a019b939e 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -159,6 +159,7 @@ static int dsa_rcv(struct sk_buff *skb, struct net_device *dev,

 	skb->dev = ds->ports[source_port];
 	skb_push(skb, ETH_HLEN);
+	skb->pkt_type = PACKET_HOST;
 	skb->protocol = eth_type_trans(skb, skb->dev);

 	skb->dev->stats.rx_packets++;
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c
index 24b1c76fa7a4..328ec957f786 100644
--- a/net/dsa/tag_edsa.c
+++ b/net/dsa/tag_edsa.c
@@ -178,6 +178,7 @@ static int edsa_rcv(struct sk_buff *skb, struct net_device *dev,

 	skb->dev = ds->ports[source_port];
 	skb_push(skb, ETH_HLEN);
+	skb->pkt_type = PACKET_HOST;
 	skb->protocol = eth_type_trans(skb, skb->dev);

 	skb->dev->stats.rx_packets++;
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index 3bfd2e55877a..b59132878ad1 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -95,6 +95,7 @@ static int trailer_rcv(struct sk_buff *skb, struct net_device *dev,

 	skb->dev = ds->ports[source_port];
 	skb_push(skb, ETH_HLEN);
+	skb->pkt_type = PACKET_HOST;
 	skb->protocol = eth_type_trans(skb, skb->dev);

 	skb->dev->stats.rx_packets++;
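All three taggers now force PACKET_HOST before the second eth_type_trans() call. The frame has presumably already been classified once by the master interface's driver, and eth_type_trans() only ever marks frames as broadcast, multicast or other-host; it never promotes one back to PACKET_HOST, so a frame tagged PACKET_OTHERHOST on the master (because the slave's MAC differs from the master's) would otherwise stay that way and be dropped. A simplified userspace sketch of that classification behaviour, not the kernel's eth_type_trans():

#include <stdio.h>
#include <string.h>

enum pkt_type { PACKET_HOST, PACKET_BROADCAST, PACKET_MULTICAST, PACKET_OTHERHOST };

struct frame {
	unsigned char dest[6];
	enum pkt_type pkt_type;
};

/*
 * Simplified classification: broadcast/multicast/other-host are set explicitly,
 * but a frame already carrying PACKET_OTHERHOST is never switched back to
 * PACKET_HOST just because the address now matches.
 */
static void classify(struct frame *f, const unsigned char *dev_addr)
{
	static const unsigned char bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	if (!memcmp(f->dest, bcast, 6))
		f->pkt_type = PACKET_BROADCAST;
	else if (f->dest[0] & 1)
		f->pkt_type = PACKET_MULTICAST;
	else if (memcmp(f->dest, dev_addr, 6))
		f->pkt_type = PACKET_OTHERHOST;
	/* else: leave pkt_type untouched */
}

int main(void)
{
	unsigned char master[6] = { 0x02, 0, 0, 0, 0, 0x01 };
	unsigned char slave[6]  = { 0x02, 0, 0, 0, 0, 0x02 };
	struct frame f = { { 0x02, 0, 0, 0, 0, 0x02 }, PACKET_HOST };

	classify(&f, master);		/* slave's MAC, so the master marks OTHERHOST */
	classify(&f, slave);		/* matches, but stays OTHERHOST */
	printf("without reset: %d\n", f.pkt_type);

	f.pkt_type = PACKET_HOST;	/* what the tag_*.c change does */
	classify(&f, slave);
	printf("with reset:    %d\n", f.pkt_type);
	return 0;
}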
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 8f5a403f6f6b..a631a1f110ca 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -237,43 +237,45 @@ static const struct snmp_mib snmp4_net_list[] = {
 	SNMP_MIB_SENTINEL
 };

+static void icmpmsg_put_line(struct seq_file *seq, unsigned long *vals,
+			     unsigned short *type, int count)
+{
+	int j;
+
+	if (count) {
+		seq_printf(seq, "\nIcmpMsg:");
+		for (j = 0; j < count; ++j)
+			seq_printf(seq, " %sType%u",
+				type[j] & 0x100 ? "Out" : "In",
+				type[j] & 0xff);
+		seq_printf(seq, "\nIcmpMsg:");
+		for (j = 0; j < count; ++j)
+			seq_printf(seq, " %lu", vals[j]);
+	}
+}
+
 static void icmpmsg_put(struct seq_file *seq)
 {
 #define PERLINE	16

-	int j, i, count;
-	static int out[PERLINE];
+	int i, count;
+	unsigned short type[PERLINE];
+	unsigned long vals[PERLINE], val;
 	struct net *net = seq->private;

 	count = 0;
 	for (i = 0; i < ICMPMSG_MIB_MAX; i++) {
-
-		if (snmp_fold_field((void **) net->mib.icmpmsg_statistics, i))
-			out[count++] = i;
-		if (count < PERLINE)
-			continue;
-
-		seq_printf(seq, "\nIcmpMsg:");
-		for (j = 0; j < PERLINE; ++j)
-			seq_printf(seq, " %sType%u", i & 0x100 ? "Out" : "In",
-				i & 0xff);
-		seq_printf(seq, "\nIcmpMsg: ");
-		for (j = 0; j < PERLINE; ++j)
-			seq_printf(seq, " %lu",
-				snmp_fold_field((void **) net->mib.icmpmsg_statistics,
-				out[j]));
-		seq_putc(seq, '\n');
-	}
-	if (count) {
-		seq_printf(seq, "\nIcmpMsg:");
-		for (j = 0; j < count; ++j)
-			seq_printf(seq, " %sType%u", out[j] & 0x100 ? "Out" :
-				"In", out[j] & 0xff);
-		seq_printf(seq, "\nIcmpMsg:");
-		for (j = 0; j < count; ++j)
-			seq_printf(seq, " %lu", snmp_fold_field((void **)
-				net->mib.icmpmsg_statistics, out[j]));
+		val = snmp_fold_field((void **) net->mib.icmpmsg_statistics, i);
+		if (val) {
+			type[count] = i;
+			vals[count++] = val;
+		}
+		if (count == PERLINE) {
+			icmpmsg_put_line(seq, vals, type, count);
+			count = 0;
+		}
 	}
+	icmpmsg_put_line(seq, vals, type, count);

 #undef PERLINE
 }
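The rewrite accumulates nonzero counters into the type[]/vals[] arrays and flushes a full report line through icmpmsg_put_line() every PERLINE entries, with one final flush for the remainder. A stand-alone sketch of that accumulate-and-flush pattern over a generic sparse counter table (not the SNMP MIB structures):

#include <stdio.h>

#define PERLINE 4

static void put_line(const int *ids, const unsigned long *vals, int count)
{
	int j;

	if (!count)
		return;
	for (j = 0; j < count; j++)
		printf(" id%d=%lu", ids[j], vals[j]);
	putchar('\n');
}

int main(void)
{
	/* sparse counter table; only nonzero entries are reported */
	unsigned long counters[10] = { 0, 3, 0, 0, 7, 1, 0, 9, 2, 5 };
	int ids[PERLINE];
	unsigned long vals[PERLINE];
	int i, count = 0;

	for (i = 0; i < 10; i++) {
		if (counters[i]) {
			ids[count] = i;
			vals[count++] = counters[i];
		}
		if (count == PERLINE) {	/* full line: flush and restart */
			put_line(ids, vals, count);
			count = 0;
		}
	}
	put_line(ids, vals, count);	/* flush the partial last line */
	return 0;
}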
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index c491fb98a5e3..b17377d6f260 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -980,14 +980,15 @@ int __init ip6_mr_init(void)
 		goto proc_cache_fail;
 #endif
 	return 0;
-reg_notif_fail:
-	kmem_cache_destroy(mrt_cachep);
 #ifdef CONFIG_PROC_FS
-proc_vif_fail:
-	unregister_netdevice_notifier(&ip6_mr_notifier);
 proc_cache_fail:
 	proc_net_remove(&init_net, "ip6_mr_vif");
+proc_vif_fail:
+	unregister_netdevice_notifier(&ip6_mr_notifier);
 #endif
+reg_notif_fail:
+	del_timer(&ipmr_expire_timer);
+	kmem_cache_destroy(mrt_cachep);
 	return err;
 }

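The error labels now unwind in the reverse order of initialization, each one undoing only what had succeeded before the corresponding failure, and a del_timer() that was previously missing is added. A generic sketch of that label-per-stage unwind idiom, with hypothetical setup_a/b/c stages standing in for the cache, notifier and procfs registrations:

#include <stdio.h>

static int setup_a(void) { puts("setup a"); return 0; }
static int setup_b(void) { puts("setup b"); return 0; }
static int setup_c(void) { puts("setup c"); return -1; /* force a failure */ }
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int init_all(void)
{
	int err;

	err = setup_a();
	if (err)
		goto fail_a;
	err = setup_b();
	if (err)
		goto fail_b;
	err = setup_c();
	if (err)
		goto fail_c;
	return 0;

	/* Labels unwind in reverse order of setup, each undoing one stage. */
fail_c:
	undo_b();
fail_b:
	undo_a();
fail_a:
	return err;
}

int main(void)
{
	return init_all() ? 1 : 0;
}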
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 3440a4637f01..5b22e011653b 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3188,6 +3188,7 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
 	return xp;

 out:
+	xp->walk.dead = 1;
 	xfrm_policy_destroy(xp);
 	return NULL;
 }
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 2f3672190734..425ab144f15d 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -711,7 +711,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 	iph = ipv6_hdr(skb);
 	iph->version = 6;
 	iph->nexthdr = IPPROTO_IPV6;
-	iph->payload_len = old_iph->payload_len + sizeof(old_iph);
+	iph->payload_len = old_iph->payload_len;
+	be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
 	iph->priority = old_iph->priority;
 	memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
 	iph->daddr = rt->rt6i_dst.addr;
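The old line added sizeof(old_iph), the size of a pointer, directly to a big-endian field; the fix copies the network-order value and lets be16_add_cpu() add sizeof(*old_iph) in CPU byte order before storing it back big-endian. A userspace analogue using ntohs()/htons() (the kernel helper itself lives in the byteorder headers):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace analogue of be16_add_cpu(): add in host order, store big-endian. */
static void be16_add_cpu_like(uint16_t *be_var, uint16_t val)
{
	*be_var = htons((uint16_t)(ntohs(*be_var) + val));
}

int main(void)
{
	uint16_t payload_len = htons(1200);	/* big-endian on-the-wire value */

	be16_add_cpu_like(&payload_len, 40);	/* 40 = size of the fixed IPv6 header */
	printf("payload_len = %u\n", ntohs(payload_len));	/* prints 1240 */
	return 0;
}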
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 7d2e4f8f8172..4a39771d037e 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1302,14 +1302,23 @@ static void unix_destruct_fds(struct sk_buff *skb)
 	sock_wfree(skb);
 }

-static void unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
 {
 	int i;
+
+	/*
+	 * Need to duplicate file references for the sake of garbage
+	 * collection. Otherwise a socket in the fps might become a
+	 * candidate for GC while the skb is not yet queued.
+	 */
+	UNIXCB(skb).fp = scm_fp_dup(scm->fp);
+	if (!UNIXCB(skb).fp)
+		return -ENOMEM;
+
 	for (i=scm->fp->count-1; i>=0; i--)
 		unix_inflight(scm->fp->fp[i]);
-	UNIXCB(skb).fp = scm->fp;
 	skb->destructor = unix_destruct_fds;
-	scm->fp = NULL;
+	return 0;
 }

 /*
@@ -1368,8 +1377,11 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
 		goto out;

 	memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
-	if (siocb->scm->fp)
-		unix_attach_fds(siocb->scm, skb);
+	if (siocb->scm->fp) {
+		err = unix_attach_fds(siocb->scm, skb);
+		if (err)
+			goto out_free;
+	}
 	unix_get_secdata(siocb->scm, skb);

 	skb_reset_transport_header(skb);
@@ -1538,8 +1550,13 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
 		size = min_t(int, size, skb_tailroom(skb));

 		memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
-		if (siocb->scm->fp)
-			unix_attach_fds(siocb->scm, skb);
+		if (siocb->scm->fp) {
+			err = unix_attach_fds(siocb->scm, skb);
+			if (err) {
+				kfree_skb(skb);
+				goto out_err;
+			}
+		}

 		if ((err = memcpy_fromiovec(skb_put(skb,size), msg->msg_iov, size)) != 0) {
 			kfree_skb(skb);
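With the duplication in place, a failed attach no longer consumes the sender's file references: unix_attach_fds() reports -ENOMEM and both send paths free the skb and bail out, leaving the caller's scm list intact. A minimal sketch of that duplicate-before-attach idea, using a hypothetical fd_list type rather than the scm_fp_dup()/UNIXCB() internals:

#include <stdlib.h>
#include <string.h>

struct fd_list {
	int refcount;
	int count;
	int fds[8];
};

/* Duplicate the caller's list; the caller keeps its own reference. */
static struct fd_list *fd_list_dup(const struct fd_list *src)
{
	struct fd_list *copy = malloc(sizeof(*copy));

	if (!copy)
		return NULL;
	memcpy(copy, src, sizeof(*copy));
	copy->refcount = 1;
	return copy;
}

struct message {
	struct fd_list *fp;
};

/* Attach a private copy to the message; on failure the caller's list is untouched. */
static int attach_fds(struct message *msg, const struct fd_list *scm_fp)
{
	msg->fp = fd_list_dup(scm_fp);
	if (!msg->fp)
		return -1;	/* analogous to returning -ENOMEM */
	return 0;
}

int main(void)
{
	struct fd_list scm = { 1, 2, { 3, 4 } };
	struct message msg;

	if (attach_fds(&msg, &scm))
		return 1;	/* caller would drop the message here */
	free(msg.fp);
	return 0;
}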
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 00734e22ec15..5a0061d6b9bc 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -186,8 +186,17 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
 		 */
 		struct sock *sk = unix_get_socket(*fp++);
 		if (sk) {
-			hit = true;
-			func(unix_sk(sk));
+			struct unix_sock *u = unix_sk(sk);
+
+			/*
+			 * Ignore non-candidates, they could
+			 * have been added to the queues after
+			 * starting the garbage collection
+			 */
+			if (u->gc_candidate) {
+				hit = true;
+				func(u);
+			}
 		}
 	}
 	if (hit && hitlist != NULL) {
@@ -249,11 +258,11 @@ static void inc_inflight_move_tail(struct unix_sock *u)
 {
 	atomic_long_inc(&u->inflight);
 	/*
-	 * If this is still a candidate, move it to the end of the
-	 * list, so that it's checked even if it was already passed
-	 * over
+	 * If this still might be part of a cycle, move it to the end
+	 * of the list, so that it's checked even if it was already
+	 * passed over
 	 */
-	if (u->gc_candidate)
+	if (u->gc_maybe_cycle)
 		list_move_tail(&u->link, &gc_candidates);
 }

@@ -267,6 +276,7 @@ void unix_gc(void)
 	struct unix_sock *next;
 	struct sk_buff_head hitlist;
 	struct list_head cursor;
+	LIST_HEAD(not_cycle_list);

 	spin_lock(&unix_gc_lock);

@@ -282,10 +292,14 @@ void unix_gc(void)
 	 *
 	 * Holding unix_gc_lock will protect these candidates from
 	 * being detached, and hence from gaining an external
-	 * reference. This also means, that since there are no
-	 * possible receivers, the receive queues of these sockets are
-	 * static during the GC, even though the dequeue is done
-	 * before the detach without atomicity guarantees.
+	 * reference. Since there are no possible receivers, all
+	 * buffers currently on the candidates' queues stay there
+	 * during the garbage collection.
+	 *
+	 * We also know that no new candidate can be added onto the
+	 * receive queues. Other, non candidate sockets _can_ be
+	 * added to queue, so we must make sure only to touch
+	 * candidates.
 	 */
 	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
 		long total_refs;
@@ -299,6 +313,7 @@ void unix_gc(void)
 		if (total_refs == inflight_refs) {
 			list_move_tail(&u->link, &gc_candidates);
 			u->gc_candidate = 1;
+			u->gc_maybe_cycle = 1;
 		}
 	}

@@ -325,14 +340,24 @@ void unix_gc(void)
 		list_move(&cursor, &u->link);

 		if (atomic_long_read(&u->inflight) > 0) {
-			list_move_tail(&u->link, &gc_inflight_list);
-			u->gc_candidate = 0;
+			list_move_tail(&u->link, &not_cycle_list);
+			u->gc_maybe_cycle = 0;
 			scan_children(&u->sk, inc_inflight_move_tail, NULL);
 		}
 	}
 	list_del(&cursor);

 	/*
+	 * not_cycle_list contains those sockets which do not make up a
+	 * cycle. Restore these to the inflight list.
+	 */
+	while (!list_empty(&not_cycle_list)) {
+		u = list_entry(not_cycle_list.next, struct unix_sock, link);
+		u->gc_candidate = 0;
+		list_move_tail(&u->link, &gc_inflight_list);
+	}
+
+	/*
 	 * Now gc_candidates contains only garbage. Restore original
 	 * inflight counters for these as well, and remove the skbuffs
 	 * which are creating the cycle(s).
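The new not_cycle_list collects candidates that turned out to still be externally referenced; once the scan settles, they are moved back to the inflight list and lose their candidate flag, so only true cycles remain on gc_candidates. A compact sketch of that partition-and-restore step, with a simplified re-implementation of the intrusive list helpers rather than <linux/list.h>:

#include <stddef.h>
#include <stdio.h>

/* Minimal intrusive list, modeled loosely on the kernel's list_head. */
struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add_tail(struct list_head *e, struct list_head *head)
{
	e->prev = head->prev;
	e->next = head;
	head->prev->next = e;
	head->prev = e;
}

static void list_move_tail(struct list_head *e, struct list_head *head)
{
	e->prev->next = e->next;	/* unlink from whatever list it is on */
	e->next->prev = e->prev;
	list_add_tail(e, head);
}

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

struct node {
	struct list_head link;
	int gc_candidate;
	int still_referenced;	/* stand-in for "inflight count > 0 after scan" */
};

#define node_entry(ptr) ((struct node *)((char *)(ptr) - offsetof(struct node, link)))

int main(void)
{
	struct list_head candidates = LIST_HEAD_INIT(candidates);
	struct list_head inflight   = LIST_HEAD_INIT(inflight);
	struct list_head not_cycle  = LIST_HEAD_INIT(not_cycle);
	struct node n[3] = {
		{ .gc_candidate = 1, .still_referenced = 0 },
		{ .gc_candidate = 1, .still_referenced = 1 },
		{ .gc_candidate = 1, .still_referenced = 0 },
	};
	struct list_head *pos, *next;
	int i, garbage = 0;

	for (i = 0; i < 3; i++)
		list_add_tail(&n[i].link, &candidates);

	/* Scan pass: entries that still have external references are set aside. */
	for (pos = candidates.next; pos != &candidates; pos = next) {
		next = pos->next;
		if (node_entry(pos)->still_referenced)
			list_move_tail(pos, &not_cycle);
	}

	/* Restore the non-cycle entries to the inflight list, clearing the flag. */
	while (!list_empty(&not_cycle)) {
		struct node *u = node_entry(not_cycle.next);

		u->gc_candidate = 0;
		list_move_tail(&u->link, &inflight);
	}

	for (pos = candidates.next; pos != &candidates; pos = pos->next)
		garbage++;
	printf("entries left on the candidate list: %d\n", garbage);	/* prints 2 */
	return 0;
}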