Diffstat (limited to 'net')
-rw-r--r--	net/ax25/af_ax25.c        |  3
-rw-r--r--	net/ax25/ax25_std_timer.c |  8
-rw-r--r--	net/core/dev.c            | 43
-rw-r--r--	net/core/rtnetlink.c      |  2
-rw-r--r--	net/ipv4/tcp_hybla.c      |  6
-rw-r--r--	net/ipv4/tcp_input.c      |  3
-rw-r--r--	net/netrom/af_netrom.c    |  2
7 files changed, 33 insertions(+), 34 deletions(-)
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 01c83e2a4c19..28c71574a781 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -317,6 +317,9 @@ void ax25_destroy_socket(ax25_cb *ax25)
 			/* Queue the unaccepted socket for death */
 			sock_orphan(skb->sk);
 
+			/* 9A4GL: hack to release unaccepted sockets */
+			skb->sk->sk_state = TCP_LISTEN;
+
 			ax25_start_heartbeat(sax25);
 			sax25->state = AX25_STATE_0;
 		}
diff --git a/net/ax25/ax25_std_timer.c b/net/ax25/ax25_std_timer.c
index cdc7e751ef36..96e4b9273250 100644
--- a/net/ax25/ax25_std_timer.c
+++ b/net/ax25/ax25_std_timer.c
@@ -39,9 +39,11 @@ void ax25_std_heartbeat_expiry(ax25_cb *ax25)
 
 	switch (ax25->state) {
 	case AX25_STATE_0:
-		if (!sk ||
-		    sock_flag(sk, SOCK_DESTROY) ||
-		    sock_flag(sk, SOCK_DEAD)) {
+		/* Magic here: If we listen() and a new link dies before it
+		   is accepted() it isn't 'dead' so doesn't get removed. */
+		if (!sk || sock_flag(sk, SOCK_DESTROY) ||
+		    (sk->sk_state == TCP_LISTEN &&
+		     sock_flag(sk, SOCK_DEAD))) {
 			if (sk) {
 				sock_hold(sk);
 				ax25_destroy_socket(ax25);
diff --git a/net/core/dev.c b/net/core/dev.c
index 7091040e32ac..1408a083fe4e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2949,6 +2949,12 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
 	return 0;
 }
 
+static void dev_change_rx_flags(struct net_device *dev, int flags)
+{
+	if (dev->flags & IFF_UP && dev->change_rx_flags)
+		dev->change_rx_flags(dev, flags);
+}
+
 static int __dev_set_promiscuity(struct net_device *dev, int inc)
 {
 	unsigned short old_flags = dev->flags;
@@ -2986,8 +2992,7 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc)
 				current->uid, current->gid,
 				audit_get_sessionid(current));
 
-		if (dev->change_rx_flags)
-			dev->change_rx_flags(dev, IFF_PROMISC);
+		dev_change_rx_flags(dev, IFF_PROMISC);
 	}
 	return 0;
 }
@@ -3053,8 +3058,7 @@ int dev_set_allmulti(struct net_device *dev, int inc)
 		}
 	}
 	if (dev->flags ^ old_flags) {
-		if (dev->change_rx_flags)
-			dev->change_rx_flags(dev, IFF_ALLMULTI);
+		dev_change_rx_flags(dev, IFF_ALLMULTI);
 		dev_set_rx_mode(dev);
 	}
 	return 0;
@@ -3392,8 +3396,8 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
 	 *	Load in the correct multicast list now the flags have changed.
 	 */
 
-	if (dev->change_rx_flags && (old_flags ^ flags) & IFF_MULTICAST)
-		dev->change_rx_flags(dev, IFF_MULTICAST);
+	if ((old_flags ^ flags) & IFF_MULTICAST)
+		dev_change_rx_flags(dev, IFF_MULTICAST);
 
 	dev_set_rx_mode(dev);
 
@@ -3867,14 +3871,11 @@ static int dev_new_index(struct net *net)
 }
 
 /* Delayed registration/unregisteration */
-static DEFINE_SPINLOCK(net_todo_list_lock);
 static LIST_HEAD(net_todo_list);
 
 static void net_set_todo(struct net_device *dev)
 {
-	spin_lock(&net_todo_list_lock);
 	list_add_tail(&dev->todo_list, &net_todo_list);
-	spin_unlock(&net_todo_list_lock);
 }
 
 static void rollback_registered(struct net_device *dev)
@@ -4201,33 +4202,24 @@ static void netdev_wait_allrefs(struct net_device *dev)
  *	free_netdev(y1);
  *	free_netdev(y2);
  *
- *	We are invoked by rtnl_unlock() after it drops the semaphore.
+ *	We are invoked by rtnl_unlock().
  *	This allows us to deal with problems:
  *	1) We can delete sysfs objects which invoke hotplug
  *	   without deadlocking with linkwatch via keventd.
  *	2) Since we run with the RTNL semaphore not held, we can sleep
  *	   safely in order to wait for the netdev refcnt to drop to zero.
+ *
+ *	We must not return until all unregister events added during
+ *	the interval the lock was held have been completed.
  */
-static DEFINE_MUTEX(net_todo_run_mutex);
 void netdev_run_todo(void)
 {
 	struct list_head list;
 
-	/* Need to guard against multiple cpu's getting out of order. */
-	mutex_lock(&net_todo_run_mutex);
-
-	/* Not safe to do outside the semaphore.  We must not return
-	 * until all unregister events invoked by the local processor
-	 * have been completed (either by this todo run, or one on
-	 * another cpu).
-	 */
-	if (list_empty(&net_todo_list))
-		goto out;
-
 	/* Snapshot list, allow later requests */
-	spin_lock(&net_todo_list_lock);
 	list_replace_init(&net_todo_list, &list);
-	spin_unlock(&net_todo_list_lock);
+
+	__rtnl_unlock();
 
 	while (!list_empty(&list)) {
 		struct net_device *dev
@@ -4259,9 +4251,6 @@ void netdev_run_todo(void)
 		/* Free network device */
 		kobject_put(&dev->dev.kobj);
 	}
-
-out:
-	mutex_unlock(&net_todo_run_mutex);
 }
 
 static struct net_device_stats *internal_stats(struct net_device *dev)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 8862498fd4a6..3630131fa1fa 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -73,7 +73,7 @@ void __rtnl_unlock(void)
 
 void rtnl_unlock(void)
 {
-	mutex_unlock(&rtnl_mutex);
+	/* This fellow will unlock it for us. */
 	netdev_run_todo();
 }
 
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index bfcbd148a89d..c209e054a634 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -150,7 +150,11 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 			ca->snd_cwnd_cents -= 128;
 			tp->snd_cwnd_cnt = 0;
 		}
-
+		/* check when cwnd has not been incremented for a while */
+		if (increment == 0 && odd == 0 && tp->snd_cwnd_cnt >= tp->snd_cwnd) {
+			tp->snd_cwnd++;
+			tp->snd_cwnd_cnt = 0;
+		}
 		/* clamp down slowstart cwnd to ssthresh value. */
 		if (is_slowstart)
 			tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 63da39372d40..d77c0d29e239 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4957,7 +4957,8 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 					goto no_ack;
 			}
 
-			__tcp_ack_snd_check(sk, 0);
+			if (!copied_early || tp->rcv_nxt != tp->rcv_wup)
+				__tcp_ack_snd_check(sk, 0);
 no_ack:
 #ifdef CONFIG_NET_DMA
 			if (copied_early)
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 532e4faa29f7..9f1ea4a27b35 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -525,6 +525,7 @@ static int nr_release(struct socket *sock)
 	if (sk == NULL) return 0;
 
 	sock_hold(sk);
+	sock_orphan(sk);
 	lock_sock(sk);
 	nr = nr_sk(sk);
 
@@ -548,7 +549,6 @@ static int nr_release(struct socket *sock)
 		sk->sk_state    = TCP_CLOSE;
 		sk->sk_shutdown |= SEND_SHUTDOWN;
 		sk->sk_state_change(sk);
-		sock_orphan(sk);
 		sock_set_flag(sk, SOCK_DESTROY);
 		break;
 