author    Steven Whitehouse <swhiteho@redhat.com>  2006-05-12 10:48:52 -0400
committer Steven Whitehouse <swhiteho@redhat.com>  2006-05-12 10:48:52 -0400
commit    7d63b54a65ce902f9aaa8efe8192aa3b983264d4 (patch)
tree      250a77bebe92cbd6edac70a649866044295876db /net
parent    fd88de569b802c4a04aaa6ee74667775f4aed8c6 (diff)
parent    d8c3291c73b958243b33f8509d4507e76dafd055 (diff)

Merge branch 'master'

Diffstat (limited to 'net')
55 files changed, 386 insertions, 298 deletions
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index dbf9b47681f7..a2e0dd047e9f 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -228,6 +228,8 @@ ax25_cb *ax25_find_cb(ax25_address *src_addr, ax25_address *dest_addr, | |||
228 | return NULL; | 228 | return NULL; |
229 | } | 229 | } |
230 | 230 | ||
231 | EXPORT_SYMBOL(ax25_find_cb); | ||
232 | |||
231 | void ax25_send_to_raw(ax25_address *addr, struct sk_buff *skb, int proto) | 233 | void ax25_send_to_raw(ax25_address *addr, struct sk_buff *skb, int proto) |
232 | { | 234 | { |
233 | ax25_cb *s; | 235 | ax25_cb *s; |
@@ -424,6 +426,26 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg) | |||
424 | return 0; | 426 | return 0; |
425 | } | 427 | } |
426 | 428 | ||
429 | static void ax25_fillin_cb_from_dev(ax25_cb *ax25, ax25_dev *ax25_dev) | ||
430 | { | ||
431 | ax25->rtt = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T1]) / 2; | ||
432 | ax25->t1 = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T1]); | ||
433 | ax25->t2 = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T2]); | ||
434 | ax25->t3 = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T3]); | ||
435 | ax25->n2 = ax25_dev->values[AX25_VALUES_N2]; | ||
436 | ax25->paclen = ax25_dev->values[AX25_VALUES_PACLEN]; | ||
437 | ax25->idle = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_IDLE]); | ||
438 | ax25->backoff = ax25_dev->values[AX25_VALUES_BACKOFF]; | ||
439 | |||
440 | if (ax25_dev->values[AX25_VALUES_AXDEFMODE]) { | ||
441 | ax25->modulus = AX25_EMODULUS; | ||
442 | ax25->window = ax25_dev->values[AX25_VALUES_EWINDOW]; | ||
443 | } else { | ||
444 | ax25->modulus = AX25_MODULUS; | ||
445 | ax25->window = ax25_dev->values[AX25_VALUES_WINDOW]; | ||
446 | } | ||
447 | } | ||
448 | |||
427 | /* | 449 | /* |
428 | * Fill in a created AX.25 created control block with the default | 450 | * Fill in a created AX.25 created control block with the default |
429 | * values for a particular device. | 451 | * values for a particular device. |
@@ -433,39 +455,28 @@ void ax25_fillin_cb(ax25_cb *ax25, ax25_dev *ax25_dev) | |||
433 | ax25->ax25_dev = ax25_dev; | 455 | ax25->ax25_dev = ax25_dev; |
434 | 456 | ||
435 | if (ax25->ax25_dev != NULL) { | 457 | if (ax25->ax25_dev != NULL) { |
436 | ax25->rtt = ax25_dev->values[AX25_VALUES_T1] / 2; | 458 | ax25_fillin_cb_from_dev(ax25, ax25_dev); |
437 | ax25->t1 = ax25_dev->values[AX25_VALUES_T1]; | 459 | return; |
438 | ax25->t2 = ax25_dev->values[AX25_VALUES_T2]; | 460 | } |
439 | ax25->t3 = ax25_dev->values[AX25_VALUES_T3]; | 461 | |
440 | ax25->n2 = ax25_dev->values[AX25_VALUES_N2]; | 462 | /* |
441 | ax25->paclen = ax25_dev->values[AX25_VALUES_PACLEN]; | 463 | * No device, use kernel / AX.25 spec default values |
442 | ax25->idle = ax25_dev->values[AX25_VALUES_IDLE]; | 464 | */ |
443 | ax25->backoff = ax25_dev->values[AX25_VALUES_BACKOFF]; | 465 | ax25->rtt = msecs_to_jiffies(AX25_DEF_T1) / 2; |
444 | 466 | ax25->t1 = msecs_to_jiffies(AX25_DEF_T1); | |
445 | if (ax25_dev->values[AX25_VALUES_AXDEFMODE]) { | 467 | ax25->t2 = msecs_to_jiffies(AX25_DEF_T2); |
446 | ax25->modulus = AX25_EMODULUS; | 468 | ax25->t3 = msecs_to_jiffies(AX25_DEF_T3); |
447 | ax25->window = ax25_dev->values[AX25_VALUES_EWINDOW]; | 469 | ax25->n2 = AX25_DEF_N2; |
448 | } else { | 470 | ax25->paclen = AX25_DEF_PACLEN; |
449 | ax25->modulus = AX25_MODULUS; | 471 | ax25->idle = msecs_to_jiffies(AX25_DEF_IDLE); |
450 | ax25->window = ax25_dev->values[AX25_VALUES_WINDOW]; | 472 | ax25->backoff = AX25_DEF_BACKOFF; |
451 | } | 473 | |
474 | if (AX25_DEF_AXDEFMODE) { | ||
475 | ax25->modulus = AX25_EMODULUS; | ||
476 | ax25->window = AX25_DEF_EWINDOW; | ||
452 | } else { | 477 | } else { |
453 | ax25->rtt = AX25_DEF_T1 / 2; | 478 | ax25->modulus = AX25_MODULUS; |
454 | ax25->t1 = AX25_DEF_T1; | 479 | ax25->window = AX25_DEF_WINDOW; |
455 | ax25->t2 = AX25_DEF_T2; | ||
456 | ax25->t3 = AX25_DEF_T3; | ||
457 | ax25->n2 = AX25_DEF_N2; | ||
458 | ax25->paclen = AX25_DEF_PACLEN; | ||
459 | ax25->idle = AX25_DEF_IDLE; | ||
460 | ax25->backoff = AX25_DEF_BACKOFF; | ||
461 | |||
462 | if (AX25_DEF_AXDEFMODE) { | ||
463 | ax25->modulus = AX25_EMODULUS; | ||
464 | ax25->window = AX25_DEF_EWINDOW; | ||
465 | } else { | ||
466 | ax25->modulus = AX25_MODULUS; | ||
467 | ax25->window = AX25_DEF_WINDOW; | ||
468 | } | ||
469 | } | 480 | } |
470 | } | 481 | } |
471 | 482 | ||
@@ -1979,24 +1990,6 @@ static struct notifier_block ax25_dev_notifier = { | |||
1979 | .notifier_call =ax25_device_event, | 1990 | .notifier_call =ax25_device_event, |
1980 | }; | 1991 | }; |
1981 | 1992 | ||
1982 | EXPORT_SYMBOL(ax25_hard_header); | ||
1983 | EXPORT_SYMBOL(ax25_rebuild_header); | ||
1984 | EXPORT_SYMBOL(ax25_findbyuid); | ||
1985 | EXPORT_SYMBOL(ax25_find_cb); | ||
1986 | EXPORT_SYMBOL(ax25_linkfail_register); | ||
1987 | EXPORT_SYMBOL(ax25_linkfail_release); | ||
1988 | EXPORT_SYMBOL(ax25_listen_register); | ||
1989 | EXPORT_SYMBOL(ax25_listen_release); | ||
1990 | EXPORT_SYMBOL(ax25_protocol_register); | ||
1991 | EXPORT_SYMBOL(ax25_protocol_release); | ||
1992 | EXPORT_SYMBOL(ax25_send_frame); | ||
1993 | EXPORT_SYMBOL(ax25_uid_policy); | ||
1994 | EXPORT_SYMBOL(ax25cmp); | ||
1995 | EXPORT_SYMBOL(ax2asc); | ||
1996 | EXPORT_SYMBOL(asc2ax); | ||
1997 | EXPORT_SYMBOL(null_ax25_address); | ||
1998 | EXPORT_SYMBOL(ax25_display_timer); | ||
1999 | |||
2000 | static int __init ax25_init(void) | 1993 | static int __init ax25_init(void) |
2001 | { | 1994 | { |
2002 | int rc = proto_register(&ax25_proto, 0); | 1995 | int rc = proto_register(&ax25_proto, 0); |
diff --git a/net/ax25/ax25_addr.c b/net/ax25/ax25_addr.c
index 0164a155b8c4..5f0896ad0042 100644
--- a/net/ax25/ax25_addr.c
+++ b/net/ax25/ax25_addr.c
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/socket.h> | 11 | #include <linux/socket.h> |
12 | #include <linux/in.h> | 12 | #include <linux/in.h> |
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/module.h> | ||
14 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
15 | #include <linux/timer.h> | 16 | #include <linux/timer.h> |
16 | #include <linux/string.h> | 17 | #include <linux/string.h> |
@@ -33,6 +34,8 @@ | |||
33 | */ | 34 | */ |
34 | ax25_address null_ax25_address = {{0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x00}}; | 35 | ax25_address null_ax25_address = {{0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x00}}; |
35 | 36 | ||
37 | EXPORT_SYMBOL(null_ax25_address); | ||
38 | |||
36 | /* | 39 | /* |
37 | * ax25 -> ascii conversion | 40 | * ax25 -> ascii conversion |
38 | */ | 41 | */ |
@@ -64,6 +67,8 @@ char *ax2asc(char *buf, ax25_address *a) | |||
64 | 67 | ||
65 | } | 68 | } |
66 | 69 | ||
70 | EXPORT_SYMBOL(ax2asc); | ||
71 | |||
67 | /* | 72 | /* |
68 | * ascii -> ax25 conversion | 73 | * ascii -> ax25 conversion |
69 | */ | 74 | */ |
@@ -97,6 +102,8 @@ void asc2ax(ax25_address *addr, char *callsign) | |||
97 | addr->ax25_call[6] &= 0x1E; | 102 | addr->ax25_call[6] &= 0x1E; |
98 | } | 103 | } |
99 | 104 | ||
105 | EXPORT_SYMBOL(asc2ax); | ||
106 | |||
100 | /* | 107 | /* |
101 | * Compare two ax.25 addresses | 108 | * Compare two ax.25 addresses |
102 | */ | 109 | */ |
@@ -116,6 +123,8 @@ int ax25cmp(ax25_address *a, ax25_address *b) | |||
116 | return 2; /* Partial match */ | 123 | return 2; /* Partial match */ |
117 | } | 124 | } |
118 | 125 | ||
126 | EXPORT_SYMBOL(ax25cmp); | ||
127 | |||
119 | /* | 128 | /* |
120 | * Compare two AX.25 digipeater paths. | 129 | * Compare two AX.25 digipeater paths. |
121 | */ | 130 | */ |
diff --git a/net/ax25/ax25_ds_timer.c b/net/ax25/ax25_ds_timer.c
index 061083efc1dc..5961459935eb 100644
--- a/net/ax25/ax25_ds_timer.c
+++ b/net/ax25/ax25_ds_timer.c
@@ -61,7 +61,8 @@ void ax25_ds_set_timer(ax25_dev *ax25_dev) | |||
61 | return; | 61 | return; |
62 | 62 | ||
63 | del_timer(&ax25_dev->dama.slave_timer); | 63 | del_timer(&ax25_dev->dama.slave_timer); |
64 | ax25_dev->dama.slave_timeout = ax25_dev->values[AX25_VALUES_DS_TIMEOUT] / 10; | 64 | ax25_dev->dama.slave_timeout = |
65 | msecs_to_jiffies(ax25_dev->values[AX25_VALUES_DS_TIMEOUT]) / 10; | ||
65 | ax25_ds_add_timer(ax25_dev); | 66 | ax25_ds_add_timer(ax25_dev); |
66 | } | 67 | } |
67 | 68 | ||
diff --git a/net/ax25/ax25_iface.c b/net/ax25/ax25_iface.c
index d68aff100729..3bb152710b77 100644
--- a/net/ax25/ax25_iface.c
+++ b/net/ax25/ax25_iface.c
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/socket.h> | 12 | #include <linux/socket.h> |
13 | #include <linux/in.h> | 13 | #include <linux/in.h> |
14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
15 | #include <linux/module.h> | ||
15 | #include <linux/sched.h> | 16 | #include <linux/sched.h> |
16 | #include <linux/spinlock.h> | 17 | #include <linux/spinlock.h> |
17 | #include <linux/timer.h> | 18 | #include <linux/timer.h> |
@@ -74,6 +75,8 @@ int ax25_protocol_register(unsigned int pid, | |||
74 | return 1; | 75 | return 1; |
75 | } | 76 | } |
76 | 77 | ||
78 | EXPORT_SYMBOL(ax25_protocol_register); | ||
79 | |||
77 | void ax25_protocol_release(unsigned int pid) | 80 | void ax25_protocol_release(unsigned int pid) |
78 | { | 81 | { |
79 | struct protocol_struct *s, *protocol; | 82 | struct protocol_struct *s, *protocol; |
@@ -106,6 +109,8 @@ void ax25_protocol_release(unsigned int pid) | |||
106 | write_unlock(&protocol_list_lock); | 109 | write_unlock(&protocol_list_lock); |
107 | } | 110 | } |
108 | 111 | ||
112 | EXPORT_SYMBOL(ax25_protocol_release); | ||
113 | |||
109 | int ax25_linkfail_register(void (*func)(ax25_cb *, int)) | 114 | int ax25_linkfail_register(void (*func)(ax25_cb *, int)) |
110 | { | 115 | { |
111 | struct linkfail_struct *linkfail; | 116 | struct linkfail_struct *linkfail; |
@@ -123,6 +128,8 @@ int ax25_linkfail_register(void (*func)(ax25_cb *, int)) | |||
123 | return 1; | 128 | return 1; |
124 | } | 129 | } |
125 | 130 | ||
131 | EXPORT_SYMBOL(ax25_linkfail_register); | ||
132 | |||
126 | void ax25_linkfail_release(void (*func)(ax25_cb *, int)) | 133 | void ax25_linkfail_release(void (*func)(ax25_cb *, int)) |
127 | { | 134 | { |
128 | struct linkfail_struct *s, *linkfail; | 135 | struct linkfail_struct *s, *linkfail; |
@@ -155,6 +162,8 @@ void ax25_linkfail_release(void (*func)(ax25_cb *, int)) | |||
155 | spin_unlock_bh(&linkfail_lock); | 162 | spin_unlock_bh(&linkfail_lock); |
156 | } | 163 | } |
157 | 164 | ||
165 | EXPORT_SYMBOL(ax25_linkfail_release); | ||
166 | |||
158 | int ax25_listen_register(ax25_address *callsign, struct net_device *dev) | 167 | int ax25_listen_register(ax25_address *callsign, struct net_device *dev) |
159 | { | 168 | { |
160 | struct listen_struct *listen; | 169 | struct listen_struct *listen; |
@@ -176,6 +185,8 @@ int ax25_listen_register(ax25_address *callsign, struct net_device *dev) | |||
176 | return 1; | 185 | return 1; |
177 | } | 186 | } |
178 | 187 | ||
188 | EXPORT_SYMBOL(ax25_listen_register); | ||
189 | |||
179 | void ax25_listen_release(ax25_address *callsign, struct net_device *dev) | 190 | void ax25_listen_release(ax25_address *callsign, struct net_device *dev) |
180 | { | 191 | { |
181 | struct listen_struct *s, *listen; | 192 | struct listen_struct *s, *listen; |
@@ -208,6 +219,8 @@ void ax25_listen_release(ax25_address *callsign, struct net_device *dev) | |||
208 | spin_unlock_bh(&listen_lock); | 219 | spin_unlock_bh(&listen_lock); |
209 | } | 220 | } |
210 | 221 | ||
222 | EXPORT_SYMBOL(ax25_listen_release); | ||
223 | |||
211 | int (*ax25_protocol_function(unsigned int pid))(struct sk_buff *, ax25_cb *) | 224 | int (*ax25_protocol_function(unsigned int pid))(struct sk_buff *, ax25_cb *) |
212 | { | 225 | { |
213 | int (*res)(struct sk_buff *, ax25_cb *) = NULL; | 226 | int (*res)(struct sk_buff *, ax25_cb *) = NULL; |
diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
index d643dac3eccc..a0b534f80f17 100644
--- a/net/ax25/ax25_ip.c
+++ b/net/ax25/ax25_ip.c
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/socket.h> | 12 | #include <linux/socket.h> |
13 | #include <linux/in.h> | 13 | #include <linux/in.h> |
14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
15 | #include <linux/module.h> | ||
15 | #include <linux/sched.h> | 16 | #include <linux/sched.h> |
16 | #include <linux/timer.h> | 17 | #include <linux/timer.h> |
17 | #include <linux/string.h> | 18 | #include <linux/string.h> |
@@ -221,3 +222,5 @@ int ax25_rebuild_header(struct sk_buff *skb) | |||
221 | 222 | ||
222 | #endif | 223 | #endif |
223 | 224 | ||
225 | EXPORT_SYMBOL(ax25_hard_header); | ||
226 | EXPORT_SYMBOL(ax25_rebuild_header); | ||
diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c
index 5fc048dcd39a..5d99852b239c 100644
--- a/net/ax25/ax25_out.c
+++ b/net/ax25/ax25_out.c
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/socket.h> | 14 | #include <linux/socket.h> |
15 | #include <linux/in.h> | 15 | #include <linux/in.h> |
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/module.h> | ||
17 | #include <linux/sched.h> | 18 | #include <linux/sched.h> |
18 | #include <linux/timer.h> | 19 | #include <linux/timer.h> |
19 | #include <linux/string.h> | 20 | #include <linux/string.h> |
@@ -104,6 +105,8 @@ ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, ax25_address *src, ax2 | |||
104 | return ax25; /* We had to create it */ | 105 | return ax25; /* We had to create it */ |
105 | } | 106 | } |
106 | 107 | ||
108 | EXPORT_SYMBOL(ax25_send_frame); | ||
109 | |||
107 | /* | 110 | /* |
108 | * All outgoing AX.25 I frames pass via this routine. Therefore this is | 111 | * All outgoing AX.25 I frames pass via this routine. Therefore this is |
109 | * where the fragmentation of frames takes place. If fragment is set to | 112 | * where the fragmentation of frames takes place. If fragment is set to |
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index f04f8630fd28..5ac98250797b 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -360,7 +360,7 @@ struct file_operations ax25_route_fops = { | |||
360 | /* | 360 | /* |
361 | * Find AX.25 route | 361 | * Find AX.25 route |
362 | * | 362 | * |
363 | * Only routes with a refernce rout of zero can be destroyed. | 363 | * Only routes with a reference count of zero can be destroyed. |
364 | */ | 364 | */ |
365 | static ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev) | 365 | static ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev) |
366 | { | 366 | { |
diff --git a/net/ax25/ax25_timer.c b/net/ax25/ax25_timer.c
index 7a6b50a14554..ec254057f212 100644
--- a/net/ax25/ax25_timer.c
+++ b/net/ax25/ax25_timer.c
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/socket.h> | 18 | #include <linux/socket.h> |
19 | #include <linux/in.h> | 19 | #include <linux/in.h> |
20 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
21 | #include <linux/module.h> | ||
21 | #include <linux/jiffies.h> | 22 | #include <linux/jiffies.h> |
22 | #include <linux/timer.h> | 23 | #include <linux/timer.h> |
23 | #include <linux/string.h> | 24 | #include <linux/string.h> |
@@ -137,6 +138,8 @@ unsigned long ax25_display_timer(struct timer_list *timer) | |||
137 | return timer->expires - jiffies; | 138 | return timer->expires - jiffies; |
138 | } | 139 | } |
139 | 140 | ||
141 | EXPORT_SYMBOL(ax25_display_timer); | ||
142 | |||
140 | static void ax25_heartbeat_expiry(unsigned long param) | 143 | static void ax25_heartbeat_expiry(unsigned long param) |
141 | { | 144 | { |
142 | int proto = AX25_PROTO_STD_SIMPLEX; | 145 | int proto = AX25_PROTO_STD_SIMPLEX; |
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c
index b8b5854bce9a..5e9a81e8b214 100644
--- a/net/ax25/ax25_uid.c
+++ b/net/ax25/ax25_uid.c
@@ -49,6 +49,8 @@ static DEFINE_RWLOCK(ax25_uid_lock); | |||
49 | 49 | ||
50 | int ax25_uid_policy = 0; | 50 | int ax25_uid_policy = 0; |
51 | 51 | ||
52 | EXPORT_SYMBOL(ax25_uid_policy); | ||
53 | |||
52 | ax25_uid_assoc *ax25_findbyuid(uid_t uid) | 54 | ax25_uid_assoc *ax25_findbyuid(uid_t uid) |
53 | { | 55 | { |
54 | ax25_uid_assoc *ax25_uid, *res = NULL; | 56 | ax25_uid_assoc *ax25_uid, *res = NULL; |
@@ -67,6 +69,8 @@ ax25_uid_assoc *ax25_findbyuid(uid_t uid) | |||
67 | return res; | 69 | return res; |
68 | } | 70 | } |
69 | 71 | ||
72 | EXPORT_SYMBOL(ax25_findbyuid); | ||
73 | |||
70 | int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax) | 74 | int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax) |
71 | { | 75 | { |
72 | ax25_uid_assoc *ax25_uid; | 76 | ax25_uid_assoc *ax25_uid; |
diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
index 894a22558d9d..bdb64c36df12 100644
--- a/net/ax25/sysctl_net_ax25.c
+++ b/net/ax25/sysctl_net_ax25.c
@@ -18,14 +18,14 @@ static int min_backoff[1], max_backoff[] = {2}; | |||
18 | static int min_conmode[1], max_conmode[] = {2}; | 18 | static int min_conmode[1], max_conmode[] = {2}; |
19 | static int min_window[] = {1}, max_window[] = {7}; | 19 | static int min_window[] = {1}, max_window[] = {7}; |
20 | static int min_ewindow[] = {1}, max_ewindow[] = {63}; | 20 | static int min_ewindow[] = {1}, max_ewindow[] = {63}; |
21 | static int min_t1[] = {1}, max_t1[] = {30 * HZ}; | 21 | static int min_t1[] = {1}, max_t1[] = {30000}; |
22 | static int min_t2[] = {1}, max_t2[] = {20 * HZ}; | 22 | static int min_t2[] = {1}, max_t2[] = {20000}; |
23 | static int min_t3[1], max_t3[] = {3600 * HZ}; | 23 | static int min_t3[1], max_t3[] = {3600000}; |
24 | static int min_idle[1], max_idle[] = {65535 * HZ}; | 24 | static int min_idle[1], max_idle[] = {65535000}; |
25 | static int min_n2[] = {1}, max_n2[] = {31}; | 25 | static int min_n2[] = {1}, max_n2[] = {31}; |
26 | static int min_paclen[] = {1}, max_paclen[] = {512}; | 26 | static int min_paclen[] = {1}, max_paclen[] = {512}; |
27 | static int min_proto[1], max_proto[] = { AX25_PROTO_MAX }; | 27 | static int min_proto[1], max_proto[] = { AX25_PROTO_MAX }; |
28 | static int min_ds_timeout[1], max_ds_timeout[] = {65535 * HZ}; | 28 | static int min_ds_timeout[1], max_ds_timeout[] = {65535000}; |
29 | 29 | ||
30 | static struct ctl_table_header *ax25_table_header; | 30 | static struct ctl_table_header *ax25_table_header; |
31 | 31 | ||
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 59eef42d4a42..ad1c7af65ec8 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -308,26 +308,19 @@ int br_add_bridge(const char *name) | |||
308 | if (ret) | 308 | if (ret) |
309 | goto err2; | 309 | goto err2; |
310 | 310 | ||
311 | /* network device kobject is not setup until | ||
312 | * after rtnl_unlock does it's hotplug magic. | ||
313 | * so hold reference to avoid race. | ||
314 | */ | ||
315 | dev_hold(dev); | ||
316 | rtnl_unlock(); | ||
317 | |||
318 | ret = br_sysfs_addbr(dev); | 311 | ret = br_sysfs_addbr(dev); |
319 | dev_put(dev); | 312 | if (ret) |
320 | 313 | goto err3; | |
321 | if (ret) | 314 | rtnl_unlock(); |
322 | unregister_netdev(dev); | 315 | return 0; |
323 | out: | ||
324 | return ret; | ||
325 | 316 | ||
317 | err3: | ||
318 | unregister_netdev(dev); | ||
326 | err2: | 319 | err2: |
327 | free_netdev(dev); | 320 | free_netdev(dev); |
328 | err1: | 321 | err1: |
329 | rtnl_unlock(); | 322 | rtnl_unlock(); |
330 | goto out; | 323 | return ret; |
331 | } | 324 | } |
332 | 325 | ||
333 | int br_del_bridge(const char *name) | 326 | int br_del_bridge(const char *name) |
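Because register_netdevice() now sets up the sysfs kobject directly (see the net/core/dev.c hunks below), br_add_bridge() no longer needs the dev_hold()/dev_put() workaround; it registers sysfs under the RTNL lock and unwinds with labels in reverse order on failure. A stand-alone sketch of that unwind idiom follows; the resource names are made up for illustration.

#include <stdio.h>
#include <stdlib.h>

static int setup(const char *name)  { printf("setup %s\n", name); return 0; }
static void teardown(const char *name) { printf("teardown %s\n", name); }

static int add_thing(void)
{
        int err;

        err = setup("netdev");
        if (err)
                goto err1;
        err = setup("registration");
        if (err)
                goto err2;
        err = setup("sysfs");
        if (err)
                goto err3;
        return 0;

err3:                                   /* undo only what already succeeded, */
        teardown("registration");       /* in reverse order of acquisition   */
err2:
        teardown("netdev");
err1:
        return err;
}

int main(void)
{
        return add_thing() ? EXIT_FAILURE : EXIT_SUCCESS;
}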
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index b0b7f55c1edd..bfa4d8c333f7 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -66,6 +66,7 @@ int br_handle_frame_finish(struct sk_buff *skb) | |||
66 | } | 66 | } |
67 | 67 | ||
68 | if (is_multicast_ether_addr(dest)) { | 68 | if (is_multicast_ether_addr(dest)) { |
69 | br->statistics.multicast++; | ||
69 | br_flood_forward(br, skb, !passedup); | 70 | br_flood_forward(br, skb, !passedup); |
70 | if (!passedup) | 71 | if (!passedup) |
71 | br_pass_frame_up(br, skb); | 72 | br_pass_frame_up(br, skb); |
diff --git a/net/core/dev.c b/net/core/dev.c
index 3bad1afc89fa..2dce673a039b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -193,7 +193,7 @@ static inline struct hlist_head *dev_index_hash(int ifindex) | |||
193 | * Our notifier list | 193 | * Our notifier list |
194 | */ | 194 | */ |
195 | 195 | ||
196 | static BLOCKING_NOTIFIER_HEAD(netdev_chain); | 196 | static RAW_NOTIFIER_HEAD(netdev_chain); |
197 | 197 | ||
198 | /* | 198 | /* |
199 | * Device drivers call our routines to queue packets here. We empty the | 199 | * Device drivers call our routines to queue packets here. We empty the |
@@ -736,7 +736,7 @@ int dev_change_name(struct net_device *dev, char *newname) | |||
736 | if (!err) { | 736 | if (!err) { |
737 | hlist_del(&dev->name_hlist); | 737 | hlist_del(&dev->name_hlist); |
738 | hlist_add_head(&dev->name_hlist, dev_name_hash(dev->name)); | 738 | hlist_add_head(&dev->name_hlist, dev_name_hash(dev->name)); |
739 | blocking_notifier_call_chain(&netdev_chain, | 739 | raw_notifier_call_chain(&netdev_chain, |
740 | NETDEV_CHANGENAME, dev); | 740 | NETDEV_CHANGENAME, dev); |
741 | } | 741 | } |
742 | 742 | ||
@@ -751,7 +751,7 @@ int dev_change_name(struct net_device *dev, char *newname) | |||
751 | */ | 751 | */ |
752 | void netdev_features_change(struct net_device *dev) | 752 | void netdev_features_change(struct net_device *dev) |
753 | { | 753 | { |
754 | blocking_notifier_call_chain(&netdev_chain, NETDEV_FEAT_CHANGE, dev); | 754 | raw_notifier_call_chain(&netdev_chain, NETDEV_FEAT_CHANGE, dev); |
755 | } | 755 | } |
756 | EXPORT_SYMBOL(netdev_features_change); | 756 | EXPORT_SYMBOL(netdev_features_change); |
757 | 757 | ||
@@ -766,7 +766,7 @@ EXPORT_SYMBOL(netdev_features_change); | |||
766 | void netdev_state_change(struct net_device *dev) | 766 | void netdev_state_change(struct net_device *dev) |
767 | { | 767 | { |
768 | if (dev->flags & IFF_UP) { | 768 | if (dev->flags & IFF_UP) { |
769 | blocking_notifier_call_chain(&netdev_chain, | 769 | raw_notifier_call_chain(&netdev_chain, |
770 | NETDEV_CHANGE, dev); | 770 | NETDEV_CHANGE, dev); |
771 | rtmsg_ifinfo(RTM_NEWLINK, dev, 0); | 771 | rtmsg_ifinfo(RTM_NEWLINK, dev, 0); |
772 | } | 772 | } |
@@ -864,7 +864,7 @@ int dev_open(struct net_device *dev) | |||
864 | /* | 864 | /* |
865 | * ... and announce new interface. | 865 | * ... and announce new interface. |
866 | */ | 866 | */ |
867 | blocking_notifier_call_chain(&netdev_chain, NETDEV_UP, dev); | 867 | raw_notifier_call_chain(&netdev_chain, NETDEV_UP, dev); |
868 | } | 868 | } |
869 | return ret; | 869 | return ret; |
870 | } | 870 | } |
@@ -887,7 +887,7 @@ int dev_close(struct net_device *dev) | |||
887 | * Tell people we are going down, so that they can | 887 | * Tell people we are going down, so that they can |
888 | * prepare to death, when device is still operating. | 888 | * prepare to death, when device is still operating. |
889 | */ | 889 | */ |
890 | blocking_notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev); | 890 | raw_notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev); |
891 | 891 | ||
892 | dev_deactivate(dev); | 892 | dev_deactivate(dev); |
893 | 893 | ||
@@ -924,7 +924,7 @@ int dev_close(struct net_device *dev) | |||
924 | /* | 924 | /* |
925 | * Tell people we are down | 925 | * Tell people we are down |
926 | */ | 926 | */ |
927 | blocking_notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev); | 927 | raw_notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev); |
928 | 928 | ||
929 | return 0; | 929 | return 0; |
930 | } | 930 | } |
@@ -955,7 +955,7 @@ int register_netdevice_notifier(struct notifier_block *nb) | |||
955 | int err; | 955 | int err; |
956 | 956 | ||
957 | rtnl_lock(); | 957 | rtnl_lock(); |
958 | err = blocking_notifier_chain_register(&netdev_chain, nb); | 958 | err = raw_notifier_chain_register(&netdev_chain, nb); |
959 | if (!err) { | 959 | if (!err) { |
960 | for (dev = dev_base; dev; dev = dev->next) { | 960 | for (dev = dev_base; dev; dev = dev->next) { |
961 | nb->notifier_call(nb, NETDEV_REGISTER, dev); | 961 | nb->notifier_call(nb, NETDEV_REGISTER, dev); |
@@ -983,7 +983,7 @@ int unregister_netdevice_notifier(struct notifier_block *nb) | |||
983 | int err; | 983 | int err; |
984 | 984 | ||
985 | rtnl_lock(); | 985 | rtnl_lock(); |
986 | err = blocking_notifier_chain_unregister(&netdev_chain, nb); | 986 | err = raw_notifier_chain_unregister(&netdev_chain, nb); |
987 | rtnl_unlock(); | 987 | rtnl_unlock(); |
988 | return err; | 988 | return err; |
989 | } | 989 | } |
@@ -994,12 +994,12 @@ int unregister_netdevice_notifier(struct notifier_block *nb) | |||
994 | * @v: pointer passed unmodified to notifier function | 994 | * @v: pointer passed unmodified to notifier function |
995 | * | 995 | * |
996 | * Call all network notifier blocks. Parameters and return value | 996 | * Call all network notifier blocks. Parameters and return value |
997 | * are as for blocking_notifier_call_chain(). | 997 | * are as for raw_notifier_call_chain(). |
998 | */ | 998 | */ |
999 | 999 | ||
1000 | int call_netdevice_notifiers(unsigned long val, void *v) | 1000 | int call_netdevice_notifiers(unsigned long val, void *v) |
1001 | { | 1001 | { |
1002 | return blocking_notifier_call_chain(&netdev_chain, val, v); | 1002 | return raw_notifier_call_chain(&netdev_chain, val, v); |
1003 | } | 1003 | } |
1004 | 1004 | ||
1005 | /* When > 0 there are consumers of rx skb time stamps */ | 1005 | /* When > 0 there are consumers of rx skb time stamps */ |
@@ -2308,7 +2308,7 @@ int dev_change_flags(struct net_device *dev, unsigned flags) | |||
2308 | if (dev->flags & IFF_UP && | 2308 | if (dev->flags & IFF_UP && |
2309 | ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI | | 2309 | ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI | |
2310 | IFF_VOLATILE))) | 2310 | IFF_VOLATILE))) |
2311 | blocking_notifier_call_chain(&netdev_chain, | 2311 | raw_notifier_call_chain(&netdev_chain, |
2312 | NETDEV_CHANGE, dev); | 2312 | NETDEV_CHANGE, dev); |
2313 | 2313 | ||
2314 | if ((flags ^ dev->gflags) & IFF_PROMISC) { | 2314 | if ((flags ^ dev->gflags) & IFF_PROMISC) { |
@@ -2353,7 +2353,7 @@ int dev_set_mtu(struct net_device *dev, int new_mtu) | |||
2353 | else | 2353 | else |
2354 | dev->mtu = new_mtu; | 2354 | dev->mtu = new_mtu; |
2355 | if (!err && dev->flags & IFF_UP) | 2355 | if (!err && dev->flags & IFF_UP) |
2356 | blocking_notifier_call_chain(&netdev_chain, | 2356 | raw_notifier_call_chain(&netdev_chain, |
2357 | NETDEV_CHANGEMTU, dev); | 2357 | NETDEV_CHANGEMTU, dev); |
2358 | return err; | 2358 | return err; |
2359 | } | 2359 | } |
@@ -2370,7 +2370,7 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa) | |||
2370 | return -ENODEV; | 2370 | return -ENODEV; |
2371 | err = dev->set_mac_address(dev, sa); | 2371 | err = dev->set_mac_address(dev, sa); |
2372 | if (!err) | 2372 | if (!err) |
2373 | blocking_notifier_call_chain(&netdev_chain, | 2373 | raw_notifier_call_chain(&netdev_chain, |
2374 | NETDEV_CHANGEADDR, dev); | 2374 | NETDEV_CHANGEADDR, dev); |
2375 | return err; | 2375 | return err; |
2376 | } | 2376 | } |
@@ -2427,7 +2427,7 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd) | |||
2427 | return -EINVAL; | 2427 | return -EINVAL; |
2428 | memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, | 2428 | memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, |
2429 | min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); | 2429 | min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); |
2430 | blocking_notifier_call_chain(&netdev_chain, | 2430 | raw_notifier_call_chain(&netdev_chain, |
2431 | NETDEV_CHANGEADDR, dev); | 2431 | NETDEV_CHANGEADDR, dev); |
2432 | return 0; | 2432 | return 0; |
2433 | 2433 | ||
@@ -2777,6 +2777,8 @@ int register_netdevice(struct net_device *dev) | |||
2777 | BUG_ON(dev_boot_phase); | 2777 | BUG_ON(dev_boot_phase); |
2778 | ASSERT_RTNL(); | 2778 | ASSERT_RTNL(); |
2779 | 2779 | ||
2780 | might_sleep(); | ||
2781 | |||
2780 | /* When net_device's are persistent, this will be fatal. */ | 2782 | /* When net_device's are persistent, this will be fatal. */ |
2781 | BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); | 2783 | BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); |
2782 | 2784 | ||
@@ -2863,6 +2865,11 @@ int register_netdevice(struct net_device *dev) | |||
2863 | if (!dev->rebuild_header) | 2865 | if (!dev->rebuild_header) |
2864 | dev->rebuild_header = default_rebuild_header; | 2866 | dev->rebuild_header = default_rebuild_header; |
2865 | 2867 | ||
2868 | ret = netdev_register_sysfs(dev); | ||
2869 | if (ret) | ||
2870 | goto out_err; | ||
2871 | dev->reg_state = NETREG_REGISTERED; | ||
2872 | |||
2866 | /* | 2873 | /* |
2867 | * Default initial state at registry is that the | 2874 | * Default initial state at registry is that the |
2868 | * device is present. | 2875 | * device is present. |
@@ -2878,14 +2885,11 @@ int register_netdevice(struct net_device *dev) | |||
2878 | hlist_add_head(&dev->name_hlist, head); | 2885 | hlist_add_head(&dev->name_hlist, head); |
2879 | hlist_add_head(&dev->index_hlist, dev_index_hash(dev->ifindex)); | 2886 | hlist_add_head(&dev->index_hlist, dev_index_hash(dev->ifindex)); |
2880 | dev_hold(dev); | 2887 | dev_hold(dev); |
2881 | dev->reg_state = NETREG_REGISTERING; | ||
2882 | write_unlock_bh(&dev_base_lock); | 2888 | write_unlock_bh(&dev_base_lock); |
2883 | 2889 | ||
2884 | /* Notify protocols, that a new device appeared. */ | 2890 | /* Notify protocols, that a new device appeared. */ |
2885 | blocking_notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev); | 2891 | raw_notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev); |
2886 | 2892 | ||
2887 | /* Finish registration after unlock */ | ||
2888 | net_set_todo(dev); | ||
2889 | ret = 0; | 2893 | ret = 0; |
2890 | 2894 | ||
2891 | out: | 2895 | out: |
@@ -2961,7 +2965,7 @@ static void netdev_wait_allrefs(struct net_device *dev) | |||
2961 | rtnl_lock(); | 2965 | rtnl_lock(); |
2962 | 2966 | ||
2963 | /* Rebroadcast unregister notification */ | 2967 | /* Rebroadcast unregister notification */ |
2964 | blocking_notifier_call_chain(&netdev_chain, | 2968 | raw_notifier_call_chain(&netdev_chain, |
2965 | NETDEV_UNREGISTER, dev); | 2969 | NETDEV_UNREGISTER, dev); |
2966 | 2970 | ||
2967 | if (test_bit(__LINK_STATE_LINKWATCH_PENDING, | 2971 | if (test_bit(__LINK_STATE_LINKWATCH_PENDING, |
@@ -3008,7 +3012,7 @@ static void netdev_wait_allrefs(struct net_device *dev) | |||
3008 | * | 3012 | * |
3009 | * We are invoked by rtnl_unlock() after it drops the semaphore. | 3013 | * We are invoked by rtnl_unlock() after it drops the semaphore. |
3010 | * This allows us to deal with problems: | 3014 | * This allows us to deal with problems: |
3011 | * 1) We can create/delete sysfs objects which invoke hotplug | 3015 | * 1) We can delete sysfs objects which invoke hotplug |
3012 | * without deadlocking with linkwatch via keventd. | 3016 | * without deadlocking with linkwatch via keventd. |
3013 | * 2) Since we run with the RTNL semaphore not held, we can sleep | 3017 | * 2) Since we run with the RTNL semaphore not held, we can sleep |
3014 | * safely in order to wait for the netdev refcnt to drop to zero. | 3018 | * safely in order to wait for the netdev refcnt to drop to zero. |
@@ -3017,8 +3021,6 @@ static DEFINE_MUTEX(net_todo_run_mutex); | |||
3017 | void netdev_run_todo(void) | 3021 | void netdev_run_todo(void) |
3018 | { | 3022 | { |
3019 | struct list_head list = LIST_HEAD_INIT(list); | 3023 | struct list_head list = LIST_HEAD_INIT(list); |
3020 | int err; | ||
3021 | |||
3022 | 3024 | ||
3023 | /* Need to guard against multiple cpu's getting out of order. */ | 3025 | /* Need to guard against multiple cpu's getting out of order. */ |
3024 | mutex_lock(&net_todo_run_mutex); | 3026 | mutex_lock(&net_todo_run_mutex); |
@@ -3041,40 +3043,29 @@ void netdev_run_todo(void) | |||
3041 | = list_entry(list.next, struct net_device, todo_list); | 3043 | = list_entry(list.next, struct net_device, todo_list); |
3042 | list_del(&dev->todo_list); | 3044 | list_del(&dev->todo_list); |
3043 | 3045 | ||
3044 | switch(dev->reg_state) { | 3046 | if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { |
3045 | case NETREG_REGISTERING: | 3047 | printk(KERN_ERR "network todo '%s' but state %d\n", |
3046 | dev->reg_state = NETREG_REGISTERED; | 3048 | dev->name, dev->reg_state); |
3047 | err = netdev_register_sysfs(dev); | 3049 | dump_stack(); |
3048 | if (err) | 3050 | continue; |
3049 | printk(KERN_ERR "%s: failed sysfs registration (%d)\n", | 3051 | } |
3050 | dev->name, err); | ||
3051 | break; | ||
3052 | |||
3053 | case NETREG_UNREGISTERING: | ||
3054 | netdev_unregister_sysfs(dev); | ||
3055 | dev->reg_state = NETREG_UNREGISTERED; | ||
3056 | |||
3057 | netdev_wait_allrefs(dev); | ||
3058 | 3052 | ||
3059 | /* paranoia */ | 3053 | netdev_unregister_sysfs(dev); |
3060 | BUG_ON(atomic_read(&dev->refcnt)); | 3054 | dev->reg_state = NETREG_UNREGISTERED; |
3061 | BUG_TRAP(!dev->ip_ptr); | ||
3062 | BUG_TRAP(!dev->ip6_ptr); | ||
3063 | BUG_TRAP(!dev->dn_ptr); | ||
3064 | 3055 | ||
3056 | netdev_wait_allrefs(dev); | ||
3065 | 3057 | ||
3066 | /* It must be the very last action, | 3058 | /* paranoia */ |
3067 | * after this 'dev' may point to freed up memory. | 3059 | BUG_ON(atomic_read(&dev->refcnt)); |
3068 | */ | 3060 | BUG_TRAP(!dev->ip_ptr); |
3069 | if (dev->destructor) | 3061 | BUG_TRAP(!dev->ip6_ptr); |
3070 | dev->destructor(dev); | 3062 | BUG_TRAP(!dev->dn_ptr); |
3071 | break; | ||
3072 | 3063 | ||
3073 | default: | 3064 | /* It must be the very last action, |
3074 | printk(KERN_ERR "network todo '%s' but state %d\n", | 3065 | * after this 'dev' may point to freed up memory. |
3075 | dev->name, dev->reg_state); | 3066 | */ |
3076 | break; | 3067 | if (dev->destructor) |
3077 | } | 3068 | dev->destructor(dev); |
3078 | } | 3069 | } |
3079 | 3070 | ||
3080 | out: | 3071 | out: |
@@ -3216,7 +3207,7 @@ int unregister_netdevice(struct net_device *dev) | |||
3216 | /* Notify protocols, that we are about to destroy | 3207 | /* Notify protocols, that we are about to destroy |
3217 | this device. They should clean all the things. | 3208 | this device. They should clean all the things. |
3218 | */ | 3209 | */ |
3219 | blocking_notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev); | 3210 | raw_notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev); |
3220 | 3211 | ||
3221 | /* | 3212 | /* |
3222 | * Flush the multicast chain | 3213 | * Flush the multicast chain |
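The dev.c hunks convert netdev_chain from a blocking notifier (which takes its own rw_semaphore and may sleep) to a raw notifier: the chain itself does no locking, and every caller is expected to already hold the RTNL lock. Below is a minimal userspace model of that caller-serialised style; the chain layout and the mutex standing in for RTNL are illustrative assumptions, not the kernel's notifier implementation.

#include <pthread.h>
#include <stdio.h>

/* Raw-style notifier chain: no internal locking, callers serialise. */
struct notifier {
        int (*call)(unsigned long event, void *data);
        struct notifier *next;
};

static struct notifier *chain;                                 /* head of chain */
static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;  /* stands in for RTNL */

static void chain_register(struct notifier *nb)
{
        nb->next = chain;              /* caller must hold big_lock */
        chain = nb;
}

static void chain_call(unsigned long event, void *data)
{
        struct notifier *nb;           /* caller must hold big_lock */

        for (nb = chain; nb; nb = nb->next)
                nb->call(event, data);
}

static int print_event(unsigned long event, void *data)
{
        printf("event %lu on %s\n", event, (const char *)data);
        return 0;
}

int main(void)
{
        struct notifier nb = { .call = print_event };

        pthread_mutex_lock(&big_lock); /* the "rtnl_lock()" of this model */
        chain_register(&nb);
        chain_call(1, "eth0");
        pthread_mutex_unlock(&big_lock);
        return 0;
}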
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 341de44c7ed1..646937cc2d84 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -170,13 +170,13 @@ void linkwatch_fire_event(struct net_device *dev) | |||
170 | spin_unlock_irqrestore(&lweventlist_lock, flags); | 170 | spin_unlock_irqrestore(&lweventlist_lock, flags); |
171 | 171 | ||
172 | if (!test_and_set_bit(LW_RUNNING, &linkwatch_flags)) { | 172 | if (!test_and_set_bit(LW_RUNNING, &linkwatch_flags)) { |
173 | unsigned long thisevent = jiffies; | 173 | unsigned long delay = linkwatch_nextevent - jiffies; |
174 | 174 | ||
175 | if (thisevent >= linkwatch_nextevent) { | 175 | /* If we wrap around we'll delay it by at most HZ. */ |
176 | if (!delay || delay > HZ) | ||
176 | schedule_work(&linkwatch_work); | 177 | schedule_work(&linkwatch_work); |
177 | } else { | 178 | else |
178 | schedule_delayed_work(&linkwatch_work, linkwatch_nextevent - thisevent); | 179 | schedule_delayed_work(&linkwatch_work, delay); |
179 | } | ||
180 | } | 180 | } |
181 | } | 181 | } |
182 | } | 182 | } |
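The link_watch change computes the delay as linkwatch_nextevent - jiffies in unsigned arithmetic; if the next-event time has already passed, the subtraction wraps around to a huge value, which the new code catches by scheduling immediately whenever the delay is zero or larger than HZ. A small stand-alone demonstration of that wrap-around decision (the HZ value and sample tick counts are assumptions):

#include <stdio.h>

#define HZ 250 /* assumed tick rate */

int main(void)
{
        unsigned long jiffies = 1000;              /* pretend "now" */
        unsigned long nextevent = 900;             /* already in the past */
        unsigned long delay = nextevent - jiffies; /* wraps to a huge value */

        printf("raw delay = %lu\n", delay);

        /* Mirror of the linkwatch_fire_event() decision: run now rather
         * than sleeping for an absurdly long time. */
        if (!delay || delay > HZ)
                printf("schedule immediately\n");
        else
                printf("schedule in %lu ticks\n", delay);
        return 0;
}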
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index c12990c9c603..47a6fceb6771 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -29,7 +29,7 @@ static const char fmt_ulong[] = "%lu\n"; | |||
29 | 29 | ||
30 | static inline int dev_isalive(const struct net_device *dev) | 30 | static inline int dev_isalive(const struct net_device *dev) |
31 | { | 31 | { |
32 | return dev->reg_state == NETREG_REGISTERED; | 32 | return dev->reg_state <= NETREG_REGISTERED; |
33 | } | 33 | } |
34 | 34 | ||
35 | /* use same locking rules as GIF* ioctl's */ | 35 | /* use same locking rules as GIF* ioctl's */ |
@@ -445,58 +445,33 @@ static struct class net_class = { | |||
445 | 445 | ||
446 | void netdev_unregister_sysfs(struct net_device * net) | 446 | void netdev_unregister_sysfs(struct net_device * net) |
447 | { | 447 | { |
448 | struct class_device * class_dev = &(net->class_dev); | 448 | class_device_del(&(net->class_dev)); |
449 | |||
450 | if (net->get_stats) | ||
451 | sysfs_remove_group(&class_dev->kobj, &netstat_group); | ||
452 | |||
453 | #ifdef WIRELESS_EXT | ||
454 | if (net->get_wireless_stats || (net->wireless_handlers && | ||
455 | net->wireless_handlers->get_wireless_stats)) | ||
456 | sysfs_remove_group(&class_dev->kobj, &wireless_group); | ||
457 | #endif | ||
458 | class_device_del(class_dev); | ||
459 | |||
460 | } | 449 | } |
461 | 450 | ||
462 | /* Create sysfs entries for network device. */ | 451 | /* Create sysfs entries for network device. */ |
463 | int netdev_register_sysfs(struct net_device *net) | 452 | int netdev_register_sysfs(struct net_device *net) |
464 | { | 453 | { |
465 | struct class_device *class_dev = &(net->class_dev); | 454 | struct class_device *class_dev = &(net->class_dev); |
466 | int ret; | 455 | struct attribute_group **groups = net->sysfs_groups; |
467 | 456 | ||
457 | class_device_initialize(class_dev); | ||
468 | class_dev->class = &net_class; | 458 | class_dev->class = &net_class; |
469 | class_dev->class_data = net; | 459 | class_dev->class_data = net; |
460 | class_dev->groups = groups; | ||
470 | 461 | ||
462 | BUILD_BUG_ON(BUS_ID_SIZE < IFNAMSIZ); | ||
471 | strlcpy(class_dev->class_id, net->name, BUS_ID_SIZE); | 463 | strlcpy(class_dev->class_id, net->name, BUS_ID_SIZE); |
472 | if ((ret = class_device_register(class_dev))) | ||
473 | goto out; | ||
474 | 464 | ||
475 | if (net->get_stats && | 465 | if (net->get_stats) |
476 | (ret = sysfs_create_group(&class_dev->kobj, &netstat_group))) | 466 | *groups++ = &netstat_group; |
477 | goto out_unreg; | ||
478 | 467 | ||
479 | #ifdef WIRELESS_EXT | 468 | #ifdef WIRELESS_EXT |
480 | if (net->get_wireless_stats || (net->wireless_handlers && | 469 | if (net->get_wireless_stats |
481 | net->wireless_handlers->get_wireless_stats)) { | 470 | || (net->wireless_handlers && net->wireless_handlers->get_wireless_stats)) |
482 | ret = sysfs_create_group(&class_dev->kobj, &wireless_group); | 471 | *groups++ = &wireless_group; |
483 | if (ret) | ||
484 | goto out_cleanup; | ||
485 | } | ||
486 | return 0; | ||
487 | out_cleanup: | ||
488 | if (net->get_stats) | ||
489 | sysfs_remove_group(&class_dev->kobj, &netstat_group); | ||
490 | #else | ||
491 | return 0; | ||
492 | #endif | 472 | #endif |
493 | 473 | ||
494 | out_unreg: | 474 | return class_device_add(class_dev); |
495 | printk(KERN_WARNING "%s: sysfs attribute registration failed %d\n", | ||
496 | net->name, ret); | ||
497 | class_device_unregister(class_dev); | ||
498 | out: | ||
499 | return ret; | ||
500 | } | 475 | } |
501 | 476 | ||
502 | int netdev_sysfs_init(void) | 477 | int netdev_sysfs_init(void) |
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 1ff7328b0e17..2e0ee8355c41 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -848,6 +848,7 @@ static int dccp_close_state(struct sock *sk) | |||
848 | void dccp_close(struct sock *sk, long timeout) | 848 | void dccp_close(struct sock *sk, long timeout) |
849 | { | 849 | { |
850 | struct sk_buff *skb; | 850 | struct sk_buff *skb; |
851 | int state; | ||
851 | 852 | ||
852 | lock_sock(sk); | 853 | lock_sock(sk); |
853 | 854 | ||
@@ -882,6 +883,11 @@ void dccp_close(struct sock *sk, long timeout) | |||
882 | sk_stream_wait_close(sk, timeout); | 883 | sk_stream_wait_close(sk, timeout); |
883 | 884 | ||
884 | adjudge_to_death: | 885 | adjudge_to_death: |
886 | state = sk->sk_state; | ||
887 | sock_hold(sk); | ||
888 | sock_orphan(sk); | ||
889 | atomic_inc(sk->sk_prot->orphan_count); | ||
890 | |||
885 | /* | 891 | /* |
886 | * It is the last release_sock in its life. It will remove backlog. | 892 | * It is the last release_sock in its life. It will remove backlog. |
887 | */ | 893 | */ |
@@ -894,8 +900,9 @@ adjudge_to_death: | |||
894 | bh_lock_sock(sk); | 900 | bh_lock_sock(sk); |
895 | BUG_TRAP(!sock_owned_by_user(sk)); | 901 | BUG_TRAP(!sock_owned_by_user(sk)); |
896 | 902 | ||
897 | sock_hold(sk); | 903 | /* Have we already been destroyed by a softirq or backlog? */ |
898 | sock_orphan(sk); | 904 | if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED) |
905 | goto out; | ||
899 | 906 | ||
900 | /* | 907 | /* |
901 | * The last release_sock may have processed the CLOSE or RESET | 908 | * The last release_sock may have processed the CLOSE or RESET |
@@ -915,12 +922,12 @@ adjudge_to_death: | |||
915 | #endif | 922 | #endif |
916 | } | 923 | } |
917 | 924 | ||
918 | atomic_inc(sk->sk_prot->orphan_count); | ||
919 | if (sk->sk_state == DCCP_CLOSED) | 925 | if (sk->sk_state == DCCP_CLOSED) |
920 | inet_csk_destroy_sock(sk); | 926 | inet_csk_destroy_sock(sk); |
921 | 927 | ||
922 | /* Otherwise, socket is reprieved until protocol close. */ | 928 | /* Otherwise, socket is reprieved until protocol close. */ |
923 | 929 | ||
930 | out: | ||
924 | bh_unlock_sock(sk); | 931 | bh_unlock_sock(sk); |
925 | local_bh_enable(); | 932 | local_bh_enable(); |
926 | sock_put(sk); | 933 | sock_put(sk); |
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index 7c8692c26bfe..66e230c3b328 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -493,7 +493,6 @@ struct elist_cb_state { | |||
493 | static void neigh_elist_cb(struct neighbour *neigh, void *_info) | 493 | static void neigh_elist_cb(struct neighbour *neigh, void *_info) |
494 | { | 494 | { |
495 | struct elist_cb_state *s = _info; | 495 | struct elist_cb_state *s = _info; |
496 | struct dn_dev *dn_db; | ||
497 | struct dn_neigh *dn; | 496 | struct dn_neigh *dn; |
498 | 497 | ||
499 | if (neigh->dev != s->dev) | 498 | if (neigh->dev != s->dev) |
@@ -503,10 +502,6 @@ static void neigh_elist_cb(struct neighbour *neigh, void *_info) | |||
503 | if (!(dn->flags & (DN_NDFLAG_R1|DN_NDFLAG_R2))) | 502 | if (!(dn->flags & (DN_NDFLAG_R1|DN_NDFLAG_R2))) |
504 | return; | 503 | return; |
505 | 504 | ||
506 | dn_db = (struct dn_dev *) s->dev->dn_ptr; | ||
507 | if (dn_db->parms.forwarding == 1 && (dn->flags & DN_NDFLAG_R2)) | ||
508 | return; | ||
509 | |||
510 | if (s->t == s->n) | 505 | if (s->t == s->n) |
511 | s->rs = dn_find_slot(s->ptr, s->n, dn->priority); | 506 | s->rs = dn_find_slot(s->ptr, s->n, dn->priority); |
512 | else | 507 | else |
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c
index fb79ce7d6439..57ea9f6f465c 100644
--- a/net/ieee80211/softmac/ieee80211softmac_assoc.c
+++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c
@@ -51,11 +51,12 @@ ieee80211softmac_assoc(struct ieee80211softmac_device *mac, struct ieee80211soft | |||
51 | spin_lock_irqsave(&mac->lock, flags); | 51 | spin_lock_irqsave(&mac->lock, flags); |
52 | mac->associnfo.associating = 1; | 52 | mac->associnfo.associating = 1; |
53 | mac->associated = 0; /* just to make sure */ | 53 | mac->associated = 0; /* just to make sure */ |
54 | spin_unlock_irqrestore(&mac->lock, flags); | ||
55 | 54 | ||
56 | /* Set a timer for timeout */ | 55 | /* Set a timer for timeout */ |
57 | /* FIXME: make timeout configurable */ | 56 | /* FIXME: make timeout configurable */ |
58 | schedule_delayed_work(&mac->associnfo.timeout, 5 * HZ); | 57 | if (likely(mac->running)) |
58 | schedule_delayed_work(&mac->associnfo.timeout, 5 * HZ); | ||
59 | spin_unlock_irqrestore(&mac->lock, flags); | ||
59 | } | 60 | } |
60 | 61 | ||
61 | void | 62 | void |
@@ -319,6 +320,9 @@ ieee80211softmac_handle_assoc_response(struct net_device * dev, | |||
319 | u16 status = le16_to_cpup(&resp->status); | 320 | u16 status = le16_to_cpup(&resp->status); |
320 | struct ieee80211softmac_network *network = NULL; | 321 | struct ieee80211softmac_network *network = NULL; |
321 | unsigned long flags; | 322 | unsigned long flags; |
323 | |||
324 | if (unlikely(!mac->running)) | ||
325 | return -ENODEV; | ||
322 | 326 | ||
323 | spin_lock_irqsave(&mac->lock, flags); | 327 | spin_lock_irqsave(&mac->lock, flags); |
324 | 328 | ||
@@ -377,10 +381,16 @@ ieee80211softmac_handle_disassoc(struct net_device * dev, | |||
377 | { | 381 | { |
378 | struct ieee80211softmac_device *mac = ieee80211_priv(dev); | 382 | struct ieee80211softmac_device *mac = ieee80211_priv(dev); |
379 | unsigned long flags; | 383 | unsigned long flags; |
384 | |||
385 | if (unlikely(!mac->running)) | ||
386 | return -ENODEV; | ||
387 | |||
380 | if (memcmp(disassoc->header.addr2, mac->associnfo.bssid, ETH_ALEN)) | 388 | if (memcmp(disassoc->header.addr2, mac->associnfo.bssid, ETH_ALEN)) |
381 | return 0; | 389 | return 0; |
390 | |||
382 | if (memcmp(disassoc->header.addr1, mac->dev->dev_addr, ETH_ALEN)) | 391 | if (memcmp(disassoc->header.addr1, mac->dev->dev_addr, ETH_ALEN)) |
383 | return 0; | 392 | return 0; |
393 | |||
384 | dprintk(KERN_INFO PFX "got disassoc frame\n"); | 394 | dprintk(KERN_INFO PFX "got disassoc frame\n"); |
385 | netif_carrier_off(dev); | 395 | netif_carrier_off(dev); |
386 | spin_lock_irqsave(&mac->lock, flags); | 396 | spin_lock_irqsave(&mac->lock, flags); |
@@ -400,6 +410,9 @@ ieee80211softmac_handle_reassoc_req(struct net_device * dev, | |||
400 | struct ieee80211softmac_device *mac = ieee80211_priv(dev); | 410 | struct ieee80211softmac_device *mac = ieee80211_priv(dev); |
401 | struct ieee80211softmac_network *network; | 411 | struct ieee80211softmac_network *network; |
402 | 412 | ||
413 | if (unlikely(!mac->running)) | ||
414 | return -ENODEV; | ||
415 | |||
403 | network = ieee80211softmac_get_network_by_bssid(mac, resp->header.addr3); | 416 | network = ieee80211softmac_get_network_by_bssid(mac, resp->header.addr3); |
404 | if (!network) { | 417 | if (!network) { |
405 | dprintkl(KERN_INFO PFX "reassoc request from unknown network\n"); | 418 | dprintkl(KERN_INFO PFX "reassoc request from unknown network\n"); |
diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c
index 9a0eac6c61eb..06e332624665 100644
--- a/net/ieee80211/softmac/ieee80211softmac_auth.c
+++ b/net/ieee80211/softmac/ieee80211softmac_auth.c
@@ -86,6 +86,11 @@ ieee80211softmac_auth_queue(void *data) | |||
86 | 86 | ||
87 | /* Lock and set flags */ | 87 | /* Lock and set flags */ |
88 | spin_lock_irqsave(&mac->lock, flags); | 88 | spin_lock_irqsave(&mac->lock, flags); |
89 | if (unlikely(!mac->running)) { | ||
90 | /* Prevent reschedule on workqueue flush */ | ||
91 | spin_unlock_irqrestore(&mac->lock, flags); | ||
92 | return; | ||
93 | } | ||
89 | net->authenticated = 0; | 94 | net->authenticated = 0; |
90 | net->authenticating = 1; | 95 | net->authenticating = 1; |
91 | /* add a timeout call so we eventually give up waiting for an auth reply */ | 96 | /* add a timeout call so we eventually give up waiting for an auth reply */ |
@@ -124,6 +129,9 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth) | |||
124 | unsigned long flags; | 129 | unsigned long flags; |
125 | u8 * data; | 130 | u8 * data; |
126 | 131 | ||
132 | if (unlikely(!mac->running)) | ||
133 | return -ENODEV; | ||
134 | |||
127 | /* Find correct auth queue item */ | 135 | /* Find correct auth queue item */ |
128 | spin_lock_irqsave(&mac->lock, flags); | 136 | spin_lock_irqsave(&mac->lock, flags); |
129 | list_for_each(list_ptr, &mac->auth_queue) { | 137 | list_for_each(list_ptr, &mac->auth_queue) { |
@@ -298,8 +306,6 @@ ieee80211softmac_deauth_from_net(struct ieee80211softmac_device *mac, | |||
298 | 306 | ||
299 | /* can't transmit data right now... */ | 307 | /* can't transmit data right now... */ |
300 | netif_carrier_off(mac->dev); | 308 | netif_carrier_off(mac->dev); |
301 | /* let's try to re-associate */ | ||
302 | schedule_work(&mac->associnfo.work); | ||
303 | spin_unlock_irqrestore(&mac->lock, flags); | 309 | spin_unlock_irqrestore(&mac->lock, flags); |
304 | } | 310 | } |
305 | 311 | ||
@@ -338,6 +344,9 @@ ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *de | |||
338 | struct ieee80211softmac_network *net = NULL; | 344 | struct ieee80211softmac_network *net = NULL; |
339 | struct ieee80211softmac_device *mac = ieee80211_priv(dev); | 345 | struct ieee80211softmac_device *mac = ieee80211_priv(dev); |
340 | 346 | ||
347 | if (unlikely(!mac->running)) | ||
348 | return -ENODEV; | ||
349 | |||
341 | if (!deauth) { | 350 | if (!deauth) { |
342 | dprintk("deauth without deauth packet. eek!\n"); | 351 | dprintk("deauth without deauth packet. eek!\n"); |
343 | return 0; | 352 | return 0; |
@@ -360,5 +369,8 @@ ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *de | |||
360 | } | 369 | } |
361 | 370 | ||
362 | ieee80211softmac_deauth_from_net(mac, net); | 371 | ieee80211softmac_deauth_from_net(mac, net); |
372 | |||
373 | /* let's try to re-associate */ | ||
374 | schedule_work(&mac->associnfo.work); | ||
363 | return 0; | 375 | return 0; |
364 | } | 376 | } |
diff --git a/net/ieee80211/softmac/ieee80211softmac_module.c b/net/ieee80211/softmac/ieee80211softmac_module.c
index be83bdc1644a..6252be2c0db9 100644
--- a/net/ieee80211/softmac/ieee80211softmac_module.c
+++ b/net/ieee80211/softmac/ieee80211softmac_module.c
@@ -89,6 +89,8 @@ ieee80211softmac_clear_pending_work(struct ieee80211softmac_device *sm) | |||
89 | ieee80211softmac_wait_for_scan(sm); | 89 | ieee80211softmac_wait_for_scan(sm); |
90 | 90 | ||
91 | spin_lock_irqsave(&sm->lock, flags); | 91 | spin_lock_irqsave(&sm->lock, flags); |
92 | sm->running = 0; | ||
93 | |||
92 | /* Free all pending assoc work items */ | 94 | /* Free all pending assoc work items */ |
93 | cancel_delayed_work(&sm->associnfo.work); | 95 | cancel_delayed_work(&sm->associnfo.work); |
94 | 96 | ||
@@ -204,6 +206,8 @@ void ieee80211softmac_start(struct net_device *dev) | |||
204 | assert(0); | 206 | assert(0); |
205 | if (mac->txrates_change) | 207 | if (mac->txrates_change) |
206 | mac->txrates_change(dev, change, &oldrates); | 208 | mac->txrates_change(dev, change, &oldrates); |
209 | |||
210 | mac->running = 1; | ||
207 | } | 211 | } |
208 | EXPORT_SYMBOL_GPL(ieee80211softmac_start); | 212 | EXPORT_SYMBOL_GPL(ieee80211softmac_start); |
209 | 213 | ||
diff --git a/net/ieee80211/softmac/ieee80211softmac_scan.c b/net/ieee80211/softmac/ieee80211softmac_scan.c
index 2b9e7edfa3ce..d31cf77498c4 100644
--- a/net/ieee80211/softmac/ieee80211softmac_scan.c
+++ b/net/ieee80211/softmac/ieee80211softmac_scan.c
@@ -115,7 +115,15 @@ void ieee80211softmac_scan(void *d) | |||
115 | // TODO: is this if correct, or should we do this only if scanning from assoc request? | 115 | // TODO: is this if correct, or should we do this only if scanning from assoc request? |
116 | if (sm->associnfo.req_essid.len) | 116 | if (sm->associnfo.req_essid.len) |
117 | ieee80211softmac_send_mgt_frame(sm, &sm->associnfo.req_essid, IEEE80211_STYPE_PROBE_REQ, 0); | 117 | ieee80211softmac_send_mgt_frame(sm, &sm->associnfo.req_essid, IEEE80211_STYPE_PROBE_REQ, 0); |
118 | |||
119 | spin_lock_irqsave(&sm->lock, flags); | ||
120 | if (unlikely(!sm->running)) { | ||
121 | /* Prevent reschedule on workqueue flush */ | ||
122 | spin_unlock_irqrestore(&sm->lock, flags); | ||
123 | break; | ||
124 | } | ||
118 | schedule_delayed_work(&si->softmac_scan, IEEE80211SOFTMAC_PROBE_DELAY); | 125 | schedule_delayed_work(&si->softmac_scan, IEEE80211SOFTMAC_PROBE_DELAY); |
126 | spin_unlock_irqrestore(&sm->lock, flags); | ||
119 | return; | 127 | return; |
120 | } else { | 128 | } else { |
121 | dprintk(PFX "Not probing Channel %d (not allowed here)\n", si->channels[current_channel_idx].channel); | 129 | dprintk(PFX "Not probing Channel %d (not allowed here)\n", si->channels[current_channel_idx].channel); |
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index dc206f1f914f..0a277453526b 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1257,7 +1257,7 @@ out_unregister_udp_proto: | |||
1257 | goto out; | 1257 | goto out; |
1258 | } | 1258 | } |
1259 | 1259 | ||
1260 | module_init(inet_init); | 1260 | fs_initcall(inet_init); |
1261 | 1261 | ||
1262 | /* ------------------------------------------------------------------------ */ | 1262 | /* ------------------------------------------------------------------------ */ |
1263 | 1263 | ||
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 18d7fad474d7..c9026dbf4c93 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -337,7 +337,7 @@ static inline int ip_rcv_finish(struct sk_buff *skb) | |||
337 | * Initialise the virtual path cache for the packet. It describes | 337 | * Initialise the virtual path cache for the packet. It describes |
338 | * how the packet travels inside Linux networking. | 338 | * how the packet travels inside Linux networking. |
339 | */ | 339 | */ |
340 | if (likely(skb->dst == NULL)) { | 340 | if (skb->dst == NULL) { |
341 | int err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, | 341 | int err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, |
342 | skb->dev); | 342 | skb->dev); |
343 | if (unlikely(err)) { | 343 | if (unlikely(err)) { |
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 9bebad07bf2e..cbcae6544622 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -209,7 +209,7 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb) | |||
209 | 209 | ||
210 | void ip_options_fragment(struct sk_buff * skb) | 210 | void ip_options_fragment(struct sk_buff * skb) |
211 | { | 211 | { |
212 | unsigned char * optptr = skb->nh.raw; | 212 | unsigned char * optptr = skb->nh.raw + sizeof(struct iphdr); |
213 | struct ip_options * opt = &(IPCB(skb)->opt); | 213 | struct ip_options * opt = &(IPCB(skb)->opt); |
214 | int l = opt->optlen; | 214 | int l = opt->optlen; |
215 | int optlen; | 215 | int optlen; |
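The ip_options_fragment() fix matters because IPv4 options start immediately after the fixed 20-byte header, not at the start of the header; the old optptr walked the fixed header bytes as if they were options. A minimal sketch of where the option bytes live (header layout per RFC 791; the sample packet bytes are made up):

#include <stdio.h>
#include <stdint.h>

/* Fixed part of an IPv4 header is 20 bytes; IHL counts 32-bit words. */
struct iphdr_model {
        uint8_t  ver_ihl;   /* version (4 bits) + header length in words */
        uint8_t  tos;
        uint16_t tot_len;
        uint8_t  rest[16];  /* id, frag, ttl, proto, csum, saddr, daddr */
};

int main(void)
{
        /* Hypothetical header: IHL = 6 words = 24 bytes, i.e. 4 option bytes. */
        unsigned char packet[24] = { 0x46, 0x00 /* remaining bytes zeroed */ };
        unsigned char *nh = packet;

        unsigned ihl = (nh[0] & 0x0f) * 4;                      /* total header length */
        unsigned char *optptr = nh + sizeof(struct iphdr_model); /* the corrected offset */
        unsigned optlen = ihl - sizeof(struct iphdr_model);

        printf("options start at offset %u, %u option bytes\n",
               (unsigned)(optptr - nh), optlen);
        return 0;
}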
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_h323.c b/net/ipv4/netfilter/ip_conntrack_helper_h323.c
index 2c2fb700d835..518f581d39ec 100644
--- a/net/ipv4/netfilter/ip_conntrack_helper_h323.c
+++ b/net/ipv4/netfilter/ip_conntrack_helper_h323.c
@@ -162,6 +162,8 @@ static int get_tpkt_data(struct sk_buff **pskb, struct ip_conntrack *ct, | |||
162 | 162 | ||
163 | /* Validate TPKT length */ | 163 | /* Validate TPKT length */ |
164 | tpktlen = tpkt[2] * 256 + tpkt[3]; | 164 | tpktlen = tpkt[2] * 256 + tpkt[3]; |
165 | if (tpktlen < 4) | ||
166 | goto clear_out; | ||
165 | if (tpktlen > tcpdatalen) { | 167 | if (tpktlen > tcpdatalen) { |
166 | if (tcpdatalen == 4) { /* Separate TPKT header */ | 168 | if (tcpdatalen == 4) { /* Separate TPKT header */ |
167 | /* Netmeeting sends TPKT header and data separately */ | 169 | /* Netmeeting sends TPKT header and data separately */ |
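The added tpktlen < 4 check reflects TPKT framing (RFC 1006): byte 0 is the version, byte 1 is reserved, and bytes 2-3 carry the total packet length including the 4-byte header itself, so any advertised length below 4 is malformed and must not be used to compute a payload size. A stand-alone sketch of that validation, with invented sample bytes and a simplified length check compared to the conntrack helper:

#include <stdio.h>

/* Parse a TPKT header (RFC 1006) and validate its length field. */
static int tpkt_payload_len(const unsigned char *tpkt, int avail)
{
        int tpktlen;

        if (avail < 4 || tpkt[0] != 0x03)       /* need a full header, version 3 */
                return -1;
        tpktlen = tpkt[2] * 256 + tpkt[3];      /* length includes the header */
        if (tpktlen < 4 || tpktlen > avail)     /* the check added above */
                return -1;
        return tpktlen - 4;                     /* bytes of H.225 payload */
}

int main(void)
{
        unsigned char good[] = { 0x03, 0x00, 0x00, 0x08, 'd', 'a', 't', 'a' };
        unsigned char bad[]  = { 0x03, 0x00, 0x00, 0x02 }; /* length < 4 */

        printf("good: %d payload bytes\n", tpkt_payload_len(good, sizeof(good)));
        printf("bad:  %d\n", tpkt_payload_len(bad, sizeof(bad)));
        return 0;
}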
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_h323_asn1.c b/net/ipv4/netfilter/ip_conntrack_helper_h323_asn1.c
index 48078002e450..355a53a5b6cd 100644
--- a/net/ipv4/netfilter/ip_conntrack_helper_h323_asn1.c
+++ b/net/ipv4/netfilter/ip_conntrack_helper_h323_asn1.c
@@ -2,7 +2,7 @@ | |||
2 | * ip_conntrack_helper_h323_asn1.c - BER and PER decoding library for H.323 | 2 | * ip_conntrack_helper_h323_asn1.c - BER and PER decoding library for H.323 |
3 | * conntrack/NAT module. | 3 | * conntrack/NAT module. |
4 | * | 4 | * |
5 | * Copyright (c) 2006 by Jing Min Zhao <zhaojingmin@hotmail.com> | 5 | * Copyright (c) 2006 by Jing Min Zhao <zhaojingmin@users.sourceforge.net> |
6 | * | 6 | * |
7 | * This source code is licensed under General Public License version 2. | 7 | * This source code is licensed under General Public License version 2. |
8 | * | 8 | * |
@@ -703,6 +703,10 @@ int decode_choice(bitstr_t * bs, field_t * f, char *base, int level) | |||
703 | type = get_bits(bs, f->sz); | 703 | type = get_bits(bs, f->sz); |
704 | } | 704 | } |
705 | 705 | ||
706 | /* Write Type */ | ||
707 | if (base) | ||
708 | *(unsigned *) base = type; | ||
709 | |||
706 | /* Check Range */ | 710 | /* Check Range */ |
707 | if (type >= f->ub) { /* Newer version? */ | 711 | if (type >= f->ub) { /* Newer version? */ |
708 | BYTE_ALIGN(bs); | 712 | BYTE_ALIGN(bs); |
@@ -712,10 +716,6 @@ int decode_choice(bitstr_t * bs, field_t * f, char *base, int level) | |||
712 | return H323_ERROR_NONE; | 716 | return H323_ERROR_NONE; |
713 | } | 717 | } |
714 | 718 | ||
715 | /* Write Type */ | ||
716 | if (base) | ||
717 | *(unsigned *) base = type; | ||
718 | |||
719 | /* Transfer to son level */ | 719 | /* Transfer to son level */ |
720 | son = &f->fields[type]; | 720 | son = &f->fields[type]; |
721 | if (son->attr & STOP) { | 721 | if (son->attr & STOP) { |
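
Moving the "Write Type" block ahead of the range check means the decoded CHOICE index is stored even when it falls outside the known range (the "newer version?" early-return path), instead of being lost. The control-flow point, reduced to a standalone sketch with illustrative names:

#include <stddef.h>

/* Record the decoded CHOICE index before deciding whether its body can be
 * decoded, so callers still see which alternative was sent even when it
 * comes from a newer protocol version. */
static int decode_choice_index(unsigned type, unsigned known_upper_bound,
                               unsigned *out)
{
        if (out)
                *out = type;                    /* store first ...            */
        if (type >= known_upper_bound)
                return 0;                       /* ... then skip unknown body */
        return 1;                               /* caller decodes the field   */
}
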
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_sctp.c b/net/ipv4/netfilter/ip_conntrack_proto_sctp.c index 5259abd0fb42..0416073c5600 100644 --- a/net/ipv4/netfilter/ip_conntrack_proto_sctp.c +++ b/net/ipv4/netfilter/ip_conntrack_proto_sctp.c | |||
@@ -235,12 +235,15 @@ static int do_basic_checks(struct ip_conntrack *conntrack, | |||
235 | flag = 1; | 235 | flag = 1; |
236 | } | 236 | } |
237 | 237 | ||
238 | /* Cookie Ack/Echo chunks not the first OR | 238 | /* |
239 | Init / Init Ack / Shutdown compl chunks not the only chunks */ | 239 | * Cookie Ack/Echo chunks not the first OR |
240 | if ((sch->type == SCTP_CID_COOKIE_ACK | 240 | * Init / Init Ack / Shutdown compl chunks not the only chunks |
241 | * OR zero-length. | ||
242 | */ | ||
243 | if (((sch->type == SCTP_CID_COOKIE_ACK | ||
241 | || sch->type == SCTP_CID_COOKIE_ECHO | 244 | || sch->type == SCTP_CID_COOKIE_ECHO |
242 | || flag) | 245 | || flag) |
243 | && count !=0 ) { | 246 | && count !=0) || !sch->length) { |
244 | DEBUGP("Basic checks failed\n"); | 247 | DEBUGP("Basic checks failed\n"); |
245 | return 1; | 248 | return 1; |
246 | } | 249 | } |
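
Both SCTP conntrack helpers (here and in net/netfilter below) gain the !sch->length case because a chunk with a zero length field would never advance the chunk walker. A standalone sketch of why that is fatal for the walk (layout simplified, length assumed already in host byte order):

#include <stdint.h>
#include <stddef.h>

struct chunk_hdr {
        uint8_t  type;
        uint8_t  flags;
        uint16_t length;        /* covers this header plus value, pre-padding */
};

/* Walk chunks packed back to back in buf[0..len).  Each step advances by
 * the chunk length rounded up to 4 bytes; a zero length gives a step of 0
 * and the loop would never terminate, hence the reject.  A full parser
 * would also require length >= sizeof(struct chunk_hdr). */
static int count_chunks(const uint8_t *buf, size_t len)
{
        size_t off = 0;
        int count = 0;

        while (off + sizeof(struct chunk_hdr) <= len) {
                const struct chunk_hdr *ch = (const void *)(buf + off);
                size_t step = ((size_t)ch->length + 3) & ~(size_t)3;

                if (ch->length == 0)
                        return -1;      /* malformed: would loop forever */
                off += step;
                count++;
        }
        return count;
}
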
diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c index 8f760b28617e..67e676783da9 100644 --- a/net/ipv4/netfilter/ip_nat_standalone.c +++ b/net/ipv4/netfilter/ip_nat_standalone.c | |||
@@ -219,8 +219,10 @@ ip_nat_out(unsigned int hooknum, | |||
219 | const struct net_device *out, | 219 | const struct net_device *out, |
220 | int (*okfn)(struct sk_buff *)) | 220 | int (*okfn)(struct sk_buff *)) |
221 | { | 221 | { |
222 | #ifdef CONFIG_XFRM | ||
222 | struct ip_conntrack *ct; | 223 | struct ip_conntrack *ct; |
223 | enum ip_conntrack_info ctinfo; | 224 | enum ip_conntrack_info ctinfo; |
225 | #endif | ||
224 | unsigned int ret; | 226 | unsigned int ret; |
225 | 227 | ||
226 | /* root is playing with raw sockets. */ | 228 | /* root is playing with raw sockets. */ |
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index d25ac8ba6eba..cee3397ec277 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c | |||
@@ -956,15 +956,16 @@ struct compat_ipt_standard_target | |||
956 | compat_int_t verdict; | 956 | compat_int_t verdict; |
957 | }; | 957 | }; |
958 | 958 | ||
959 | #define IPT_ST_OFFSET (sizeof(struct ipt_standard_target) - \ | ||
960 | sizeof(struct compat_ipt_standard_target)) | ||
961 | |||
962 | struct compat_ipt_standard | 959 | struct compat_ipt_standard |
963 | { | 960 | { |
964 | struct compat_ipt_entry entry; | 961 | struct compat_ipt_entry entry; |
965 | struct compat_ipt_standard_target target; | 962 | struct compat_ipt_standard_target target; |
966 | }; | 963 | }; |
967 | 964 | ||
965 | #define IPT_ST_LEN XT_ALIGN(sizeof(struct ipt_standard_target)) | ||
966 | #define IPT_ST_COMPAT_LEN COMPAT_XT_ALIGN(sizeof(struct compat_ipt_standard_target)) | ||
967 | #define IPT_ST_OFFSET (IPT_ST_LEN - IPT_ST_COMPAT_LEN) | ||
968 | |||
968 | static int compat_ipt_standard_fn(void *target, | 969 | static int compat_ipt_standard_fn(void *target, |
969 | void **dstptr, int *size, int convert) | 970 | void **dstptr, int *size, int convert) |
970 | { | 971 | { |
@@ -975,35 +976,29 @@ static int compat_ipt_standard_fn(void *target, | |||
975 | ret = 0; | 976 | ret = 0; |
976 | switch (convert) { | 977 | switch (convert) { |
977 | case COMPAT_TO_USER: | 978 | case COMPAT_TO_USER: |
978 | pst = (struct ipt_standard_target *)target; | 979 | pst = target; |
979 | memcpy(&compat_st.target, &pst->target, | 980 | memcpy(&compat_st.target, &pst->target, |
980 | sizeof(struct ipt_entry_target)); | 981 | sizeof(compat_st.target)); |
981 | compat_st.verdict = pst->verdict; | 982 | compat_st.verdict = pst->verdict; |
982 | if (compat_st.verdict > 0) | 983 | if (compat_st.verdict > 0) |
983 | compat_st.verdict -= | 984 | compat_st.verdict -= |
984 | compat_calc_jump(compat_st.verdict); | 985 | compat_calc_jump(compat_st.verdict); |
985 | compat_st.target.u.user.target_size = | 986 | compat_st.target.u.user.target_size = IPT_ST_COMPAT_LEN; |
986 | sizeof(struct compat_ipt_standard_target); | 987 | if (copy_to_user(*dstptr, &compat_st, IPT_ST_COMPAT_LEN)) |
987 | if (__copy_to_user(*dstptr, &compat_st, | ||
988 | sizeof(struct compat_ipt_standard_target))) | ||
989 | ret = -EFAULT; | 988 | ret = -EFAULT; |
990 | *size -= IPT_ST_OFFSET; | 989 | *size -= IPT_ST_OFFSET; |
991 | *dstptr += sizeof(struct compat_ipt_standard_target); | 990 | *dstptr += IPT_ST_COMPAT_LEN; |
992 | break; | 991 | break; |
993 | case COMPAT_FROM_USER: | 992 | case COMPAT_FROM_USER: |
994 | pcompat_st = | 993 | pcompat_st = target; |
995 | (struct compat_ipt_standard_target *)target; | 994 | memcpy(&st.target, &pcompat_st->target, IPT_ST_COMPAT_LEN); |
996 | memcpy(&st.target, &pcompat_st->target, | ||
997 | sizeof(struct ipt_entry_target)); | ||
998 | st.verdict = pcompat_st->verdict; | 995 | st.verdict = pcompat_st->verdict; |
999 | if (st.verdict > 0) | 996 | if (st.verdict > 0) |
1000 | st.verdict += compat_calc_jump(st.verdict); | 997 | st.verdict += compat_calc_jump(st.verdict); |
1001 | st.target.u.user.target_size = | 998 | st.target.u.user.target_size = IPT_ST_LEN; |
1002 | sizeof(struct ipt_standard_target); | 999 | memcpy(*dstptr, &st, IPT_ST_LEN); |
1003 | memcpy(*dstptr, &st, | ||
1004 | sizeof(struct ipt_standard_target)); | ||
1005 | *size += IPT_ST_OFFSET; | 1000 | *size += IPT_ST_OFFSET; |
1006 | *dstptr += sizeof(struct ipt_standard_target); | 1001 | *dstptr += IPT_ST_LEN; |
1007 | break; | 1002 | break; |
1008 | case COMPAT_CALC_SIZE: | 1003 | case COMPAT_CALC_SIZE: |
1009 | *size += IPT_ST_OFFSET; | 1004 | *size += IPT_ST_OFFSET; |
@@ -1446,7 +1441,7 @@ static int compat_copy_entry_to_user(struct ipt_entry *e, | |||
1446 | ret = -EFAULT; | 1441 | ret = -EFAULT; |
1447 | origsize = *size; | 1442 | origsize = *size; |
1448 | ce = (struct compat_ipt_entry __user *)*dstptr; | 1443 | ce = (struct compat_ipt_entry __user *)*dstptr; |
1449 | if (__copy_to_user(ce, e, sizeof(struct ipt_entry))) | 1444 | if (copy_to_user(ce, e, sizeof(struct ipt_entry))) |
1450 | goto out; | 1445 | goto out; |
1451 | 1446 | ||
1452 | *dstptr += sizeof(struct compat_ipt_entry); | 1447 | *dstptr += sizeof(struct compat_ipt_entry); |
@@ -1464,9 +1459,9 @@ static int compat_copy_entry_to_user(struct ipt_entry *e, | |||
1464 | goto out; | 1459 | goto out; |
1465 | ret = -EFAULT; | 1460 | ret = -EFAULT; |
1466 | next_offset = e->next_offset - (origsize - *size); | 1461 | next_offset = e->next_offset - (origsize - *size); |
1467 | if (__put_user(target_offset, &ce->target_offset)) | 1462 | if (put_user(target_offset, &ce->target_offset)) |
1468 | goto out; | 1463 | goto out; |
1469 | if (__put_user(next_offset, &ce->next_offset)) | 1464 | if (put_user(next_offset, &ce->next_offset)) |
1470 | goto out; | 1465 | goto out; |
1471 | return 0; | 1466 | return 0; |
1472 | out: | 1467 | out: |
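
The reworked ip_tables macros derive the copied sizes from the alignment helpers (XT_ALIGN / COMPAT_XT_ALIGN) and compute IPT_ST_OFFSET from those aligned lengths, so the *size bookkeeping matches what copy_to_user()/memcpy() actually move. A standalone sketch of the idea with a generic align-up macro (the two sizes are made up for illustration):

#include <stdio.h>
#include <stddef.h>

/* Round sz up to a multiple of 'a' (a power of two), the same shape as the
 * kernel's XT_ALIGN()/COMPAT_XT_ALIGN() helpers. */
#define ALIGN_UP(sz, a)  (((sz) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
        size_t native_len = ALIGN_UP(40, 8);   /* pretend native target size */
        size_t compat_len = ALIGN_UP(36, 4);   /* pretend compat target size */
        size_t offset     = native_len - compat_len;

        /* The offset added to or subtracted from *size while converting
         * between layouts has to match the aligned lengths used for the
         * copies, otherwise the computed blob size drifts. */
        printf("native=%zu compat=%zu offset=%zu\n", native_len, compat_len, offset);
        return 0;
}
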
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 87f68e787d0c..e2b7b8055037 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -1468,6 +1468,7 @@ void tcp_close(struct sock *sk, long timeout) | |||
1468 | { | 1468 | { |
1469 | struct sk_buff *skb; | 1469 | struct sk_buff *skb; |
1470 | int data_was_unread = 0; | 1470 | int data_was_unread = 0; |
1471 | int state; | ||
1471 | 1472 | ||
1472 | lock_sock(sk); | 1473 | lock_sock(sk); |
1473 | sk->sk_shutdown = SHUTDOWN_MASK; | 1474 | sk->sk_shutdown = SHUTDOWN_MASK; |
@@ -1544,6 +1545,11 @@ void tcp_close(struct sock *sk, long timeout) | |||
1544 | sk_stream_wait_close(sk, timeout); | 1545 | sk_stream_wait_close(sk, timeout); |
1545 | 1546 | ||
1546 | adjudge_to_death: | 1547 | adjudge_to_death: |
1548 | state = sk->sk_state; | ||
1549 | sock_hold(sk); | ||
1550 | sock_orphan(sk); | ||
1551 | atomic_inc(sk->sk_prot->orphan_count); | ||
1552 | |||
1547 | /* It is the last release_sock in its life. It will remove backlog. */ | 1553 | /* It is the last release_sock in its life. It will remove backlog. */ |
1548 | release_sock(sk); | 1554 | release_sock(sk); |
1549 | 1555 | ||
@@ -1555,8 +1561,9 @@ adjudge_to_death: | |||
1555 | bh_lock_sock(sk); | 1561 | bh_lock_sock(sk); |
1556 | BUG_TRAP(!sock_owned_by_user(sk)); | 1562 | BUG_TRAP(!sock_owned_by_user(sk)); |
1557 | 1563 | ||
1558 | sock_hold(sk); | 1564 | /* Have we already been destroyed by a softirq or backlog? */ |
1559 | sock_orphan(sk); | 1565 | if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) |
1566 | goto out; | ||
1560 | 1567 | ||
1561 | /* This is a (useful) BSD violating of the RFC. There is a | 1568 | /* This is a (useful) BSD violating of the RFC. There is a |
1562 | * problem with TCP as specified in that the other end could | 1569 | * problem with TCP as specified in that the other end could |
@@ -1584,7 +1591,6 @@ adjudge_to_death: | |||
1584 | if (tmo > TCP_TIMEWAIT_LEN) { | 1591 | if (tmo > TCP_TIMEWAIT_LEN) { |
1585 | inet_csk_reset_keepalive_timer(sk, tcp_fin_time(sk)); | 1592 | inet_csk_reset_keepalive_timer(sk, tcp_fin_time(sk)); |
1586 | } else { | 1593 | } else { |
1587 | atomic_inc(sk->sk_prot->orphan_count); | ||
1588 | tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); | 1594 | tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); |
1589 | goto out; | 1595 | goto out; |
1590 | } | 1596 | } |
@@ -1603,7 +1609,6 @@ adjudge_to_death: | |||
1603 | NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY); | 1609 | NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY); |
1604 | } | 1610 | } |
1605 | } | 1611 | } |
1606 | atomic_inc(sk->sk_prot->orphan_count); | ||
1607 | 1612 | ||
1608 | if (sk->sk_state == TCP_CLOSE) | 1613 | if (sk->sk_state == TCP_CLOSE) |
1609 | inet_csk_destroy_sock(sk); | 1614 | inet_csk_destroy_sock(sk); |
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c index e0e9d1383c7c..b72fa55dfb84 100644 --- a/net/ipv4/tcp_highspeed.c +++ b/net/ipv4/tcp_highspeed.c | |||
@@ -137,8 +137,8 @@ static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 rtt, | |||
137 | if (tp->snd_cwnd < tp->snd_cwnd_clamp) { | 137 | if (tp->snd_cwnd < tp->snd_cwnd_clamp) { |
138 | tp->snd_cwnd_cnt += ca->ai; | 138 | tp->snd_cwnd_cnt += ca->ai; |
139 | if (tp->snd_cwnd_cnt >= tp->snd_cwnd) { | 139 | if (tp->snd_cwnd_cnt >= tp->snd_cwnd) { |
140 | tp->snd_cwnd++; | ||
141 | tp->snd_cwnd_cnt -= tp->snd_cwnd; | 140 | tp->snd_cwnd_cnt -= tp->snd_cwnd; |
141 | tp->snd_cwnd++; | ||
142 | } | 142 | } |
143 | } | 143 | } |
144 | } | 144 | } |
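
The HighSpeed TCP hunk only swaps two statements, but the order matters: the old code grew snd_cwnd first and then subtracted the already-enlarged window from snd_cwnd_cnt, taking one segment of credit too many, and when the counter equals the window exactly the unsigned subtraction underflows. Reduced to plain arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned int snd_cwnd = 10, snd_cwnd_cnt = 10;

        /* Fixed ordering: consume the credit against the current window,
         * then grow it.  Counter ends at 0 as intended. */
        if (snd_cwnd_cnt >= snd_cwnd) {
                snd_cwnd_cnt -= snd_cwnd;
                snd_cwnd++;
        }
        printf("fixed: cwnd=%u cnt=%u\n", snd_cwnd, snd_cwnd_cnt);

        /* Old ordering for comparison: the subtraction uses the incremented
         * window and here underflows the unsigned counter (10 - 11). */
        unsigned int cwnd = 10, cnt = 10;
        if (cnt >= cwnd) {
                cwnd++;
                cnt -= cwnd;
        }
        printf("old:   cwnd=%u cnt=%u\n", cwnd, cnt);
        return 0;
}
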
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index a28ae593b976..743016baa048 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -465,7 +465,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
465 | TCP_INC_STATS(TCP_MIB_OUTSEGS); | 465 | TCP_INC_STATS(TCP_MIB_OUTSEGS); |
466 | 466 | ||
467 | err = icsk->icsk_af_ops->queue_xmit(skb, 0); | 467 | err = icsk->icsk_af_ops->queue_xmit(skb, 0); |
468 | if (unlikely(err <= 0)) | 468 | if (likely(err <= 0)) |
469 | return err; | 469 | return err; |
470 | 470 | ||
471 | tcp_enter_cwr(sk); | 471 | tcp_enter_cwr(sk); |
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index 32ad229b4fed..4ef8efaf6a67 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c | |||
@@ -62,7 +62,7 @@ static void xfrm4_encap(struct sk_buff *skb) | |||
62 | top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ? | 62 | top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ? |
63 | 0 : (iph->frag_off & htons(IP_DF)); | 63 | 0 : (iph->frag_off & htons(IP_DF)); |
64 | if (!top_iph->frag_off) | 64 | if (!top_iph->frag_off) |
65 | __ip_select_ident(top_iph, dst, 0); | 65 | __ip_select_ident(top_iph, dst->child, 0); |
66 | 66 | ||
67 | top_iph->ttl = dst_metric(dst->child, RTAX_HOPLIMIT); | 67 | top_iph->ttl = dst_metric(dst->child, RTAX_HOPLIMIT); |
68 | 68 | ||
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index f8f3a37a1494..eb2865d5ae28 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c | |||
@@ -173,6 +173,7 @@ int inet6_csk_xmit(struct sk_buff *skb, int ipfragok) | |||
173 | 173 | ||
174 | if (err) { | 174 | if (err) { |
175 | sk->sk_err_soft = -err; | 175 | sk->sk_err_soft = -err; |
176 | kfree_skb(skb); | ||
176 | return err; | 177 | return err; |
177 | } | 178 | } |
178 | 179 | ||
@@ -181,6 +182,7 @@ int inet6_csk_xmit(struct sk_buff *skb, int ipfragok) | |||
181 | 182 | ||
182 | if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) { | 183 | if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) { |
183 | sk->sk_route_caps = 0; | 184 | sk->sk_route_caps = 0; |
185 | kfree_skb(skb); | ||
184 | return err; | 186 | return err; |
185 | } | 187 | } |
186 | 188 | ||
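
The two added kfree_skb() calls appear to plug a leak: inet6_csk_xmit() owns the skb once it is called, so returning early on a routing or xfrm_lookup() failure without freeing it would lose the buffer. The ownership rule in miniature, outside the kernel (names are illustrative):

#include <stdlib.h>

/* A transmit routine that takes ownership of 'buf': every exit path must
 * either hand the buffer on or free it, including the early error returns. */
static int xmit_takes_ownership(void *buf, int route_ok, int policy_ok)
{
        if (!route_ok) {
                free(buf);              /* without this, the error path leaks */
                return -1;
        }
        if (!policy_ok) {
                free(buf);
                return -1;
        }
        free(buf);                      /* stand-in for "queued and consumed" */
        return 0;
}

int main(void)
{
        return xmit_takes_ownership(malloc(64), 0, 1) == -1 ? 0 : 1;
}
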
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 79078747a646..0190e39096b9 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -317,7 +317,7 @@ static struct rt6_info *rt6_select(struct rt6_info **head, int oif, | |||
317 | __FUNCTION__, head, head ? *head : NULL, oif); | 317 | __FUNCTION__, head, head ? *head : NULL, oif); |
318 | 318 | ||
319 | for (rt = rt0, metric = rt0->rt6i_metric; | 319 | for (rt = rt0, metric = rt0->rt6i_metric; |
320 | rt && rt->rt6i_metric == metric; | 320 | rt && rt->rt6i_metric == metric && (!last || rt != rt0); |
321 | rt = rt->u.next) { | 321 | rt = rt->u.next) { |
322 | int m; | 322 | int m; |
323 | 323 | ||
@@ -343,9 +343,12 @@ static struct rt6_info *rt6_select(struct rt6_info **head, int oif, | |||
343 | (strict & RT6_SELECT_F_REACHABLE) && | 343 | (strict & RT6_SELECT_F_REACHABLE) && |
344 | last && last != rt0) { | 344 | last && last != rt0) { |
345 | /* no entries matched; do round-robin */ | 345 | /* no entries matched; do round-robin */ |
346 | static spinlock_t lock = SPIN_LOCK_UNLOCKED; | ||
347 | spin_lock(&lock); | ||
346 | *head = rt0->u.next; | 348 | *head = rt0->u.next; |
347 | rt0->u.next = last->u.next; | 349 | rt0->u.next = last->u.next; |
348 | last->u.next = rt0; | 350 | last->u.next = rt0; |
351 | spin_unlock(&lock); | ||
349 | } | 352 | } |
350 | 353 | ||
351 | RT6_TRACE("%s() => %p, score=%d\n", | 354 | RT6_TRACE("%s() => %p, score=%d\n", |
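
Two things change in rt6_select(): the scan now also stops if it comes back around to rt0 once a previous candidate has been seen, and the round-robin rotation is wrapped in a spinlock so that two CPUs doing it at once cannot corrupt the list, since it rewrites three next pointers. The rotation itself, as list surgery on a toy singly linked list:

#include <stdio.h>

struct rt { int id; struct rt *next; };

/* Move the current head 'rt0' to just after 'last': the same three pointer
 * writes as the round-robin block in rt6_select(). */
static void rotate(struct rt **head, struct rt *rt0, struct rt *last)
{
        *head = rt0->next;              /* list now starts at the runner-up   */
        rt0->next = last->next;         /* old head inherits last's successor */
        last->next = rt0;               /* ... and is re-linked after 'last'  */
}

int main(void)
{
        struct rt c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct rt *head = &a;

        rotate(&head, &a, &c);          /* 1-2-3 becomes 2-3-1 */
        for (struct rt *p = head; p; p = p->next)
                printf("%d ", p->id);
        printf("\n");
        return 0;
}
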
diff --git a/net/irda/irias_object.c b/net/irda/irias_object.c index c6d169fbdceb..82e665c79991 100644 --- a/net/irda/irias_object.c +++ b/net/irda/irias_object.c | |||
@@ -257,7 +257,6 @@ struct ias_attrib *irias_find_attrib(struct ias_object *obj, char *name) | |||
257 | /* Unsafe (locking), attrib might change */ | 257 | /* Unsafe (locking), attrib might change */ |
258 | return attrib; | 258 | return attrib; |
259 | } | 259 | } |
260 | EXPORT_SYMBOL(irias_find_attrib); | ||
261 | 260 | ||
262 | /* | 261 | /* |
263 | * Function irias_add_attribute (obj, attrib) | 262 | * Function irias_add_attribute (obj, attrib) |
@@ -484,7 +483,6 @@ struct ias_value *irias_new_string_value(char *string) | |||
484 | 483 | ||
485 | return value; | 484 | return value; |
486 | } | 485 | } |
487 | EXPORT_SYMBOL(irias_new_string_value); | ||
488 | 486 | ||
489 | /* | 487 | /* |
490 | * Function irias_new_octseq_value (octets, len) | 488 | * Function irias_new_octseq_value (octets, len) |
@@ -519,7 +517,6 @@ struct ias_value *irias_new_octseq_value(__u8 *octseq , int len) | |||
519 | memcpy(value->t.oct_seq, octseq , len); | 517 | memcpy(value->t.oct_seq, octseq , len); |
520 | return value; | 518 | return value; |
521 | } | 519 | } |
522 | EXPORT_SYMBOL(irias_new_octseq_value); | ||
523 | 520 | ||
524 | struct ias_value *irias_new_missing_value(void) | 521 | struct ias_value *irias_new_missing_value(void) |
525 | { | 522 | { |
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 9cccc325b687..0c6da496cfa9 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c | |||
@@ -240,12 +240,15 @@ static int do_basic_checks(struct nf_conn *conntrack, | |||
240 | flag = 1; | 240 | flag = 1; |
241 | } | 241 | } |
242 | 242 | ||
243 | /* Cookie Ack/Echo chunks not the first OR | 243 | /* |
244 | Init / Init Ack / Shutdown compl chunks not the only chunks */ | 244 | * Cookie Ack/Echo chunks not the first OR |
245 | if ((sch->type == SCTP_CID_COOKIE_ACK | 245 | * Init / Init Ack / Shutdown compl chunks not the only chunks |
246 | * OR zero-length. | ||
247 | */ | ||
248 | if (((sch->type == SCTP_CID_COOKIE_ACK | ||
246 | || sch->type == SCTP_CID_COOKIE_ECHO | 249 | || sch->type == SCTP_CID_COOKIE_ECHO |
247 | || flag) | 250 | || flag) |
248 | && count !=0 ) { | 251 | && count !=0) || !sch->length) { |
249 | DEBUGP("Basic checks failed\n"); | 252 | DEBUGP("Basic checks failed\n"); |
250 | return 1; | 253 | return 1; |
251 | } | 254 | } |
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 17abf60f9570..99293c63ff73 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c | |||
@@ -289,7 +289,7 @@ int xt_compat_match(void *match, void **dstptr, int *size, int convert) | |||
289 | case COMPAT_TO_USER: | 289 | case COMPAT_TO_USER: |
290 | pm = (struct xt_entry_match *)match; | 290 | pm = (struct xt_entry_match *)match; |
291 | msize = pm->u.user.match_size; | 291 | msize = pm->u.user.match_size; |
292 | if (__copy_to_user(*dstptr, pm, msize)) { | 292 | if (copy_to_user(*dstptr, pm, msize)) { |
293 | ret = -EFAULT; | 293 | ret = -EFAULT; |
294 | break; | 294 | break; |
295 | } | 295 | } |
@@ -366,7 +366,7 @@ int xt_compat_target(void *target, void **dstptr, int *size, int convert) | |||
366 | case COMPAT_TO_USER: | 366 | case COMPAT_TO_USER: |
367 | pt = (struct xt_entry_target *)target; | 367 | pt = (struct xt_entry_target *)target; |
368 | tsize = pt->u.user.target_size; | 368 | tsize = pt->u.user.target_size; |
369 | if (__copy_to_user(*dstptr, pt, tsize)) { | 369 | if (copy_to_user(*dstptr, pt, tsize)) { |
370 | ret = -EFAULT; | 370 | ret = -EFAULT; |
371 | break; | 371 | break; |
372 | } | 372 | } |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 2a233ffcf618..3862e73d14d7 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -56,12 +56,12 @@ | |||
56 | #include <linux/mm.h> | 56 | #include <linux/mm.h> |
57 | #include <linux/types.h> | 57 | #include <linux/types.h> |
58 | #include <linux/audit.h> | 58 | #include <linux/audit.h> |
59 | #include <linux/selinux.h> | ||
59 | 60 | ||
60 | #include <net/sock.h> | 61 | #include <net/sock.h> |
61 | #include <net/scm.h> | 62 | #include <net/scm.h> |
62 | #include <net/netlink.h> | 63 | #include <net/netlink.h> |
63 | 64 | ||
64 | #define Nprintk(a...) | ||
65 | #define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8) | 65 | #define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8) |
66 | 66 | ||
67 | struct netlink_sock { | 67 | struct netlink_sock { |
@@ -1157,6 +1157,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock, | |||
1157 | NETLINK_CB(skb).dst_pid = dst_pid; | 1157 | NETLINK_CB(skb).dst_pid = dst_pid; |
1158 | NETLINK_CB(skb).dst_group = dst_group; | 1158 | NETLINK_CB(skb).dst_group = dst_group; |
1159 | NETLINK_CB(skb).loginuid = audit_get_loginuid(current->audit_context); | 1159 | NETLINK_CB(skb).loginuid = audit_get_loginuid(current->audit_context); |
1160 | selinux_get_task_sid(current, &(NETLINK_CB(skb).sid)); | ||
1160 | memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); | 1161 | memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); |
1161 | 1162 | ||
1162 | /* What can I do? Netlink is asynchronous, so that | 1163 | /* What can I do? Netlink is asynchronous, so that |
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index d44981f5a619..3669cb953e6e 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c | |||
@@ -425,11 +425,16 @@ static int nr_create(struct socket *sock, int protocol) | |||
425 | 425 | ||
426 | nr_init_timers(sk); | 426 | nr_init_timers(sk); |
427 | 427 | ||
428 | nr->t1 = sysctl_netrom_transport_timeout; | 428 | nr->t1 = |
429 | nr->t2 = sysctl_netrom_transport_acknowledge_delay; | 429 | msecs_to_jiffies(sysctl_netrom_transport_timeout); |
430 | nr->n2 = sysctl_netrom_transport_maximum_tries; | 430 | nr->t2 = |
431 | nr->t4 = sysctl_netrom_transport_busy_delay; | 431 | msecs_to_jiffies(sysctl_netrom_transport_acknowledge_delay); |
432 | nr->idle = sysctl_netrom_transport_no_activity_timeout; | 432 | nr->n2 = |
433 | msecs_to_jiffies(sysctl_netrom_transport_maximum_tries); | ||
434 | nr->t4 = | ||
435 | msecs_to_jiffies(sysctl_netrom_transport_busy_delay); | ||
436 | nr->idle = | ||
437 | msecs_to_jiffies(sysctl_netrom_transport_no_activity_timeout); | ||
433 | nr->window = sysctl_netrom_transport_requested_window_size; | 438 | nr->window = sysctl_netrom_transport_requested_window_size; |
434 | 439 | ||
435 | nr->bpqext = 1; | 440 | nr->bpqext = 1; |
@@ -1365,8 +1370,6 @@ static struct notifier_block nr_dev_notifier = { | |||
1365 | 1370 | ||
1366 | static struct net_device **dev_nr; | 1371 | static struct net_device **dev_nr; |
1367 | 1372 | ||
1368 | static char banner[] __initdata = KERN_INFO "G4KLX NET/ROM for Linux. Version 0.7 for AX25.037 Linux 2.4\n"; | ||
1369 | |||
1370 | static int __init nr_proto_init(void) | 1373 | static int __init nr_proto_init(void) |
1371 | { | 1374 | { |
1372 | int i; | 1375 | int i; |
@@ -1414,7 +1417,6 @@ static int __init nr_proto_init(void) | |||
1414 | } | 1417 | } |
1415 | 1418 | ||
1416 | register_netdevice_notifier(&nr_dev_notifier); | 1419 | register_netdevice_notifier(&nr_dev_notifier); |
1417 | printk(banner); | ||
1418 | 1420 | ||
1419 | ax25_protocol_register(AX25_P_NETROM, nr_route_frame); | 1421 | ax25_protocol_register(AX25_P_NETROM, nr_route_frame); |
1420 | ax25_linkfail_register(nr_link_failed); | 1422 | ax25_linkfail_register(nr_link_failed); |
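
The NET/ROM sysctls (and the ROSE ones in the hunks below) are stored in milliseconds, while the protocol timers run in jiffies, so the values are now passed through msecs_to_jiffies() at the point of use rather than being treated as jiffies directly. The conversion is, roughly, milliseconds scaled by the tick rate and rounded up; a userspace approximation (the real msecs_to_jiffies() also handles overflow and special HZ values):

#include <stdio.h>

/* Approximate ms -> ticks conversion: scale by the tick rate and round up
 * so a non-zero timeout never becomes zero ticks. */
static unsigned long ms_to_ticks(unsigned long ms, unsigned long hz)
{
        return (ms * hz + 999) / 1000;
}

int main(void)
{
        /* e.g. a 120000 ms transport timeout at HZ=250 -> 30000 ticks */
        printf("%lu\n", ms_to_ticks(120000, 250));
        return 0;
}
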
diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c index 509afddae569..621e5586ab03 100644 --- a/net/netrom/nr_dev.c +++ b/net/netrom/nr_dev.c | |||
@@ -185,7 +185,6 @@ static struct net_device_stats *nr_get_stats(struct net_device *dev) | |||
185 | 185 | ||
186 | void nr_setup(struct net_device *dev) | 186 | void nr_setup(struct net_device *dev) |
187 | { | 187 | { |
188 | SET_MODULE_OWNER(dev); | ||
189 | dev->mtu = NR_MAX_PACKET_SIZE; | 188 | dev->mtu = NR_MAX_PACKET_SIZE; |
190 | dev->hard_start_xmit = nr_xmit; | 189 | dev->hard_start_xmit = nr_xmit; |
191 | dev->open = nr_open; | 190 | dev->open = nr_open; |
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index ea65396d1619..55564efccf11 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c | |||
@@ -518,11 +518,11 @@ static int rose_create(struct socket *sock, int protocol) | |||
518 | init_timer(&rose->timer); | 518 | init_timer(&rose->timer); |
519 | init_timer(&rose->idletimer); | 519 | init_timer(&rose->idletimer); |
520 | 520 | ||
521 | rose->t1 = sysctl_rose_call_request_timeout; | 521 | rose->t1 = msecs_to_jiffies(sysctl_rose_call_request_timeout); |
522 | rose->t2 = sysctl_rose_reset_request_timeout; | 522 | rose->t2 = msecs_to_jiffies(sysctl_rose_reset_request_timeout); |
523 | rose->t3 = sysctl_rose_clear_request_timeout; | 523 | rose->t3 = msecs_to_jiffies(sysctl_rose_clear_request_timeout); |
524 | rose->hb = sysctl_rose_ack_hold_back_timeout; | 524 | rose->hb = msecs_to_jiffies(sysctl_rose_ack_hold_back_timeout); |
525 | rose->idle = sysctl_rose_no_activity_timeout; | 525 | rose->idle = msecs_to_jiffies(sysctl_rose_no_activity_timeout); |
526 | 526 | ||
527 | rose->state = ROSE_STATE_0; | 527 | rose->state = ROSE_STATE_0; |
528 | 528 | ||
@@ -1469,8 +1469,6 @@ static struct notifier_block rose_dev_notifier = { | |||
1469 | 1469 | ||
1470 | static struct net_device **dev_rose; | 1470 | static struct net_device **dev_rose; |
1471 | 1471 | ||
1472 | static const char banner[] = KERN_INFO "F6FBB/G4KLX ROSE for Linux. Version 0.62 for AX25.037 Linux 2.4\n"; | ||
1473 | |||
1474 | static int __init rose_proto_init(void) | 1472 | static int __init rose_proto_init(void) |
1475 | { | 1473 | { |
1476 | int i; | 1474 | int i; |
@@ -1519,7 +1517,6 @@ static int __init rose_proto_init(void) | |||
1519 | 1517 | ||
1520 | sock_register(&rose_family_ops); | 1518 | sock_register(&rose_family_ops); |
1521 | register_netdevice_notifier(&rose_dev_notifier); | 1519 | register_netdevice_notifier(&rose_dev_notifier); |
1522 | printk(banner); | ||
1523 | 1520 | ||
1524 | ax25_protocol_register(AX25_P_ROSE, rose_route_frame); | 1521 | ax25_protocol_register(AX25_P_ROSE, rose_route_frame); |
1525 | ax25_linkfail_register(rose_link_failed); | 1522 | ax25_linkfail_register(rose_link_failed); |
diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c index d297af737d10..2a1bf8e119e5 100644 --- a/net/rose/rose_dev.c +++ b/net/rose/rose_dev.c | |||
@@ -135,7 +135,6 @@ static struct net_device_stats *rose_get_stats(struct net_device *dev) | |||
135 | 135 | ||
136 | void rose_setup(struct net_device *dev) | 136 | void rose_setup(struct net_device *dev) |
137 | { | 137 | { |
138 | SET_MODULE_OWNER(dev); | ||
139 | dev->mtu = ROSE_MAX_PACKET_SIZE - 2; | 138 | dev->mtu = ROSE_MAX_PACKET_SIZE - 2; |
140 | dev->hard_start_xmit = rose_xmit; | 139 | dev->hard_start_xmit = rose_xmit; |
141 | dev->open = rose_open; | 140 | dev->open = rose_open; |
diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c index 09e9e9d04d92..bd86a63960ce 100644 --- a/net/rose/rose_link.c +++ b/net/rose/rose_link.c | |||
@@ -40,7 +40,8 @@ void rose_start_ftimer(struct rose_neigh *neigh) | |||
40 | 40 | ||
41 | neigh->ftimer.data = (unsigned long)neigh; | 41 | neigh->ftimer.data = (unsigned long)neigh; |
42 | neigh->ftimer.function = &rose_ftimer_expiry; | 42 | neigh->ftimer.function = &rose_ftimer_expiry; |
43 | neigh->ftimer.expires = jiffies + sysctl_rose_link_fail_timeout; | 43 | neigh->ftimer.expires = |
44 | jiffies + msecs_to_jiffies(sysctl_rose_link_fail_timeout); | ||
44 | 45 | ||
45 | add_timer(&neigh->ftimer); | 46 | add_timer(&neigh->ftimer); |
46 | } | 47 | } |
@@ -51,7 +52,8 @@ static void rose_start_t0timer(struct rose_neigh *neigh) | |||
51 | 52 | ||
52 | neigh->t0timer.data = (unsigned long)neigh; | 53 | neigh->t0timer.data = (unsigned long)neigh; |
53 | neigh->t0timer.function = &rose_t0timer_expiry; | 54 | neigh->t0timer.function = &rose_t0timer_expiry; |
54 | neigh->t0timer.expires = jiffies + sysctl_rose_restart_request_timeout; | 55 | neigh->t0timer.expires = |
56 | jiffies + msecs_to_jiffies(sysctl_rose_restart_request_timeout); | ||
55 | 57 | ||
56 | add_timer(&neigh->t0timer); | 58 | add_timer(&neigh->t0timer); |
57 | } | 59 | } |
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c index 8631b65a7312..a22542fa1bc8 100644 --- a/net/rose/rose_route.c +++ b/net/rose/rose_route.c | |||
@@ -48,8 +48,6 @@ static DEFINE_SPINLOCK(rose_route_list_lock); | |||
48 | 48 | ||
49 | struct rose_neigh *rose_loopback_neigh; | 49 | struct rose_neigh *rose_loopback_neigh; |
50 | 50 | ||
51 | static void rose_remove_neigh(struct rose_neigh *); | ||
52 | |||
53 | /* | 51 | /* |
54 | * Add a new route to a node, and in the process add the node and the | 52 | * Add a new route to a node, and in the process add the node and the |
55 | * neighbour if it is new. | 53 | * neighbour if it is new. |
@@ -235,11 +233,8 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh) | |||
235 | 233 | ||
236 | skb_queue_purge(&rose_neigh->queue); | 234 | skb_queue_purge(&rose_neigh->queue); |
237 | 235 | ||
238 | spin_lock_bh(&rose_neigh_list_lock); | ||
239 | |||
240 | if ((s = rose_neigh_list) == rose_neigh) { | 236 | if ((s = rose_neigh_list) == rose_neigh) { |
241 | rose_neigh_list = rose_neigh->next; | 237 | rose_neigh_list = rose_neigh->next; |
242 | spin_unlock_bh(&rose_neigh_list_lock); | ||
243 | kfree(rose_neigh->digipeat); | 238 | kfree(rose_neigh->digipeat); |
244 | kfree(rose_neigh); | 239 | kfree(rose_neigh); |
245 | return; | 240 | return; |
@@ -248,7 +243,6 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh) | |||
248 | while (s != NULL && s->next != NULL) { | 243 | while (s != NULL && s->next != NULL) { |
249 | if (s->next == rose_neigh) { | 244 | if (s->next == rose_neigh) { |
250 | s->next = rose_neigh->next; | 245 | s->next = rose_neigh->next; |
251 | spin_unlock_bh(&rose_neigh_list_lock); | ||
252 | kfree(rose_neigh->digipeat); | 246 | kfree(rose_neigh->digipeat); |
253 | kfree(rose_neigh); | 247 | kfree(rose_neigh); |
254 | return; | 248 | return; |
@@ -256,7 +250,6 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh) | |||
256 | 250 | ||
257 | s = s->next; | 251 | s = s->next; |
258 | } | 252 | } |
259 | spin_unlock_bh(&rose_neigh_list_lock); | ||
260 | } | 253 | } |
261 | 254 | ||
262 | /* | 255 | /* |
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 91132f6871d7..f1c7bd29f2cd 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c | |||
@@ -974,10 +974,10 @@ hfsc_adjust_levels(struct hfsc_class *cl) | |||
974 | do { | 974 | do { |
975 | level = 0; | 975 | level = 0; |
976 | list_for_each_entry(p, &cl->children, siblings) { | 976 | list_for_each_entry(p, &cl->children, siblings) { |
977 | if (p->level > level) | 977 | if (p->level >= level) |
978 | level = p->level; | 978 | level = p->level + 1; |
979 | } | 979 | } |
980 | cl->level = level + 1; | 980 | cl->level = level; |
981 | } while ((cl = cl->cl_parent) != NULL); | 981 | } while ((cl = cl->cl_parent) != NULL); |
982 | } | 982 | } |
983 | 983 | ||
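
hfsc_adjust_levels() used to take the maximum child level and add one after the loop, which yields 1 even when the child list is empty; folding the +1 into the per-child comparison gives leaves level 0 and interior classes max(child) + 1. The two variants side by side on a plain array:

#include <stdio.h>

static int level_old(const int *child_levels, int n)
{
        int level = 0;
        for (int i = 0; i < n; i++)
                if (child_levels[i] > level)
                        level = child_levels[i];
        return level + 1;               /* empty child list still yields 1 */
}

static int level_new(const int *child_levels, int n)
{
        int level = 0;
        for (int i = 0; i < n; i++)
                if (child_levels[i] >= level)
                        level = child_levels[i] + 1;
        return level;                   /* empty child list yields 0 (a leaf) */
}

int main(void)
{
        int kids[] = { 0, 2, 1 };
        printf("with children: old=%d new=%d\n", level_old(kids, 3), level_new(kids, 3));
        printf("leaf:          old=%d new=%d\n", level_old(kids, 0), level_new(kids, 0));
        return 0;
}
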
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 7228d30512c7..5a4a4d0ae502 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c | |||
@@ -167,7 +167,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
167 | if (count == 0) { | 167 | if (count == 0) { |
168 | sch->qstats.drops++; | 168 | sch->qstats.drops++; |
169 | kfree_skb(skb); | 169 | kfree_skb(skb); |
170 | return NET_XMIT_DROP; | 170 | return NET_XMIT_BYPASS; |
171 | } | 171 | } |
172 | 172 | ||
173 | /* | 173 | /* |
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c index 297b8951463e..cf0c767d43ae 100644 --- a/net/sctp/inqueue.c +++ b/net/sctp/inqueue.c | |||
@@ -149,6 +149,7 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue) | |||
149 | /* This is the first chunk in the packet. */ | 149 | /* This is the first chunk in the packet. */ |
150 | chunk->singleton = 1; | 150 | chunk->singleton = 1; |
151 | ch = (sctp_chunkhdr_t *) chunk->skb->data; | 151 | ch = (sctp_chunkhdr_t *) chunk->skb->data; |
152 | chunk->data_accepted = 0; | ||
152 | } | 153 | } |
153 | 154 | ||
154 | chunk->chunk_hdr = ch; | 155 | chunk->chunk_hdr = ch; |
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 2b9a832b29a7..8cdba51ec076 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -636,8 +636,9 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep, | |||
636 | */ | 636 | */ |
637 | chunk->subh.cookie_hdr = | 637 | chunk->subh.cookie_hdr = |
638 | (struct sctp_signed_cookie *)chunk->skb->data; | 638 | (struct sctp_signed_cookie *)chunk->skb->data; |
639 | skb_pull(chunk->skb, | 639 | if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) - |
640 | ntohs(chunk->chunk_hdr->length) - sizeof(sctp_chunkhdr_t)); | 640 | sizeof(sctp_chunkhdr_t))) |
641 | goto nomem; | ||
641 | 642 | ||
642 | /* 5.1 D) Upon reception of the COOKIE ECHO chunk, Endpoint | 643 | /* 5.1 D) Upon reception of the COOKIE ECHO chunk, Endpoint |
643 | * "Z" will reply with a COOKIE ACK chunk after building a TCB | 644 | * "Z" will reply with a COOKIE ACK chunk after building a TCB |
@@ -965,7 +966,8 @@ sctp_disposition_t sctp_sf_beat_8_3(const struct sctp_endpoint *ep, | |||
965 | */ | 966 | */ |
966 | chunk->subh.hb_hdr = (sctp_heartbeathdr_t *) chunk->skb->data; | 967 | chunk->subh.hb_hdr = (sctp_heartbeathdr_t *) chunk->skb->data; |
967 | paylen = ntohs(chunk->chunk_hdr->length) - sizeof(sctp_chunkhdr_t); | 968 | paylen = ntohs(chunk->chunk_hdr->length) - sizeof(sctp_chunkhdr_t); |
968 | skb_pull(chunk->skb, paylen); | 969 | if (!pskb_pull(chunk->skb, paylen)) |
970 | goto nomem; | ||
969 | 971 | ||
970 | reply = sctp_make_heartbeat_ack(asoc, chunk, | 972 | reply = sctp_make_heartbeat_ack(asoc, chunk, |
971 | chunk->subh.hb_hdr, paylen); | 973 | chunk->subh.hb_hdr, paylen); |
@@ -1860,8 +1862,9 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep, | |||
1860 | * are in good shape. | 1862 | * are in good shape. |
1861 | */ | 1863 | */ |
1862 | chunk->subh.cookie_hdr = (struct sctp_signed_cookie *)chunk->skb->data; | 1864 | chunk->subh.cookie_hdr = (struct sctp_signed_cookie *)chunk->skb->data; |
1863 | skb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) - | 1865 | if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) - |
1864 | sizeof(sctp_chunkhdr_t)); | 1866 | sizeof(sctp_chunkhdr_t))) |
1867 | goto nomem; | ||
1865 | 1868 | ||
1866 | /* In RFC 2960 5.2.4 3, if both Verification Tags in the State Cookie | 1869 | /* In RFC 2960 5.2.4 3, if both Verification Tags in the State Cookie |
1867 | * of a duplicate COOKIE ECHO match the Verification Tags of the | 1870 | * of a duplicate COOKIE ECHO match the Verification Tags of the |
@@ -5151,7 +5154,9 @@ static int sctp_eat_data(const struct sctp_association *asoc, | |||
5151 | int tmp; | 5154 | int tmp; |
5152 | __u32 tsn; | 5155 | __u32 tsn; |
5153 | int account_value; | 5156 | int account_value; |
5157 | struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map; | ||
5154 | struct sock *sk = asoc->base.sk; | 5158 | struct sock *sk = asoc->base.sk; |
5159 | int rcvbuf_over = 0; | ||
5155 | 5160 | ||
5156 | data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data; | 5161 | data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data; |
5157 | skb_pull(chunk->skb, sizeof(sctp_datahdr_t)); | 5162 | skb_pull(chunk->skb, sizeof(sctp_datahdr_t)); |
@@ -5162,10 +5167,16 @@ static int sctp_eat_data(const struct sctp_association *asoc, | |||
5162 | /* ASSERT: Now skb->data is really the user data. */ | 5167 | /* ASSERT: Now skb->data is really the user data. */ |
5163 | 5168 | ||
5164 | /* | 5169 | /* |
5165 | * if we are established, and we have used up our receive | 5170 | * If we are established, and we have used up our receive buffer |
5166 | * buffer memory, drop the frame | 5171 | * memory, think about droping the frame. |
5167 | */ | 5172 | * Note that we have an opportunity to improve performance here. |
5168 | if (asoc->state == SCTP_STATE_ESTABLISHED) { | 5173 | * If we accept one chunk from an skbuff, we have to keep all the |
5174 | * memory of that skbuff around until the chunk is read into user | ||
5175 | * space. Therefore, once we accept 1 chunk we may as well accept all | ||
5176 | * remaining chunks in the skbuff. The data_accepted flag helps us do | ||
5177 | * that. | ||
5178 | */ | ||
5179 | if ((asoc->state == SCTP_STATE_ESTABLISHED) && (!chunk->data_accepted)) { | ||
5169 | /* | 5180 | /* |
5170 | * If the receive buffer policy is 1, then each | 5181 | * If the receive buffer policy is 1, then each |
5171 | * association can allocate up to sk_rcvbuf bytes | 5182 | * association can allocate up to sk_rcvbuf bytes |
@@ -5176,9 +5187,25 @@ static int sctp_eat_data(const struct sctp_association *asoc, | |||
5176 | account_value = atomic_read(&asoc->rmem_alloc); | 5187 | account_value = atomic_read(&asoc->rmem_alloc); |
5177 | else | 5188 | else |
5178 | account_value = atomic_read(&sk->sk_rmem_alloc); | 5189 | account_value = atomic_read(&sk->sk_rmem_alloc); |
5179 | 5190 | if (account_value > sk->sk_rcvbuf) { | |
5180 | if (account_value > sk->sk_rcvbuf) | 5191 | /* |
5181 | return SCTP_IERROR_IGNORE_TSN; | 5192 | * We need to make forward progress, even when we are |
5193 | * under memory pressure, so we always allow the | ||
5194 | * next tsn after the ctsn ack point to be accepted. | ||
5195 | * This lets us avoid deadlocks in which we have to | ||
5196 | * drop frames that would otherwise let us drain the | ||
5197 | * receive queue. | ||
5198 | */ | ||
5199 | if ((sctp_tsnmap_get_ctsn(map) + 1) != tsn) | ||
5200 | return SCTP_IERROR_IGNORE_TSN; | ||
5201 | |||
5202 | /* | ||
5203 | * We're going to accept the frame but we should renege | ||
5204 | * to make space for it. This will send us down that | ||
5205 | * path later in this function. | ||
5206 | */ | ||
5207 | rcvbuf_over = 1; | ||
5208 | } | ||
5182 | } | 5209 | } |
5183 | 5210 | ||
5184 | /* Process ECN based congestion. | 5211 | /* Process ECN based congestion. |
@@ -5226,6 +5253,7 @@ static int sctp_eat_data(const struct sctp_association *asoc, | |||
5226 | datalen -= sizeof(sctp_data_chunk_t); | 5253 | datalen -= sizeof(sctp_data_chunk_t); |
5227 | 5254 | ||
5228 | deliver = SCTP_CMD_CHUNK_ULP; | 5255 | deliver = SCTP_CMD_CHUNK_ULP; |
5256 | chunk->data_accepted = 1; | ||
5229 | 5257 | ||
5230 | /* Think about partial delivery. */ | 5258 | /* Think about partial delivery. */ |
5231 | if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) { | 5259 | if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) { |
@@ -5242,7 +5270,8 @@ static int sctp_eat_data(const struct sctp_association *asoc, | |||
5242 | * large spill over. | 5270 | * large spill over. |
5243 | */ | 5271 | */ |
5244 | if (!asoc->rwnd || asoc->rwnd_over || | 5272 | if (!asoc->rwnd || asoc->rwnd_over || |
5245 | (datalen > asoc->rwnd + asoc->frag_point)) { | 5273 | (datalen > asoc->rwnd + asoc->frag_point) || |
5274 | rcvbuf_over) { | ||
5246 | 5275 | ||
5247 | /* If this is the next TSN, consider reneging to make | 5276 | /* If this is the next TSN, consider reneging to make |
5248 | * room. Note: Playing nice with a confused sender. A | 5277 | * room. Note: Playing nice with a confused sender. A |
@@ -5250,8 +5279,8 @@ static int sctp_eat_data(const struct sctp_association *asoc, | |||
5250 | * space and in the future we may want to detect and | 5279 | * space and in the future we may want to detect and |
5251 | * do more drastic reneging. | 5280 | * do more drastic reneging. |
5252 | */ | 5281 | */ |
5253 | if (sctp_tsnmap_has_gap(&asoc->peer.tsn_map) && | 5282 | if (sctp_tsnmap_has_gap(map) && |
5254 | (sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1) == tsn) { | 5283 | (sctp_tsnmap_get_ctsn(map) + 1) == tsn) { |
5255 | SCTP_DEBUG_PRINTK("Reneging for tsn:%u\n", tsn); | 5284 | SCTP_DEBUG_PRINTK("Reneging for tsn:%u\n", tsn); |
5256 | deliver = SCTP_CMD_RENEGE; | 5285 | deliver = SCTP_CMD_RENEGE; |
5257 | } else { | 5286 | } else { |
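
The receive-buffer check in sctp_eat_data() no longer drops unconditionally when the association is over its memory limit: the chunk whose TSN is exactly one past the cumulative ack point is still accepted, with rcvbuf_over then steering it toward the renege path, because refusing that one chunk is what could stall the receiver against a full queue. The acceptance rule by itself:

#include <stdint.h>
#include <stdio.h>

/* Decide whether to accept a DATA chunk while over the receive-buffer
 * limit: only the next in-sequence TSN (cumulative ack point + 1) is let
 * through, so the receiver can always make forward progress. */
static int accept_while_over_limit(uint32_t tsn, uint32_t ctsn_ack_point)
{
        return tsn == ctsn_ack_point + 1;
}

int main(void)
{
        printf("%d %d\n",
               accept_while_over_limit(101, 100),   /* 1: next in sequence */
               accept_while_over_limit(105, 100));  /* 0: ignore this TSN  */
        return 0;
}
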
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c index 75ef10408764..8bcca5676151 100644 --- a/net/sctp/sm_statetable.c +++ b/net/sctp/sm_statetable.c | |||
@@ -366,9 +366,9 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type, | |||
366 | /* SCTP_STATE_EMPTY */ \ | 366 | /* SCTP_STATE_EMPTY */ \ |
367 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | 367 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ |
368 | /* SCTP_STATE_CLOSED */ \ | 368 | /* SCTP_STATE_CLOSED */ \ |
369 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 369 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ |
370 | /* SCTP_STATE_COOKIE_WAIT */ \ | 370 | /* SCTP_STATE_COOKIE_WAIT */ \ |
371 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 371 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ |
372 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 372 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
373 | {.fn = sctp_sf_do_ecne, .name = "sctp_sf_do_ecne"}, \ | 373 | {.fn = sctp_sf_do_ecne, .name = "sctp_sf_do_ecne"}, \ |
374 | /* SCTP_STATE_ESTABLISHED */ \ | 374 | /* SCTP_STATE_ESTABLISHED */ \ |
@@ -380,7 +380,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type, | |||
380 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 380 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
381 | {.fn = sctp_sf_do_ecne, .name = "sctp_sf_do_ecne"}, \ | 381 | {.fn = sctp_sf_do_ecne, .name = "sctp_sf_do_ecne"}, \ |
382 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 382 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
383 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 383 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ |
384 | } /* TYPE_SCTP_ECN_ECNE */ | 384 | } /* TYPE_SCTP_ECN_ECNE */ |
385 | 385 | ||
386 | #define TYPE_SCTP_ECN_CWR { \ | 386 | #define TYPE_SCTP_ECN_CWR { \ |
@@ -401,7 +401,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type, | |||
401 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 401 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
402 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 402 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ |
403 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 403 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
404 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 404 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ |
405 | } /* TYPE_SCTP_ECN_CWR */ | 405 | } /* TYPE_SCTP_ECN_CWR */ |
406 | 406 | ||
407 | #define TYPE_SCTP_SHUTDOWN_COMPLETE { \ | 407 | #define TYPE_SCTP_SHUTDOWN_COMPLETE { \ |
@@ -647,7 +647,7 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = { | |||
647 | /* SCTP_STATE_EMPTY */ \ | 647 | /* SCTP_STATE_EMPTY */ \ |
648 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 648 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ |
649 | /* SCTP_STATE_CLOSED */ \ | 649 | /* SCTP_STATE_CLOSED */ \ |
650 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 650 | {.fn = sctp_sf_error_closed, .name = "sctp_sf_error_closed"}, \ |
651 | /* SCTP_STATE_COOKIE_WAIT */ \ | 651 | /* SCTP_STATE_COOKIE_WAIT */ \ |
652 | {.fn = sctp_sf_do_prm_requestheartbeat, \ | 652 | {.fn = sctp_sf_do_prm_requestheartbeat, \ |
653 | .name = "sctp_sf_do_prm_requestheartbeat"}, \ | 653 | .name = "sctp_sf_do_prm_requestheartbeat"}, \ |
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index 2080b2d28c98..575e556aeb3e 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c | |||
@@ -279,6 +279,7 @@ static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq, | |||
279 | static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag) | 279 | static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag) |
280 | { | 280 | { |
281 | struct sk_buff *pos; | 281 | struct sk_buff *pos; |
282 | struct sk_buff *new = NULL; | ||
282 | struct sctp_ulpevent *event; | 283 | struct sctp_ulpevent *event; |
283 | struct sk_buff *pnext, *last; | 284 | struct sk_buff *pnext, *last; |
284 | struct sk_buff *list = skb_shinfo(f_frag)->frag_list; | 285 | struct sk_buff *list = skb_shinfo(f_frag)->frag_list; |
@@ -297,11 +298,33 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *qu | |||
297 | */ | 298 | */ |
298 | if (last) | 299 | if (last) |
299 | last->next = pos; | 300 | last->next = pos; |
300 | else | 301 | else { |
301 | skb_shinfo(f_frag)->frag_list = pos; | 302 | if (skb_cloned(f_frag)) { |
303 | /* This is a cloned skb, we can't just modify | ||
304 | * the frag_list. We need a new skb to do that. | ||
305 | * Instead of calling skb_unshare(), we'll do it | ||
306 | * ourselves since we need to delay the free. | ||
307 | */ | ||
308 | new = skb_copy(f_frag, GFP_ATOMIC); | ||
309 | if (!new) | ||
310 | return NULL; /* try again later */ | ||
311 | |||
312 | new->sk = f_frag->sk; | ||
313 | |||
314 | skb_shinfo(new)->frag_list = pos; | ||
315 | } else | ||
316 | skb_shinfo(f_frag)->frag_list = pos; | ||
317 | } | ||
302 | 318 | ||
303 | /* Remove the first fragment from the reassembly queue. */ | 319 | /* Remove the first fragment from the reassembly queue. */ |
304 | __skb_unlink(f_frag, queue); | 320 | __skb_unlink(f_frag, queue); |
321 | |||
322 | /* if we did unshare, then free the old skb and re-assign */ | ||
323 | if (new) { | ||
324 | kfree_skb(f_frag); | ||
325 | f_frag = new; | ||
326 | } | ||
327 | |||
305 | while (pos) { | 328 | while (pos) { |
306 | 329 | ||
307 | pnext = pos->next; | 330 | pnext = pos->next; |
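
sctp_make_reassembled_event() now refuses to graft the fragment chain onto a cloned first fragment: a clone shares its data, so its frag_list cannot be rewritten in place; instead the skb is copied, the chain is attached to the copy, and the original is freed only after it has been unlinked from the queue. A userspace analogue of "copy before modifying shared data" (the refcount stands in for skb_cloned(); everything here is illustrative, not kernel API):

#include <stdlib.h>
#include <string.h>

struct buf {
        int   refs;        /* >1 means another holder still sees this data */
        char  data[32];
        void *chain;       /* stand-in for the frag_list pointer */
};

/* Attach 'chain' to 'b', copying first when the buffer is shared so the
 * other holder never observes the modification.  Returns the buffer the
 * caller should use from now on (possibly a fresh copy), or NULL on OOM. */
static struct buf *attach_chain(struct buf *b, void *chain)
{
        if (b->refs > 1) {
                struct buf *copy = malloc(sizeof(*copy));
                if (!copy)
                        return NULL;    /* try again later */
                memcpy(copy, b, sizeof(*copy));
                copy->refs = 1;
                copy->chain = chain;
                b->refs--;              /* drop our reference to the shared one */
                return copy;
        }
        b->chain = chain;
        return b;
}

int main(void)
{
        struct buf shared = { .refs = 2 };
        struct buf *mine = attach_chain(&shared, (void *)"frag-list");
        int ok = mine && mine != &shared && shared.chain == NULL;

        if (mine && mine != &shared)
                free(mine);
        return ok ? 0 : 1;
}
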
diff --git a/net/socket.c b/net/socket.c index 0ce12dfc7a71..02948b622bd2 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -267,6 +267,8 @@ int move_addr_to_user(void *kaddr, int klen, void __user *uaddr, int __user *ule | |||
267 | return -EINVAL; | 267 | return -EINVAL; |
268 | if(len) | 268 | if(len) |
269 | { | 269 | { |
270 | if (audit_sockaddr(klen, kaddr)) | ||
271 | return -ENOMEM; | ||
270 | if(copy_to_user(uaddr,kaddr,len)) | 272 | if(copy_to_user(uaddr,kaddr,len)) |
271 | return -EFAULT; | 273 | return -EFAULT; |
272 | } | 274 | } |
diff --git a/net/x25/x25_timer.c b/net/x25/x25_timer.c index 0a92e1da3922..71ff3088f6fe 100644 --- a/net/x25/x25_timer.c +++ b/net/x25/x25_timer.c | |||
@@ -114,8 +114,9 @@ static void x25_heartbeat_expiry(unsigned long param) | |||
114 | if (sock_flag(sk, SOCK_DESTROY) || | 114 | if (sock_flag(sk, SOCK_DESTROY) || |
115 | (sk->sk_state == TCP_LISTEN && | 115 | (sk->sk_state == TCP_LISTEN && |
116 | sock_flag(sk, SOCK_DEAD))) { | 116 | sock_flag(sk, SOCK_DEAD))) { |
117 | bh_unlock_sock(sk); | ||
117 | x25_destroy_socket(sk); | 118 | x25_destroy_socket(sk); |
118 | goto unlock; | 119 | return; |
119 | } | 120 | } |
120 | break; | 121 | break; |
121 | 122 | ||
@@ -128,7 +129,6 @@ static void x25_heartbeat_expiry(unsigned long param) | |||
128 | } | 129 | } |
129 | restart_heartbeat: | 130 | restart_heartbeat: |
130 | x25_start_heartbeat(sk); | 131 | x25_start_heartbeat(sk); |
131 | unlock: | ||
132 | bh_unlock_sock(sk); | 132 | bh_unlock_sock(sk); |
133 | } | 133 | } |
134 | 134 | ||
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index c3725fe2a8fb..b469c8b54613 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -57,12 +57,12 @@ int xfrm_register_type(struct xfrm_type *type, unsigned short family) | |||
57 | return -EAFNOSUPPORT; | 57 | return -EAFNOSUPPORT; |
58 | typemap = afinfo->type_map; | 58 | typemap = afinfo->type_map; |
59 | 59 | ||
60 | write_lock(&typemap->lock); | 60 | write_lock_bh(&typemap->lock); |
61 | if (likely(typemap->map[type->proto] == NULL)) | 61 | if (likely(typemap->map[type->proto] == NULL)) |
62 | typemap->map[type->proto] = type; | 62 | typemap->map[type->proto] = type; |
63 | else | 63 | else |
64 | err = -EEXIST; | 64 | err = -EEXIST; |
65 | write_unlock(&typemap->lock); | 65 | write_unlock_bh(&typemap->lock); |
66 | xfrm_policy_put_afinfo(afinfo); | 66 | xfrm_policy_put_afinfo(afinfo); |
67 | return err; | 67 | return err; |
68 | } | 68 | } |
@@ -78,12 +78,12 @@ int xfrm_unregister_type(struct xfrm_type *type, unsigned short family) | |||
78 | return -EAFNOSUPPORT; | 78 | return -EAFNOSUPPORT; |
79 | typemap = afinfo->type_map; | 79 | typemap = afinfo->type_map; |
80 | 80 | ||
81 | write_lock(&typemap->lock); | 81 | write_lock_bh(&typemap->lock); |
82 | if (unlikely(typemap->map[type->proto] != type)) | 82 | if (unlikely(typemap->map[type->proto] != type)) |
83 | err = -ENOENT; | 83 | err = -ENOENT; |
84 | else | 84 | else |
85 | typemap->map[type->proto] = NULL; | 85 | typemap->map[type->proto] = NULL; |
86 | write_unlock(&typemap->lock); | 86 | write_unlock_bh(&typemap->lock); |
87 | xfrm_policy_put_afinfo(afinfo); | 87 | xfrm_policy_put_afinfo(afinfo); |
88 | return err; | 88 | return err; |
89 | } | 89 | } |
@@ -1251,7 +1251,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) | |||
1251 | return -EINVAL; | 1251 | return -EINVAL; |
1252 | if (unlikely(afinfo->family >= NPROTO)) | 1252 | if (unlikely(afinfo->family >= NPROTO)) |
1253 | return -EAFNOSUPPORT; | 1253 | return -EAFNOSUPPORT; |
1254 | write_lock(&xfrm_policy_afinfo_lock); | 1254 | write_lock_bh(&xfrm_policy_afinfo_lock); |
1255 | if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL)) | 1255 | if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL)) |
1256 | err = -ENOBUFS; | 1256 | err = -ENOBUFS; |
1257 | else { | 1257 | else { |
@@ -1268,7 +1268,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) | |||
1268 | afinfo->garbage_collect = __xfrm_garbage_collect; | 1268 | afinfo->garbage_collect = __xfrm_garbage_collect; |
1269 | xfrm_policy_afinfo[afinfo->family] = afinfo; | 1269 | xfrm_policy_afinfo[afinfo->family] = afinfo; |
1270 | } | 1270 | } |
1271 | write_unlock(&xfrm_policy_afinfo_lock); | 1271 | write_unlock_bh(&xfrm_policy_afinfo_lock); |
1272 | return err; | 1272 | return err; |
1273 | } | 1273 | } |
1274 | EXPORT_SYMBOL(xfrm_policy_register_afinfo); | 1274 | EXPORT_SYMBOL(xfrm_policy_register_afinfo); |
@@ -1280,7 +1280,7 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo) | |||
1280 | return -EINVAL; | 1280 | return -EINVAL; |
1281 | if (unlikely(afinfo->family >= NPROTO)) | 1281 | if (unlikely(afinfo->family >= NPROTO)) |
1282 | return -EAFNOSUPPORT; | 1282 | return -EAFNOSUPPORT; |
1283 | write_lock(&xfrm_policy_afinfo_lock); | 1283 | write_lock_bh(&xfrm_policy_afinfo_lock); |
1284 | if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) { | 1284 | if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) { |
1285 | if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo)) | 1285 | if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo)) |
1286 | err = -EINVAL; | 1286 | err = -EINVAL; |
@@ -1294,7 +1294,7 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo) | |||
1294 | afinfo->garbage_collect = NULL; | 1294 | afinfo->garbage_collect = NULL; |
1295 | } | 1295 | } |
1296 | } | 1296 | } |
1297 | write_unlock(&xfrm_policy_afinfo_lock); | 1297 | write_unlock_bh(&xfrm_policy_afinfo_lock); |
1298 | return err; | 1298 | return err; |
1299 | } | 1299 | } |
1300 | EXPORT_SYMBOL(xfrm_policy_unregister_afinfo); | 1300 | EXPORT_SYMBOL(xfrm_policy_unregister_afinfo); |
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 3dc3e1f3b7aa..93a2f36ad3db 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -1061,7 +1061,7 @@ int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo) | |||
1061 | return -EINVAL; | 1061 | return -EINVAL; |
1062 | if (unlikely(afinfo->family >= NPROTO)) | 1062 | if (unlikely(afinfo->family >= NPROTO)) |
1063 | return -EAFNOSUPPORT; | 1063 | return -EAFNOSUPPORT; |
1064 | write_lock(&xfrm_state_afinfo_lock); | 1064 | write_lock_bh(&xfrm_state_afinfo_lock); |
1065 | if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL)) | 1065 | if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL)) |
1066 | err = -ENOBUFS; | 1066 | err = -ENOBUFS; |
1067 | else { | 1067 | else { |
@@ -1069,7 +1069,7 @@ int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo) | |||
1069 | afinfo->state_byspi = xfrm_state_byspi; | 1069 | afinfo->state_byspi = xfrm_state_byspi; |
1070 | xfrm_state_afinfo[afinfo->family] = afinfo; | 1070 | xfrm_state_afinfo[afinfo->family] = afinfo; |
1071 | } | 1071 | } |
1072 | write_unlock(&xfrm_state_afinfo_lock); | 1072 | write_unlock_bh(&xfrm_state_afinfo_lock); |
1073 | return err; | 1073 | return err; |
1074 | } | 1074 | } |
1075 | EXPORT_SYMBOL(xfrm_state_register_afinfo); | 1075 | EXPORT_SYMBOL(xfrm_state_register_afinfo); |
@@ -1081,7 +1081,7 @@ int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo) | |||
1081 | return -EINVAL; | 1081 | return -EINVAL; |
1082 | if (unlikely(afinfo->family >= NPROTO)) | 1082 | if (unlikely(afinfo->family >= NPROTO)) |
1083 | return -EAFNOSUPPORT; | 1083 | return -EAFNOSUPPORT; |
1084 | write_lock(&xfrm_state_afinfo_lock); | 1084 | write_lock_bh(&xfrm_state_afinfo_lock); |
1085 | if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) { | 1085 | if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) { |
1086 | if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo)) | 1086 | if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo)) |
1087 | err = -EINVAL; | 1087 | err = -EINVAL; |
@@ -1091,7 +1091,7 @@ int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo) | |||
1091 | afinfo->state_bydst = NULL; | 1091 | afinfo->state_bydst = NULL; |
1092 | } | 1092 | } |
1093 | } | 1093 | } |
1094 | write_unlock(&xfrm_state_afinfo_lock); | 1094 | write_unlock_bh(&xfrm_state_afinfo_lock); |
1095 | return err; | 1095 | return err; |
1096 | } | 1096 | } |
1097 | EXPORT_SYMBOL(xfrm_state_unregister_afinfo); | 1097 | EXPORT_SYMBOL(xfrm_state_unregister_afinfo); |