Diffstat (limited to 'net')
-rw-r--r--  net/ax25/af_ax25.c                    |   4
-rw-r--r--  net/ax25/ax25_iface.c                 | 103
-rw-r--r--  net/ax25/ax25_route.c                 |   2
-rw-r--r--  net/ipv4/route.c                      |   3
-rw-r--r--  net/ipv4/tcp_ipv4.c                   |   3
-rw-r--r--  net/ipv4/udp.c                        |  13
-rw-r--r--  net/ipv6/netfilter/Kconfig            |   5
-rw-r--r--  net/netlabel/netlabel_cipso_v4.c      |  37
-rw-r--r--  net/netrom/af_netrom.c                |  15
-rw-r--r--  net/netrom/nr_dev.c                   |  24
-rw-r--r--  net/netrom/nr_route.c                 |  19
-rw-r--r--  net/rose/af_rose.c                    |  18
-rw-r--r--  net/rose/rose_dev.c                   |  22
-rw-r--r--  net/rose/rose_loopback.c              |   5
-rw-r--r--  net/rose/rose_route.c                 |  47
-rw-r--r--  net/sctp/ipv6.c                       |   4
-rw-r--r--  net/sctp/protocol.c                   |   4
-rw-r--r--  net/sctp/sm_make_chunk.c              |  24
-rw-r--r--  net/sctp/sm_statefuns.c               |  32
-rw-r--r--  net/sctp/socket.c                     |  34
-rw-r--r--  net/sctp/ulpevent.c                   |  20
-rw-r--r--  net/sunrpc/auth_gss/gss_spkm3_mech.c  |   2
22 files changed, 229 insertions, 211 deletions
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 6cabf6d8a751..42233df2b099 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1088,8 +1088,8 @@ out:
 /*
  * FIXME: nonblock behaviour looks like it may have a bug.
  */
-static int ax25_connect(struct socket *sock, struct sockaddr *uaddr,
-	int addr_len, int flags)
+static int __must_check ax25_connect(struct socket *sock,
+	struct sockaddr *uaddr, int addr_len, int flags)
 {
 	struct sock *sk = sock->sk;
 	ax25_cb *ax25 = ax25_sk(sk), *ax25t;
diff --git a/net/ax25/ax25_iface.c b/net/ax25/ax25_iface.c
index 07ac0207eb69..aff3e652c2d1 100644
--- a/net/ax25/ax25_iface.c
+++ b/net/ax25/ax25_iface.c
@@ -29,17 +29,10 @@
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 
-static struct protocol_struct {
-	struct protocol_struct *next;
-	unsigned int pid;
-	int (*func)(struct sk_buff *, ax25_cb *);
-} *protocol_list = NULL;
+static struct ax25_protocol *protocol_list;
 static DEFINE_RWLOCK(protocol_list_lock);
 
-static struct linkfail_struct {
-	struct linkfail_struct *next;
-	void (*func)(ax25_cb *, int);
-} *linkfail_list = NULL;
+static HLIST_HEAD(ax25_linkfail_list);
 static DEFINE_SPINLOCK(linkfail_lock);
 
 static struct listen_struct {
@@ -49,36 +42,23 @@ static struct listen_struct {
 } *listen_list = NULL;
 static DEFINE_SPINLOCK(listen_lock);
 
-int ax25_protocol_register(unsigned int pid,
-	int (*func)(struct sk_buff *, ax25_cb *))
+/*
+ * Do not register the internal protocols AX25_P_TEXT, AX25_P_SEGMENT,
+ * AX25_P_IP or AX25_P_ARP ...
+ */
+void ax25_register_pid(struct ax25_protocol *ap)
 {
-	struct protocol_struct *protocol;
-
-	if (pid == AX25_P_TEXT || pid == AX25_P_SEGMENT)
-		return 0;
-#ifdef CONFIG_INET
-	if (pid == AX25_P_IP || pid == AX25_P_ARP)
-		return 0;
-#endif
-	if ((protocol = kmalloc(sizeof(*protocol), GFP_ATOMIC)) == NULL)
-		return 0;
-
-	protocol->pid = pid;
-	protocol->func = func;
-
 	write_lock_bh(&protocol_list_lock);
-	protocol->next = protocol_list;
-	protocol_list = protocol;
+	ap->next = protocol_list;
+	protocol_list = ap;
 	write_unlock_bh(&protocol_list_lock);
-
-	return 1;
 }
 
-EXPORT_SYMBOL(ax25_protocol_register);
+EXPORT_SYMBOL_GPL(ax25_register_pid);
 
 void ax25_protocol_release(unsigned int pid)
 {
-	struct protocol_struct *s, *protocol;
+	struct ax25_protocol *s, *protocol;
 
 	write_lock_bh(&protocol_list_lock);
 	protocol = protocol_list;
@@ -110,54 +90,19 @@ void ax25_protocol_release(unsigned int pid)
 
 EXPORT_SYMBOL(ax25_protocol_release);
 
-int ax25_linkfail_register(void (*func)(ax25_cb *, int))
+void ax25_linkfail_register(struct ax25_linkfail *lf)
 {
-	struct linkfail_struct *linkfail;
-
-	if ((linkfail = kmalloc(sizeof(*linkfail), GFP_ATOMIC)) == NULL)
-		return 0;
-
-	linkfail->func = func;
-
 	spin_lock_bh(&linkfail_lock);
-	linkfail->next = linkfail_list;
-	linkfail_list = linkfail;
+	hlist_add_head(&lf->lf_node, &ax25_linkfail_list);
 	spin_unlock_bh(&linkfail_lock);
-
-	return 1;
 }
 
 EXPORT_SYMBOL(ax25_linkfail_register);
 
-void ax25_linkfail_release(void (*func)(ax25_cb *, int))
+void ax25_linkfail_release(struct ax25_linkfail *lf)
 {
-	struct linkfail_struct *s, *linkfail;
-
 	spin_lock_bh(&linkfail_lock);
-	linkfail = linkfail_list;
-	if (linkfail == NULL) {
-		spin_unlock_bh(&linkfail_lock);
-		return;
-	}
-
-	if (linkfail->func == func) {
-		linkfail_list = linkfail->next;
-		spin_unlock_bh(&linkfail_lock);
-		kfree(linkfail);
-		return;
-	}
-
-	while (linkfail != NULL && linkfail->next != NULL) {
-		if (linkfail->next->func == func) {
-			s = linkfail->next;
-			linkfail->next = linkfail->next->next;
-			spin_unlock_bh(&linkfail_lock);
-			kfree(s);
-			return;
-		}
-
-		linkfail = linkfail->next;
-	}
+	hlist_del_init(&lf->lf_node);
 	spin_unlock_bh(&linkfail_lock);
 }
 
@@ -171,7 +116,7 @@ int ax25_listen_register(ax25_address *callsign, struct net_device *dev)
 		return 0;
 
 	if ((listen = kmalloc(sizeof(*listen), GFP_ATOMIC)) == NULL)
-		return 0;
+		return -ENOMEM;
 
 	listen->callsign = *callsign;
 	listen->dev = dev;
@@ -181,7 +126,7 @@ int ax25_listen_register(ax25_address *callsign, struct net_device *dev)
 	listen_list = listen;
 	spin_unlock_bh(&listen_lock);
 
-	return 1;
+	return 0;
 }
 
 EXPORT_SYMBOL(ax25_listen_register);
@@ -223,7 +168,7 @@ EXPORT_SYMBOL(ax25_listen_release);
 int (*ax25_protocol_function(unsigned int pid))(struct sk_buff *, ax25_cb *)
 {
 	int (*res)(struct sk_buff *, ax25_cb *) = NULL;
-	struct protocol_struct *protocol;
+	struct ax25_protocol *protocol;
 
 	read_lock(&protocol_list_lock);
 	for (protocol = protocol_list; protocol != NULL; protocol = protocol->next)
@@ -242,7 +187,8 @@ int ax25_listen_mine(ax25_address *callsign, struct net_device *dev)
 
 	spin_lock_bh(&listen_lock);
 	for (listen = listen_list; listen != NULL; listen = listen->next)
-		if (ax25cmp(&listen->callsign, callsign) == 0 && (listen->dev == dev || listen->dev == NULL)) {
+		if (ax25cmp(&listen->callsign, callsign) == 0 &&
+		    (listen->dev == dev || listen->dev == NULL)) {
 			spin_unlock_bh(&listen_lock);
 			return 1;
 		}
@@ -253,17 +199,18 @@ int ax25_listen_mine(ax25_address *callsign, struct net_device *dev)
 
 void ax25_link_failed(ax25_cb *ax25, int reason)
 {
-	struct linkfail_struct *linkfail;
+	struct ax25_linkfail *lf;
+	struct hlist_node *node;
 
 	spin_lock_bh(&linkfail_lock);
-	for (linkfail = linkfail_list; linkfail != NULL; linkfail = linkfail->next)
-		(linkfail->func)(ax25, reason);
+	hlist_for_each_entry(lf, node, &ax25_linkfail_list, lf_node)
+		lf->func(ax25, reason);
 	spin_unlock_bh(&linkfail_lock);
 }
 
 int ax25_protocol_is_registered(unsigned int pid)
 {
-	struct protocol_struct *protocol;
+	struct ax25_protocol *protocol;
 	int res = 0;
 
 	read_lock_bh(&protocol_list_lock);
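The ax25_iface rework above replaces the internally kmalloc'd registration records with caller-owned structures, so PID registration can no longer fail and the GFP_ATOMIC allocations disappear. A minimal sketch of how a layered protocol adopts the new interface, using only the struct ax25_protocol and struct ax25_linkfail fields visible in the hunks above; the handler names and module boilerplate are hypothetical, and the NET/ROM and ROSE conversions later in this diff follow the same pattern:

/* Sketch only: a hypothetical AX.25 payload handler and link-failure hook. */
static int my_rx(struct sk_buff *skb, ax25_cb *ax25)
{
	kfree_skb(skb);		/* consume the frame */
	return 1;		/* non-zero: frame was handled */
}

static void my_linkfail(ax25_cb *ax25, int reason)
{
	/* tear down whatever state depended on the failed AX.25 link */
}

static struct ax25_protocol my_proto = {
	.pid  = AX25_P_NETROM,	/* placeholder PID, purely illustrative */
	.func = my_rx,
};

static struct ax25_linkfail my_lf = {
	.func = my_linkfail,
};

static int __init my_init(void)
{
	ax25_register_pid(&my_proto);	/* returns void: cannot fail */
	ax25_linkfail_register(&my_lf);
	return 0;
}

static void __exit my_exit(void)
{
	ax25_linkfail_release(&my_lf);
	ax25_protocol_release(AX25_P_NETROM);
}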
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index 8580356ace5c..0a0381622b1c 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -71,7 +71,7 @@ void ax25_rt_device_down(struct net_device *dev)
 	write_unlock(&ax25_route_lock);
 }
 
-static int ax25_rt_add(struct ax25_routes_struct *route)
+static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
 {
 	ax25_route *ax25_rt;
 	ax25_dev *ax25_dev;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 1aaff0a2e098..2daa0dc19d33 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1325,7 +1325,8 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 	/* Check for load limit; set rate_last to the latest sent
 	 * redirect.
 	 */
-	if (time_after(jiffies,
+	if (rt->u.dst.rate_tokens == 0 ||
+	    time_after(jiffies,
 		       (rt->u.dst.rate_last +
 			(ip_rt_redirect_load << rt->u.dst.rate_tokens)))) {
 		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
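The extra rate_tokens == 0 test above keeps a stale rate_last value from suppressing the very first redirect for a destination; only subsequent redirects are subject to the exponential back-off. A stand-alone sketch of the resulting check, with time_after() expanded into the jiffies wrap-safe comparison it is defined as; the function and parameter names here are illustrative:

/* Illustration only: the n-th consecutive redirect is delayed by
 * (load << n) ticks, but the first one (n == 0) always goes out.
 */
static int may_send_redirect(unsigned long now, unsigned long rate_last,
			     unsigned int rate_tokens, unsigned long load)
{
	if (rate_tokens == 0)
		return 1;
	/* equivalent to time_after(now, rate_last + (load << rate_tokens)) */
	return (long)(now - (rate_last + (load << rate_tokens))) > 0;
}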
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a1222d6968c4..bf7a22412bcb 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -928,6 +928,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
 		if (tp->md5sig_info->entries4 == 0) {
 			kfree(tp->md5sig_info->keys4);
 			tp->md5sig_info->keys4 = NULL;
+			tp->md5sig_info->alloced4 = 0;
 		} else if (tp->md5sig_info->entries4 != i) {
 			/* Need to do some manipulation */
 			memcpy(&tp->md5sig_info->keys4[i],
@@ -1185,7 +1186,7 @@ done_opts:
 		return 0;
 
 	if (hash_expected && !hash_location) {
-		LIMIT_NETDEBUG(KERN_INFO "MD5 Hash NOT expected but found "
+		LIMIT_NETDEBUG(KERN_INFO "MD5 Hash expected but NOT found "
 			       "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n",
 			       NIPQUAD(iph->saddr), ntohs(th->source),
 			       NIPQUAD(iph->daddr), ntohs(th->dest));
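Clearing alloced4 together with keys4 keeps the MD5 key bookkeeping consistent: once the array is freed, the structure must not keep advertising capacity, or a later key insertion could take the "room already allocated" path and write through the dangling pointer. A generic user-space sketch of the same invariant; the struct and function here are illustrative, not kernel code:

#include <stdlib.h>

struct keypool {
	void   *keys;		/* NULL when empty */
	size_t  entries;	/* keys currently stored */
	size_t  alloced;	/* capacity of the keys array */
};

/* Drop every key and reset *all* bookkeeping, mirroring the
 * keys4/alloced4 fix above; leaving alloced non-zero after free()
 * would let an "append without reallocating" path use freed memory.
 */
static void keypool_drop_all(struct keypool *p)
{
	free(p->keys);
	p->keys = NULL;
	p->entries = 0;
	p->alloced = 0;
}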
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 035915fc9ed3..cfff930f2baf 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -165,11 +165,14 @@ int __udp_lib_get_port(struct sock *sk, unsigned short snum,
 				goto gotit;
 			}
 			size = 0;
-			sk_for_each(sk2, node, head)
-				if (++size < best_size_so_far) {
-					best_size_so_far = size;
-					best = result;
-				}
+			sk_for_each(sk2, node, head) {
+				if (++size >= best_size_so_far)
+					goto next;
+			}
+			best_size_so_far = size;
+			best = result;
+		next:
+			;
 		}
 		result = best;
 		for(i = 0; i < (1 << 16) / UDP_HTABLE_SIZE; i++, result += UDP_HTABLE_SIZE) {
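The old loop updated best/best_size_so_far whenever the running count was still below the best seen so far, which effectively remembered the last bucket visited rather than the emptiest one, and it kept walking long chains to no purpose. The rewritten loop abandons a chain as soon as it is at least as long as the current best, and records the bucket only after the walk completes. A user-space sketch of the same selection logic over plain arrays; the names are illustrative:

#include <stddef.h>

/* Return the index of the shortest chain; chain_len[i] is the number of
 * sockets hashed to bucket i. Mirrors the best/best_size_so_far
 * bookkeeping in __udp_lib_get_port() after the fix.
 */
static size_t pick_emptiest_bucket(const size_t *chain_len, size_t nbuckets)
{
	size_t best = 0;
	size_t best_size_so_far = (size_t)-1;	/* effectively "infinite" */

	for (size_t i = 0; i < nbuckets; i++) {
		if (chain_len[i] >= best_size_so_far)
			continue;	/* cannot beat the current best */
		best_size_so_far = chain_len[i];
		best = i;
	}
	return best;
}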
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index fc3e5eb4bc3f..adcd6131df2a 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -7,7 +7,7 @@ menu "IPv6: Netfilter Configuration (EXPERIMENTAL)"
 
 config NF_CONNTRACK_IPV6
 	tristate "IPv6 connection tracking support (EXPERIMENTAL)"
-	depends on EXPERIMENTAL && NF_CONNTRACK
+	depends on INET && IPV6 && EXPERIMENTAL && NF_CONNTRACK
 	---help---
 	  Connection tracking keeps a record of what packets have passed
 	  through your machine, in order to figure out how they are related
@@ -21,6 +21,7 @@ config NF_CONNTRACK_IPV6
 
 config IP6_NF_QUEUE
 	tristate "IP6 Userspace queueing via NETLINK (OBSOLETE)"
+	depends on INET && IPV6 && NETFILTER && EXPERIMENTAL
 	---help---
 
 	  This option adds a queue handler to the kernel for IPv6
@@ -41,7 +42,7 @@ config IP6_NF_QUEUE
 
 config IP6_NF_IPTABLES
 	tristate "IP6 tables support (required for filtering)"
-	depends on NETFILTER_XTABLES
+	depends on INET && IPV6 && EXPERIMENTAL && NETFILTER_XTABLES
 	help
 	  ip6tables is a general, extensible packet identification framework.
 	  Currently only the packet filtering and packet mangling subsystem
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
index 743b05734a49..4afc75f9e377 100644
--- a/net/netlabel/netlabel_cipso_v4.c
+++ b/net/netlabel/netlabel_cipso_v4.c
@@ -162,6 +162,7 @@ static int netlbl_cipsov4_add_std(struct genl_info *info)
 	struct nlattr *nla_b;
 	int nla_a_rem;
 	int nla_b_rem;
+	u32 iter;
 
 	if (!info->attrs[NLBL_CIPSOV4_A_TAGLST] ||
 	    !info->attrs[NLBL_CIPSOV4_A_MLSLVLLST])
@@ -185,20 +186,31 @@ static int netlbl_cipsov4_add_std(struct genl_info *info)
 	ret_val = netlbl_cipsov4_add_common(info, doi_def);
 	if (ret_val != 0)
 		goto add_std_failure;
+	ret_val = -EINVAL;
 
 	nla_for_each_nested(nla_a,
 			    info->attrs[NLBL_CIPSOV4_A_MLSLVLLST],
 			    nla_a_rem)
 		if (nla_a->nla_type == NLBL_CIPSOV4_A_MLSLVL) {
+			if (nla_validate_nested(nla_a,
+						NLBL_CIPSOV4_A_MAX,
+						netlbl_cipsov4_genl_policy) != 0)
+				goto add_std_failure;
 			nla_for_each_nested(nla_b, nla_a, nla_b_rem)
 				switch (nla_b->nla_type) {
 				case NLBL_CIPSOV4_A_MLSLVLLOC:
+					if (nla_get_u32(nla_b) >
+					    CIPSO_V4_MAX_LOC_LVLS)
+						goto add_std_failure;
 					if (nla_get_u32(nla_b) >=
 					    doi_def->map.std->lvl.local_size)
 						doi_def->map.std->lvl.local_size =
 							nla_get_u32(nla_b) + 1;
 					break;
 				case NLBL_CIPSOV4_A_MLSLVLREM:
+					if (nla_get_u32(nla_b) >
+					    CIPSO_V4_MAX_REM_LVLS)
+						goto add_std_failure;
 					if (nla_get_u32(nla_b) >=
 					    doi_def->map.std->lvl.cipso_size)
 						doi_def->map.std->lvl.cipso_size =
@@ -206,9 +218,6 @@ static int netlbl_cipsov4_add_std(struct genl_info *info)
 					break;
 				}
 		}
-	if (doi_def->map.std->lvl.local_size > CIPSO_V4_MAX_LOC_LVLS ||
-	    doi_def->map.std->lvl.cipso_size > CIPSO_V4_MAX_REM_LVLS)
-		goto add_std_failure;
 	doi_def->map.std->lvl.local = kcalloc(doi_def->map.std->lvl.local_size,
 					      sizeof(u32),
 					      GFP_KERNEL);
@@ -223,6 +232,10 @@ static int netlbl_cipsov4_add_std(struct genl_info *info)
 		ret_val = -ENOMEM;
 		goto add_std_failure;
 	}
+	for (iter = 0; iter < doi_def->map.std->lvl.local_size; iter++)
+		doi_def->map.std->lvl.local[iter] = CIPSO_V4_INV_LVL;
+	for (iter = 0; iter < doi_def->map.std->lvl.cipso_size; iter++)
+		doi_def->map.std->lvl.cipso[iter] = CIPSO_V4_INV_LVL;
 	nla_for_each_nested(nla_a,
 			    info->attrs[NLBL_CIPSOV4_A_MLSLVLLST],
 			    nla_a_rem)
@@ -230,11 +243,6 @@ static int netlbl_cipsov4_add_std(struct genl_info *info)
 			struct nlattr *lvl_loc;
 			struct nlattr *lvl_rem;
 
-			if (nla_validate_nested(nla_a,
-						NLBL_CIPSOV4_A_MAX,
-						netlbl_cipsov4_genl_policy) != 0)
-				goto add_std_failure;
-
 			lvl_loc = nla_find_nested(nla_a,
 						  NLBL_CIPSOV4_A_MLSLVLLOC);
 			lvl_rem = nla_find_nested(nla_a,
@@ -264,12 +272,18 @@ static int netlbl_cipsov4_add_std(struct genl_info *info)
 			nla_for_each_nested(nla_b, nla_a, nla_b_rem)
 				switch (nla_b->nla_type) {
 				case NLBL_CIPSOV4_A_MLSCATLOC:
+					if (nla_get_u32(nla_b) >
+					    CIPSO_V4_MAX_LOC_CATS)
+						goto add_std_failure;
 					if (nla_get_u32(nla_b) >=
 					    doi_def->map.std->cat.local_size)
 						doi_def->map.std->cat.local_size =
 							nla_get_u32(nla_b) + 1;
 					break;
 				case NLBL_CIPSOV4_A_MLSCATREM:
+					if (nla_get_u32(nla_b) >
+					    CIPSO_V4_MAX_REM_CATS)
+						goto add_std_failure;
 					if (nla_get_u32(nla_b) >=
 					    doi_def->map.std->cat.cipso_size)
 						doi_def->map.std->cat.cipso_size =
@@ -277,9 +291,6 @@ static int netlbl_cipsov4_add_std(struct genl_info *info)
 					break;
 				}
 		}
-	if (doi_def->map.std->cat.local_size > CIPSO_V4_MAX_LOC_CATS ||
-	    doi_def->map.std->cat.cipso_size > CIPSO_V4_MAX_REM_CATS)
-		goto add_std_failure;
 	doi_def->map.std->cat.local = kcalloc(
 				      doi_def->map.std->cat.local_size,
 				      sizeof(u32),
@@ -296,6 +307,10 @@ static int netlbl_cipsov4_add_std(struct genl_info *info)
 		ret_val = -ENOMEM;
 		goto add_std_failure;
 	}
+	for (iter = 0; iter < doi_def->map.std->cat.local_size; iter++)
+		doi_def->map.std->cat.local[iter] = CIPSO_V4_INV_CAT;
+	for (iter = 0; iter < doi_def->map.std->cat.cipso_size; iter++)
+		doi_def->map.std->cat.cipso[iter] = CIPSO_V4_INV_CAT;
 	nla_for_each_nested(nla_a,
 			    info->attrs[NLBL_CIPSOV4_A_MLSCATLST],
 			    nla_a_rem)
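Two related changes run through the netlabel hunks above: the per-value bounds checks now happen while the level and category array sizes are being computed, so an oversized value can no longer inflate the kcalloc() size before being rejected, and the freshly allocated translation arrays are pre-filled with an invalid-marker value so that slots the netlink request never mentions can be told apart from a genuine mapping of 0. A small stand-alone sketch of the sentinel idea; INV_LVL is a stand-in for the kernel's CIPSO_V4_INV_LVL:

#include <stdlib.h>

#define INV_LVL 0xffffffffU	/* stand-in for CIPSO_V4_INV_LVL */

/* Allocate a local->CIPSO level map whose entries all start out
 * "unmapped"; callers then fill in only the levels the DOI defines,
 * and lookups can reject anything still equal to INV_LVL.
 */
static unsigned int *alloc_level_map(size_t size)
{
	unsigned int *map = calloc(size, sizeof(*map));

	if (map) {
		for (size_t i = 0; i < size; i++)
			map[i] = INV_LVL;
	}
	return map;
}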
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 1d50f801f181..43bbe2c9e49a 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1377,6 +1377,15 @@ static struct notifier_block nr_dev_notifier = {
 
 static struct net_device **dev_nr;
 
+static struct ax25_protocol nr_pid = {
+	.pid = AX25_P_NETROM,
+	.func = nr_route_frame
+};
+
+static struct ax25_linkfail nr_linkfail_notifier = {
+	.func = nr_link_failed,
+};
+
 static int __init nr_proto_init(void)
 {
 	int i;
@@ -1424,8 +1433,8 @@ static int __init nr_proto_init(void)
 
 	register_netdevice_notifier(&nr_dev_notifier);
 
-	ax25_protocol_register(AX25_P_NETROM, nr_route_frame);
-	ax25_linkfail_register(nr_link_failed);
+	ax25_register_pid(&nr_pid);
+	ax25_linkfail_register(&nr_linkfail_notifier);
 
 #ifdef CONFIG_SYSCTL
 	nr_register_sysctl();
@@ -1474,7 +1483,7 @@ static void __exit nr_exit(void)
 	nr_unregister_sysctl();
 #endif
 
-	ax25_linkfail_release(nr_link_failed);
+	ax25_linkfail_release(&nr_linkfail_notifier);
 	ax25_protocol_release(AX25_P_NETROM);
 
 	unregister_netdevice_notifier(&nr_dev_notifier);
diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c
index 9b8eb54971ab..4700d5225b78 100644
--- a/net/netrom/nr_dev.c
+++ b/net/netrom/nr_dev.c
@@ -128,25 +128,37 @@ static int nr_header(struct sk_buff *skb, struct net_device *dev, unsigned short
 	return -37;
 }
 
-static int nr_set_mac_address(struct net_device *dev, void *addr)
+static int __must_check nr_set_mac_address(struct net_device *dev, void *addr)
 {
 	struct sockaddr *sa = addr;
+	int err;
+
+	if (!memcmp(dev->dev_addr, sa->sa_data, dev->addr_len))
+		return 0;
+
+	if (dev->flags & IFF_UP) {
+		err = ax25_listen_register((ax25_address *)sa->sa_data, NULL);
+		if (err)
+			return err;
 
-	if (dev->flags & IFF_UP)
 		ax25_listen_release((ax25_address *)dev->dev_addr, NULL);
+	}
 
 	memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
 
-	if (dev->flags & IFF_UP)
-		ax25_listen_register((ax25_address *)dev->dev_addr, NULL);
-
 	return 0;
 }
 
 static int nr_open(struct net_device *dev)
 {
+	int err;
+
+	err = ax25_listen_register((ax25_address *)dev->dev_addr, NULL);
+	if (err)
+		return err;
+
 	netif_start_queue(dev);
-	ax25_listen_register((ax25_address *)dev->dev_addr, NULL);
+
 	return 0;
 }
 
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 0096105bcd47..8f88964099ef 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -87,8 +87,9 @@ static void nr_remove_neigh(struct nr_neigh *);
  * Add a new route to a node, and in the process add the node and the
  * neighbour if it is new.
  */
-static int nr_add_node(ax25_address *nr, const char *mnemonic, ax25_address *ax25,
-	ax25_digi *ax25_digi, struct net_device *dev, int quality, int obs_count)
+static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
+	ax25_address *ax25, ax25_digi *ax25_digi, struct net_device *dev,
+	int quality, int obs_count)
 {
 	struct nr_node *nr_node;
 	struct nr_neigh *nr_neigh;
@@ -406,7 +407,8 @@ static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct n
 /*
  * Lock a neighbour with a quality.
  */
-static int nr_add_neigh(ax25_address *callsign, ax25_digi *ax25_digi, struct net_device *dev, unsigned int quality)
+static int __must_check nr_add_neigh(ax25_address *callsign,
+	ax25_digi *ax25_digi, struct net_device *dev, unsigned int quality)
 {
 	struct nr_neigh *nr_neigh;
 
@@ -777,9 +779,13 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
 	nr_src  = (ax25_address *)(skb->data + 0);
 	nr_dest = (ax25_address *)(skb->data + 7);
 
-	if (ax25 != NULL)
-		nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat,
-			ax25->ax25_dev->dev, 0, sysctl_netrom_obsolescence_count_initialiser);
+	if (ax25 != NULL) {
+		ret = nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat,
+				  ax25->ax25_dev->dev, 0,
+				  sysctl_netrom_obsolescence_count_initialiser);
+		if (ret)
+			return ret;
+	}
 
 	if ((dev = nr_dev_get(nr_dest)) != NULL) {	/* Its for me */
 		if (ax25 == NULL)			/* Its from me */
@@ -844,6 +850,7 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
 	ret = (nr_neigh->ax25 != NULL);
 	nr_node_unlock(nr_node);
 	nr_node_put(nr_node);
+
 	return ret;
 }
 
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 08a542855654..9e279464c9d1 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1314,7 +1314,8 @@ static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 		if (copy_from_user(&rose_callsign, argp, sizeof(ax25_address)))
 			return -EFAULT;
 		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
-			ax25_listen_register(&rose_callsign, NULL);
+			return ax25_listen_register(&rose_callsign, NULL);
+
 		return 0;
 
 	case SIOCRSGL2CALL:
@@ -1481,6 +1482,15 @@ static struct notifier_block rose_dev_notifier = {
 
 static struct net_device **dev_rose;
 
+static struct ax25_protocol rose_pid = {
+	.pid = AX25_P_ROSE,
+	.func = rose_route_frame
+};
+
+static struct ax25_linkfail rose_linkfail_notifier = {
+	.func = rose_link_failed
+};
+
 static int __init rose_proto_init(void)
 {
 	int i;
@@ -1530,8 +1540,8 @@ static int __init rose_proto_init(void)
 	sock_register(&rose_family_ops);
 	register_netdevice_notifier(&rose_dev_notifier);
 
-	ax25_protocol_register(AX25_P_ROSE, rose_route_frame);
-	ax25_linkfail_register(rose_link_failed);
+	ax25_register_pid(&rose_pid);
+	ax25_linkfail_register(&rose_linkfail_notifier);
 
 #ifdef CONFIG_SYSCTL
 	rose_register_sysctl();
@@ -1579,7 +1589,7 @@ static void __exit rose_exit(void)
 	rose_rt_free();
 
 	ax25_protocol_release(AX25_P_ROSE);
-	ax25_linkfail_release(rose_link_failed);
+	ax25_linkfail_release(&rose_linkfail_notifier);
 
 	if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
 		ax25_listen_release(&rose_callsign, NULL);
diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c
index 7c279e2659ec..50824d345fa6 100644
--- a/net/rose/rose_dev.c
+++ b/net/rose/rose_dev.c
@@ -93,20 +93,34 @@ static int rose_rebuild_header(struct sk_buff *skb)
 static int rose_set_mac_address(struct net_device *dev, void *addr)
 {
 	struct sockaddr *sa = addr;
+	int err;
 
-	rose_del_loopback_node((rose_address *)dev->dev_addr);
+	if (!memcpy(dev->dev_addr, sa->sa_data, dev->addr_len))
+		return 0;
 
-	memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+	if (dev->flags & IFF_UP) {
+		err = rose_add_loopback_node((rose_address *)dev->dev_addr);
+		if (err)
+			return err;
+
+		rose_del_loopback_node((rose_address *)dev->dev_addr);
+	}
 
-	rose_add_loopback_node((rose_address *)dev->dev_addr);
+	memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
 
 	return 0;
 }
 
 static int rose_open(struct net_device *dev)
 {
+	int err;
+
+	err = rose_add_loopback_node((rose_address *)dev->dev_addr);
+	if (err)
+		return err;
+
 	netif_start_queue(dev);
-	rose_add_loopback_node((rose_address *)dev->dev_addr);
+
 	return 0;
 }
 
diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
index 103b4d38f88a..3e41bd93ab9f 100644
--- a/net/rose/rose_loopback.c
+++ b/net/rose/rose_loopback.c
@@ -79,7 +79,8 @@ static void rose_loopback_timer(unsigned long param)
 
 		skb->h.raw = skb->data;
 
-		if ((sk = rose_find_socket(lci_o, rose_loopback_neigh)) != NULL) {
+		sk = rose_find_socket(lci_o, &rose_loopback_neigh);
+		if (sk) {
 			if (rose_process_rx_frame(sk, skb) == 0)
 				kfree_skb(skb);
 			continue;
@@ -87,7 +88,7 @@ static void rose_loopback_timer(unsigned long param)
 
 		if (frametype == ROSE_CALL_REQUEST) {
 			if ((dev = rose_dev_get(dest)) != NULL) {
-				if (rose_rx_call_request(skb, dev, rose_loopback_neigh, lci_o) == 0)
+				if (rose_rx_call_request(skb, dev, &rose_loopback_neigh, lci_o) == 0)
 					kfree_skb(skb);
 			} else {
 				kfree_skb(skb);
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 7252344779a0..8028c0d425dc 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -46,13 +46,13 @@ static DEFINE_SPINLOCK(rose_neigh_list_lock);
 static struct rose_route *rose_route_list;
 static DEFINE_SPINLOCK(rose_route_list_lock);
 
-struct rose_neigh *rose_loopback_neigh;
+struct rose_neigh rose_loopback_neigh;
 
 /*
  * Add a new route to a node, and in the process add the node and the
  * neighbour if it is new.
  */
-static int rose_add_node(struct rose_route_struct *rose_route,
+static int __must_check rose_add_node(struct rose_route_struct *rose_route,
 	struct net_device *dev)
 {
 	struct rose_node *rose_node, *rose_tmpn, *rose_tmpp;
@@ -361,33 +361,30 @@ out:
 /*
  * Add the loopback neighbour.
  */
-int rose_add_loopback_neigh(void)
+void rose_add_loopback_neigh(void)
 {
-	if ((rose_loopback_neigh = kmalloc(sizeof(struct rose_neigh), GFP_ATOMIC)) == NULL)
-		return -ENOMEM;
+	struct rose_neigh *sn = &rose_loopback_neigh;
 
-	rose_loopback_neigh->callsign = null_ax25_address;
-	rose_loopback_neigh->digipeat = NULL;
-	rose_loopback_neigh->ax25 = NULL;
-	rose_loopback_neigh->dev = NULL;
-	rose_loopback_neigh->count = 0;
-	rose_loopback_neigh->use = 0;
-	rose_loopback_neigh->dce_mode = 1;
-	rose_loopback_neigh->loopback = 1;
-	rose_loopback_neigh->number = rose_neigh_no++;
-	rose_loopback_neigh->restarted = 1;
+	sn->callsign = null_ax25_address;
+	sn->digipeat = NULL;
+	sn->ax25 = NULL;
+	sn->dev = NULL;
+	sn->count = 0;
+	sn->use = 0;
+	sn->dce_mode = 1;
+	sn->loopback = 1;
+	sn->number = rose_neigh_no++;
+	sn->restarted = 1;
 
-	skb_queue_head_init(&rose_loopback_neigh->queue);
+	skb_queue_head_init(&sn->queue);
 
-	init_timer(&rose_loopback_neigh->ftimer);
-	init_timer(&rose_loopback_neigh->t0timer);
+	init_timer(&sn->ftimer);
+	init_timer(&sn->t0timer);
 
 	spin_lock_bh(&rose_neigh_list_lock);
-	rose_loopback_neigh->next = rose_neigh_list;
-	rose_neigh_list = rose_loopback_neigh;
+	sn->next = rose_neigh_list;
+	rose_neigh_list = sn;
 	spin_unlock_bh(&rose_neigh_list_lock);
-
-	return 0;
 }
 
 /*
@@ -421,13 +418,13 @@ int rose_add_loopback_node(rose_address *address)
 	rose_node->mask = 10;
 	rose_node->count = 1;
 	rose_node->loopback = 1;
-	rose_node->neighbour[0] = rose_loopback_neigh;
+	rose_node->neighbour[0] = &rose_loopback_neigh;
 
 	/* Insert at the head of list. Address is always mask=10 */
 	rose_node->next = rose_node_list;
 	rose_node_list = rose_node;
 
-	rose_loopback_neigh->count++;
+	rose_loopback_neigh.count++;
 
 out:
 	spin_unlock_bh(&rose_node_list_lock);
@@ -458,7 +455,7 @@ void rose_del_loopback_node(rose_address *address)
 
 	rose_remove_node(rose_node);
 
-	rose_loopback_neigh->count--;
+	rose_loopback_neigh.count--;
 
 out:
 	spin_unlock_bh(&rose_node_list_lock);
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index d8d36dee5ab6..ef36be073a13 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -79,8 +79,8 @@
 #include <asm/uaccess.h>
 
 /* Event handler for inet6 address addition/deletion events. */
-int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
-			 void *ptr)
+static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
+				void *ptr)
 {
 	struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
 	struct sctp_sockaddr_entry *addr;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 3a3db56729ce..225f39b5d595 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -601,8 +601,8 @@ static void sctp_v4_seq_dump_addr(struct seq_file *seq, union sctp_addr *addr)
 }
 
 /* Event handler for inet address addition/deletion events. */
-int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
-			void *ptr)
+static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
+			       void *ptr)
 {
 	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
 	struct sctp_sockaddr_entry *addr;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 30927d3a597f..f0bbe36799cf 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -184,7 +184,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
 	struct sctp_sock *sp;
 	sctp_supported_addrs_param_t sat;
 	__be16 types[2];
-	sctp_adaption_ind_param_t aiparam;
+	sctp_adaptation_ind_param_t aiparam;
 
 	/* RFC 2960 3.3.2 Initiation (INIT) (1)
 	 *
@@ -249,9 +249,9 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
 	sctp_addto_chunk(retval, sizeof(ecap_param), &ecap_param);
 	if (sctp_prsctp_enable)
 		sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param);
-	aiparam.param_hdr.type = SCTP_PARAM_ADAPTION_LAYER_IND;
+	aiparam.param_hdr.type = SCTP_PARAM_ADAPTATION_LAYER_IND;
 	aiparam.param_hdr.length = htons(sizeof(aiparam));
-	aiparam.adaption_ind = htonl(sp->adaption_ind);
+	aiparam.adaptation_ind = htonl(sp->adaptation_ind);
 	sctp_addto_chunk(retval, sizeof(aiparam), &aiparam);
 nodata:
 	kfree(addrs.v);
@@ -269,7 +269,7 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
 	sctp_cookie_param_t *cookie;
 	int cookie_len;
 	size_t chunksize;
-	sctp_adaption_ind_param_t aiparam;
+	sctp_adaptation_ind_param_t aiparam;
 
 	retval = NULL;
 
@@ -323,9 +323,9 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
 	if (asoc->peer.prsctp_capable)
 		sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param);
 
-	aiparam.param_hdr.type = SCTP_PARAM_ADAPTION_LAYER_IND;
+	aiparam.param_hdr.type = SCTP_PARAM_ADAPTATION_LAYER_IND;
 	aiparam.param_hdr.length = htons(sizeof(aiparam));
-	aiparam.adaption_ind = htonl(sctp_sk(asoc->base.sk)->adaption_ind);
+	aiparam.adaptation_ind = htonl(sctp_sk(asoc->base.sk)->adaptation_ind);
 	sctp_addto_chunk(retval, sizeof(aiparam), &aiparam);
 
 	/* We need to remove the const qualifier at this point. */
@@ -1300,8 +1300,8 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
 	/* Remember PR-SCTP capability. */
 	cookie->c.prsctp_capable = asoc->peer.prsctp_capable;
 
-	/* Save adaption indication in the cookie. */
-	cookie->c.adaption_ind = asoc->peer.adaption_ind;
+	/* Save adaptation indication in the cookie. */
+	cookie->c.adaptation_ind = asoc->peer.adaptation_ind;
 
 	/* Set an expiration time for the cookie. */
 	do_gettimeofday(&cookie->c.expiration);
@@ -1512,7 +1512,7 @@ no_hmac:
 	retval->addip_serial = retval->c.initial_tsn;
 	retval->adv_peer_ack_point = retval->ctsn_ack_point;
 	retval->peer.prsctp_capable = retval->c.prsctp_capable;
-	retval->peer.adaption_ind = retval->c.adaption_ind;
+	retval->peer.adaptation_ind = retval->c.adaptation_ind;
 
 	/* The INIT stuff will be done by the side effects. */
 	return retval;
@@ -1743,7 +1743,7 @@ static int sctp_verify_param(const struct sctp_association *asoc,
 	case SCTP_PARAM_HEARTBEAT_INFO:
 	case SCTP_PARAM_UNRECOGNIZED_PARAMETERS:
 	case SCTP_PARAM_ECN_CAPABLE:
-	case SCTP_PARAM_ADAPTION_LAYER_IND:
+	case SCTP_PARAM_ADAPTATION_LAYER_IND:
 		break;
 
 	case SCTP_PARAM_HOST_NAME_ADDRESS:
@@ -2098,8 +2098,8 @@ static int sctp_process_param(struct sctp_association *asoc,
 		asoc->peer.ecn_capable = 1;
 		break;
 
-	case SCTP_PARAM_ADAPTION_LAYER_IND:
-		asoc->peer.adaption_ind = param.aind->adaption_ind;
+	case SCTP_PARAM_ADAPTATION_LAYER_IND:
+		asoc->peer.adaptation_ind = param.aind->adaptation_ind;
 		break;
 
 	case SCTP_PARAM_FWD_TSN_SUPPORT:
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 27cc444aaf11..aa51d190bfb2 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -688,12 +688,12 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
 		goto nomem_ev;
 
 	/* Sockets API Draft Section 5.3.1.6
-	 * When a peer sends a Adaption Layer Indication parameter , SCTP
+	 * When a peer sends a Adaptation Layer Indication parameter , SCTP
 	 * delivers this notification to inform the application that of the
-	 * peers requested adaption layer.
+	 * peers requested adaptation layer.
 	 */
-	if (new_asoc->peer.adaption_ind) {
-		ai_ev = sctp_ulpevent_make_adaption_indication(new_asoc,
+	if (new_asoc->peer.adaptation_ind) {
+		ai_ev = sctp_ulpevent_make_adaptation_indication(new_asoc,
 							    GFP_ATOMIC);
 		if (!ai_ev)
 			goto nomem_aiev;
@@ -820,12 +820,12 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(const struct sctp_endpoint *ep,
 	sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
 
 	/* Sockets API Draft Section 5.3.1.6
-	 * When a peer sends a Adaption Layer Indication parameter , SCTP
+	 * When a peer sends a Adaptation Layer Indication parameter , SCTP
 	 * delivers this notification to inform the application that of the
-	 * peers requested adaption layer.
+	 * peers requested adaptation layer.
 	 */
-	if (asoc->peer.adaption_ind) {
-		ev = sctp_ulpevent_make_adaption_indication(asoc, GFP_ATOMIC);
+	if (asoc->peer.adaptation_ind) {
+		ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);
 		if (!ev)
 			goto nomem;
 
@@ -1698,12 +1698,12 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(const struct sctp_endpoint *ep,
 	sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
 
 	/* Sockets API Draft Section 5.3.1.6
-	 * When a peer sends a Adaption Layer Indication parameter , SCTP
+	 * When a peer sends a Adaptation Layer Indication parameter , SCTP
 	 * delivers this notification to inform the application that of the
-	 * peers requested adaption layer.
+	 * peers requested adaptation layer.
 	 */
-	if (asoc->peer.adaption_ind) {
-		ev = sctp_ulpevent_make_adaption_indication(asoc, GFP_ATOMIC);
+	if (asoc->peer.adaptation_ind) {
+		ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);
 		if (!ev)
 			goto nomem_ev;
 
@@ -1791,12 +1791,12 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const struct sctp_endpoint *ep,
 			goto nomem;
 
 		/* Sockets API Draft Section 5.3.1.6
-		 * When a peer sends a Adaption Layer Indication parameter,
+		 * When a peer sends a Adaptation Layer Indication parameter,
 		 * SCTP delivers this notification to inform the application
-		 * that of the peers requested adaption layer.
+		 * that of the peers requested adaptation layer.
 		 */
-		if (asoc->peer.adaption_ind) {
-			ai_ev = sctp_ulpevent_make_adaption_indication(asoc,
+		if (asoc->peer.adaptation_ind) {
+			ai_ev = sctp_ulpevent_make_adaptation_indication(asoc,
 							    GFP_ATOMIC);
 			if (!ai_ev)
 				goto nomem;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index bdd8bd428b64..388d0fb1a377 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2731,17 +2731,17 @@ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optva
 	return err;
 }
 
-static int sctp_setsockopt_adaption_layer(struct sock *sk, char __user *optval,
+static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval,
 					  int optlen)
 {
-	struct sctp_setadaption adaption;
+	struct sctp_setadaptation adaptation;
 
-	if (optlen != sizeof(struct sctp_setadaption))
+	if (optlen != sizeof(struct sctp_setadaptation))
 		return -EINVAL;
-	if (copy_from_user(&adaption, optval, optlen))
+	if (copy_from_user(&adaptation, optval, optlen))
 		return -EFAULT;
 
-	sctp_sk(sk)->adaption_ind = adaption.ssb_adaption_ind;
+	sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind;
 
 	return 0;
 }
@@ -2894,8 +2894,8 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
 	case SCTP_MAXSEG:
 		retval = sctp_setsockopt_maxseg(sk, optval, optlen);
 		break;
-	case SCTP_ADAPTION_LAYER:
-		retval = sctp_setsockopt_adaption_layer(sk, optval, optlen);
+	case SCTP_ADAPTATION_LAYER:
+		retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen);
 		break;
 	case SCTP_CONTEXT:
 		retval = sctp_setsockopt_context(sk, optval, optlen);
@@ -3123,7 +3123,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
 	/* User specified fragmentation limit. */
 	sp->user_frag = 0;
 
-	sp->adaption_ind = 0;
+	sp->adaptation_ind = 0;
 
 	sp->pf = sctp_get_pf_specific(sk->sk_family);
 
@@ -4210,21 +4210,21 @@ static int sctp_getsockopt_primary_addr(struct sock *sk, int len,
 }
 
 /*
- * 7.1.11 Set Adaption Layer Indicator (SCTP_ADAPTION_LAYER)
+ * 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER)
  *
- * Requests that the local endpoint set the specified Adaption Layer
+ * Requests that the local endpoint set the specified Adaptation Layer
  * Indication parameter for all future INIT and INIT-ACK exchanges.
  */
-static int sctp_getsockopt_adaption_layer(struct sock *sk, int len,
+static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len,
 				  char __user *optval, int __user *optlen)
 {
-	struct sctp_setadaption adaption;
+	struct sctp_setadaptation adaptation;
 
-	if (len != sizeof(struct sctp_setadaption))
+	if (len != sizeof(struct sctp_setadaptation))
 		return -EINVAL;
 
-	adaption.ssb_adaption_ind = sctp_sk(sk)->adaption_ind;
-	if (copy_to_user(optval, &adaption, len))
+	adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind;
+	if (copy_to_user(optval, &adaptation, len))
 		return -EFAULT;
 
 	return 0;
@@ -4635,8 +4635,8 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
 		retval = sctp_getsockopt_peer_addr_info(sk, len, optval,
 							optlen);
 		break;
-	case SCTP_ADAPTION_LAYER:
-		retval = sctp_getsockopt_adaption_layer(sk, len, optval,
+	case SCTP_ADAPTATION_LAYER:
+		retval = sctp_getsockopt_adaptation_layer(sk, len, optval,
 							optlen);
 		break;
 	case SCTP_CONTEXT:
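The adaption -> adaptation rename is visible to user space through the socket option and its structure. A hedged user-space sketch of setting the adaptation-layer indication using only the names that appear in this diff (struct sctp_setadaptation, ssb_adaptation_ind, SCTP_ADAPTATION_LAYER); whether a given <netinet/sctp.h> exposes them depends on the installed headers:

#include <netinet/in.h>
#include <netinet/sctp.h>
#include <sys/socket.h>

/* Set the 32-bit adaptation-layer indication advertised in INIT/INIT-ACK. */
static int set_adaptation_ind(int sd, unsigned int ind)
{
	struct sctp_setadaptation param = {
		.ssb_adaptation_ind = ind,
	};

	return setsockopt(sd, IPPROTO_SCTP, SCTP_ADAPTATION_LAYER,
			  &param, sizeof(param));
}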
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 93ac63b055ba..445e07a7ac4b 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -609,31 +609,31 @@ fail:
 	return NULL;
 }
 
-/* Create and initialize a SCTP_ADAPTION_INDICATION notification.
+/* Create and initialize a SCTP_ADAPTATION_INDICATION notification.
  *
  * Socket Extensions for SCTP
- * 5.3.1.6 SCTP_ADAPTION_INDICATION
+ * 5.3.1.6 SCTP_ADAPTATION_INDICATION
  */
-struct sctp_ulpevent *sctp_ulpevent_make_adaption_indication(
+struct sctp_ulpevent *sctp_ulpevent_make_adaptation_indication(
 	const struct sctp_association *asoc, gfp_t gfp)
 {
 	struct sctp_ulpevent *event;
-	struct sctp_adaption_event *sai;
+	struct sctp_adaptation_event *sai;
 	struct sk_buff *skb;
 
-	event = sctp_ulpevent_new(sizeof(struct sctp_adaption_event),
+	event = sctp_ulpevent_new(sizeof(struct sctp_adaptation_event),
 				  MSG_NOTIFICATION, gfp);
 	if (!event)
 		goto fail;
 
 	skb = sctp_event2skb(event);
-	sai = (struct sctp_adaption_event *)
-		skb_put(skb, sizeof(struct sctp_adaption_event));
+	sai = (struct sctp_adaptation_event *)
+		skb_put(skb, sizeof(struct sctp_adaptation_event));
 
-	sai->sai_type = SCTP_ADAPTION_INDICATION;
+	sai->sai_type = SCTP_ADAPTATION_INDICATION;
 	sai->sai_flags = 0;
-	sai->sai_length = sizeof(struct sctp_adaption_event);
-	sai->sai_adaption_ind = asoc->peer.adaption_ind;
+	sai->sai_length = sizeof(struct sctp_adaptation_event);
+	sai->sai_adaptation_ind = asoc->peer.adaptation_ind;
 	sctp_ulpevent_set_owner(event, asoc);
 	sai->sai_assoc_id = sctp_assoc2id(asoc);
 
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
index 41465072d0b5..8ef3f1c19435 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c
@@ -228,7 +228,7 @@ static int __init init_spkm3_module(void)
 	status = gss_mech_register(&gss_spkm3_mech);
 	if (status)
 		printk("Failed to register spkm3 gss mechanism!\n");
-	return 0;
+	return status;
 }
 
 static void __exit cleanup_spkm3_module(void)