author     Ralf Baechle <ralf@linux-mips.org>            2006-12-14 18:51:23 -0500
committer  David S. Miller <davem@sunset.davemloft.net>  2006-12-18 00:59:11 -0500
commit     a4282717c102aef2bfab1d947c392de4d8abc0ec (patch)
tree       ef178b6eea8cd9a8790ae58a6c19ff2aa877bcc1 /net/ax25
parent     58bc57471514be9206ebcda90b1076f6be41d1c7 (diff)
[AX.25]: Fix unchecked ax25_linkfail_register uses
ax25_linkfail_register uses kmalloc and the callers were ignoring the
error value. Rewrite to let the caller deal with the allocation. This
allows static allocation of the structure and gets rid of the kmalloc use entirely.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
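
The diffstat below is limited to net/ax25, so the companion change to include/net/ax25.h is not shown here. Judging from the lf->lf_node and lf->func accesses in the patch, the new struct ax25_linkfail presumably looks like the following sketch (inferred from the diff, not quoted from the patch):

/* Sketch of the registration descriptor implied by this patch; the
 * real definition lives in include/net/ax25.h, outside this diffstat. */
struct ax25_linkfail {
        struct hlist_node lf_node;      /* linked into ax25_linkfail_list */
        void (*func)(ax25_cb *, int);   /* invoked by ax25_link_failed() */
};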
Diffstat (limited to 'net/ax25')
-rw-r--r--   net/ax25/ax25_iface.c   55
1 file changed, 9 insertions(+), 46 deletions(-)
diff --git a/net/ax25/ax25_iface.c b/net/ax25/ax25_iface.c
index 51e293420b7f..aff3e652c2d1 100644
--- a/net/ax25/ax25_iface.c
+++ b/net/ax25/ax25_iface.c
@@ -32,10 +32,7 @@
 static struct ax25_protocol *protocol_list;
 static DEFINE_RWLOCK(protocol_list_lock);
 
-static struct linkfail_struct {
-        struct linkfail_struct *next;
-        void (*func)(ax25_cb *, int);
-} *linkfail_list = NULL;
+static HLIST_HEAD(ax25_linkfail_list);
 static DEFINE_SPINLOCK(linkfail_lock);
 
 static struct listen_struct {
@@ -93,54 +90,19 @@ void ax25_protocol_release(unsigned int pid)
 
 EXPORT_SYMBOL(ax25_protocol_release);
 
-int ax25_linkfail_register(void (*func)(ax25_cb *, int))
+void ax25_linkfail_register(struct ax25_linkfail *lf)
 {
-        struct linkfail_struct *linkfail;
-
-        if ((linkfail = kmalloc(sizeof(*linkfail), GFP_ATOMIC)) == NULL)
-                return 0;
-
-        linkfail->func = func;
-
         spin_lock_bh(&linkfail_lock);
-        linkfail->next = linkfail_list;
-        linkfail_list = linkfail;
+        hlist_add_head(&lf->lf_node, &ax25_linkfail_list);
         spin_unlock_bh(&linkfail_lock);
-
-        return 1;
 }
 
 EXPORT_SYMBOL(ax25_linkfail_register);
 
-void ax25_linkfail_release(void (*func)(ax25_cb *, int))
+void ax25_linkfail_release(struct ax25_linkfail *lf)
 {
-        struct linkfail_struct *s, *linkfail;
-
         spin_lock_bh(&linkfail_lock);
-        linkfail = linkfail_list;
-        if (linkfail == NULL) {
-                spin_unlock_bh(&linkfail_lock);
-                return;
-        }
-
-        if (linkfail->func == func) {
-                linkfail_list = linkfail->next;
-                spin_unlock_bh(&linkfail_lock);
-                kfree(linkfail);
-                return;
-        }
-
-        while (linkfail != NULL && linkfail->next != NULL) {
-                if (linkfail->next->func == func) {
-                        s = linkfail->next;
-                        linkfail->next = linkfail->next->next;
-                        spin_unlock_bh(&linkfail_lock);
-                        kfree(s);
-                        return;
-                }
-
-                linkfail = linkfail->next;
-        }
+        hlist_del_init(&lf->lf_node);
         spin_unlock_bh(&linkfail_lock);
 }
 
@@ -237,11 +199,12 @@ int ax25_listen_mine(ax25_address *callsign, struct net_device *dev)
 
 void ax25_link_failed(ax25_cb *ax25, int reason)
 {
-        struct linkfail_struct *linkfail;
+        struct ax25_linkfail *lf;
+        struct hlist_node *node;
 
         spin_lock_bh(&linkfail_lock);
-        for (linkfail = linkfail_list; linkfail != NULL; linkfail = linkfail->next)
-                (linkfail->func)(ax25, reason);
+        hlist_for_each_entry(lf, node, &ax25_linkfail_list, lf_node)
+                lf->func(ax25, reason);
         spin_unlock_bh(&linkfail_lock);
 }
 
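
For context, a caller-side conversion would look roughly like the sketch below. The nr_* and linkfail_example_* names are illustrative only and are not part of this net/ax25 diff; the point is that the notifier object is allocated statically by the caller, so registration can no longer fail and there is no kmalloc() return value left to ignore.

#include <linux/init.h>
#include <linux/module.h>
#include <net/ax25.h>

/* Illustrative callback: react to a failed AX.25 link. */
static void nr_link_failed(ax25_cb *ax25, int reason)
{
        /* e.g. invalidate routes that were using this connection */
}

/* Statically allocated descriptor: no kmalloc(), hence no error path. */
static struct ax25_linkfail nr_linkfail_notifier = {
        .func = nr_link_failed,
};

static int __init linkfail_example_init(void)
{
        /* The old int-returning ax25_linkfail_register() could fail and
         * callers ignored that; the new void version simply links the
         * caller-owned object into ax25_linkfail_list. */
        ax25_linkfail_register(&nr_linkfail_notifier);
        return 0;
}

static void __exit linkfail_example_exit(void)
{
        ax25_linkfail_release(&nr_linkfail_notifier);
}

module_init(linkfail_example_init);
module_exit(linkfail_example_exit);
MODULE_LICENSE("GPL");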