Diffstat (limited to 'net/core/sock.c')
-rw-r--r--   net/core/sock.c   93
1 files changed, 93 insertions, 0 deletions
diff --git a/net/core/sock.c b/net/core/sock.c
index e110b900414..a96ea7dd0fc 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -187,6 +187,99 @@ static void sock_disable_timestamp(struct sock *sk)
 }
 
 
+int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+        int err = 0;
+        int skb_len;
+
+        /* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
+           number of warnings when compiling with -W --ANK
+         */
+        if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+            (unsigned)sk->sk_rcvbuf) {
+                err = -ENOMEM;
+                goto out;
+        }
+
+        /* It would be deadlock, if sock_queue_rcv_skb is used
+           with socket lock! We assume that users of this
+           function are lock free.
+        */
+        err = sk_filter(sk, skb, 1);
+        if (err)
+                goto out;
+
+        skb->dev = NULL;
+        skb_set_owner_r(skb, sk);
+
+        /* Cache the SKB length before we tack it onto the receive
+         * queue. Once it is added it no longer belongs to us and
+         * may be freed by other threads of control pulling packets
+         * from the queue.
+         */
+        skb_len = skb->len;
+
+        skb_queue_tail(&sk->sk_receive_queue, skb);
+
+        if (!sock_flag(sk, SOCK_DEAD))
+                sk->sk_data_ready(sk, skb_len);
+out:
+        return err;
+}
+EXPORT_SYMBOL(sock_queue_rcv_skb);
+
+int sk_receive_skb(struct sock *sk, struct sk_buff *skb)
+{
+        int rc = NET_RX_SUCCESS;
+
+        if (sk_filter(sk, skb, 0))
+                goto discard_and_relse;
+
+        skb->dev = NULL;
+
+        bh_lock_sock(sk);
+        if (!sock_owned_by_user(sk))
+                rc = sk->sk_backlog_rcv(sk, skb);
+        else
+                sk_add_backlog(sk, skb);
+        bh_unlock_sock(sk);
+out:
+        sock_put(sk);
+        return rc;
+discard_and_relse:
+        kfree_skb(skb);
+        goto out;
+}
+EXPORT_SYMBOL(sk_receive_skb);
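As a reading aid only (not part of the patch), here is a minimal sketch of how a protocol might drive the two new receive helpers, modelled on how datagram protocols typically use them; the myproto_* names are hypothetical:

#include <linux/netdevice.h>
#include <net/sock.h>

/* Hypothetical backlog receiver, installed as the socket's
 * sk_backlog_rcv callback (normally via the protocol's proto
 * structure).  It charges the skb to the socket, runs the socket
 * filter and queues it on sk_receive_queue via sock_queue_rcv_skb().
 */
static int myproto_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
        if (sock_queue_rcv_skb(sk, skb) < 0) {
                /* sock_queue_rcv_skb() does not free the skb on
                 * failure (rcvbuf overflow or filter reject), so the
                 * caller must.
                 */
                kfree_skb(skb);
                return NET_RX_DROP;
        }
        return NET_RX_SUCCESS;
}

/* Hypothetical softirq-side entry point, called once the owning
 * socket has been looked up with a reference held; sk_receive_skb()
 * drops that reference with sock_put().  It runs the backlog
 * receiver directly when the socket is not owned by a user context,
 * otherwise it defers the skb to the socket backlog.
 */
static int myproto_do_rcv(struct sock *sk, struct sk_buff *skb)
{
        return sk_receive_skb(sk, skb);
}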
+
+struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
+{
+        struct dst_entry *dst = sk->sk_dst_cache;
+
+        if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+                sk->sk_dst_cache = NULL;
+                dst_release(dst);
+                return NULL;
+        }
+
+        return dst;
+}
+EXPORT_SYMBOL(__sk_dst_check);
+
+struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
+{
+        struct dst_entry *dst = sk_dst_get(sk);
+
+        if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+                sk_dst_reset(sk);
+                dst_release(dst);
+                return NULL;
+        }
+
+        return dst;
+}
+EXPORT_SYMBOL(sk_dst_check);
+
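Similarly, a brief sketch of the calling pattern sk_dst_check() is meant for on a transmit path; myproto_route_output() is a hypothetical stand-in for the protocol's own routing lookup and is not part of the patch:

#include <net/dst.h>
#include <net/sock.h>

/* Hypothetical routing lookup, e.g. a wrapper around an
 * ip_route_output_flow()-style call; returns a dst with a reference
 * held, or NULL.
 */
static struct dst_entry *myproto_route_output(struct sock *sk);

/* Revalidate the socket's cached route before each use.  The caller
 * owns one reference on the returned entry and releases it with
 * dst_release() when done.
 */
static struct dst_entry *myproto_get_dst(struct sock *sk, u32 cookie)
{
        /* Returns the cached dst with a reference held, or NULL after
         * resetting the cache if dst->ops->check() rejected it.
         */
        struct dst_entry *dst = sk_dst_check(sk, cookie);

        if (dst == NULL) {
                dst = myproto_route_output(sk);
                if (dst != NULL) {
                        /* Keep one reference in the socket's cache and
                         * hand the original reference to the caller.
                         */
                        sk_dst_set(sk, dst_clone(dst));
                }
        }
        return dst;
}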
 /*
  * This is meant for all protocols to use and covers goings on
  * at the socket level. Everything here is generic.