diff options
-rw-r--r-- | include/net/sock.h | 91 | ||||
-rw-r--r-- | net/core/sock.c | 93 |
2 files changed, 97 insertions, 87 deletions
diff --git a/include/net/sock.h b/include/net/sock.h index 2aa73c0ec6c2..af2b0544586e 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
@@ -938,28 +938,7 @@ static inline void sock_put(struct sock *sk) | |||
938 | sk_free(sk); | 938 | sk_free(sk); |
939 | } | 939 | } |
940 | 940 | ||
941 | static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb) | 941 | extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb); |
942 | { | ||
943 | int rc = NET_RX_SUCCESS; | ||
944 | |||
945 | if (sk_filter(sk, skb, 0)) | ||
946 | goto discard_and_relse; | ||
947 | |||
948 | skb->dev = NULL; | ||
949 | |||
950 | bh_lock_sock(sk); | ||
951 | if (!sock_owned_by_user(sk)) | ||
952 | rc = sk->sk_backlog_rcv(sk, skb); | ||
953 | else | ||
954 | sk_add_backlog(sk, skb); | ||
955 | bh_unlock_sock(sk); | ||
956 | out: | ||
957 | sock_put(sk); | ||
958 | return rc; | ||
959 | discard_and_relse: | ||
960 | kfree_skb(skb); | ||
961 | goto out; | ||
962 | } | ||
963 | 942 | ||
964 | /* Detach socket from process context. | 943 | /* Detach socket from process context. |
965 | * Announce socket dead, detach it from wait queue and inode. | 944 | * Announce socket dead, detach it from wait queue and inode. |
@@ -1044,33 +1023,9 @@ sk_dst_reset(struct sock *sk) | |||
1044 | write_unlock(&sk->sk_dst_lock); | 1023 | write_unlock(&sk->sk_dst_lock); |
1045 | } | 1024 | } |
1046 | 1025 | ||
1047 | static inline struct dst_entry * | 1026 | extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie); |
1048 | __sk_dst_check(struct sock *sk, u32 cookie) | ||
1049 | { | ||
1050 | struct dst_entry *dst = sk->sk_dst_cache; | ||
1051 | |||
1052 | if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { | ||
1053 | sk->sk_dst_cache = NULL; | ||
1054 | dst_release(dst); | ||
1055 | return NULL; | ||
1056 | } | ||
1057 | |||
1058 | return dst; | ||
1059 | } | ||
1060 | |||
1061 | static inline struct dst_entry * | ||
1062 | sk_dst_check(struct sock *sk, u32 cookie) | ||
1063 | { | ||
1064 | struct dst_entry *dst = sk_dst_get(sk); | ||
1065 | 1027 | ||
1066 | if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { | 1028 | extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie); |
1067 | sk_dst_reset(sk); | ||
1068 | dst_release(dst); | ||
1069 | return NULL; | ||
1070 | } | ||
1071 | |||
1072 | return dst; | ||
1073 | } | ||
1074 | 1029 | ||
1075 | static inline void sk_setup_caps(struct sock *sk, struct dst_entry *dst) | 1030 | static inline void sk_setup_caps(struct sock *sk, struct dst_entry *dst) |
1076 | { | 1031 | { |
@@ -1140,45 +1095,7 @@ extern void sk_reset_timer(struct sock *sk, struct timer_list* timer, | |||
1140 | 1095 | ||
1141 | extern void sk_stop_timer(struct sock *sk, struct timer_list* timer); | 1096 | extern void sk_stop_timer(struct sock *sk, struct timer_list* timer); |
1142 | 1097 | ||
1143 | static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | 1098 | extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); |
1144 | { | ||
1145 | int err = 0; | ||
1146 | int skb_len; | ||
1147 | |||
1148 | /* Cast skb->rcvbuf to unsigned... It's pointless, but reduces | ||
1149 | number of warnings when compiling with -W --ANK | ||
1150 | */ | ||
1151 | if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= | ||
1152 | (unsigned)sk->sk_rcvbuf) { | ||
1153 | err = -ENOMEM; | ||
1154 | goto out; | ||
1155 | } | ||
1156 | |||
1157 | /* It would be deadlock, if sock_queue_rcv_skb is used | ||
1158 | with socket lock! We assume that users of this | ||
1159 | function are lock free. | ||
1160 | */ | ||
1161 | err = sk_filter(sk, skb, 1); | ||
1162 | if (err) | ||
1163 | goto out; | ||
1164 | |||
1165 | skb->dev = NULL; | ||
1166 | skb_set_owner_r(skb, sk); | ||
1167 | |||
1168 | /* Cache the SKB length before we tack it onto the receive | ||
1169 | * queue. Once it is added it no longer belongs to us and | ||
1170 | * may be freed by other threads of control pulling packets | ||
1171 | * from the queue. | ||
1172 | */ | ||
1173 | skb_len = skb->len; | ||
1174 | |||
1175 | skb_queue_tail(&sk->sk_receive_queue, skb); | ||
1176 | |||
1177 | if (!sock_flag(sk, SOCK_DEAD)) | ||
1178 | sk->sk_data_ready(sk, skb_len); | ||
1179 | out: | ||
1180 | return err; | ||
1181 | } | ||
1182 | 1099 | ||
1183 | static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) | 1100 | static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) |
1184 | { | 1101 | { |
diff --git a/net/core/sock.c b/net/core/sock.c index e110b9004147..a96ea7dd0fc1 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -187,6 +187,99 @@ static void sock_disable_timestamp(struct sock *sk) | |||
187 | } | 187 | } |
188 | 188 | ||
189 | 189 | ||
/*
 * sock_queue_rcv_skb - charge an skb to a socket and queue it for receive
 * @sk:  destination socket
 * @skb: packet to queue
 *
 * Checks receive-buffer space, runs the socket filter, assigns skb
 * ownership to @sk and appends it to sk_receive_queue, then notifies
 * the socket via sk_data_ready() unless the socket is already dead.
 *
 * Returns 0 on success, -ENOMEM when the receive buffer is full, or
 * the sk_filter() error.  NOTE(review): on failure the skb is NOT
 * freed here — the caller keeps ownership; confirm callers handle it.
 * Must be called without the socket lock held (see deadlock comment).
 */
int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err = 0;
	int skb_len;

	/* Cast sk->sk_rcvbuf to unsigned... It's pointless, but reduces
	   number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf) {
		err = -ENOMEM;
		goto out;
	}

	/* It would be deadlock, if sock_queue_rcv_skb is used
	   with socket lock! We assume that users of this
	   function are lock free.
	 */
	err = sk_filter(sk, skb, 1);
	if (err)
		goto out;

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue.  Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	skb_queue_tail(&sk->sk_receive_queue, skb);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
out:
	return err;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);
230 | |||
/*
 * sk_receive_skb - filter an skb and hand it to the protocol backlog
 * @sk:  socket that will process the packet
 * @skb: packet to deliver
 *
 * Runs the socket filter; if accepted, delivers the skb immediately
 * through sk->sk_backlog_rcv() when no user context owns the socket,
 * otherwise defers it onto the socket backlog for later processing.
 *
 * Consumes one reference on @sk (sock_put() on every exit path) and
 * frees the skb itself when the filter rejects it.  Returns
 * NET_RX_SUCCESS, or whatever verdict sk_backlog_rcv() produced.
 */
int sk_receive_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb, 0))
		goto discard_and_relse;

	skb->dev = NULL;

	/* bh_lock_sock() serializes against user-context lock holders;
	 * only process inline when the socket is not user-owned.
	 */
	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		rc = sk->sk_backlog_rcv(sk, skb);
	else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);
254 | |||
255 | struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) | ||
256 | { | ||
257 | struct dst_entry *dst = sk->sk_dst_cache; | ||
258 | |||
259 | if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { | ||
260 | sk->sk_dst_cache = NULL; | ||
261 | dst_release(dst); | ||
262 | return NULL; | ||
263 | } | ||
264 | |||
265 | return dst; | ||
266 | } | ||
267 | EXPORT_SYMBOL(__sk_dst_check); | ||
268 | |||
269 | struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie) | ||
270 | { | ||
271 | struct dst_entry *dst = sk_dst_get(sk); | ||
272 | |||
273 | if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { | ||
274 | sk_dst_reset(sk); | ||
275 | dst_release(dst); | ||
276 | return NULL; | ||
277 | } | ||
278 | |||
279 | return dst; | ||
280 | } | ||
281 | EXPORT_SYMBOL(sk_dst_check); | ||
282 | |||
190 | /* | 283 | /* |
191 | * This is meant for all protocols to use and covers goings on | 284 | * This is meant for all protocols to use and covers goings on |
192 | * at the socket level. Everything here is generic. | 285 | * at the socket level. Everything here is generic. |