Diffstat (limited to 'net/core/sock.c')
 net/core/sock.c | 38 +++++++++++++++++++++++---------------
 1 file changed, 23 insertions(+), 15 deletions(-)
diff --git a/net/core/sock.c b/net/core/sock.c
index e1f6f225f012..c5812bbc2cc9 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -340,8 +340,12 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
 		rc = sk_backlog_rcv(sk, skb);
 
 		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
-	} else
-		sk_add_backlog(sk, skb);
+	} else if (sk_add_backlog(sk, skb)) {
+		bh_unlock_sock(sk);
+		atomic_inc(&sk->sk_drops);
+		goto discard_and_relse;
+	}
+
 	bh_unlock_sock(sk);
 out:
 	sock_put(sk);
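The new else-if branch depends on sk_add_backlog() now returning nonzero when the socket's backlog budget is exhausted, so the caller can drop the packet and account it in sk_drops. A rough sketch of the bounding idea behind that helper (the real one lives in include/net/sock.h; the function name and exact accounting below are illustrative only):

/* Illustrative only: refuse to queue once the accumulated truesize would
 * exceed the per-socket backlog limit; the caller drops and bumps sk_drops.
 */
static inline int example_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	if (sk->sk_backlog.len + skb->truesize > sk->sk_backlog.limit)
		return -ENOBUFS;

	if (!sk->sk_backlog.tail)
		sk->sk_backlog.head = skb;
	else
		sk->sk_backlog.tail->next = skb;
	sk->sk_backlog.tail = skb;
	skb->next = NULL;

	sk->sk_backlog.len += skb->truesize;
	return 0;
}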
@@ -741,7 +745,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 		struct timeval tm;
 	} v;
 
-	unsigned int lv = sizeof(int);
+	int lv = sizeof(int);
 	int len;
 
 	if (get_user(len, optlen))
@@ -1073,7 +1077,8 @@ static void __sk_free(struct sock *sk)
 	if (sk->sk_destruct)
 		sk->sk_destruct(sk);
 
-	filter = rcu_dereference(sk->sk_filter);
+	filter = rcu_dereference_check(sk->sk_filter,
+				       atomic_read(&sk->sk_wmem_alloc) == 0);
 	if (filter) {
 		sk_filter_uncharge(sk, filter);
 		rcu_assign_pointer(sk->sk_filter, NULL);
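rcu_dereference_check() extends rcu_dereference() with a lockdep expression: the access is considered legal either inside an RCU read-side critical section or whenever the given condition is true. Here the condition, sk_wmem_alloc having dropped to zero, means the socket is being destroyed and nothing else can change sk_filter concurrently. A hedged sketch of the pattern, using a hypothetical structure:

/* Hypothetical example of the rcu_dereference_check() pattern. */
struct demo {
	struct demo_cfg __rcu *cfg;
	atomic_t refcnt;
};

static struct demo_cfg *demo_get_cfg(struct demo *d)
{
	/* Valid under rcu_read_lock(), or once the last reference is gone
	 * and no other context can update d->cfg concurrently.
	 */
	return rcu_dereference_check(d->cfg, atomic_read(&d->refcnt) == 0);
}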
@@ -1138,6 +1143,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
 		sock_lock_init(newsk);
 		bh_lock_sock(newsk);
 		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
+		newsk->sk_backlog.len = 0;
 
 		atomic_set(&newsk->sk_rmem_alloc, 0);
 		/*
@@ -1541,6 +1547,12 @@ static void __release_sock(struct sock *sk)
 
 		bh_lock_sock(sk);
 	} while ((skb = sk->sk_backlog.head) != NULL);
+
+	/*
+	 * Doing the zeroing here guarantee we can not loop forever
+	 * while a wild producer attempts to flood us.
+	 */
+	sk->sk_backlog.len = 0;
 }
 
 /**
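The byte count is reset once, after the drain loop has observed an empty queue, rather than decremented per packet: crediting the producer back for every dequeued skb would let a flooding sender keep this loop alive indefinitely. A condensed, illustrative paraphrase of the resulting function shape (details such as rescheduling are omitted, and the name is made up):

static void example_release_backlog(struct sock *sk)
{
	struct sk_buff *skb = sk->sk_backlog.head;

	do {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		bh_unlock_sock(sk);

		while (skb) {
			struct sk_buff *next = skb->next;

			skb->next = NULL;
			sk_backlog_rcv(sk, skb);
			skb = next;
		}

		bh_lock_sock(sk);
	} while ((skb = sk->sk_backlog.head) != NULL);

	/* Reopen the backlog only after it has been seen empty. */
	sk->sk_backlog.len = 0;
}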
@@ -1873,6 +1885,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 	sk->sk_allocation = GFP_KERNEL;
 	sk->sk_rcvbuf = sysctl_rmem_default;
 	sk->sk_sndbuf = sysctl_wmem_default;
+	sk->sk_backlog.limit = sk->sk_rcvbuf << 1;
 	sk->sk_state = TCP_CLOSE;
 	sk_set_socket(sk, sock);
 
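The initial budget is simply twice the receive buffer. A worked example with a purely hypothetical default value:

	sk->sk_rcvbuf        = 131072;              /* hypothetical rmem default, in bytes */
	sk->sk_backlog.limit = sk->sk_rcvbuf << 1;  /* 262144 bytes may sit in the backlog */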
@@ -2140,13 +2153,13 @@ int sock_prot_inuse_get(struct net *net, struct proto *prot)
 }
 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
 
-static int sock_inuse_init_net(struct net *net)
+static int __net_init sock_inuse_init_net(struct net *net)
 {
 	net->core.inuse = alloc_percpu(struct prot_inuse);
 	return net->core.inuse ? 0 : -ENOMEM;
 }
 
-static void sock_inuse_exit_net(struct net *net)
+static void __net_exit sock_inuse_exit_net(struct net *net)
 {
 	free_percpu(net->core.inuse);
 }
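__net_init and __net_exit mark functions that are only needed when a network namespace is set up or torn down; with CONFIG_NET_NS disabled they map to the init/exit sections so the code can be discarded after boot. These two helpers are invoked through a pernet_operations table; a hedged sketch of that registration pattern (the table and wrapper names below are illustrative, though sock.c registers an equivalent table for its per-CPU inuse counters):

static struct pernet_operations example_inuse_ops = {
	.init = sock_inuse_init_net,
	.exit = sock_inuse_exit_net,
};

static int __init example_inuse_register(void)
{
	return register_pernet_subsys(&example_inuse_ops);
}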
@@ -2228,13 +2241,10 @@ int proto_register(struct proto *prot, int alloc_slab)
 	}
 
 	if (prot->rsk_prot != NULL) {
-		static const char mask[] = "request_sock_%s";
-
-		prot->rsk_prot->slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
+		prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
 		if (prot->rsk_prot->slab_name == NULL)
 			goto out_free_sock_slab;
 
-		sprintf(prot->rsk_prot->slab_name, mask, prot->name);
 		prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
 							 prot->rsk_prot->obj_size, 0,
 							 SLAB_HWCACHE_ALIGN, NULL);
@@ -2247,14 +2257,11 @@
 	}
 
 	if (prot->twsk_prot != NULL) {
-		static const char mask[] = "tw_sock_%s";
-
-		prot->twsk_prot->twsk_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
+		prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
 
 		if (prot->twsk_prot->twsk_slab_name == NULL)
 			goto out_free_request_sock_slab;
 
-		sprintf(prot->twsk_prot->twsk_slab_name, mask, prot->name);
 		prot->twsk_prot->twsk_slab =
 			kmem_cache_create(prot->twsk_prot->twsk_slab_name,
 					  prot->twsk_prot->twsk_obj_size,
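kasprintf() computes the required size, allocates, and formats in one step, returning NULL on allocation failure, which removes the hand-maintained strlen()/sizeof() arithmetic in both branches; the result is still released with kfree() on the error paths exactly as before. A minimal before/after sketch (a kernel-context fragment; example_slab_name is a made-up wrapper):

static char *example_slab_name(const char *proto_name)
{
	/* Old pattern, size computed by hand, then formatted separately:
	 *
	 *     p = kmalloc(strlen(proto_name) + sizeof("request_sock_%s") - 1,
	 *                 GFP_KERNEL);
	 *     if (p)
	 *             sprintf(p, "request_sock_%s", proto_name);
	 *
	 * New pattern, one call that allocates exactly enough:
	 */
	return kasprintf(GFP_KERNEL, "request_sock_%s", proto_name);
}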
@@ -2281,7 +2288,8 @@ out_free_request_sock_slab:
 		prot->rsk_prot->slab = NULL;
 	}
 out_free_request_sock_slab_name:
-	kfree(prot->rsk_prot->slab_name);
+	if (prot->rsk_prot)
+		kfree(prot->rsk_prot->slab_name);
 out_free_sock_slab:
 	kmem_cache_destroy(prot->slab);
 	prot->slab = NULL;
