Diffstat (limited to 'net')
-rw-r--r--  net/bluetooth/hci_sysfs.c         |  4
-rw-r--r--  net/bluetooth/l2cap.c             |  9
-rw-r--r--  net/core/pktgen.c                 | 15
-rw-r--r--  net/ipv4/inet_connection_sock.c   | 34
-rw-r--r--  net/ipv4/ip_sockglue.c            |  7
-rw-r--r--  net/ipv4/tcp.c                    | 59
-rw-r--r--  net/ipv4/tcp_minisocks.c          |  5
-rw-r--r--  net/ipv6/ipv6_sockglue.c          |  6
-rw-r--r--  net/unix/af_unix.c                |  2
9 files changed, 107 insertions(+), 34 deletions(-)
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 7f939ce29801..2bc6f6a8de68 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -92,6 +92,8 @@ static void add_conn(struct work_struct *work)
 
 	dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
 
+	dev_set_drvdata(&conn->dev, conn);
+
 	if (device_add(&conn->dev) < 0) {
 		BT_ERR("Failed to register connection device");
 		return;
@@ -144,8 +146,6 @@ void hci_conn_init_sysfs(struct hci_conn *conn)
 	conn->dev.class = bt_class;
 	conn->dev.parent = &hdev->dev;
 
-	dev_set_drvdata(&conn->dev, conn);
-
 	device_initialize(&conn->dev);
 
 	INIT_WORK(&conn->work_add, add_conn);
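
The hci_sysfs.c hunk sets the connection's drvdata in add_conn(), immediately before device_add(). The general driver-model requirement this preserves is that drvdata must point at the owning object before device_add() makes the device and its attributes visible, because attribute callbacks fetch the object via dev_get_drvdata() as soon as registration returns. A generic sketch of that ordering; the demo_conn structure and names are invented for illustration and are not Bluetooth code:

#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>

/* Hypothetical object hanging off a struct device (stand-in for hci_conn). */
struct demo_conn {
	struct device dev;
	int handle;
};

static ssize_t handle_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	/* May run as soon as device_add() has exposed the device, so
	 * drvdata must already be valid here. */
	struct demo_conn *conn = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", conn->handle);
}
static DEVICE_ATTR(handle, S_IRUGO, handle_show, NULL);

static void demo_conn_release(struct device *dev)
{
	kfree(container_of(dev, struct demo_conn, dev));
}

/* conn is assumed to have been kmalloc'ed by the caller. */
static int demo_conn_register(struct demo_conn *conn, struct device *parent)
{
	int err;

	device_initialize(&conn->dev);
	conn->dev.parent = parent;
	conn->dev.release = demo_conn_release;
	dev_set_name(&conn->dev, "demo-conn:%d", conn->handle);

	/* Set drvdata before device_add(), mirroring the hunk above. */
	dev_set_drvdata(&conn->dev, conn);

	err = device_add(&conn->dev);
	if (err)
		return err;

	return device_create_file(&conn->dev, &dev_attr_handle);
}
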
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 4b66bd579f4a..d65101d92ee5 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -555,12 +555,12 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
 
 	conn->feat_mask = 0;
 
-	setup_timer(&conn->info_timer, l2cap_info_timeout,
-					(unsigned long) conn);
-
 	spin_lock_init(&conn->lock);
 	rwlock_init(&conn->chan_list.lock);
 
+	setup_timer(&conn->info_timer, l2cap_info_timeout,
+					(unsigned long) conn);
+
 	conn->disc_reason = 0x13;
 
 	return conn;
@@ -783,6 +783,9 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
 	/* Default config options */
 	pi->conf_len = 0;
 	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
+	skb_queue_head_init(TX_QUEUE(sk));
+	skb_queue_head_init(SREJ_QUEUE(sk));
+	INIT_LIST_HEAD(SREJ_LIST(sk));
 }
 
 static struct proto l2cap_proto = {
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 1da0e038df78..5ce017bf4afa 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -335,6 +335,7 @@ struct pktgen_dev {
 	__u32 cur_src_mac_offset;
 	__be32 cur_saddr;
 	__be32 cur_daddr;
+	__u16 ip_id;
 	__u16 cur_udp_dst;
 	__u16 cur_udp_src;
 	__u16 cur_queue_map;
@@ -2630,6 +2631,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 	iph->protocol = IPPROTO_UDP;	/* UDP */
 	iph->saddr = pkt_dev->cur_saddr;
 	iph->daddr = pkt_dev->cur_daddr;
+	iph->id = htons(pkt_dev->ip_id);
+	pkt_dev->ip_id++;
 	iph->frag_off = 0;
 	iplen = 20 + 8 + datalen;
 	iph->tot_len = htons(iplen);
@@ -2641,24 +2644,26 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 	skb->dev = odev;
 	skb->pkt_type = PACKET_HOST;
 
-	if (pkt_dev->nfrags <= 0)
+	if (pkt_dev->nfrags <= 0) {
 		pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
-	else {
+		memset(pgh + 1, 0, datalen - sizeof(struct pktgen_hdr));
+	} else {
 		int frags = pkt_dev->nfrags;
-		int i;
+		int i, len;
 
 		pgh = (struct pktgen_hdr *)(((char *)(udph)) + 8);
 
 		if (frags > MAX_SKB_FRAGS)
 			frags = MAX_SKB_FRAGS;
 		if (datalen > frags * PAGE_SIZE) {
-			skb_put(skb, datalen - frags * PAGE_SIZE);
+			len = datalen - frags * PAGE_SIZE;
+			memset(skb_put(skb, len), 0, len);
 			datalen = frags * PAGE_SIZE;
 		}
 
 		i = 0;
 		while (datalen > 0) {
-			struct page *page = alloc_pages(GFP_KERNEL, 0);
+			struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
 			skb_shinfo(skb)->frags[i].page = page;
 			skb_shinfo(skb)->frags[i].page_offset = 0;
 			skb_shinfo(skb)->frags[i].size =
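
The pktgen hunks do two things: they fill iph->id from a new per-device 16-bit counter so consecutive packets carry distinct IPv4 identification values, and they zero the generated payload (memset of the linear area and __GFP_ZERO for the frag pages) rather than sending whatever happened to be in freshly allocated memory. A stripped-down user-space sketch of those two ideas; the buffer layout, fill_packet() helper, and toy_hdr struct are invented for illustration and are not pktgen code:

#include <arpa/inet.h>
#include <netinet/ip.h>
#include <netinet/udp.h>
#include <stdint.h>
#include <string.h>

/* Toy stand-in for struct pktgen_hdr at the front of the UDP payload. */
struct toy_hdr {
	uint32_t seq;
	uint32_t tv_sec;
	uint32_t tv_usec;
};

static uint16_t ip_id;	/* per-flow identification counter */

static void fill_packet(uint8_t *buf, size_t datalen)
{
	struct iphdr *iph = (struct iphdr *)buf;
	struct udphdr *udph = (struct udphdr *)(buf + sizeof(*iph));
	struct toy_hdr *pgh = (struct toy_hdr *)(udph + 1);

	iph->id = htons(ip_id);		/* distinct ID per packet */
	ip_id++;			/* 16-bit counter wraps at 65535 */
	iph->frag_off = 0;

	/* Zero everything past the generator header so no stale buffer
	 * contents go out on the wire. */
	memset(pgh + 1, 0, datalen - sizeof(*pgh));
}
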
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index f6a0af759932..26fb50e91311 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -447,6 +447,28 @@ extern int sysctl_tcp_synack_retries;
 
 EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
 
+/* Decide when to expire the request and when to resend SYN-ACK */
+static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
+				  const int max_retries,
+				  const u8 rskq_defer_accept,
+				  int *expire, int *resend)
+{
+	if (!rskq_defer_accept) {
+		*expire = req->retrans >= thresh;
+		*resend = 1;
+		return;
+	}
+	*expire = req->retrans >= thresh &&
+		  (!inet_rsk(req)->acked || req->retrans >= max_retries);
+	/*
+	 * Do not resend while waiting for data after ACK,
+	 * start to resend on end of deferring period to give
+	 * last chance for data or ACK to create established socket.
+	 */
+	*resend = !inet_rsk(req)->acked ||
+		  req->retrans >= rskq_defer_accept - 1;
+}
+
 void inet_csk_reqsk_queue_prune(struct sock *parent,
 				const unsigned long interval,
 				const unsigned long timeout,
@@ -502,9 +524,15 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
 		reqp=&lopt->syn_table[i];
 		while ((req = *reqp) != NULL) {
 			if (time_after_eq(now, req->expires)) {
-				if ((req->retrans < thresh ||
-				     (inet_rsk(req)->acked && req->retrans < max_retries))
-				    && !req->rsk_ops->rtx_syn_ack(parent, req)) {
+				int expire = 0, resend = 0;
+
+				syn_ack_recalc(req, thresh, max_retries,
+					       queue->rskq_defer_accept,
+					       &expire, &resend);
+				if (!expire &&
+				    (!resend ||
+				     !req->rsk_ops->rtx_syn_ack(parent, req) ||
+				     inet_rsk(req)->acked)) {
 					unsigned long timeo;
 
 					if (req->retrans++ == 0)
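
The new syn_ack_recalc() helper folds the TCP_DEFER_ACCEPT state into the expire/resend decision made while pruning pending connection requests: with deferral active, a request that has already seen the bare ACK is neither resent early nor expired until the deferring period runs out. Below is a plain-integer restatement of that helper so the decision table can be printed outside the kernel; retrans and acked stand in for req->retrans and inet_rsk(req)->acked, and the parameter values in main() are arbitrary examples:

#include <stdio.h>

static void recalc(int retrans, int acked, int thresh, int max_retries,
		   int defer_accept, int *expire, int *resend)
{
	if (!defer_accept) {
		*expire = retrans >= thresh;
		*resend = 1;
		return;
	}
	*expire = retrans >= thresh && (!acked || retrans >= max_retries);
	*resend = !acked || retrans >= defer_accept - 1;
}

int main(void)
{
	int expire, resend, r;

	/* e.g. thresh = 5, max_retries = 5, deferral of 3 retransmits,
	 * bare ACK already received (acked = 1) */
	for (r = 0; r < 7; r++) {
		recalc(r, 1, 5, 5, 3, &expire, &resend);
		printf("retrans=%d acked=1 -> expire=%d resend=%d\n",
		       r, expire, resend);
	}
	return 0;
}
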
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 2445fedec0b8..a72f43ce33be 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -634,17 +634,16 @@ static int do_ip_setsockopt(struct sock *sk, int level,
 				break;
 			}
 			dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
-			if (dev) {
+			if (dev)
 				mreq.imr_ifindex = dev->ifindex;
-				dev_put(dev);
-			}
 		} else
-			dev = __dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
+			dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
 
 
 		err = -EADDRNOTAVAIL;
 		if (!dev)
 			break;
+		dev_put(dev);
 
 		err = -EINVAL;
 		if (sk->sk_bound_dev_if &&
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 206a291dff03..e0cfa633680a 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -326,6 +326,43 @@ void tcp_enter_memory_pressure(struct sock *sk)
 
 EXPORT_SYMBOL(tcp_enter_memory_pressure);
 
+/* Convert seconds to retransmits based on initial and max timeout */
+static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
+{
+	u8 res = 0;
+
+	if (seconds > 0) {
+		int period = timeout;
+
+		res = 1;
+		while (seconds > period && res < 255) {
+			res++;
+			timeout <<= 1;
+			if (timeout > rto_max)
+				timeout = rto_max;
+			period += timeout;
+		}
+	}
+	return res;
+}
+
+/* Convert retransmits to seconds based on initial and max timeout */
+static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
+{
+	int period = 0;
+
+	if (retrans > 0) {
+		period = timeout;
+		while (--retrans) {
+			timeout <<= 1;
+			if (timeout > rto_max)
+				timeout = rto_max;
+			period += timeout;
+		}
+	}
+	return period;
+}
+
 /*
  *	Wait for a TCP event.
  *
@@ -1405,7 +1442,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 				goto found_ok_skb;
 			if (tcp_hdr(skb)->fin)
 				goto found_fin_ok;
-			WARN_ON(!(flags & MSG_PEEK));
+			WARN(!(flags & MSG_PEEK), KERN_INFO "recvmsg bug 2: "
+					"copied %X seq %X\n", *seq,
+					TCP_SKB_CB(skb)->seq);
 		}
 
 		/* Well, if we have backlog, try to process it now yet. */
@@ -2163,16 +2202,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 		break;
 
 	case TCP_DEFER_ACCEPT:
-		icsk->icsk_accept_queue.rskq_defer_accept = 0;
-		if (val > 0) {
-			/* Translate value in seconds to number of
-			 * retransmits */
-			while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
-			       val > ((TCP_TIMEOUT_INIT / HZ) <<
-				       icsk->icsk_accept_queue.rskq_defer_accept))
-				icsk->icsk_accept_queue.rskq_defer_accept++;
-			icsk->icsk_accept_queue.rskq_defer_accept++;
-		}
+		/* Translate value in seconds to number of retransmits */
+		icsk->icsk_accept_queue.rskq_defer_accept =
+			secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
+					TCP_RTO_MAX / HZ);
 		break;
 
 	case TCP_WINDOW_CLAMP:
@@ -2353,8 +2386,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
 		val = (val ? : sysctl_tcp_fin_timeout) / HZ;
 		break;
 	case TCP_DEFER_ACCEPT:
-		val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
-		      ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
+		val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
+				      TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
 		break;
 	case TCP_WINDOW_CLAMP:
 		val = tp->window_clamp;
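
With these hunks TCP_DEFER_ACCEPT is stored as a SYN-ACK retransmission count (secs_to_retrans) and converted back on read (retrans_to_secs), so getsockopt() reports the deferral rounded up to a whole retransmission period rather than echoing the seconds that were written. A minimal user-space sketch of that round trip, assuming the traditional 3 s initial retransmission timeout (so a request of 10 s would read back as roughly 21 s); error handling is omitted for brevity:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int secs = 10, readback = 0;
	socklen_t len = sizeof(readback);
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(0),			/* any free port */
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};

	if (fd < 0)
		return 1;

	/* Value is given in seconds; the kernel converts it to retransmits. */
	setsockopt(fd, IPPROTO_TCP, TCP_DEFER_ACCEPT, &secs, sizeof(secs));
	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	listen(fd, 8);

	/* Reading it back goes through retrans_to_secs(). */
	getsockopt(fd, IPPROTO_TCP, TCP_DEFER_ACCEPT, &readback, &len);
	printf("asked for %d s, kernel reports %d s\n", secs, readback);

	close(fd);
	return 0;
}
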
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index e320afea07fc..4c03598ed924 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -641,10 +641,9 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 	if (!(flg & TCP_FLAG_ACK))
 		return NULL;
 
-	/* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
-	if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
+	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
+	if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
 	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
-		inet_csk(sk)->icsk_accept_queue.rskq_defer_accept--;
 		inet_rsk(req)->acked = 1;
 		return NULL;
 	}
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 68566de4bcc5..430454ee5ead 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -497,13 +497,17 @@ done:
 			goto e_inval;
 
 		if (val) {
+			struct net_device *dev;
+
 			if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val)
 				goto e_inval;
 
-			if (__dev_get_by_index(net, val) == NULL) {
+			dev = dev_get_by_index(net, val);
+			if (!dev) {
 				retv = -ENODEV;
 				break;
 			}
+			dev_put(dev);
 		}
 		np->mcast_oif = val;
 		retv = 0;
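
Both sockglue hunks move away from __dev_get_by_index(), which is only safe while the caller holds the appropriate lock (RTNL), to the reference-counted dev_get_by_index()/dev_put() pair that can be used from plain setsockopt context. A sketch of that pattern; the helper name and its use are made up for illustration:

#include <linux/errno.h>
#include <linux/netdevice.h>

/* Validate that an ifindex names an existing device without holding RTNL. */
static int demo_validate_ifindex(struct net *net, int ifindex)
{
	struct net_device *dev;

	dev = dev_get_by_index(net, ifindex);	/* takes a reference */
	if (!dev)
		return -ENODEV;

	/* ... checks that only need the device to exist go here ... */

	dev_put(dev);				/* drop the reference */
	return 0;
}
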
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 0f133c5a8d3c..3291902f0b88 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1074,6 +1074,8 @@ restart:
 	err = -ECONNREFUSED;
 	if (other->sk_state != TCP_LISTEN)
 		goto out_unlock;
+	if (other->sk_shutdown & RCV_SHUTDOWN)
+		goto out_unlock;
 
 	if (unix_recvq_full(other)) {
 		err = -EAGAIN;
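
The af_unix hunk makes connect() bail out when the listener's receive side has already been shut down, instead of queueing the new connection; err is still -ECONNREFUSED at that point, so that is the error the caller sees. A user-space sketch of the scenario, using an invented abstract-namespace address and no error handling on the setup calls:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_un sun = { .sun_family = AF_UNIX };
	int srv = socket(AF_UNIX, SOCK_STREAM, 0);
	int cli = socket(AF_UNIX, SOCK_STREAM, 0);

	/* Abstract name: leading NUL byte, no filesystem entry. */
	strcpy(sun.sun_path + 1, "demo-rcv-shutdown");

	bind(srv, (struct sockaddr *)&sun, sizeof(sun));
	listen(srv, 8);
	shutdown(srv, SHUT_RD);		/* sets RCV_SHUTDOWN on the listener */

	if (connect(cli, (struct sockaddr *)&sun, sizeof(sun)) < 0)
		printf("connect: %s\n", strerror(errno));  /* ECONNREFUSED */

	close(cli);
	close(srv);
	return 0;
}
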