author    Linus Torvalds <torvalds@linux-foundation.org>  2011-10-04 13:37:06 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-10-04 13:37:06 -0400
commit    8a04b45367c7943f8f7f30257d42e2106ab7a0bf
tree      ccc4e270380f72ed4f8fde7ba286e23c9cfa5136 /net
parent    a8062e421f8a5800c83b85ab10c4cefd8409844f
parent    805e969f6151eda7bc1a57e9c737054230acc3cc
Merge git://github.com/davem330/net
* git://github.com/davem330/net:
  pch_gbe: Fixed the issue on which a network freezes
  pch_gbe: Fixed the issue on which PC was frozen when link was downed.
  make PACKET_STATISTICS getsockopt report consistently between ring and non-ring
  net: xen-netback: correctly restart Tx after a VM restore/migrate
  bonding: properly stop queuing work when requested
  can bcm: fix incomplete tx_setup fix
  RDSRDMA: Fix cleanup of rds_iw_mr_pool
  net: Documentation: Fix type of variables
  ibmveth: Fix oops on request_irq failure
  ipv6: nullify ipv6_ac_list and ipv6_fl_list when creating new socket
  cxgb4: Fix EEH on IBM P7IOC
  can bcm: fix tx_setup off-by-one errors
  MAINTAINERS: tehuti: Alexander Indenbaum's address bounces
  dp83640: reduce driver noise
  ptp: fix L2 event message recognition
Diffstat (limited to 'net')
-rw-r--r--  net/can/bcm.c           53
-rw-r--r--  net/ipv6/tcp_ipv6.c      3
-rw-r--r--  net/packet/af_packet.c   5
-rw-r--r--  net/rds/iw_rdma.c       13
4 files changed, 40 insertions(+), 34 deletions(-)
diff --git a/net/can/bcm.c b/net/can/bcm.c
index d6c8ae5b2e6a..c84963d2dee6 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -344,6 +344,18 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
 	}
 }
 
+static void bcm_tx_start_timer(struct bcm_op *op)
+{
+	if (op->kt_ival1.tv64 && op->count)
+		hrtimer_start(&op->timer,
+			      ktime_add(ktime_get(), op->kt_ival1),
+			      HRTIMER_MODE_ABS);
+	else if (op->kt_ival2.tv64)
+		hrtimer_start(&op->timer,
+			      ktime_add(ktime_get(), op->kt_ival2),
+			      HRTIMER_MODE_ABS);
+}
+
 static void bcm_tx_timeout_tsklet(unsigned long data)
 {
 	struct bcm_op *op = (struct bcm_op *)data;
@@ -365,26 +377,12 @@ static void bcm_tx_timeout_tsklet(unsigned long data)
 
 			bcm_send_to_user(op, &msg_head, NULL, 0);
 		}
-	}
-
-	if (op->kt_ival1.tv64 && (op->count > 0)) {
-
-		/* send (next) frame */
 		bcm_can_tx(op);
-		hrtimer_start(&op->timer,
-			      ktime_add(ktime_get(), op->kt_ival1),
-			      HRTIMER_MODE_ABS);
 
-	} else {
-		if (op->kt_ival2.tv64) {
-
-			/* send (next) frame */
-			bcm_can_tx(op);
-			hrtimer_start(&op->timer,
-				      ktime_add(ktime_get(), op->kt_ival2),
-				      HRTIMER_MODE_ABS);
-		}
-	}
+	} else if (op->kt_ival2.tv64)
+		bcm_can_tx(op);
+
+	bcm_tx_start_timer(op);
 }
 
 /*
@@ -964,23 +962,20 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 		hrtimer_cancel(&op->timer);
 	}
 
-	if ((op->flags & STARTTIMER) &&
-	    ((op->kt_ival1.tv64 && op->count) || op->kt_ival2.tv64)) {
-
+	if (op->flags & STARTTIMER) {
+		hrtimer_cancel(&op->timer);
 		/* spec: send can_frame when starting timer */
 		op->flags |= TX_ANNOUNCE;
-
-		if (op->kt_ival1.tv64 && (op->count > 0)) {
-			/* op->count-- is done in bcm_tx_timeout_handler */
-			hrtimer_start(&op->timer, op->kt_ival1,
-				      HRTIMER_MODE_REL);
-		} else
-			hrtimer_start(&op->timer, op->kt_ival2,
-				      HRTIMER_MODE_REL);
 	}
 
-	if (op->flags & TX_ANNOUNCE)
+	if (op->flags & TX_ANNOUNCE) {
 		bcm_can_tx(op);
+		if (op->count)
+			op->count--;
+	}
+
+	if (op->flags & STARTTIMER)
+		bcm_tx_start_timer(op);
 
 	return msg_head->nframes * CFSIZ + MHSIZ;
 }
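
The two bcm fixes above revolve around when op->count is decremented: the announce frame sent at STARTTIMER now consumes one count, and every timer restart funnels through the new bcm_tx_start_timer() helper. As a rough userspace model of the off-by-one (hypothetical names, not kernel code):

/* Illustration only: a userspace model of the bcm_tx_setup counting fix.
 * frames_sent() and its parameters are hypothetical, not the kernel API. */
#include <stdio.h>

/* Before the fix, the first frame sent at STARTTIMER was not counted,
 * so an op configured with count == N transmitted N + 1 frames. */
static int frames_sent(unsigned count, int decrement_on_announce)
{
	int sent = 0;

	sent++;				/* announce frame at STARTTIMER */
	if (decrement_on_announce && count)
		count--;		/* the fix: the announce consumes a count */

	while (count--)			/* remaining ival1 timer ticks */
		sent++;

	return sent;
}

int main(void)
{
	printf("count=3, old behaviour: %d frames\n", frames_sent(3, 0));
	printf("count=3, fixed:         %d frames\n", frames_sent(3, 1));
	return 0;
}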
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 3c9fa618b69d..79cc6469508d 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1383,6 +1383,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
 #endif
 
+		newnp->ipv6_ac_list = NULL;
+		newnp->ipv6_fl_list = NULL;
 		newnp->pktoptions  = NULL;
 		newnp->opt	   = NULL;
 		newnp->mcast_oif   = inet6_iif(skb);
@@ -1447,6 +1449,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	   First: no IPv4 options.
 	 */
 	newinet->inet_opt = NULL;
+	newnp->ipv6_ac_list = NULL;
 	newnp->ipv6_fl_list = NULL;
 
 	/* Clone RX bits */
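
The fix above matters because tcp_v6_syn_recv_sock() builds the child socket as a byte copy of the listener, so any per-socket list pointer left in place is silently shared between parent and child and freed twice on close. A minimal sketch of that clone-then-nullify pattern, with hypothetical types standing in for struct ipv6_pinfo:

/* Illustration only: why per-socket list pointers must be nullified
 * after cloning. Types and names here are hypothetical. */
#include <stdlib.h>
#include <string.h>

struct member { struct member *next; };

struct sockctx {
	struct member *ac_list;	/* anycast memberships */
	struct member *fl_list;	/* flow labels */
};

static struct sockctx *clone_ctx(const struct sockctx *parent)
{
	struct sockctx *child = malloc(sizeof(*child));
	if (!child)
		return NULL;

	memcpy(child, parent, sizeof(*child)); /* copies the raw pointers too */

	/* Without these two lines, parent and child would share the same
	 * lists, and tearing both sockets down would free each node twice. */
	child->ac_list = NULL;
	child->fl_list = NULL;
	return child;
}

int main(void)
{
	struct member m = { .next = NULL };
	struct sockctx parent = { .ac_list = &m, .fl_list = NULL };
	struct sockctx *child = clone_ctx(&parent);

	free(child); /* safe: the child owns no list nodes */
	return 0;
}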
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index c698cec0a445..fabb4fafa281 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -961,7 +961,10 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 	return 0;
 
 drop_n_acct:
-	po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);
+	spin_lock(&sk->sk_receive_queue.lock);
+	po->stats.tp_drops++;
+	atomic_inc(&sk->sk_drops);
+	spin_unlock(&sk->sk_receive_queue.lock);
 
 drop_n_restore:
 	if (skb_head != skb->data && skb_shared(skb)) {
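
The old lock-free tp_drops update could disagree with the drop accounting the ring path performs under the receive-queue lock; the fix moves both counters into the same critical section so PACKET_STATISTICS reports consistently. A rough userspace sketch of the pattern, with a pthread mutex standing in for sk_receive_queue.lock:

/* Illustration only: keep two related drop counters consistent by
 * updating both under one lock. The names mirror, but are not, the
 * kernel's packet-socket internals. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rx_queue_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long tp_drops;	/* reported via the stats getsockopt */
static unsigned long sk_drops;	/* per-socket drop total */

static void account_drop(void)
{
	pthread_mutex_lock(&rx_queue_lock);
	tp_drops++;		/* both counters move together ...      */
	sk_drops++;		/* ... so readers never observe a skew */
	pthread_mutex_unlock(&rx_queue_lock);
}

int main(void)
{
	account_drop();
	printf("tp_drops=%lu sk_drops=%lu\n", tp_drops, sk_drops);
	return 0;
}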
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
index 8b77edbab272..4e1de171866c 100644
--- a/net/rds/iw_rdma.c
+++ b/net/rds/iw_rdma.c
@@ -84,7 +84,8 @@ static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
 static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
 static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
 			struct list_head *unmap_list,
-			struct list_head *kill_list);
+			struct list_head *kill_list,
+			int *unpinned);
 static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
 
 static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id)
@@ -499,7 +500,7 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
 	LIST_HEAD(unmap_list);
 	LIST_HEAD(kill_list);
 	unsigned long flags;
-	unsigned int nfreed = 0, ncleaned = 0, free_goal;
+	unsigned int nfreed = 0, ncleaned = 0, unpinned = 0, free_goal;
 	int ret = 0;
 
 	rds_iw_stats_inc(s_iw_rdma_mr_pool_flush);
@@ -524,7 +525,8 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
 	 * will be destroyed by the unmap function.
 	 */
 	if (!list_empty(&unmap_list)) {
-		ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list, &kill_list);
+		ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list,
+						     &kill_list, &unpinned);
 		/* If we've been asked to destroy all MRs, move those
 		 * that were simply cleaned to the kill list */
 		if (free_all)
@@ -548,6 +550,7 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
 		spin_unlock_irqrestore(&pool->list_lock, flags);
 	}
 
+	atomic_sub(unpinned, &pool->free_pinned);
 	atomic_sub(ncleaned, &pool->dirty_count);
 	atomic_sub(nfreed, &pool->item_count);
 
@@ -828,7 +831,8 @@ static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool,
 
 static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
 				struct list_head *unmap_list,
-				struct list_head *kill_list)
+				struct list_head *kill_list,
+				int *unpinned)
 {
 	struct rds_iw_mapping *mapping, *next;
 	unsigned int ncleaned = 0;
@@ -855,6 +859,7 @@ static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
 
 	spin_lock_irqsave(&pool->list_lock, flags);
 	list_for_each_entry_safe(mapping, next, unmap_list, m_list) {
+		*unpinned += mapping->m_sg.len;
 		list_move(&mapping->m_list, &laundered);
 		ncleaned++;
 	}
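
The RDS/iWARP change threads an unpinned out-parameter through rds_iw_unmap_fastreg_list() so the flush path can subtract exactly the pages it released from pool->free_pinned, a counter that previously only ever grew. A small sketch of that accounting pattern, using C11 atomics in place of the kernel's atomic_t (hypothetical names, not the RDS code itself):

/* Illustration only: accumulate released pages through an out-parameter
 * and credit them back to a shared counter in a single step. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int free_pinned = 100;	/* pages pinned pool-wide */

/* Each "mapping" releases its pages; the caller tallies them. */
static unsigned int unmap_list_pages(const int *sg_lens, int n, int *unpinned)
{
	unsigned int ncleaned = 0;

	for (int i = 0; i < n; i++) {
		*unpinned += sg_lens[i];	/* analogous to mapping->m_sg.len */
		ncleaned++;
	}
	return ncleaned;
}

int main(void)
{
	int lens[] = { 4, 8, 16 };
	int unpinned = 0;

	unmap_list_pages(lens, 3, &unpinned);
	atomic_fetch_sub(&free_pinned, unpinned); /* one credit, not per mapping */
	printf("free_pinned now %d\n", atomic_load(&free_pinned));
	return 0;
}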