author		David S. Miller <davem@davemloft.net>	2010-03-20 18:24:29 -0400
committer	David S. Miller <davem@davemloft.net>	2010-03-20 18:24:29 -0400
commit		e77c8e83dd587f2616d7ff20d23a897891e6e20d (patch)
tree		ba5d2ce6541119f329b2fd51181aaae8528b38f0 /net/ipv4
parent		641cb85e68945878d520d5fc3c2dc64aa1dda868 (diff)
parent		af98441397227a5a4f212cd48710eea72a14dbdb (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/ipmr.c      |  3
-rw-r--r--	net/ipv4/route.c     | 14
-rw-r--r--	net/ipv4/tcp.c       | 65
-rw-r--r--	net/ipv4/tcp_input.c |  3
-rw-r--r--	net/ipv4/tcp_ipv4.c  |  5
5 files changed, 65 insertions, 25 deletions
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 8582e12e4a62..0b9d03c54dc3 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -802,6 +802,9 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
 	int line;
 	struct mfc_cache *uc, *c, **cp;
 
+	if (mfc->mfcc_parent >= MAXVIFS)
+		return -ENFILE;
+
 	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
 
 	for (cp = &net->ipv4.mfc_cache_array[line];
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index a770df2493d2..32d396196df8 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1441,7 +1441,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 					dev_hold(rt->u.dst.dev);
 				if (rt->idev)
 					in_dev_hold(rt->idev);
-				rt->u.dst.obsolete = 0;
+				rt->u.dst.obsolete = -1;
 				rt->u.dst.lastuse = jiffies;
 				rt->u.dst.path = &rt->u.dst;
 				rt->u.dst.neighbour = NULL;
@@ -1506,7 +1506,7 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
 	struct dst_entry *ret = dst;
 
 	if (rt) {
-		if (dst->obsolete) {
+		if (dst->obsolete > 0) {
 			ip_rt_put(rt);
 			ret = NULL;
 		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
@@ -1726,7 +1726,9 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
 
 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
 {
-	return NULL;
+	if (rt_is_expired((struct rtable *)dst))
+		return NULL;
+	return dst;
 }
 
 static void ipv4_dst_destroy(struct dst_entry *dst)
@@ -1888,7 +1890,8 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	if (!rth)
 		goto e_nobufs;
 
-	rth->u.dst.output= ip_rt_bug;
+	rth->u.dst.output = ip_rt_bug;
+	rth->u.dst.obsolete = -1;
 
 	atomic_set(&rth->u.dst.__refcnt, 1);
 	rth->u.dst.flags= DST_HOST;
@@ -2054,6 +2057,7 @@ static int __mkroute_input(struct sk_buff *skb,
 	rth->fl.oif = 0;
 	rth->rt_spec_dst= spec_dst;
 
+	rth->u.dst.obsolete = -1;
 	rth->u.dst.input = ip_forward;
 	rth->u.dst.output = ip_output;
 	rth->rt_genid = rt_genid(dev_net(rth->u.dst.dev));
@@ -2218,6 +2222,7 @@ local_input:
 		goto e_nobufs;
 
 	rth->u.dst.output= ip_rt_bug;
+	rth->u.dst.obsolete = -1;
 	rth->rt_genid = rt_genid(net);
 
 	atomic_set(&rth->u.dst.__refcnt, 1);
@@ -2444,6 +2449,7 @@ static int __mkroute_output(struct rtable **result,
 	rth->rt_spec_dst= fl->fl4_src;
 
 	rth->u.dst.output=ip_output;
+	rth->u.dst.obsolete = -1;
 	rth->rt_genid = rt_genid(dev_net(dev_out));
 
 	RT_CACHE_STAT_INC(out_slow_tot);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5901010fad55..6afb6d8662b2 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -429,7 +429,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 		if (tp->urg_seq == tp->copied_seq &&
 		    !sock_flag(sk, SOCK_URGINLINE) &&
 		    tp->urg_data)
-			target--;
+			target++;
 
 		/* Potential race condition. If read of tp below will
 		 * escape above sk->sk_state, we can be illegally awaken
@@ -1254,6 +1254,39 @@ static void tcp_prequeue_process(struct sock *sk)
 	tp->ucopy.memory = 0;
 }
 
+#ifdef CONFIG_NET_DMA
+static void tcp_service_net_dma(struct sock *sk, bool wait)
+{
+	dma_cookie_t done, used;
+	dma_cookie_t last_issued;
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	if (!tp->ucopy.dma_chan)
+		return;
+
+	last_issued = tp->ucopy.dma_cookie;
+	dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+
+	do {
+		if (dma_async_memcpy_complete(tp->ucopy.dma_chan,
+					      last_issued, &done,
+					      &used) == DMA_SUCCESS) {
+			/* Safe to free early-copied skbs now */
+			__skb_queue_purge(&sk->sk_async_wait_queue);
+			break;
+		} else {
+			struct sk_buff *skb;
+			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
+			       (dma_async_is_complete(skb->dma_cookie, done,
+						      used) == DMA_SUCCESS)) {
+				__skb_dequeue(&sk->sk_async_wait_queue);
+				kfree_skb(skb);
+			}
+		}
+	} while (wait);
+}
+#endif
+
 static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
 {
 	struct sk_buff *skb;
@@ -1546,6 +1579,10 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			/* __ Set realtime policy in scheduler __ */
 		}
 
+#ifdef CONFIG_NET_DMA
+		if (tp->ucopy.dma_chan)
+			dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+#endif
 		if (copied >= target) {
 			/* Do not sleep, just process backlog. */
 			release_sock(sk);
@@ -1554,6 +1591,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			sk_wait_data(sk, &timeo);
 
 #ifdef CONFIG_NET_DMA
+		tcp_service_net_dma(sk, false);	/* Don't block */
 		tp->ucopy.wakeup = 0;
 #endif
 
@@ -1633,6 +1671,9 @@ do_prequeue:
 					copied = -EFAULT;
 					break;
 				}
+
+				dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+
 				if ((offset + used) == skb->len)
 					copied_early = 1;
 
@@ -1702,27 +1743,9 @@ skip_copy:
 	}
 
 #ifdef CONFIG_NET_DMA
-	if (tp->ucopy.dma_chan) {
-		dma_cookie_t done, used;
-
-		dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
-
-		while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
-						 tp->ucopy.dma_cookie, &done,
-						 &used) == DMA_IN_PROGRESS) {
-			/* do partial cleanup of sk_async_wait_queue */
-			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
-			       (dma_async_is_complete(skb->dma_cookie, done,
-						      used) == DMA_SUCCESS)) {
-				__skb_dequeue(&sk->sk_async_wait_queue);
-				kfree_skb(skb);
-			}
-		}
-
-		/* Safe to free early-copied skbs now */
-		__skb_queue_purge(&sk->sk_async_wait_queue);
-		tp->ucopy.dma_chan = NULL;
-	}
+	tcp_service_net_dma(sk, true);	/* Wait for queue to drain */
+	tp->ucopy.dma_chan = NULL;
+
 	if (tp->ucopy.pinned_list) {
 		dma_unpin_iovec_pages(tp->ucopy.pinned_list);
 		tp->ucopy.pinned_list = NULL;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 788851ca8c5d..c096a4218b8f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2511,6 +2511,9 @@ static void tcp_mark_head_lost(struct sock *sk, int packets)
 	int err;
 	unsigned int mss;
 
+	if (packets == 0)
+		return;
+
 	WARN_ON(packets > tp->packets_out);
 	if (tp->lost_skb_hint) {
 		skb = tp->lost_skb_hint;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 70df40980a87..f4df5f931f36 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -370,6 +370,11 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 	if (sk->sk_state == TCP_CLOSE)
 		goto out;
 
+	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
+		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+		goto out;
+	}
+
 	icsk = inet_csk(sk);
 	tp = tcp_sk(sk);
 	seq = ntohl(th->seq);