author     Jens Axboe <jens.axboe@oracle.com>    2009-05-22 14:28:35 -0400
committer  Jens Axboe <jens.axboe@oracle.com>    2009-05-22 14:28:35 -0400
commit     9bd7de51ee8537094656149eaf45338cadb7d7d4 (patch)
tree       a691987319dfcdb07de2bce619746f88d26882c4 /net
parent     e4b636366c00738b9609cda307014d71b1225b7f (diff)
parent     6a44587ee716ac911082cbdec766e5b3f051c071 (diff)
Merge branch 'master' into for-2.6.31
Conflicts:
        drivers/ide/ide-io.c

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'net')
-rw-r--r--  net/bridge/br_input.c     |  5
-rw-r--r--  net/bridge/br_stp.c       |  3
-rw-r--r--  net/core/gen_estimator.c  | 13
-rw-r--r--  net/core/netpoll.c        |  8
-rw-r--r--  net/core/skbuff.c         |  2
-rw-r--r--  net/ipv4/Kconfig          |  2
-rw-r--r--  net/ipv4/ipconfig.c       | 12
-rw-r--r--  net/ipv4/tcp.c            |  5
-rw-r--r--  net/sched/sch_teql.c      |  5
9 files changed, 42 insertions(+), 13 deletions(-)
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 30b88777c3df..5ee1a3682bf2 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -134,6 +134,10 @@ struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb)
         if (skb->protocol == htons(ETH_P_PAUSE))
                 goto drop;
 
+        /* If STP is turned off, then forward */
+        if (p->br->stp_enabled == BR_NO_STP && dest[5] == 0)
+                goto forward;
+
         if (NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
                     NULL, br_handle_local_finish))
                 return NULL;    /* frame consumed by filter */
@@ -141,6 +145,7 @@ struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb)
                 return skb;     /* continue processing */
         }
 
+forward:
         switch (p->state) {
         case BR_STATE_FORWARDING:
                 rhook = rcu_dereference(br_should_route_hook);
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 6e63ec3f1fcf..0660515f3992 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -297,6 +297,9 @@ void br_topology_change_detection(struct net_bridge *br)
 {
         int isroot = br_is_root_bridge(br);
 
+        if (br->stp_enabled != BR_KERNEL_STP)
+                return;
+
         pr_info("%s: topology change detected, %s\n", br->dev->name,
                 isroot ? "propagating" : "sending tcn bpdu");
 
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 9cc9f95b109e..6d62d4618cfc 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -66,9 +66,9 @@
 
    NOTES.
 
-   * The stored value for avbps is scaled by 2^5, so that maximal
-     rate is ~1Gbit, avpps is scaled by 2^10.
-
+   * avbps is scaled by 2^5, avpps is scaled by 2^10.
+   * both values are reported as 32 bit unsigned values. bps can
+     overflow for fast links : max speed being 34360Mbit/sec
    * Minimal interval is HZ/4=250msec (it is the greatest common divisor
      for HZ=100 and HZ=1024 8)), maximal interval
      is (HZ*2^EST_MAX_INTERVAL)/4 = 8sec. Shorter intervals
@@ -86,9 +86,9 @@ struct gen_estimator
         spinlock_t              *stats_lock;
         int                     ewma_log;
         u64                     last_bytes;
+        u64                     avbps;
         u32                     last_packets;
         u32                     avpps;
-        u32                     avbps;
         struct rcu_head         e_rcu;
         struct rb_node          node;
 };
@@ -115,6 +115,7 @@ static void est_timer(unsigned long arg)
         rcu_read_lock();
         list_for_each_entry_rcu(e, &elist[idx].list, list) {
                 u64 nbytes;
+                u64 brate;
                 u32 npackets;
                 u32 rate;
 
@@ -125,9 +126,9 @@ static void est_timer(unsigned long arg)
 
                 nbytes = e->bstats->bytes;
                 npackets = e->bstats->packets;
-                rate = (nbytes - e->last_bytes)<<(7 - idx);
+                brate = (nbytes - e->last_bytes)<<(7 - idx);
                 e->last_bytes = nbytes;
-                e->avbps += ((long)rate - (long)e->avbps) >> e->ewma_log;
+                e->avbps += ((s64)(brate - e->avbps)) >> e->ewma_log;
                 e->rate_est->bps = (e->avbps+0xF)>>5;
 
                 rate = (npackets - e->last_packets)<<(12 - idx);
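
The rewritten NOTES block above says the reported bps value is a 32-bit unsigned count of bytes per second, so it overflows for links faster than roughly 34360 Mbit/sec. As a quick sanity check of that figure (a standalone userspace sketch, not part of this commit), the ceiling is just the 32-bit limit converted to bits per second:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* rate_est->bps is a 32-bit unsigned field counting bytes per second,
         * so the largest value it can represent is 2^32 - 1 bytes/s. */
        uint64_t max_bytes_per_sec = UINT32_MAX;
        uint64_t max_bits_per_sec = max_bytes_per_sec * 8;

        /* 34359738360 bits/s, i.e. approximately the 34360 Mbit/sec
         * quoted in the updated comment. */
        printf("32-bit bps ceiling: %llu Mbit/s\n",
               (unsigned long long)(max_bits_per_sec / 1000000));
        return 0;
}
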
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index b5873bdff612..64f51eec6576 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -175,9 +175,13 @@ static void service_arp_queue(struct netpoll_info *npi)
 void netpoll_poll(struct netpoll *np)
 {
         struct net_device *dev = np->dev;
-        const struct net_device_ops *ops = dev->netdev_ops;
+        const struct net_device_ops *ops;
+
+        if (!dev || !netif_running(dev))
+                return;
 
-        if (!dev || !netif_running(dev) || !ops->ndo_poll_controller)
+        ops = dev->netdev_ops;
+        if (!ops->ndo_poll_controller)
                 return;
 
         /* Process pending work on NIC */
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d152394b2611..e505b5392e1e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2288,7 +2288,7 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
 next_skb:
         block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
 
-        if (abs_offset < block_limit) {
+        if (abs_offset < block_limit && !st->frag_data) {
                 *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
                 return block_limit - abs_offset;
         }
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 9d26a3da37e5..5b919f7b45db 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -408,7 +408,7 @@ config INET_XFRM_MODE_BEET
 
 config INET_LRO
         bool "Large Receive Offload (ipv4/tcp)"
-
+        default y
         ---help---
           Support for Large Receive Offload (ipv4/tcp).
 
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 90d22ae0a419..88bf051d0cbb 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -139,6 +139,8 @@ __be32 ic_servaddr = NONE; /* Boot server IP address */
 __be32 root_server_addr = NONE; /* Address of NFS server */
 u8 root_server_path[256] = { 0, };      /* Path to mount as root */
 
+u32 ic_dev_xid;         /* Device under configuration */
+
 /* vendor class identifier */
 static char vendor_class_identifier[253] __initdata;
 
@@ -932,6 +934,13 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
                 goto drop_unlock;
         }
 
+        /* Is it a reply for the device we are configuring? */
+        if (b->xid != ic_dev_xid) {
+                if (net_ratelimit())
+                        printk(KERN_ERR "DHCP/BOOTP: Ignoring delayed packet \n");
+                goto drop_unlock;
+        }
+
         /* Parse extensions */
         if (ext_len >= 4 &&
             !memcmp(b->exten, ic_bootp_cookie, 4)) { /* Check magic cookie */
@@ -1115,6 +1124,9 @@ static int __init ic_dynamic(void)
         get_random_bytes(&timeout, sizeof(timeout));
         timeout = CONF_BASE_TIMEOUT + (timeout % (unsigned) CONF_TIMEOUT_RANDOM);
         for (;;) {
+                /* Track the device we are configuring */
+                ic_dev_xid = d->xid;
+
 #ifdef IPCONFIG_BOOTP
                 if (do_bootp && (d->able & IC_BOOTP))
                         ic_bootp_send_if(d, jiffies - start_jiffies);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1d7f49c6f0ca..7a0f0b27bf1f 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1321,6 +1321,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
         struct task_struct *user_recv = NULL;
         int copied_early = 0;
         struct sk_buff *skb;
+        u32 urg_hole = 0;
 
         lock_sock(sk);
 
@@ -1532,7 +1533,8 @@ do_prequeue:
                         }
                 }
         }
-        if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
+        if ((flags & MSG_PEEK) &&
+            (peek_seq - copied - urg_hole != tp->copied_seq)) {
                 if (net_ratelimit())
                         printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
                                current->comm, task_pid_nr(current));
@@ -1553,6 +1555,7 @@ do_prequeue:
                 if (!urg_offset) {
                         if (!sock_flag(sk, SOCK_URGINLINE)) {
                                 ++*seq;
+                                urg_hole++;
                                 offset++;
                                 used--;
                                 if (!used)
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index ec697cebb63b..3b6418297231 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -303,6 +303,8 @@ restart:
                 switch (teql_resolve(skb, skb_res, slave)) {
                 case 0:
                         if (__netif_tx_trylock(slave_txq)) {
+                                unsigned int length = qdisc_pkt_len(skb);
+
                                 if (!netif_tx_queue_stopped(slave_txq) &&
                                     !netif_tx_queue_frozen(slave_txq) &&
                                     slave_ops->ndo_start_xmit(skb, slave) == 0) {
@@ -310,8 +312,7 @@ restart:
                                         master->slaves = NEXT_SLAVE(q);
                                         netif_wake_queue(dev);
                                         master->stats.tx_packets++;
-                                        master->stats.tx_bytes +=
-                                                qdisc_pkt_len(skb);
+                                        master->stats.tx_bytes += length;
                                         return 0;
                                 }
                                 __netif_tx_unlock(slave_txq);