author     Ingo Molnar <mingo@elte.hu>  2009-06-01 04:01:03 -0400
committer  Ingo Molnar <mingo@elte.hu>  2009-06-01 04:01:39 -0400
commit     23db9f430be9325a861c7762c1ffadad9ca528a8 (patch)
tree       1ebb681611c96f17aa4f96e28d6923824a8b210f /net
parent     27b9613b7be39412775d0ab80f57229aa73bb07d (diff)
parent     3218911f839b6c85acbf872ad264ea69aa4d89ad (diff)
Merge branch 'linus' into perfcounters/core
Merge reason: merge almost-rc8 into perfcounters/core, which was -rc6
based - to pick up the latest upstream fixes.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'net')
-rw-r--r--  net/bridge/br_input.c                    |  5
-rw-r--r--  net/bridge/br_stp.c                      |  3
-rw-r--r--  net/core/gen_estimator.c                 | 13
-rw-r--r--  net/core/netpoll.c                       |  8
-rw-r--r--  net/core/pktgen.c                        |  2
-rw-r--r--  net/core/skbuff.c                        |  2
-rw-r--r--  net/ipv4/Kconfig                         |  2
-rw-r--r--  net/ipv4/fib_trie.c                      |  6
-rw-r--r--  net/ipv4/ipconfig.c                      | 12
-rw-r--r--  net/ipv4/route.c                         | 60
-rw-r--r--  net/ipv4/tcp.c                           |  5
-rw-r--r--  net/ipv4/tcp_vegas.c                     | 11
-rw-r--r--  net/ipv6/route.c                         |  3
-rw-r--r--  net/rxrpc/ar-connection.c                | 12
-rw-r--r--  net/sched/sch_teql.c                     |  5
-rw-r--r--  net/sunrpc/svcsock.c                     | 35
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_sendto.c    | 12
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c | 10
-rw-r--r--  net/sunrpc/xprtrdma/verbs.c              |  3
-rw-r--r--  net/wireless/reg.c                       |  7
-rw-r--r--  net/wireless/wext.c                      |  7
21 files changed, 141 insertions(+), 82 deletions(-)
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 30b88777c3df..5ee1a3682bf2 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -134,6 +134,10 @@ struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb)
 	if (skb->protocol == htons(ETH_P_PAUSE))
 		goto drop;
 
+	/* If STP is turned off, then forward */
+	if (p->br->stp_enabled == BR_NO_STP && dest[5] == 0)
+		goto forward;
+
 	if (NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
 		    NULL, br_handle_local_finish))
 		return NULL;	/* frame consumed by filter */
@@ -141,6 +145,7 @@ struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb)
 		return skb;	/* continue processing */
 	}
 
+forward:
 	switch (p->state) {
 	case BR_STATE_FORWARDING:
 		rhook = rcu_dereference(br_should_route_hook);
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 6e63ec3f1fcf..0660515f3992 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -297,6 +297,9 @@ void br_topology_change_detection(struct net_bridge *br)
 {
 	int isroot = br_is_root_bridge(br);
 
+	if (br->stp_enabled != BR_KERNEL_STP)
+		return;
+
 	pr_info("%s: topology change detected, %s\n", br->dev->name,
 		isroot ? "propagating" : "sending tcn bpdu");
 
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 9cc9f95b109e..6d62d4618cfc 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -66,9 +66,9 @@
 
    NOTES.
 
-   * The stored value for avbps is scaled by 2^5, so that maximal
-     rate is ~1Gbit, avpps is scaled by 2^10.
-
+   * avbps is scaled by 2^5, avpps is scaled by 2^10.
+   * both values are reported as 32 bit unsigned values. bps can
+     overflow for fast links : max speed being 34360Mbit/sec
    * Minimal interval is HZ/4=250msec (it is the greatest common divisor
      for HZ=100 and HZ=1024 8)), maximal interval
      is (HZ*2^EST_MAX_INTERVAL)/4 = 8sec. Shorter intervals
@@ -86,9 +86,9 @@ struct gen_estimator
 	spinlock_t	*stats_lock;
 	int		ewma_log;
 	u64		last_bytes;
+	u64		avbps;
 	u32		last_packets;
 	u32		avpps;
-	u32		avbps;
 	struct rcu_head	e_rcu;
 	struct rb_node	node;
 };
@@ -115,6 +115,7 @@ static void est_timer(unsigned long arg)
 	rcu_read_lock();
 	list_for_each_entry_rcu(e, &elist[idx].list, list) {
 		u64 nbytes;
+		u64 brate;
 		u32 npackets;
 		u32 rate;
 
@@ -125,9 +126,9 @@ static void est_timer(unsigned long arg)
 
 		nbytes = e->bstats->bytes;
 		npackets = e->bstats->packets;
-		rate = (nbytes - e->last_bytes)<<(7 - idx);
+		brate = (nbytes - e->last_bytes)<<(7 - idx);
 		e->last_bytes = nbytes;
-		e->avbps += ((long)rate - (long)e->avbps) >> e->ewma_log;
+		e->avbps += ((s64)(brate - e->avbps)) >> e->ewma_log;
 		e->rate_est->bps = (e->avbps+0xF)>>5;
 
 		rate = (npackets - e->last_packets)<<(12 - idx);
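
The hunk above widens the byte-rate average to u64 so the 2^5-scaled EWMA cannot wrap on fast links. A minimal userspace sketch of the same update rule, with illustrative struct and demo values (not the kernel's):

#include <stdint.h>
#include <stdio.h>

struct est {
	uint64_t avbps;		/* average bytes/sec, kept scaled by 2^5 */
	int      ewma_log;	/* smoothing factor: larger = smoother */
};

/* One timer tick: fold the measured (scaled) rate into the average,
 * mirroring e->avbps += ((s64)(brate - e->avbps)) >> e->ewma_log. */
static void est_update(struct est *e, uint64_t brate)
{
	e->avbps += (int64_t)(brate - e->avbps) >> e->ewma_log;
}

/* Report: round up and strip the 2^5 scaling, as (avbps + 0xF) >> 5. */
static uint32_t est_bps(const struct est *e)
{
	return (uint32_t)((e->avbps + 0xF) >> 5);
}

int main(void)
{
	struct est e = { 0, 3 };
	int i;

	for (i = 0; i < 8; i++) {	/* feed a steady ~1 GB/s */
		est_update(&e, 1000000000ULL << 5);
		printf("tick %d: est %u bytes/sec\n", i, (unsigned)est_bps(&e));
	}
	return 0;
}
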
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index b5873bdff612..64f51eec6576 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -175,9 +175,13 @@ static void service_arp_queue(struct netpoll_info *npi)
 void netpoll_poll(struct netpoll *np)
 {
 	struct net_device *dev = np->dev;
-	const struct net_device_ops *ops = dev->netdev_ops;
+	const struct net_device_ops *ops;
+
+	if (!dev || !netif_running(dev))
+		return;
 
-	if (!dev || !netif_running(dev) || !ops->ndo_poll_controller)
+	ops = dev->netdev_ops;
+	if (!ops->ndo_poll_controller)
 		return;
 
 	/* Process pending work on NIC */
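
The netpoll change above fixes an ordering bug: dev->netdev_ops was read in the initializer before the !dev test could run. The same pattern in a standalone sketch (types are made up for illustration):

#include <stddef.h>

struct ops { void (*poll)(void); };
struct dev { const struct ops *netdev_ops; };

/* Buggy shape: the initializer dereferences d before the NULL test. */
static void poll_buggy(struct dev *d)
{
	const struct ops *ops = d->netdev_ops;	/* crashes if d == NULL */

	if (!d || !ops->poll)
		return;
	ops->poll();
}

/* Fixed shape, as in the hunk: validate first, dereference after. */
static void poll_fixed(struct dev *d)
{
	const struct ops *ops;

	if (!d)
		return;
	ops = d->netdev_ops;
	if (!ops->poll)
		return;
	ops->poll();
}
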
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 3779c1438c11..0666a827bc62 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2447,7 +2447,7 @@ static inline void free_SAs(struct pktgen_dev *pkt_dev)
 	if (pkt_dev->cflows) {
 		/* let go of the SAs if we have them */
 		int i = 0;
-		for (;  i < pkt_dev->nflows; i++){
+		for (;  i < pkt_dev->cflows; i++) {
 			struct xfrm_state *x = pkt_dev->flows[i].x;
 			if (x) {
 				xfrm_state_put(x);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d152394b2611..e505b5392e1e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2288,7 +2288,7 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
 next_skb:
 	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
 
-	if (abs_offset < block_limit) {
+	if (abs_offset < block_limit && !st->frag_data) {
 		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
 		return block_limit - abs_offset;
 	}
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 9d26a3da37e5..5b919f7b45db 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -408,7 +408,7 @@ config INET_XFRM_MODE_BEET
 
 config INET_LRO
 	bool "Large Receive Offload (ipv4/tcp)"
-
+	default y
 	---help---
 	  Support for Large Receive Offload (ipv4/tcp).
 
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index ec0ae490f0b6..33c7c85dfe40 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -986,9 +986,12 @@ fib_find_node(struct trie *t, u32 key)
 static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
 {
 	int wasfull;
-	t_key cindex, key = tn->key;
+	t_key cindex, key;
 	struct tnode *tp;
 
+	preempt_disable();
+	key = tn->key;
+
 	while (tn != NULL && (tp = node_parent((struct node *)tn)) != NULL) {
 		cindex = tkey_extract_bits(key, tp->pos, tp->bits);
 		wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
@@ -1007,6 +1010,7 @@ static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
 	if (IS_TNODE(tn))
 		tn = (struct tnode *)resize(t, (struct tnode *)tn);
 
+	preempt_enable();
 	return (struct node *)tn;
 }
 
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 90d22ae0a419..88bf051d0cbb 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -139,6 +139,8 @@ __be32 ic_servaddr = NONE; /* Boot server IP address */
 __be32 root_server_addr = NONE;	/* Address of NFS server */
 u8 root_server_path[256] = { 0, };	/* Path to mount as root */
 
+u32 ic_dev_xid;		/* Device under configuration */
+
 /* vendor class identifier */
 static char vendor_class_identifier[253] __initdata;
 
@@ -932,6 +934,13 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
 		goto drop_unlock;
 	}
 
+	/* Is it a reply for the device we are configuring? */
+	if (b->xid != ic_dev_xid) {
+		if (net_ratelimit())
+			printk(KERN_ERR "DHCP/BOOTP: Ignoring delayed packet \n");
+		goto drop_unlock;
+	}
+
 	/* Parse extensions */
 	if (ext_len >= 4 &&
 	    !memcmp(b->exten, ic_bootp_cookie, 4)) { /* Check magic cookie */
@@ -1115,6 +1124,9 @@ static int __init ic_dynamic(void)
 	get_random_bytes(&timeout, sizeof(timeout));
 	timeout = CONF_BASE_TIMEOUT + (timeout % (unsigned) CONF_TIMEOUT_RANDOM);
 	for (;;) {
+		/* Track the device we are configuring */
+		ic_dev_xid = d->xid;
+
 #ifdef IPCONFIG_BOOTP
 		if (do_bootp && (d->able & IC_BOOTP))
 			ic_bootp_send_if(d, jiffies - start_jiffies);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index c4c60e9f068a..28205e5bfa9b 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -784,8 +784,8 @@ static void rt_check_expire(void)
 {
 	static unsigned int rover;
 	unsigned int i = rover, goal;
-	struct rtable *rth, **rthp;
-	unsigned long length = 0, samples = 0;
+	struct rtable *rth, *aux, **rthp;
+	unsigned long samples = 0;
 	unsigned long sum = 0, sum2 = 0;
 	u64 mult;
 
@@ -795,9 +795,9 @@ static void rt_check_expire(void)
 	goal = (unsigned int)mult;
 	if (goal > rt_hash_mask)
 		goal = rt_hash_mask + 1;
-	length = 0;
 	for (; goal > 0; goal--) {
 		unsigned long tmo = ip_rt_gc_timeout;
+		unsigned long length;
 
 		i = (i + 1) & rt_hash_mask;
 		rthp = &rt_hash_table[i].chain;
@@ -809,8 +809,10 @@ static void rt_check_expire(void)
 
 		if (*rthp == NULL)
 			continue;
+		length = 0;
 		spin_lock_bh(rt_hash_lock_addr(i));
 		while ((rth = *rthp) != NULL) {
+			prefetch(rth->u.dst.rt_next);
 			if (rt_is_expired(rth)) {
 				*rthp = rth->u.dst.rt_next;
 				rt_free(rth);
@@ -819,33 +821,30 @@ static void rt_check_expire(void)
 			if (rth->u.dst.expires) {
 				/* Entry is expired even if it is in use */
 				if (time_before_eq(jiffies, rth->u.dst.expires)) {
+nofree:
 					tmo >>= 1;
 					rthp = &rth->u.dst.rt_next;
 					/*
-					 * Only bump our length if the hash
-					 * inputs on entries n and n+1 are not
-					 * the same, we only count entries on
+					 * We only count entries on
 					 * a chain with equal hash inputs once
 					 * so that entries for different QOS
 					 * levels, and other non-hash input
 					 * attributes don't unfairly skew
 					 * the length computation
 					 */
-					if ((*rthp == NULL) ||
-					    !compare_hash_inputs(&(*rthp)->fl,
-								 &rth->fl))
-						length += ONE;
+					for (aux = rt_hash_table[i].chain;;) {
+						if (aux == rth) {
+							length += ONE;
+							break;
+						}
+						if (compare_hash_inputs(&aux->fl, &rth->fl))
+							break;
+						aux = aux->u.dst.rt_next;
+					}
 					continue;
 				}
-			} else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
-				tmo >>= 1;
-				rthp = &rth->u.dst.rt_next;
-				if ((*rthp == NULL) ||
-				    !compare_hash_inputs(&(*rthp)->fl,
-							 &rth->fl))
-					length += ONE;
-				continue;
-			}
+			} else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
+				goto nofree;
 
 			/* Cleanup aged off entries. */
 			*rthp = rth->u.dst.rt_next;
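
The new aux walk above counts an entry toward the chain length only if no earlier entry shares its hash inputs, replacing the old next-neighbour comparison that missed duplicates sitting apart in the chain. A minimal sketch of that rule, with a plain int key standing in for compare_hash_inputs():

struct ent {
	int key;		/* stands in for the flow hash inputs */
	struct ent *next;
};

/* Return 1 if rth is the first entry on the chain with its key and
 * should be counted, 0 if an earlier duplicate already was. Mirrors
 * the for (aux = rt_hash_table[i].chain;;) walk in the hunk; rth is
 * assumed to be on the chain, so the loop always terminates. */
static int counts_toward_length(struct ent *head, struct ent *rth)
{
	struct ent *aux;

	for (aux = head; ; aux = aux->next) {
		if (aux == rth)
			return 1;	/* reached ourselves first: count */
		if (aux->key == rth->key)
			return 0;	/* earlier twin exists: skip */
	}
}
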
@@ -1068,7 +1067,6 @@ out: return 0;
 static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
 {
 	struct rtable	*rth, **rthp;
-	struct rtable	*rthi;
 	unsigned long	now;
 	struct rtable *cand, **candp;
 	u32 		min_score;
@@ -1088,7 +1086,6 @@ restart:
 	}
 
 	rthp = &rt_hash_table[hash].chain;
-	rthi = NULL;
 
 	spin_lock_bh(rt_hash_lock_addr(hash));
 	while ((rth = *rthp) != NULL) {
@@ -1134,17 +1131,6 @@ restart:
 		chain_length++;
 
 		rthp = &rth->u.dst.rt_next;
-
-		/*
-		 * check to see if the next entry in the chain
-		 * contains the same hash input values as rt.  If it does
-		 * This is where we will insert into the list, instead of
-		 * at the head.  This groups entries that differ by aspects not
-		 * relvant to the hash function together, which we use to adjust
-		 * our chain length
-		 */
-		if (*rthp && compare_hash_inputs(&(*rthp)->fl, &rt->fl))
-			rthi = rth;
 	}
 
 	if (cand) {
@@ -1205,10 +1191,7 @@ restart:
 		}
 	}
 
-	if (rthi)
-		rt->u.dst.rt_next = rthi->u.dst.rt_next;
-	else
-		rt->u.dst.rt_next = rt_hash_table[hash].chain;
+	rt->u.dst.rt_next = rt_hash_table[hash].chain;
 
 #if RT_CACHE_DEBUG >= 2
 	if (rt->u.dst.rt_next) {
@@ -1224,10 +1207,7 @@ restart:
 	 * previous writes to rt are comitted to memory
 	 * before making rt visible to other CPUS.
 	 */
-	if (rthi)
-		rcu_assign_pointer(rthi->u.dst.rt_next, rt);
-	else
-		rcu_assign_pointer(rt_hash_table[hash].chain, rt);
+	rcu_assign_pointer(rt_hash_table[hash].chain, rt);
 
 	spin_unlock_bh(rt_hash_lock_addr(hash));
 	*rp = rt;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1d7f49c6f0ca..7a0f0b27bf1f 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1321,6 +1321,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	struct task_struct *user_recv = NULL;
 	int copied_early = 0;
 	struct sk_buff *skb;
+	u32 urg_hole = 0;
 
 	lock_sock(sk);
 
@@ -1532,7 +1533,8 @@ do_prequeue:
 			}
 		}
 	}
-	if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
+	if ((flags & MSG_PEEK) &&
+	    (peek_seq - copied - urg_hole != tp->copied_seq)) {
 		if (net_ratelimit())
 			printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
 			       current->comm, task_pid_nr(current));
@@ -1553,6 +1555,7 @@ do_prequeue:
 		if (!urg_offset) {
 			if (!sock_flag(sk, SOCK_URGINLINE)) {
 				++*seq;
+				urg_hole++;
 				offset++;
 				used--;
 				if (!used)
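
The urg_hole counter introduced above records bytes the peek path skipped over (an inline urgent byte bumps *seq without being copied), which lets the MSG_PEEK race check state its real invariant. Isolated, with u32 wraparound matching TCP sequence arithmetic:

#include <stdint.h>

/* While peeking, the sequence cursor may legitimately run ahead of
 * copied_seq by the bytes handed to the caller plus any urgent bytes
 * deliberately skipped; anything else means another thread consumed
 * data mid-peek. */
static int msg_peek_race(uint32_t peek_seq, uint32_t copied,
			 uint32_t urg_hole, uint32_t copied_seq)
{
	return peek_seq - copied - urg_hole != copied_seq;
}
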
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index a453aac91bd3..c6743eec9b7d 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -158,6 +158,11 @@ void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event)
 }
 EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event);
 
+static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp)
+{
+	return min(tp->snd_ssthresh, tp->snd_cwnd-1);
+}
+
 static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -221,11 +226,10 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 		 */
 		diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT;
 
-		if (diff > gamma && tp->snd_ssthresh > 2 ) {
+		if (diff > gamma && tp->snd_cwnd <= tp->snd_ssthresh) {
 			/* Going too fast. Time to slow down
 			 * and switch to congestion avoidance.
 			 */
-			tp->snd_ssthresh = 2;
 
 			/* Set cwnd to match the actual rate
 			 * exactly:
@@ -235,6 +239,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 			 * utilization.
 			 */
 			tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1);
+			tp->snd_ssthresh = tcp_vegas_ssthresh(tp);
 
 		} else if (tp->snd_cwnd <= tp->snd_ssthresh) {
 			/* Slow start. */
@@ -250,6 +255,8 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 				 * we slow down.
 				 */
 				tp->snd_cwnd--;
+				tp->snd_ssthresh
+					= tcp_vegas_ssthresh(tp);
 			} else if (diff < alpha) {
 				/* We don't have enough extra packets
 				 * in the network, so speed up.
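
To put numbers on the Vegas logic above: diff estimates how many packets this connection keeps queued in the network, and the new helper pins ssthresh just below the window being shrunk, so the connection lands in congestion avoidance instead of slow start. A hedged arithmetic sketch:

#include <stdint.h>

/* diff = cwnd * (rtt - baseRTT) / baseRTT, as in the hunk: e.g. with
 * cwnd = 20, baseRTT = 100 and a measured rtt of 110, roughly
 * 20 * 10 / 100 = 2 packets are sitting in queues (units cancel). */
static uint32_t vegas_diff(uint32_t cwnd, uint32_t rtt, uint32_t base_rtt)
{
	return cwnd * (rtt - base_rtt) / base_rtt;
}

/* Mirror of tcp_vegas_ssthresh(): keep ssthresh below the window we
 * are backing off to, so cwnd > ssthresh holds afterwards and the
 * sender grows linearly rather than re-entering slow start. */
static uint32_t vegas_ssthresh(uint32_t ssthresh, uint32_t cwnd)
{
	return ssthresh < cwnd - 1 ? ssthresh : cwnd - 1;
}
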
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 1394ddb6e35c..032a5ec391c5 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -137,6 +137,7 @@ static struct rt6_info ip6_null_entry_template = {
 		}
 	},
 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
+	.rt6i_protocol  = RTPROT_KERNEL,
 	.rt6i_metric	= ~(u32) 0,
 	.rt6i_ref	= ATOMIC_INIT(1),
 };
@@ -159,6 +160,7 @@ static struct rt6_info ip6_prohibit_entry_template = {
 		}
 	},
 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
+	.rt6i_protocol  = RTPROT_KERNEL,
 	.rt6i_metric	= ~(u32) 0,
 	.rt6i_ref	= ATOMIC_INIT(1),
 };
@@ -176,6 +178,7 @@ static struct rt6_info ip6_blk_hole_entry_template = {
 		}
 	},
 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
+	.rt6i_protocol  = RTPROT_KERNEL,
 	.rt6i_metric	= ~(u32) 0,
 	.rt6i_ref	= ATOMIC_INIT(1),
 };
diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
index 0f1218b8d289..67e38a056240 100644
--- a/net/rxrpc/ar-connection.c
+++ b/net/rxrpc/ar-connection.c
@@ -343,9 +343,9 @@ static int rxrpc_connect_exclusive(struct rxrpc_sock *rx,
 	/* not yet present - create a candidate for a new connection
 	 * and then redo the check */
 	conn = rxrpc_alloc_connection(gfp);
-	if (IS_ERR(conn)) {
-		_leave(" = %ld", PTR_ERR(conn));
-		return PTR_ERR(conn);
+	if (!conn) {
+		_leave(" = -ENOMEM");
+		return -ENOMEM;
 	}
 
 	conn->trans = trans;
@@ -508,9 +508,9 @@ int rxrpc_connect_call(struct rxrpc_sock *rx,
 	/* not yet present - create a candidate for a new connection and then
 	 * redo the check */
 	candidate = rxrpc_alloc_connection(gfp);
-	if (IS_ERR(candidate)) {
-		_leave(" = %ld", PTR_ERR(candidate));
-		return PTR_ERR(candidate);
+	if (!candidate) {
+		_leave(" = -ENOMEM");
+		return -ENOMEM;
 	}
 
 	candidate->trans = trans;
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index ec697cebb63b..3b6418297231 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -303,6 +303,8 @@ restart:
 		switch (teql_resolve(skb, skb_res, slave)) {
 		case 0:
 			if (__netif_tx_trylock(slave_txq)) {
+				unsigned int length = qdisc_pkt_len(skb);
+
 				if (!netif_tx_queue_stopped(slave_txq) &&
 				    !netif_tx_queue_frozen(slave_txq) &&
 				    slave_ops->ndo_start_xmit(skb, slave) == 0) {
@@ -310,8 +312,7 @@ restart:
 					master->slaves = NEXT_SLAVE(q);
 					netif_wake_queue(dev);
 					master->stats.tx_packets++;
-					master->stats.tx_bytes +=
-						qdisc_pkt_len(skb);
+					master->stats.tx_bytes += length;
 					return 0;
 				}
 				__netif_tx_unlock(slave_txq);
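
The length snapshot above exists because a successful ndo_start_xmit() may free the skb, after which qdisc_pkt_len(skb) would be a use-after-free. The general shape of the fix in a standalone sketch:

#include <stdlib.h>

struct buf {
	size_t len;
	char  *data;
};

/* Consumer takes ownership and frees the buffer, like a successful
 * ndo_start_xmit() does with the skb. */
static int xmit(struct buf *b)
{
	free(b->data);
	free(b);
	return 0;
}

static void send_and_account(struct buf *b, size_t *tx_bytes)
{
	size_t len = b->len;	/* snapshot before ownership transfer */

	if (xmit(b) == 0)
		*tx_bytes += len;	/* b must not be touched here */
}
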
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index af3198814c15..9d504234af4a 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -345,6 +345,7 @@ static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
 	lock_sock(sock->sk);
 	sock->sk->sk_sndbuf = snd * 2;
 	sock->sk->sk_rcvbuf = rcv * 2;
+	sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
 	release_sock(sock->sk);
 #endif
 }
@@ -796,6 +797,23 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
 		test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags),
 		test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags));
 
+	if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
+		/* sndbuf needs to have room for one request
+		 * per thread, otherwise we can stall even when the
+		 * network isn't a bottleneck.
+		 *
+		 * We count all threads rather than threads in a
+		 * particular pool, which provides an upper bound
+		 * on the number of threads which will access the socket.
+		 *
+		 * rcvbuf just needs to be able to hold a few requests.
+		 * Normally they will be removed from the queue
+		 * as soon a a complete request arrives.
+		 */
+		svc_sock_setbufsize(svsk->sk_sock,
+				    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
+				    3 * serv->sv_max_mesg);
+
 	clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 
 	/* Receive data. If we haven't got the record length yet, get
@@ -1043,6 +1061,15 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
 
 	tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
 
+	/* initialise setting must have enough space to
+	 * receive and respond to one request.
+	 * svc_tcp_recvfrom will re-adjust if necessary
+	 */
+	svc_sock_setbufsize(svsk->sk_sock,
+			    3 * svsk->sk_xprt.xpt_server->sv_max_mesg,
+			    3 * svsk->sk_xprt.xpt_server->sv_max_mesg);
+
+	set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
 	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 	if (sk->sk_state != TCP_ESTABLISHED)
 		set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
@@ -1112,14 +1139,8 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
 	/* Initialize the socket */
 	if (sock->type == SOCK_DGRAM)
 		svc_udp_init(svsk, serv);
-	else {
-		/* initialise setting must have enough space to
-		 * receive and respond to one request.
-		 */
-		svc_sock_setbufsize(svsk->sk_sock, 4 * serv->sv_max_mesg,
-					4 * serv->sv_max_mesg);
+	else
 		svc_tcp_init(svsk, serv);
-	}
 
 	dprintk("svc: svc_setup_socket created %p (inet %p)\n",
 				svsk, svsk->sk_sk);
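
For a feel of the sizing in the svcsock hunks above, a small sketch with assumed demo values (1 MB sv_max_mesg, 8 threads; neither is taken from the patch): svc_tcp_recvfrom() reserves one full-size request of send room per server thread plus slack, and svc_sock_setbufsize() doubles whatever it is handed when writing sk_sndbuf/sk_rcvbuf.

#include <stdio.h>

#define MAX_MESG	(1024UL * 1024)	/* assumed sv_max_mesg */
#define NRTHREADS	8UL		/* assumed sv_nrthreads */

int main(void)
{
	/* svc_tcp_recvfrom(): room for one request per thread + slack */
	unsigned long snd = (NRTHREADS + 3) * MAX_MESG;
	unsigned long rcv = 3 * MAX_MESG;

	/* svc_sock_setbufsize() doubles both when setting the socket */
	printf("sk_sndbuf = %lu, sk_rcvbuf = %lu\n", snd * 2, rcv * 2);
	return 0;
}
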
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 8b510c5e8777..f11be72a1a80 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -128,7 +128,8 @@ static int fast_reg_xdr(struct svcxprt_rdma *xprt,
 		page_bytes -= sge_bytes;
 
 		frmr->page_list->page_list[page_no] =
-			ib_dma_map_page(xprt->sc_cm_id->device, page, 0,
+			ib_dma_map_single(xprt->sc_cm_id->device,
+					  page_address(page),
 					PAGE_SIZE, DMA_TO_DEVICE);
 		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
 					 frmr->page_list->page_list[page_no]))
@@ -532,18 +533,17 @@ static int send_reply(struct svcxprt_rdma *rdma,
 	clear_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);
 
 	/* Prepare the SGE for the RPCRDMA Header */
+	ctxt->sge[0].lkey = rdma->sc_dma_lkey;
+	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
 	ctxt->sge[0].addr =
-		ib_dma_map_page(rdma->sc_cm_id->device,
-				page, 0, PAGE_SIZE, DMA_TO_DEVICE);
+		ib_dma_map_single(rdma->sc_cm_id->device, page_address(page),
+				  ctxt->sge[0].length, DMA_TO_DEVICE);
 	if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
 		goto err;
 	atomic_inc(&rdma->sc_dma_used);
 
 	ctxt->direction = DMA_TO_DEVICE;
 
-	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
-	ctxt->sge[0].lkey = rdma->sc_dma_lkey;
-
 	/* Determine how many of our SGE are to be transmitted */
 	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
 		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 4b0c2fa15e0b..5151f9f6c573 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -500,8 +500,8 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
 		BUG_ON(sge_no >= xprt->sc_max_sge);
 		page = svc_rdma_get_page();
 		ctxt->pages[sge_no] = page;
-		pa = ib_dma_map_page(xprt->sc_cm_id->device,
-				     page, 0, PAGE_SIZE,
+		pa = ib_dma_map_single(xprt->sc_cm_id->device,
+				     page_address(page), PAGE_SIZE,
 				     DMA_FROM_DEVICE);
 		if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
 			goto err_put_ctxt;
@@ -1315,8 +1315,8 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
 	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);
 
 	/* Prepare SGE for local address */
-	sge.addr = ib_dma_map_page(xprt->sc_cm_id->device,
-				   p, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+	sge.addr = ib_dma_map_single(xprt->sc_cm_id->device,
+				   page_address(p), PAGE_SIZE, DMA_FROM_DEVICE);
 	if (ib_dma_mapping_error(xprt->sc_cm_id->device, sge.addr)) {
 		put_page(p);
 		return;
@@ -1343,7 +1343,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
 	if (ret) {
 		dprintk("svcrdma: Error %d posting send for protocol error\n",
 			ret);
-		ib_dma_unmap_page(xprt->sc_cm_id->device,
+		ib_dma_unmap_single(xprt->sc_cm_id->device,
 				  sge.addr, PAGE_SIZE,
 				  DMA_FROM_DEVICE);
 		svc_rdma_put_context(ctxt, 1);
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 3b21e0cc5e69..465aafc2007f 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1495,7 +1495,8 @@ rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg,
 	frmr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
 	frmr_wr.wr.fast_reg.length = i << PAGE_SHIFT;
 	frmr_wr.wr.fast_reg.access_flags = (writing ?
-				IB_ACCESS_REMOTE_WRITE : IB_ACCESS_REMOTE_READ);
+				IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
+				IB_ACCESS_REMOTE_READ);
 	frmr_wr.wr.fast_reg.rkey = seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey;
 	DECR_CQCOUNT(&r_xprt->rx_ep);
 
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 08265ca15785..487cb627ddba 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1551,6 +1551,13 @@ static int regulatory_hint_core(const char *alpha2)
 
 	queue_regulatory_request(request);
 
+	/*
+	 * This ensures last_request is populated once modules
+	 * come swinging in and calling regulatory hints and
+	 * wiphy_apply_custom_regulatory().
+	 */
+	flush_scheduled_work();
+
 	return 0;
 }
 
diff --git a/net/wireless/wext.c b/net/wireless/wext.c
index cb6a5bb85d80..0e59f9ae9b81 100644
--- a/net/wireless/wext.c
+++ b/net/wireless/wext.c
@@ -786,6 +786,13 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
 			err = -EFAULT;
 			goto out;
 		}
+
+		if (cmd == SIOCSIWENCODEEXT) {
+			struct iw_encode_ext *ee = (void *) extra;
+
+			if (iwp->length < sizeof(*ee) + ee->key_len)
+				return -EFAULT;
+		}
 	}
 
 	err = handler(dev, info, (union iwreq_data *) iwp, extra);
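
The SIOCSIWENCODEEXT check above is the usual defense for variable-length records: the inner, user-controlled key_len must fit within the buffer length the caller declared, or the handler would read past the copied-in data. The same shape in isolation:

#include <stddef.h>
#include <stdint.h>

struct record {
	uint16_t key_len;	/* length claimed inside the record */
	uint8_t  key[];		/* key_len bytes are supposed to follow */
};

/* Reject records whose inner length overruns the declared buffer,
 * mirroring iwp->length < sizeof(*ee) + ee->key_len above. */
static int record_fits(const struct record *r, size_t buf_len)
{
	return buf_len >= sizeof(*r) + r->key_len;
}
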