author    Linus Torvalds <torvalds@linux-foundation.org>  2012-04-06 13:37:38 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-04-06 13:37:38 -0400
commit    23f347ef63aa36b5a001b6791f657cd0e2a04de3 (patch)
tree      ce06ebdccd16b99265b3e74f8e9b7bd1e29cf465 /net/core/dev.c
parent    314489bd4c7780fde6a069783d5128f6cef52919 (diff)
parent    110c43304db6f06490961529536c362d9ac5732f (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking updates from David Miller:

 1) Fix inaccuracies in network driver interface documentation, from Ben Hutchings.

 2) Fix handling of negative offsets in BPF JITs, from Jan Seiffert.

 3) Compile warning, locking, and refcounting fixes in netfilter's xt_CT, from Pablo Neira Ayuso.

 4) phonet sendmsg needs to validate user length just like any other datagram protocol, fix from Sasha Levin.

 5) IPv6 multicast code uses the wrong loop index, from RongQing Li.

 6) Link handling and firmware fixes in the bnx2x driver from Yaniv Rosner and Yuval Mintz.

 7) mlx4 erroneously allocates 4 pages at a time, regardless of page size, fix from Thadeu Lima de Souza Cascardo.

 8) An SCTP socket option wasn't extended in a backwards compatible way, fix from Thomas Graf.

 9) Add missing address change event emissions to bonding, from Shlomo Pongratz.

10) /proc/net/dev regressed because it uses a private offset to track where we are in the hash table, but this doesn't track the offset pullback that the seq_file code does, resulting in some entries being missed in large dumps. Fix from Eric Dumazet (a small sketch of the new position encoding follows the diff at the end of this page).

11) do_tcp_sendpage() unloads the send queue way too fast, because it invokes tcp_push() when it shouldn't. Let the natural sequence generated by the splice paths, and the associated MSG_MORE settings, guide the tcp_push() calls. Otherwise what goes out of TCP is spaghetti and doesn't batch effectively into GSO/TSO clusters. From Eric Dumazet (see the MSG_MORE sketch right after this list).

12) Once we put an SKB into either the netlink receiver's queue or a socket error queue, it can be consumed and freed up; therefore we cannot touch it after queueing it like that. Fixes from Eric Dumazet.

13) PPP has this annoying behavior in that for every transmit call it immediately stops the TX queue, then calls down into the next layer to transmit the PPP frame. But if that next layer can take it immediately, it just un-stops the TX queue right before returning from the transmit method. Besides being useless work, it makes several facilities unusable, in particular things like the equalizers. Well-behaved devices should only stop the TX queue when they really are full, and in PPP's case when it gets backlogged to the downstream device. David Woodhouse therefore fixed PPP to not stop the TX queue until its downstream can't take data any more (a sketch of the well-behaved queue-stop pattern follows the commit list below).

14) IFF_UNICAST_FLT got accidentally lost in some recent stmmac driver changes; re-add it. From Marc Kleine-Budde.

15) Fix link flaps in ixgbe, from Eric W. Multanen.

16) Descriptor writeback fixes in e1000e from Matthew Vick.
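Point 11 above leans on MSG_MORE semantics: the flag tells TCP that more data is coming, so the stack may hold back a partial segment and coalesce it with what follows. Below is an illustrative userspace sketch of that hint only, not code from the patch; the function and parameter names are hypothetical.

/*
 * Illustrative userspace sketch (not from the patch): MSG_MORE tells TCP
 * that more data follows, so the stack may defer pushing a short segment
 * and coalesce it with the next write.
 */
#include <stddef.h>
#include <sys/socket.h>

static int send_framed(int sock, const void *hdr, size_t hlen,
		       const void *body, size_t blen)
{
	/* header: more data follows, do not push a short segment yet */
	if (send(sock, hdr, hlen, MSG_MORE) < 0)
		return -1;
	/* body: no MSG_MORE, TCP may now push full-sized segments */
	if (send(sock, body, blen, 0) < 0)
		return -1;
	return 0;
}

The kernel-side splice/sendpage path carries the equivalent hint internally; the fix lets those hints, rather than an unconditional tcp_push() per page, decide when segments go out.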
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (47 commits)
  net: fix a race in sock_queue_err_skb()
  netlink: fix races after skb queueing
  doc, net: Update ndo_start_xmit return type and values
  doc, net: Remove instruction to set net_device::trans_start
  doc, net: Update netdev operation names
  doc, net: Update documentation of synchronisation for TX multiqueue
  doc, net: Remove obsolete reference to dev->poll
  ethtool: Remove exception to the requirement of holding RTNL lock
  MAINTAINERS: update for Marvell Ethernet drivers
  bonding: properly unset current_arp_slave on slave link up
  phonet: Check input from user before allocating
  tcp: tcp_sendpages() should call tcp_push() once
  ipv6: fix array index in ip6_mc_add_src()
  mlx4: allocate just enough pages instead of always 4 pages
  stmmac: re-add IFF_UNICAST_FLT for dwmac1000
  bnx2x: Clear MDC/MDIO warning message
  bnx2x: Fix BCM57711+BCM84823 link issue
  bnx2x: Clear BCM84833 LED after fan failure
  bnx2x: Fix BCM84833 PHY FW version presentation
  bnx2x: Fix link issue for BCM8727 boards.
  ...
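Point 13 in the pull message describes the queue-stop discipline expected of drivers: stop the TX queue only when the device (or its downstream) genuinely cannot take more, and wake it from the completion path. Below is a hedged, kernel-style sketch of that pattern; the example_* type and helpers are hypothetical and only model the accounting, they are not taken from the PPP change itself.

/*
 * Hedged sketch of the "well-behaved" queue-stop pattern; example_* names
 * are hypothetical.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct example_priv {
	unsigned int queued;	/* frames currently held downstream */
	unsigned int limit;	/* downstream capacity */
};

static bool example_downstream_full(const struct example_priv *priv)
{
	return priv->queued >= priv->limit;
}

static netdev_tx_t example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	/*
	 * A real driver hands skb to the lower layer here; this sketch only
	 * models the accounting, so release the buffer immediately.
	 */
	priv->queued++;
	dev_kfree_skb(skb);

	/*
	 * Stop the queue only when the downstream really cannot take more,
	 * instead of stopping unconditionally on every transmit.
	 */
	if (example_downstream_full(priv))
		netif_stop_queue(dev);

	return NETDEV_TX_OK;
}

/* Completion path: wake the queue once room opens up again. */
static void example_tx_done(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	if (priv->queued)
		priv->queued--;
	if (netif_queue_stopped(dev) && !example_downstream_full(priv))
		netif_wake_queue(dev);
}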
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--  net/core/dev.c  58
1 file changed, 13 insertions, 45 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 6c7dc9d78e10..c25d453b2803 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4028,54 +4028,41 @@ static int dev_ifconf(struct net *net, char __user *arg)
 
 #ifdef CONFIG_PROC_FS
 
-#define BUCKET_SPACE (32 - NETDEV_HASHBITS)
-
-struct dev_iter_state {
-	struct seq_net_private p;
-	unsigned int pos; /* bucket << BUCKET_SPACE + offset */
-};
+#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
 
 #define get_bucket(x) ((x) >> BUCKET_SPACE)
 #define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
 
-static inline struct net_device *dev_from_same_bucket(struct seq_file *seq)
+static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
 {
-	struct dev_iter_state *state = seq->private;
 	struct net *net = seq_file_net(seq);
 	struct net_device *dev;
 	struct hlist_node *p;
 	struct hlist_head *h;
-	unsigned int count, bucket, offset;
+	unsigned int count = 0, offset = get_offset(*pos);
 
-	bucket = get_bucket(state->pos);
-	offset = get_offset(state->pos);
-	h = &net->dev_name_head[bucket];
-	count = 0;
+	h = &net->dev_name_head[get_bucket(*pos)];
 	hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
-		if (count++ == offset) {
-			state->pos = set_bucket_offset(bucket, count);
+		if (++count == offset)
 			return dev;
-		}
 	}
 
 	return NULL;
 }
 
-static inline struct net_device *dev_from_new_bucket(struct seq_file *seq)
+static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
 {
-	struct dev_iter_state *state = seq->private;
 	struct net_device *dev;
 	unsigned int bucket;
 
-	bucket = get_bucket(state->pos);
 	do {
-		dev = dev_from_same_bucket(seq);
+		dev = dev_from_same_bucket(seq, pos);
 		if (dev)
 			return dev;
 
-		bucket++;
-		state->pos = set_bucket_offset(bucket, 0);
+		bucket = get_bucket(*pos) + 1;
+		*pos = set_bucket_offset(bucket, 1);
 	} while (bucket < NETDEV_HASHENTRIES);
 
 	return NULL;
@@ -4088,33 +4075,20 @@ static inline struct net_device *dev_from_new_bucket(struct seq_file *seq)
 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
 	__acquires(RCU)
 {
-	struct dev_iter_state *state = seq->private;
-
 	rcu_read_lock();
 	if (!*pos)
 		return SEQ_START_TOKEN;
 
-	/* check for end of the hash */
-	if (state->pos == 0 && *pos > 1)
+	if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
 		return NULL;
 
-	return dev_from_new_bucket(seq);
+	return dev_from_bucket(seq, pos);
 }
 
 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-	struct net_device *dev;
-
 	++*pos;
-
-	if (v == SEQ_START_TOKEN)
-		return dev_from_new_bucket(seq);
-
-	dev = dev_from_same_bucket(seq);
-	if (dev)
-		return dev;
-
-	return dev_from_new_bucket(seq);
+	return dev_from_bucket(seq, pos);
 }
 
 void dev_seq_stop(struct seq_file *seq, void *v)
@@ -4213,13 +4187,7 @@ static const struct seq_operations dev_seq_ops = {
 static int dev_seq_open(struct inode *inode, struct file *file)
 {
 	return seq_open_net(inode, file, &dev_seq_ops,
-			    sizeof(struct dev_iter_state));
-}
-
-int dev_seq_open_ops(struct inode *inode, struct file *file,
-		     const struct seq_operations *ops)
-{
-	return seq_open_net(inode, file, ops, sizeof(struct dev_iter_state));
+			    sizeof(struct seq_net_private));
 }
 
 static const struct file_operations dev_seq_fops = {
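The dev.c change above keys the /proc/net/dev iteration off the seq_file position itself: set_bucket_offset(), get_bucket() and get_offset() pack a name-hash bucket and a 1-based in-bucket offset into one value. A minimal standalone sketch of that encoding follows, assuming NETDEV_HASHBITS is 8 as in net/core/dev.c of this period; the macros mirror the post-patch definitions.

/*
 * Standalone sketch of the *pos encoding used above (NETDEV_HASHBITS
 * assumed to be 8; macros mirror the post-patch definitions).
 */
#include <stdio.h>

#define NETDEV_HASHBITS		8
#define BUCKET_SPACE		(32 - NETDEV_HASHBITS - 1)
#define get_bucket(x)		((x) >> BUCKET_SPACE)
#define get_offset(x)		((x) & ((1 << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o)	((b) << BUCKET_SPACE | (o))

int main(void)
{
	/* bucket 3 of the name hash, 7th device in that bucket (1-based) */
	unsigned int pos = set_bucket_offset(3, 7);

	printf("pos=%u bucket=%u offset=%u\n",
	       pos, get_bucket(pos), get_offset(pos));
	return 0;
}

Because both halves live in *pos, any position pullback applied by the seq_file core decodes back to the same bucket and offset, which is why large dumps no longer skip entries.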