author    David S. Miller <davem@davemloft.net>  2014-07-16 17:09:34 -0400
committer David S. Miller <davem@davemloft.net>  2014-07-16 17:09:34 -0400
commit    1a98c69af1ecd97bfd1f4e4539924a9192434e36 (patch)
tree      a243defcf921ea174f8e43fce11d06830a6a9c36 /net/core
parent    7a575f6b907ea5d207d2b5010293c189616eae34 (diff)
parent    b6603fe574af289dbe9eb9fb4c540bca04f5a053 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c        | 30
-rw-r--r--  net/core/iovec.c      | 55
-rw-r--r--  net/core/neighbour.c  |  9
3 files changed, 23 insertions(+), 71 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 2c98f10ee62a..138ab897de7d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -148,6 +148,9 @@ struct list_head ptype_all __read_mostly; /* Taps */
 static struct list_head offload_base __read_mostly;
 
 static int netif_rx_internal(struct sk_buff *skb);
+static int call_netdevice_notifiers_info(unsigned long val,
+                                         struct net_device *dev,
+                                         struct netdev_notifier_info *info);
 
 /*
  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
@@ -1214,7 +1217,11 @@ EXPORT_SYMBOL(netdev_features_change);
 void netdev_state_change(struct net_device *dev)
 {
         if (dev->flags & IFF_UP) {
-                call_netdevice_notifiers(NETDEV_CHANGE, dev);
+                struct netdev_notifier_change_info change_info;
+
+                change_info.flags_changed = 0;
+                call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
+                                              &change_info.info);
                 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
         }
 }
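A note on the netdev_state_change() hunk above: NETDEV_CHANGE used to be raised through plain call_netdevice_notifiers(), so a listener that interpreted the notifier payload as a struct netdev_notifier_change_info could read an undefined flags_changed. The hunk sends a real change_info with flags_changed = 0 instead. A minimal consumer sketch, assuming a hypothetical handler registered via register_netdevice_notifier() (example_netdev_event is our name, not part of this diff):

    static int example_netdev_event(struct notifier_block *nb,
                                    unsigned long event, void *ptr)
    {
            struct net_device *dev = netdev_notifier_info_to_dev(ptr);

            if (event == NETDEV_CHANGE) {
                    /* Payload is now guaranteed to be a change_info. */
                    struct netdev_notifier_change_info *change_info = ptr;

                    pr_info("%s: NETDEV_CHANGE, flags_changed=0x%x\n",
                            dev->name, change_info->flags_changed);
            }
            return NOTIFY_DONE;
    }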
@@ -4234,9 +4241,8 @@ static int process_backlog(struct napi_struct *napi, int quota)
 #endif
         napi->weight = weight_p;
         local_irq_disable();
-        while (work < quota) {
+        while (1) {
                 struct sk_buff *skb;
-                unsigned int qlen;
 
                 while ((skb = __skb_dequeue(&sd->process_queue))) {
                         local_irq_enable();
@@ -4250,24 +4256,24 @@ static int process_backlog(struct napi_struct *napi, int quota)
                 }
 
                 rps_lock(sd);
-                qlen = skb_queue_len(&sd->input_pkt_queue);
-                if (qlen)
-                        skb_queue_splice_tail_init(&sd->input_pkt_queue,
-                                                   &sd->process_queue);
-
-                if (qlen < quota - work) {
+                if (skb_queue_empty(&sd->input_pkt_queue)) {
                         /*
                          * Inline a custom version of __napi_complete().
                          * only current cpu owns and manipulates this napi,
-                         * and NAPI_STATE_SCHED is the only possible flag set on backlog.
-                         * we can use a plain write instead of clear_bit(),
+                         * and NAPI_STATE_SCHED is the only possible flag set
+                         * on backlog.
+                         * We can use a plain write instead of clear_bit(),
                          * and we dont need an smp_mb() memory barrier.
                          */
                         list_del(&napi->poll_list);
                         napi->state = 0;
+                        rps_unlock(sd);
 
-                        quota = work + qlen;
+                        break;
                 }
+
+                skb_queue_splice_tail_init(&sd->input_pkt_queue,
+                                           &sd->process_queue);
                 rps_unlock(sd);
         }
         local_irq_enable();
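Taken together, the two process_backlog() hunks above change the loop's shape: the old code counted qlen against the remaining quota, while the new code drains process_queue, completes the NAPI context only once input_pkt_queue is empty, and otherwise splices the input queue over and goes around again. A compilable stand-in sketch of the new flow, with IRQ/RPS locking elided and toy queue types in place of sk_buff_head (all names here are ours):

    #include <stdbool.h>
    #include <stddef.h>

    struct queue { int len; };                      /* stand-in for sk_buff_head */

    static void *dequeue(struct queue *q)
    {
            return q->len ? (q->len--, (void *)1) : NULL;
    }
    static bool queue_empty(const struct queue *q) { return q->len == 0; }
    static void splice_tail(struct queue *src, struct queue *dst)
    {
            dst->len += src->len;
            src->len = 0;
    }

    /* Mirrors the new shape: drain, then either complete or refill. */
    static int backlog_poll(struct queue *input, struct queue *process, int quota)
    {
            int work = 0;

            while (1) {
                    while (dequeue(process)) {
                            if (++work >= quota)
                                    return work;    /* quota exhausted */
                    }
                    if (queue_empty(input))
                            break;                  /* inline __napi_complete() here */
                    splice_tail(input, process);    /* refill and loop again */
            }
            return work;
    }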
diff --git a/net/core/iovec.c b/net/core/iovec.c
index b61869429f4c..827dd6beb49c 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -75,61 +75,6 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
 }
 
 /*
- * Copy kernel to iovec. Returns -EFAULT on error.
- */
-
-int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
-                      int offset, int len)
-{
-        int copy;
-        for (; len > 0; ++iov) {
-                /* Skip over the finished iovecs */
-                if (unlikely(offset >= iov->iov_len)) {
-                        offset -= iov->iov_len;
-                        continue;
-                }
-                copy = min_t(unsigned int, iov->iov_len - offset, len);
-                if (copy_to_user(iov->iov_base + offset, kdata, copy))
-                        return -EFAULT;
-                offset = 0;
-                kdata += copy;
-                len -= copy;
-        }
-
-        return 0;
-}
-EXPORT_SYMBOL(memcpy_toiovecend);
-
-/*
- * Copy iovec to kernel. Returns -EFAULT on error.
- */
-
-int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
-                        int offset, int len)
-{
-        /* Skip over the finished iovecs */
-        while (offset >= iov->iov_len) {
-                offset -= iov->iov_len;
-                iov++;
-        }
-
-        while (len > 0) {
-                u8 __user *base = iov->iov_base + offset;
-                int copy = min_t(unsigned int, len, iov->iov_len - offset);
-
-                offset = 0;
-                if (copy_from_user(kdata, base, copy))
-                        return -EFAULT;
-                len -= copy;
-                kdata += copy;
-                iov++;
-        }
-
-        return 0;
-}
-EXPORT_SYMBOL(memcpy_fromiovecend);
-
-/*
  * And now for the all-in-one: copy and checksum from a user iovec
  * directly to a datagram
  * Calls to csum_partial but the last must be in 32 bit chunks
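For reference, both deleted helpers used the same walk: burn down 'offset' by skipping fully-consumed iovecs, then copy segment by segment, resetting offset to zero after the first partial segment. A userspace sketch of that technique, with plain memcpy standing in for copy_to_user() (the function name is ours, not the kernel's):

    #include <string.h>
    #include <sys/uio.h>

    static int copy_to_iovec_end(const struct iovec *iov,
                                 const unsigned char *kdata,
                                 size_t offset, size_t len)
    {
            for (; len > 0; ++iov) {
                    if (offset >= iov->iov_len) {   /* skip finished iovecs */
                            offset -= iov->iov_len;
                            continue;
                    }
                    size_t copy = iov->iov_len - offset;
                    if (copy > len)
                            copy = len;
                    memcpy((unsigned char *)iov->iov_base + offset, kdata, copy);
                    offset = 0;                     /* later segments start at 0 */
                    kdata += copy;
                    len -= copy;
            }
            return 0;
    }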
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 32d872eec7f5..559890b0f0a2 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -3059,11 +3059,12 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
                 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
                        sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
         } else {
+                struct neigh_table *tbl = p->tbl;
                 dev_name_source = "default";
-                t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1);
-                t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1;
-                t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2;
-                t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3;
+                t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
+                t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
+                t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
+                t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
         }
 
         if (handler) {
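The neighbour.c hunk above swaps offset arithmetic, (int *)(p + 1) + n, which silently assumed four ints sit immediately behind the neigh_parms block inside neigh_table, for the addresses of the named gc_* fields. A toy illustration of the two addressing styles (the structs are stand-ins, not the kernel's):

    /* Stand-in structs; only the addressing pattern matters. */
    struct toy_parms { int reachable_time; };
    struct toy_table {
            struct toy_parms parms;
            int gc_interval;
            int gc_thresh1, gc_thresh2, gc_thresh3;
    };

    /* Old style: trust the layout and step past the parms block. */
    static int *gc_thresh1_old(struct toy_parms *p)
    {
            return (int *)(p + 1) + 1;      /* breaks if fields move or padding changes */
    }

    /* New style: name the field and let the compiler do the offsets. */
    static int *gc_thresh1_new(struct toy_table *tbl)
    {
            return &tbl->gc_thresh1;
    }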