Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c        94
-rw-r--r--  net/core/neighbour.c  90
-rw-r--r--  net/core/netpoll.c    71
-rw-r--r--  net/core/skbuff.c      3
4 files changed, 161 insertions(+), 97 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 6ca32f6b3105..763a0eda7158 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -446,7 +446,7 @@ void __dev_remove_pack(struct packet_type *pt)
                }
        }
 
-       printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
+       pr_warn("dev_remove_pack: %p not found\n", pt);
 out:
        spin_unlock(&ptype_lock);
 }
@@ -1039,8 +1039,7 @@ rollback:
                        memcpy(dev->name, oldname, IFNAMSIZ);
                        goto rollback;
                } else {
-                       printk(KERN_ERR
-                              "%s: name change rollback failed: %d.\n",
+                       pr_err("%s: name change rollback failed: %d\n",
                               dev->name, ret);
                }
        }
@@ -1139,9 +1138,8 @@ void dev_load(struct net *net, const char *name)
        no_module = request_module("netdev-%s", name);
        if (no_module && capable(CAP_SYS_MODULE)) {
                if (!request_module("%s", name))
-                       pr_err("Loading kernel module for a network device "
-"with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
-"instead\n", name);
+                       pr_err("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
+                              name);
        }
 }
 EXPORT_SYMBOL(dev_load);
@@ -1655,10 +1653,9 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
                        if (skb_network_header(skb2) < skb2->data ||
                            skb2->network_header > skb2->tail) {
                                if (net_ratelimit())
-                                       printk(KERN_CRIT "protocol %04x is "
-                                              "buggy, dev %s\n",
-                                              ntohs(skb2->protocol),
-                                              dev->name);
+                                       pr_crit("protocol %04x is buggy, dev %s\n",
+                                               ntohs(skb2->protocol),
+                                               dev->name);
                                skb_reset_network_header(skb2);
                        }
 
@@ -1691,9 +1688,7 @@ static void netif_setup_tc(struct net_device *dev, unsigned int txq)
 
        /* If TC0 is invalidated disable TC mapping */
        if (tc->offset + tc->count > txq) {
-               pr_warning("Number of in use tx queues changed "
-                          "invalidating tc mappings. Priority "
-                          "traffic classification disabled!\n");
+               pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
                dev->num_tc = 0;
                return;
        }
@@ -1704,11 +1699,8 @@ static void netif_setup_tc(struct net_device *dev, unsigned int txq)
 
                tc = &dev->tc_to_txq[q];
                if (tc->offset + tc->count > txq) {
-                       pr_warning("Number of in use tx queues "
-                                  "changed. Priority %i to tc "
-                                  "mapping %i is no longer valid "
-                                  "setting map to 0\n",
-                                  i, q);
+                       pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
+                               i, q);
                        netdev_set_prio_tc_map(dev, i, 0);
                }
        }
@@ -2014,8 +2006,7 @@ EXPORT_SYMBOL(skb_gso_segment);
 void netdev_rx_csum_fault(struct net_device *dev)
 {
        if (net_ratelimit()) {
-               printk(KERN_ERR "%s: hw csum failure.\n",
-                      dev ? dev->name : "<unknown>");
+               pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
                dump_stack();
        }
 }
@@ -2332,9 +2323,9 @@ static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
 {
        if (unlikely(queue_index >= dev->real_num_tx_queues)) {
                if (net_ratelimit()) {
-                       pr_warning("%s selects TX queue %d, but "
-                                  "real number of TX queues is %d\n",
-                                  dev->name, queue_index, dev->real_num_tx_queues);
+                       pr_warn("%s selects TX queue %d, but real number of TX queues is %d\n",
+                               dev->name, queue_index,
+                               dev->real_num_tx_queues);
                }
                return 0;
        }
@@ -2578,16 +2569,16 @@ int dev_queue_xmit(struct sk_buff *skb)
                        }
                        HARD_TX_UNLOCK(dev, txq);
                        if (net_ratelimit())
-                               printk(KERN_CRIT "Virtual device %s asks to "
-                                      "queue packet!\n", dev->name);
+                               pr_crit("Virtual device %s asks to queue packet!\n",
+                                       dev->name);
                } else {
                        /* Recursion is detected! It is possible,
                         * unfortunately
                         */
 recursion_alert:
                        if (net_ratelimit())
-                               printk(KERN_CRIT "Dead loop on virtual device "
-                                      "%s, fix it urgently!\n", dev->name);
+                               pr_crit("Dead loop on virtual device %s, fix it urgently!\n",
+                                       dev->name);
                }
        }
 
@@ -3069,8 +3060,8 @@ static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
 
        if (unlikely(MAX_RED_LOOP < ttl++)) {
                if (net_ratelimit())
-                       pr_warning( "Redir loop detected Dropping packet (%d->%d)\n",
-                                    skb->skb_iif, dev->ifindex);
+                       pr_warn("Redir loop detected Dropping packet (%d->%d)\n",
+                               skb->skb_iif, dev->ifindex);
                return TC_ACT_SHOT;
        }
 
@@ -4497,16 +4488,15 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc)
                        dev->flags &= ~IFF_PROMISC;
                else {
                        dev->promiscuity -= inc;
-                       printk(KERN_WARNING "%s: promiscuity touches roof, "
-                               "set promiscuity failed, promiscuity feature "
-                               "of device might be broken.\n", dev->name);
+                       pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
+                               dev->name);
                        return -EOVERFLOW;
                }
        }
        if (dev->flags != old_flags) {
-               printk(KERN_INFO "device %s %s promiscuous mode\n",
-                      dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
-                                                              "left");
+               pr_info("device %s %s promiscuous mode\n",
+                       dev->name,
+                       dev->flags & IFF_PROMISC ? "entered" : "left");
                if (audit_enabled) {
                        current_uid_gid(&uid, &gid);
                        audit_log(current->audit_context, GFP_ATOMIC,
@@ -4579,9 +4569,8 @@ int dev_set_allmulti(struct net_device *dev, int inc)
                        dev->flags &= ~IFF_ALLMULTI;
                else {
                        dev->allmulti -= inc;
-                       printk(KERN_WARNING "%s: allmulti touches roof, "
-                               "set allmulti failed, allmulti feature of "
-                               "device might be broken.\n", dev->name);
+                       pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
+                               dev->name);
                        return -EOVERFLOW;
                }
        }
@@ -5238,8 +5227,8 @@ static void rollback_registered_many(struct list_head *head)
                 * devices and proceed with the remaining.
                 */
                if (dev->reg_state == NETREG_UNINITIALIZED) {
-                       pr_debug("unregister_netdevice: device %s/%p never "
-                                "was registered\n", dev->name, dev);
+                       pr_debug("unregister_netdevice: device %s/%p never was registered\n",
+                                dev->name, dev);
 
                        WARN_ON(1);
                        list_del(&dev->unreg_list);
@@ -5471,7 +5460,7 @@ static int netif_alloc_rx_queues(struct net_device *dev)
 
        rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
        if (!rx) {
-               pr_err("netdev: Unable to allocate %u rx queues.\n", count);
+               pr_err("netdev: Unable to allocate %u rx queues\n", count);
                return -ENOMEM;
        }
        dev->_rx = rx;
@@ -5505,8 +5494,7 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
 
        tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
        if (!tx) {
-               pr_err("netdev: Unable to allocate %u tx queues.\n",
-                      count);
+               pr_err("netdev: Unable to allocate %u tx queues\n", count);
                return -ENOMEM;
        }
        dev->_tx = tx;
@@ -5765,10 +5753,8 @@ static void netdev_wait_allrefs(struct net_device *dev)
                refcnt = netdev_refcnt_read(dev);
 
                if (time_after(jiffies, warning_time + 10 * HZ)) {
-                       printk(KERN_EMERG "unregister_netdevice: "
-                              "waiting for %s to become free. Usage "
-                              "count = %d\n",
-                              dev->name, refcnt);
+                       pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
+                                dev->name, refcnt);
                        warning_time = jiffies;
                }
        }
@@ -5819,7 +5805,7 @@ void netdev_run_todo(void)
                list_del(&dev->todo_list);
 
                if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
-                       printk(KERN_ERR "network todo '%s' but state %d\n",
+                       pr_err("network todo '%s' but state %d\n",
                               dev->name, dev->reg_state);
                        dump_stack();
                        continue;
@@ -5935,15 +5921,13 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
        BUG_ON(strlen(name) >= sizeof(dev->name));
 
        if (txqs < 1) {
-               pr_err("alloc_netdev: Unable to allocate device "
-                      "with zero queues.\n");
+               pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
                return NULL;
        }
 
 #ifdef CONFIG_RPS
        if (rxqs < 1) {
-               pr_err("alloc_netdev: Unable to allocate device "
-                      "with zero RX queues.\n");
+               pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
                return NULL;
        }
 #endif
@@ -5959,7 +5943,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 
        p = kzalloc(alloc_size, GFP_KERNEL);
        if (!p) {
-               printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
+               pr_err("alloc_netdev: Unable to allocate device\n");
                return NULL;
        }
 
@@ -6492,8 +6476,8 @@ static void __net_exit default_device_exit(struct net *net)
                snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
                err = dev_change_net_namespace(dev, &init_net, fb_name);
                if (err) {
-                       printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
-                               __func__, dev->name, err);
+                       pr_emerg("%s: failed to move %s to init_net: %d\n",
+                                __func__, dev->name, err);
                        BUG();
                }
        }
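The dev.c hunks above are all instances of one conversion: a printk(KERN_<LEVEL> ...) call whose format string was split across several quoted fragments becomes a single pr_<level>() call with the message coalesced onto one line, which keeps the log strings grep-able. The pr_<level> helpers are thin printk() wrappers that pass the format through an overridable pr_fmt() hook; a simplified sketch (not a verbatim copy of include/linux/printk.h) of how they fit together:

/* Simplified sketch of the pr_<level> convenience macros; each one just
 * prepends a log-level marker and runs the format through pr_fmt(), which
 * individual files may redefine to add their own prefix.  (In real kernels
 * pr_debug() additionally hooks into dynamic debug.) */
#ifndef pr_fmt
#define pr_fmt(fmt) fmt
#endif

#define pr_emerg(fmt, ...)  printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
#define pr_crit(fmt, ...)   printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
#define pr_err(fmt, ...)    printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
#define pr_warn(fmt, ...)   printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info(fmt, ...)   printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
#define pr_debug(fmt, ...)  printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)

pr_warning(), seen in some of the removed lines, is an older alias for pr_warn().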
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index e287346e0934..f98ec444133a 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2165,6 +2165,35 @@ nla_put_failure:
        return -EMSGSIZE;
 }
 
+static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
+                           u32 pid, u32 seq, int type, unsigned int flags,
+                           struct neigh_table *tbl)
+{
+       struct nlmsghdr *nlh;
+       struct ndmsg *ndm;
+
+       nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
+       if (nlh == NULL)
+               return -EMSGSIZE;
+
+       ndm = nlmsg_data(nlh);
+       ndm->ndm_family  = tbl->family;
+       ndm->ndm_pad1    = 0;
+       ndm->ndm_pad2    = 0;
+       ndm->ndm_flags   = pn->flags | NTF_PROXY;
+       ndm->ndm_type    = NDA_DST;
+       ndm->ndm_ifindex = pn->dev->ifindex;
+       ndm->ndm_state   = NUD_NONE;
+
+       NLA_PUT(skb, NDA_DST, tbl->key_len, pn->key);
+
+       return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+       nlmsg_cancel(skb, nlh);
+       return -EMSGSIZE;
+}
+
 static void neigh_update_notify(struct neighbour *neigh)
 {
        call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
@@ -2214,23 +2243,78 @@ out:
        return rc;
 }
 
+static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
+                            struct netlink_callback *cb)
+{
+       struct pneigh_entry *n;
+       struct net *net = sock_net(skb->sk);
+       int rc, h, s_h = cb->args[3];
+       int idx, s_idx = idx = cb->args[4];
+
+       read_lock_bh(&tbl->lock);
+
+       for (h = 0; h <= PNEIGH_HASHMASK; h++) {
+               if (h < s_h)
+                       continue;
+               if (h > s_h)
+                       s_idx = 0;
+               for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
+                       if (dev_net(n->dev) != net)
+                               continue;
+                       if (idx < s_idx)
+                               goto next;
+                       if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
+                                           cb->nlh->nlmsg_seq,
+                                           RTM_NEWNEIGH,
+                                           NLM_F_MULTI, tbl) <= 0) {
+                               read_unlock_bh(&tbl->lock);
+                               rc = -1;
+                               goto out;
+                       }
+               next:
+                       idx++;
+               }
+       }
+
+       read_unlock_bh(&tbl->lock);
+       rc = skb->len;
+out:
+       cb->args[3] = h;
+       cb->args[4] = idx;
+       return rc;
+
+}
+
 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
 {
        struct neigh_table *tbl;
        int t, family, s_t;
+       int proxy = 0;
+       int err = 0;
 
        read_lock(&neigh_tbl_lock);
        family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
+
+       /* check for full ndmsg structure presence, family member is
+        * the same for both structures
+        */
+       if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
+           ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
+               proxy = 1;
+
        s_t = cb->args[0];
 
-       for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
+       for (tbl = neigh_tables, t = 0; tbl && (err >= 0);
+            tbl = tbl->next, t++) {
                if (t < s_t || (family && tbl->family != family))
                        continue;
                if (t > s_t)
                        memset(&cb->args[1], 0, sizeof(cb->args) -
                                                sizeof(cb->args[0]));
-               if (neigh_dump_table(tbl, skb, cb) < 0)
-                       break;
+               if (proxy)
+                       err = pneigh_dump_table(tbl, skb, cb);
+               else
+                       err = neigh_dump_table(tbl, skb, cb);
        }
        read_unlock(&neigh_tbl_lock);
 
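Taken together, the neighbour.c additions let an RTM_GETNEIGH netlink dump return the proxy (pneigh) table instead of the regular neighbour cache whenever the request's ndmsg carries ndm_flags equal to NTF_PROXY. Purely as an illustration of that trigger condition, a hypothetical minimal userspace client (raw rtnetlink, no libnl, error handling mostly omitted; the program itself is not part of this patch) might look like this:

/* Hypothetical sketch: request a dump of proxy neighbour entries by
 * sending RTM_GETNEIGH with ndm_flags = NTF_PROXY, which is exactly the
 * condition neigh_dump_info() now checks for. */
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/neighbour.h>
#include <sys/socket.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
        struct {
                struct nlmsghdr nlh;
                struct ndmsg    ndm;
        } req;
        char buf[8192];
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

        if (fd < 0)
                return 1;

        memset(&req, 0, sizeof(req));
        req.nlh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct ndmsg));
        req.nlh.nlmsg_type  = RTM_GETNEIGH;
        req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
        req.nlh.nlmsg_seq   = 1;
        req.ndm.ndm_family  = AF_INET;
        req.ndm.ndm_flags   = NTF_PROXY;        /* ask for the pneigh table */

        if (sendto(fd, &req, req.nlh.nlmsg_len, 0,
                   (struct sockaddr *)&sa, sizeof(sa)) < 0)
                return 1;

        for (;;) {
                int len = recv(fd, buf, sizeof(buf), 0);
                struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

                if (len <= 0)
                        break;
                for (; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len)) {
                        if (nlh->nlmsg_type == NLMSG_DONE)
                                goto done;
                        if (nlh->nlmsg_type == RTM_NEWNEIGH)
                                printf("proxy entry on ifindex %d\n",
                                       ((struct ndmsg *)NLMSG_DATA(nlh))->ndm_ifindex);
                }
        }
done:
        close(fd);
        return 0;
}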
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index ddefc513b44a..3d84fb9d8873 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -9,6 +9,8 @@
  * Copyright (C) 2002 Red Hat, Inc.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/moduleparam.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -45,9 +47,11 @@ static atomic_t trapped;
 #define NETPOLL_RX_ENABLED  1
 #define NETPOLL_RX_DROP     2
 
-#define MAX_SKB_SIZE \
-       (MAX_UDP_CHUNK + sizeof(struct udphdr) + \
-               sizeof(struct iphdr) + sizeof(struct ethhdr))
+#define MAX_SKB_SIZE \
+       (sizeof(struct ethhdr) + \
+        sizeof(struct iphdr) + \
+        sizeof(struct udphdr) + \
+        MAX_UDP_CHUNK)
 
 static void zap_completion_queue(void);
 static void arp_reply(struct sk_buff *skb);
@@ -55,6 +59,13 @@ static void arp_reply(struct sk_buff *skb);
 static unsigned int carrier_timeout = 4;
 module_param(carrier_timeout, uint, 0644);
 
+#define np_info(np, fmt, ...) \
+       pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
+#define np_err(np, fmt, ...) \
+       pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
+#define np_notice(np, fmt, ...) \
+       pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
+
 static void queue_process(struct work_struct *work)
 {
        struct netpoll_info *npinfo =
@@ -627,18 +638,12 @@ out:
 
 void netpoll_print_options(struct netpoll *np)
 {
-       printk(KERN_INFO "%s: local port %d\n",
-                        np->name, np->local_port);
-       printk(KERN_INFO "%s: local IP %pI4\n",
-                        np->name, &np->local_ip);
-       printk(KERN_INFO "%s: interface '%s'\n",
-                        np->name, np->dev_name);
-       printk(KERN_INFO "%s: remote port %d\n",
-                        np->name, np->remote_port);
-       printk(KERN_INFO "%s: remote IP %pI4\n",
-                        np->name, &np->remote_ip);
-       printk(KERN_INFO "%s: remote ethernet address %pM\n",
-                        np->name, np->remote_mac);
+       np_info(np, "local port %d\n", np->local_port);
+       np_info(np, "local IP %pI4\n", &np->local_ip);
+       np_info(np, "interface '%s'\n", np->dev_name);
+       np_info(np, "remote port %d\n", np->remote_port);
+       np_info(np, "remote IP %pI4\n", &np->remote_ip);
+       np_info(np, "remote ethernet address %pM\n", np->remote_mac);
 }
 EXPORT_SYMBOL(netpoll_print_options);
 
@@ -680,8 +685,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
                        goto parse_failed;
                *delim = 0;
                if (*cur == ' ' || *cur == '\t')
-                       printk(KERN_INFO "%s: warning: whitespace"
-                                       "is not allowed\n", np->name);
+                       np_info(np, "warning: whitespace is not allowed\n");
                np->remote_port = simple_strtol(cur, NULL, 10);
                cur = delim;
        }
@@ -705,8 +709,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
        return 0;
 
  parse_failed:
-       printk(KERN_INFO "%s: couldn't parse config at '%s'!\n",
-              np->name, cur);
+       np_info(np, "couldn't parse config at '%s'!\n", cur);
        return -1;
 }
 EXPORT_SYMBOL(netpoll_parse_options);
@@ -721,8 +724,8 @@ int __netpoll_setup(struct netpoll *np)
 
        if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
            !ndev->netdev_ops->ndo_poll_controller) {
-               printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
-                      np->name, np->dev_name);
+               np_err(np, "%s doesn't support polling, aborting\n",
+                      np->dev_name);
                err = -ENOTSUPP;
                goto out;
        }
@@ -785,14 +788,12 @@ int netpoll_setup(struct netpoll *np)
        if (np->dev_name)
                ndev = dev_get_by_name(&init_net, np->dev_name);
        if (!ndev) {
-               printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
-                      np->name, np->dev_name);
+               np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
                return -ENODEV;
        }
 
        if (ndev->master) {
-               printk(KERN_ERR "%s: %s is a slave device, aborting.\n",
-                      np->name, np->dev_name);
+               np_err(np, "%s is a slave device, aborting\n", np->dev_name);
                err = -EBUSY;
                goto put;
        }
@@ -800,16 +801,14 @@ int netpoll_setup(struct netpoll *np)
        if (!netif_running(ndev)) {
                unsigned long atmost, atleast;
 
-               printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
-                      np->name, np->dev_name);
+               np_info(np, "device %s not up yet, forcing it\n", np->dev_name);
 
                rtnl_lock();
                err = dev_open(ndev);
                rtnl_unlock();
 
                if (err) {
-                       printk(KERN_ERR "%s: failed to open %s\n",
-                              np->name, ndev->name);
+                       np_err(np, "failed to open %s\n", ndev->name);
                        goto put;
                }
 
@@ -817,9 +816,7 @@ int netpoll_setup(struct netpoll *np)
                atmost = jiffies + carrier_timeout * HZ;
                while (!netif_carrier_ok(ndev)) {
                        if (time_after(jiffies, atmost)) {
-                               printk(KERN_NOTICE
-                                      "%s: timeout waiting for carrier\n",
-                                      np->name);
+                               np_notice(np, "timeout waiting for carrier\n");
                                break;
                        }
                        msleep(1);
@@ -831,9 +828,7 @@ int netpoll_setup(struct netpoll *np)
                 */
 
                if (time_before(jiffies, atleast)) {
-                       printk(KERN_NOTICE "%s: carrier detect appears"
-                              " untrustworthy, waiting 4 seconds\n",
-                              np->name);
+                       np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
                        msleep(4000);
                }
        }
@@ -844,15 +839,15 @@ int netpoll_setup(struct netpoll *np)
 
                if (!in_dev || !in_dev->ifa_list) {
                        rcu_read_unlock();
-                       printk(KERN_ERR "%s: no IP address for %s, aborting\n",
-                              np->name, np->dev_name);
+                       np_err(np, "no IP address for %s, aborting\n",
+                              np->dev_name);
                        err = -EDESTADDRREQ;
                        goto put;
                }
 
                np->local_ip = in_dev->ifa_list->ifa_local;
                rcu_read_unlock();
-               printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip);
+               np_info(np, "local IP %pI4\n", &np->local_ip);
        }
 
        np->dev = ndev;
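The netpoll.c conversion stacks two prefixes: the new pr_fmt() definition makes every pr_* call in the file start with KBUILD_MODNAME ("netpoll: "), and the np_info/np_err/np_notice wrappers then add the per-instance np->name, so the hand-written "%s: " / np->name pairs in the old printk() calls become unnecessary. A standalone, purely illustrative userspace sketch of that layering (these are not the kernel's definitions):

/* Userspace illustration of the same two-level prefixing:
 * output reads "<module>: <np->name>: message". */
#include <stdio.h>

#define pr_fmt(fmt) "netpoll" ": " fmt
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

struct netpoll {
        const char *name;       /* e.g. "netconsole" in the kernel */
        int local_port;
};

#define np_info(np, fmt, ...) \
        pr_info("%s: " fmt, (np)->name, ##__VA_ARGS__)

int main(void)
{
        struct netpoll np = { .name = "netconsole", .local_port = 6665 };

        /* prints "netpoll: netconsole: local port 6665" */
        np_info(&np, "local port %d\n", np.local_port);
        return 0;
}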
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index da0c97f2fab4..f3a530780753 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2906,7 +2906,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
        nskb->prev = p;
 
        nskb->data_len += p->len;
-       nskb->truesize += p->len;
+       nskb->truesize += p->truesize;
        nskb->len += p->len;
 
        *head = nskb;
@@ -2916,6 +2916,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
                p = nskb;
 
 merge:
+       p->truesize += skb->truesize - len;
        if (offset > headlen) {
                unsigned int eat = offset - headlen;
 