author     Linus Torvalds <torvalds@linux-foundation.org>  2014-01-29 21:08:37 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-01-29 21:08:37 -0500
commit     1d494f36d1fde04188341bf3d3b1a14cdf6fb2c9 (patch)
tree       b8849264f0e8e59b8466c6b6a2db9df71d1ee59a
parent     19ba20f455a8e9cf15c12891e751fd73c9026292 (diff)
parent     c044dc2132d19d8c643cdd340f21afcec177c046 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
 "Several fixups, of note:

  1) Fix unlock of not held spinlock in RXRPC code, from Alexey Khoroshilov.

  2) Call pci_disable_device() from the correct shutdown path in bnx2x driver, from Yuval Mintz.

  3) Fix qeth build on s390 for some configurations, from Eugene Crosser.

  4) Cure locking bugs in bond_loadbalance_arp_mon(), from Ding Tianhong.

  5) Must do netif_napi_add() before registering netdevice in sky2 driver, from Stanislaw Gruszka.

  6) Fix lost bug fix during merge due to code movement in ieee802154, noticed and fixed by the eagle eyed Stephen Rothwell.

  7) Get rid of resource leak in xen-netfront driver, from Annie Li.

  8) Bounds checks in qlcnic driver are off by one, from Manish Chopra.

  9) TPROXY can leak sockets when TCP early demux is enabled, fix from Holger Eitzenberger"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (32 commits)
  qeth: fix build of s390 allmodconfig
  bonding: fix locking in bond_loadbalance_arp_mon()
  tun: add device name(iff) field to proc fdinfo entry
  DT: net: davinci_emac: "ti, davinci-no-bd-ram" property is actually optional
  DT: net: davinci_emac: "ti, davinci-rmii-en" property is actually optional
  bnx2x: Fix generic option settings
  net: Fix warning on make htmldocs caused by skbuff.c
  llc: remove noisy WARN from llc_mac_hdr_init
  qlcnic: Fix loopback test failure
  qlcnic: Fix tx timeout.
  qlcnic: Fix initialization of vlan list.
  qlcnic: Correct off-by-one errors in bounds checks
  net: Document promote_secondaries
  net: gre: use icmp_hdr() to get inner ip header
  i40e: Add missing braces to i40e_dcb_need_reconfig()
  xen-netfront: fix resource leak in netfront
  net: 6lowpan: fixup for code movement
  hyperv: Add support for physically discontinuous receive buffer
  sky2: initialize napi before registering device
  net: Fix memory leak if TPROXY used with TCP early demux
  ...
-rw-r--r--  Documentation/devicetree/bindings/net/davinci_emac.txt    |  4
-rw-r--r--  Documentation/networking/ip-sysctl.txt                    |  6
-rw-r--r--  Documentation/networking/packet_mmap.txt                  |  1
-rw-r--r--  drivers/hv/channel.c                                      | 14
-rw-r--r--  drivers/net/bonding/bond_main.c                           | 96
-rw-r--r--  drivers/net/bonding/bonding.h                             | 13
-rw-r--r--  drivers/net/ethernet/8390/apne.c                          |  1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c       | 78
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c          |  4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c               |  3
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c                       |  4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c            | 19
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c          |  9
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c  | 11
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c         |  6
-rw-r--r--  drivers/net/hyperv/hyperv_net.h                           |  2
-rw-r--r--  drivers/net/hyperv/netvsc.c                               |  7
-rw-r--r--  drivers/net/tun.c                                         | 27
-rw-r--r--  drivers/net/xen-netfront.c                                | 88
-rw-r--r--  drivers/s390/net/qeth_core.h                              |  5
-rw-r--r--  drivers/s390/net/qeth_core_main.c                         | 18
-rw-r--r--  drivers/s390/net/qeth_l2_main.c                           | 41
-rw-r--r--  drivers/s390/net/qeth_l3_main.c                           |  8
-rw-r--r--  include/linux/skbuff.h                                    |  1
-rw-r--r--  net/core/skbuff.c                                         | 27
-rw-r--r--  net/ieee802154/6lowpan_iphc.c                             |  2
-rw-r--r--  net/ipv4/ip_gre.c                                         |  2
-rw-r--r--  net/ipv4/ip_input.c                                       |  2
-rw-r--r--  net/ipv4/ip_tunnel.c                                      |  3
-rw-r--r--  net/ipv6/ip6_input.c                                      |  2
-rw-r--r--  net/llc/llc_output.c                                      |  2
-rw-r--r--  net/rxrpc/ar-connection.c                                 |  2
-rw-r--r--  net/rxrpc/ar-recvmsg.c                                    |  7
-rw-r--r--  net/sched/sch_tbf.c                                       | 13
34 files changed, 303 insertions(+), 225 deletions(-)
diff --git a/Documentation/devicetree/bindings/net/davinci_emac.txt b/Documentation/devicetree/bindings/net/davinci_emac.txt
index ca0911a20e8b..6e356d15154a 100644
--- a/Documentation/devicetree/bindings/net/davinci_emac.txt
+++ b/Documentation/devicetree/bindings/net/davinci_emac.txt
@@ -10,8 +10,6 @@ Required properties:
10- ti,davinci-ctrl-mod-reg-offset: offset to control module register 10- ti,davinci-ctrl-mod-reg-offset: offset to control module register
11- ti,davinci-ctrl-ram-offset: offset to control module ram 11- ti,davinci-ctrl-ram-offset: offset to control module ram
12- ti,davinci-ctrl-ram-size: size of control module ram 12- ti,davinci-ctrl-ram-size: size of control module ram
13- ti,davinci-rmii-en: use RMII
14- ti,davinci-no-bd-ram: has the emac controller BD RAM
15- interrupts: interrupt mapping for the davinci emac interrupts sources: 13- interrupts: interrupt mapping for the davinci emac interrupts sources:
16 4 sources: <Receive Threshold Interrupt 14 4 sources: <Receive Threshold Interrupt
17 Receive Interrupt 15 Receive Interrupt
@@ -22,6 +20,8 @@ Optional properties:
22- phy-handle: Contains a phandle to an Ethernet PHY. 20- phy-handle: Contains a phandle to an Ethernet PHY.
23 If absent, davinci_emac driver defaults to 100/FULL. 21 If absent, davinci_emac driver defaults to 100/FULL.
24- local-mac-address : 6 bytes, mac address 22- local-mac-address : 6 bytes, mac address
23- ti,davinci-rmii-en: 1 byte, 1 means use RMII
24- ti,davinci-no-bd-ram: boolean, does EMAC have BD RAM?
25 25
26Example (enbw_cmc board): 26Example (enbw_cmc board):
27 eth0: emac@1e20000 { 27 eth0: emac@1e20000 {
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 5de03740cdd5..ab42c95f9985 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1088,6 +1088,12 @@ igmpv3_unsolicited_report_interval - INTEGER
1088 IGMPv3 report retransmit will take place. 1088 IGMPv3 report retransmit will take place.
1089 Default: 1000 (1 seconds) 1089 Default: 1000 (1 seconds)
1090 1090
1091promote_secondaries - BOOLEAN
1092 When a primary IP address is removed from this interface
1093 promote a corresponding secondary IP address instead of
1094 removing all the corresponding secondary IP addresses.
1095
1096
1091tag - INTEGER 1097tag - INTEGER
1092 Allows you to write a number, which can be used as required. 1098 Allows you to write a number, which can be used as required.
1093 Default value is 0. 1099 Default value is 0.
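Note (not part of the patch): the ip-sysctl.txt hunk above only documents the existing promote_secondaries behaviour. As a hedged illustration, the knob can be toggled per interface through procfs; the interface name "eth0" below is an assumption, and writing the file normally requires root.

    /* Illustration only: enable promote_secondaries for a hypothetical "eth0".
     * Assumes the usual per-interface procfs path and sufficient privileges.
     */
    #include <stdio.h>

    int main(void)
    {
            const char *path = "/proc/sys/net/ipv4/conf/eth0/promote_secondaries";
            FILE *f = fopen(path, "w");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            fputs("1\n", f); /* 1 = promote a secondary when the primary is removed */
            fclose(f);
            return 0;
    }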
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index 91ffe1d9e8ca..1404674c0a02 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -583,6 +583,7 @@ Currently implemented fanout policies are:
583 - PACKET_FANOUT_CPU: schedule to socket by CPU packet arrives on 583 - PACKET_FANOUT_CPU: schedule to socket by CPU packet arrives on
584 - PACKET_FANOUT_RND: schedule to socket by random selection 584 - PACKET_FANOUT_RND: schedule to socket by random selection
585 - PACKET_FANOUT_ROLLOVER: if one socket is full, rollover to another 585 - PACKET_FANOUT_ROLLOVER: if one socket is full, rollover to another
586 - PACKET_FANOUT_QM: schedule to socket by skbs recorded queue_mapping
586 587
587Minimal example code by David S. Miller (try things like "./test eth0 hash", 588Minimal example code by David S. Miller (try things like "./test eth0 hash",
588"./test eth0 lb", etc.): 589"./test eth0 lb", etc.):
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index cea623c36ae2..69ea36f07b4d 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -209,7 +209,6 @@ static int create_gpadl_header(void *kbuffer, u32 size,
209{ 209{
210 int i; 210 int i;
211 int pagecount; 211 int pagecount;
212 unsigned long long pfn;
213 struct vmbus_channel_gpadl_header *gpadl_header; 212 struct vmbus_channel_gpadl_header *gpadl_header;
214 struct vmbus_channel_gpadl_body *gpadl_body; 213 struct vmbus_channel_gpadl_body *gpadl_body;
215 struct vmbus_channel_msginfo *msgheader; 214 struct vmbus_channel_msginfo *msgheader;
@@ -219,7 +218,6 @@ static int create_gpadl_header(void *kbuffer, u32 size,
219 int pfnsum, pfncount, pfnleft, pfncurr, pfnsize; 218 int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;
220 219
221 pagecount = size >> PAGE_SHIFT; 220 pagecount = size >> PAGE_SHIFT;
222 pfn = virt_to_phys(kbuffer) >> PAGE_SHIFT;
223 221
224 /* do we need a gpadl body msg */ 222 /* do we need a gpadl body msg */
225 pfnsize = MAX_SIZE_CHANNEL_MESSAGE - 223 pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
@@ -248,7 +246,8 @@ static int create_gpadl_header(void *kbuffer, u32 size,
248 gpadl_header->range[0].byte_offset = 0; 246 gpadl_header->range[0].byte_offset = 0;
249 gpadl_header->range[0].byte_count = size; 247 gpadl_header->range[0].byte_count = size;
250 for (i = 0; i < pfncount; i++) 248 for (i = 0; i < pfncount; i++)
251 gpadl_header->range[0].pfn_array[i] = pfn+i; 249 gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
250 kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;
252 *msginfo = msgheader; 251 *msginfo = msgheader;
253 *messagecount = 1; 252 *messagecount = 1;
254 253
@@ -301,7 +300,9 @@ static int create_gpadl_header(void *kbuffer, u32 size,
301 * so the hypervisor gurantees that this is ok. 300 * so the hypervisor gurantees that this is ok.
302 */ 301 */
303 for (i = 0; i < pfncurr; i++) 302 for (i = 0; i < pfncurr; i++)
304 gpadl_body->pfn[i] = pfn + pfnsum + i; 303 gpadl_body->pfn[i] = slow_virt_to_phys(
304 kbuffer + PAGE_SIZE * (pfnsum + i)) >>
305 PAGE_SHIFT;
305 306
306 /* add to msg header */ 307 /* add to msg header */
307 list_add_tail(&msgbody->msglistentry, 308 list_add_tail(&msgbody->msglistentry,
@@ -327,7 +328,8 @@ static int create_gpadl_header(void *kbuffer, u32 size,
327 gpadl_header->range[0].byte_offset = 0; 328 gpadl_header->range[0].byte_offset = 0;
328 gpadl_header->range[0].byte_count = size; 329 gpadl_header->range[0].byte_count = size;
329 for (i = 0; i < pagecount; i++) 330 for (i = 0; i < pagecount; i++)
330 gpadl_header->range[0].pfn_array[i] = pfn+i; 331 gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
332 kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;
331 333
332 *msginfo = msgheader; 334 *msginfo = msgheader;
333 *messagecount = 1; 335 *messagecount = 1;
@@ -344,7 +346,7 @@ nomem:
344 * vmbus_establish_gpadl - Estabish a GPADL for the specified buffer 346 * vmbus_establish_gpadl - Estabish a GPADL for the specified buffer
345 * 347 *
346 * @channel: a channel 348 * @channel: a channel
347 * @kbuffer: from kmalloc 349 * @kbuffer: from kmalloc or vmalloc
348 * @size: page-size multiple 350 * @size: page-size multiple
349 * @gpadl_handle: some funky thing 351 * @gpadl_handle: some funky thing
350 */ 352 */
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a7db819bca92..4c08018d7333 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2346,7 +2346,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
2346 arp_work.work); 2346 arp_work.work);
2347 struct slave *slave, *oldcurrent; 2347 struct slave *slave, *oldcurrent;
2348 struct list_head *iter; 2348 struct list_head *iter;
2349 int do_failover = 0; 2349 int do_failover = 0, slave_state_changed = 0;
2350 2350
2351 if (!bond_has_slaves(bond)) 2351 if (!bond_has_slaves(bond))
2352 goto re_arm; 2352 goto re_arm;
@@ -2370,7 +2370,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
2370 bond_time_in_interval(bond, slave->dev->last_rx, 1)) { 2370 bond_time_in_interval(bond, slave->dev->last_rx, 1)) {
2371 2371
2372 slave->link = BOND_LINK_UP; 2372 slave->link = BOND_LINK_UP;
2373 bond_set_active_slave(slave); 2373 slave_state_changed = 1;
2374 2374
2375 /* primary_slave has no meaning in round-robin 2375 /* primary_slave has no meaning in round-robin
2376 * mode. the window of a slave being up and 2376 * mode. the window of a slave being up and
@@ -2399,7 +2399,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
2399 !bond_time_in_interval(bond, slave->dev->last_rx, 2)) { 2399 !bond_time_in_interval(bond, slave->dev->last_rx, 2)) {
2400 2400
2401 slave->link = BOND_LINK_DOWN; 2401 slave->link = BOND_LINK_DOWN;
2402 bond_set_backup_slave(slave); 2402 slave_state_changed = 1;
2403 2403
2404 if (slave->link_failure_count < UINT_MAX) 2404 if (slave->link_failure_count < UINT_MAX)
2405 slave->link_failure_count++; 2405 slave->link_failure_count++;
@@ -2426,19 +2426,24 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
2426 2426
2427 rcu_read_unlock(); 2427 rcu_read_unlock();
2428 2428
2429 if (do_failover) { 2429 if (do_failover || slave_state_changed) {
2430 /* the bond_select_active_slave must hold RTNL
2431 * and curr_slave_lock for write.
2432 */
2433 if (!rtnl_trylock()) 2430 if (!rtnl_trylock())
2434 goto re_arm; 2431 goto re_arm;
2435 block_netpoll_tx();
2436 write_lock_bh(&bond->curr_slave_lock);
2437 2432
2438 bond_select_active_slave(bond); 2433 if (slave_state_changed) {
2434 bond_slave_state_change(bond);
2435 } else if (do_failover) {
2436 /* the bond_select_active_slave must hold RTNL
2437 * and curr_slave_lock for write.
2438 */
2439 block_netpoll_tx();
2440 write_lock_bh(&bond->curr_slave_lock);
2439 2441
2440 write_unlock_bh(&bond->curr_slave_lock); 2442 bond_select_active_slave(bond);
2441 unblock_netpoll_tx(); 2443
2444 write_unlock_bh(&bond->curr_slave_lock);
2445 unblock_netpoll_tx();
2446 }
2442 rtnl_unlock(); 2447 rtnl_unlock();
2443 } 2448 }
2444 2449
@@ -2599,45 +2604,51 @@ do_failover:
2599 2604
2600/* 2605/*
2601 * Send ARP probes for active-backup mode ARP monitor. 2606 * Send ARP probes for active-backup mode ARP monitor.
2602 *
2603 * Called with rcu_read_lock hold.
2604 */ 2607 */
2605static void bond_ab_arp_probe(struct bonding *bond) 2608static bool bond_ab_arp_probe(struct bonding *bond)
2606{ 2609{
2607 struct slave *slave, *before = NULL, *new_slave = NULL, 2610 struct slave *slave, *before = NULL, *new_slave = NULL,
2608 *curr_arp_slave = rcu_dereference(bond->current_arp_slave); 2611 *curr_arp_slave, *curr_active_slave;
2609 struct list_head *iter; 2612 struct list_head *iter;
2610 bool found = false; 2613 bool found = false;
2611 2614
2612 read_lock(&bond->curr_slave_lock); 2615 rcu_read_lock();
2616 curr_arp_slave = rcu_dereference(bond->current_arp_slave);
2617 curr_active_slave = rcu_dereference(bond->curr_active_slave);
2613 2618
2614 if (curr_arp_slave && bond->curr_active_slave) 2619 if (curr_arp_slave && curr_active_slave)
2615 pr_info("PROBE: c_arp %s && cas %s BAD\n", 2620 pr_info("PROBE: c_arp %s && cas %s BAD\n",
2616 curr_arp_slave->dev->name, 2621 curr_arp_slave->dev->name,
2617 bond->curr_active_slave->dev->name); 2622 curr_active_slave->dev->name);
2618 2623
2619 if (bond->curr_active_slave) { 2624 if (curr_active_slave) {
2620 bond_arp_send_all(bond, bond->curr_active_slave); 2625 bond_arp_send_all(bond, curr_active_slave);
2621 read_unlock(&bond->curr_slave_lock); 2626 rcu_read_unlock();
2622 return; 2627 return true;
2623 } 2628 }
2624 2629 rcu_read_unlock();
2625 read_unlock(&bond->curr_slave_lock);
2626 2630
2627 /* if we don't have a curr_active_slave, search for the next available 2631 /* if we don't have a curr_active_slave, search for the next available
2628 * backup slave from the current_arp_slave and make it the candidate 2632 * backup slave from the current_arp_slave and make it the candidate
2629 * for becoming the curr_active_slave 2633 * for becoming the curr_active_slave
2630 */ 2634 */
2631 2635
2636 if (!rtnl_trylock())
2637 return false;
2638 /* curr_arp_slave might have gone away */
2639 curr_arp_slave = ACCESS_ONCE(bond->current_arp_slave);
2640
2632 if (!curr_arp_slave) { 2641 if (!curr_arp_slave) {
2633 curr_arp_slave = bond_first_slave_rcu(bond); 2642 curr_arp_slave = bond_first_slave(bond);
2634 if (!curr_arp_slave) 2643 if (!curr_arp_slave) {
2635 return; 2644 rtnl_unlock();
2645 return true;
2646 }
2636 } 2647 }
2637 2648
2638 bond_set_slave_inactive_flags(curr_arp_slave); 2649 bond_set_slave_inactive_flags(curr_arp_slave);
2639 2650
2640 bond_for_each_slave_rcu(bond, slave, iter) { 2651 bond_for_each_slave(bond, slave, iter) {
2641 if (!found && !before && IS_UP(slave->dev)) 2652 if (!found && !before && IS_UP(slave->dev))
2642 before = slave; 2653 before = slave;
2643 2654
@@ -2667,21 +2678,26 @@ static void bond_ab_arp_probe(struct bonding *bond)
2667 if (!new_slave && before) 2678 if (!new_slave && before)
2668 new_slave = before; 2679 new_slave = before;
2669 2680
2670 if (!new_slave) 2681 if (!new_slave) {
2671 return; 2682 rtnl_unlock();
2683 return true;
2684 }
2672 2685
2673 new_slave->link = BOND_LINK_BACK; 2686 new_slave->link = BOND_LINK_BACK;
2674 bond_set_slave_active_flags(new_slave); 2687 bond_set_slave_active_flags(new_slave);
2675 bond_arp_send_all(bond, new_slave); 2688 bond_arp_send_all(bond, new_slave);
2676 new_slave->jiffies = jiffies; 2689 new_slave->jiffies = jiffies;
2677 rcu_assign_pointer(bond->current_arp_slave, new_slave); 2690 rcu_assign_pointer(bond->current_arp_slave, new_slave);
2691 rtnl_unlock();
2692
2693 return true;
2678} 2694}
2679 2695
2680static void bond_activebackup_arp_mon(struct work_struct *work) 2696static void bond_activebackup_arp_mon(struct work_struct *work)
2681{ 2697{
2682 struct bonding *bond = container_of(work, struct bonding, 2698 struct bonding *bond = container_of(work, struct bonding,
2683 arp_work.work); 2699 arp_work.work);
2684 bool should_notify_peers = false; 2700 bool should_notify_peers = false, should_commit = false;
2685 int delta_in_ticks; 2701 int delta_in_ticks;
2686 2702
2687 delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval); 2703 delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
@@ -2690,12 +2706,11 @@ static void bond_activebackup_arp_mon(struct work_struct *work)
2690 goto re_arm; 2706 goto re_arm;
2691 2707
2692 rcu_read_lock(); 2708 rcu_read_lock();
2693
2694 should_notify_peers = bond_should_notify_peers(bond); 2709 should_notify_peers = bond_should_notify_peers(bond);
2710 should_commit = bond_ab_arp_inspect(bond);
2711 rcu_read_unlock();
2695 2712
2696 if (bond_ab_arp_inspect(bond)) { 2713 if (should_commit) {
2697 rcu_read_unlock();
2698
2699 /* Race avoidance with bond_close flush of workqueue */ 2714 /* Race avoidance with bond_close flush of workqueue */
2700 if (!rtnl_trylock()) { 2715 if (!rtnl_trylock()) {
2701 delta_in_ticks = 1; 2716 delta_in_ticks = 1;
@@ -2704,13 +2719,14 @@ static void bond_activebackup_arp_mon(struct work_struct *work)
2704 } 2719 }
2705 2720
2706 bond_ab_arp_commit(bond); 2721 bond_ab_arp_commit(bond);
2707
2708 rtnl_unlock(); 2722 rtnl_unlock();
2709 rcu_read_lock();
2710 } 2723 }
2711 2724
2712 bond_ab_arp_probe(bond); 2725 if (!bond_ab_arp_probe(bond)) {
2713 rcu_read_unlock(); 2726 /* rtnl locking failed, re-arm */
2727 delta_in_ticks = 1;
2728 should_notify_peers = false;
2729 }
2714 2730
2715re_arm: 2731re_arm:
2716 if (bond->params.arp_interval) 2732 if (bond->params.arp_interval)
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 1a9062f4e0d6..86ccfb9f71cc 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -303,6 +303,19 @@ static inline void bond_set_backup_slave(struct slave *slave)
303 } 303 }
304} 304}
305 305
306static inline void bond_slave_state_change(struct bonding *bond)
307{
308 struct list_head *iter;
309 struct slave *tmp;
310
311 bond_for_each_slave(bond, tmp, iter) {
312 if (tmp->link == BOND_LINK_UP)
313 bond_set_active_slave(tmp);
314 else if (tmp->link == BOND_LINK_DOWN)
315 bond_set_backup_slave(tmp);
316 }
317}
318
306static inline int bond_slave_state(struct slave *slave) 319static inline int bond_slave_state(struct slave *slave)
307{ 320{
308 return slave->backup; 321 return slave->backup;
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
index 811fa5d5c697..30104b60da85 100644
--- a/drivers/net/ethernet/8390/apne.c
+++ b/drivers/net/ethernet/8390/apne.c
@@ -212,7 +212,6 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
212 int neX000, ctron; 212 int neX000, ctron;
213#endif 213#endif
214 static unsigned version_printed; 214 static unsigned version_printed;
215 struct ei_device *ei_local = netdev_priv(dev);
216 215
217 if ((apne_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0)) 216 if ((apne_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
218 netdev_info(dev, version); 217 netdev_info(dev, version);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 92a467ff4104..38fc794c1655 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -358,49 +358,47 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
358 358
359 cfg_idx = bnx2x_get_link_cfg_idx(bp); 359 cfg_idx = bnx2x_get_link_cfg_idx(bp);
360 old_multi_phy_config = bp->link_params.multi_phy_config; 360 old_multi_phy_config = bp->link_params.multi_phy_config;
361 switch (cmd->port) { 361 if (cmd->port != bnx2x_get_port_type(bp)) {
362 case PORT_TP: 362 switch (cmd->port) {
363 if (bp->port.supported[cfg_idx] & SUPPORTED_TP) 363 case PORT_TP:
364 break; /* no port change */ 364 if (!(bp->port.supported[0] & SUPPORTED_TP ||
365 365 bp->port.supported[1] & SUPPORTED_TP)) {
366 if (!(bp->port.supported[0] & SUPPORTED_TP || 366 DP(BNX2X_MSG_ETHTOOL,
367 bp->port.supported[1] & SUPPORTED_TP)) { 367 "Unsupported port type\n");
368 DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n"); 368 return -EINVAL;
369 return -EINVAL; 369 }
370 } 370 bp->link_params.multi_phy_config &=
371 bp->link_params.multi_phy_config &= 371 ~PORT_HW_CFG_PHY_SELECTION_MASK;
372 ~PORT_HW_CFG_PHY_SELECTION_MASK; 372 if (bp->link_params.multi_phy_config &
373 if (bp->link_params.multi_phy_config & 373 PORT_HW_CFG_PHY_SWAPPED_ENABLED)
374 PORT_HW_CFG_PHY_SWAPPED_ENABLED) 374 bp->link_params.multi_phy_config |=
375 bp->link_params.multi_phy_config |= 375 PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
376 PORT_HW_CFG_PHY_SELECTION_SECOND_PHY; 376 else
377 else 377 bp->link_params.multi_phy_config |=
378 bp->link_params.multi_phy_config |= 378 PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
379 PORT_HW_CFG_PHY_SELECTION_FIRST_PHY; 379 break;
380 break; 380 case PORT_FIBRE:
381 case PORT_FIBRE: 381 case PORT_DA:
382 case PORT_DA: 382 if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
383 if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE) 383 bp->port.supported[1] & SUPPORTED_FIBRE)) {
384 break; /* no port change */ 384 DP(BNX2X_MSG_ETHTOOL,
385 385 "Unsupported port type\n");
386 if (!(bp->port.supported[0] & SUPPORTED_FIBRE || 386 return -EINVAL;
387 bp->port.supported[1] & SUPPORTED_FIBRE)) { 387 }
388 bp->link_params.multi_phy_config &=
389 ~PORT_HW_CFG_PHY_SELECTION_MASK;
390 if (bp->link_params.multi_phy_config &
391 PORT_HW_CFG_PHY_SWAPPED_ENABLED)
392 bp->link_params.multi_phy_config |=
393 PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
394 else
395 bp->link_params.multi_phy_config |=
396 PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
397 break;
398 default:
388 DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n"); 399 DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
389 return -EINVAL; 400 return -EINVAL;
390 } 401 }
391 bp->link_params.multi_phy_config &=
392 ~PORT_HW_CFG_PHY_SELECTION_MASK;
393 if (bp->link_params.multi_phy_config &
394 PORT_HW_CFG_PHY_SWAPPED_ENABLED)
395 bp->link_params.multi_phy_config |=
396 PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
397 else
398 bp->link_params.multi_phy_config |=
399 PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
400 break;
401 default:
402 DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
403 return -EINVAL;
404 } 402 }
405 /* Save new config in case command complete successfully */ 403 /* Save new config in case command complete successfully */
406 new_multi_phy_config = bp->link_params.multi_phy_config; 404 new_multi_phy_config = bp->link_params.multi_phy_config;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e118a3ec62bc..c9c445e7b4a5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13102,9 +13102,9 @@ static void __bnx2x_remove(struct pci_dev *pdev,
13102 13102
13103 if (atomic_read(&pdev->enable_cnt) == 1) 13103 if (atomic_read(&pdev->enable_cnt) == 1)
13104 pci_release_regions(pdev); 13104 pci_release_regions(pdev);
13105 }
13106 13105
13107 pci_disable_device(pdev); 13106 pci_disable_device(pdev);
13107 }
13108} 13108}
13109 13109
13110static void bnx2x_remove_one(struct pci_dev *pdev) 13110static void bnx2x_remove_one(struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index a4b940862b83..b901371ca361 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -4440,9 +4440,10 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
4440 /* Check if APP Table has changed */ 4440 /* Check if APP Table has changed */
4441 if (memcmp(&new_cfg->app, 4441 if (memcmp(&new_cfg->app,
4442 &old_cfg->app, 4442 &old_cfg->app,
4443 sizeof(new_cfg->app))) 4443 sizeof(new_cfg->app))) {
4444 need_reconfig = true; 4444 need_reconfig = true;
4445 dev_info(&pf->pdev->dev, "APP Table change detected.\n"); 4445 dev_info(&pf->pdev->dev, "APP Table change detected.\n");
4446 }
4446 4447
4447 return need_reconfig; 4448 return need_reconfig;
4448} 4449}
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 6509935d145e..55a37ae11440 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -5020,6 +5020,8 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
5020 } 5020 }
5021 } 5021 }
5022 5022
5023 netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);
5024
5023 err = register_netdev(dev); 5025 err = register_netdev(dev);
5024 if (err) { 5026 if (err) {
5025 dev_err(&pdev->dev, "cannot register net device\n"); 5027 dev_err(&pdev->dev, "cannot register net device\n");
@@ -5028,8 +5030,6 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
5028 5030
5029 netif_carrier_off(dev); 5031 netif_carrier_off(dev);
5030 5032
5031 netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);
5032
5033 sky2_show_addr(dev); 5033 sky2_show_addr(dev);
5034 5034
5035 if (hw->ports > 1) { 5035 if (hw->ports > 1) {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 30874cda8476..54ebf300332a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -683,12 +683,17 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
683 adapter->ahw->linkup = 0; 683 adapter->ahw->linkup = 0;
684 netif_carrier_off(netdev); 684 netif_carrier_off(netdev);
685 } else if (!adapter->ahw->linkup && linkup) { 685 } else if (!adapter->ahw->linkup && linkup) {
686 /* Do not advertise Link up if the port is in loopback mode */ 686 adapter->ahw->linkup = 1;
687 if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode) 687
688 /* Do not advertise Link up to the stack if device
689 * is in loopback mode
690 */
691 if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode) {
692 netdev_info(netdev, "NIC Link is up for loopback test\n");
688 return; 693 return;
694 }
689 695
690 netdev_info(netdev, "NIC Link is up\n"); 696 netdev_info(netdev, "NIC Link is up\n");
691 adapter->ahw->linkup = 1;
692 netif_carrier_on(netdev); 697 netif_carrier_on(netdev);
693 } 698 }
694} 699}
@@ -1150,13 +1155,13 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
1150 u16 lro_length, length, data_offset, t_vid, vid = 0xffff; 1155 u16 lro_length, length, data_offset, t_vid, vid = 0xffff;
1151 u32 seq_number; 1156 u32 seq_number;
1152 1157
1153 if (unlikely(ring > adapter->max_rds_rings)) 1158 if (unlikely(ring >= adapter->max_rds_rings))
1154 return NULL; 1159 return NULL;
1155 1160
1156 rds_ring = &recv_ctx->rds_rings[ring]; 1161 rds_ring = &recv_ctx->rds_rings[ring];
1157 1162
1158 index = qlcnic_get_lro_sts_refhandle(sts_data0); 1163 index = qlcnic_get_lro_sts_refhandle(sts_data0);
1159 if (unlikely(index > rds_ring->num_desc)) 1164 if (unlikely(index >= rds_ring->num_desc))
1160 return NULL; 1165 return NULL;
1161 1166
1162 buffer = &rds_ring->rx_buf_arr[index]; 1167 buffer = &rds_ring->rx_buf_arr[index];
@@ -1662,13 +1667,13 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
1662 u16 vid = 0xffff; 1667 u16 vid = 0xffff;
1663 int err; 1668 int err;
1664 1669
1665 if (unlikely(ring > adapter->max_rds_rings)) 1670 if (unlikely(ring >= adapter->max_rds_rings))
1666 return NULL; 1671 return NULL;
1667 1672
1668 rds_ring = &recv_ctx->rds_rings[ring]; 1673 rds_ring = &recv_ctx->rds_rings[ring];
1669 1674
1670 index = qlcnic_83xx_hndl(sts_data[0]); 1675 index = qlcnic_83xx_hndl(sts_data[0]);
1671 if (unlikely(index > rds_ring->num_desc)) 1676 if (unlikely(index >= rds_ring->num_desc))
1672 return NULL; 1677 return NULL;
1673 1678
1674 buffer = &rds_ring->rx_buf_arr[index]; 1679 buffer = &rds_ring->rx_buf_arr[index];
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 1f79d47c45fa..ba78c7481fa3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1837,6 +1837,7 @@ int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1837 qlcnic_linkevent_request(adapter, 1); 1837 qlcnic_linkevent_request(adapter, 1);
1838 1838
1839 adapter->ahw->reset_context = 0; 1839 adapter->ahw->reset_context = 0;
1840 netif_tx_start_all_queues(netdev);
1840 return 0; 1841 return 0;
1841} 1842}
1842 1843
@@ -2704,14 +2705,8 @@ static int qlcnic_open(struct net_device *netdev)
2704 2705
2705 err = __qlcnic_up(adapter, netdev); 2706 err = __qlcnic_up(adapter, netdev);
2706 if (err) 2707 if (err)
2707 goto err_out; 2708 qlcnic_detach(adapter);
2708
2709 netif_tx_start_all_queues(netdev);
2710
2711 return 0;
2712 2709
2713err_out:
2714 qlcnic_detach(adapter);
2715 return err; 2710 return err;
2716} 2711}
2717 2712
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 17a1ca2050f4..0638c1810d54 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -448,8 +448,7 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
448 return 0; 448 return 0;
449} 449}
450 450
451static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter, 451static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
452 struct qlcnic_info *info)
453{ 452{
454 struct qlcnic_sriov *sriov = adapter->ahw->sriov; 453 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
455 struct qlcnic_cmd_args cmd; 454 struct qlcnic_cmd_args cmd;
@@ -495,10 +494,6 @@ static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
495 if (err) 494 if (err)
496 return -EIO; 495 return -EIO;
497 496
498 err = qlcnic_sriov_get_vf_acl(adapter, &nic_info);
499 if (err)
500 return err;
501
502 if (qlcnic_83xx_get_port_info(adapter)) 497 if (qlcnic_83xx_get_port_info(adapter))
503 return -EIO; 498 return -EIO;
504 499
@@ -555,6 +550,10 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
555 if (err) 550 if (err)
556 goto err_out_send_channel_term; 551 goto err_out_send_channel_term;
557 552
553 err = qlcnic_sriov_get_vf_acl(adapter);
554 if (err)
555 goto err_out_send_channel_term;
556
558 err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac); 557 err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
559 if (err) 558 if (err)
560 goto err_out_send_channel_term; 559 goto err_out_send_channel_term;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d93aa87408c2..a2e7d2c96e36 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1524,9 +1524,9 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv)
1524 priv->dev->dev_addr, 0); 1524 priv->dev->dev_addr, 0);
1525 if (!is_valid_ether_addr(priv->dev->dev_addr)) 1525 if (!is_valid_ether_addr(priv->dev->dev_addr))
1526 eth_hw_addr_random(priv->dev); 1526 eth_hw_addr_random(priv->dev);
1527 pr_info("%s: device MAC address %pM\n", priv->dev->name,
1528 priv->dev->dev_addr);
1527 } 1529 }
1528 pr_warn("%s: device MAC address %pM\n", priv->dev->name,
1529 priv->dev->dev_addr);
1530} 1530}
1531 1531
1532/** 1532/**
@@ -1635,7 +1635,7 @@ static int stmmac_hw_setup(struct net_device *dev)
1635 stmmac_mmc_setup(priv); 1635 stmmac_mmc_setup(priv);
1636 1636
1637 ret = stmmac_init_ptp(priv); 1637 ret = stmmac_init_ptp(priv);
1638 if (ret) 1638 if (ret && ret != -EOPNOTSUPP)
1639 pr_warn("%s: failed PTP initialisation\n", __func__); 1639 pr_warn("%s: failed PTP initialisation\n", __func__);
1640 1640
1641#ifdef CONFIG_STMMAC_DEBUG_FS 1641#ifdef CONFIG_STMMAC_DEBUG_FS
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index a26eecb1212c..7b594ce3f21d 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -462,7 +462,7 @@ struct nvsp_message {
462 462
463#define NETVSC_MTU 65536 463#define NETVSC_MTU 65536
464 464
465#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*2) /* 2MB */ 465#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*16) /* 16MB */
466 466
467#define NETVSC_RECEIVE_BUFFER_ID 0xcafe 467#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
468 468
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 93b485b96249..03a2c6e17158 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -136,8 +136,7 @@ static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
136 136
137 if (net_device->recv_buf) { 137 if (net_device->recv_buf) {
138 /* Free up the receive buffer */ 138 /* Free up the receive buffer */
139 free_pages((unsigned long)net_device->recv_buf, 139 vfree(net_device->recv_buf);
140 get_order(net_device->recv_buf_size));
141 net_device->recv_buf = NULL; 140 net_device->recv_buf = NULL;
142 } 141 }
143 142
@@ -163,9 +162,7 @@ static int netvsc_init_recv_buf(struct hv_device *device)
163 return -ENODEV; 162 return -ENODEV;
164 ndev = net_device->ndev; 163 ndev = net_device->ndev;
165 164
166 net_device->recv_buf = 165 net_device->recv_buf = vzalloc(net_device->recv_buf_size);
167 (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
168 get_order(net_device->recv_buf_size));
169 if (!net_device->recv_buf) { 166 if (!net_device->recv_buf) {
170 netdev_err(ndev, "unable to allocate receive " 167 netdev_err(ndev, "unable to allocate receive "
171 "buffer of size %d\n", net_device->recv_buf_size); 168 "buffer of size %d\n", net_device->recv_buf_size);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index bcf01af4b879..44c4db8450f0 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -69,6 +69,7 @@
69#include <net/netns/generic.h> 69#include <net/netns/generic.h>
70#include <net/rtnetlink.h> 70#include <net/rtnetlink.h>
71#include <net/sock.h> 71#include <net/sock.h>
72#include <linux/seq_file.h>
72 73
73#include <asm/uaccess.h> 74#include <asm/uaccess.h>
74 75
@@ -2228,6 +2229,27 @@ static int tun_chr_close(struct inode *inode, struct file *file)
2228 return 0; 2229 return 0;
2229} 2230}
2230 2231
2232#ifdef CONFIG_PROC_FS
2233static int tun_chr_show_fdinfo(struct seq_file *m, struct file *f)
2234{
2235 struct tun_struct *tun;
2236 struct ifreq ifr;
2237
2238 memset(&ifr, 0, sizeof(ifr));
2239
2240 rtnl_lock();
2241 tun = tun_get(f);
2242 if (tun)
2243 tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
2244 rtnl_unlock();
2245
2246 if (tun)
2247 tun_put(tun);
2248
2249 return seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
2250}
2251#endif
2252
2231static const struct file_operations tun_fops = { 2253static const struct file_operations tun_fops = {
2232 .owner = THIS_MODULE, 2254 .owner = THIS_MODULE,
2233 .llseek = no_llseek, 2255 .llseek = no_llseek,
@@ -2242,7 +2264,10 @@ static const struct file_operations tun_fops = {
2242#endif 2264#endif
2243 .open = tun_chr_open, 2265 .open = tun_chr_open,
2244 .release = tun_chr_close, 2266 .release = tun_chr_close,
2245 .fasync = tun_chr_fasync 2267 .fasync = tun_chr_fasync,
2268#ifdef CONFIG_PROC_FS
2269 .show_fdinfo = tun_chr_show_fdinfo,
2270#endif
2246}; 2271};
2247 2272
2248static struct miscdevice tun_miscdev = { 2273static struct miscdevice tun_miscdev = {
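Note (not part of the patch): the tun.c hunk above adds an "iff:" line to the fdinfo entry of a tun/tap file descriptor. The user-space sketch below shows how a process could read that field back for its own descriptor; the device name "tun-demo" is made up, and TUNSETIFF requires CAP_NET_ADMIN.

    /* Sketch: create a tun device, then read "iff:" from /proc/self/fdinfo/<fd>. */
    #include <fcntl.h>
    #include <linux/if.h>
    #include <linux/if_tun.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
            struct ifreq ifr;
            char path[64], line[128];
            int fd = open("/dev/net/tun", O_RDWR);
            FILE *info;

            if (fd < 0) {
                    perror("open(/dev/net/tun)");
                    return 1;
            }
            memset(&ifr, 0, sizeof(ifr));
            ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
            strncpy(ifr.ifr_name, "tun-demo", IFNAMSIZ - 1);
            if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
                    perror("ioctl(TUNSETIFF)");
                    return 1;
            }
            snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
            info = fopen(path, "r");
            if (info) {
                    while (fgets(line, sizeof(line), info))
                            if (!strncmp(line, "iff:", 4))
                                    fputs(line, stdout); /* e.g. "iff:   tun-demo" */
                    fclose(info);
            }
            close(fd);
            return 0;
    }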
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e955c5692986..ff04d4f95baa 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -117,6 +117,7 @@ struct netfront_info {
117 } tx_skbs[NET_TX_RING_SIZE]; 117 } tx_skbs[NET_TX_RING_SIZE];
118 grant_ref_t gref_tx_head; 118 grant_ref_t gref_tx_head;
119 grant_ref_t grant_tx_ref[NET_TX_RING_SIZE]; 119 grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
120 struct page *grant_tx_page[NET_TX_RING_SIZE];
120 unsigned tx_skb_freelist; 121 unsigned tx_skb_freelist;
121 122
122 spinlock_t rx_lock ____cacheline_aligned_in_smp; 123 spinlock_t rx_lock ____cacheline_aligned_in_smp;
@@ -396,6 +397,7 @@ static void xennet_tx_buf_gc(struct net_device *dev)
396 gnttab_release_grant_reference( 397 gnttab_release_grant_reference(
397 &np->gref_tx_head, np->grant_tx_ref[id]); 398 &np->gref_tx_head, np->grant_tx_ref[id]);
398 np->grant_tx_ref[id] = GRANT_INVALID_REF; 399 np->grant_tx_ref[id] = GRANT_INVALID_REF;
400 np->grant_tx_page[id] = NULL;
399 add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id); 401 add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
400 dev_kfree_skb_irq(skb); 402 dev_kfree_skb_irq(skb);
401 } 403 }
@@ -452,6 +454,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
452 gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, 454 gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
453 mfn, GNTMAP_readonly); 455 mfn, GNTMAP_readonly);
454 456
457 np->grant_tx_page[id] = virt_to_page(data);
455 tx->gref = np->grant_tx_ref[id] = ref; 458 tx->gref = np->grant_tx_ref[id] = ref;
456 tx->offset = offset; 459 tx->offset = offset;
457 tx->size = len; 460 tx->size = len;
@@ -497,6 +500,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
497 np->xbdev->otherend_id, 500 np->xbdev->otherend_id,
498 mfn, GNTMAP_readonly); 501 mfn, GNTMAP_readonly);
499 502
503 np->grant_tx_page[id] = page;
500 tx->gref = np->grant_tx_ref[id] = ref; 504 tx->gref = np->grant_tx_ref[id] = ref;
501 tx->offset = offset; 505 tx->offset = offset;
502 tx->size = bytes; 506 tx->size = bytes;
@@ -596,6 +600,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
596 mfn = virt_to_mfn(data); 600 mfn = virt_to_mfn(data);
597 gnttab_grant_foreign_access_ref( 601 gnttab_grant_foreign_access_ref(
598 ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly); 602 ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
603 np->grant_tx_page[id] = virt_to_page(data);
599 tx->gref = np->grant_tx_ref[id] = ref; 604 tx->gref = np->grant_tx_ref[id] = ref;
600 tx->offset = offset; 605 tx->offset = offset;
601 tx->size = len; 606 tx->size = len;
@@ -1085,10 +1090,11 @@ static void xennet_release_tx_bufs(struct netfront_info *np)
1085 continue; 1090 continue;
1086 1091
1087 skb = np->tx_skbs[i].skb; 1092 skb = np->tx_skbs[i].skb;
1088 gnttab_end_foreign_access_ref(np->grant_tx_ref[i], 1093 get_page(np->grant_tx_page[i]);
1089 GNTMAP_readonly); 1094 gnttab_end_foreign_access(np->grant_tx_ref[i],
1090 gnttab_release_grant_reference(&np->gref_tx_head, 1095 GNTMAP_readonly,
1091 np->grant_tx_ref[i]); 1096 (unsigned long)page_address(np->grant_tx_page[i]));
1097 np->grant_tx_page[i] = NULL;
1092 np->grant_tx_ref[i] = GRANT_INVALID_REF; 1098 np->grant_tx_ref[i] = GRANT_INVALID_REF;
1093 add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i); 1099 add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
1094 dev_kfree_skb_irq(skb); 1100 dev_kfree_skb_irq(skb);
@@ -1097,78 +1103,35 @@ static void xennet_release_tx_bufs(struct netfront_info *np)
1097 1103
1098static void xennet_release_rx_bufs(struct netfront_info *np) 1104static void xennet_release_rx_bufs(struct netfront_info *np)
1099{ 1105{
1100 struct mmu_update *mmu = np->rx_mmu;
1101 struct multicall_entry *mcl = np->rx_mcl;
1102 struct sk_buff_head free_list;
1103 struct sk_buff *skb;
1104 unsigned long mfn;
1105 int xfer = 0, noxfer = 0, unused = 0;
1106 int id, ref; 1106 int id, ref;
1107 1107
1108 dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
1109 __func__);
1110 return;
1111
1112 skb_queue_head_init(&free_list);
1113
1114 spin_lock_bh(&np->rx_lock); 1108 spin_lock_bh(&np->rx_lock);
1115 1109
1116 for (id = 0; id < NET_RX_RING_SIZE; id++) { 1110 for (id = 0; id < NET_RX_RING_SIZE; id++) {
1117 ref = np->grant_rx_ref[id]; 1111 struct sk_buff *skb;
1118 if (ref == GRANT_INVALID_REF) { 1112 struct page *page;
1119 unused++;
1120 continue;
1121 }
1122 1113
1123 skb = np->rx_skbs[id]; 1114 skb = np->rx_skbs[id];
1124 mfn = gnttab_end_foreign_transfer_ref(ref); 1115 if (!skb)
1125 gnttab_release_grant_reference(&np->gref_rx_head, ref);
1126 np->grant_rx_ref[id] = GRANT_INVALID_REF;
1127
1128 if (0 == mfn) {
1129 skb_shinfo(skb)->nr_frags = 0;
1130 dev_kfree_skb(skb);
1131 noxfer++;
1132 continue; 1116 continue;
1133 }
1134 1117
1135 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 1118 ref = np->grant_rx_ref[id];
1136 /* Remap the page. */ 1119 if (ref == GRANT_INVALID_REF)
1137 const struct page *page = 1120 continue;
1138 skb_frag_page(&skb_shinfo(skb)->frags[0]);
1139 unsigned long pfn = page_to_pfn(page);
1140 void *vaddr = page_address(page);
1141 1121
1142 MULTI_update_va_mapping(mcl, (unsigned long)vaddr, 1122 page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
1143 mfn_pte(mfn, PAGE_KERNEL),
1144 0);
1145 mcl++;
1146 mmu->ptr = ((u64)mfn << PAGE_SHIFT)
1147 | MMU_MACHPHYS_UPDATE;
1148 mmu->val = pfn;
1149 mmu++;
1150 1123
1151 set_phys_to_machine(pfn, mfn); 1124 /* gnttab_end_foreign_access() needs a page ref until
1152 } 1125 * foreign access is ended (which may be deferred).
1153 __skb_queue_tail(&free_list, skb); 1126 */
1154 xfer++; 1127 get_page(page);
1155 } 1128 gnttab_end_foreign_access(ref, 0,
1156 1129 (unsigned long)page_address(page));
1157 dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n", 1130 np->grant_rx_ref[id] = GRANT_INVALID_REF;
1158 __func__, xfer, noxfer, unused);
1159 1131
1160 if (xfer) { 1132 kfree_skb(skb);
1161 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1162 /* Do all the remapping work and M2P updates. */
1163 MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
1164 NULL, DOMID_SELF);
1165 mcl++;
1166 HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
1167 }
1168 } 1133 }
1169 1134
1170 __skb_queue_purge(&free_list);
1171
1172 spin_unlock_bh(&np->rx_lock); 1135 spin_unlock_bh(&np->rx_lock);
1173} 1136}
1174 1137
@@ -1339,6 +1302,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1339 for (i = 0; i < NET_RX_RING_SIZE; i++) { 1302 for (i = 0; i < NET_RX_RING_SIZE; i++) {
1340 np->rx_skbs[i] = NULL; 1303 np->rx_skbs[i] = NULL;
1341 np->grant_rx_ref[i] = GRANT_INVALID_REF; 1304 np->grant_rx_ref[i] = GRANT_INVALID_REF;
1305 np->grant_tx_page[i] = NULL;
1342 } 1306 }
1343 1307
1344 /* A grant for every tx ring slot */ 1308 /* A grant for every tx ring slot */
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index ac0bdded060f..a0de045eb227 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -738,6 +738,8 @@ struct qeth_discipline {
738 int (*freeze)(struct ccwgroup_device *); 738 int (*freeze)(struct ccwgroup_device *);
739 int (*thaw) (struct ccwgroup_device *); 739 int (*thaw) (struct ccwgroup_device *);
740 int (*restore)(struct ccwgroup_device *); 740 int (*restore)(struct ccwgroup_device *);
741 int (*control_event_handler)(struct qeth_card *card,
742 struct qeth_ipa_cmd *cmd);
741}; 743};
742 744
743struct qeth_vlan_vid { 745struct qeth_vlan_vid {
@@ -948,13 +950,10 @@ int qeth_query_card_info(struct qeth_card *card,
948int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *, 950int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
949 int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long), 951 int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long),
950 void *reply_param); 952 void *reply_param);
951void qeth_bridge_state_change(struct qeth_card *card, struct qeth_ipa_cmd *cmd);
952void qeth_bridgeport_query_support(struct qeth_card *card);
953int qeth_bridgeport_query_ports(struct qeth_card *card, 953int qeth_bridgeport_query_ports(struct qeth_card *card,
954 enum qeth_sbp_roles *role, enum qeth_sbp_states *state); 954 enum qeth_sbp_roles *role, enum qeth_sbp_states *state);
955int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role); 955int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role);
956int qeth_bridgeport_an_set(struct qeth_card *card, int enable); 956int qeth_bridgeport_an_set(struct qeth_card *card, int enable);
957void qeth_bridge_host_event(struct qeth_card *card, struct qeth_ipa_cmd *cmd);
958int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); 957int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
959int qeth_get_elements_no(struct qeth_card *, struct sk_buff *, int); 958int qeth_get_elements_no(struct qeth_card *, struct sk_buff *, int);
960int qeth_get_elements_for_frags(struct sk_buff *); 959int qeth_get_elements_for_frags(struct sk_buff *);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index c05dacbf4e23..c3a83df07894 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -69,6 +69,7 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
69static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); 69static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
70 70
71struct workqueue_struct *qeth_wq; 71struct workqueue_struct *qeth_wq;
72EXPORT_SYMBOL_GPL(qeth_wq);
72 73
73static void qeth_close_dev_handler(struct work_struct *work) 74static void qeth_close_dev_handler(struct work_struct *work)
74{ 75{
@@ -616,15 +617,12 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
616 qeth_schedule_recovery(card); 617 qeth_schedule_recovery(card);
617 return NULL; 618 return NULL;
618 case IPA_CMD_SETBRIDGEPORT: 619 case IPA_CMD_SETBRIDGEPORT:
619 if (cmd->data.sbp.hdr.command_code ==
620 IPA_SBP_BRIDGE_PORT_STATE_CHANGE) {
621 qeth_bridge_state_change(card, cmd);
622 return NULL;
623 } else
624 return cmd;
625 case IPA_CMD_ADDRESS_CHANGE_NOTIF: 620 case IPA_CMD_ADDRESS_CHANGE_NOTIF:
626 qeth_bridge_host_event(card, cmd); 621 if (card->discipline->control_event_handler
627 return NULL; 622 (card, cmd))
623 return cmd;
624 else
625 return NULL;
628 case IPA_CMD_MODCCID: 626 case IPA_CMD_MODCCID:
629 return cmd; 627 return cmd;
630 case IPA_CMD_REGISTER_LOCAL_ADDR: 628 case IPA_CMD_REGISTER_LOCAL_ADDR:
@@ -4973,10 +4971,6 @@ retriable:
4973 qeth_query_setadapterparms(card); 4971 qeth_query_setadapterparms(card);
4974 if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) 4972 if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST))
4975 qeth_query_setdiagass(card); 4973 qeth_query_setdiagass(card);
4976 qeth_bridgeport_query_support(card);
4977 if (card->options.sbp.supported_funcs)
4978 dev_info(&card->gdev->dev,
4979 "The device represents a HiperSockets Bridge Capable Port\n");
4980 return 0; 4974 return 0;
4981out: 4975out:
4982 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " 4976 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 914d2c121fd8..0710550093ce 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -33,6 +33,11 @@ static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *,
33 unsigned long)); 33 unsigned long));
34static void qeth_l2_set_multicast_list(struct net_device *); 34static void qeth_l2_set_multicast_list(struct net_device *);
35static int qeth_l2_recover(void *); 35static int qeth_l2_recover(void *);
36static void qeth_bridgeport_query_support(struct qeth_card *card);
37static void qeth_bridge_state_change(struct qeth_card *card,
38 struct qeth_ipa_cmd *cmd);
39static void qeth_bridge_host_event(struct qeth_card *card,
40 struct qeth_ipa_cmd *cmd);
36 41
37static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 42static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
38{ 43{
@@ -989,6 +994,10 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
989 rc = -ENODEV; 994 rc = -ENODEV;
990 goto out_remove; 995 goto out_remove;
991 } 996 }
997 qeth_bridgeport_query_support(card);
998 if (card->options.sbp.supported_funcs)
999 dev_info(&card->gdev->dev,
1000 "The device represents a HiperSockets Bridge Capable Port\n");
992 qeth_trace_features(card); 1001 qeth_trace_features(card);
993 1002
994 if (!card->dev && qeth_l2_setup_netdev(card)) { 1003 if (!card->dev && qeth_l2_setup_netdev(card)) {
@@ -1233,6 +1242,26 @@ out:
1233 return rc; 1242 return rc;
1234} 1243}
1235 1244
1245/* Returns zero if the command is successfully "consumed" */
1246static int qeth_l2_control_event(struct qeth_card *card,
1247 struct qeth_ipa_cmd *cmd)
1248{
1249 switch (cmd->hdr.command) {
1250 case IPA_CMD_SETBRIDGEPORT:
1251 if (cmd->data.sbp.hdr.command_code ==
1252 IPA_SBP_BRIDGE_PORT_STATE_CHANGE) {
1253 qeth_bridge_state_change(card, cmd);
1254 return 0;
1255 } else
1256 return 1;
1257 case IPA_CMD_ADDRESS_CHANGE_NOTIF:
1258 qeth_bridge_host_event(card, cmd);
1259 return 0;
1260 default:
1261 return 1;
1262 }
1263}
1264
1236struct qeth_discipline qeth_l2_discipline = { 1265struct qeth_discipline qeth_l2_discipline = {
1237 .start_poll = qeth_qdio_start_poll, 1266 .start_poll = qeth_qdio_start_poll,
1238 .input_handler = (qdio_handler_t *) qeth_qdio_input_handler, 1267 .input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
@@ -1246,6 +1275,7 @@ struct qeth_discipline qeth_l2_discipline = {
1246 .freeze = qeth_l2_pm_suspend, 1275 .freeze = qeth_l2_pm_suspend,
1247 .thaw = qeth_l2_pm_resume, 1276 .thaw = qeth_l2_pm_resume,
1248 .restore = qeth_l2_pm_resume, 1277 .restore = qeth_l2_pm_resume,
1278 .control_event_handler = qeth_l2_control_event,
1249}; 1279};
1250EXPORT_SYMBOL_GPL(qeth_l2_discipline); 1280EXPORT_SYMBOL_GPL(qeth_l2_discipline);
1251 1281
@@ -1463,7 +1493,8 @@ static void qeth_bridge_state_change_worker(struct work_struct *work)
1463 kfree(data); 1493 kfree(data);
1464} 1494}
1465 1495
1466void qeth_bridge_state_change(struct qeth_card *card, struct qeth_ipa_cmd *cmd) 1496static void qeth_bridge_state_change(struct qeth_card *card,
1497 struct qeth_ipa_cmd *cmd)
1467{ 1498{
1468 struct qeth_sbp_state_change *qports = 1499 struct qeth_sbp_state_change *qports =
1469 &cmd->data.sbp.data.state_change; 1500 &cmd->data.sbp.data.state_change;
@@ -1488,7 +1519,6 @@ void qeth_bridge_state_change(struct qeth_card *card, struct qeth_ipa_cmd *cmd)
1488 sizeof(struct qeth_sbp_state_change) + extrasize); 1519 sizeof(struct qeth_sbp_state_change) + extrasize);
1489 queue_work(qeth_wq, &data->worker); 1520 queue_work(qeth_wq, &data->worker);
1490} 1521}
1491EXPORT_SYMBOL(qeth_bridge_state_change);
1492 1522
1493struct qeth_bridge_host_data { 1523struct qeth_bridge_host_data {
1494 struct work_struct worker; 1524 struct work_struct worker;
@@ -1528,7 +1558,8 @@ static void qeth_bridge_host_event_worker(struct work_struct *work)
1528 kfree(data); 1558 kfree(data);
1529} 1559}
1530 1560
1531void qeth_bridge_host_event(struct qeth_card *card, struct qeth_ipa_cmd *cmd) 1561static void qeth_bridge_host_event(struct qeth_card *card,
1562 struct qeth_ipa_cmd *cmd)
1532{ 1563{
1533 struct qeth_ipacmd_addr_change *hostevs = 1564 struct qeth_ipacmd_addr_change *hostevs =
1534 &cmd->data.addrchange; 1565 &cmd->data.addrchange;
@@ -1560,7 +1591,6 @@ void qeth_bridge_host_event(struct qeth_card *card, struct qeth_ipa_cmd *cmd)
1560 sizeof(struct qeth_ipacmd_addr_change) + extrasize); 1591 sizeof(struct qeth_ipacmd_addr_change) + extrasize);
1561 queue_work(qeth_wq, &data->worker); 1592 queue_work(qeth_wq, &data->worker);
1562} 1593}
1563EXPORT_SYMBOL(qeth_bridge_host_event);
1564 1594
1565/* SETBRIDGEPORT support; sending commands */ 1595/* SETBRIDGEPORT support; sending commands */
1566 1596
@@ -1683,7 +1713,7 @@ static int qeth_bridgeport_query_support_cb(struct qeth_card *card,
1683 * Sets bitmask of supported setbridgeport subfunctions in the qeth_card 1713 * Sets bitmask of supported setbridgeport subfunctions in the qeth_card
1684 * strucutre: card->options.sbp.supported_funcs. 1714 * strucutre: card->options.sbp.supported_funcs.
1685 */ 1715 */
1686void qeth_bridgeport_query_support(struct qeth_card *card) 1716static void qeth_bridgeport_query_support(struct qeth_card *card)
1687{ 1717{
1688 struct qeth_cmd_buffer *iob; 1718 struct qeth_cmd_buffer *iob;
1689 struct qeth_ipa_cmd *cmd; 1719 struct qeth_ipa_cmd *cmd;
@@ -1709,7 +1739,6 @@ void qeth_bridgeport_query_support(struct qeth_card *card)
1709 } 1739 }
1710 card->options.sbp.supported_funcs = cbctl.data.supported; 1740 card->options.sbp.supported_funcs = cbctl.data.supported;
1711} 1741}
1712EXPORT_SYMBOL_GPL(qeth_bridgeport_query_support);
1713 1742
1714static int qeth_bridgeport_query_ports_cb(struct qeth_card *card, 1743static int qeth_bridgeport_query_ports_cb(struct qeth_card *card,
1715 struct qeth_reply *reply, unsigned long data) 1744 struct qeth_reply *reply, unsigned long data)
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index c1b0b2761f8d..0f430424c3b8 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3593,6 +3593,13 @@ out:
3593 return rc; 3593 return rc;
3594} 3594}
3595 3595
3596/* Returns zero if the command is successfully "consumed" */
3597static int qeth_l3_control_event(struct qeth_card *card,
3598 struct qeth_ipa_cmd *cmd)
3599{
3600 return 1;
3601}
3602
3596struct qeth_discipline qeth_l3_discipline = { 3603struct qeth_discipline qeth_l3_discipline = {
3597 .start_poll = qeth_qdio_start_poll, 3604 .start_poll = qeth_qdio_start_poll,
3598 .input_handler = (qdio_handler_t *) qeth_qdio_input_handler, 3605 .input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
@@ -3606,6 +3613,7 @@ struct qeth_discipline qeth_l3_discipline = {
3606 .freeze = qeth_l3_pm_suspend, 3613 .freeze = qeth_l3_pm_suspend,
3607 .thaw = qeth_l3_pm_resume, 3614 .thaw = qeth_l3_pm_resume,
3608 .restore = qeth_l3_pm_resume, 3615 .restore = qeth_l3_pm_resume,
3616 .control_event_handler = qeth_l3_control_event,
3609}; 3617};
3610EXPORT_SYMBOL_GPL(qeth_l3_discipline); 3618EXPORT_SYMBOL_GPL(qeth_l3_discipline);
3611 3619
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 1f689e62e4cb..f589c9af8cbf 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2456,6 +2456,7 @@ void skb_zerocopy(struct sk_buff *to, const struct sk_buff *from,
2456void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len); 2456void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
2457int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); 2457int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
2458void skb_scrub_packet(struct sk_buff *skb, bool xnet); 2458void skb_scrub_packet(struct sk_buff *skb, bool xnet);
2459unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
2459struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); 2460struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
2460 2461
2461struct skb_checksum_ops { 2462struct skb_checksum_ops {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 8f519dbb358b..5976ef0846bd 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -47,6 +47,8 @@
 #include <linux/in.h>
 #include <linux/inet.h>
 #include <linux/slab.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
 #include <linux/netdevice.h>
 #ifdef CONFIG_NET_CLS_ACT
 #include <net/pkt_sched.h>
@@ -2119,7 +2121,7 @@ EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
 /**
  * skb_zerocopy - Zero copy skb to skb
  * @to: destination buffer
- * @source: source buffer
+ * @from: source buffer
  * @len: number of bytes to copy from source buffer
  * @hlen: size of linear headroom in destination buffer
  *
@@ -3916,3 +3918,26 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
         nf_reset_trace(skb);
 }
 EXPORT_SYMBOL_GPL(skb_scrub_packet);
+
+/**
+ * skb_gso_transport_seglen - Return length of individual segments of a gso packet
+ *
+ * @skb: GSO skb
+ *
+ * skb_gso_transport_seglen is used to determine the real size of the
+ * individual segments, including Layer4 headers (TCP/UDP).
+ *
+ * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
+ */
+unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
+{
+        const struct skb_shared_info *shinfo = skb_shinfo(skb);
+        unsigned int hdr_len;
+
+        if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+                hdr_len = tcp_hdrlen(skb);
+        else
+                hdr_len = sizeof(struct udphdr);
+        return hdr_len + shinfo->gso_size;
+}
+EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
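As the kerneldoc above says, the new helper only counts the L4 header plus the per-segment payload (gso_size); L2 and L3 headers are excluded. For example, a TCP GSO skb with a 20-byte TCP header and gso_size 1448 yields 1468-byte transport segments. A small standalone sketch of the same arithmetic; the struct and numbers below are illustrative stand-ins, not kernel API:

    #include <stdio.h>

    /* toy stand-ins for the fields used by skb_gso_transport_seglen() */
    struct toy_gso_info {
            unsigned int gso_size;   /* payload bytes per segment */
            unsigned int l4_hdr_len; /* tcp_hdrlen() or sizeof(struct udphdr) */
    };

    static unsigned int toy_transport_seglen(const struct toy_gso_info *i)
    {
            return i->l4_hdr_len + i->gso_size;
    }

    int main(void)
    {
            struct toy_gso_info tcp = { .gso_size = 1448, .l4_hdr_len = 20 };
            struct toy_gso_info udp = { .gso_size = 1472, .l4_hdr_len = 8 };

            printf("tcp seglen = %u\n", toy_transport_seglen(&tcp)); /* 1468 */
            printf("udp seglen = %u\n", toy_transport_seglen(&udp)); /* 1480 */
            return 0;
    }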
diff --git a/net/ieee802154/6lowpan_iphc.c b/net/ieee802154/6lowpan_iphc.c
index 083f905bf109..860aa2d445ba 100644
--- a/net/ieee802154/6lowpan_iphc.c
+++ b/net/ieee802154/6lowpan_iphc.c
@@ -678,7 +678,7 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
                 hc06_ptr += 3;
         } else {
                 /* compress nothing */
-                memcpy(hc06_ptr, &hdr, 4);
+                memcpy(hc06_ptr, hdr, 4);
                 /* replace the top byte with new ECN | DSCP format */
                 *hc06_ptr = tmp;
                 hc06_ptr += 4;
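In lowpan_header_compress() the variable hdr is a pointer to the IPv6 header, so the old memcpy(hc06_ptr, &hdr, 4) copied the first four bytes of the pointer variable itself rather than the first four bytes of the header. A tiny standalone illustration of the difference; the buffer contents are made up:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            unsigned char header[8] = { 0x60, 0x00, 0x00, 0x00,
                                        0x11, 0x22, 0x33, 0x44 };
            unsigned char *hdr = header;    /* like struct ipv6hdr *hdr */
            unsigned char right[4], wrong[4];

            memcpy(right, hdr, 4);   /* first 4 bytes of the header: 60 00 00 00 */
            memcpy(wrong, &hdr, 4);  /* first 4 bytes of the pointer value itself */

            printf("right: %02x %02x %02x %02x\n",
                   right[0], right[1], right[2], right[3]);
            printf("wrong: %02x %02x %02x %02x\n",
                   wrong[0], wrong[1], wrong[2], wrong[3]);
            return 0;
    }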
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index e7a92fdb36f6..ec4f762efda5 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -178,7 +178,7 @@ static int ipgre_err(struct sk_buff *skb, u32 info,
         else
                 itn = net_generic(net, ipgre_net_id);

-        iph = (const struct iphdr *)skb->data;
+        iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
         t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
                              iph->daddr, iph->saddr, tpi->key);

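ipgre_err() handles ICMP errors for GRE tunnels, and the tunnel lookup needs the addresses from the IP header of the original (tunneled) packet. An ICMP error carries that offending packet immediately after its 8-byte ICMP header, which is exactly where icmp_hdr(skb) + 1 points. A standalone sketch of the pointer arithmetic with a simplified stand-in layout:

    #include <stdio.h>
    #include <stdint.h>

    /* simplified 8-byte ICMP header, as found in an ICMP error message */
    struct icmphdr_toy {
            uint8_t  type;
            uint8_t  code;
            uint16_t checksum;
            uint32_t unused;
    };

    int main(void)
    {
            /* the offending packet's IP header follows the 8-byte ICMP
             * header, so "icmp header + 1" lands on the inner IP header */
            unsigned char buf[64] = {0};
            struct icmphdr_toy *icmph = (struct icmphdr_toy *)buf;
            unsigned char *inner_iphdr = (unsigned char *)(icmph + 1);

            printf("inner header offset = %td bytes\n", inner_iphdr - buf); /* 8 */
            return 0;
    }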
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 054a3e97d822..3d4da2c16b6a 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -314,7 +314,7 @@ static int ip_rcv_finish(struct sk_buff *skb)
         const struct iphdr *iph = ip_hdr(skb);
         struct rtable *rt;

-        if (sysctl_ip_early_demux && !skb_dst(skb)) {
+        if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
                 const struct net_protocol *ipprot;
                 int protocol = iph->protocol;

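The extra skb->sk == NULL test (mirrored on the IPv6 path further down) keeps early demux from attaching a socket when something earlier in the path, such as TPROXY, has already attached one; otherwise the earlier socket reference could be overwritten and never released. A toy userspace model of that guard, with made-up types and reference counting:

    #include <stdio.h>

    /* toy model: attaching a socket to a packet takes a reference that
     * the delivery path releases exactly once */
    struct toy_sock { int refcnt; };
    struct toy_skb  { struct toy_sock *sk; };

    static void attach(struct toy_skb *skb, struct toy_sock *sk)
    {
            sk->refcnt++;
            skb->sk = sk;
    }

    static void early_demux(struct toy_skb *skb, struct toy_sock *sk)
    {
            /* the fix: skip demux if an earlier hook already attached a
             * socket, so its reference is not overwritten and leaked */
            if (skb->sk == NULL)
                    attach(skb, sk);
    }

    int main(void)
    {
            struct toy_sock tproxy_sk = { 0 }, demux_sk = { 0 };
            struct toy_skb skb = { 0 };

            attach(&skb, &tproxy_sk);      /* TPROXY picked a socket first    */
            early_demux(&skb, &demux_sk);  /* guarded: skb.sk stays tproxy_sk */
            skb.sk->refcnt--;              /* delivery drops the one reference */

            printf("tproxy refcnt=%d demux refcnt=%d\n",
                   tproxy_sk.refcnt, demux_sk.refcnt); /* 0 and 0: no leak */
            return 0;
    }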
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index c0e3cb72ad70..bd28f386bd02 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -40,6 +40,7 @@
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/rculist.h>
+#include <linux/err.h>

 #include <net/sock.h>
 #include <net/ip.h>
@@ -930,7 +931,7 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
         }
         rtnl_unlock();

-        return PTR_RET(itn->fb_tunnel_dev);
+        return PTR_ERR_OR_ZERO(itn->fb_tunnel_dev);
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_init_net);

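PTR_ERR_OR_ZERO() is the current name for what PTR_RET() did: return the encoded errno if the argument is an error pointer and 0 otherwise, hence the linux/err.h include added above. A rough userspace sketch of the idea, with simplified stand-ins for the err.h macros:

    #include <stdio.h>
    #include <errno.h>

    /* simplified stand-ins for the linux/err.h helpers involved here */
    #define MAX_ERRNO        4095
    #define IS_ERR_VALUE(x)  ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

    static long toy_ptr_err_or_zero(const void *ptr)
    {
            /* PTR_ERR_OR_ZERO(): encoded errno for an error pointer,
             * 0 for a valid pointer; PTR_RET() was the older name */
            if (IS_ERR_VALUE((unsigned long)ptr))
                    return (long)ptr;
            return 0;
    }

    int main(void)
    {
            int dummy = 0;
            void *ok  = &dummy;
            void *err = (void *)(long)-ENOMEM;

            printf("ok  -> %ld\n", toy_ptr_err_or_zero(ok));  /* 0   */
            printf("err -> %ld\n", toy_ptr_err_or_zero(err)); /* -12 */
            return 0;
    }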
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 302d6fb1ff2b..51d54dc376f3 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -49,7 +49,7 @@

 int ip6_rcv_finish(struct sk_buff *skb)
 {
-        if (sysctl_ip_early_demux && !skb_dst(skb)) {
+        if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
                 const struct inet6_protocol *ipprot;

                 ipprot = rcu_dereference(inet6_protos[ipv6_hdr(skb)->nexthdr]);
diff --git a/net/llc/llc_output.c b/net/llc/llc_output.c
index 2dae8a5df23f..94425e421213 100644
--- a/net/llc/llc_output.c
+++ b/net/llc/llc_output.c
@@ -43,7 +43,7 @@ int llc_mac_hdr_init(struct sk_buff *skb,
                 rc = 0;
                 break;
         default:
-                WARN(1, "device type not supported: %d\n", skb->dev->type);
+                break;
         }
         return rc;
 }
diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
index 4106ca95ec86..7bf5b5b9e8b9 100644
--- a/net/rxrpc/ar-connection.c
+++ b/net/rxrpc/ar-connection.c
@@ -381,6 +381,8 @@ static int rxrpc_connect_exclusive(struct rxrpc_sock *rx,

                 rxrpc_assign_connection_id(conn);
                 rx->conn = conn;
+        } else {
+                spin_lock(&trans->client_lock);
         }

         /* we've got a connection with a free channel and we can now attach the
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
index 898492a8d61b..34b5490dde65 100644
--- a/net/rxrpc/ar-recvmsg.c
+++ b/net/rxrpc/ar-recvmsg.c
@@ -180,7 +180,8 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
                 if (copy > len - copied)
                         copy = len - copied;

-                if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+                if (skb->ip_summed == CHECKSUM_UNNECESSARY ||
+                    skb->ip_summed == CHECKSUM_PARTIAL) {
                         ret = skb_copy_datagram_iovec(skb, offset,
                                                       msg->msg_iov, copy);
                 } else {
@@ -353,6 +354,10 @@ csum_copy_error:
         if (continue_call)
                 rxrpc_put_call(continue_call);
         rxrpc_kill_skb(skb);
+        if (!(flags & MSG_PEEK)) {
+                if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
+                        BUG();
+        }
         skb_kill_datagram(&rx->sk, skb, flags);
         rxrpc_put_call(call);
         return -EAGAIN;
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index fbba5b0ec121..1cb413fead89 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -21,7 +21,6 @@
 #include <net/netlink.h>
 #include <net/sch_generic.h>
 #include <net/pkt_sched.h>
-#include <net/tcp.h>


 /* Simple Token Bucket Filter.
@@ -148,16 +147,10 @@ static u64 psched_ns_t2l(const struct psched_ratecfg *r,
  * Return length of individual segments of a gso packet,
  * including all headers (MAC, IP, TCP/UDP)
  */
-static unsigned int skb_gso_seglen(const struct sk_buff *skb)
+static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
 {
         unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
-        const struct skb_shared_info *shinfo = skb_shinfo(skb);
-
-        if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
-                hdr_len += tcp_hdrlen(skb);
-        else
-                hdr_len += sizeof(struct udphdr);
-        return hdr_len + shinfo->gso_size;
+        return hdr_len + skb_gso_transport_seglen(skb);
 }

 /* GSO packet is too big, segment it so that tbf can transmit
@@ -202,7 +195,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         int ret;

         if (qdisc_pkt_len(skb) > q->max_size) {
-                if (skb_is_gso(skb) && skb_gso_seglen(skb) <= q->max_size)
+                if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size)
                         return tbf_segment(skb, sch);
                 return qdisc_reshape_fail(skb, sch);
         }
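With the shared helper in place, skb_gso_mac_seglen() is just the L2+L3 header length (transport header minus MAC header) plus skb_gso_transport_seglen(), and tbf_enqueue() segments an oversized GSO skb only when each resulting on-the-wire segment would itself fit under max_size. A worked example with illustrative numbers (Ethernet + IPv4 + TCP, MSS 1448):

    #include <stdio.h>

    int main(void)
    {
            unsigned int l2_l3_hdr_len    = 14 + 20;   /* mac..transport gap  */
            unsigned int transport_seglen = 20 + 1448; /* tcp hdr + gso_size  */
            unsigned int mac_seglen = l2_l3_hdr_len + transport_seglen;

            /* tbf only segments a too-big GSO skb when each resulting
             * segment would fit under max_size */
            unsigned int max_size = 1514;

            printf("mac_seglen = %u, fits = %s\n", mac_seglen,
                   mac_seglen <= max_size ? "yes" : "no"); /* 1502, yes */
            return 0;
    }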