author    Linus Torvalds <torvalds@linux-foundation.org>  2013-05-04 23:10:04 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-05-04 23:10:04 -0400
commit    1aaf6d3d3d1e95f4be07e32dd84aa1c93855fbbd (patch)
tree      49e1fad1e1a1a3c7f2792c3554a876abfd58739a
parent    f589e9bfcfc4ec2b59bf36b994b75012c155799e (diff)
parent    777c2300865cb9b1b1791862ed23da677abfe6dc (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Several routines do not use netdev_features_t to hold feature
    bitmasks, fixes from Patrick McHardy and Bjørn Mork.

 2) Update cpsw IRQ software state and the actual HW irq enabling in
    the correct order.  From Mugunthan V N.

 3) When sending tipc packets to multiple bearers, we have to make
    copies of the SKB rather than just giving the original SKB
    directly.  Fix from Gerlando Falauto.

 4) Fix race with bridging topology change timer, from Stephen
    Hemminger.

 5) Fix TCPv6 segmentation handling in GRE and VXLAN, from Pravin B
    Shelar.

 6) Endian bug in USB pegasus driver, from Dan Carpenter.

 7) Fix crashes on MTU reduction in USB asix driver, from Holger
    Eitzenberger.

 8) Don't allow the kernel to BUG() just because the user puts some
    crap in an AF_PACKET mmap() ring descriptor.  Fix from Daniel
    Borkmann.

 9) Don't use variable sized arrays on the stack in xen-netback, from
    Wei Liu.

10) Fix stats reporting and an unbalanced napi_disable() in be2net
    driver.  From Somnath Kotur and Ajit Khaparde.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (25 commits)
  cxgb4: fix error recovery when t4_fw_hello returns a positive value
  sky2: Fix crash on receiving VLAN frames
  packet: tpacket_v3: do not trigger bug() on wrong header status
  asix: fix BUG in receive path when lowering MTU
  net: qmi_wwan: Add Telewell TW-LTE 4G
  usbnet: pegasus: endian bug in write_mii_word()
  vxlan: Fix TCPv6 segmentation.
  gre: Fix GREv4 TCPv6 segmentation.
  bridge: fix race with topology change timer
  tipc: pskb_copy() buffers when sending on more than one bearer
  tipc: tipc_bcbearer_send(): simplify bearer selection
  tipc: cosmetic: clean up comments and break a long line
  drivers: net: cpsw: irq not disabled in cpsw isr in particular sequence
  xen-netback: better names for thresholds
  xen-netback: avoid allocating variable size array on stack
  xen-netback: remove redundent parameter in netbk_count_requests
  be2net: Fix to fail probe if MSI-X enable fails for a VF
  be2net: avoid napi_disable() when it has not been enabled
  be2net: Fix firmware download for Lancer
  be2net: Fix to receive Multicast Packets when Promiscuous mode is enabled on certain devices
  ...
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c |  2
 drivers/net/ethernet/emulex/benet/be.h          |  1
 drivers/net/ethernet/emulex/benet/be_cmds.c     | 35
 drivers/net/ethernet/emulex/benet/be_cmds.h     |  2
 drivers/net/ethernet/emulex/benet/be_ethtool.c  |  1
 drivers/net/ethernet/emulex/benet/be_main.c     | 54
 drivers/net/ethernet/marvell/sky2.c             |  2
 drivers/net/ethernet/ti/cpsw.c                  |  2
 drivers/net/usb/asix_common.c                   |  3
 drivers/net/usb/pegasus.c                       |  3
 drivers/net/usb/qmi_wwan.c                      |  1
 drivers/net/xen-netback/netback.c               | 69
 net/8021q/vlan_dev.c                            |  2
 net/bridge/br_stp_timer.c                       |  2
 net/core/dev.c                                  |  2
 net/core/ethtool.c                              |  2
 net/ipv4/af_inet.c                              |  1
 net/ipv4/gre.c                                  |  4
 net/ipv4/udp.c                                  |  7
 net/packet/af_packet.c                          | 53
 net/tipc/bcast.c                                | 40
 21 files changed, 178 insertions(+), 110 deletions(-)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index c59ec3ddaa66..3cd397d60434 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -5204,7 +5204,7 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
 
 	if (t4_wait_dev_ready(adap) < 0)
 		return PCI_ERS_RESULT_DISCONNECT;
-	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL))
+	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
 		return PCI_ERS_RESULT_DISCONNECT;
 	adap->flags |= FW_OK;
 	if (adap_init1(adap, &c))
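
The fix works because t4_fw_hello() can legitimately return a positive value (the PF that became master), so truth-testing the return treated success as failure. Below is a compilable userspace sketch of that sign convention; fw_hello() is a hypothetical stand-in, not the driver's function.

#include <stdio.h>

/* Like t4_fw_hello(): non-negative (the master PF) on success,
 * negative errno on error.
 */
static int fw_hello(void)
{
	return 4;	/* success: PF 4 is master, a positive value */
}

int main(void)
{
	/* Buggy check: any non-zero return, including success, aborts */
	if (fw_hello())
		printf("buggy check: recovery aborted on a successful hello\n");

	/* Fixed check: only negative returns are errors */
	if (fw_hello() < 0)
		printf("never reached for a successful hello\n");
	return 0;
}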
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 234ce6f07544..f544b297c9ab 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -327,6 +327,7 @@ enum vf_state {
 
 #define BE_FLAGS_LINK_STATUS_INIT		1
 #define BE_FLAGS_WORKER_SCHEDULED		(1 << 3)
+#define BE_FLAGS_NAPI_ENABLED			(1 << 9)
 #define BE_UC_PMAC_COUNT			30
 #define BE_VF_UC_PMAC_COUNT			2
 #define BE_FLAGS_QNQ_ASYNC_EVT_RCVD		(1 << 11)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 25d3290b8cac..e1e5bb9d9054 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -961,19 +961,8 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
 			OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);
 
 	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
-	if (lancer_chip(adapter)) {
-		req->hdr.version = 2;
-		req->page_size = 1; /* 1 for 4K */
-		AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
-								no_delay);
-		AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
-						__ilog2_u32(cq->len/256));
-		AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
-		AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
-							ctxt, 1);
-		AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
-							ctxt, eq->id);
-	} else {
+
+	if (BEx_chip(adapter)) {
 		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
 							coalesce_wm);
 		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
@@ -983,6 +972,18 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
 		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
 		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
 		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
+	} else {
+		req->hdr.version = 2;
+		req->page_size = 1; /* 1 for 4K */
+		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
+								no_delay);
+		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
+						__ilog2_u32(cq->len/256));
+		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
+		AMAP_SET_BITS(struct amap_cq_context_v2, eventable,
+							ctxt, 1);
+		AMAP_SET_BITS(struct amap_cq_context_v2, eqid,
+							ctxt, eq->id);
 	}
 
 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -1763,10 +1764,12 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
 	req->if_id = cpu_to_le32(adapter->if_handle);
 	if (flags & IFF_PROMISC) {
 		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
-					BE_IF_FLAGS_VLAN_PROMISCUOUS);
+					BE_IF_FLAGS_VLAN_PROMISCUOUS |
+					BE_IF_FLAGS_MCAST_PROMISCUOUS);
 		if (value == ON)
 			req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
-						BE_IF_FLAGS_VLAN_PROMISCUOUS);
+						BE_IF_FLAGS_VLAN_PROMISCUOUS |
+						BE_IF_FLAGS_MCAST_PROMISCUOUS);
 	} else if (flags & IFF_ALLMULTI) {
 		req->if_flags_mask = req->if_flags =
 			cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
@@ -2084,7 +2087,7 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
 	spin_unlock_bh(&adapter->mcc_lock);
 
 	if (!wait_for_completion_timeout(&adapter->flash_compl,
-			msecs_to_jiffies(30000)))
+			msecs_to_jiffies(60000)))
 		status = -1;
 	else
 		status = adapter->flash_status;
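
The last hunk relies on wait_for_completion_timeout() returning 0 when the timer expires before complete() runs, and a non-zero remaining-jiffies count otherwise. A kernel-style sketch of that pattern (a fragment, not a full module; wait_for_flash() is a hypothetical helper name):

#include <linux/completion.h>
#include <linux/jiffies.h>

static int wait_for_flash(struct completion *flash_compl)
{
	/* returns 0 on timeout, else the jiffies left on the clock */
	if (!wait_for_completion_timeout(flash_compl,
					 msecs_to_jiffies(60000)))
		return -1;	/* firmware never signalled completion */
	return 0;
}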
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index a855668e0cc5..025bdb0d1764 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -381,7 +381,7 @@ struct amap_cq_context_be {
 	u8 rsvd5[32];		/* dword 3*/
 } __packed;
 
-struct amap_cq_context_lancer {
+struct amap_cq_context_v2 {
 	u8 rsvd0[12];		/* dword 0*/
 	u8 coalescwm[2];	/* dword 0*/
 	u8 nodelay;		/* dword 0*/
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 5733cde88e2c..3d4461adb3b4 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -85,6 +85,7 @@ static const struct be_ethtool_stat et_stats[] = {
 	{DRVSTAT_INFO(tx_pauseframes)},
 	{DRVSTAT_INFO(tx_controlframes)},
 	{DRVSTAT_INFO(rx_priority_pause_frames)},
+	{DRVSTAT_INFO(tx_priority_pauseframes)},
 	/* Received packets dropped when an internal fifo going into
 	 * main packet buffer tank (PMEM) overflows.
 	 */
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 4babc8a4a543..6c52a60dcdb7 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -410,6 +410,7 @@ static void populate_be_v1_stats(struct be_adapter *adapter)
 	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
 	drvs->tx_pauseframes = port_stats->tx_pauseframes;
 	drvs->tx_controlframes = port_stats->tx_controlframes;
+	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
 	drvs->jabber_events = port_stats->jabber_events;
 	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
 	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
@@ -471,11 +472,26 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
 	ACCESS_ONCE(*acc) = newacc;
 }
 
+void populate_erx_stats(struct be_adapter *adapter,
+			struct be_rx_obj *rxo,
+			u32 erx_stat)
+{
+	if (!BEx_chip(adapter))
+		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
+	else
+		/* below erx HW counter can actually wrap around after
+		 * 65535. Driver accumulates a 32-bit value
+		 */
+		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
+				     (u16)erx_stat);
+}
+
 void be_parse_stats(struct be_adapter *adapter)
 {
 	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
 	struct be_rx_obj *rxo;
 	int i;
+	u32 erx_stat;
 
 	if (lancer_chip(adapter)) {
 		populate_lancer_stats(adapter);
@@ -488,12 +504,8 @@ void be_parse_stats(struct be_adapter *adapter)
 
 	/* as erx_v1 is longer than v0, ok to use v1 for v0 access */
 	for_all_rx_queues(adapter, rxo, i) {
-		/* below erx HW counter can actually wrap around after
-		 * 65535. Driver accumulates a 32-bit value
-		 */
-		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
-				     (u16)erx->rx_drops_no_fragments \
-				     [rxo->q.id]);
+		erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
+		populate_erx_stats(adapter, rxo, erx_stat);
 	}
 }
@@ -2378,7 +2390,7 @@ static uint be_num_rss_want(struct be_adapter *adapter)
 	return num;
 }
 
-static void be_msix_enable(struct be_adapter *adapter)
+static int be_msix_enable(struct be_adapter *adapter)
 {
 #define BE_MIN_MSIX_VECTORS		1
 	int i, status, num_vec, num_roce_vec = 0;
@@ -2403,13 +2415,17 @@ static void be_msix_enable(struct be_adapter *adapter)
 		goto done;
 	} else if (status >= BE_MIN_MSIX_VECTORS) {
 		num_vec = status;
-		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
-				num_vec) == 0)
+		status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
+					 num_vec);
+		if (!status)
 			goto done;
 	}
 
 	dev_warn(dev, "MSIx enable failed\n");
-	return;
+	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
+	if (!be_physfn(adapter))
+		return status;
+	return 0;
 done:
 	if (be_roce_supported(adapter)) {
 		if (num_vec > num_roce_vec) {
@@ -2423,7 +2439,7 @@ done:
 	} else
 		adapter->num_msix_vec = num_vec;
 	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
-	return;
+	return 0;
 }
 
 static inline int be_msix_vec_get(struct be_adapter *adapter,
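
The negotiation pattern above is the classic one for the old pci_enable_msix() API: 0 means success, a positive return is the number of vectors actually available (retry with that), and a negative value is a hard error. Now the final status is also propagated so a VF, which has no INTx fallback, can fail the probe. A compilable userspace sketch, with enable_msix() as a hypothetical stand-in:

#include <stdio.h>

#define MIN_VECTORS 1

/* Stand-in for pci_enable_msix(): 0 on success, positive = vectors
 * actually available, negative = error. Pretend only 4 vectors exist.
 */
static int enable_msix(int requested)
{
	return requested > 4 ? 4 : 0;
}

static int msix_enable(int num_vec)
{
	int status = enable_msix(num_vec);

	if (status == 0)
		return 0;			/* got everything we asked for */
	if (status >= MIN_VECTORS)
		status = enable_msix(status);	/* retry with the suggested count */
	return status;				/* 0, or an error the probe can act on */
}

int main(void)
{
	printf("status = %d\n", msix_enable(8));	/* prints: status = 0 */
	return 0;
}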
@@ -2536,8 +2552,11 @@ static int be_close(struct net_device *netdev)
 
 	be_roce_dev_close(adapter);
 
-	for_all_evt_queues(adapter, eqo, i)
-		napi_disable(&eqo->napi);
+	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
+		for_all_evt_queues(adapter, eqo, i)
+			napi_disable(&eqo->napi);
+		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
+	}
 
 	be_async_mcc_disable(adapter);
 
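
The guard matters because be_close() can run after a be_open() that failed before napi_enable(), and napi_disable() blocks until the NAPI scheduled bit clears, which never happens for an instance that was never enabled. A minimal model of the flag-guarded teardown, with nap_enable()/nap_disable() as hypothetical stand-ins:

#include <stdbool.h>
#include <stdio.h>

struct adapter {
	bool napi_enabled;
};

static void nap_enable(struct adapter *a)  { a->napi_enabled = true; }
static void nap_disable(struct adapter *a) { a->napi_enabled = false; }

static void dev_close(struct adapter *a)
{
	if (a->napi_enabled)	/* skip disable if open() failed early */
		nap_disable(a);
}

int main(void)
{
	struct adapter a = { .napi_enabled = false };

	dev_close(&a);		/* safe: never enabled, so never disabled */
	nap_enable(&a);
	dev_close(&a);		/* normal teardown path */
	printf("ok\n");
	return 0;
}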
@@ -2631,7 +2650,9 @@ static int be_open(struct net_device *netdev)
 	if (status)
 		goto err;
 
-	be_irq_register(adapter);
+	status = be_irq_register(adapter);
+	if (status)
+		goto err;
 
 	for_all_rx_queues(adapter, rxo, i)
 		be_cq_notify(adapter, rxo->cq.id, true, 0);
@@ -2645,6 +2666,7 @@ static int be_open(struct net_device *netdev)
 		napi_enable(&eqo->napi);
 		be_eq_notify(adapter, eqo->q.id, true, false, 0);
 	}
+	adapter->flags |= BE_FLAGS_NAPI_ENABLED;
 
 	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
 	if (!status)
@@ -3100,7 +3122,9 @@ static int be_setup(struct be_adapter *adapter)
 	if (status)
 		goto err;
 
-	be_msix_enable(adapter);
+	status = be_msix_enable(adapter);
+	if (status)
+		goto err;
 
 	status = be_evt_queues_create(adapter);
 	if (status)
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 256ae789c143..d175bbd3ffd3 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2496,10 +2496,12 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
 		skb->ip_summed = re->skb->ip_summed;
 		skb->csum = re->skb->csum;
 		skb->rxhash = re->skb->rxhash;
+		skb->vlan_proto = re->skb->vlan_proto;
 		skb->vlan_tci = re->skb->vlan_tci;
 
 		pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
 					       length, PCI_DMA_FROMDEVICE);
+		re->skb->vlan_proto = 0;
 		re->skb->vlan_tci = 0;
 		re->skb->rxhash = 0;
 		re->skb->ip_summed = CHECKSUM_NONE;
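
Since 802.1ad support, an skb's VLAN state is the pair (vlan_proto, vlan_tci), and the crash came from copying and clearing only half of it. A plain-C model of the invariant the fix restores (not kernel code; field values are illustrative, in host order):

#include <stdint.h>
#include <stdio.h>

struct pkt {
	uint16_t vlan_proto;	/* e.g. 0x8100 for 802.1Q */
	uint16_t vlan_tci;	/* tag control info */
};

static void copy_vlan(struct pkt *dst, struct pkt *src)
{
	dst->vlan_proto = src->vlan_proto;	/* this copy was missing */
	dst->vlan_tci = src->vlan_tci;
	src->vlan_proto = 0;			/* recycle src in a clean state */
	src->vlan_tci = 0;
}

int main(void)
{
	struct pkt rx = { 0x8100, 5 }, copy = { 0, 0 };

	copy_vlan(&copy, &rx);
	printf("proto=0x%04x tci=%u\n", copy.vlan_proto, copy.vlan_tci);
	return 0;
}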
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 59c43918883e..21a5b291b4b3 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -555,8 +555,8 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
 		cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
 		prim_cpsw = cpsw_get_slave_priv(priv, 0);
 		if (prim_cpsw->irq_enabled == false) {
-			cpsw_enable_irq(priv);
 			prim_cpsw->irq_enabled = true;
+			cpsw_enable_irq(priv);
 		}
 	}
 
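
The two-line swap closes a race: the ISR consults irq_enabled, so the software flag must be published before the hardware interrupt is unmasked, or an IRQ landing between the two statements sees stale state. A fragment sketching the ordering, with hw_unmask_irq() as a hypothetical stand-in:

#include <stdbool.h>

static volatile bool irq_enabled;

static void hw_unmask_irq(void)
{
	/* after this returns, the ISR may run at any instant */
}

static void reenable_from_poll(void)
{
	irq_enabled = true;	/* publish software state first ... */
	hw_unmask_irq();	/* ... then let the ISR observe it */
}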
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index f7f623a5390e..577c72d5f369 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -100,6 +100,9 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
 			netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
 				   rx->size);
 			kfree_skb(rx->ax_skb);
+			rx->ax_skb = NULL;
+			rx->size = 0U;
+
 			return 0;
 		}
 
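
rx->ax_skb and rx->size persist across URBs while a packet is reassembled, so freeing the partial skb without resetting both fields left the next URB appending to freed memory. A plain-C model of the rule (free, then null the pointer and zero the expected count):

#include <stdlib.h>

struct rx_state {
	void *ax_skb;		/* partial packet being reassembled */
	unsigned int size;	/* bytes still expected for it */
};

static void drop_partial(struct rx_state *rx)
{
	free(rx->ax_skb);
	rx->ax_skb = NULL;	/* else the next pass dereferences freed memory */
	rx->size = 0U;		/* and keeps copying into it */
}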
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 09699054b54f..03e8a15d7deb 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -256,8 +256,9 @@ static int mdio_read(struct net_device *dev, int phy_id, int loc)
 static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
 {
 	pegasus_t *pegasus = netdev_priv(dev);
+	u16 data = val;
 
-	write_mii_word(pegasus, phy_id, loc, (__u16 *)&val);
+	write_mii_word(pegasus, phy_id, loc, &data);
 }
 
 static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
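
Casting &val (an int) to a 16-bit pointer reads the low half only on little-endian machines; on big-endian the first two bytes are the high half, so small register values read back as zero. A compilable demo of the bug and the fix (write_reg() is a hypothetical stand-in; the cast is the bug being demonstrated):

#include <stdint.h>
#include <stdio.h>

static void write_reg(uint16_t *data)
{
	printf("writing 0x%04x\n", *data);
}

int main(void)
{
	int val = 0x1234;
	uint16_t data = val;	/* the fix: a genuine 16-bit object */

	write_reg((uint16_t *)&val);	/* buggy: 0x1234 on LE, 0x0000 on BE */
	write_reg(&data);		/* correct on any endianness */
	return 0;
}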
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 5a88e72090ce..834e405fb57a 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -548,6 +548,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x19d2, 0x0265, 4)},	/* ONDA MT8205 4G LTE */
 	{QMI_FIXED_INTF(0x19d2, 0x0284, 4)},	/* ZTE MF880 */
 	{QMI_FIXED_INTF(0x19d2, 0x0326, 4)},	/* ZTE MF821D */
+	{QMI_FIXED_INTF(0x19d2, 0x0412, 4)},	/* Telewell TW-LTE 4G */
 	{QMI_FIXED_INTF(0x19d2, 0x1008, 4)},	/* ZTE (Vodafone) K3570-Z */
 	{QMI_FIXED_INTF(0x19d2, 0x1010, 4)},	/* ZTE (Vodafone) K3571-Z */
 	{QMI_FIXED_INTF(0x19d2, 0x1012, 4)},
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index a2865f17c667..37984e6d4e99 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -51,9 +51,17 @@
  * This is the maximum slots a skb can have. If a guest sends a skb
  * which exceeds this limit it is considered malicious.
  */
-#define MAX_SKB_SLOTS_DEFAULT 20
-static unsigned int max_skb_slots = MAX_SKB_SLOTS_DEFAULT;
-module_param(max_skb_slots, uint, 0444);
+#define FATAL_SKB_SLOTS_DEFAULT 20
+static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
+module_param(fatal_skb_slots, uint, 0444);
+
+/*
+ * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
+ * the maximum slots a valid packet can use. Now this value is defined
+ * to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by
+ * all backend.
+ */
+#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
 
 typedef unsigned int pending_ring_idx_t;
 #define INVALID_PENDING_RING_IDX (~0U)
@@ -928,18 +936,20 @@ static void netbk_fatal_tx_err(struct xenvif *vif)
 
 static int netbk_count_requests(struct xenvif *vif,
 				struct xen_netif_tx_request *first,
-				RING_IDX first_idx,
 				struct xen_netif_tx_request *txp,
 				int work_to_do)
 {
 	RING_IDX cons = vif->tx.req_cons;
 	int slots = 0;
 	int drop_err = 0;
+	int more_data;
 
 	if (!(first->flags & XEN_NETTXF_more_data))
 		return 0;
 
 	do {
+		struct xen_netif_tx_request dropped_tx = { 0 };
+
 		if (slots >= work_to_do) {
 			netdev_err(vif->dev,
 				   "Asked for %d slots but exceeds this limit\n",
@@ -951,28 +961,32 @@ static int netbk_count_requests(struct xenvif *vif,
 		/* This guest is really using too many slots and
 		 * considered malicious.
 		 */
-		if (unlikely(slots >= max_skb_slots)) {
+		if (unlikely(slots >= fatal_skb_slots)) {
 			netdev_err(vif->dev,
 				   "Malicious frontend using %d slots, threshold %u\n",
-				   slots, max_skb_slots);
+				   slots, fatal_skb_slots);
 			netbk_fatal_tx_err(vif);
 			return -E2BIG;
 		}
 
 		/* Xen network protocol had implicit dependency on
-		 * MAX_SKB_FRAGS. XEN_NETIF_NR_SLOTS_MIN is set to the
-		 * historical MAX_SKB_FRAGS value 18 to honor the same
-		 * behavior as before. Any packet using more than 18
-		 * slots but less than max_skb_slots slots is dropped
+		 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
+		 * the historical MAX_SKB_FRAGS value 18 to honor the
+		 * same behavior as before. Any packet using more than
+		 * 18 slots but less than fatal_skb_slots slots is
+		 * dropped
 		 */
-		if (!drop_err && slots >= XEN_NETIF_NR_SLOTS_MIN) {
+		if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
 			if (net_ratelimit())
 				netdev_dbg(vif->dev,
 					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
-					   slots, XEN_NETIF_NR_SLOTS_MIN);
+					   slots, XEN_NETBK_LEGACY_SLOTS_MAX);
 			drop_err = -E2BIG;
 		}
 
+		if (drop_err)
+			txp = &dropped_tx;
+
 		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
 		       sizeof(*txp));
 
@@ -1002,10 +1016,16 @@ static int netbk_count_requests(struct xenvif *vif,
 			netbk_fatal_tx_err(vif);
 			return -EINVAL;
 		}
-	} while ((txp++)->flags & XEN_NETTXF_more_data);
+
+		more_data = txp->flags & XEN_NETTXF_more_data;
+
+		if (!drop_err)
+			txp++;
+
+	} while (more_data);
 
 	if (drop_err) {
-		netbk_tx_err(vif, first, first_idx + slots);
+		netbk_tx_err(vif, first, cons + slots);
 		return drop_err;
 	}
 
@@ -1042,7 +1062,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
 	struct pending_tx_info *first = NULL;
 
 	/* At this point shinfo->nr_frags is in fact the number of
-	 * slots, which can be as large as XEN_NETIF_NR_SLOTS_MIN.
+	 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
 	 */
 	nr_slots = shinfo->nr_frags;
 
@@ -1404,12 +1424,12 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 	struct sk_buff *skb;
 	int ret;
 
-	while ((nr_pending_reqs(netbk) + XEN_NETIF_NR_SLOTS_MIN
+	while ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX
 		< MAX_PENDING_REQS) &&
 		!list_empty(&netbk->net_schedule_list)) {
 		struct xenvif *vif;
 		struct xen_netif_tx_request txreq;
-		struct xen_netif_tx_request txfrags[max_skb_slots];
+		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
 		struct page *page;
 		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
 		u16 pending_idx;
@@ -1470,8 +1490,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 			continue;
 		}
 
-		ret = netbk_count_requests(vif, &txreq, idx,
-					   txfrags, work_to_do);
+		ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
 		if (unlikely(ret < 0))
 			continue;
 
@@ -1498,7 +1517,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 		pending_idx = netbk->pending_ring[index];
 
 		data_len = (txreq.size > PKT_PROT_LEN &&
-			    ret < XEN_NETIF_NR_SLOTS_MIN) ?
+			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
 			    PKT_PROT_LEN : txreq.size;
 
 		skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
@@ -1777,7 +1796,7 @@ static inline int rx_work_todo(struct xen_netbk *netbk)
 static inline int tx_work_todo(struct xen_netbk *netbk)
 {
 
-	if ((nr_pending_reqs(netbk) + XEN_NETIF_NR_SLOTS_MIN
+	if ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX
 	     < MAX_PENDING_REQS) &&
 	     !list_empty(&netbk->net_schedule_list))
 		return 1;
@@ -1862,11 +1881,11 @@ static int __init netback_init(void)
 	if (!xen_domain())
 		return -ENODEV;
 
-	if (max_skb_slots < XEN_NETIF_NR_SLOTS_MIN) {
+	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
 		printk(KERN_INFO
-		       "xen-netback: max_skb_slots too small (%d), bump it to XEN_NETIF_NR_SLOTS_MIN (%d)\n",
-		       max_skb_slots, XEN_NETIF_NR_SLOTS_MIN);
-		max_skb_slots = XEN_NETIF_NR_SLOTS_MIN;
+		       "xen-netback: fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
+		       fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
+		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
 	}
 
 	xen_netbk_group_nr = num_online_cpus();
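
The key change here is replacing txfrags[max_skb_slots], a variable-length array sized by a user-settable module parameter, with an array bounded by a compile-time constant: kernel stacks are small and fixed, so a large tunable could overflow them. A fragment sketching the shape of the fix, with stand-in names:

/* Compile-time bound, like XEN_NETBK_LEGACY_SLOTS_MAX. */
#define LEGACY_SLOTS_MAX 18

struct tx_request {
	unsigned long gref;
	unsigned int size;
};

static void build_gops(void)
{
	/* fixed stack footprint, regardless of any runtime tunable;
	 * anything needing more slots is dropped before it gets here
	 */
	struct tx_request txfrags[LEGACY_SLOTS_MAX];

	(void)txfrags;
}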
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 8af508536d36..3a8c8fd63c88 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -628,7 +628,7 @@ static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
 					     netdev_features_t features)
 {
 	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
-	u32 old_features = features;
+	netdev_features_t old_features = features;
 
 	features &= real_dev->vlan_features;
 	features |= NETIF_F_RXCSUM;
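
This and the two net/core fixes below are the same bug: netdev feature masks outgrew 32 bits, so storing one in a u32 (or plain int) silently drops every bit at position 32 and above. A compilable demo of the truncation; the feature names and bit positions here are illustrative, not the real NETIF_F_* values:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t features_t;		/* like netdev_features_t */

#define F_HW_CSUM   (1ULL << 3)
#define F_FANCY_NEW (1ULL << 40)	/* hypothetical bit above 31 */

int main(void)
{
	features_t features = F_HW_CSUM | F_FANCY_NEW;
	uint32_t truncated = features;	/* the old u32 temporary */

	printf("low bit kept: %d, high bit survived: %d\n",
	       !!(truncated & F_HW_CSUM),
	       !!((features_t)truncated & F_FANCY_NEW));	/* 1, 0 */
	return 0;
}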
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index c3530a81a33b..950663d4d330 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -107,7 +107,7 @@ static void br_tcn_timer_expired(unsigned long arg)
 
 	br_debug(br, "tcn timer expired\n");
 	spin_lock(&br->lock);
-	if (br->dev->flags & IFF_UP) {
+	if (!br_is_root_bridge(br) && (br->dev->flags & IFF_UP)) {
 		br_transmit_tcn(br);
 
 		mod_timer(&br->tcn_timer,jiffies + br->bridge_hello_time);
diff --git a/net/core/dev.c b/net/core/dev.c
index 4040673f806a..40b1fadaf637 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2456,7 +2456,7 @@ EXPORT_SYMBOL(netif_skb_features);
  *	2. skb is fragmented and the device does not support SG.
  */
 static inline int skb_needs_linearize(struct sk_buff *skb,
-				      int features)
+				      netdev_features_t features)
 {
 	return skb_is_nonlinear(skb) &&
 		((skb_has_frag_list(skb) &&
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 5a934ef90f8b..22efdaa76ebf 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1421,7 +1421,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
 	void __user *useraddr = ifr->ifr_data;
 	u32 ethcmd;
 	int rc;
-	u32 old_features;
+	netdev_features_t old_features;
 
 	if (!dev || !netif_device_present(dev))
 		return -ENODEV;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index c61b3bb87a16..d01be2a3ae53 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1293,6 +1293,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
 		       SKB_GSO_DODGY |
 		       SKB_GSO_TCP_ECN |
 		       SKB_GSO_GRE |
+		       SKB_GSO_TCPV6 |
 		       SKB_GSO_UDP_TUNNEL |
 		       0)))
 		goto out;
diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c
index d2d5a99fba09..cc22363965d2 100644
--- a/net/ipv4/gre.c
+++ b/net/ipv4/gre.c
@@ -121,6 +121,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
 	int ghl = GRE_HEADER_SECTION;
 	struct gre_base_hdr *greh;
 	int mac_len = skb->mac_len;
+	__be16 protocol = skb->protocol;
 	int tnl_hlen;
 	bool csum;
 
@@ -150,7 +151,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
 
 	/* setup inner skb. */
 	if (greh->protocol == htons(ETH_P_TEB)) {
-		struct ethhdr *eth = eth_hdr(skb);
+		struct ethhdr *eth = (struct ethhdr *)skb_inner_mac_header(skb);
 		skb->protocol = eth->h_proto;
 	} else {
 		skb->protocol = greh->protocol;
@@ -199,6 +200,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
 		skb_reset_mac_header(skb);
 		skb_set_network_header(skb, mac_len);
 		skb->mac_len = mac_len;
+		skb->protocol = protocol;
 	} while ((skb = skb->next));
 out:
 	return segs;
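
Both this GRE fix and the UDP-tunnel fix below use the same save/restore idiom: skb->protocol must point at the inner protocol while the inner packet is segmented (so a TCPv6 payload reaches the TCPv6 path), then be restored to the outer protocol on every resulting segment. A fragment sketching the idiom with stand-in types; the real code operates on struct sk_buff:

#include <stdint.h>

struct pkt {
	uint16_t protocol;
	struct pkt *next;
};

static void segment_tunnel(struct pkt *p, uint16_t inner_proto)
{
	uint16_t outer = p->protocol;	/* remember the tunnel's protocol */

	p->protocol = inner_proto;	/* let inner GSO pick the right handler */
	/* ... inner segmentation runs here, producing the p->next chain ... */

	for (; p; p = p->next)
		p->protocol = outer;	/* each segment leaves as a tunnel packet */
}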
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 6abbe6455129..0ae038a4c7a8 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2311,8 +2311,10 @@ static struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	int mac_len = skb->mac_len;
 	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
-	int outer_hlen;
+	struct ethhdr *inner_eth = (struct ethhdr *)skb_inner_mac_header(skb);
+	__be16 protocol = skb->protocol;
 	netdev_features_t enc_features;
+	int outer_hlen;
 
 	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
 		goto out;
@@ -2322,6 +2324,8 @@ static struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
 	skb_reset_mac_header(skb);
 	skb_set_network_header(skb, skb_inner_network_offset(skb));
 	skb->mac_len = skb_inner_network_offset(skb);
+	inner_eth = (struct ethhdr *)skb_mac_header(skb);
+	skb->protocol = inner_eth->h_proto;
 
 	/* segment inner packet. */
 	enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
@@ -2358,6 +2362,7 @@ static struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
 
 		}
 		skb->ip_summed = CHECKSUM_NONE;
+		skb->protocol = protocol;
 	} while ((skb = skb->next));
 out:
 	return segs;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index dd5cd49b0e09..8ec1bca7f859 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -742,36 +742,33 @@ static void prb_open_block(struct tpacket_kbdq_core *pkc1,
 
 	smp_rmb();
 
-	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd1))) {
-
-		/* We could have just memset this but we will lose the
-		 * flexibility of making the priv area sticky
-		 */
-		BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
-		BLOCK_NUM_PKTS(pbd1) = 0;
-		BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
-		getnstimeofday(&ts);
-		h1->ts_first_pkt.ts_sec = ts.tv_sec;
-		h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
-		pkc1->pkblk_start = (char *)pbd1;
-		pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
-		BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
-		BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
-		pbd1->version = pkc1->version;
-		pkc1->prev = pkc1->nxt_offset;
-		pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
-		prb_thaw_queue(pkc1);
-		_prb_refresh_rx_retire_blk_timer(pkc1);
-
-		smp_wmb();
-
-		return;
-	}
-
-	WARN(1, "ERROR block:%p is NOT FREE status:%d kactive_blk_num:%d\n",
-	     pbd1, BLOCK_STATUS(pbd1), pkc1->kactive_blk_num);
-	dump_stack();
-	BUG();
+	/* We could have just memset this but we will lose the
+	 * flexibility of making the priv area sticky
+	 */
+
+	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
+	BLOCK_NUM_PKTS(pbd1) = 0;
+	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
+
+	getnstimeofday(&ts);
+
+	h1->ts_first_pkt.ts_sec = ts.tv_sec;
+	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
+
+	pkc1->pkblk_start = (char *)pbd1;
+	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
+
+	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
+	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
+
+	pbd1->version = pkc1->version;
+	pkc1->prev = pkc1->nxt_offset;
+	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
+
+	prb_thaw_queue(pkc1);
+	_prb_refresh_rx_retire_blk_timer(pkc1);
+
+	smp_wmb();
 }
 
 /*
@@ -862,10 +859,6 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
 		prb_close_block(pkc, pbd, po, status);
 		return;
 	}
-
-	WARN(1, "ERROR-pbd[%d]:%p\n", pkc->kactive_blk_num, pbd);
-	dump_stack();
-	BUG();
 }
 
 static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
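
The removed BUG() calls were reachable from userspace: the TPACKET_V3 block headers live in a ring mmap()ed into the process, so their status words are attacker-controlled and a "can't happen" value must never crash the kernel. The merged fix drops the checks and initializes unconditionally; the fragment below sketches the general rule of tolerating, not BUG()ing on, userspace-reachable state (stand-in types, not the af_packet structures):

#include <stdint.h>

#define TP_STATUS_KERNEL 0	/* block currently owned by the kernel */

struct block_desc {
	volatile uint32_t status;	/* shared with userspace via mmap() */
};

static int open_block(struct block_desc *bd)
{
	if (bd->status != TP_STATUS_KERNEL)
		return -1;	/* drop/skip: user memory is not a kernel invariant */
	/* ... initialize the block for new packets ... */
	return 0;
}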
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 25e159c2feb4..e5f3da507823 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -584,8 +584,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
 {
 	int bp_index;
 
-	/*
-	 * Prepare broadcast link message for reliable transmission,
+	/* Prepare broadcast link message for reliable transmission,
 	 * if first time trying to send it;
 	 * preparation is skipped for broadcast link protocol messages
 	 * since they are sent in an unreliable manner and don't need it
@@ -611,30 +610,43 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
 	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
 		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
 		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
+		struct tipc_bearer *b = p;
+		struct sk_buff *tbuf;
 
 		if (!p)
-			break;	/* no more bearers to try */
+			break;	/* No more bearers to try */
+
+		if (tipc_bearer_blocked(p)) {
+			if (!s || tipc_bearer_blocked(s))
+				continue; /* Can't use either bearer */
+			b = s;
+		}
 
-		tipc_nmap_diff(&bcbearer->remains, &p->nodes, &bcbearer->remains_new);
+		tipc_nmap_diff(&bcbearer->remains, &b->nodes,
+			       &bcbearer->remains_new);
 		if (bcbearer->remains_new.count == bcbearer->remains.count)
-			continue;	/* bearer pair doesn't add anything */
+			continue; /* Nothing added by bearer pair */
 
-		if (!tipc_bearer_blocked(p))
-			tipc_bearer_send(p, buf, &p->bcast_addr);
-		else if (s && !tipc_bearer_blocked(s))
-			/* unable to send on primary bearer */
-			tipc_bearer_send(s, buf, &s->bcast_addr);
-		else
-			/* unable to send on either bearer */
-			continue;
+		if (bp_index == 0) {
+			/* Use original buffer for first bearer */
+			tipc_bearer_send(b, buf, &b->bcast_addr);
+		} else {
+			/* Avoid concurrent buffer access */
+			tbuf = pskb_copy(buf, GFP_ATOMIC);
+			if (!tbuf)
+				break;
+			tipc_bearer_send(b, tbuf, &b->bcast_addr);
+			kfree_skb(tbuf); /* Bearer keeps a clone */
+		}
 
+		/* Swap bearers for next packet */
 		if (s) {
 			bcbearer->bpairs[bp_index].primary = s;
 			bcbearer->bpairs[bp_index].secondary = p;
 		}
 
 		if (bcbearer->remains_new.count == 0)
-			break;	/* all targets reached */
+			break;	/* All targets reached */
 
 		bcbearer->remains = bcbearer->remains_new;
 	}
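
The buffer rule this fix enforces: a bearer's send path clones the skb and may later modify its header, so every bearer after the first must get its own pskb_copy() (header bytes copied, page frags shared) rather than the original buffer. A kernel-style sketch of that rule; send_on_all() and xmit are hypothetical, only pskb_copy()/kfree_skb() are the real APIs:

#include <linux/skbuff.h>

static void send_on_all(struct sk_buff *buf, int nbearers,
			void (*xmit)(int idx, struct sk_buff *skb))
{
	int i;

	xmit(0, buf);				/* first bearer may use the original */
	for (i = 1; i < nbearers; i++) {
		struct sk_buff *tbuf = pskb_copy(buf, GFP_ATOMIC);

		if (!tbuf)
			break;
		xmit(i, tbuf);
		kfree_skb(tbuf);		/* each bearer keeps its own clone */
	}
}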