author	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2012-12-17 18:22:27 -0500
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2012-12-17 18:22:27 -0500
commit	376bddd34433065aeb9b9a140870537feecf90ef (patch)
tree	a40e2b84ad89f4b3ba968de65a4bf7ff6ccae835 /drivers/net
parent	d526e85f60fce9aa2a1432cbd06e3cf20c1644c8 (diff)
parent	667b504a2c411e4d5915a6e2260a3857ba9f797a (diff)
Merge remote-tracking branch 'agust/next' into next

Brings some 52xx updates. Also manually merged tools/perf/perf.h.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/bonding/bond_main.c | 100
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 40
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb.c | 8
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_pro.c | 8
-rw-r--r--  drivers/net/ethernet/8390/ne.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 162
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 24
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 10
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 5
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/jme.c | 36
-rw-r--r--  drivers/net/ethernet/marvell/skge.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c | 2
-rw-r--r--  drivers/net/ethernet/micrel/ksz884x.c | 20
-rw-r--r--  drivers/net/ethernet/nxp/lpc_eth.c | 1
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c | 33
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 5
-rw-r--r--  drivers/net/ethernet/sis/sis900.c | 2
-rw-r--r--  drivers/net/ethernet/smsc/smsc911x.c | 17
-rw-r--r--  drivers/net/ethernet/tile/tilegx.c | 2
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 16
-rw-r--r--  drivers/net/ethernet/xscale/ixp4xx_eth.c | 8
-rw-r--r--  drivers/net/irda/sir_dev.c | 2
-rw-r--r--  drivers/net/phy/mdio-gpio.c | 11
-rw-r--r--  drivers/net/team/team.c | 4
-rw-r--r--  drivers/net/team/team_mode_broadcast.c | 6
-rw-r--r--  drivers/net/usb/cdc_eem.c | 3
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 22
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 1
-rw-r--r--  drivers/net/usb/smsc95xx.c | 5
-rw-r--r--  drivers/net/usb/usbnet.c | 8
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 65
-rw-r--r--  drivers/net/vxlan.c | 12
-rw-r--r--  drivers/net/wan/ixp4xx_hss.c | 10
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 10
-rw-r--r--  drivers/net/wireless/b43legacy/pio.c | 2
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/mac80211.c | 16
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/main.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/rxon.c | 12
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/rx.c | 23
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c | 8
-rw-r--r--  drivers/net/wireless/mwifiex/cmdevt.c | 11
-rw-r--r--  drivers/net/wireless/mwifiex/sdio.c | 11
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800lib.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/sw.c | 1
-rw-r--r--  drivers/net/xen-netfront.c | 98
50 files changed, 534 insertions(+), 328 deletions(-)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b2530b002125..a7d47350ea4b 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1379,6 +1379,8 @@ static void bond_compute_features(struct bonding *bond)
 	struct net_device *bond_dev = bond->dev;
 	netdev_features_t vlan_features = BOND_VLAN_FEATURES;
 	unsigned short max_hard_header_len = ETH_HLEN;
+	unsigned int gso_max_size = GSO_MAX_SIZE;
+	u16 gso_max_segs = GSO_MAX_SEGS;
 	int i;
 	unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
 
@@ -1394,11 +1396,16 @@ static void bond_compute_features(struct bonding *bond)
 		dst_release_flag &= slave->dev->priv_flags;
 		if (slave->dev->hard_header_len > max_hard_header_len)
 			max_hard_header_len = slave->dev->hard_header_len;
+
+		gso_max_size = min(gso_max_size, slave->dev->gso_max_size);
+		gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs);
 	}
 
 done:
 	bond_dev->vlan_features = vlan_features;
 	bond_dev->hard_header_len = max_hard_header_len;
+	bond_dev->gso_max_segs = gso_max_segs;
+	netif_set_gso_max_size(bond_dev, gso_max_size);
 
 	flags = bond_dev->priv_flags & ~IFF_XMIT_DST_RELEASE;
 	bond_dev->priv_flags = flags | dst_release_flag;
@@ -3452,6 +3459,28 @@ static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
 
 /*-------------------------- Device entry points ----------------------------*/
 
+static void bond_work_init_all(struct bonding *bond)
+{
+	INIT_DELAYED_WORK(&bond->mcast_work,
+			  bond_resend_igmp_join_requests_delayed);
+	INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
+	INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
+	if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
+		INIT_DELAYED_WORK(&bond->arp_work, bond_activebackup_arp_mon);
+	else
+		INIT_DELAYED_WORK(&bond->arp_work, bond_loadbalance_arp_mon);
+	INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
+}
+
+static void bond_work_cancel_all(struct bonding *bond)
+{
+	cancel_delayed_work_sync(&bond->mii_work);
+	cancel_delayed_work_sync(&bond->arp_work);
+	cancel_delayed_work_sync(&bond->alb_work);
+	cancel_delayed_work_sync(&bond->ad_work);
+	cancel_delayed_work_sync(&bond->mcast_work);
+}
+
 static int bond_open(struct net_device *bond_dev)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
@@ -3474,41 +3503,27 @@ static int bond_open(struct net_device *bond_dev)
 	}
 	read_unlock(&bond->lock);
 
-	INIT_DELAYED_WORK(&bond->mcast_work, bond_resend_igmp_join_requests_delayed);
+	bond_work_init_all(bond);
 
 	if (bond_is_lb(bond)) {
 		/* bond_alb_initialize must be called before the timer
 		 * is started.
 		 */
-		if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB))) {
-			/* something went wrong - fail the open operation */
+		if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB)))
 			return -ENOMEM;
-		}
-
-		INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
 		queue_delayed_work(bond->wq, &bond->alb_work, 0);
 	}
 
-	if (bond->params.miimon) {  /* link check interval, in milliseconds. */
-		INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
+	if (bond->params.miimon)  /* link check interval, in milliseconds. */
 		queue_delayed_work(bond->wq, &bond->mii_work, 0);
-	}
 
 	if (bond->params.arp_interval) {  /* arp interval, in milliseconds. */
-		if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
-			INIT_DELAYED_WORK(&bond->arp_work,
-					  bond_activebackup_arp_mon);
-		else
-			INIT_DELAYED_WORK(&bond->arp_work,
-					  bond_loadbalance_arp_mon);
-
 		queue_delayed_work(bond->wq, &bond->arp_work, 0);
 		if (bond->params.arp_validate)
 			bond->recv_probe = bond_arp_rcv;
 	}
 
 	if (bond->params.mode == BOND_MODE_8023AD) {
-		INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
 		queue_delayed_work(bond->wq, &bond->ad_work, 0);
 		/* register to receive LACPDUs */
 		bond->recv_probe = bond_3ad_lacpdu_recv;
@@ -3523,34 +3538,10 @@ static int bond_close(struct net_device *bond_dev)
 	struct bonding *bond = netdev_priv(bond_dev);
 
 	write_lock_bh(&bond->lock);
-
 	bond->send_peer_notif = 0;
-
 	write_unlock_bh(&bond->lock);
 
-	if (bond->params.miimon) {  /* link check interval, in milliseconds. */
-		cancel_delayed_work_sync(&bond->mii_work);
-	}
-
-	if (bond->params.arp_interval) {  /* arp interval, in milliseconds. */
-		cancel_delayed_work_sync(&bond->arp_work);
-	}
-
-	switch (bond->params.mode) {
-	case BOND_MODE_8023AD:
-		cancel_delayed_work_sync(&bond->ad_work);
-		break;
-	case BOND_MODE_TLB:
-	case BOND_MODE_ALB:
-		cancel_delayed_work_sync(&bond->alb_work);
-		break;
-	default:
-		break;
-	}
-
-	if (delayed_work_pending(&bond->mcast_work))
-		cancel_delayed_work_sync(&bond->mcast_work);
-
+	bond_work_cancel_all(bond);
 	if (bond_is_lb(bond)) {
 		/* Must be called only after all
 		 * slaves have been released
@@ -4429,26 +4420,6 @@ static void bond_setup(struct net_device *bond_dev)
 	bond_dev->features |= bond_dev->hw_features;
 }
 
-static void bond_work_cancel_all(struct bonding *bond)
-{
-	if (bond->params.miimon && delayed_work_pending(&bond->mii_work))
-		cancel_delayed_work_sync(&bond->mii_work);
-
-	if (bond->params.arp_interval && delayed_work_pending(&bond->arp_work))
-		cancel_delayed_work_sync(&bond->arp_work);
-
-	if (bond->params.mode == BOND_MODE_ALB &&
-	    delayed_work_pending(&bond->alb_work))
-		cancel_delayed_work_sync(&bond->alb_work);
-
-	if (bond->params.mode == BOND_MODE_8023AD &&
-	    delayed_work_pending(&bond->ad_work))
-		cancel_delayed_work_sync(&bond->ad_work);
-
-	if (delayed_work_pending(&bond->mcast_work))
-		cancel_delayed_work_sync(&bond->mcast_work);
-}
-
 /*
  * Destroy a bonding device.
  * Must be under rtnl_lock when this function is called.
@@ -4699,12 +4670,13 @@ static int bond_check_params(struct bond_params *params)
 		     arp_ip_count++) {
 			/* not complete check, but should be good enough to
 			   catch mistakes */
-			if (!isdigit(arp_ip_target[arp_ip_count][0])) {
+			__be32 ip = in_aton(arp_ip_target[arp_ip_count]);
+			if (!isdigit(arp_ip_target[arp_ip_count][0]) ||
+			    ip == 0 || ip == htonl(INADDR_BROADCAST)) {
 				pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
 					   arp_ip_target[arp_ip_count]);
 				arp_interval = 0;
 			} else {
-				__be32 ip = in_aton(arp_ip_target[arp_ip_count]);
 				arp_target[arp_ip_count] = ip;
 			}
 		}
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index dc15d248443f..1877ed7ca086 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -513,6 +513,8 @@ static ssize_t bonding_store_arp_interval(struct device *d,
 	int new_value, ret = count;
 	struct bonding *bond = to_bond(d);
 
+	if (!rtnl_trylock())
+		return restart_syscall();
 	if (sscanf(buf, "%d", &new_value) != 1) {
 		pr_err("%s: no arp_interval value specified.\n",
 		       bond->dev->name);
@@ -539,10 +541,6 @@ static ssize_t bonding_store_arp_interval(struct device *d,
 		pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
 			bond->dev->name, bond->dev->name);
 		bond->params.miimon = 0;
-		if (delayed_work_pending(&bond->mii_work)) {
-			cancel_delayed_work(&bond->mii_work);
-			flush_workqueue(bond->wq);
-		}
 	}
 	if (!bond->params.arp_targets[0]) {
 		pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
@@ -554,19 +552,12 @@ static ssize_t bonding_store_arp_interval(struct device *d,
 		 * timer will get fired off when the open function
 		 * is called.
 		 */
-		if (!delayed_work_pending(&bond->arp_work)) {
-			if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
-				INIT_DELAYED_WORK(&bond->arp_work,
-						  bond_activebackup_arp_mon);
-			else
-				INIT_DELAYED_WORK(&bond->arp_work,
-						  bond_loadbalance_arp_mon);
-
-			queue_delayed_work(bond->wq, &bond->arp_work, 0);
-		}
+		cancel_delayed_work_sync(&bond->mii_work);
+		queue_delayed_work(bond->wq, &bond->arp_work, 0);
 	}
 
 out:
+	rtnl_unlock();
 	return ret;
 }
 static DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR,
@@ -962,6 +953,8 @@ static ssize_t bonding_store_miimon(struct device *d,
 	int new_value, ret = count;
 	struct bonding *bond = to_bond(d);
 
+	if (!rtnl_trylock())
+		return restart_syscall();
 	if (sscanf(buf, "%d", &new_value) != 1) {
 		pr_err("%s: no miimon value specified.\n",
 		       bond->dev->name);
@@ -993,10 +986,6 @@ static ssize_t bonding_store_miimon(struct device *d,
 			bond->params.arp_validate =
 				BOND_ARP_VALIDATE_NONE;
 		}
-		if (delayed_work_pending(&bond->arp_work)) {
-			cancel_delayed_work(&bond->arp_work);
-			flush_workqueue(bond->wq);
-		}
 	}
 
 	if (bond->dev->flags & IFF_UP) {
@@ -1005,15 +994,12 @@ static ssize_t bonding_store_miimon(struct device *d,
 		 * timer will get fired off when the open function
 		 * is called.
 		 */
-		if (!delayed_work_pending(&bond->mii_work)) {
-			INIT_DELAYED_WORK(&bond->mii_work,
-					  bond_mii_monitor);
-			queue_delayed_work(bond->wq,
-					   &bond->mii_work, 0);
-		}
+		cancel_delayed_work_sync(&bond->arp_work);
+		queue_delayed_work(bond->wq, &bond->mii_work, 0);
 	}
 }
 out:
+	rtnl_unlock();
 	return ret;
 }
 static DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR,
@@ -1060,7 +1046,7 @@ static ssize_t bonding_store_primary(struct device *d,
 		goto out;
 	}
 
-	sscanf(buf, "%16s", ifname); /* IFNAMSIZ */
+	sscanf(buf, "%15s", ifname); /* IFNAMSIZ */
 
 	/* check to see if we are clearing primary */
 	if (!strlen(ifname) || buf[0] == '\n') {
@@ -1237,7 +1223,7 @@ static ssize_t bonding_store_active_slave(struct device *d,
 		goto out;
 	}
 
-	sscanf(buf, "%16s", ifname); /* IFNAMSIZ */
+	sscanf(buf, "%15s", ifname); /* IFNAMSIZ */
 
 	/* check to see if we are clearing active */
 	if (!strlen(ifname) || buf[0] == '\n') {
@@ -1582,6 +1568,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
 		goto out;
 	}
 
+	read_lock(&bond->lock);
 	bond_for_each_slave(bond, slave, i) {
 		if (!bond_is_active_slave(slave)) {
 			if (new_value)
@@ -1590,6 +1577,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
 				slave->inactive = 1;
 		}
 	}
+	read_unlock(&bond->lock);
 out:
 	return ret;
 }
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 86f26a1ede4c..25723d8ee201 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -519,8 +519,10 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
 		mc->pdev->dev.can.state = new_state;
 
 	if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) {
+		struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
+
 		peak_usb_get_ts_tv(&mc->pdev->time_ref, mc->ts16, &tv);
-		skb->tstamp = timeval_to_ktime(tv);
+		hwts->hwtstamp = timeval_to_ktime(tv);
 	}
 
 	netif_rx(skb);
@@ -605,6 +607,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
 	struct sk_buff *skb;
 	struct can_frame *cf;
 	struct timeval tv;
+	struct skb_shared_hwtstamps *hwts;
 
 	skb = alloc_can_skb(mc->netdev, &cf);
 	if (!skb)
@@ -652,7 +655,8 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
 
 	/* convert timestamp into kernel time */
 	peak_usb_get_ts_tv(&mc->pdev->time_ref, mc->ts16, &tv);
-	skb->tstamp = timeval_to_ktime(tv);
+	hwts = skb_hwtstamps(skb);
+	hwts->hwtstamp = timeval_to_ktime(tv);
 
 	/* push the skb */
 	netif_rx(skb);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index e1626d92511a..30d79bfa5b10 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -532,6 +532,7 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
 	struct can_frame *can_frame;
 	struct sk_buff *skb;
 	struct timeval tv;
+	struct skb_shared_hwtstamps *hwts;
 
 	skb = alloc_can_skb(netdev, &can_frame);
 	if (!skb)
@@ -549,7 +550,8 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
 	memcpy(can_frame->data, rx->data, can_frame->can_dlc);
 
 	peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(rx->ts32), &tv);
-	skb->tstamp = timeval_to_ktime(tv);
+	hwts = skb_hwtstamps(skb);
+	hwts->hwtstamp = timeval_to_ktime(tv);
 
 	netif_rx(skb);
 	netdev->stats.rx_packets++;
@@ -570,6 +572,7 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
 	u8 err_mask = 0;
 	struct sk_buff *skb;
 	struct timeval tv;
+	struct skb_shared_hwtstamps *hwts;
 
 	/* nothing should be sent while in BUS_OFF state */
 	if (dev->can.state == CAN_STATE_BUS_OFF)
@@ -664,7 +667,8 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
 	dev->can.state = new_state;
 
 	peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv);
-	skb->tstamp = timeval_to_ktime(tv);
+	hwts = skb_hwtstamps(skb);
+	hwts->hwtstamp = timeval_to_ktime(tv);
 	netif_rx(skb);
 	netdev->stats.rx_packets++;
 	netdev->stats.rx_bytes += can_frame->can_dlc;
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index d04911d33b64..47618e505355 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -813,6 +813,7 @@ static int __init ne_drv_probe(struct platform_device *pdev)
 		dev->irq = irq[this_dev];
 		dev->mem_end = bad[this_dev];
 	}
+	SET_NETDEV_DEV(dev, &pdev->dev);
 	err = do_ne_probe(dev);
 	if (err) {
 		free_netdev(dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index c65295dded39..6e5bdd1a31d9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1702,7 +1702,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
 				    SHMEM_EEE_ADV_STATUS_SHIFT);
 	if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) {
 		DP(BNX2X_MSG_ETHTOOL,
-		   "Direct manipulation of EEE advertisment is not supported\n");
+		   "Direct manipulation of EEE advertisement is not supported\n");
 		return -EINVAL;
 	}
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index e2e45ee5df33..f6cfdc6cf20f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -137,7 +137,16 @@
 #define LINK_20GTFD			LINK_STATUS_SPEED_AND_DUPLEX_20GTFD
 #define LINK_20GXFD			LINK_STATUS_SPEED_AND_DUPLEX_20GXFD
 
-
+#define LINK_UPDATE_MASK \
+			(LINK_STATUS_SPEED_AND_DUPLEX_MASK | \
+			 LINK_STATUS_LINK_UP | \
+			 LINK_STATUS_PHYSICAL_LINK_FLAG | \
+			 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE | \
+			 LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK | \
+			 LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK | \
+			 LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK | \
+			 LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE | \
+			 LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)
 
 #define SFP_EEPROM_CON_TYPE_ADDR		0x2
 	#define SFP_EEPROM_CON_TYPE_VAL_LC	0x7
@@ -3295,6 +3304,21 @@ static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
 		       DEFAULT_PHY_DEV_ADDR);
 }
 
+static void bnx2x_xgxs_specific_func(struct bnx2x_phy *phy,
+				     struct link_params *params,
+				     u32 action)
+{
+	struct bnx2x *bp = params->bp;
+	switch (action) {
+	case PHY_INIT:
+		/* Set correct devad */
+		REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + params->port*0x18, 0);
+		REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + params->port*0x18,
+		       phy->def_md_devad);
+		break;
+	}
+}
+
 static void bnx2x_xgxs_deassert(struct link_params *params)
 {
 	struct bnx2x *bp = params->bp;
@@ -3309,10 +3333,8 @@ static void bnx2x_xgxs_deassert(struct link_params *params)
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
 	udelay(500);
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
-
-	REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + port*0x18, 0);
-	REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
-	       params->phy[INT_PHY].def_md_devad);
+	bnx2x_xgxs_specific_func(&params->phy[INT_PHY], params,
+				 PHY_INIT);
 }
 
 static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
@@ -3545,14 +3567,11 @@ static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy,
 static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 					struct link_params *params,
 					struct link_vars *vars) {
-	u16 val16 = 0, lane, i;
+	u16 lane, i, cl72_ctrl, an_adv = 0;
+	u16 ucode_ver;
 	struct bnx2x *bp = params->bp;
 	static struct bnx2x_reg_set reg_set[] = {
 		{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
-		{MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0},
-		{MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0},
-		{MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0xff},
-		{MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0x5555},
 		{MDIO_PMA_DEVAD, MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0},
 		{MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0x7415},
 		{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190},
@@ -3565,12 +3584,19 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 		bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
 				 reg_set[i].val);
 
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &cl72_ctrl);
+	cl72_ctrl &= 0xf8ff;
+	cl72_ctrl |= 0x3800;
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, cl72_ctrl);
+
 	/* Check adding advertisement for 1G KX */
 	if (((vars->line_speed == SPEED_AUTO_NEG) &&
 	     (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
 	    (vars->line_speed == SPEED_1000)) {
 		u32 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2;
-		val16 |= (1<<5);
+		an_adv |= (1<<5);
 
 		/* Enable CL37 1G Parallel Detect */
 		bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, addr, 0x1);
@@ -3580,11 +3606,14 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 	     (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
 	    (vars->line_speed == SPEED_10000)) {
 		/* Check adding advertisement for 10G KR */
-		val16 |= (1<<7);
+		an_adv |= (1<<7);
 		/* Enable 10G Parallel Detect */
+		CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+				  MDIO_AER_BLOCK_AER_REG, 0);
+
 		bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
 				 MDIO_WC_REG_PAR_DET_10G_CTRL, 1);
-
+		bnx2x_set_aer_mmd(params, phy);
 		DP(NETIF_MSG_LINK, "Advertize 10G\n");
 	}
 
@@ -3604,7 +3633,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 
 	/* Advertised speeds */
 	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
-			 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16);
+			 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, an_adv);
 
 	/* Advertised and set FEC (Forward Error Correction) */
 	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
@@ -3628,9 +3657,10 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 	/* Set KR Autoneg Work-Around flag for Warpcore version older than D108
 	 */
 	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-			MDIO_WC_REG_UC_INFO_B1_VERSION, &val16);
-	if (val16 < 0xd108) {
-		DP(NETIF_MSG_LINK, "Enable AN KR work-around\n");
+			MDIO_WC_REG_UC_INFO_B1_VERSION, &ucode_ver);
+	if (ucode_ver < 0xd108) {
+		DP(NETIF_MSG_LINK, "Enable AN KR work-around. WC ver:0x%x\n",
+			       ucode_ver);
 		vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
 	}
 	bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
@@ -3651,21 +3681,16 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
 					struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
-	u16 i;
+	u16 val16, i, lane;
 	static struct bnx2x_reg_set reg_set[] = {
 		/* Disable Autoneg */
 		{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
-		{MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0},
 		{MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
 			0x3f00},
 		{MDIO_AN_DEVAD, MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0},
 		{MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0},
 		{MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1},
 		{MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0xa},
-		/* Disable CL36 PCS Tx */
-		{MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0},
-		/* Double Wide Single Data Rate @ pll rate */
-		{MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF},
 		/* Leave cl72 training enable, needed for KR */
 		{MDIO_PMA_DEVAD,
 		MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150,
@@ -3676,11 +3701,24 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
 		bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
 				 reg_set[i].val);
 
-	/* Leave CL72 enabled */
-	bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
-				 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
-				 0x3800);
+	lane = bnx2x_get_warpcore_lane(phy, params);
+	/* Global registers */
+	CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+			  MDIO_AER_BLOCK_AER_REG, 0);
+	/* Disable CL36 PCS Tx */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16);
+	val16 &= ~(0x0011 << lane);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16);
 
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16);
+	val16 |= (0x0303 << (lane << 1));
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16);
+	/* Restore AER */
+	bnx2x_set_aer_mmd(params, phy);
 	/* Set speed via PMA/PMD register */
 	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
 			 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040);
@@ -4303,7 +4341,7 @@ static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy,
 				      struct link_params *params)
 {
 	struct bnx2x *bp = params->bp;
-	u16 val16;
+	u16 val16, lane;
 	bnx2x_sfp_e3_set_transmitter(params, phy, 0);
 	bnx2x_set_mdio_clk(bp, params->chip_id, params->port);
 	bnx2x_set_aer_mmd(params, phy);
@@ -4340,6 +4378,30 @@ static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy,
 			 MDIO_WC_REG_XGXSBLK1_LANECTRL2,
 			 val16 & 0xff00);
 
+	lane = bnx2x_get_warpcore_lane(phy, params);
+	/* Disable CL36 PCS Tx */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16);
+	val16 |= (0x11 << lane);
+	if (phy->flags & FLAGS_WC_DUAL_MODE)
+		val16 |= (0x22 << lane);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16);
+
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16);
+	val16 &= ~(0x0303 << (lane << 1));
+	val16 |= (0x0101 << (lane << 1));
+	if (phy->flags & FLAGS_WC_DUAL_MODE) {
+		val16 &= ~(0x0c0c << (lane << 1));
+		val16 |= (0x0404 << (lane << 1));
+	}
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16);
+	/* Restore AER */
+	bnx2x_set_aer_mmd(params, phy);
+
 }
 
 static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
@@ -6296,15 +6358,7 @@ static int bnx2x_update_link_down(struct link_params *params,
 	vars->mac_type = MAC_TYPE_NONE;
 
 	/* Update shared memory */
-	vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK |
-			       LINK_STATUS_LINK_UP |
-			       LINK_STATUS_PHYSICAL_LINK_FLAG |
-			       LINK_STATUS_AUTO_NEGOTIATE_COMPLETE |
-			       LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK |
-			       LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK |
-			       LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK |
-			       LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE |
-			       LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE);
+	vars->link_status &= ~LINK_UPDATE_MASK;
 	vars->line_speed = 0;
 	bnx2x_update_mng(params, vars->link_status);
 
@@ -6452,6 +6506,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
 	u16 ext_phy_line_speed = 0, prev_line_speed = vars->line_speed;
 	u8 active_external_phy = INT_PHY;
 	vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
+	vars->link_status &= ~LINK_UPDATE_MASK;
 	for (phy_index = INT_PHY; phy_index < params->num_phys;
 	      phy_index++) {
 		phy_vars[phy_index].flow_ctrl = 0;
@@ -7579,7 +7634,7 @@ static void bnx2x_warpcore_power_module(struct link_params *params,
 static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 						 struct link_params *params,
 						 u16 addr, u8 byte_cnt,
-						 u8 *o_buf)
+						 u8 *o_buf, u8 is_init)
 {
 	int rc = 0;
 	u8 i, j = 0, cnt = 0;
@@ -7596,10 +7651,10 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 	/* 4 byte aligned address */
 	addr32 = addr & (~0x3);
 	do {
-		if (cnt == I2C_WA_PWR_ITER) {
+		if ((!is_init) && (cnt == I2C_WA_PWR_ITER)) {
 			bnx2x_warpcore_power_module(params, phy, 0);
 			/* Note that 100us are not enough here */
-			usleep_range(1000,1000);
+			usleep_range(1000, 2000);
 			bnx2x_warpcore_power_module(params, phy, 1);
 		}
 		rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt,
@@ -7719,7 +7774,7 @@ int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 		break;
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
 		rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, params, addr,
-							   byte_cnt, o_buf);
+							   byte_cnt, o_buf, 0);
 		break;
 	}
 	return rc;
@@ -7923,6 +7978,7 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
 
 {
 	u8 val;
+	int rc;
 	struct bnx2x *bp = params->bp;
 	u16 timeout;
 	/* Initialization time after hot-plug may take up to 300ms for
@@ -7930,8 +7986,14 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
 	 */
 
 	for (timeout = 0; timeout < 60; timeout++) {
-		if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val)
-		    == 0) {
+		if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+			rc = bnx2x_warpcore_read_sfp_module_eeprom(phy,
+								   params, 1,
+								   1, &val, 1);
+		else
+			rc = bnx2x_read_sfp_module_eeprom(phy, params, 1, 1,
+							  &val);
+		if (rc == 0) {
 			DP(NETIF_MSG_LINK,
 			   "SFP+ module initialization took %d ms\n",
 			   timeout * 5);
@@ -7939,7 +8001,8 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
 		}
 		usleep_range(5000, 10000);
 	}
-	return -EINVAL;
+	rc = bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val);
+	return rc;
 }
 
 static void bnx2x_8727_power_module(struct bnx2x *bp,
@@ -9878,7 +9941,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
 		else
 			rc = bnx2x_8483x_disable_eee(phy, params, vars);
 		if (rc) {
-			DP(NETIF_MSG_LINK, "Failed to set EEE advertisment\n");
+			DP(NETIF_MSG_LINK, "Failed to set EEE advertisement\n");
 			return rc;
 		}
 	} else {
@@ -10993,7 +11056,7 @@ static struct bnx2x_phy phy_xgxs = {
 	.format_fw_ver	= (format_fw_ver_t)NULL,
 	.hw_reset	= (hw_reset_t)NULL,
 	.set_link_led	= (set_link_led_t)NULL,
-	.phy_specific_func = (phy_specific_func_t)NULL
+	.phy_specific_func = (phy_specific_func_t)bnx2x_xgxs_specific_func
 };
 static struct bnx2x_phy phy_warpcore = {
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
@@ -11465,6 +11528,11 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
 			phy->media_type = ETH_PHY_BASE_T;
 			break;
 		case PORT_HW_CFG_NET_SERDES_IF_XFI:
+			phy->supported &= (SUPPORTED_1000baseT_Full |
+					   SUPPORTED_10000baseT_Full |
+					   SUPPORTED_FIBRE |
+					   SUPPORTED_Pause |
+					   SUPPORTED_Asym_Pause);
 			phy->media_type = ETH_PHY_XFP_FIBER;
 			break;
 		case PORT_HW_CFG_NET_SERDES_IF_SFI:
@@ -12919,7 +12987,7 @@ static u8 bnx2x_analyze_link_error(struct link_params *params,
 		DP(NETIF_MSG_LINK, "Analyze TX Fault\n");
 		break;
 	default:
-		DP(NETIF_MSG_LINK, "Analyze UNKOWN\n");
+		DP(NETIF_MSG_LINK, "Analyze UNKNOWN\n");
 	}
 	DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,
 	   old_status, status);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index d5648fc666bd..01611b33a93d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -6794,8 +6794,9 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
 
 	bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
 
+	bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
+
 	if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
-		bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
 
 		if (IS_MF(bp))
 			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
@@ -9544,10 +9545,13 @@ static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp)
  */
 static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
 {
-	u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
-	if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
-		BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
-		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << BP_FUNC(bp));
+	if (!CHIP_IS_E1x(bp)) {
+		u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
+		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
+			BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
+			REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
+			       1 << BP_FUNC(bp));
+		}
 	}
 }
 
@@ -11902,7 +11906,15 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
 	/* disable FCOE L2 queue for E1x */
 	if (CHIP_IS_E1x(bp))
 		bp->flags |= NO_FCOE_FLAG;
-
+	/* disable FCOE for 57840 device, until FW supports it */
+	switch (ent->driver_data) {
+	case BCM57840_O:
+	case BCM57840_4_10:
+	case BCM57840_2_20:
+	case BCM57840_MFO:
+	case BCM57840_MF:
+		bp->flags |= NO_FCOE_FLAG;
+	}
 #endif
 
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index c1cde11b0c6d..0df1284df497 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3416,16 +3416,6 @@ static int adap_init0_config(struct adapter *adapter, int reset)
 			 finicsum, cfcsum);
 
 	/*
-	 * If we're a pure NIC driver then disable all offloading facilities.
-	 * This will allow the firmware to optimize aspects of the hardware
-	 * configuration which will result in improved performance.
-	 */
-	caps_cmd.ofldcaps = 0;
-	caps_cmd.iscsicaps = 0;
-	caps_cmd.rdmacaps = 0;
-	caps_cmd.fcoecaps = 0;
-
-	/*
 	 * And now tell the firmware to use the configuration we just loaded.
 	 */
 	caps_cmd.op_to_write =
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 32eec15fe4c2..730ae2cfa49e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2519,6 +2519,7 @@ int t4_fw_bye(struct adapter *adap, unsigned int mbox)
 {
 	struct fw_bye_cmd c;
 
+	memset(&c, 0, sizeof(c));
 	INIT_CMD(c, BYE, WRITE);
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
@@ -2535,6 +2536,7 @@ int t4_early_init(struct adapter *adap, unsigned int mbox)
 {
 	struct fw_initialize_cmd c;
 
+	memset(&c, 0, sizeof(c));
 	INIT_CMD(c, INITIALIZE, WRITE);
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
@@ -2551,6 +2553,7 @@ int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
 {
 	struct fw_reset_cmd c;
 
+	memset(&c, 0, sizeof(c));
 	INIT_CMD(c, RESET, WRITE);
 	c.val = htonl(reset);
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
@@ -2828,7 +2831,7 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
 		     HOSTPAGESIZEPF7(sge_hps));
 
 	t4_set_reg_field(adap, SGE_CONTROL,
-			 INGPADBOUNDARY(INGPADBOUNDARY_MASK) |
+			 INGPADBOUNDARY_MASK |
 			 EGRSTATUSPAGESIZE_MASK,
 			 INGPADBOUNDARY(fl_align_log - 5) |
 			 EGRSTATUSPAGESIZE(stat_len != 64));
@@ -3278,6 +3281,7 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
 {
 	struct fw_vi_enable_cmd c;
 
+	memset(&c, 0, sizeof(c));
 	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
 			     FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
 	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 1d03dcdd5e56..19ac096cb07b 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1353,8 +1353,11 @@ static int gfar_restore(struct device *dev)
 	struct gfar_private *priv = dev_get_drvdata(dev);
 	struct net_device *ndev = priv->ndev;
 
-	if (!netif_running(ndev))
+	if (!netif_running(ndev)) {
+		netif_device_attach(ndev);
+
 		return 0;
+	}
 
 	gfar_init_bds(ndev);
 	init_registers(ndev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 56b20d17d0e4..116f0e901bee 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2673,6 +2673,9 @@ static int ixgbe_get_ts_info(struct net_device *dev,
 	case ixgbe_mac_X540:
 	case ixgbe_mac_82599EB:
 		info->so_timestamping =
+			SOF_TIMESTAMPING_TX_SOFTWARE |
+			SOF_TIMESTAMPING_RX_SOFTWARE |
+			SOF_TIMESTAMPING_SOFTWARE |
 			SOF_TIMESTAMPING_TX_HARDWARE |
 			SOF_TIMESTAMPING_RX_HARDWARE |
 			SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index f8064df10cc4..60ac46f4ac08 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1860,10 +1860,14 @@ jme_open(struct net_device *netdev)
 	jme_clear_pm(jme);
 	JME_NAPI_ENABLE(jme);
 
-	tasklet_enable(&jme->linkch_task);
-	tasklet_enable(&jme->txclean_task);
-	tasklet_hi_enable(&jme->rxclean_task);
-	tasklet_hi_enable(&jme->rxempty_task);
+	tasklet_init(&jme->linkch_task, jme_link_change_tasklet,
+		     (unsigned long) jme);
+	tasklet_init(&jme->txclean_task, jme_tx_clean_tasklet,
+		     (unsigned long) jme);
+	tasklet_init(&jme->rxclean_task, jme_rx_clean_tasklet,
+		     (unsigned long) jme);
+	tasklet_init(&jme->rxempty_task, jme_rx_empty_tasklet,
+		     (unsigned long) jme);
 
 	rc = jme_request_irq(jme);
 	if (rc)
@@ -1948,10 +1952,10 @@ jme_close(struct net_device *netdev)
 
 	JME_NAPI_DISABLE(jme);
 
-	tasklet_disable(&jme->linkch_task);
-	tasklet_disable(&jme->txclean_task);
-	tasklet_disable(&jme->rxclean_task);
-	tasklet_disable(&jme->rxempty_task);
+	tasklet_kill(&jme->linkch_task);
+	tasklet_kill(&jme->txclean_task);
+	tasklet_kill(&jme->rxclean_task);
+	tasklet_kill(&jme->rxempty_task);
 
 	jme_disable_rx_engine(jme);
 	jme_disable_tx_engine(jme);
@@ -3079,22 +3083,6 @@ jme_init_one(struct pci_dev *pdev,
 	tasklet_init(&jme->pcc_task,
 		     jme_pcc_tasklet,
 		     (unsigned long) jme);
-	tasklet_init(&jme->linkch_task,
-		     jme_link_change_tasklet,
-		     (unsigned long) jme);
-	tasklet_init(&jme->txclean_task,
-		     jme_tx_clean_tasklet,
-		     (unsigned long) jme);
-	tasklet_init(&jme->rxclean_task,
-		     jme_rx_clean_tasklet,
-		     (unsigned long) jme);
-	tasklet_init(&jme->rxempty_task,
-		     jme_rx_empty_tasklet,
-		     (unsigned long) jme);
-	tasklet_disable_nosync(&jme->linkch_task);
-	tasklet_disable_nosync(&jme->txclean_task);
-	tasklet_disable_nosync(&jme->rxclean_task);
-	tasklet_disable_nosync(&jme->rxempty_task);
 	jme->dpi.cur = PCC_P1;
 
 	jme->reg_ghc = 0;
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 9b9c2ac5c4c2..d19a143aa5a8 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -4026,7 +4026,7 @@ static void __devexit skge_remove(struct pci_dev *pdev)
 	dev0 = hw->dev[0];
 	unregister_netdev(dev0);
 
-	tasklet_disable(&hw->phy_task);
+	tasklet_kill(&hw->phy_task);
 
 	spin_lock_irq(&hw->hw_lock);
 	hw->intr_mask = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index 5d36795877cb..b799ab12a291 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -237,7 +237,7 @@ static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev,
 	if (err)
 		return err;
 
-	memcpy(priv->maxrate, tmp, sizeof(*priv->maxrate));
+	memcpy(priv->maxrate, tmp, sizeof(priv->maxrate));
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 318fee91c79d..69e01977a1dd 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -5407,8 +5407,8 @@ static int netdev_close(struct net_device *dev)
 	/* Delay for receive task to stop scheduling itself. */
 	msleep(2000 / HZ);
 
-	tasklet_disable(&hw_priv->rx_tasklet);
-	tasklet_disable(&hw_priv->tx_tasklet);
+	tasklet_kill(&hw_priv->rx_tasklet);
+	tasklet_kill(&hw_priv->tx_tasklet);
 	free_irq(dev->irq, hw_priv->dev);
 
 	transmit_cleanup(hw_priv, 0);
@@ -5459,8 +5459,10 @@ static int prepare_hardware(struct net_device *dev)
 	rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev);
 	if (rc)
 		return rc;
-	tasklet_enable(&hw_priv->rx_tasklet);
-	tasklet_enable(&hw_priv->tx_tasklet);
+	tasklet_init(&hw_priv->rx_tasklet, rx_proc_task,
+		     (unsigned long) hw_priv);
+	tasklet_init(&hw_priv->tx_tasklet, tx_proc_task,
+		     (unsigned long) hw_priv);
 
 	hw->promiscuous = 0;
 	hw->all_multi = 0;
@@ -7033,16 +7035,6 @@ static int __devinit pcidev_init(struct pci_dev *pdev,
 	spin_lock_init(&hw_priv->hwlock);
 	mutex_init(&hw_priv->lock);
 
-	/* tasklet is enabled. */
-	tasklet_init(&hw_priv->rx_tasklet, rx_proc_task,
-		     (unsigned long) hw_priv);
-	tasklet_init(&hw_priv->tx_tasklet, tx_proc_task,
-		     (unsigned long) hw_priv);
-
-	/* tasklet_enable will decrement the atomic counter. */
-	tasklet_disable(&hw_priv->rx_tasklet);
-	tasklet_disable(&hw_priv->tx_tasklet);
-
 	for (i = 0; i < TOTAL_PORT_NUM; i++)
 		init_waitqueue_head(&hw_priv->counter[i].counter);
 
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 53743f7a2ca9..af8b4142088c 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1524,6 +1524,7 @@ static int lpc_eth_drv_remove(struct platform_device *pdev)
1524 pldat->dma_buff_base_p); 1524 pldat->dma_buff_base_p);
1525 free_irq(ndev->irq, ndev); 1525 free_irq(ndev->irq, ndev);
1526 iounmap(pldat->net_base); 1526 iounmap(pldat->net_base);
1527 mdiobus_unregister(pldat->mii_bus);
1527 mdiobus_free(pldat->mii_bus); 1528 mdiobus_free(pldat->mii_bus);
1528 clk_disable(pldat->clk); 1529 clk_disable(pldat->clk);
1529 clk_put(pldat->clk); 1530 clk_put(pldat->clk);
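
The lpc_eth hunk adds the missing mdiobus_unregister() before mdiobus_free(). A hedged sketch of the usual MDIO bus setup and teardown ordering, with hypothetical helper names:

/* Sketch only: hypothetical probe/remove helpers, not the lpc_eth code. */
#include <linux/phy.h>

static struct mii_bus *my_mdio_setup(void)
{
    struct mii_bus *bus = mdiobus_alloc();

    if (!bus)
        return NULL;
    /* ... fill in bus->name, bus->id, bus->read, bus->write ... */
    if (mdiobus_register(bus)) {
        mdiobus_free(bus);      /* never registered, freeing is enough */
        return NULL;
    }
    return bus;
}

static void my_mdio_teardown(struct mii_bus *bus)
{
    /* Unregister before freeing so the PHY devices and sysfs entries
     * attached to the bus are torn down first. */
    mdiobus_unregister(bus);
    mdiobus_free(bus);
}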
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 1c818254b7be..609125a249d9 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -979,17 +979,6 @@ static void cp_init_hw (struct cp_private *cp)
979 cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0))); 979 cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
980 cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4))); 980 cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
981 981
982 cpw32_f(HiTxRingAddr, 0);
983 cpw32_f(HiTxRingAddr + 4, 0);
984
985 ring_dma = cp->ring_dma;
986 cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
987 cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
988
989 ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
990 cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
991 cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
992
993 cp_start_hw(cp); 982 cp_start_hw(cp);
994 cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */ 983 cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
995 984
@@ -1003,6 +992,17 @@ static void cp_init_hw (struct cp_private *cp)
1003 992
1004 cpw8(Config5, cpr8(Config5) & PMEStatus); 993 cpw8(Config5, cpr8(Config5) & PMEStatus);
1005 994
995 cpw32_f(HiTxRingAddr, 0);
996 cpw32_f(HiTxRingAddr + 4, 0);
997
998 ring_dma = cp->ring_dma;
999 cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
1000 cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
1001
1002 ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
1003 cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
1004 cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
1005
1006 cpw16(MultiIntr, 0); 1006 cpw16(MultiIntr, 0);
1007 1007
1008 cpw8_f(Cfg9346, Cfg9346_Lock); 1008 cpw8_f(Cfg9346, Cfg9346_Lock);
@@ -1060,17 +1060,22 @@ static int cp_init_rings (struct cp_private *cp)
1060 1060
1061static int cp_alloc_rings (struct cp_private *cp) 1061static int cp_alloc_rings (struct cp_private *cp)
1062{ 1062{
1063 struct device *d = &cp->pdev->dev;
1063 void *mem; 1064 void *mem;
1065 int rc;
1064 1066
1065 mem = dma_alloc_coherent(&cp->pdev->dev, CP_RING_BYTES, 1067 mem = dma_alloc_coherent(d, CP_RING_BYTES, &cp->ring_dma, GFP_KERNEL);
1066 &cp->ring_dma, GFP_KERNEL);
1067 if (!mem) 1068 if (!mem)
1068 return -ENOMEM; 1069 return -ENOMEM;
1069 1070
1070 cp->rx_ring = mem; 1071 cp->rx_ring = mem;
1071 cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE]; 1072 cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
1072 1073
1073 return cp_init_rings(cp); 1074 rc = cp_init_rings(cp);
1075 if (rc < 0)
1076 dma_free_coherent(d, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
1077
1078 return rc;
1074} 1079}
1075 1080
1076static void cp_clean_rings (struct cp_private *cp) 1081static void cp_clean_rings (struct cp_private *cp)
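
cp_alloc_rings() now frees the coherent ring buffer when cp_init_rings() fails instead of leaking it. A minimal sketch of that allocate/init/unwind pattern with the generic DMA API, using hypothetical names:

/* Sketch only: hypothetical ring setup mirroring the unwind in the hunk. */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int my_alloc_rings(struct device *d, size_t bytes,
                          void **ring, dma_addr_t *ring_dma,
                          int (*init)(void *))
{
    int rc;

    *ring = dma_alloc_coherent(d, bytes, ring_dma, GFP_KERNEL);
    if (!*ring)
        return -ENOMEM;

    rc = init(*ring);
    if (rc < 0)
        /* Undo the allocation on failure so the caller sees clean state. */
        dma_free_coherent(d, bytes, *ring, *ring_dma);

    return rc;
}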
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index e7ff886e8047..927aa33d4349 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -3827,6 +3827,8 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
3827 void __iomem *ioaddr = tp->mmio_addr; 3827 void __iomem *ioaddr = tp->mmio_addr;
3828 3828
3829 switch (tp->mac_version) { 3829 switch (tp->mac_version) {
3830 case RTL_GIGA_MAC_VER_25:
3831 case RTL_GIGA_MAC_VER_26:
3830 case RTL_GIGA_MAC_VER_29: 3832 case RTL_GIGA_MAC_VER_29:
3831 case RTL_GIGA_MAC_VER_30: 3833 case RTL_GIGA_MAC_VER_30:
3832 case RTL_GIGA_MAC_VER_32: 3834 case RTL_GIGA_MAC_VER_32:
@@ -4519,6 +4521,9 @@ static void rtl_set_rx_mode(struct net_device *dev)
4519 mc_filter[1] = swab32(data); 4521 mc_filter[1] = swab32(data);
4520 } 4522 }
4521 4523
4524 if (tp->mac_version == RTL_GIGA_MAC_VER_35)
4525 mc_filter[1] = mc_filter[0] = 0xffffffff;
4526
4522 RTL_W32(MAR0 + 4, mc_filter[1]); 4527 RTL_W32(MAR0 + 4, mc_filter[1]);
4523 RTL_W32(MAR0 + 0, mc_filter[0]); 4528 RTL_W32(MAR0 + 0, mc_filter[0]);
4524 4529
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index fb9f6b38511f..edf5edb13140 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -2479,7 +2479,7 @@ static int sis900_resume(struct pci_dev *pci_dev)
2479 netif_start_queue(net_dev); 2479 netif_start_queue(net_dev);
2480 2480
2481 /* Workaround for EDB */ 2481 /* Workaround for EDB */
2482 sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED); 2482 sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
2483 2483
2484 /* Enable all known interrupts by setting the interrupt mask. */ 2484 /* Enable all known interrupts by setting the interrupt mask. */
2485 sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE); 2485 sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 62d1baf111ea..c53c0f4e2ce3 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2110,7 +2110,7 @@ static void __devinit smsc911x_read_mac_address(struct net_device *dev)
2110static int __devinit smsc911x_init(struct net_device *dev) 2110static int __devinit smsc911x_init(struct net_device *dev)
2111{ 2111{
2112 struct smsc911x_data *pdata = netdev_priv(dev); 2112 struct smsc911x_data *pdata = netdev_priv(dev);
2113 unsigned int byte_test; 2113 unsigned int byte_test, mask;
2114 unsigned int to = 100; 2114 unsigned int to = 100;
2115 2115
2116 SMSC_TRACE(pdata, probe, "Driver Parameters:"); 2116 SMSC_TRACE(pdata, probe, "Driver Parameters:");
@@ -2130,9 +2130,22 @@ static int __devinit smsc911x_init(struct net_device *dev)
2130 /* 2130 /*
2131 * poll the READY bit in PMT_CTRL. Any other access to the device is 2131 * poll the READY bit in PMT_CTRL. Any other access to the device is
2132 * forbidden while this bit isn't set. Try for 100ms 2132 * forbidden while this bit isn't set. Try for 100ms
2133 *
2134 * Note that this test is done before the WORD_SWAP register is
2135 * programmed. So in some configurations the READY bit is at 16 before
2136 * WORD_SWAP is written to. This issue is worked around by waiting
2137 * until either bit 0 or bit 16 gets set in PMT_CTRL.
2138 *
2139 * SMSC has confirmed that checking bit 16 (marked as reserved in
2140 * the datasheet) is fine since these bits "will either never be set
2141 * or can only go high after READY does (so also indicate the device
2142 * is ready)".
2133 */ 2143 */
2134 while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to) 2144
2145 mask = PMT_CTRL_READY_ | swahw32(PMT_CTRL_READY_);
2146 while (!(smsc911x_reg_read(pdata, PMT_CTRL) & mask) && --to)
2135 udelay(1000); 2147 udelay(1000);
2148
2136 if (to == 0) { 2149 if (to == 0) {
2137 pr_err("Device not READY in 100ms aborting\n"); 2150 pr_err("Device not READY in 100ms aborting\n");
2138 return -ENODEV; 2151 return -ENODEV;
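
The smsc911x hunk polls for READY in either half of PMT_CTRL because the check runs before WORD_SWAP is programmed; swahw32() swaps the two 16-bit halves of a 32-bit word, so the mask covers bit 0 and bit 16 at once. A small userspace illustration of how that mask is built (the register constant is an assumption for illustration, not the real header value):

#include <stdint.h>
#include <stdio.h>

/* Assumed for illustration: READY in bit 0 of PMT_CTRL. */
#define PMT_CTRL_READY_ 0x00000001u

/* Userspace stand-in for the kernel's swahw32(): swap 16-bit halfwords. */
static uint32_t swahw32(uint32_t x)
{
    return (x >> 16) | (x << 16);
}

int main(void)
{
    uint32_t mask = PMT_CTRL_READY_ | swahw32(PMT_CTRL_READY_);

    /* 0x00010001: matches READY whether or not the halves are swapped. */
    printf("mask = 0x%08x\n", mask);
    return 0;
}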
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 4e9810013850..66e025ad5df1 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -917,7 +917,7 @@ static int tile_net_setup_interrupts(struct net_device *dev)
917 ingress_irq = rc; 917 ingress_irq = rc;
918 tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU); 918 tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU);
919 rc = request_irq(ingress_irq, tile_net_handle_ingress_irq, 919 rc = request_irq(ingress_irq, tile_net_handle_ingress_irq,
920 0, NULL, NULL); 920 0, "tile_net", NULL);
921 if (rc != 0) { 921 if (rc != 0) {
922 netdev_err(dev, "request_irq failed: %d\n", rc); 922 netdev_err(dev, "request_irq failed: %d\n", rc);
923 destroy_irq(ingress_irq); 923 destroy_irq(ingress_irq);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 0793299bd39e..a788501e978e 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -894,6 +894,8 @@ out:
894 return IRQ_HANDLED; 894 return IRQ_HANDLED;
895} 895}
896 896
897static void axienet_dma_err_handler(unsigned long data);
898
897/** 899/**
898 * axienet_open - Driver open routine. 900 * axienet_open - Driver open routine.
899 * @ndev: Pointer to net_device structure 901 * @ndev: Pointer to net_device structure
@@ -942,6 +944,10 @@ static int axienet_open(struct net_device *ndev)
942 phy_start(lp->phy_dev); 944 phy_start(lp->phy_dev);
943 } 945 }
944 946
947 /* Enable tasklets for Axi DMA error handling */
948 tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
949 (unsigned long) lp);
950
945 /* Enable interrupts for Axi DMA Tx */ 951 /* Enable interrupts for Axi DMA Tx */
946 ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev); 952 ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
947 if (ret) 953 if (ret)
@@ -950,8 +956,7 @@ static int axienet_open(struct net_device *ndev)
950 ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev); 956 ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
951 if (ret) 957 if (ret)
952 goto err_rx_irq; 958 goto err_rx_irq;
953 /* Enable tasklets for Axi DMA error handling */ 959
954 tasklet_enable(&lp->dma_err_tasklet);
955 return 0; 960 return 0;
956 961
957err_rx_irq: 962err_rx_irq:
@@ -960,6 +965,7 @@ err_tx_irq:
960 if (lp->phy_dev) 965 if (lp->phy_dev)
961 phy_disconnect(lp->phy_dev); 966 phy_disconnect(lp->phy_dev);
962 lp->phy_dev = NULL; 967 lp->phy_dev = NULL;
968 tasklet_kill(&lp->dma_err_tasklet);
963 dev_err(lp->dev, "request_irq() failed\n"); 969 dev_err(lp->dev, "request_irq() failed\n");
964 return ret; 970 return ret;
965} 971}
@@ -990,7 +996,7 @@ static int axienet_stop(struct net_device *ndev)
990 axienet_setoptions(ndev, lp->options & 996 axienet_setoptions(ndev, lp->options &
991 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 997 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
992 998
993 tasklet_disable(&lp->dma_err_tasklet); 999 tasklet_kill(&lp->dma_err_tasklet);
994 1000
995 free_irq(lp->tx_irq, ndev); 1001 free_irq(lp->tx_irq, ndev);
996 free_irq(lp->rx_irq, ndev); 1002 free_irq(lp->rx_irq, ndev);
@@ -1613,10 +1619,6 @@ static int __devinit axienet_of_probe(struct platform_device *op)
1613 goto err_iounmap_2; 1619 goto err_iounmap_2;
1614 } 1620 }
1615 1621
1616 tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
1617 (unsigned long) lp);
1618 tasklet_disable(&lp->dma_err_tasklet);
1619
1620 return 0; 1622 return 0;
1621 1623
1622err_iounmap_2: 1624err_iounmap_2:
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 98934bdf6acf..477d6729b17f 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1102,10 +1102,12 @@ static int init_queues(struct port *port)
1102{ 1102{
1103 int i; 1103 int i;
1104 1104
1105 if (!ports_open) 1105 if (!ports_open) {
1106 if (!(dma_pool = dma_pool_create(DRV_NAME, NULL, 1106 dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
1107 POOL_ALLOC_SIZE, 32, 0))) 1107 POOL_ALLOC_SIZE, 32, 0);
1108 if (!dma_pool)
1108 return -ENOMEM; 1109 return -ENOMEM;
1110 }
1109 1111
1110 if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL, 1112 if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
1111 &port->desc_tab_phys))) 1113 &port->desc_tab_phys)))
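
This hunk (and the matching one in ixp4xx_hss.c further down) stops passing NULL as the device to dma_pool_create(), so pool allocations are tied to the network device's DMA configuration. A hedged sketch of creating and using a pool bound to a real struct device, with made-up sizes and names:

/* Sketch only: hypothetical descriptor pool, not the ixp4xx code. */
#include <linux/dmapool.h>
#include <linux/gfp.h>

#define MY_POOL_SIZE  1024  /* assumed descriptor table size */
#define MY_POOL_ALIGN 32

static struct dma_pool *my_pool_create(struct device *dev)
{
    /* The device argument is what the patch above adds in place of NULL. */
    return dma_pool_create("my-descs", dev, MY_POOL_SIZE, MY_POOL_ALIGN, 0);
}

static void *my_desc_alloc(struct dma_pool *pool, dma_addr_t *phys)
{
    return dma_pool_alloc(pool, GFP_KERNEL, phys);
}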
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index 5039f08f5a5b..43e9ab4f4d7e 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -222,7 +222,7 @@ static void sirdev_config_fsm(struct work_struct *work)
222 break; 222 break;
223 223
224 case SIRDEV_STATE_DONGLE_SPEED: 224 case SIRDEV_STATE_DONGLE_SPEED:
225 if (dev->dongle_drv->reset) { 225 if (dev->dongle_drv->set_speed) {
226 ret = dev->dongle_drv->set_speed(dev, fsm->param); 226 ret = dev->dongle_drv->set_speed(dev, fsm->param);
227 if (ret < 0) { 227 if (ret < 0) {
228 fsm->result = ret; 228 fsm->result = ret;
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 899274f2f9b1..2ed1140df3e9 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -185,17 +185,20 @@ static int __devinit mdio_gpio_probe(struct platform_device *pdev)
185{ 185{
186 struct mdio_gpio_platform_data *pdata; 186 struct mdio_gpio_platform_data *pdata;
187 struct mii_bus *new_bus; 187 struct mii_bus *new_bus;
188 int ret; 188 int ret, bus_id;
189 189
190 if (pdev->dev.of_node) 190 if (pdev->dev.of_node) {
191 pdata = mdio_gpio_of_get_data(pdev); 191 pdata = mdio_gpio_of_get_data(pdev);
192 else 192 bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio");
193 } else {
193 pdata = pdev->dev.platform_data; 194 pdata = pdev->dev.platform_data;
195 bus_id = pdev->id;
196 }
194 197
195 if (!pdata) 198 if (!pdata)
196 return -ENODEV; 199 return -ENODEV;
197 200
198 new_bus = mdio_gpio_bus_init(&pdev->dev, pdata, pdev->id); 201 new_bus = mdio_gpio_bus_init(&pdev->dev, pdata, bus_id);
199 if (!new_bus) 202 if (!new_bus)
200 return -ENODEV; 203 return -ENODEV;
201 204
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index d44cca327588..ad86660fb8f9 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1794,10 +1794,12 @@ static void team_setup(struct net_device *dev)
1794 1794
1795 dev->features |= NETIF_F_LLTX; 1795 dev->features |= NETIF_F_LLTX;
1796 dev->features |= NETIF_F_GRO; 1796 dev->features |= NETIF_F_GRO;
1797 dev->hw_features = NETIF_F_HW_VLAN_TX | 1797 dev->hw_features = TEAM_VLAN_FEATURES |
1798 NETIF_F_HW_VLAN_TX |
1798 NETIF_F_HW_VLAN_RX | 1799 NETIF_F_HW_VLAN_RX |
1799 NETIF_F_HW_VLAN_FILTER; 1800 NETIF_F_HW_VLAN_FILTER;
1800 1801
1802 dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
1801 dev->features |= dev->hw_features; 1803 dev->features |= dev->hw_features;
1802} 1804}
1803 1805
diff --git a/drivers/net/team/team_mode_broadcast.c b/drivers/net/team/team_mode_broadcast.c
index 9db0171e9366..c5db428e73fa 100644
--- a/drivers/net/team/team_mode_broadcast.c
+++ b/drivers/net/team/team_mode_broadcast.c
@@ -29,8 +29,8 @@ static bool bc_transmit(struct team *team, struct sk_buff *skb)
29 if (last) { 29 if (last) {
30 skb2 = skb_clone(skb, GFP_ATOMIC); 30 skb2 = skb_clone(skb, GFP_ATOMIC);
31 if (skb2) { 31 if (skb2) {
32 ret = team_dev_queue_xmit(team, last, 32 ret = !team_dev_queue_xmit(team, last,
33 skb2); 33 skb2);
34 if (!sum_ret) 34 if (!sum_ret)
35 sum_ret = ret; 35 sum_ret = ret;
36 } 36 }
@@ -39,7 +39,7 @@ static bool bc_transmit(struct team *team, struct sk_buff *skb)
39 } 39 }
40 } 40 }
41 if (last) { 41 if (last) {
42 ret = team_dev_queue_xmit(team, last, skb); 42 ret = !team_dev_queue_xmit(team, last, skb);
43 if (!sum_ret) 43 if (!sum_ret)
44 sum_ret = ret; 44 sum_ret = ret;
45 } 45 }
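
bc_transmit() reports success as a bool, while team_dev_queue_xmit() follows the 0-on-success convention, so the added `!` turns the return code into a success flag before it feeds sum_ret. A tiny userspace sketch of that conversion, with hypothetical function names:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical transmit helper: 0 on success, nonzero on error. */
static int queue_xmit(int fail)
{
    return fail ? -1 : 0;
}

/* Returns true on success, mirroring the bc_transmit() fix above. */
static bool send_one(int fail)
{
    return !queue_xmit(fail);   /* 0 (success) -> true, nonzero -> false */
}

int main(void)
{
    printf("ok=%d, err=%d\n", send_one(0), send_one(1));
    return 0;
}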
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index c81e278629ff..08d55b6bf272 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -31,6 +31,7 @@
31#include <linux/usb/cdc.h> 31#include <linux/usb/cdc.h>
32#include <linux/usb/usbnet.h> 32#include <linux/usb/usbnet.h>
33#include <linux/gfp.h> 33#include <linux/gfp.h>
34#include <linux/if_vlan.h>
34 35
35 36
36/* 37/*
@@ -92,7 +93,7 @@ static int eem_bind(struct usbnet *dev, struct usb_interface *intf)
92 93
93 /* no jumbogram (16K) support for now */ 94 /* no jumbogram (16K) support for now */
94 95
95 dev->net->hard_header_len += EEM_HEAD + ETH_FCS_LEN; 96 dev->net->hard_header_len += EEM_HEAD + ETH_FCS_LEN + VLAN_HLEN;
96 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; 97 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
97 98
98 return 0; 99 return 0;
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 4cd582a4f625..74fab1a40156 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -540,10 +540,12 @@ advance:
540 (ctx->ether_desc == NULL) || (ctx->control != intf)) 540 (ctx->ether_desc == NULL) || (ctx->control != intf))
541 goto error; 541 goto error;
542 542
543 /* claim interfaces, if any */ 543 /* claim data interface, if different from control */
544 temp = usb_driver_claim_interface(driver, ctx->data, dev); 544 if (ctx->data != ctx->control) {
545 if (temp) 545 temp = usb_driver_claim_interface(driver, ctx->data, dev);
546 goto error; 546 if (temp)
547 goto error;
548 }
547 549
548 iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber; 550 iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
549 551
@@ -623,6 +625,10 @@ static void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
623 625
624 tasklet_kill(&ctx->bh); 626 tasklet_kill(&ctx->bh);
625 627
628 /* handle devices with combined control and data interface */
629 if (ctx->control == ctx->data)
630 ctx->data = NULL;
631
626 /* disconnect master --> disconnect slave */ 632 /* disconnect master --> disconnect slave */
627 if (intf == ctx->control && ctx->data) { 633 if (intf == ctx->control && ctx->data) {
628 usb_set_intfdata(ctx->data, NULL); 634 usb_set_intfdata(ctx->data, NULL);
@@ -1245,6 +1251,14 @@ static const struct usb_device_id cdc_devs[] = {
1245 .driver_info = (unsigned long) &wwan_info, 1251 .driver_info = (unsigned long) &wwan_info,
1246 }, 1252 },
1247 1253
1254 /* Huawei NCM devices disguised as vendor specific */
1255 { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x16),
1256 .driver_info = (unsigned long)&wwan_info,
1257 },
1258 { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46),
1259 .driver_info = (unsigned long)&wwan_info,
1260 },
1261
1248 /* Generic CDC-NCM devices */ 1262 /* Generic CDC-NCM devices */
1249 { USB_INTERFACE_INFO(USB_CLASS_COMM, 1263 { USB_INTERFACE_INFO(USB_CLASS_COMM,
1250 USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE), 1264 USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 3b566fa0f8e6..1ea91f4237f0 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -385,6 +385,7 @@ static const struct usb_device_id products[] = {
385 }, 385 },
386 386
387 /* 3. Combined interface devices matching on interface number */ 387 /* 3. Combined interface devices matching on interface number */
388 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
388 {QMI_FIXED_INTF(0x19d2, 0x0002, 1)}, 389 {QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
389 {QMI_FIXED_INTF(0x19d2, 0x0012, 1)}, 390 {QMI_FIXED_INTF(0x19d2, 0x0012, 1)},
390 {QMI_FIXED_INTF(0x19d2, 0x0017, 3)}, 391 {QMI_FIXED_INTF(0x19d2, 0x0017, 3)},
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 7479a5761d0d..362cb8cfeb92 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -184,7 +184,7 @@ static int smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
184 /* set the address, index & direction (read from PHY) */ 184 /* set the address, index & direction (read from PHY) */
185 phy_id &= dev->mii.phy_id_mask; 185 phy_id &= dev->mii.phy_id_mask;
186 idx &= dev->mii.reg_num_mask; 186 idx &= dev->mii.reg_num_mask;
187 addr = (phy_id << 11) | (idx << 6) | MII_READ_; 187 addr = (phy_id << 11) | (idx << 6) | MII_READ_ | MII_BUSY_;
188 ret = smsc95xx_write_reg(dev, MII_ADDR, addr); 188 ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
189 check_warn_goto_done(ret, "Error writing MII_ADDR"); 189 check_warn_goto_done(ret, "Error writing MII_ADDR");
190 190
@@ -221,7 +221,7 @@ static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
221 /* set the address, index & direction (write to PHY) */ 221 /* set the address, index & direction (write to PHY) */
222 phy_id &= dev->mii.phy_id_mask; 222 phy_id &= dev->mii.phy_id_mask;
223 idx &= dev->mii.reg_num_mask; 223 idx &= dev->mii.reg_num_mask;
224 addr = (phy_id << 11) | (idx << 6) | MII_WRITE_; 224 addr = (phy_id << 11) | (idx << 6) | MII_WRITE_ | MII_BUSY_;
225 ret = smsc95xx_write_reg(dev, MII_ADDR, addr); 225 ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
226 check_warn_goto_done(ret, "Error writing MII_ADDR"); 226 check_warn_goto_done(ret, "Error writing MII_ADDR");
227 227
@@ -1344,6 +1344,7 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
1344 } else { 1344 } else {
1345 u32 csum_preamble = smsc95xx_calc_csum_preamble(skb); 1345 u32 csum_preamble = smsc95xx_calc_csum_preamble(skb);
1346 skb_push(skb, 4); 1346 skb_push(skb, 4);
1347 cpu_to_le32s(&csum_preamble);
1347 memcpy(skb->data, &csum_preamble, 4); 1348 memcpy(skb->data, &csum_preamble, 4);
1348 } 1349 }
1349 } 1350 }
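
The tx_fixup change byte-swaps the checksum preamble to little-endian in place before copying it into the frame, so the on-wire layout no longer depends on host endianness. A small userspace sketch of writing a 32-bit value in a fixed little-endian layout, using manual shifts in place of the kernel's cpu_to_le32s():

#include <stdint.h>
#include <stdio.h>

/* Store a 32-bit value into a buffer in little-endian byte order,
 * independent of the host CPU's endianness. */
static void put_le32(uint8_t *buf, uint32_t v)
{
    buf[0] = v & 0xff;
    buf[1] = (v >> 8) & 0xff;
    buf[2] = (v >> 16) & 0xff;
    buf[3] = (v >> 24) & 0xff;
}

int main(void)
{
    uint8_t frame[4];
    uint32_t csum_preamble = 0x11223344;

    /* A raw memcpy() of the uint32_t would emit 44 33 22 11 only on
     * little-endian hosts; this helper emits that order everywhere. */
    put_le32(frame, csum_preamble);
    printf("%02x %02x %02x %02x\n", frame[0], frame[1], frame[2], frame[3]);
    return 0;
}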
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index cb04f900cc46..edb81ed06950 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -359,10 +359,12 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
359void usbnet_defer_kevent (struct usbnet *dev, int work) 359void usbnet_defer_kevent (struct usbnet *dev, int work)
360{ 360{
361 set_bit (work, &dev->flags); 361 set_bit (work, &dev->flags);
362 if (!schedule_work (&dev->kevent)) 362 if (!schedule_work (&dev->kevent)) {
363 netdev_err(dev->net, "kevent %d may have been dropped\n", work); 363 if (net_ratelimit())
364 else 364 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
365 } else {
365 netdev_dbg(dev->net, "kevent %d scheduled\n", work); 366 netdev_dbg(dev->net, "kevent %d scheduled\n", work);
367 }
366} 368}
367EXPORT_SYMBOL_GPL(usbnet_defer_kevent); 369EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
368 370
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index ce9d4f2c9776..0ae1bcc6da73 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -744,28 +744,43 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
744 744
745 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 745 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
746 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; 746 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
747 u32 buf_size;
747 748
748 tbi = tq->buf_info + tq->tx_ring.next2fill; 749 buf_offset = 0;
749 tbi->map_type = VMXNET3_MAP_PAGE; 750 len = skb_frag_size(frag);
750 tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag, 751 while (len) {
751 0, skb_frag_size(frag), 752 tbi = tq->buf_info + tq->tx_ring.next2fill;
752 DMA_TO_DEVICE); 753 if (len < VMXNET3_MAX_TX_BUF_SIZE) {
754 buf_size = len;
755 dw2 |= len;
756 } else {
757 buf_size = VMXNET3_MAX_TX_BUF_SIZE;
758 /* spec says that for TxDesc.len, 0 == 2^14 */
759 }
760 tbi->map_type = VMXNET3_MAP_PAGE;
761 tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
762 buf_offset, buf_size,
763 DMA_TO_DEVICE);
753 764
754 tbi->len = skb_frag_size(frag); 765 tbi->len = buf_size;
755 766
756 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; 767 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
757 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); 768 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
758 769
759 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr); 770 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
760 gdesc->dword[2] = cpu_to_le32(dw2 | skb_frag_size(frag)); 771 gdesc->dword[2] = cpu_to_le32(dw2);
761 gdesc->dword[3] = 0; 772 gdesc->dword[3] = 0;
762 773
763 dev_dbg(&adapter->netdev->dev, 774 dev_dbg(&adapter->netdev->dev,
764 "txd[%u]: 0x%llu %u %u\n", 775 "txd[%u]: 0x%llu %u %u\n",
765 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), 776 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
766 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]); 777 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
767 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); 778 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
768 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; 779 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
780
781 len -= buf_size;
782 buf_offset += buf_size;
783 }
769 } 784 }
770 785
771 ctx->eop_txd = gdesc; 786 ctx->eop_txd = gdesc;
@@ -886,6 +901,18 @@ vmxnet3_prepare_tso(struct sk_buff *skb,
886 } 901 }
887} 902}
888 903
904static int txd_estimate(const struct sk_buff *skb)
905{
906 int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
907 int i;
908
909 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
910 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
911
912 count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
913 }
914 return count;
915}
889 916
890/* 917/*
891 * Transmits a pkt thru a given tq 918 * Transmits a pkt thru a given tq
@@ -914,9 +941,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
914 union Vmxnet3_GenericDesc tempTxDesc; 941 union Vmxnet3_GenericDesc tempTxDesc;
915#endif 942#endif
916 943
917 /* conservatively estimate # of descriptors to use */ 944 count = txd_estimate(skb);
918 count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
919 skb_shinfo(skb)->nr_frags + 1;
920 945
921 ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP)); 946 ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
922 947
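
txd_estimate() replaces the old "nr_frags + 1" guess because a fragment larger than VMXNET3_MAX_TX_BUF_SIZE is now split across several descriptors in the mapping loop above. A userspace sketch of the per-fragment arithmetic; the 16 KB limit is inferred from the "0 == 2^14" comment and the fragment sizes are made up:

#include <stdio.h>

#define MAX_TX_BUF_SIZE (1u << 14)   /* assumed 16 KB per descriptor */

/* Descriptors needed for one buffer of 'len' bytes when each descriptor
 * carries at most MAX_TX_BUF_SIZE bytes (ceiling division). */
static unsigned int txd_needed(unsigned int len)
{
    return (len + MAX_TX_BUF_SIZE - 1) / MAX_TX_BUF_SIZE;
}

int main(void)
{
    unsigned int frags[] = { 1500, 16384, 40000 };
    unsigned int i, count = 1;   /* simplified: one slot for the linear part */

    for (i = 0; i < 3; i++)
        count += txd_needed(frags[i]);

    printf("descriptors needed: %u\n", count);   /* 1 + 1 + 1 + 3 = 6 */
    return 0;
}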
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 607976c00162..8b5c61917076 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * VXLAN: Virtual eXtensiable Local Area Network 2 * VXLAN: Virtual eXtensible Local Area Network
3 * 3 *
4 * Copyright (c) 2012 Vyatta Inc. 4 * Copyright (c) 2012 Vyatta Inc.
5 * 5 *
@@ -50,8 +50,8 @@
50 50
51#define VXLAN_N_VID (1u << 24) 51#define VXLAN_N_VID (1u << 24)
52#define VXLAN_VID_MASK (VXLAN_N_VID - 1) 52#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
53/* VLAN + IP header + UDP + VXLAN */ 53/* IP header + UDP + VXLAN + Ethernet header */
54#define VXLAN_HEADROOM (4 + 20 + 8 + 8) 54#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
55 55
56#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */ 56#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
57 57
@@ -816,7 +816,7 @@ static void vxlan_cleanup(unsigned long arg)
816 = container_of(p, struct vxlan_fdb, hlist); 816 = container_of(p, struct vxlan_fdb, hlist);
817 unsigned long timeout; 817 unsigned long timeout;
818 818
819 if (f->state == NUD_PERMANENT) 819 if (f->state & NUD_PERMANENT)
820 continue; 820 continue;
821 821
822 timeout = f->used + vxlan->age_interval * HZ; 822 timeout = f->used + vxlan->age_interval * HZ;
@@ -1102,6 +1102,10 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
1102 1102
1103 if (!tb[IFLA_MTU]) 1103 if (!tb[IFLA_MTU])
1104 dev->mtu = lowerdev->mtu - VXLAN_HEADROOM; 1104 dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;
1105
1106 /* update header length based on lower device */
1107 dev->hard_header_len = lowerdev->hard_header_len +
1108 VXLAN_HEADROOM;
1105 } 1109 }
1106 1110
1107 if (data[IFLA_VXLAN_TOS]) 1111 if (data[IFLA_VXLAN_TOS])
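
Two small correctness points in the vxlan hunks: the headroom is recalculated as IP + UDP + VXLAN + inner Ethernet (20 + 8 + 8 + 14 = 50 bytes), and the ageing loop now tests NUD_PERMANENT as a bit flag rather than with equality, since an entry's state word can carry several NUD_* bits. A tiny sketch of the flag-test difference; the flag values here are illustrative, not the real neighbour-state constants:

#include <stdio.h>

/* Illustrative flag values only. */
#define NUD_REACHABLE 0x02
#define NUD_PERMANENT 0x80

int main(void)
{
    unsigned int state = NUD_PERMANENT | NUD_REACHABLE;

    /* Equality misses combined states; a bitwise AND does not. */
    printf("== test: %d, & test: %d\n",
           state == NUD_PERMANENT, !!(state & NUD_PERMANENT));
    return 0;
}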
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 3f575afd8cfc..760776b3d66c 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -969,10 +969,12 @@ static int init_hdlc_queues(struct port *port)
969{ 969{
970 int i; 970 int i;
971 971
972 if (!ports_open) 972 if (!ports_open) {
973 if (!(dma_pool = dma_pool_create(DRV_NAME, NULL, 973 dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
974 POOL_ALLOC_SIZE, 32, 0))) 974 POOL_ALLOC_SIZE, 32, 0);
975 if (!dma_pool)
975 return -ENOMEM; 976 return -ENOMEM;
977 }
976 978
977 if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL, 979 if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
978 &port->desc_tab_phys))) 980 &port->desc_tab_phys)))
@@ -1363,7 +1365,7 @@ static int __devinit hss_init_one(struct platform_device *pdev)
1363 1365
1364 platform_set_drvdata(pdev, port); 1366 platform_set_drvdata(pdev, port);
1365 1367
1366 netdev_info(dev, "HSS-%i\n", port->id); 1368 netdev_info(dev, "initialized\n");
1367 return 0; 1369 return 0;
1368 1370
1369err_free_netdev: 1371err_free_netdev:
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 8e1559aba495..1829b445d0b0 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1456,7 +1456,7 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
1456 switch (type) { 1456 switch (type) {
1457 case ATH9K_RESET_POWER_ON: 1457 case ATH9K_RESET_POWER_ON:
1458 ret = ath9k_hw_set_reset_power_on(ah); 1458 ret = ath9k_hw_set_reset_power_on(ah);
1459 if (!ret) 1459 if (ret)
1460 ah->reset_power_on = true; 1460 ah->reset_power_on = true;
1461 break; 1461 break;
1462 case ATH9K_RESET_WARM: 1462 case ATH9K_RESET_WARM:
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 378bd70256b2..741918a2027b 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -312,6 +312,7 @@ static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
312 } 312 }
313 313
314 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list); 314 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
315 bf->bf_next = NULL;
315 list_del(&bf->list); 316 list_del(&bf->list);
316 317
317 spin_unlock_bh(&sc->tx.txbuflock); 318 spin_unlock_bh(&sc->tx.txbuflock);
@@ -393,7 +394,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
393 u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first; 394 u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
394 u32 ba[WME_BA_BMP_SIZE >> 5]; 395 u32 ba[WME_BA_BMP_SIZE >> 5];
395 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0; 396 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
396 bool rc_update = true; 397 bool rc_update = true, isba;
397 struct ieee80211_tx_rate rates[4]; 398 struct ieee80211_tx_rate rates[4];
398 struct ath_frame_info *fi; 399 struct ath_frame_info *fi;
399 int nframes; 400 int nframes;
@@ -437,13 +438,17 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
437 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK; 438 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
438 tid = ATH_AN_2_TID(an, tidno); 439 tid = ATH_AN_2_TID(an, tidno);
439 seq_first = tid->seq_start; 440 seq_first = tid->seq_start;
441 isba = ts->ts_flags & ATH9K_TX_BA;
440 442
441 /* 443 /*
442 * The hardware occasionally sends a tx status for the wrong TID. 444 * The hardware occasionally sends a tx status for the wrong TID.
443 * In this case, the BA status cannot be considered valid and all 445 * In this case, the BA status cannot be considered valid and all
444 * subframes need to be retransmitted 446 * subframes need to be retransmitted
447 *
448 * Only BlockAcks have a TID and therefore normal Acks cannot be
449 * checked
445 */ 450 */
446 if (tidno != ts->tid) 451 if (isba && tidno != ts->tid)
447 txok = false; 452 txok = false;
448 453
449 isaggr = bf_isaggr(bf); 454 isaggr = bf_isaggr(bf);
@@ -1774,6 +1779,7 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1774 list_add_tail(&bf->list, &bf_head); 1779 list_add_tail(&bf->list, &bf_head);
1775 bf->bf_state.bf_type = 0; 1780 bf->bf_state.bf_type = 0;
1776 1781
1782 bf->bf_next = NULL;
1777 bf->bf_lastbf = bf; 1783 bf->bf_lastbf = bf;
1778 ath_tx_fill_desc(sc, bf, txq, fi->framelen); 1784 ath_tx_fill_desc(sc, bf, txq, fi->framelen);
1779 ath_tx_txqaddbuf(sc, txq, &bf_head, false); 1785 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
diff --git a/drivers/net/wireless/b43legacy/pio.c b/drivers/net/wireless/b43legacy/pio.c
index 192251adf986..282eedec675e 100644
--- a/drivers/net/wireless/b43legacy/pio.c
+++ b/drivers/net/wireless/b43legacy/pio.c
@@ -382,7 +382,7 @@ static void cancel_transfers(struct b43legacy_pioqueue *queue)
382{ 382{
383 struct b43legacy_pio_txpacket *packet, *tmp_packet; 383 struct b43legacy_pio_txpacket *packet, *tmp_packet;
384 384
385 tasklet_disable(&queue->txtask); 385 tasklet_kill(&queue->txtask);
386 386
387 list_for_each_entry_safe(packet, tmp_packet, &queue->txrunning, list) 387 list_for_each_entry_safe(packet, tmp_packet, &queue->txrunning, list)
388 free_txpacket(packet, 0); 388 free_txpacket(packet, 0);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index a6f1e8166008..481345c23ded 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -4401,7 +4401,7 @@ static s32 brcmf_mode_to_nl80211_iftype(s32 mode)
4401 4401
4402static void brcmf_wiphy_pno_params(struct wiphy *wiphy) 4402static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
4403{ 4403{
4404#ifndef CONFIG_BRCMFISCAN 4404#ifndef CONFIG_BRCMISCAN
4405 /* scheduled scan settings */ 4405 /* scheduled scan settings */
4406 wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT; 4406 wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT;
4407 wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT; 4407 wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT;
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index ff8162d4c454..2d9eee93c743 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -521,7 +521,7 @@ static void iwlagn_mac_tx(struct ieee80211_hw *hw,
521 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); 521 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
522 522
523 if (iwlagn_tx_skb(priv, control->sta, skb)) 523 if (iwlagn_tx_skb(priv, control->sta, skb))
524 dev_kfree_skb_any(skb); 524 ieee80211_free_txskb(hw, skb);
525} 525}
526 526
527static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw, 527static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
@@ -1354,6 +1354,20 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
1354 vif_priv->ctx = ctx; 1354 vif_priv->ctx = ctx;
1355 ctx->vif = vif; 1355 ctx->vif = vif;
1356 1356
1357 /*
1358 * In SNIFFER device type, the firmware reports the FCS to
1359 * the host, rather than snipping it off. Unfortunately,
1360 * mac80211 doesn't (yet) provide a per-packet flag for
1361 * this, so that we have to set the hardware flag based
1362 * on the interfaces added. As the monitor interface can
1363 * only be present by itself, and will be removed before
1364 * other interfaces are added, this is safe.
1365 */
1366 if (vif->type == NL80211_IFTYPE_MONITOR)
1367 priv->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS;
1368 else
1369 priv->hw->flags &= ~IEEE80211_HW_RX_INCLUDES_FCS;
1370
1357 err = iwl_setup_interface(priv, ctx); 1371 err = iwl_setup_interface(priv, ctx);
1358 if (!err || reset) 1372 if (!err || reset)
1359 goto out; 1373 goto out;
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 7ff3f1430678..408132cf83c1 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -2114,7 +2114,7 @@ static void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
2114 2114
2115 info = IEEE80211_SKB_CB(skb); 2115 info = IEEE80211_SKB_CB(skb);
2116 iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]); 2116 iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
2117 dev_kfree_skb_any(skb); 2117 ieee80211_free_txskb(priv->hw, skb);
2118} 2118}
2119 2119
2120static void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) 2120static void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index 10896393e5a0..2830ea290502 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -1012,12 +1012,12 @@ static void iwl_calc_basic_rates(struct iwl_priv *priv,
1012 * As a consequence, it's not as complicated as it sounds, just add 1012 * As a consequence, it's not as complicated as it sounds, just add
1013 * any lower rates to the ACK rate bitmap. 1013 * any lower rates to the ACK rate bitmap.
1014 */ 1014 */
1015 if (IWL_RATE_11M_INDEX < lowest_present_ofdm) 1015 if (IWL_RATE_11M_INDEX < lowest_present_cck)
1016 ofdm |= IWL_RATE_11M_MASK >> IWL_FIRST_CCK_RATE; 1016 cck |= IWL_RATE_11M_MASK >> IWL_FIRST_CCK_RATE;
1017 if (IWL_RATE_5M_INDEX < lowest_present_ofdm) 1017 if (IWL_RATE_5M_INDEX < lowest_present_cck)
1018 ofdm |= IWL_RATE_5M_MASK >> IWL_FIRST_CCK_RATE; 1018 cck |= IWL_RATE_5M_MASK >> IWL_FIRST_CCK_RATE;
1019 if (IWL_RATE_2M_INDEX < lowest_present_ofdm) 1019 if (IWL_RATE_2M_INDEX < lowest_present_cck)
1020 ofdm |= IWL_RATE_2M_MASK >> IWL_FIRST_CCK_RATE; 1020 cck |= IWL_RATE_2M_MASK >> IWL_FIRST_CCK_RATE;
1021 /* 1M already there or needed so always add */ 1021 /* 1M already there or needed so always add */
1022 cck |= IWL_RATE_1M_MASK >> IWL_FIRST_CCK_RATE; 1022 cck |= IWL_RATE_1M_MASK >> IWL_FIRST_CCK_RATE;
1023 1023
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 17c8e5d82681..bb69f8f90b3b 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -321,6 +321,14 @@ static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
321 dma_map_page(trans->dev, page, 0, 321 dma_map_page(trans->dev, page, 0,
322 PAGE_SIZE << trans_pcie->rx_page_order, 322 PAGE_SIZE << trans_pcie->rx_page_order,
323 DMA_FROM_DEVICE); 323 DMA_FROM_DEVICE);
324 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
325 rxb->page = NULL;
326 spin_lock_irqsave(&rxq->lock, flags);
327 list_add(&rxb->list, &rxq->rx_used);
328 spin_unlock_irqrestore(&rxq->lock, flags);
329 __free_pages(page, trans_pcie->rx_page_order);
330 return;
331 }
324 /* dma address must be no more than 36 bits */ 332 /* dma address must be no more than 36 bits */
325 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); 333 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
326 /* and also 256 byte aligned! */ 334 /* and also 256 byte aligned! */
@@ -488,8 +496,19 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
488 dma_map_page(trans->dev, rxb->page, 0, 496 dma_map_page(trans->dev, rxb->page, 0,
489 PAGE_SIZE << trans_pcie->rx_page_order, 497 PAGE_SIZE << trans_pcie->rx_page_order,
490 DMA_FROM_DEVICE); 498 DMA_FROM_DEVICE);
491 list_add_tail(&rxb->list, &rxq->rx_free); 499 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
492 rxq->free_count++; 500 /*
501 * free the page(s) as well to not break
502 * the invariant that the items on the used
503 * list have no page(s)
504 */
505 __free_pages(rxb->page, trans_pcie->rx_page_order);
506 rxb->page = NULL;
507 list_add_tail(&rxb->list, &rxq->rx_used);
508 } else {
509 list_add_tail(&rxb->list, &rxq->rx_free);
510 rxq->free_count++;
511 }
493 } else 512 } else
494 list_add_tail(&rxb->list, &rxq->rx_used); 513 list_add_tail(&rxb->list, &rxq->rx_used);
495 spin_unlock_irqrestore(&rxq->lock, flags); 514 spin_unlock_irqrestore(&rxq->lock, flags);
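
Both rx.c hunks add a dma_mapping_error() check after dma_map_page() and put the buffer back on the used list, freeing the page where needed, instead of handing the hardware an invalid DMA address. A hedged sketch of that check in a hypothetical refill path:

/* Sketch only: hypothetical RX refill, not the iwlwifi code. */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static int my_rx_map(struct device *dev, struct page *page,
                     unsigned int order, dma_addr_t *addr)
{
    *addr = dma_map_page(dev, page, 0, PAGE_SIZE << order, DMA_FROM_DEVICE);
    if (dma_mapping_error(dev, *addr)) {
        /* Mapping failed: release the page and report the error instead
         * of queueing an unusable descriptor. */
        __free_pages(page, order);
        return -ENOMEM;
    }
    return 0;
}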
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 105e3af3c621..79a4ddc002d3 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -480,20 +480,12 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
480void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id) 480void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
481{ 481{
482 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 482 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
483 u16 rd_ptr, wr_ptr;
484 int n_bd = trans_pcie->txq[txq_id].q.n_bd;
485 483
486 if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) { 484 if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
487 WARN_ONCE(1, "queue %d not used", txq_id); 485 WARN_ONCE(1, "queue %d not used", txq_id);
488 return; 486 return;
489 } 487 }
490 488
491 rd_ptr = iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & (n_bd - 1);
492 wr_ptr = iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id));
493
494 WARN_ONCE(rd_ptr != wr_ptr, "queue %d isn't empty: [%d,%d]",
495 txq_id, rd_ptr, wr_ptr);
496
497 iwl_txq_set_inactive(trans, txq_id); 489 iwl_txq_set_inactive(trans, txq_id);
498 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); 490 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
499} 491}
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 8d465107f52b..ae9010ed58de 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -890,9 +890,6 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
890 return; 890 return;
891 } 891 }
892 cmd_node = adapter->curr_cmd; 892 cmd_node = adapter->curr_cmd;
893 if (cmd_node->wait_q_enabled)
894 adapter->cmd_wait_q.status = -ETIMEDOUT;
895
896 if (cmd_node) { 893 if (cmd_node) {
897 adapter->dbg.timeout_cmd_id = 894 adapter->dbg.timeout_cmd_id =
898 adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index]; 895 adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index];
@@ -938,6 +935,14 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
938 935
939 dev_err(adapter->dev, "ps_mode=%d ps_state=%d\n", 936 dev_err(adapter->dev, "ps_mode=%d ps_state=%d\n",
940 adapter->ps_mode, adapter->ps_state); 937 adapter->ps_mode, adapter->ps_state);
938
939 if (cmd_node->wait_q_enabled) {
940 adapter->cmd_wait_q.status = -ETIMEDOUT;
941 wake_up_interruptible(&adapter->cmd_wait_q.wait);
942 mwifiex_cancel_pending_ioctl(adapter);
943 /* reset cmd_sent flag to unblock new commands */
944 adapter->cmd_sent = false;
945 }
941 } 946 }
942 if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) 947 if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
943 mwifiex_init_fw_complete(adapter); 948 mwifiex_init_fw_complete(adapter);
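
The cmdevt.c hunk moves the wait_q_enabled handling below the `if (cmd_node)` test, since the old code dereferenced cmd_node before checking it for NULL, and the timeout path now also wakes the waiter and cancels the pending ioctl. A tiny userspace sketch of the check-before-dereference ordering, with a hypothetical struct:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct cmd { bool wait_q_enabled; };

static void handle_timeout(struct cmd *cmd_node)
{
    /* Wrong order (what the patch removes): dereferencing before the
     * NULL test crashes when cmd_node is NULL:
     *   if (cmd_node->wait_q_enabled) { ... }
     *   if (cmd_node) { ... }
     */
    if (cmd_node) {
        if (cmd_node->wait_q_enabled)
            printf("wake up waiter, cancel pending command\n");
    }
}

int main(void)
{
    struct cmd c = { .wait_q_enabled = true };

    handle_timeout(NULL);       /* safe with the fixed ordering */
    handle_timeout(&c);
    return 0;
}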
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index fc8a9bfa1248..82cf0fa2d9f6 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -161,7 +161,6 @@ static int mwifiex_sdio_suspend(struct device *dev)
161 struct sdio_mmc_card *card; 161 struct sdio_mmc_card *card;
162 struct mwifiex_adapter *adapter; 162 struct mwifiex_adapter *adapter;
163 mmc_pm_flag_t pm_flag = 0; 163 mmc_pm_flag_t pm_flag = 0;
164 int hs_actived = 0;
165 int i; 164 int i;
166 int ret = 0; 165 int ret = 0;
167 166
@@ -188,12 +187,14 @@ static int mwifiex_sdio_suspend(struct device *dev)
188 adapter = card->adapter; 187 adapter = card->adapter;
189 188
190 /* Enable the Host Sleep */ 189 /* Enable the Host Sleep */
191 hs_actived = mwifiex_enable_hs(adapter); 190 if (!mwifiex_enable_hs(adapter)) {
192 if (hs_actived) { 191 dev_err(adapter->dev, "cmd: failed to suspend\n");
193 pr_debug("cmd: suspend with MMC_PM_KEEP_POWER\n"); 192 return -EFAULT;
194 ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
195 } 193 }
196 194
195 dev_dbg(adapter->dev, "cmd: suspend with MMC_PM_KEEP_POWER\n");
196 ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
197
197 /* Indicate device suspended */ 198 /* Indicate device suspended */
198 adapter->is_suspended = true; 199 adapter->is_suspended = true;
199 200
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 01dc8891070c..59474ae0aec0 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -2449,7 +2449,7 @@ static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev)
2449 /* 2449 /*
2450 * Check if temperature compensation is supported. 2450 * Check if temperature compensation is supported.
2451 */ 2451 */
2452 if (tssi_bounds[4] == 0xff) 2452 if (tssi_bounds[4] == 0xff || step == 0xff)
2453 return 0; 2453 return 0;
2454 2454
2455 /* 2455 /*
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 9970c2b1b199..b7e6607e6b6d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -297,6 +297,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
297 /*=== Customer ID ===*/ 297 /*=== Customer ID ===*/
298 /****** 8188CU ********/ 298 /****** 8188CU ********/
299 {RTL_USB_DEVICE(0x050d, 0x1102, rtl92cu_hal_cfg)}, /*Belkin - Edimax*/ 299 {RTL_USB_DEVICE(0x050d, 0x1102, rtl92cu_hal_cfg)}, /*Belkin - Edimax*/
300 {RTL_USB_DEVICE(0x050d, 0x11f2, rtl92cu_hal_cfg)}, /*Belkin - ISY*/
300 {RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/ 301 {RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/
301 {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/ 302 {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
302 {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/ 303 {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index caa011008cd0..fc24eb9b3948 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -452,29 +452,85 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
452 /* Grant backend access to each skb fragment page. */ 452 /* Grant backend access to each skb fragment page. */
453 for (i = 0; i < frags; i++) { 453 for (i = 0; i < frags; i++) {
454 skb_frag_t *frag = skb_shinfo(skb)->frags + i; 454 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
455 struct page *page = skb_frag_page(frag);
455 456
456 tx->flags |= XEN_NETTXF_more_data; 457 len = skb_frag_size(frag);
458 offset = frag->page_offset;
457 459
458 id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); 460 /* Data must not cross a page boundary. */
459 np->tx_skbs[id].skb = skb_get(skb); 461 BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
460 tx = RING_GET_REQUEST(&np->tx, prod++);
461 tx->id = id;
462 ref = gnttab_claim_grant_reference(&np->gref_tx_head);
463 BUG_ON((signed short)ref < 0);
464 462
465 mfn = pfn_to_mfn(page_to_pfn(skb_frag_page(frag))); 463 /* Skip unused frames from start of page */
466 gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, 464 page += offset >> PAGE_SHIFT;
467 mfn, GNTMAP_readonly); 465 offset &= ~PAGE_MASK;
468 466
469 tx->gref = np->grant_tx_ref[id] = ref; 467 while (len > 0) {
470 tx->offset = frag->page_offset; 468 unsigned long bytes;
471 tx->size = skb_frag_size(frag); 469
472 tx->flags = 0; 470 BUG_ON(offset >= PAGE_SIZE);
471
472 bytes = PAGE_SIZE - offset;
473 if (bytes > len)
474 bytes = len;
475
476 tx->flags |= XEN_NETTXF_more_data;
477
478 id = get_id_from_freelist(&np->tx_skb_freelist,
479 np->tx_skbs);
480 np->tx_skbs[id].skb = skb_get(skb);
481 tx = RING_GET_REQUEST(&np->tx, prod++);
482 tx->id = id;
483 ref = gnttab_claim_grant_reference(&np->gref_tx_head);
484 BUG_ON((signed short)ref < 0);
485
486 mfn = pfn_to_mfn(page_to_pfn(page));
487 gnttab_grant_foreign_access_ref(ref,
488 np->xbdev->otherend_id,
489 mfn, GNTMAP_readonly);
490
491 tx->gref = np->grant_tx_ref[id] = ref;
492 tx->offset = offset;
493 tx->size = bytes;
494 tx->flags = 0;
495
496 offset += bytes;
497 len -= bytes;
498
499 /* Next frame */
500 if (offset == PAGE_SIZE && len) {
501 BUG_ON(!PageCompound(page));
502 page++;
503 offset = 0;
504 }
505 }
473 } 506 }
474 507
475 np->tx.req_prod_pvt = prod; 508 np->tx.req_prod_pvt = prod;
476} 509}
477 510
511/*
512 * Count how many ring slots are required to send the frags of this
513 * skb. Each frag might be a compound page.
514 */
515static int xennet_count_skb_frag_slots(struct sk_buff *skb)
516{
517 int i, frags = skb_shinfo(skb)->nr_frags;
518 int pages = 0;
519
520 for (i = 0; i < frags; i++) {
521 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
522 unsigned long size = skb_frag_size(frag);
523 unsigned long offset = frag->page_offset;
524
525 /* Skip unused frames from start of page */
526 offset &= ~PAGE_MASK;
527
528 pages += PFN_UP(offset + size);
529 }
530
531 return pages;
532}
533
478static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) 534static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
479{ 535{
480 unsigned short id; 536 unsigned short id;
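
xennet_count_skb_frag_slots() replaces the plain nr_frags count because a fragment backed by a compound page can span several PAGE_SIZE-sized ring slots; PFN_UP(offset + size) is the number of pages the fragment touches once the in-page offset is folded in. A userspace sketch of that arithmetic, assuming 4 KB pages:

#include <stdio.h>

#define PAGE_SIZE 4096u
#define PFN_UP(x) (((x) + PAGE_SIZE - 1) / PAGE_SIZE)

/* Ring slots needed for one fragment that starts 'offset' bytes into a
 * page and is 'size' bytes long: every touched page costs one slot. */
static unsigned int frag_slots(unsigned int offset, unsigned int size)
{
    offset &= PAGE_SIZE - 1;   /* keep only the in-page part, as the patch does */
    return PFN_UP(offset + size);
}

int main(void)
{
    /* A 6000-byte fragment starting 3000 bytes into a compound page
     * touches 3 pages (bytes 3000..4095, 4096..8191, 8192..8999). */
    printf("%u slots\n", frag_slots(3000, 6000));
    return 0;
}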
@@ -487,23 +543,23 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
487 grant_ref_t ref; 543 grant_ref_t ref;
488 unsigned long mfn; 544 unsigned long mfn;
489 int notify; 545 int notify;
490 int frags = skb_shinfo(skb)->nr_frags; 546 int slots;
491 unsigned int offset = offset_in_page(data); 547 unsigned int offset = offset_in_page(data);
492 unsigned int len = skb_headlen(skb); 548 unsigned int len = skb_headlen(skb);
493 unsigned long flags; 549 unsigned long flags;
494 550
495 frags += DIV_ROUND_UP(offset + len, PAGE_SIZE); 551 slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
496 if (unlikely(frags > MAX_SKB_FRAGS + 1)) { 552 xennet_count_skb_frag_slots(skb);
497 printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n", 553 if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
498 frags); 554 net_alert_ratelimited(
499 dump_stack(); 555 "xennet: skb rides the rocket: %d slots\n", slots);
500 goto drop; 556 goto drop;
501 } 557 }
502 558
503 spin_lock_irqsave(&np->tx_lock, flags); 559 spin_lock_irqsave(&np->tx_lock, flags);
504 560
505 if (unlikely(!netif_carrier_ok(dev) || 561 if (unlikely(!netif_carrier_ok(dev) ||
506 (frags > 1 && !xennet_can_sg(dev)) || 562 (slots > 1 && !xennet_can_sg(dev)) ||
507 netif_needs_gso(skb, netif_skb_features(skb)))) { 563 netif_needs_gso(skb, netif_skb_features(skb)))) {
508 spin_unlock_irqrestore(&np->tx_lock, flags); 564 spin_unlock_irqrestore(&np->tx_lock, flags);
509 goto drop; 565 goto drop;