aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/atm/solos-pci.c29
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c2
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c352
-rw-r--r--drivers/misc/iwmc3200top/main.c30
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/arm/ks8695net.c35
-rw-r--r--drivers/net/arm/w90p910_ether.c4
-rw-r--r--drivers/net/atl1c/atl1c_main.c2
-rw-r--r--drivers/net/bnx2x.h21
-rw-r--r--drivers/net/bnx2x_main.c370
-rw-r--r--drivers/net/bonding/bond_3ad.c85
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/can/Kconfig62
-rw-r--r--drivers/net/can/Makefile1
-rw-r--r--drivers/net/can/dev.c6
-rw-r--r--drivers/net/can/mcp251x.c18
-rw-r--r--drivers/net/can/mscan/Kconfig23
-rw-r--r--drivers/net/can/mscan/Makefile5
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c259
-rw-r--r--drivers/net/can/mscan/mscan.c668
-rw-r--r--drivers/net/can/mscan/mscan.h296
-rw-r--r--drivers/net/can/sja1000/Kconfig47
-rw-r--r--drivers/net/can/sja1000/sja1000.c2
-rw-r--r--drivers/net/can/usb/Kconfig10
-rw-r--r--drivers/net/can/usb/Makefile2
-rw-r--r--drivers/net/davinci_emac.c3
-rw-r--r--drivers/net/dm9000.c143
-rw-r--r--drivers/net/dm9000.h7
-rw-r--r--drivers/net/ethoc.c2
-rw-r--r--drivers/net/forcedeth.c5
-rw-r--r--drivers/net/gianfar.c43
-rw-r--r--drivers/net/gianfar_sysfs.c2
-rw-r--r--drivers/net/hamradio/mkiss.c2
-rw-r--r--drivers/net/igb/igb.h13
-rw-r--r--drivers/net/igb/igb_ethtool.c181
-rw-r--r--drivers/net/igb/igb_main.c213
-rw-r--r--drivers/net/ipg.c2
-rw-r--r--drivers/net/irda/irda-usb.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c8
-rw-r--r--drivers/net/macvlan.c78
-rw-r--r--drivers/net/niu.c2
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c2
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c2
-rw-r--r--drivers/net/ppp_async.c2
-rw-r--r--drivers/net/ppp_generic.c13
-rw-r--r--drivers/net/pppoe.c2
-rw-r--r--drivers/net/pppol2tp.c4
-rw-r--r--drivers/net/qlge/qlge.h2
-rw-r--r--drivers/net/qlge/qlge_main.c26
-rw-r--r--drivers/net/r6040.c4
-rw-r--r--drivers/net/r8169.c4
-rw-r--r--drivers/net/s2io.c1
-rw-r--r--drivers/net/smsc911x.c2
-rw-r--r--drivers/net/smsc9420.c2
-rw-r--r--drivers/net/tg3.c706
-rw-r--r--drivers/net/tg3.h34
-rw-r--r--drivers/net/tokenring/3c359.c3
-rw-r--r--drivers/net/tokenring/olympic.c4
-rw-r--r--drivers/net/typhoon.c2
-rw-r--r--drivers/net/via-rhine.c2
-rw-r--r--drivers/net/via-velocity.c2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_defs.h246
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c359
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c10
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h12
-rw-r--r--drivers/net/wan/dscc4.c2
-rw-r--r--drivers/net/wireless/adm8211.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c6
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c141
-rw-r--r--drivers/net/wireless/ipw2x00/libipw.h8
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_module.c42
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-rs.c2
-rw-r--r--drivers/net/wireless/p54/p54pci.c2
-rw-r--r--drivers/net/wireless/p54/p54usb.c10
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_dev.c2
-rw-r--r--drivers/s390/net/Makefile6
-rw-r--r--drivers/s390/net/claw.c82
-rw-r--r--drivers/s390/net/claw.h12
-rw-r--r--drivers/s390/net/ctcm_fsms.c1
-rw-r--r--drivers/s390/net/ctcm_fsms.h1
-rw-r--r--drivers/s390/net/ctcm_main.c109
-rw-r--r--drivers/s390/net/ctcm_main.h20
-rw-r--r--drivers/s390/net/ctcm_mpc.c1
-rw-r--r--drivers/s390/net/ctcm_sysfs.c11
-rw-r--r--drivers/s390/net/cu3088.c148
-rw-r--r--drivers/s390/net/cu3088.h41
-rw-r--r--drivers/s390/net/fsm.c1
-rw-r--r--drivers/s390/net/fsm.h2
-rw-r--r--drivers/s390/net/lcs.c101
-rw-r--r--drivers/s390/net/lcs.h18
-rw-r--r--drivers/s390/net/netiucv.c4
-rw-r--r--drivers/s390/net/qeth_core.h6
-rw-r--r--drivers/s390/net/qeth_core_main.c214
-rw-r--r--drivers/s390/net/qeth_core_mpc.h45
-rw-r--r--drivers/s390/net/qeth_core_sys.c83
-rw-r--r--drivers/s390/net/qeth_l2_main.c29
-rw-r--r--drivers/s390/net/qeth_l3.h2
-rw-r--r--drivers/s390/net/qeth_l3_main.c142
-rw-r--r--drivers/s390/net/qeth_l3_sys.c67
99 files changed, 3975 insertions, 1873 deletions
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index d7ad19d2603..51eed679a05 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -531,34 +531,37 @@ static int flash_upgrade(struct solos_card *card, int chip)
531 int numblocks = 0; 531 int numblocks = 0;
532 int offset; 532 int offset;
533 533
534 if (chip == 0) { 534 switch (chip) {
535 case 0:
535 fw_name = "solos-FPGA.bin"; 536 fw_name = "solos-FPGA.bin";
536 blocksize = FPGA_BLOCK; 537 blocksize = FPGA_BLOCK;
537 } 538 break;
538 539 case 1:
539 if (chip == 1) {
540 fw_name = "solos-Firmware.bin"; 540 fw_name = "solos-Firmware.bin";
541 blocksize = SOLOS_BLOCK; 541 blocksize = SOLOS_BLOCK;
542 } 542 break;
543 543 case 2:
544 if (chip == 2){
545 if (card->fpga_version > LEGACY_BUFFERS){ 544 if (card->fpga_version > LEGACY_BUFFERS){
546 fw_name = "solos-db-FPGA.bin"; 545 fw_name = "solos-db-FPGA.bin";
547 blocksize = FPGA_BLOCK; 546 blocksize = FPGA_BLOCK;
548 } else { 547 } else {
549 dev_info(&card->dev->dev, "FPGA version doesn't support daughter board upgrades\n"); 548 dev_info(&card->dev->dev, "FPGA version doesn't support"
549 " daughter board upgrades\n");
550 return -EPERM; 550 return -EPERM;
551 } 551 }
552 } 552 break;
553 553 case 3:
554 if (chip == 3){
555 if (card->fpga_version > LEGACY_BUFFERS){ 554 if (card->fpga_version > LEGACY_BUFFERS){
556 fw_name = "solos-Firmware.bin"; 555 fw_name = "solos-Firmware.bin";
557 blocksize = SOLOS_BLOCK; 556 blocksize = SOLOS_BLOCK;
558 } else { 557 } else {
559 dev_info(&card->dev->dev, "FPGA version doesn't support daughter board upgrades\n"); 558 dev_info(&card->dev->dev, "FPGA version doesn't support"
560 return -EPERM; 559 " daughter board upgrades\n");
560 return -EPERM;
561 } 561 }
562 break;
563 default:
564 return -ENODEV;
562 } 565 }
563 566
564 if (request_firmware(&fw, fw_name, &card->dev->dev)) 567 if (request_firmware(&fw, fw_name, &card->dev->dev))
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index faed794cf75..a6624ad252c 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -5481,7 +5481,7 @@ HFCmulti_init(void)
5481 if (err) { 5481 if (err) {
5482 printk(KERN_ERR "error registering embedded driver: " 5482 printk(KERN_ERR "error registering embedded driver: "
5483 "%x\n", err); 5483 "%x\n", err);
5484 return -err; 5484 return err;
5485 } 5485 }
5486 HFC_cnt++; 5486 HFC_cnt++;
5487 printk(KERN_INFO "%d devices registered\n", HFC_cnt); 5487 printk(KERN_INFO "%d devices registered\n", HFC_cnt);
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index 2d14b64202a..642d5aaf53c 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -1535,10 +1535,8 @@ static int isdn_ppp_mp_bundle_array_init(void)
1535 int sz = ISDN_MAX_CHANNELS*sizeof(ippp_bundle); 1535 int sz = ISDN_MAX_CHANNELS*sizeof(ippp_bundle);
1536 if( (isdn_ppp_bundle_arr = kzalloc(sz, GFP_KERNEL)) == NULL ) 1536 if( (isdn_ppp_bundle_arr = kzalloc(sz, GFP_KERNEL)) == NULL )
1537 return -ENOMEM; 1537 return -ENOMEM;
1538 for (i = 0; i < ISDN_MAX_CHANNELS; i++) { 1538 for( i = 0; i < ISDN_MAX_CHANNELS; i++ )
1539 spin_lock_init(&isdn_ppp_bundle_arr[i].lock); 1539 spin_lock_init(&isdn_ppp_bundle_arr[i].lock);
1540 skb_queue_head_init(&isdn_ppp_bundle_arr[i].frags);
1541 }
1542 return 0; 1540 return 0;
1543} 1541}
1544 1542
@@ -1571,7 +1569,7 @@ static int isdn_ppp_mp_init( isdn_net_local * lp, ippp_bundle * add_to )
1571 if ((lp->netdev->pb = isdn_ppp_mp_bundle_alloc()) == NULL) 1569 if ((lp->netdev->pb = isdn_ppp_mp_bundle_alloc()) == NULL)
1572 return -ENOMEM; 1570 return -ENOMEM;
1573 lp->next = lp->last = lp; /* nobody else in a queue */ 1571 lp->next = lp->last = lp; /* nobody else in a queue */
1574 skb_queue_head_init(&lp->netdev->pb->frags); 1572 lp->netdev->pb->frags = NULL;
1575 lp->netdev->pb->frames = 0; 1573 lp->netdev->pb->frames = 0;
1576 lp->netdev->pb->seq = UINT_MAX; 1574 lp->netdev->pb->seq = UINT_MAX;
1577 } 1575 }
@@ -1583,29 +1581,28 @@ static int isdn_ppp_mp_init( isdn_net_local * lp, ippp_bundle * add_to )
1583 1581
1584static u32 isdn_ppp_mp_get_seq( int short_seq, 1582static u32 isdn_ppp_mp_get_seq( int short_seq,
1585 struct sk_buff * skb, u32 last_seq ); 1583 struct sk_buff * skb, u32 last_seq );
1586static void isdn_ppp_mp_discard(ippp_bundle *mp, struct sk_buff *from, 1584static struct sk_buff * isdn_ppp_mp_discard( ippp_bundle * mp,
1587 struct sk_buff *to); 1585 struct sk_buff * from, struct sk_buff * to );
1588static void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp, 1586static void isdn_ppp_mp_reassembly( isdn_net_dev * net_dev, isdn_net_local * lp,
1589 struct sk_buff *from, struct sk_buff *to, 1587 struct sk_buff * from, struct sk_buff * to );
1590 u32 lastseq); 1588static void isdn_ppp_mp_free_skb( ippp_bundle * mp, struct sk_buff * skb );
1591static void isdn_ppp_mp_free_skb(ippp_bundle *mp, struct sk_buff *skb);
1592static void isdn_ppp_mp_print_recv_pkt( int slot, struct sk_buff * skb ); 1589static void isdn_ppp_mp_print_recv_pkt( int slot, struct sk_buff * skb );
1593 1590
1594static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp, 1591static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
1595 struct sk_buff *skb) 1592 struct sk_buff *skb)
1596{ 1593{
1597 struct sk_buff *newfrag, *frag, *start, *nextf;
1598 u32 newseq, minseq, thisseq;
1599 isdn_mppp_stats *stats;
1600 struct ippp_struct *is; 1594 struct ippp_struct *is;
1595 isdn_net_local * lpq;
1596 ippp_bundle * mp;
1597 isdn_mppp_stats * stats;
1598 struct sk_buff * newfrag, * frag, * start, *nextf;
1599 u32 newseq, minseq, thisseq;
1601 unsigned long flags; 1600 unsigned long flags;
1602 isdn_net_local *lpq;
1603 ippp_bundle *mp;
1604 int slot; 1601 int slot;
1605 1602
1606 spin_lock_irqsave(&net_dev->pb->lock, flags); 1603 spin_lock_irqsave(&net_dev->pb->lock, flags);
1607 mp = net_dev->pb; 1604 mp = net_dev->pb;
1608 stats = &mp->stats; 1605 stats = &mp->stats;
1609 slot = lp->ppp_slot; 1606 slot = lp->ppp_slot;
1610 if (slot < 0 || slot >= ISDN_MAX_CHANNELS) { 1607 if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
1611 printk(KERN_ERR "%s: lp->ppp_slot(%d)\n", 1608 printk(KERN_ERR "%s: lp->ppp_slot(%d)\n",
@@ -1616,19 +1613,20 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
1616 return; 1613 return;
1617 } 1614 }
1618 is = ippp_table[slot]; 1615 is = ippp_table[slot];
1619 if (++mp->frames > stats->max_queue_len) 1616 if( ++mp->frames > stats->max_queue_len )
1620 stats->max_queue_len = mp->frames; 1617 stats->max_queue_len = mp->frames;
1621 1618
1622 if (is->debug & 0x8) 1619 if (is->debug & 0x8)
1623 isdn_ppp_mp_print_recv_pkt(lp->ppp_slot, skb); 1620 isdn_ppp_mp_print_recv_pkt(lp->ppp_slot, skb);
1624 1621
1625 newseq = isdn_ppp_mp_get_seq(is->mpppcfg & SC_IN_SHORT_SEQ, 1622 newseq = isdn_ppp_mp_get_seq(is->mpppcfg & SC_IN_SHORT_SEQ,
1626 skb, is->last_link_seqno); 1623 skb, is->last_link_seqno);
1624
1627 1625
1628 /* if this packet seq # is less than last already processed one, 1626 /* if this packet seq # is less than last already processed one,
1629 * toss it right away, but check for sequence start case first 1627 * toss it right away, but check for sequence start case first
1630 */ 1628 */
1631 if (mp->seq > MP_LONGSEQ_MAX && (newseq & MP_LONGSEQ_MAXBIT)) { 1629 if( mp->seq > MP_LONGSEQ_MAX && (newseq & MP_LONGSEQ_MAXBIT) ) {
1632 mp->seq = newseq; /* the first packet: required for 1630 mp->seq = newseq; /* the first packet: required for
1633 * rfc1990 non-compliant clients -- 1631 * rfc1990 non-compliant clients --
1634 * prevents constant packet toss */ 1632 * prevents constant packet toss */
@@ -1638,7 +1636,7 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
1638 spin_unlock_irqrestore(&mp->lock, flags); 1636 spin_unlock_irqrestore(&mp->lock, flags);
1639 return; 1637 return;
1640 } 1638 }
1641 1639
1642 /* find the minimum received sequence number over all links */ 1640 /* find the minimum received sequence number over all links */
1643 is->last_link_seqno = minseq = newseq; 1641 is->last_link_seqno = minseq = newseq;
1644 for (lpq = net_dev->queue;;) { 1642 for (lpq = net_dev->queue;;) {
@@ -1659,31 +1657,22 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
1659 * packets */ 1657 * packets */
1660 newfrag = skb; 1658 newfrag = skb;
1661 1659
1662 /* Insert new fragment into the proper sequence slot. */ 1660 /* if this new fragment is before the first one, then enqueue it now. */
1663 skb_queue_walk(&mp->frags, frag) { 1661 if ((frag = mp->frags) == NULL || MP_LT(newseq, MP_SEQ(frag))) {
1664 if (MP_SEQ(frag) == newseq) { 1662 newfrag->next = frag;
1665 isdn_ppp_mp_free_skb(mp, newfrag); 1663 mp->frags = frag = newfrag;
1666 newfrag = NULL; 1664 newfrag = NULL;
1667 break; 1665 }
1668 }
1669 if (MP_LT(newseq, MP_SEQ(frag))) {
1670 __skb_queue_before(&mp->frags, frag, newfrag);
1671 newfrag = NULL;
1672 break;
1673 }
1674 }
1675 if (newfrag)
1676 __skb_queue_tail(&mp->frags, newfrag);
1677 1666
1678 frag = skb_peek(&mp->frags); 1667 start = MP_FLAGS(frag) & MP_BEGIN_FRAG &&
1679 start = ((MP_FLAGS(frag) & MP_BEGIN_FRAG) && 1668 MP_SEQ(frag) == mp->seq ? frag : NULL;
1680 (MP_SEQ(frag) == mp->seq)) ? frag : NULL;
1681 if (!start)
1682 goto check_overflow;
1683 1669
1684 /* main fragment traversing loop 1670 /*
1671 * main fragment traversing loop
1685 * 1672 *
1686 * try to accomplish several tasks: 1673 * try to accomplish several tasks:
1674 * - insert new fragment into the proper sequence slot (once that's done
1675 * newfrag will be set to NULL)
1687 * - reassemble any complete fragment sequence (non-null 'start' 1676 * - reassemble any complete fragment sequence (non-null 'start'
1688 * indicates there is a continguous sequence present) 1677 * indicates there is a continguous sequence present)
1689 * - discard any incomplete sequences that are below minseq -- due 1678 * - discard any incomplete sequences that are below minseq -- due
@@ -1692,46 +1681,71 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
1692 * come to complete such sequence and it should be discarded 1681 * come to complete such sequence and it should be discarded
1693 * 1682 *
1694 * loop completes when we accomplished the following tasks: 1683 * loop completes when we accomplished the following tasks:
1684 * - new fragment is inserted in the proper sequence ('newfrag' is
1685 * set to NULL)
1695 * - we hit a gap in the sequence, so no reassembly/processing is 1686 * - we hit a gap in the sequence, so no reassembly/processing is
1696 * possible ('start' would be set to NULL) 1687 * possible ('start' would be set to NULL)
1697 * 1688 *
1698 * algorithm for this code is derived from code in the book 1689 * algorithm for this code is derived from code in the book
1699 * 'PPP Design And Debugging' by James Carlson (Addison-Wesley) 1690 * 'PPP Design And Debugging' by James Carlson (Addison-Wesley)
1700 */ 1691 */
1701 skb_queue_walk_safe(&mp->frags, frag, nextf) { 1692 while (start != NULL || newfrag != NULL) {
1702 thisseq = MP_SEQ(frag); 1693
1703 1694 thisseq = MP_SEQ(frag);
1704 /* check for misplaced start */ 1695 nextf = frag->next;
1705 if (start != frag && (MP_FLAGS(frag) & MP_BEGIN_FRAG)) { 1696
1706 printk(KERN_WARNING"isdn_mppp(seq %d): new " 1697 /* drop any duplicate fragments */
1707 "BEGIN flag with no prior END", thisseq); 1698 if (newfrag != NULL && thisseq == newseq) {
1708 stats->seqerrs++; 1699 isdn_ppp_mp_free_skb(mp, newfrag);
1709 stats->frame_drops++; 1700 newfrag = NULL;
1710 isdn_ppp_mp_discard(mp, start, frag); 1701 }
1711 start = frag; 1702
1712 } else if (MP_LE(thisseq, minseq)) { 1703 /* insert new fragment before next element if possible. */
1713 if (MP_FLAGS(frag) & MP_BEGIN_FRAG) 1704 if (newfrag != NULL && (nextf == NULL ||
1705 MP_LT(newseq, MP_SEQ(nextf)))) {
1706 newfrag->next = nextf;
1707 frag->next = nextf = newfrag;
1708 newfrag = NULL;
1709 }
1710
1711 if (start != NULL) {
1712 /* check for misplaced start */
1713 if (start != frag && (MP_FLAGS(frag) & MP_BEGIN_FRAG)) {
1714 printk(KERN_WARNING"isdn_mppp(seq %d): new "
1715 "BEGIN flag with no prior END", thisseq);
1716 stats->seqerrs++;
1717 stats->frame_drops++;
1718 start = isdn_ppp_mp_discard(mp, start,frag);
1719 nextf = frag->next;
1720 }
1721 } else if (MP_LE(thisseq, minseq)) {
1722 if (MP_FLAGS(frag) & MP_BEGIN_FRAG)
1714 start = frag; 1723 start = frag;
1715 else { 1724 else {
1716 if (MP_FLAGS(frag) & MP_END_FRAG) 1725 if (MP_FLAGS(frag) & MP_END_FRAG)
1717 stats->frame_drops++; 1726 stats->frame_drops++;
1718 __skb_unlink(skb, &mp->frags); 1727 if( mp->frags == frag )
1728 mp->frags = nextf;
1719 isdn_ppp_mp_free_skb(mp, frag); 1729 isdn_ppp_mp_free_skb(mp, frag);
1730 frag = nextf;
1720 continue; 1731 continue;
1721 } 1732 }
1722 } 1733 }
1723 1734
1724 /* if we have end fragment, then we have full reassembly 1735 /* if start is non-null and we have end fragment, then
1725 * sequence -- reassemble and process packet now 1736 * we have full reassembly sequence -- reassemble
1737 * and process packet now
1726 */ 1738 */
1727 if (MP_FLAGS(frag) & MP_END_FRAG) { 1739 if (start != NULL && (MP_FLAGS(frag) & MP_END_FRAG)) {
1728 minseq = mp->seq = (thisseq+1) & MP_LONGSEQ_MASK; 1740 minseq = mp->seq = (thisseq+1) & MP_LONGSEQ_MASK;
1729 /* Reassemble the packet then dispatch it */ 1741 /* Reassemble the packet then dispatch it */
1730 isdn_ppp_mp_reassembly(net_dev, lp, start, frag, thisseq); 1742 isdn_ppp_mp_reassembly(net_dev, lp, start, nextf);
1743
1744 start = NULL;
1745 frag = NULL;
1731 1746
1732 start = NULL; 1747 mp->frags = nextf;
1733 frag = NULL; 1748 }
1734 }
1735 1749
1736 /* check if need to update start pointer: if we just 1750 /* check if need to update start pointer: if we just
1737 * reassembled the packet and sequence is contiguous 1751 * reassembled the packet and sequence is contiguous
@@ -1742,25 +1756,26 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
1742 * below low watermark and set start to the next frag or 1756 * below low watermark and set start to the next frag or
1743 * clear start ptr. 1757 * clear start ptr.
1744 */ 1758 */
1745 if (nextf != (struct sk_buff *)&mp->frags && 1759 if (nextf != NULL &&
1746 ((thisseq+1) & MP_LONGSEQ_MASK) == MP_SEQ(nextf)) { 1760 ((thisseq+1) & MP_LONGSEQ_MASK) == MP_SEQ(nextf)) {
1747 /* if we just reassembled and the next one is here, 1761 /* if we just reassembled and the next one is here,
1748 * then start another reassembly. 1762 * then start another reassembly. */
1749 */ 1763
1750 if (frag == NULL) { 1764 if (frag == NULL) {
1751 if (MP_FLAGS(nextf) & MP_BEGIN_FRAG) 1765 if (MP_FLAGS(nextf) & MP_BEGIN_FRAG)
1752 start = nextf; 1766 start = nextf;
1753 else { 1767 else
1754 printk(KERN_WARNING"isdn_mppp(seq %d):" 1768 {
1755 " END flag with no following " 1769 printk(KERN_WARNING"isdn_mppp(seq %d):"
1756 "BEGIN", thisseq); 1770 " END flag with no following "
1771 "BEGIN", thisseq);
1757 stats->seqerrs++; 1772 stats->seqerrs++;
1758 } 1773 }
1759 } 1774 }
1760 } else { 1775
1761 if (nextf != (struct sk_buff *)&mp->frags && 1776 } else {
1762 frag != NULL && 1777 if ( nextf != NULL && frag != NULL &&
1763 MP_LT(thisseq, minseq)) { 1778 MP_LT(thisseq, minseq)) {
1764 /* we've got a break in the sequence 1779 /* we've got a break in the sequence
1765 * and we not at the end yet 1780 * and we not at the end yet
1766 * and we did not just reassembled 1781 * and we did not just reassembled
@@ -1769,39 +1784,41 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
1769 * discard all the frames below low watermark 1784 * discard all the frames below low watermark
1770 * and start over */ 1785 * and start over */
1771 stats->frame_drops++; 1786 stats->frame_drops++;
1772 isdn_ppp_mp_discard(mp, start, nextf); 1787 mp->frags = isdn_ppp_mp_discard(mp,start,nextf);
1773 } 1788 }
1774 /* break in the sequence, no reassembly */ 1789 /* break in the sequence, no reassembly */
1775 start = NULL; 1790 start = NULL;
1776 } 1791 }
1777 if (!start) 1792
1778 break; 1793 frag = nextf;
1779 } 1794 } /* while -- main loop */
1780 1795
1781check_overflow: 1796 if (mp->frags == NULL)
1797 mp->frags = frag;
1798
1782 /* rather straighforward way to deal with (not very) possible 1799 /* rather straighforward way to deal with (not very) possible
1783 * queue overflow 1800 * queue overflow */
1784 */
1785 if (mp->frames > MP_MAX_QUEUE_LEN) { 1801 if (mp->frames > MP_MAX_QUEUE_LEN) {
1786 stats->overflows++; 1802 stats->overflows++;
1787 skb_queue_walk_safe(&mp->frags, frag, nextf) { 1803 while (mp->frames > MP_MAX_QUEUE_LEN) {
1788 if (mp->frames <= MP_MAX_QUEUE_LEN) 1804 frag = mp->frags->next;
1789 break; 1805 isdn_ppp_mp_free_skb(mp, mp->frags);
1790 __skb_unlink(frag, &mp->frags); 1806 mp->frags = frag;
1791 isdn_ppp_mp_free_skb(mp, frag);
1792 } 1807 }
1793 } 1808 }
1794 spin_unlock_irqrestore(&mp->lock, flags); 1809 spin_unlock_irqrestore(&mp->lock, flags);
1795} 1810}
1796 1811
1797static void isdn_ppp_mp_cleanup(isdn_net_local *lp) 1812static void isdn_ppp_mp_cleanup( isdn_net_local * lp )
1798{ 1813{
1799 struct sk_buff *skb, *tmp; 1814 struct sk_buff * frag = lp->netdev->pb->frags;
1800 1815 struct sk_buff * nextfrag;
1801 skb_queue_walk_safe(&lp->netdev->pb->frags, skb, tmp) { 1816 while( frag ) {
1802 __skb_unlink(skb, &lp->netdev->pb->frags); 1817 nextfrag = frag->next;
1803 isdn_ppp_mp_free_skb(lp->netdev->pb, skb); 1818 isdn_ppp_mp_free_skb(lp->netdev->pb, frag);
1804 } 1819 frag = nextfrag;
1820 }
1821 lp->netdev->pb->frags = NULL;
1805} 1822}
1806 1823
1807static u32 isdn_ppp_mp_get_seq( int short_seq, 1824static u32 isdn_ppp_mp_get_seq( int short_seq,
@@ -1838,115 +1855,72 @@ static u32 isdn_ppp_mp_get_seq( int short_seq,
1838 return seq; 1855 return seq;
1839} 1856}
1840 1857
1841static void isdn_ppp_mp_discard(ippp_bundle *mp, struct sk_buff *from, 1858struct sk_buff * isdn_ppp_mp_discard( ippp_bundle * mp,
1842 struct sk_buff *to) 1859 struct sk_buff * from, struct sk_buff * to )
1843{ 1860{
1844 if (from) { 1861 if( from )
1845 struct sk_buff *skb, *tmp; 1862 while (from != to) {
1846 int freeing = 0; 1863 struct sk_buff * next = from->next;
1847 1864 isdn_ppp_mp_free_skb(mp, from);
1848 skb_queue_walk_safe(&mp->frags, skb, tmp) { 1865 from = next;
1849 if (skb == to)
1850 break;
1851 if (skb == from)
1852 freeing = 1;
1853 if (!freeing)
1854 continue;
1855 __skb_unlink(skb, &mp->frags);
1856 isdn_ppp_mp_free_skb(mp, skb);
1857 } 1866 }
1858 } 1867 return from;
1859}
1860
1861static unsigned int calc_tot_len(struct sk_buff_head *queue,
1862 struct sk_buff *from, struct sk_buff *to)
1863{
1864 unsigned int tot_len = 0;
1865 struct sk_buff *skb;
1866 int found_start = 0;
1867
1868 skb_queue_walk(queue, skb) {
1869 if (skb == from)
1870 found_start = 1;
1871 if (!found_start)
1872 continue;
1873 tot_len += skb->len - MP_HEADER_LEN;
1874 if (skb == to)
1875 break;
1876 }
1877 return tot_len;
1878} 1868}
1879 1869
1880/* Reassemble packet using fragments in the reassembly queue from 1870void isdn_ppp_mp_reassembly( isdn_net_dev * net_dev, isdn_net_local * lp,
1881 * 'from' until 'to', inclusive. 1871 struct sk_buff * from, struct sk_buff * to )
1882 */
1883static void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp,
1884 struct sk_buff *from, struct sk_buff *to,
1885 u32 lastseq)
1886{ 1872{
1887 ippp_bundle *mp = net_dev->pb; 1873 ippp_bundle * mp = net_dev->pb;
1888 unsigned int tot_len;
1889 struct sk_buff *skb;
1890 int proto; 1874 int proto;
1875 struct sk_buff * skb;
1876 unsigned int tot_len;
1891 1877
1892 if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) { 1878 if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
1893 printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n", 1879 printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n",
1894 __func__, lp->ppp_slot); 1880 __func__, lp->ppp_slot);
1895 return; 1881 return;
1896 } 1882 }
1897 1883 if( MP_FLAGS(from) == (MP_BEGIN_FRAG | MP_END_FRAG) ) {
1898 tot_len = calc_tot_len(&mp->frags, from, to); 1884 if( ippp_table[lp->ppp_slot]->debug & 0x40 )
1899
1900 if (MP_FLAGS(from) == (MP_BEGIN_FRAG | MP_END_FRAG)) {
1901 if (ippp_table[lp->ppp_slot]->debug & 0x40)
1902 printk(KERN_DEBUG "isdn_mppp: reassembly: frame %d, " 1885 printk(KERN_DEBUG "isdn_mppp: reassembly: frame %d, "
1903 "len %d\n", MP_SEQ(from), from->len); 1886 "len %d\n", MP_SEQ(from), from->len );
1904 skb = from; 1887 skb = from;
1905 skb_pull(skb, MP_HEADER_LEN); 1888 skb_pull(skb, MP_HEADER_LEN);
1906 __skb_unlink(skb, &mp->frags);
1907 mp->frames--; 1889 mp->frames--;
1908 } else { 1890 } else {
1909 struct sk_buff *walk, *tmp; 1891 struct sk_buff * frag;
1910 int found_start = 0; 1892 int n;
1911 1893
1912 if (ippp_table[lp->ppp_slot]->debug & 0x40) 1894 for(tot_len=n=0, frag=from; frag != to; frag=frag->next, n++)
1913 printk(KERN_DEBUG"isdn_mppp: reassembling frames %d " 1895 tot_len += frag->len - MP_HEADER_LEN;
1914 "to %d, len %d\n", MP_SEQ(from), lastseq,
1915 tot_len);
1916 1896
1917 skb = dev_alloc_skb(tot_len); 1897 if( ippp_table[lp->ppp_slot]->debug & 0x40 )
1918 if (!skb) 1898 printk(KERN_DEBUG"isdn_mppp: reassembling frames %d "
1899 "to %d, len %d\n", MP_SEQ(from),
1900 (MP_SEQ(from)+n-1) & MP_LONGSEQ_MASK, tot_len );
1901 if( (skb = dev_alloc_skb(tot_len)) == NULL ) {
1919 printk(KERN_ERR "isdn_mppp: cannot allocate sk buff " 1902 printk(KERN_ERR "isdn_mppp: cannot allocate sk buff "
1920 "of size %d\n", tot_len); 1903 "of size %d\n", tot_len);
1921 1904 isdn_ppp_mp_discard(mp, from, to);
1922 found_start = 0; 1905 return;
1923 skb_queue_walk_safe(&mp->frags, walk, tmp) { 1906 }
1924 if (walk == from)
1925 found_start = 1;
1926 if (!found_start)
1927 continue;
1928 1907
1929 if (skb) { 1908 while( from != to ) {
1930 unsigned int len = walk->len - MP_HEADER_LEN; 1909 unsigned int len = from->len - MP_HEADER_LEN;
1931 skb_copy_from_linear_data_offset(walk, MP_HEADER_LEN,
1932 skb_put(skb, len),
1933 len);
1934 }
1935 __skb_unlink(walk, &mp->frags);
1936 isdn_ppp_mp_free_skb(mp, walk);
1937 1910
1938 if (walk == to) 1911 skb_copy_from_linear_data_offset(from, MP_HEADER_LEN,
1939 break; 1912 skb_put(skb,len),
1913 len);
1914 frag = from->next;
1915 isdn_ppp_mp_free_skb(mp, from);
1916 from = frag;
1940 } 1917 }
1941 } 1918 }
1942 if (!skb)
1943 return;
1944
1945 proto = isdn_ppp_strip_proto(skb); 1919 proto = isdn_ppp_strip_proto(skb);
1946 isdn_ppp_push_higher(net_dev, lp, skb, proto); 1920 isdn_ppp_push_higher(net_dev, lp, skb, proto);
1947} 1921}
1948 1922
1949static void isdn_ppp_mp_free_skb(ippp_bundle *mp, struct sk_buff *skb) 1923static void isdn_ppp_mp_free_skb(ippp_bundle * mp, struct sk_buff * skb)
1950{ 1924{
1951 dev_kfree_skb(skb); 1925 dev_kfree_skb(skb);
1952 mp->frames--; 1926 mp->frames--;
diff --git a/drivers/misc/iwmc3200top/main.c b/drivers/misc/iwmc3200top/main.c
index 6e4e49113ab..02b3dadc8ab 100644
--- a/drivers/misc/iwmc3200top/main.c
+++ b/drivers/misc/iwmc3200top/main.c
@@ -41,36 +41,13 @@
41#define DRIVER_DESCRIPTION "Intel(R) IWMC 3200 Top Driver" 41#define DRIVER_DESCRIPTION "Intel(R) IWMC 3200 Top Driver"
42#define DRIVER_COPYRIGHT "Copyright (c) 2008 Intel Corporation." 42#define DRIVER_COPYRIGHT "Copyright (c) 2008 Intel Corporation."
43 43
44#define IWMCT_VERSION "0.1.62" 44#define DRIVER_VERSION "0.1.62"
45
46#ifdef REPOSITORY_LABEL
47#define RL REPOSITORY_LABEL
48#else
49#define RL local
50#endif
51
52#ifdef CONFIG_IWMC3200TOP_DEBUG
53#define VD "-d"
54#else
55#define VD
56#endif
57
58#define DRIVER_VERSION IWMCT_VERSION "-" __stringify(RL) VD
59 45
60MODULE_DESCRIPTION(DRIVER_DESCRIPTION); 46MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
61MODULE_VERSION(DRIVER_VERSION); 47MODULE_VERSION(DRIVER_VERSION);
62MODULE_LICENSE("GPL"); 48MODULE_LICENSE("GPL");
63MODULE_AUTHOR(DRIVER_COPYRIGHT); 49MODULE_AUTHOR(DRIVER_COPYRIGHT);
64 50
65
66/* FIXME: These can be found in sdio_ids.h in newer kernels */
67#ifndef SDIO_INTEL_VENDOR_ID
68#define SDIO_INTEL_VENDOR_ID 0x0089
69#endif
70#ifndef SDIO_DEVICE_ID_INTEL_IWMC3200TOP
71#define SDIO_DEVICE_ID_INTEL_IWMC3200TOP 0x1404
72#endif
73
74/* 51/*
75 * This workers main task is to wait for OP_OPR_ALIVE 52 * This workers main task is to wait for OP_OPR_ALIVE
76 * from TOP FW until ALIVE_MSG_TIMOUT timeout is elapsed. 53 * from TOP FW until ALIVE_MSG_TIMOUT timeout is elapsed.
@@ -662,8 +639,9 @@ static void iwmct_remove(struct sdio_func *func)
662 639
663 640
664static const struct sdio_device_id iwmct_ids[] = { 641static const struct sdio_device_id iwmct_ids[] = {
665 { SDIO_DEVICE(SDIO_INTEL_VENDOR_ID, SDIO_DEVICE_ID_INTEL_IWMC3200TOP)}, 642 /* Intel Wireless MultiCom 3200 Top Driver */
666 { /* end: all zeroes */ }, 643 { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1404)},
644 { }, /* Terminating entry */
667}; 645};
668 646
669MODULE_DEVICE_TABLE(sdio, iwmct_ids); 647MODULE_DEVICE_TABLE(sdio, iwmct_ids);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index e012c2e0825..6399abbdad6 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -3235,7 +3235,7 @@ config VIRTIO_NET
3235 3235
3236config VMXNET3 3236config VMXNET3
3237 tristate "VMware VMXNET3 ethernet driver" 3237 tristate "VMware VMXNET3 ethernet driver"
3238 depends on PCI && X86 && INET 3238 depends on PCI && INET
3239 help 3239 help
3240 This driver supports VMware's vmxnet3 virtual ethernet NIC. 3240 This driver supports VMware's vmxnet3 virtual ethernet NIC.
3241 To compile this driver as a module, choose M here: the 3241 To compile this driver as a module, choose M here: the
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 0073d198715..be256b34cea 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -433,24 +433,16 @@ ks8695_rx_irq(int irq, void *dev_id)
433{ 433{
434 struct net_device *ndev = (struct net_device *)dev_id; 434 struct net_device *ndev = (struct net_device *)dev_id;
435 struct ks8695_priv *ksp = netdev_priv(ndev); 435 struct ks8695_priv *ksp = netdev_priv(ndev);
436 unsigned long status;
437
438 unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);
439 436
440 spin_lock(&ksp->rx_lock); 437 spin_lock(&ksp->rx_lock);
441 438
442 status = readl(KS8695_IRQ_VA + KS8695_INTST); 439 if (napi_schedule_prep(&ksp->napi)) {
443 440 unsigned long status = readl(KS8695_IRQ_VA + KS8695_INTEN);
444 /*clean rx status bit*/ 441 unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);
445 writel(status | mask_bit , KS8695_IRQ_VA + KS8695_INTST); 442 /*disable rx interrupt*/
446 443 status &= ~mask_bit;
447 if (status & mask_bit) { 444 writel(status , KS8695_IRQ_VA + KS8695_INTEN);
448 if (napi_schedule_prep(&ksp->napi)) { 445 __napi_schedule(&ksp->napi);
449 /*disable rx interrupt*/
450 status &= ~mask_bit;
451 writel(status , KS8695_IRQ_VA + KS8695_INTEN);
452 __napi_schedule(&ksp->napi);
453 }
454 } 446 }
455 447
456 spin_unlock(&ksp->rx_lock); 448 spin_unlock(&ksp->rx_lock);
@@ -552,14 +544,13 @@ rx_finished:
552 ksp->next_rx_desc_read = 544 ksp->next_rx_desc_read =
553 (last_rx_processed + 1) & 545 (last_rx_processed + 1) &
554 MAX_RX_DESC_MASK; 546 MAX_RX_DESC_MASK;
555
556 /* And refill the buffers */
557 ks8695_refill_rxbuffers(ksp);
558
559 /* Kick the RX DMA engine, in case it became
560 * suspended */
561 ks8695_writereg(ksp, KS8695_DRSC, 0);
562 } 547 }
548 /* And refill the buffers */
549 ks8695_refill_rxbuffers(ksp);
550
551 /* Kick the RX DMA engine, in case it became
552 * suspended */
553 ks8695_writereg(ksp, KS8695_DRSC, 0);
563 return received; 554 return received;
564} 555}
565 556
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
index 25e2627eb11..b7f3866d546 100644
--- a/drivers/net/arm/w90p910_ether.c
+++ b/drivers/net/arm/w90p910_ether.c
@@ -160,8 +160,8 @@ struct w90p910_ether {
160 struct mii_if_info mii; 160 struct mii_if_info mii;
161 struct timer_list check_timer; 161 struct timer_list check_timer;
162 void __iomem *reg; 162 void __iomem *reg;
163 unsigned int rxirq; 163 int rxirq;
164 unsigned int txirq; 164 int txirq;
165 unsigned int cur_tx; 165 unsigned int cur_tx;
166 unsigned int cur_rx; 166 unsigned int cur_rx;
167 unsigned int finish_tx; 167 unsigned int finish_tx;
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 5ef9e23435f..1e2f57d4c36 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -2135,7 +2135,7 @@ static int atl1c_request_irq(struct atl1c_adapter *adapter)
2135 2135
2136 if (!adapter->have_msi) 2136 if (!adapter->have_msi)
2137 flags |= IRQF_SHARED; 2137 flags |= IRQF_SHARED;
2138 err = request_irq(adapter->pdev->irq, &atl1c_intr, flags, 2138 err = request_irq(adapter->pdev->irq, atl1c_intr, flags,
2139 netdev->name, netdev); 2139 netdev->name, netdev);
2140 if (err) { 2140 if (err) {
2141 if (netif_msg_ifup(adapter)) 2141 if (netif_msg_ifup(adapter))
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 928942b74ce..602ab86b639 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -259,9 +259,6 @@ struct bnx2x_eth_q_stats {
259struct bnx2x_fastpath { 259struct bnx2x_fastpath {
260 260
261 struct napi_struct napi; 261 struct napi_struct napi;
262
263 u8 is_rx_queue;
264
265 struct host_status_block *status_blk; 262 struct host_status_block *status_blk;
266 dma_addr_t status_blk_mapping; 263 dma_addr_t status_blk_mapping;
267 264
@@ -970,8 +967,7 @@ struct bnx2x {
970#define BNX2X_STATE_ERROR 0xf000 967#define BNX2X_STATE_ERROR 0xf000
971 968
972 int multi_mode; 969 int multi_mode;
973 int num_rx_queues; 970 int num_queues;
974 int num_tx_queues;
975 971
976 u32 rx_mode; 972 u32 rx_mode;
977#define BNX2X_RX_MODE_NONE 0 973#define BNX2X_RX_MODE_NONE 0
@@ -1074,20 +1070,15 @@ struct bnx2x {
1074}; 1070};
1075 1071
1076 1072
1077#define BNX2X_MAX_QUEUES(bp) (IS_E1HMF(bp) ? (MAX_CONTEXT/(2 * E1HVN_MAX)) \ 1073#define BNX2X_MAX_QUEUES(bp) (IS_E1HMF(bp) ? (MAX_CONTEXT/E1HVN_MAX) \
1078 : (MAX_CONTEXT/2)) 1074 : MAX_CONTEXT)
1079#define BNX2X_NUM_QUEUES(bp) (bp->num_rx_queues + bp->num_tx_queues) 1075#define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
1080#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 2) 1076#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
1081 1077
1082#define for_each_rx_queue(bp, var) \
1083 for (var = 0; var < bp->num_rx_queues; var++)
1084#define for_each_tx_queue(bp, var) \
1085 for (var = bp->num_rx_queues; \
1086 var < BNX2X_NUM_QUEUES(bp); var++)
1087#define for_each_queue(bp, var) \ 1078#define for_each_queue(bp, var) \
1088 for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) 1079 for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++)
1089#define for_each_nondefault_queue(bp, var) \ 1080#define for_each_nondefault_queue(bp, var) \
1090 for (var = 1; var < bp->num_rx_queues; var++) 1081 for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++)
1091 1082
1092 1083
1093void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32); 1084void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index e2cf686d111..77ba13520d8 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -57,7 +57,7 @@
57#include "bnx2x_init_ops.h" 57#include "bnx2x_init_ops.h"
58#include "bnx2x_dump.h" 58#include "bnx2x_dump.h"
59 59
60#define DRV_MODULE_VERSION "1.52.1-4" 60#define DRV_MODULE_VERSION "1.52.1-5"
61#define DRV_MODULE_RELDATE "2009/11/09" 61#define DRV_MODULE_RELDATE "2009/11/09"
62#define BNX2X_BC_VER 0x040200 62#define BNX2X_BC_VER 0x040200
63 63
@@ -91,15 +91,10 @@ module_param(multi_mode, int, 0);
91MODULE_PARM_DESC(multi_mode, " Multi queue mode " 91MODULE_PARM_DESC(multi_mode, " Multi queue mode "
92 "(0 Disable; 1 Enable (default))"); 92 "(0 Disable; 1 Enable (default))");
93 93
94static int num_rx_queues; 94static int num_queues;
95module_param(num_rx_queues, int, 0); 95module_param(num_queues, int, 0);
96MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1" 96MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
97 " (default is half number of CPUs)"); 97 " (default is as a number of CPUs)");
98
99static int num_tx_queues;
100module_param(num_tx_queues, int, 0);
101MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
102 " (default is half number of CPUs)");
103 98
104static int disable_tpa; 99static int disable_tpa;
105module_param(disable_tpa, int, 0); 100module_param(disable_tpa, int, 0);
@@ -558,7 +553,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
558 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx); 553 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
559 554
560 /* Rx */ 555 /* Rx */
561 for_each_rx_queue(bp, i) { 556 for_each_queue(bp, i) {
562 struct bnx2x_fastpath *fp = &bp->fp[i]; 557 struct bnx2x_fastpath *fp = &bp->fp[i];
563 558
564 BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)" 559 BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
@@ -575,7 +570,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
575 } 570 }
576 571
577 /* Tx */ 572 /* Tx */
578 for_each_tx_queue(bp, i) { 573 for_each_queue(bp, i) {
579 struct bnx2x_fastpath *fp = &bp->fp[i]; 574 struct bnx2x_fastpath *fp = &bp->fp[i];
580 575
581 BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)" 576 BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
@@ -590,7 +585,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
590 585
591 /* Rings */ 586 /* Rings */
592 /* Rx */ 587 /* Rx */
593 for_each_rx_queue(bp, i) { 588 for_each_queue(bp, i) {
594 struct bnx2x_fastpath *fp = &bp->fp[i]; 589 struct bnx2x_fastpath *fp = &bp->fp[i];
595 590
596 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10); 591 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
@@ -624,7 +619,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
624 } 619 }
625 620
626 /* Tx */ 621 /* Tx */
627 for_each_tx_queue(bp, i) { 622 for_each_queue(bp, i) {
628 struct bnx2x_fastpath *fp = &bp->fp[i]; 623 struct bnx2x_fastpath *fp = &bp->fp[i];
629 624
630 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10); 625 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
@@ -792,21 +787,13 @@ static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
792 barrier(); 787 barrier();
793} 788}
794 789
795static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) 790static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
796{ 791{
797 struct host_status_block *fpsb = fp->status_blk; 792 struct host_status_block *fpsb = fp->status_blk;
798 u16 rc = 0;
799 793
800 barrier(); /* status block is written to by the chip */ 794 barrier(); /* status block is written to by the chip */
801 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) { 795 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
802 fp->fp_c_idx = fpsb->c_status_block.status_block_index; 796 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
803 rc |= 1;
804 }
805 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
806 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
807 rc |= 2;
808 }
809 return rc;
810} 797}
811 798
812static u16 bnx2x_ack_int(struct bnx2x *bp) 799static u16 bnx2x_ack_int(struct bnx2x *bp)
@@ -846,6 +833,9 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
846 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; 833 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
847 int nbd; 834 int nbd;
848 835
836 /* prefetch skb end pointer to speedup dev_kfree_skb() */
837 prefetch(&skb->end);
838
849 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n", 839 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
850 idx, tx_buf, skb); 840 idx, tx_buf, skb);
851 841
@@ -890,7 +880,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
890 880
891 /* release skb */ 881 /* release skb */
892 WARN_ON(!skb); 882 WARN_ON(!skb);
893 dev_kfree_skb_any(skb); 883 dev_kfree_skb(skb);
894 tx_buf->first_bd = 0; 884 tx_buf->first_bd = 0;
895 tx_buf->skb = NULL; 885 tx_buf->skb = NULL;
896 886
@@ -920,19 +910,28 @@ static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
920 return (s16)(fp->bp->tx_ring_size) - used; 910 return (s16)(fp->bp->tx_ring_size) - used;
921} 911}
922 912
923static void bnx2x_tx_int(struct bnx2x_fastpath *fp) 913static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
914{
915 u16 hw_cons;
916
917 /* Tell compiler that status block fields can change */
918 barrier();
919 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
920 return hw_cons != fp->tx_pkt_cons;
921}
922
923static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
924{ 924{
925 struct bnx2x *bp = fp->bp; 925 struct bnx2x *bp = fp->bp;
926 struct netdev_queue *txq; 926 struct netdev_queue *txq;
927 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons; 927 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
928 int done = 0;
929 928
930#ifdef BNX2X_STOP_ON_ERROR 929#ifdef BNX2X_STOP_ON_ERROR
931 if (unlikely(bp->panic)) 930 if (unlikely(bp->panic))
932 return; 931 return -1;
933#endif 932#endif
934 933
935 txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues); 934 txq = netdev_get_tx_queue(bp->dev, fp->index);
936 hw_cons = le16_to_cpu(*fp->tx_cons_sb); 935 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
937 sw_cons = fp->tx_pkt_cons; 936 sw_cons = fp->tx_pkt_cons;
938 937
@@ -953,7 +952,6 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
953*/ 952*/
954 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons); 953 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
955 sw_cons++; 954 sw_cons++;
956 done++;
957 } 955 }
958 956
959 fp->tx_pkt_cons = sw_cons; 957 fp->tx_pkt_cons = sw_cons;
@@ -975,6 +973,7 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
975 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)) 973 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
976 netif_tx_wake_queue(txq); 974 netif_tx_wake_queue(txq);
977 } 975 }
976 return 0;
978} 977}
979 978
980#ifdef BCM_CNIC 979#ifdef BCM_CNIC
@@ -1561,6 +1560,8 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1561 } else { 1560 } else {
1562 rx_buf = &fp->rx_buf_ring[bd_cons]; 1561 rx_buf = &fp->rx_buf_ring[bd_cons];
1563 skb = rx_buf->skb; 1562 skb = rx_buf->skb;
1563 prefetch(skb);
1564 prefetch((u8 *)skb + 256);
1564 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); 1565 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1565 pad = cqe->fast_path_cqe.placement_offset; 1566 pad = cqe->fast_path_cqe.placement_offset;
1566 1567
@@ -1742,27 +1743,13 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1742 if (unlikely(bp->panic)) 1743 if (unlikely(bp->panic))
1743 return IRQ_HANDLED; 1744 return IRQ_HANDLED;
1744#endif 1745#endif
1745 /* Handle Rx or Tx according to MSI-X vector */
1746 if (fp->is_rx_queue) {
1747 prefetch(fp->rx_cons_sb);
1748 prefetch(&fp->status_blk->u_status_block.status_block_index);
1749
1750 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1751
1752 } else {
1753 prefetch(fp->tx_cons_sb);
1754 prefetch(&fp->status_blk->c_status_block.status_block_index);
1755
1756 bnx2x_update_fpsb_idx(fp);
1757 rmb();
1758 bnx2x_tx_int(fp);
1759 1746
1760 /* Re-enable interrupts */ 1747 /* Handle Rx and Tx according to MSI-X vector */
1761 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 1748 prefetch(fp->rx_cons_sb);
1762 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1); 1749 prefetch(fp->tx_cons_sb);
1763 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 1750 prefetch(&fp->status_blk->u_status_block.status_block_index);
1764 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1); 1751 prefetch(&fp->status_blk->c_status_block.status_block_index);
1765 } 1752 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1766 1753
1767 return IRQ_HANDLED; 1754 return IRQ_HANDLED;
1768} 1755}
@@ -1797,31 +1784,14 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1797 1784
1798 mask = 0x2 << fp->sb_id; 1785 mask = 0x2 << fp->sb_id;
1799 if (status & mask) { 1786 if (status & mask) {
1800 /* Handle Rx or Tx according to SB id */ 1787 /* Handle Rx and Tx according to SB id */
1801 if (fp->is_rx_queue) { 1788 prefetch(fp->rx_cons_sb);
1802 prefetch(fp->rx_cons_sb); 1789 prefetch(&fp->status_blk->u_status_block.
1803 prefetch(&fp->status_blk->u_status_block. 1790 status_block_index);
1804 status_block_index); 1791 prefetch(fp->tx_cons_sb);
1805 1792 prefetch(&fp->status_blk->c_status_block.
1806 napi_schedule(&bnx2x_fp(bp, fp->index, napi)); 1793 status_block_index);
1807 1794 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1808 } else {
1809 prefetch(fp->tx_cons_sb);
1810 prefetch(&fp->status_blk->c_status_block.
1811 status_block_index);
1812
1813 bnx2x_update_fpsb_idx(fp);
1814 rmb();
1815 bnx2x_tx_int(fp);
1816
1817 /* Re-enable interrupts */
1818 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1819 le16_to_cpu(fp->fp_u_idx),
1820 IGU_INT_NOP, 1);
1821 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1822 le16_to_cpu(fp->fp_c_idx),
1823 IGU_INT_ENABLE, 1);
1824 }
1825 status &= ~mask; 1795 status &= ~mask;
1826 } 1796 }
1827 } 1797 }
@@ -2587,7 +2557,6 @@ static void bnx2x_e1h_disable(struct bnx2x *bp)
2587 int port = BP_PORT(bp); 2557 int port = BP_PORT(bp);
2588 2558
2589 netif_tx_disable(bp->dev); 2559 netif_tx_disable(bp->dev);
2590 bp->dev->trans_start = jiffies; /* prevent tx timeout */
2591 2560
2592 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); 2561 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2593 2562
@@ -4027,7 +3996,7 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
4027 estats->no_buff_discard_hi = 0; 3996 estats->no_buff_discard_hi = 0;
4028 estats->no_buff_discard_lo = 0; 3997 estats->no_buff_discard_lo = 0;
4029 3998
4030 for_each_rx_queue(bp, i) { 3999 for_each_queue(bp, i) {
4031 struct bnx2x_fastpath *fp = &bp->fp[i]; 4000 struct bnx2x_fastpath *fp = &bp->fp[i];
4032 int cl_id = fp->cl_id; 4001 int cl_id = fp->cl_id;
4033 struct tstorm_per_client_stats *tclient = 4002 struct tstorm_per_client_stats *tclient =
@@ -4244,7 +4213,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
4244 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi); 4213 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4245 4214
4246 nstats->rx_dropped = estats->mac_discard; 4215 nstats->rx_dropped = estats->mac_discard;
4247 for_each_rx_queue(bp, i) 4216 for_each_queue(bp, i)
4248 nstats->rx_dropped += 4217 nstats->rx_dropped +=
4249 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard); 4218 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4250 4219
@@ -4298,7 +4267,7 @@ static void bnx2x_drv_stats_update(struct bnx2x *bp)
4298 estats->rx_err_discard_pkt = 0; 4267 estats->rx_err_discard_pkt = 0;
4299 estats->rx_skb_alloc_failed = 0; 4268 estats->rx_skb_alloc_failed = 0;
4300 estats->hw_csum_err = 0; 4269 estats->hw_csum_err = 0;
4301 for_each_rx_queue(bp, i) { 4270 for_each_queue(bp, i) {
4302 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats; 4271 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4303 4272
4304 estats->driver_xoff += qstats->driver_xoff; 4273 estats->driver_xoff += qstats->driver_xoff;
@@ -4329,7 +4298,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
4329 4298
4330 if (bp->msglevel & NETIF_MSG_TIMER) { 4299 if (bp->msglevel & NETIF_MSG_TIMER) {
4331 struct bnx2x_fastpath *fp0_rx = bp->fp; 4300 struct bnx2x_fastpath *fp0_rx = bp->fp;
4332 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]); 4301 struct bnx2x_fastpath *fp0_tx = bp->fp;
4333 struct tstorm_per_client_stats *old_tclient = 4302 struct tstorm_per_client_stats *old_tclient =
4334 &bp->fp->old_tclient; 4303 &bp->fp->old_tclient;
4335 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats; 4304 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
@@ -4984,7 +4953,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
4984 4953
4985 if (bp->flags & TPA_ENABLE_FLAG) { 4954 if (bp->flags & TPA_ENABLE_FLAG) {
4986 4955
4987 for_each_rx_queue(bp, j) { 4956 for_each_queue(bp, j) {
4988 struct bnx2x_fastpath *fp = &bp->fp[j]; 4957 struct bnx2x_fastpath *fp = &bp->fp[j];
4989 4958
4990 for (i = 0; i < max_agg_queues; i++) { 4959 for (i = 0; i < max_agg_queues; i++) {
@@ -5007,16 +4976,13 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
5007 } 4976 }
5008 } 4977 }
5009 4978
5010 for_each_rx_queue(bp, j) { 4979 for_each_queue(bp, j) {
5011 struct bnx2x_fastpath *fp = &bp->fp[j]; 4980 struct bnx2x_fastpath *fp = &bp->fp[j];
5012 4981
5013 fp->rx_bd_cons = 0; 4982 fp->rx_bd_cons = 0;
5014 fp->rx_cons_sb = BNX2X_RX_SB_INDEX; 4983 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5015 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX; 4984 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5016 4985
5017 /* Mark queue as Rx */
5018 fp->is_rx_queue = 1;
5019
5020 /* "next page" elements initialization */ 4986 /* "next page" elements initialization */
5021 /* SGE ring */ 4987 /* SGE ring */
5022 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { 4988 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
@@ -5122,7 +5088,7 @@ static void bnx2x_init_tx_ring(struct bnx2x *bp)
5122{ 5088{
5123 int i, j; 5089 int i, j;
5124 5090
5125 for_each_tx_queue(bp, j) { 5091 for_each_queue(bp, j) {
5126 struct bnx2x_fastpath *fp = &bp->fp[j]; 5092 struct bnx2x_fastpath *fp = &bp->fp[j];
5127 5093
5128 for (i = 1; i <= NUM_TX_RINGS; i++) { 5094 for (i = 1; i <= NUM_TX_RINGS; i++) {
@@ -5148,10 +5114,6 @@ static void bnx2x_init_tx_ring(struct bnx2x *bp)
5148 fp->tx_cons_sb = BNX2X_TX_SB_INDEX; 5114 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5149 fp->tx_pkt = 0; 5115 fp->tx_pkt = 0;
5150 } 5116 }
5151
5152 /* clean tx statistics */
5153 for_each_rx_queue(bp, i)
5154 bnx2x_fp(bp, i, tx_pkt) = 0;
5155} 5117}
5156 5118
5157static void bnx2x_init_sp_ring(struct bnx2x *bp) 5119static void bnx2x_init_sp_ring(struct bnx2x *bp)
@@ -5180,7 +5142,8 @@ static void bnx2x_init_context(struct bnx2x *bp)
5180{ 5142{
5181 int i; 5143 int i;
5182 5144
5183 for_each_rx_queue(bp, i) { 5145 /* Rx */
5146 for_each_queue(bp, i) {
5184 struct eth_context *context = bnx2x_sp(bp, context[i].eth); 5147 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5185 struct bnx2x_fastpath *fp = &bp->fp[i]; 5148 struct bnx2x_fastpath *fp = &bp->fp[i];
5186 u8 cl_id = fp->cl_id; 5149 u8 cl_id = fp->cl_id;
@@ -5232,10 +5195,11 @@ static void bnx2x_init_context(struct bnx2x *bp)
5232 ETH_CONNECTION_TYPE); 5195 ETH_CONNECTION_TYPE);
5233 } 5196 }
5234 5197
5235 for_each_tx_queue(bp, i) { 5198 /* Tx */
5199 for_each_queue(bp, i) {
5236 struct bnx2x_fastpath *fp = &bp->fp[i]; 5200 struct bnx2x_fastpath *fp = &bp->fp[i];
5237 struct eth_context *context = 5201 struct eth_context *context =
5238 bnx2x_sp(bp, context[i - bp->num_rx_queues].eth); 5202 bnx2x_sp(bp, context[i].eth);
5239 5203
5240 context->cstorm_st_context.sb_index_number = 5204 context->cstorm_st_context.sb_index_number =
5241 C_SB_ETH_TX_CQ_INDEX; 5205 C_SB_ETH_TX_CQ_INDEX;
@@ -5263,7 +5227,7 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
5263 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) 5227 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5264 REG_WR8(bp, BAR_TSTRORM_INTMEM + 5228 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5265 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i, 5229 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5266 bp->fp->cl_id + (i % bp->num_rx_queues)); 5230 bp->fp->cl_id + (i % bp->num_queues));
5267} 5231}
5268 5232
5269static void bnx2x_set_client_config(struct bnx2x *bp) 5233static void bnx2x_set_client_config(struct bnx2x *bp)
@@ -5507,7 +5471,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
5507 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) * 5471 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5508 SGE_PAGE_SIZE * PAGES_PER_SGE), 5472 SGE_PAGE_SIZE * PAGES_PER_SGE),
5509 (u32)0xffff); 5473 (u32)0xffff);
5510 for_each_rx_queue(bp, i) { 5474 for_each_queue(bp, i) {
5511 struct bnx2x_fastpath *fp = &bp->fp[i]; 5475 struct bnx2x_fastpath *fp = &bp->fp[i];
5512 5476
5513 REG_WR(bp, BAR_USTRORM_INTMEM + 5477 REG_WR(bp, BAR_USTRORM_INTMEM +
@@ -5542,7 +5506,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
5542 rx_pause.cqe_thr_high = 350; 5506 rx_pause.cqe_thr_high = 350;
5543 rx_pause.sge_thr_high = 0; 5507 rx_pause.sge_thr_high = 0;
5544 5508
5545 for_each_rx_queue(bp, i) { 5509 for_each_queue(bp, i) {
5546 struct bnx2x_fastpath *fp = &bp->fp[i]; 5510 struct bnx2x_fastpath *fp = &bp->fp[i];
5547 5511
5548 if (!fp->disable_tpa) { 5512 if (!fp->disable_tpa) {
@@ -5637,9 +5601,6 @@ static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5637#else 5601#else
5638 fp->sb_id = fp->cl_id; 5602 fp->sb_id = fp->cl_id;
5639#endif 5603#endif
5640 /* Suitable Rx and Tx SBs are served by the same client */
5641 if (i >= bp->num_rx_queues)
5642 fp->cl_id -= bp->num_rx_queues;
5643 DP(NETIF_MSG_IFUP, 5604 DP(NETIF_MSG_IFUP,
5644 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n", 5605 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5645 i, bp, fp->status_blk, fp->cl_id, fp->sb_id); 5606 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
@@ -6749,7 +6710,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
6749 sizeof(struct host_status_block)); 6710 sizeof(struct host_status_block));
6750 } 6711 }
6751 /* Rx */ 6712 /* Rx */
6752 for_each_rx_queue(bp, i) { 6713 for_each_queue(bp, i) {
6753 6714
6754 /* fastpath rx rings: rx_buf rx_desc rx_comp */ 6715 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6755 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring)); 6716 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
@@ -6769,7 +6730,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
6769 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); 6730 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6770 } 6731 }
6771 /* Tx */ 6732 /* Tx */
6772 for_each_tx_queue(bp, i) { 6733 for_each_queue(bp, i) {
6773 6734
6774 /* fastpath tx rings: tx_buf tx_desc */ 6735 /* fastpath tx rings: tx_buf tx_desc */
6775 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring)); 6736 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
@@ -6831,7 +6792,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
6831 sizeof(struct host_status_block)); 6792 sizeof(struct host_status_block));
6832 } 6793 }
6833 /* Rx */ 6794 /* Rx */
6834 for_each_rx_queue(bp, i) { 6795 for_each_queue(bp, i) {
6835 6796
6836 /* fastpath rx rings: rx_buf rx_desc rx_comp */ 6797 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6837 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring), 6798 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
@@ -6853,7 +6814,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
6853 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); 6814 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6854 } 6815 }
6855 /* Tx */ 6816 /* Tx */
6856 for_each_tx_queue(bp, i) { 6817 for_each_queue(bp, i) {
6857 6818
6858 /* fastpath tx rings: tx_buf tx_desc */ 6819 /* fastpath tx rings: tx_buf tx_desc */
6859 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring), 6820 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
@@ -6909,7 +6870,7 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6909{ 6870{
6910 int i; 6871 int i;
6911 6872
6912 for_each_tx_queue(bp, i) { 6873 for_each_queue(bp, i) {
6913 struct bnx2x_fastpath *fp = &bp->fp[i]; 6874 struct bnx2x_fastpath *fp = &bp->fp[i];
6914 6875
6915 u16 bd_cons = fp->tx_bd_cons; 6876 u16 bd_cons = fp->tx_bd_cons;
@@ -6927,7 +6888,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6927{ 6888{
6928 int i, j; 6889 int i, j;
6929 6890
6930 for_each_rx_queue(bp, j) { 6891 for_each_queue(bp, j) {
6931 struct bnx2x_fastpath *fp = &bp->fp[j]; 6892 struct bnx2x_fastpath *fp = &bp->fp[j];
6932 6893
6933 for (i = 0; i < NUM_RX_BD; i++) { 6894 for (i = 0; i < NUM_RX_BD; i++) {
@@ -7042,12 +7003,8 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7042#endif 7003#endif
7043 for_each_queue(bp, i) { 7004 for_each_queue(bp, i) {
7044 struct bnx2x_fastpath *fp = &bp->fp[i]; 7005 struct bnx2x_fastpath *fp = &bp->fp[i];
7045 7006 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7046 if (i < bp->num_rx_queues) 7007 bp->dev->name, i);
7047 sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
7048 else
7049 sprintf(fp->name, "%s-tx-%d",
7050 bp->dev->name, i - bp->num_rx_queues);
7051 7008
7052 rc = request_irq(bp->msix_table[i + offset].vector, 7009 rc = request_irq(bp->msix_table[i + offset].vector,
7053 bnx2x_msix_fp_int, 0, fp->name, fp); 7010 bnx2x_msix_fp_int, 0, fp->name, fp);
@@ -7106,7 +7063,7 @@ static void bnx2x_napi_enable(struct bnx2x *bp)
7106{ 7063{
7107 int i; 7064 int i;
7108 7065
7109 for_each_rx_queue(bp, i) 7066 for_each_queue(bp, i)
7110 napi_enable(&bnx2x_fp(bp, i, napi)); 7067 napi_enable(&bnx2x_fp(bp, i, napi));
7111} 7068}
7112 7069
@@ -7114,7 +7071,7 @@ static void bnx2x_napi_disable(struct bnx2x *bp)
7114{ 7071{
7115 int i; 7072 int i;
7116 7073
7117 for_each_rx_queue(bp, i) 7074 for_each_queue(bp, i)
7118 napi_disable(&bnx2x_fp(bp, i, napi)); 7075 napi_disable(&bnx2x_fp(bp, i, napi));
7119} 7076}
7120 7077
@@ -7140,7 +7097,6 @@ static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7140 bnx2x_int_disable_sync(bp, disable_hw); 7097 bnx2x_int_disable_sync(bp, disable_hw);
7141 bnx2x_napi_disable(bp); 7098 bnx2x_napi_disable(bp);
7142 netif_tx_disable(bp->dev); 7099 netif_tx_disable(bp->dev);
7143 bp->dev->trans_start = jiffies; /* prevent tx timeout */
7144} 7100}
7145 7101
7146/* 7102/*
@@ -7410,88 +7366,60 @@ static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7410 7366
7411static int bnx2x_poll(struct napi_struct *napi, int budget); 7367static int bnx2x_poll(struct napi_struct *napi, int budget);
7412 7368
7413static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out, 7369static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7414 int *num_tx_queues_out)
7415{ 7370{
7416 int _num_rx_queues = 0, _num_tx_queues = 0;
7417 7371
7418 switch (bp->multi_mode) { 7372 switch (bp->multi_mode) {
7419 case ETH_RSS_MODE_DISABLED: 7373 case ETH_RSS_MODE_DISABLED:
7420 _num_rx_queues = 1; 7374 bp->num_queues = 1;
7421 _num_tx_queues = 1;
7422 break; 7375 break;
7423 7376
7424 case ETH_RSS_MODE_REGULAR: 7377 case ETH_RSS_MODE_REGULAR:
7425 if (num_rx_queues) 7378 if (num_queues)
7426 _num_rx_queues = min_t(u32, num_rx_queues, 7379 bp->num_queues = min_t(u32, num_queues,
7427 BNX2X_MAX_QUEUES(bp)); 7380 BNX2X_MAX_QUEUES(bp));
7428 else
7429 _num_rx_queues = min_t(u32, num_online_cpus(),
7430 BNX2X_MAX_QUEUES(bp));
7431
7432 if (num_tx_queues)
7433 _num_tx_queues = min_t(u32, num_tx_queues,
7434 BNX2X_MAX_QUEUES(bp));
7435 else 7381 else
7436 _num_tx_queues = min_t(u32, num_online_cpus(), 7382 bp->num_queues = min_t(u32, num_online_cpus(),
7437 BNX2X_MAX_QUEUES(bp)); 7383 BNX2X_MAX_QUEUES(bp));
7438
7439 /* There must be not more Tx queues than Rx queues */
7440 if (_num_tx_queues > _num_rx_queues) {
7441 BNX2X_ERR("number of tx queues (%d) > "
7442 "number of rx queues (%d)"
7443 " defaulting to %d\n",
7444 _num_tx_queues, _num_rx_queues,
7445 _num_rx_queues);
7446 _num_tx_queues = _num_rx_queues;
7447 }
7448 break; 7384 break;
7449 7385
7450 7386
7451 default: 7387 default:
7452 _num_rx_queues = 1; 7388 bp->num_queues = 1;
7453 _num_tx_queues = 1;
7454 break; 7389 break;
7455 } 7390 }
7456
7457 *num_rx_queues_out = _num_rx_queues;
7458 *num_tx_queues_out = _num_tx_queues;
7459} 7391}
7460 7392
7461static int bnx2x_set_int_mode(struct bnx2x *bp) 7393static int bnx2x_set_num_queues(struct bnx2x *bp)
7462{ 7394{
7463 int rc = 0; 7395 int rc = 0;
7464 7396
7465 switch (int_mode) { 7397 switch (int_mode) {
7466 case INT_MODE_INTx: 7398 case INT_MODE_INTx:
7467 case INT_MODE_MSI: 7399 case INT_MODE_MSI:
7468 bp->num_rx_queues = 1; 7400 bp->num_queues = 1;
7469 bp->num_tx_queues = 1;
7470 DP(NETIF_MSG_IFUP, "set number of queues to 1\n"); 7401 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7471 break; 7402 break;
7472 7403
7473 case INT_MODE_MSIX: 7404 case INT_MODE_MSIX:
7474 default: 7405 default:
7475 /* Set interrupt mode according to bp->multi_mode value */ 7406 /* Set number of queues according to bp->multi_mode value */
7476 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues, 7407 bnx2x_set_num_queues_msix(bp);
7477 &bp->num_tx_queues);
7478 7408
7479 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n", 7409 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7480 bp->num_rx_queues, bp->num_tx_queues); 7410 bp->num_queues);
7481 7411
7482 /* if we can't use MSI-X we only need one fp, 7412 /* if we can't use MSI-X we only need one fp,
7483 * so try to enable MSI-X with the requested number of fp's 7413 * so try to enable MSI-X with the requested number of fp's
7484 * and fallback to MSI or legacy INTx with one fp 7414 * and fallback to MSI or legacy INTx with one fp
7485 */ 7415 */
7486 rc = bnx2x_enable_msix(bp); 7416 rc = bnx2x_enable_msix(bp);
7487 if (rc) { 7417 if (rc)
7488 /* failed to enable MSI-X */ 7418 /* failed to enable MSI-X */
7489 bp->num_rx_queues = 1; 7419 bp->num_queues = 1;
7490 bp->num_tx_queues = 1;
7491 }
7492 break; 7420 break;
7493 } 7421 }
7494 bp->dev->real_num_tx_queues = bp->num_tx_queues; 7422 bp->dev->real_num_tx_queues = bp->num_queues;
7495 return rc; 7423 return rc;
7496} 7424}
7497 7425
@@ -7513,16 +7441,16 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7513 7441
7514 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; 7442 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7515 7443
7516 rc = bnx2x_set_int_mode(bp); 7444 rc = bnx2x_set_num_queues(bp);
7517 7445
7518 if (bnx2x_alloc_mem(bp)) 7446 if (bnx2x_alloc_mem(bp))
7519 return -ENOMEM; 7447 return -ENOMEM;
7520 7448
7521 for_each_rx_queue(bp, i) 7449 for_each_queue(bp, i)
7522 bnx2x_fp(bp, i, disable_tpa) = 7450 bnx2x_fp(bp, i, disable_tpa) =
7523 ((bp->flags & TPA_ENABLE_FLAG) == 0); 7451 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7524 7452
7525 for_each_rx_queue(bp, i) 7453 for_each_queue(bp, i)
7526 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), 7454 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7527 bnx2x_poll, 128); 7455 bnx2x_poll, 128);
7528 7456
@@ -7536,7 +7464,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7536 } 7464 }
7537 } else { 7465 } else {
7538 /* Fall to INTx if failed to enable MSI-X due to lack of 7466 /* Fall to INTx if failed to enable MSI-X due to lack of
7539 memory (in bnx2x_set_int_mode()) */ 7467 memory (in bnx2x_set_num_queues()) */
7540 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx)) 7468 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7541 bnx2x_enable_msi(bp); 7469 bnx2x_enable_msi(bp);
7542 bnx2x_ack_int(bp); 7470 bnx2x_ack_int(bp);
@@ -7730,14 +7658,14 @@ load_error3:
7730 bp->port.pmf = 0; 7658 bp->port.pmf = 0;
7731 /* Free SKBs, SGEs, TPA pool and driver internals */ 7659 /* Free SKBs, SGEs, TPA pool and driver internals */
7732 bnx2x_free_skbs(bp); 7660 bnx2x_free_skbs(bp);
7733 for_each_rx_queue(bp, i) 7661 for_each_queue(bp, i)
7734 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 7662 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7735load_error2: 7663load_error2:
7736 /* Release IRQs */ 7664 /* Release IRQs */
7737 bnx2x_free_irq(bp); 7665 bnx2x_free_irq(bp);
7738load_error1: 7666load_error1:
7739 bnx2x_napi_disable(bp); 7667 bnx2x_napi_disable(bp);
7740 for_each_rx_queue(bp, i) 7668 for_each_queue(bp, i)
7741 netif_napi_del(&bnx2x_fp(bp, i, napi)); 7669 netif_napi_del(&bnx2x_fp(bp, i, napi));
7742 bnx2x_free_mem(bp); 7670 bnx2x_free_mem(bp);
7743 7671
@@ -7928,7 +7856,7 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7928 bnx2x_free_irq(bp); 7856 bnx2x_free_irq(bp);
7929 7857
7930 /* Wait until tx fastpath tasks complete */ 7858 /* Wait until tx fastpath tasks complete */
7931 for_each_tx_queue(bp, i) { 7859 for_each_queue(bp, i) {
7932 struct bnx2x_fastpath *fp = &bp->fp[i]; 7860 struct bnx2x_fastpath *fp = &bp->fp[i];
7933 7861
7934 cnt = 1000; 7862 cnt = 1000;
@@ -8071,9 +7999,9 @@ unload_error:
8071 7999
8072 /* Free SKBs, SGEs, TPA pool and driver internals */ 8000 /* Free SKBs, SGEs, TPA pool and driver internals */
8073 bnx2x_free_skbs(bp); 8001 bnx2x_free_skbs(bp);
8074 for_each_rx_queue(bp, i) 8002 for_each_queue(bp, i)
8075 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 8003 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8076 for_each_rx_queue(bp, i) 8004 for_each_queue(bp, i)
8077 netif_napi_del(&bnx2x_fp(bp, i, napi)); 8005 netif_napi_del(&bnx2x_fp(bp, i, napi));
8078 bnx2x_free_mem(bp); 8006 bnx2x_free_mem(bp);
8079 8007
@@ -10269,7 +10197,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10269 struct sk_buff *skb; 10197 struct sk_buff *skb;
10270 unsigned char *packet; 10198 unsigned char *packet;
10271 struct bnx2x_fastpath *fp_rx = &bp->fp[0]; 10199 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
10272 struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues]; 10200 struct bnx2x_fastpath *fp_tx = &bp->fp[0];
10273 u16 tx_start_idx, tx_idx; 10201 u16 tx_start_idx, tx_idx;
10274 u16 rx_start_idx, rx_idx; 10202 u16 rx_start_idx, rx_idx;
10275 u16 pkt_prod, bd_prod; 10203 u16 pkt_prod, bd_prod;
@@ -10346,13 +10274,12 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10346 10274
10347 fp_tx->tx_db.data.prod += 2; 10275 fp_tx->tx_db.data.prod += 2;
10348 barrier(); 10276 barrier();
10349 DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw); 10277 DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
10350 10278
10351 mmiowb(); 10279 mmiowb();
10352 10280
10353 num_pkts++; 10281 num_pkts++;
10354 fp_tx->tx_bd_prod += 2; /* start + pbd */ 10282 fp_tx->tx_bd_prod += 2; /* start + pbd */
10355 bp->dev->trans_start = jiffies;
10356 10283
10357 udelay(100); 10284 udelay(100);
10358 10285
@@ -10725,7 +10652,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10725 switch(stringset) { 10652 switch(stringset) {
10726 case ETH_SS_STATS: 10653 case ETH_SS_STATS:
10727 if (is_multi(bp)) { 10654 if (is_multi(bp)) {
10728 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues; 10655 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
10729 if (!IS_E1HMF_MODE_STAT(bp)) 10656 if (!IS_E1HMF_MODE_STAT(bp))
10730 num_stats += BNX2X_NUM_STATS; 10657 num_stats += BNX2X_NUM_STATS;
10731 } else { 10658 } else {
@@ -10756,7 +10683,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10756 case ETH_SS_STATS: 10683 case ETH_SS_STATS:
10757 if (is_multi(bp)) { 10684 if (is_multi(bp)) {
10758 k = 0; 10685 k = 0;
10759 for_each_rx_queue(bp, i) { 10686 for_each_queue(bp, i) {
10760 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) 10687 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10761 sprintf(buf + (k + j)*ETH_GSTRING_LEN, 10688 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10762 bnx2x_q_stats_arr[j].string, i); 10689 bnx2x_q_stats_arr[j].string, i);
@@ -10793,7 +10720,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
10793 10720
10794 if (is_multi(bp)) { 10721 if (is_multi(bp)) {
10795 k = 0; 10722 k = 0;
10796 for_each_rx_queue(bp, i) { 10723 for_each_queue(bp, i) {
10797 hw_stats = (u32 *)&bp->fp[i].eth_q_stats; 10724 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10798 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { 10725 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10799 if (bnx2x_q_stats_arr[j].size == 0) { 10726 if (bnx2x_q_stats_arr[j].size == 0) {
@@ -10989,54 +10916,60 @@ static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10989 10916
10990static int bnx2x_poll(struct napi_struct *napi, int budget) 10917static int bnx2x_poll(struct napi_struct *napi, int budget)
10991{ 10918{
10919 int work_done = 0;
10992 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, 10920 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10993 napi); 10921 napi);
10994 struct bnx2x *bp = fp->bp; 10922 struct bnx2x *bp = fp->bp;
10995 int work_done = 0;
10996 10923
10924 while (1) {
10997#ifdef BNX2X_STOP_ON_ERROR 10925#ifdef BNX2X_STOP_ON_ERROR
10998 if (unlikely(bp->panic)) 10926 if (unlikely(bp->panic)) {
10999 goto poll_panic; 10927 napi_complete(napi);
10928 return 0;
10929 }
11000#endif 10930#endif
11001 10931
11002 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb); 10932 if (bnx2x_has_tx_work(fp))
11003 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256); 10933 bnx2x_tx_int(fp);
11004
11005 bnx2x_update_fpsb_idx(fp);
11006
11007 if (bnx2x_has_rx_work(fp)) {
11008 work_done = bnx2x_rx_int(fp, budget);
11009 10934
11010 /* must not complete if we consumed full budget */ 10935 if (bnx2x_has_rx_work(fp)) {
11011 if (work_done >= budget) 10936 work_done += bnx2x_rx_int(fp, budget - work_done);
11012 goto poll_again;
11013 }
11014 10937
11015 /* bnx2x_has_rx_work() reads the status block, thus we need to 10938 /* must not complete if we consumed full budget */
11016 * ensure that status block indices have been actually read 10939 if (work_done >= budget)
11017 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work) 10940 break;
11018 * so that we won't write the "newer" value of the status block to IGU 10941 }
11019 * (if there was a DMA right after bnx2x_has_rx_work and
11020 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
11021 * may be postponed to right before bnx2x_ack_sb). In this case
11022 * there will never be another interrupt until there is another update
11023 * of the status block, while there is still unhandled work.
11024 */
11025 rmb();
11026 10942
11027 if (!bnx2x_has_rx_work(fp)) { 10943 /* Fall out from the NAPI loop if needed */
11028#ifdef BNX2X_STOP_ON_ERROR 10944 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
11029poll_panic: 10945 bnx2x_update_fpsb_idx(fp);
11030#endif 10946 /* bnx2x_has_rx_work() reads the status block, thus we need
11031 napi_complete(napi); 10947 * to ensure that status block indices have been actually read
10948 * (bnx2x_update_fpsb_idx) prior to this check
10949 * (bnx2x_has_rx_work) so that we won't write the "newer"
10950 * value of the status block to IGU (if there was a DMA right
10951 * after bnx2x_has_rx_work and if there is no rmb, the memory
10952 * reading (bnx2x_update_fpsb_idx) may be postponed to right
10953 * before bnx2x_ack_sb). In this case there will never be
10954 * another interrupt until there is another update of the
10955 * status block, while there is still unhandled work.
10956 */
10957 rmb();
11032 10958
11033 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 10959 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
11034 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1); 10960 napi_complete(napi);
11035 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 10961 /* Re-enable interrupts */
11036 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1); 10962 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10963 le16_to_cpu(fp->fp_c_idx),
10964 IGU_INT_NOP, 1);
10965 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10966 le16_to_cpu(fp->fp_u_idx),
10967 IGU_INT_ENABLE, 1);
10968 break;
10969 }
10970 }
11037 } 10971 }
11038 10972
11039poll_again:
11040 return work_done; 10973 return work_done;
11041} 10974}
11042 10975
@@ -11221,7 +11154,7 @@ exit_lbl:
11221static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) 11154static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11222{ 11155{
11223 struct bnx2x *bp = netdev_priv(dev); 11156 struct bnx2x *bp = netdev_priv(dev);
11224 struct bnx2x_fastpath *fp, *fp_stat; 11157 struct bnx2x_fastpath *fp;
11225 struct netdev_queue *txq; 11158 struct netdev_queue *txq;
11226 struct sw_tx_bd *tx_buf; 11159 struct sw_tx_bd *tx_buf;
11227 struct eth_tx_start_bd *tx_start_bd; 11160 struct eth_tx_start_bd *tx_start_bd;
@@ -11243,11 +11176,10 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11243 fp_index = skb_get_queue_mapping(skb); 11176 fp_index = skb_get_queue_mapping(skb);
11244 txq = netdev_get_tx_queue(dev, fp_index); 11177 txq = netdev_get_tx_queue(dev, fp_index);
11245 11178
11246 fp = &bp->fp[fp_index + bp->num_rx_queues]; 11179 fp = &bp->fp[fp_index];
11247 fp_stat = &bp->fp[fp_index];
11248 11180
11249 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) { 11181 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
11250 fp_stat->eth_q_stats.driver_xoff++; 11182 fp->eth_q_stats.driver_xoff++;
11251 netif_tx_stop_queue(txq); 11183 netif_tx_stop_queue(txq);
11252 BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); 11184 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
11253 return NETDEV_TX_BUSY; 11185 return NETDEV_TX_BUSY;
@@ -11473,7 +11405,7 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11473 11405
11474 fp->tx_db.data.prod += nbd; 11406 fp->tx_db.data.prod += nbd;
11475 barrier(); 11407 barrier();
11476 DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw); 11408 DOORBELL(bp, fp->index, fp->tx_db.raw);
11477 11409
11478 mmiowb(); 11410 mmiowb();
11479 11411
@@ -11484,11 +11416,11 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11484 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod 11416 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11485 if we put Tx into XOFF state. */ 11417 if we put Tx into XOFF state. */
11486 smp_mb(); 11418 smp_mb();
11487 fp_stat->eth_q_stats.driver_xoff++; 11419 fp->eth_q_stats.driver_xoff++;
11488 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3) 11420 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
11489 netif_tx_wake_queue(txq); 11421 netif_tx_wake_queue(txq);
11490 } 11422 }
11491 fp_stat->tx_pkt++; 11423 fp->tx_pkt++;
11492 11424
11493 return NETDEV_TX_OK; 11425 return NETDEV_TX_OK;
11494} 11426}
@@ -12376,9 +12308,9 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12376 12308
12377 /* Free SKBs, SGEs, TPA pool and driver internals */ 12309 /* Free SKBs, SGEs, TPA pool and driver internals */
12378 bnx2x_free_skbs(bp); 12310 bnx2x_free_skbs(bp);
12379 for_each_rx_queue(bp, i) 12311 for_each_queue(bp, i)
12380 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 12312 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
12381 for_each_rx_queue(bp, i) 12313 for_each_queue(bp, i)
12382 netif_napi_del(&bnx2x_fp(bp, i, napi)); 12314 netif_napi_del(&bnx2x_fp(bp, i, napi));
12383 bnx2x_free_mem(bp); 12315 bnx2x_free_mem(bp);
12384 12316
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 1d058192328..88c3fe80b35 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -446,6 +446,48 @@ static u16 __ad_timer_to_ticks(u16 timer_type, u16 par)
446///////////////////////////////////////////////////////////////////////////////// 446/////////////////////////////////////////////////////////////////////////////////
447 447
448/** 448/**
449 * __choose_matched - update a port's matched variable from a received lacpdu
450 * @lacpdu: the lacpdu we've received
451 * @port: the port we're looking at
452 *
453 * Update the value of the matched variable, using parameter values from a
454 * newly received lacpdu. Parameter values for the partner carried in the
455 * received PDU are compared with the corresponding operational parameter
456 * values for the actor. Matched is set to TRUE if all of these parameters
457 * match and the PDU parameter partner_state.aggregation has the same value as
458 * actor_oper_port_state.aggregation and lacp will actively maintain the link
459 * in the aggregation. Matched is also set to TRUE if the value of
460 * actor_state.aggregation in the received PDU is set to FALSE, i.e., indicates
461 * an individual link and lacp will actively maintain the link. Otherwise,
462 * matched is set to FALSE. LACP is considered to be actively maintaining the
463 * link if either the PDU's actor_state.lacp_activity variable is TRUE or both
464 * the actor's actor_oper_port_state.lacp_activity and the PDU's
465 * partner_state.lacp_activity variables are TRUE.
466 *
467 * Note: the AD_PORT_MATCHED "variable" is not specified by 802.3ad; it is
468 * used here to implement the language from 802.3ad 43.4.9 that requires
469 * recordPDU to "match" the LACPDU parameters to the stored values.
470 */
471static void __choose_matched(struct lacpdu *lacpdu, struct port *port)
472{
473 // check if all parameters are alike
474 if (((ntohs(lacpdu->partner_port) == port->actor_port_number) &&
475 (ntohs(lacpdu->partner_port_priority) == port->actor_port_priority) &&
476 !MAC_ADDRESS_COMPARE(&(lacpdu->partner_system), &(port->actor_system)) &&
477 (ntohs(lacpdu->partner_system_priority) == port->actor_system_priority) &&
478 (ntohs(lacpdu->partner_key) == port->actor_oper_port_key) &&
479 ((lacpdu->partner_state & AD_STATE_AGGREGATION) == (port->actor_oper_port_state & AD_STATE_AGGREGATION))) ||
480 // or this is individual link(aggregation == FALSE)
481 ((lacpdu->actor_state & AD_STATE_AGGREGATION) == 0)
482 ) {
483 // update the state machine Matched variable
484 port->sm_vars |= AD_PORT_MATCHED;
485 } else {
486 port->sm_vars &= ~AD_PORT_MATCHED;
487 }
488}
489
490/**
449 * __record_pdu - record parameters from a received lacpdu 491 * __record_pdu - record parameters from a received lacpdu
450 * @lacpdu: the lacpdu we've received 492 * @lacpdu: the lacpdu we've received
451 * @port: the port we're looking at 493 * @port: the port we're looking at
@@ -459,6 +501,7 @@ static void __record_pdu(struct lacpdu *lacpdu, struct port *port)
459 if (lacpdu && port) { 501 if (lacpdu && port) {
460 struct port_params *partner = &port->partner_oper; 502 struct port_params *partner = &port->partner_oper;
461 503
504 __choose_matched(lacpdu, port);
462 // record the new parameter values for the partner operational 505 // record the new parameter values for the partner operational
463 partner->port_number = ntohs(lacpdu->actor_port); 506 partner->port_number = ntohs(lacpdu->actor_port);
464 partner->port_priority = ntohs(lacpdu->actor_port_priority); 507 partner->port_priority = ntohs(lacpdu->actor_port_priority);
@@ -563,47 +606,6 @@ static void __update_default_selected(struct port *port)
563} 606}
564 607
565/** 608/**
566 * __choose_matched - update a port's matched variable from a received lacpdu
567 * @lacpdu: the lacpdu we've received
568 * @port: the port we're looking at
569 *
570 * Update the value of the matched variable, using parameter values from a
571 * newly received lacpdu. Parameter values for the partner carried in the
572 * received PDU are compared with the corresponding operational parameter
573 * values for the actor. Matched is set to TRUE if all of these parameters
574 * match and the PDU parameter partner_state.aggregation has the same value as
575 * actor_oper_port_state.aggregation and lacp will actively maintain the link
576 * in the aggregation. Matched is also set to TRUE if the value of
577 * actor_state.aggregation in the received PDU is set to FALSE, i.e., indicates
578 * an individual link and lacp will actively maintain the link. Otherwise,
579 * matched is set to FALSE. LACP is considered to be actively maintaining the
580 * link if either the PDU's actor_state.lacp_activity variable is TRUE or both
581 * the actor's actor_oper_port_state.lacp_activity and the PDU's
582 * partner_state.lacp_activity variables are TRUE.
583 */
584static void __choose_matched(struct lacpdu *lacpdu, struct port *port)
585{
586 // validate lacpdu and port
587 if (lacpdu && port) {
588 // check if all parameters are alike
589 if (((ntohs(lacpdu->partner_port) == port->actor_port_number) &&
590 (ntohs(lacpdu->partner_port_priority) == port->actor_port_priority) &&
591 !MAC_ADDRESS_COMPARE(&(lacpdu->partner_system), &(port->actor_system)) &&
592 (ntohs(lacpdu->partner_system_priority) == port->actor_system_priority) &&
593 (ntohs(lacpdu->partner_key) == port->actor_oper_port_key) &&
594 ((lacpdu->partner_state & AD_STATE_AGGREGATION) == (port->actor_oper_port_state & AD_STATE_AGGREGATION))) ||
595 // or this is individual link(aggregation == FALSE)
596 ((lacpdu->actor_state & AD_STATE_AGGREGATION) == 0)
597 ) {
598 // update the state machine Matched variable
599 port->sm_vars |= AD_PORT_MATCHED;
600 } else {
601 port->sm_vars &= ~AD_PORT_MATCHED;
602 }
603 }
604}
605
606/**
607 * __update_ntt - update a port's ntt variable from a received lacpdu 609 * __update_ntt - update a port's ntt variable from a received lacpdu
608 * @lacpdu: the lacpdu we've received 610 * @lacpdu: the lacpdu we've received
609 * @port: the port we're looking at 611 * @port: the port we're looking at
@@ -1134,7 +1136,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
1134 __update_selected(lacpdu, port); 1136 __update_selected(lacpdu, port);
1135 __update_ntt(lacpdu, port); 1137 __update_ntt(lacpdu, port);
1136 __record_pdu(lacpdu, port); 1138 __record_pdu(lacpdu, port);
1137 __choose_matched(lacpdu, port);
1138 port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(port->actor_oper_port_state & AD_STATE_LACP_TIMEOUT)); 1139 port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(port->actor_oper_port_state & AD_STATE_LACP_TIMEOUT));
1139 port->actor_oper_port_state &= ~AD_STATE_EXPIRED; 1140 port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
1140 // verify that if the aggregator is enabled, the port is enabled too. 1141 // verify that if the aggregator is enabled, the port is enabled too.
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index ecea6c29413..726bd755338 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -158,7 +158,7 @@ MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to the
158static const char * const version = 158static const char * const version =
159 DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"; 159 DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n";
160 160
161int bond_net_id; 161int bond_net_id __read_mostly;
162 162
163static __be32 arp_target[BOND_MAX_ARP_TARGETS]; 163static __be32 arp_target[BOND_MAX_ARP_TARGETS];
164static int arp_ip_count; 164static int arp_ip_count;
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index b819cc2a429..bb803fa1e6a 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -35,63 +35,9 @@ config CAN_CALC_BITTIMING
35 arguments "tq", "prop_seg", "phase_seg1", "phase_seg2" and "sjw". 35 arguments "tq", "prop_seg", "phase_seg1", "phase_seg2" and "sjw".
36 If unsure, say Y. 36 If unsure, say Y.
37 37
38config CAN_SJA1000
39 depends on CAN_DEV && HAS_IOMEM
40 tristate "Philips SJA1000"
41 ---help---
42 Driver for the SJA1000 CAN controllers from Philips or NXP
43
44config CAN_SJA1000_ISA
45 depends on CAN_SJA1000 && ISA
46 tristate "ISA Bus based legacy SJA1000 driver"
47 ---help---
48 This driver adds legacy support for SJA1000 chips connected to
49 the ISA bus using I/O port, memory mapped or indirect access.
50
51config CAN_SJA1000_PLATFORM
52 depends on CAN_SJA1000
53 tristate "Generic Platform Bus based SJA1000 driver"
54 ---help---
55 This driver adds support for the SJA1000 chips connected to
56 the "platform bus" (Linux abstraction for directly to the
57 processor attached devices). Which can be found on various
58 boards from Phytec (http://www.phytec.de) like the PCM027,
59 PCM038.
60
61config CAN_SJA1000_OF_PLATFORM
62 depends on CAN_SJA1000 && PPC_OF
63 tristate "Generic OF Platform Bus based SJA1000 driver"
64 ---help---
65 This driver adds support for the SJA1000 chips connected to
66 the OpenFirmware "platform bus" found on embedded systems with
67 OpenFirmware bindings, e.g. if you have a PowerPC based system
68 you may want to enable this option.
69
70config CAN_EMS_PCI
71 tristate "EMS CPC-PCI, CPC-PCIe and CPC-104P Card"
72 depends on PCI && CAN_SJA1000
73 ---help---
74 This driver is for the one, two or four channel CPC-PCI,
75 CPC-PCIe and CPC-104P cards from EMS Dr. Thomas Wuensche
76 (http://www.ems-wuensche.de).
77
78config CAN_EMS_USB
79 tristate "EMS CPC-USB/ARM7 CAN/USB interface"
80 depends on USB && CAN_DEV
81 ---help---
82 This driver is for the one channel CPC-USB/ARM7 CAN/USB interface
 83 from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
84
85config CAN_KVASER_PCI
86 tristate "Kvaser PCIcanx and Kvaser PCIcan PCI Cards"
87 depends on PCI && CAN_SJA1000
88 ---help---
 89 This driver is for the PCIcanx and PCIcan cards (1, 2 or
90 4 channel) from Kvaser (http://www.kvaser.com).
91
92config CAN_AT91 38config CAN_AT91
93 tristate "Atmel AT91 onchip CAN controller" 39 tristate "Atmel AT91 onchip CAN controller"
94 depends on CAN && CAN_DEV && ARCH_AT91SAM9263 40 depends on CAN_DEV && ARCH_AT91SAM9263
95 ---help--- 41 ---help---
96 This is a driver for the SoC CAN controller in Atmel's AT91SAM9263. 42 This is a driver for the SoC CAN controller in Atmel's AT91SAM9263.
97 43
@@ -108,6 +54,12 @@ config CAN_MCP251X
108 ---help--- 54 ---help---
109 Driver for the Microchip MCP251x SPI CAN controllers. 55 Driver for the Microchip MCP251x SPI CAN controllers.
110 56
57source "drivers/net/can/mscan/Kconfig"
58
59source "drivers/net/can/sja1000/Kconfig"
60
61source "drivers/net/can/usb/Kconfig"
62
111config CAN_DEBUG_DEVICES 63config CAN_DEBUG_DEVICES
112 bool "CAN devices debugging messages" 64 bool "CAN devices debugging messages"
113 depends on CAN 65 depends on CAN
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 14891817ea5..56899fef1c6 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -10,6 +10,7 @@ can-dev-y := dev.o
10obj-y += usb/ 10obj-y += usb/
11 11
12obj-$(CONFIG_CAN_SJA1000) += sja1000/ 12obj-$(CONFIG_CAN_SJA1000) += sja1000/
13obj-$(CONFIG_CAN_MSCAN) += mscan/
13obj-$(CONFIG_CAN_AT91) += at91_can.o 14obj-$(CONFIG_CAN_AT91) += at91_can.o
14obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o 15obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
15obj-$(CONFIG_CAN_MCP251X) += mcp251x.o 16obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 26c89aaeba6..c1bb29f0322 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -677,6 +677,11 @@ nla_put_failure:
677 return -EMSGSIZE; 677 return -EMSGSIZE;
678} 678}
679 679
680static size_t can_get_xstats_size(const struct net_device *dev)
681{
682 return sizeof(struct can_device_stats);
683}
684
680static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev) 685static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev)
681{ 686{
682 struct can_priv *priv = netdev_priv(dev); 687 struct can_priv *priv = netdev_priv(dev);
@@ -705,6 +710,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
705 .changelink = can_changelink, 710 .changelink = can_changelink,
706 .get_size = can_get_size, 711 .get_size = can_get_size,
707 .fill_info = can_fill_info, 712 .fill_info = can_fill_info,
713 .get_xstats_size = can_get_xstats_size,
708 .fill_xstats = can_fill_xstats, 714 .fill_xstats = can_fill_xstats,
709}; 715};
710 716
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 8f48f4b50b7..78b1b69b292 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -594,13 +594,7 @@ static int mcp251x_do_set_bittiming(struct net_device *net)
594static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv, 594static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv,
595 struct spi_device *spi) 595 struct spi_device *spi)
596{ 596{
597 int ret; 597 mcp251x_do_set_bittiming(net);
598
599 ret = open_candev(net);
600 if (ret) {
601 dev_err(&spi->dev, "unable to set initial baudrate!\n");
602 return ret;
603 }
604 598
605 /* Enable RX0->RX1 buffer roll over and disable filters */ 599 /* Enable RX0->RX1 buffer roll over and disable filters */
606 mcp251x_write_bits(spi, RXBCTRL(0), 600 mcp251x_write_bits(spi, RXBCTRL(0),
@@ -671,6 +665,12 @@ static int mcp251x_open(struct net_device *net)
671 struct mcp251x_platform_data *pdata = spi->dev.platform_data; 665 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
672 int ret; 666 int ret;
673 667
668 ret = open_candev(net);
669 if (ret) {
670 dev_err(&spi->dev, "unable to set initial baudrate!\n");
671 return ret;
672 }
673
674 if (pdata->transceiver_enable) 674 if (pdata->transceiver_enable)
675 pdata->transceiver_enable(1); 675 pdata->transceiver_enable(1);
676 676
@@ -684,6 +684,7 @@ static int mcp251x_open(struct net_device *net)
684 dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq); 684 dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
685 if (pdata->transceiver_enable) 685 if (pdata->transceiver_enable)
686 pdata->transceiver_enable(0); 686 pdata->transceiver_enable(0);
687 close_candev(net);
687 return ret; 688 return ret;
688 } 689 }
689 690
@@ -692,8 +693,10 @@ static int mcp251x_open(struct net_device *net)
692 ret = mcp251x_setup(net, priv, spi); 693 ret = mcp251x_setup(net, priv, spi);
693 if (ret) { 694 if (ret) {
694 free_irq(spi->irq, net); 695 free_irq(spi->irq, net);
696 mcp251x_hw_sleep(spi);
695 if (pdata->transceiver_enable) 697 if (pdata->transceiver_enable)
696 pdata->transceiver_enable(0); 698 pdata->transceiver_enable(0);
699 close_candev(net);
697 return ret; 700 return ret;
698 } 701 }
699 mcp251x_set_normal_mode(spi); 702 mcp251x_set_normal_mode(spi);
@@ -956,7 +959,6 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
956 priv->can.bittiming_const = &mcp251x_bittiming_const; 959 priv->can.bittiming_const = &mcp251x_bittiming_const;
957 priv->can.do_set_mode = mcp251x_do_set_mode; 960 priv->can.do_set_mode = mcp251x_do_set_mode;
958 priv->can.clock.freq = pdata->oscillator_frequency / 2; 961 priv->can.clock.freq = pdata->oscillator_frequency / 2;
959 priv->can.do_set_bittiming = mcp251x_do_set_bittiming;
960 priv->net = net; 962 priv->net = net;
961 dev_set_drvdata(&spi->dev, priv); 963 dev_set_drvdata(&spi->dev, priv);
962 964
diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig
new file mode 100644
index 00000000000..cd0f2d6f375
--- /dev/null
+++ b/drivers/net/can/mscan/Kconfig
@@ -0,0 +1,23 @@
1config CAN_MSCAN
2 depends on CAN_DEV && (PPC || M68K || M68KNOMMU)
3 tristate "Support for Freescale MSCAN based chips"
4 ---help---
5 The Motorola Scalable Controller Area Network (MSCAN) definition
6 is based on the MSCAN12 definition which is the specific
7 implementation of the Motorola Scalable CAN concept targeted for
8 the Motorola MC68HC12 Microcontroller Family.
9
10if CAN_MSCAN
11
12config CAN_MPC5XXX
13 tristate "Freescale MPC5xxx onboard CAN controller"
14 depends on PPC_MPC52xx
15 ---help---
16 If you say yes here you get support for Freescale's MPC5xxx
17 onboard CAN controller.
18
19 This driver can also be built as a module. If so, the module
20 will be called mscan-mpc5xxx.ko.
21
22endif
23
diff --git a/drivers/net/can/mscan/Makefile b/drivers/net/can/mscan/Makefile
new file mode 100644
index 00000000000..c9fab17cd8b
--- /dev/null
+++ b/drivers/net/can/mscan/Makefile
@@ -0,0 +1,5 @@
1
2obj-$(CONFIG_CAN_MPC5XXX) += mscan-mpc5xxx.o
3mscan-mpc5xxx-objs := mscan.o mpc5xxx_can.o
4
5ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
new file mode 100644
index 00000000000..1de6f6349b1
--- /dev/null
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -0,0 +1,259 @@
1/*
2 * CAN bus driver for the Freescale MPC5xxx embedded CPU.
3 *
4 * Copyright (C) 2004-2005 Andrey Volkov <avolkov@varma-el.com>,
5 * Varma Electronics Oy
6 * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
7 * Copyright (C) 2009 Wolfram Sang, Pengutronix <w.sang@pengutronix.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the version 2 of the GNU General Public License
11 * as published by the Free Software Foundation
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/interrupt.h>
26#include <linux/platform_device.h>
27#include <linux/netdevice.h>
28#include <linux/can.h>
29#include <linux/can/dev.h>
30#include <linux/of_platform.h>
31#include <sysdev/fsl_soc.h>
32#include <linux/io.h>
33#include <asm/mpc52xx.h>
34
35#include "mscan.h"
36
37#define DRV_NAME "mpc5xxx_can"
38
39static struct of_device_id mpc52xx_cdm_ids[] __devinitdata = {
40 { .compatible = "fsl,mpc5200-cdm", },
41 {}
42};
43
44/*
45 * Get frequency of the MSCAN clock source
46 *
47 * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock (IP_CLK)
48 * can be selected. According to the MPC5200 user's manual, the oscillator
49 * clock is the better choice as it has less jitter but due to a hardware
50 * bug, it can not be selected for the old MPC5200 Rev. A chips.
51 */
52
53static unsigned int __devinit mpc52xx_can_clock_freq(struct of_device *of,
54 int clock_src)
55{
56 unsigned int pvr;
57 struct mpc52xx_cdm __iomem *cdm;
58 struct device_node *np_cdm;
59 unsigned int freq;
60 u32 val;
61
62 pvr = mfspr(SPRN_PVR);
63
64 freq = mpc5xxx_get_bus_frequency(of->node);
65 if (!freq)
66 return 0;
67
68 if (clock_src == MSCAN_CLKSRC_BUS || pvr == 0x80822011)
69 return freq;
70
71 /* Determine SYS_XTAL_IN frequency from the clock domain settings */
72 np_cdm = of_find_matching_node(NULL, mpc52xx_cdm_ids);
73 if (!np_cdm) {
74 dev_err(&of->dev, "can't get clock node!\n");
75 return 0;
76 }
77 cdm = of_iomap(np_cdm, 0);
78 of_node_put(np_cdm);
79
80 if (in_8(&cdm->ipb_clk_sel) & 0x1)
81 freq *= 2;
82 val = in_be32(&cdm->rstcfg);
83
84 freq *= (val & (1 << 5)) ? 8 : 4;
85 freq /= (val & (1 << 6)) ? 12 : 16;
86
87 iounmap(cdm);
88
89 return freq;
90}
91
92static int __devinit mpc5xxx_can_probe(struct of_device *ofdev,
93 const struct of_device_id *id)
94{
95 struct device_node *np = ofdev->node;
96 struct net_device *dev;
97 struct mscan_priv *priv;
98 void __iomem *base;
99 const char *clk_src;
100 int err, irq, clock_src;
101
102 base = of_iomap(ofdev->node, 0);
103 if (!base) {
104 dev_err(&ofdev->dev, "couldn't ioremap\n");
105 err = -ENOMEM;
106 goto exit_release_mem;
107 }
108
109 irq = irq_of_parse_and_map(np, 0);
110 if (!irq) {
111 dev_err(&ofdev->dev, "no irq found\n");
112 err = -ENODEV;
113 goto exit_unmap_mem;
114 }
115
116 dev = alloc_mscandev();
117 if (!dev) {
118 err = -ENOMEM;
119 goto exit_dispose_irq;
120 }
121
122 priv = netdev_priv(dev);
123 priv->reg_base = base;
124 dev->irq = irq;
125
126 /*
127 * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock
128 * (IP_CLK) can be selected as MSCAN clock source. According to
129 * the MPC5200 user's manual, the oscillator clock is the better
130 * choice as it has less jitter. For this reason, it is selected
131 * by default.
132 */
133 clk_src = of_get_property(np, "fsl,mscan-clock-source", NULL);
134 if (clk_src && strcmp(clk_src, "ip") == 0)
135 clock_src = MSCAN_CLKSRC_BUS;
136 else
137 clock_src = MSCAN_CLKSRC_XTAL;
138 priv->can.clock.freq = mpc52xx_can_clock_freq(ofdev, clock_src);
139 if (!priv->can.clock.freq) {
140 dev_err(&ofdev->dev, "couldn't get MSCAN clock frequency\n");
141 err = -ENODEV;
142 goto exit_free_mscan;
143 }
144
145 SET_NETDEV_DEV(dev, &ofdev->dev);
146
147 err = register_mscandev(dev, clock_src);
148 if (err) {
149 dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
150 DRV_NAME, err);
151 goto exit_free_mscan;
152 }
153
154 dev_set_drvdata(&ofdev->dev, dev);
155
156 dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n",
157 priv->reg_base, dev->irq, priv->can.clock.freq);
158
159 return 0;
160
161exit_free_mscan:
162 free_candev(dev);
163exit_dispose_irq:
164 irq_dispose_mapping(irq);
165exit_unmap_mem:
166 iounmap(base);
167exit_release_mem:
168 return err;
169}
170
171static int __devexit mpc5xxx_can_remove(struct of_device *ofdev)
172{
173 struct net_device *dev = dev_get_drvdata(&ofdev->dev);
174 struct mscan_priv *priv = netdev_priv(dev);
175
176 dev_set_drvdata(&ofdev->dev, NULL);
177
178 unregister_mscandev(dev);
179 iounmap(priv->reg_base);
180 irq_dispose_mapping(dev->irq);
181 free_candev(dev);
182
183 return 0;
184}
185
186#ifdef CONFIG_PM
187static struct mscan_regs saved_regs;
188static int mpc5xxx_can_suspend(struct of_device *ofdev, pm_message_t state)
189{
190 struct net_device *dev = dev_get_drvdata(&ofdev->dev);
191 struct mscan_priv *priv = netdev_priv(dev);
192 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
193
194 _memcpy_fromio(&saved_regs, regs, sizeof(*regs));
195
196 return 0;
197}
198
199static int mpc5xxx_can_resume(struct of_device *ofdev)
200{
201 struct net_device *dev = dev_get_drvdata(&ofdev->dev);
202 struct mscan_priv *priv = netdev_priv(dev);
203 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
204
205 regs->canctl0 |= MSCAN_INITRQ;
206 while (!(regs->canctl1 & MSCAN_INITAK))
207 udelay(10);
208
209 regs->canctl1 = saved_regs.canctl1;
210 regs->canbtr0 = saved_regs.canbtr0;
211 regs->canbtr1 = saved_regs.canbtr1;
212 regs->canidac = saved_regs.canidac;
213
214 /* restore masks, buffers etc. */
215 _memcpy_toio(&regs->canidar1_0, (void *)&saved_regs.canidar1_0,
216 sizeof(*regs) - offsetof(struct mscan_regs, canidar1_0));
217
218 regs->canctl0 &= ~MSCAN_INITRQ;
219 regs->cantbsel = saved_regs.cantbsel;
220 regs->canrier = saved_regs.canrier;
221 regs->cantier = saved_regs.cantier;
222 regs->canctl0 = saved_regs.canctl0;
223
224 return 0;
225}
226#endif
227
228static struct of_device_id __devinitdata mpc5xxx_can_table[] = {
229 {.compatible = "fsl,mpc5200-mscan"},
230 {},
231};
232
233static struct of_platform_driver mpc5xxx_can_driver = {
234 .owner = THIS_MODULE,
235 .name = "mpc5xxx_can",
236 .probe = mpc5xxx_can_probe,
237 .remove = __devexit_p(mpc5xxx_can_remove),
238#ifdef CONFIG_PM
239 .suspend = mpc5xxx_can_suspend,
240 .resume = mpc5xxx_can_resume,
241#endif
242 .match_table = mpc5xxx_can_table,
243};
244
245static int __init mpc5xxx_can_init(void)
246{
247 return of_register_platform_driver(&mpc5xxx_can_driver);
248}
249module_init(mpc5xxx_can_init);
250
251static void __exit mpc5xxx_can_exit(void)
252{
253 return of_unregister_platform_driver(&mpc5xxx_can_driver);
254};
255module_exit(mpc5xxx_can_exit);
256
257MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
258MODULE_DESCRIPTION("Freescale MPC5200 CAN driver");
259MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
new file mode 100644
index 00000000000..bb06dfb58f2
--- /dev/null
+++ b/drivers/net/can/mscan/mscan.c
@@ -0,0 +1,668 @@
1/*
 * CAN bus driver for a generic (as far as possible) MSCAN controller.
3 *
4 * Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>,
5 * Varma Electronics Oy
6 * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
 * Copyright (C) 2008-2009 Pengutronix <kernel@pengutronix.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the version 2 of the GNU General Public License
11 * as published by the Free Software Foundation
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/interrupt.h>
26#include <linux/delay.h>
27#include <linux/netdevice.h>
28#include <linux/if_arp.h>
29#include <linux/if_ether.h>
30#include <linux/list.h>
31#include <linux/can.h>
32#include <linux/can/dev.h>
33#include <linux/can/error.h>
34#include <linux/io.h>
35
36#include "mscan.h"
37
/*
 * Bit-timing limits of the MSCAN cell; used by the CAN core to compute
 * BTR register values from a requested bitrate.
 */
static struct can_bittiming_const mscan_bittiming_const = {
	.name = "mscan",
	.tseg1_min = 4,
	.tseg1_max = 16,
	.tseg2_min = 2,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 64,
	.brp_inc = 1,
};
49
/*
 * Shadow copy of mode/interrupt-enable registers.
 * NOTE(review): not referenced by the code visible here -- presumably
 * used by platform suspend/resume code; confirm before removing.
 */
struct mscan_state {
	u8 mode;	/* CANCTL0 mode bits */
	u8 canrier;	/* receive interrupt enables */
	u8 cantier;	/* transmit interrupt enables */
};
55
/*
 * Maps the 2-bit RSTAT/TSTAT value extracted from CANRFLG (see
 * MSCAN_STATE_RX/MSCAN_STATE_TX in mscan.h) to the CAN core state.
 */
static enum can_state state_map[] = {
	CAN_STATE_ERROR_ACTIVE,
	CAN_STATE_ERROR_WARNING,
	CAN_STATE_ERROR_PASSIVE,
	CAN_STATE_BUS_OFF
};
62
63static int mscan_set_mode(struct net_device *dev, u8 mode)
64{
65 struct mscan_priv *priv = netdev_priv(dev);
66 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
67 int ret = 0;
68 int i;
69 u8 canctl1;
70
71 if (mode != MSCAN_NORMAL_MODE) {
72 if (priv->tx_active) {
73 /* Abort transfers before going to sleep */#
74 out_8(&regs->cantarq, priv->tx_active);
75 /* Suppress TX done interrupts */
76 out_8(&regs->cantier, 0);
77 }
78
79 canctl1 = in_8(&regs->canctl1);
80 if ((mode & MSCAN_SLPRQ) && !(canctl1 & MSCAN_SLPAK)) {
81 setbits8(&regs->canctl0, MSCAN_SLPRQ);
82 for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
83 if (in_8(&regs->canctl1) & MSCAN_SLPAK)
84 break;
85 udelay(100);
86 }
87 /*
88 * The mscan controller will fail to enter sleep mode,
89 * while there are irregular activities on bus, like
90 * somebody keeps retransmitting. This behavior is
91 * undocumented and seems to differ between mscan built
92 * in mpc5200b and mpc5200. We proceed in that case,
93 * since otherwise the slprq will be kept set and the
94 * controller will get stuck. NOTE: INITRQ or CSWAI
95 * will abort all active transmit actions, if still
96 * any, at once.
97 */
98 if (i >= MSCAN_SET_MODE_RETRIES)
99 dev_dbg(dev->dev.parent,
100 "device failed to enter sleep mode. "
101 "We proceed anyhow.\n");
102 else
103 priv->can.state = CAN_STATE_SLEEPING;
104 }
105
106 if ((mode & MSCAN_INITRQ) && !(canctl1 & MSCAN_INITAK)) {
107 setbits8(&regs->canctl0, MSCAN_INITRQ);
108 for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
109 if (in_8(&regs->canctl1) & MSCAN_INITAK)
110 break;
111 }
112 if (i >= MSCAN_SET_MODE_RETRIES)
113 ret = -ENODEV;
114 }
115 if (!ret)
116 priv->can.state = CAN_STATE_STOPPED;
117
118 if (mode & MSCAN_CSWAI)
119 setbits8(&regs->canctl0, MSCAN_CSWAI);
120
121 } else {
122 canctl1 = in_8(&regs->canctl1);
123 if (canctl1 & (MSCAN_SLPAK | MSCAN_INITAK)) {
124 clrbits8(&regs->canctl0, MSCAN_SLPRQ | MSCAN_INITRQ);
125 for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
126 canctl1 = in_8(&regs->canctl1);
127 if (!(canctl1 & (MSCAN_INITAK | MSCAN_SLPAK)))
128 break;
129 }
130 if (i >= MSCAN_SET_MODE_RETRIES)
131 ret = -ENODEV;
132 else
133 priv->can.state = CAN_STATE_ERROR_ACTIVE;
134 }
135 }
136 return ret;
137}
138
/*
 * Bring the controller into operation: reset the TX bookkeeping, leave
 * init/sleep mode, derive the initial CAN state from the current
 * CANRFLG status bits and enable receive/state-change interrupts.
 * Returns 0 or a negative error from mscan_set_mode().
 */
static int mscan_start(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
	u8 canrflg;
	int err;

	/* Mask receive interrupts while (re)starting. */
	out_8(&regs->canrier, 0);

	INIT_LIST_HEAD(&priv->tx_head);
	priv->prev_buf_id = 0;
	priv->cur_pri = 0;
	priv->tx_active = 0;
	priv->shadow_canrier = 0;
	priv->flags = 0;

	err = mscan_set_mode(dev, MSCAN_NORMAL_MODE);
	if (err)
		return err;

	/* Seed state tracking from the worse of the RX/TX status fields. */
	canrflg = in_8(&regs->canrflg);
	priv->shadow_statflg = canrflg & MSCAN_STAT_MSK;
	priv->can.state = state_map[max(MSCAN_STATE_RX(canrflg),
				    MSCAN_STATE_TX(canrflg))];
	out_8(&regs->cantier, 0);

	/* Enable receive interrupts. */
	out_8(&regs->canrier, MSCAN_OVRIE | MSCAN_RXFIE | MSCAN_CSCIE |
	      MSCAN_RSTATE1 | MSCAN_RSTATE0 | MSCAN_TSTATE1 | MSCAN_TSTATE0);

	return 0;
}
171
/*
 * Queue a CAN frame into one of the three hardware TX buffers.
 *
 * Buffers with a lower id have higher on-wire priority; to preserve
 * submission order, the local priority counter (written to tbpr) is
 * bumped whenever a lower-id buffer must be reused. When that counter
 * is about to wrap, the queue is stopped until all pending frames have
 * left the chip (F_TX_WAIT_ALL).
 */
static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct can_frame *frame = (struct can_frame *)skb->data;
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
	int i, rtr, buf_id;
	u32 can_id;

	/*
	 * NOTE(review): -EINVAL is not a netdev_tx_t value; confirm what
	 * the networking core does with this return.
	 */
	if (frame->can_dlc > 8)
		return -EINVAL;

	out_8(&regs->cantier, 0);

	/* Bitmask of currently free TX buffers; pick the lowest one. */
	i = ~priv->tx_active & MSCAN_TXE;
	buf_id = ffs(i) - 1;
	switch (hweight8(i)) {
	case 0:
		netif_stop_queue(dev);
		dev_err(dev->dev.parent, "Tx Ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	case 1:
		/*
		 * if buf_id < 3, then current frame will be send out of order,
		 * since buffer with lower id have higher priority (hell..)
		 */
		netif_stop_queue(dev);
		/* fall through */
	case 2:
		if (buf_id < priv->prev_buf_id) {
			priv->cur_pri++;
			if (priv->cur_pri == 0xff) {
				/* Priority about to wrap: drain everything. */
				set_bit(F_TX_WAIT_ALL, &priv->flags);
				netif_stop_queue(dev);
			}
		}
		set_bit(F_TX_PROGRESS, &priv->flags);
		break;
	}
	priv->prev_buf_id = buf_id;
	out_8(&regs->cantbsel, i);

	rtr = frame->can_id & CAN_RTR_FLAG;

	/* RTR is always the lowest bit of interest, then IDs follow */
	if (frame->can_id & CAN_EFF_FLAG) {
		can_id = (frame->can_id & CAN_EFF_MASK)
			 << (MSCAN_EFF_RTR_SHIFT + 1);
		if (rtr)
			can_id |= 1 << MSCAN_EFF_RTR_SHIFT;
		out_be16(&regs->tx.idr3_2, can_id);

		can_id >>= 16;
		/* EFF_FLAGS are inbetween the IDs :( */
		can_id = (can_id & 0x7) | ((can_id << 2) & 0xffe0)
			 | MSCAN_EFF_FLAGS;
	} else {
		can_id = (frame->can_id & CAN_SFF_MASK)
			 << (MSCAN_SFF_RTR_SHIFT + 1);
		if (rtr)
			can_id |= 1 << MSCAN_SFF_RTR_SHIFT;
	}
	out_be16(&regs->tx.idr1_0, can_id);

	if (!rtr) {
		void __iomem *data = &regs->tx.dsr1_0;
		u16 *payload = (u16 *)frame->data;

		/* It is safe to write into dsr[dlc+1] */
		for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
			out_be16(data, *payload++);
			data += 2 + _MSCAN_RESERVED_DSR_SIZE;
		}
	}

	out_8(&regs->tx.dlr, frame->can_dlc);
	out_8(&regs->tx.tbpr, priv->cur_pri);

	/* Start transmission. */
	out_8(&regs->cantflg, 1 << buf_id);

	if (!test_bit(F_TX_PROGRESS, &priv->flags))
		dev->trans_start = jiffies;

	/* Remember submission order for TX completion in the ISR. */
	list_add_tail(&priv->tx_queue[buf_id].list, &priv->tx_head);

	/* Keep the skb for local echo once the frame went out. */
	can_put_echo_skb(skb, dev, buf_id);

	/* Enable interrupt. */
	priv->tx_active |= 1 << buf_id;
	out_8(&regs->cantier, priv->tx_active);

	return NETDEV_TX_OK;
}
264
/* This function returns the old state to see where we came from */
static enum can_state check_set_state(struct net_device *dev, u8 canrflg)
{
	struct mscan_priv *priv = netdev_priv(dev);
	enum can_state state, old_state = priv->can.state;

	/*
	 * Only act on a state-change interrupt; states above BUS_OFF
	 * (stopped/sleeping, per enum can_state ordering) are left alone.
	 */
	if (canrflg & MSCAN_CSCIF && old_state <= CAN_STATE_BUS_OFF) {
		state = state_map[max(MSCAN_STATE_RX(canrflg),
				      MSCAN_STATE_TX(canrflg))];
		priv->can.state = state;
	}
	return old_state;
}
278
/*
 * Copy a received frame out of the hardware RX buffer into *frame and
 * release the buffer by acknowledging the RXF flag.
 */
static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
	u32 can_id;
	int i;

	can_id = in_be16(&regs->rx.idr1_0);
	/* bit 3 set: extended frame (cf. MSCAN_EFF_FLAGS = IDE + SRR) */
	if (can_id & (1 << 3)) {
		frame->can_id = CAN_EFF_FLAG;
		can_id = ((can_id << 16) | in_be16(&regs->rx.idr3_2));
		/* Squeeze the flag bits back out of the 29-bit identifier. */
		can_id = ((can_id & 0xffe00000) |
			  ((can_id & 0x7ffff) << 2)) >> 2;
	} else {
		can_id >>= 4;
		frame->can_id = 0;
	}

	/* Lowest remaining bit is RTR; the identifier sits above it. */
	frame->can_id |= can_id >> 1;
	if (can_id & 1)
		frame->can_id |= CAN_RTR_FLAG;
	frame->can_dlc = in_8(&regs->rx.dlr) & 0xf;

	if (!(frame->can_id & CAN_RTR_FLAG)) {
		void __iomem *data = &regs->rx.dsr1_0;
		u16 *payload = (u16 *)frame->data;

		/* Data registers are 16-bit, padded on the MPC5200. */
		for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
			*payload++ = in_be16(data);
			data += 2 + _MSCAN_RESERVED_DSR_SIZE;
		}
	}

	/* Release the buffer for the next frame. */
	out_8(&regs->canrflg, MSCAN_RXF);
}
314
/*
 * Build an error frame from the pending CANRFLG bits: RX overrun
 * and/or a bus state change (warning, passive, bus-off). On bus-off
 * the chip is additionally stopped, see comment below.
 */
static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame,
				u8 canrflg)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
	struct net_device_stats *stats = &dev->stats;
	enum can_state old_state;

	dev_dbg(dev->dev.parent, "error interrupt (canrflg=%#x)\n", canrflg);
	frame->can_id = CAN_ERR_FLAG;

	/* RX overrun? */
	if (canrflg & MSCAN_OVRIF) {
		frame->can_id |= CAN_ERR_CRTL;
		frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
		stats->rx_over_errors++;
		stats->rx_errors++;
	} else {
		frame->data[1] = 0;
	}

	old_state = check_set_state(dev, canrflg);
	/* State changed */
	if (old_state != priv->can.state) {
		switch (priv->can.state) {
		case CAN_STATE_ERROR_WARNING:
			frame->can_id |= CAN_ERR_CRTL;
			priv->can.can_stats.error_warning++;
			/* Compare with the previous status to tell RX from TX. */
			if ((priv->shadow_statflg & MSCAN_RSTAT_MSK) <
			    (canrflg & MSCAN_RSTAT_MSK))
				frame->data[1] |= CAN_ERR_CRTL_RX_WARNING;
			if ((priv->shadow_statflg & MSCAN_TSTAT_MSK) <
			    (canrflg & MSCAN_TSTAT_MSK))
				frame->data[1] |= CAN_ERR_CRTL_TX_WARNING;
			break;
		case CAN_STATE_ERROR_PASSIVE:
			frame->can_id |= CAN_ERR_CRTL;
			priv->can.can_stats.error_passive++;
			frame->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
			break;
		case CAN_STATE_BUS_OFF:
			frame->can_id |= CAN_ERR_BUSOFF;
			/*
			 * The MSCAN on the MPC5200 does recover from bus-off
			 * automatically. To avoid that we stop the chip doing
			 * a light-weight stop (we are in irq-context).
			 */
			out_8(&regs->cantier, 0);
			out_8(&regs->canrier, 0);
			setbits8(&regs->canctl0, MSCAN_SLPRQ | MSCAN_INITRQ);
			can_bus_off(dev);
			break;
		default:
			break;
		}
	}
	priv->shadow_statflg = canrflg & MSCAN_STAT_MSK;
	frame->can_dlc = CAN_ERR_DLC;
	/* Acknowledge the handled error/state-change flags. */
	out_8(&regs->canrflg, MSCAN_ERR_IF);
}
374
/*
 * NAPI poll: drain received frames and error conditions, at most
 * "quota" skbs per call. When nothing is left pending, complete NAPI
 * and restore the receive-interrupt mask saved by the ISR.
 *
 * NOTE(review): returns 0 when done and 1 while work remains, rather
 * than the number of packets processed -- confirm this matches the
 * NAPI contract of this kernel version.
 */
static int mscan_rx_poll(struct napi_struct *napi, int quota)
{
	struct mscan_priv *priv = container_of(napi, struct mscan_priv, napi);
	struct net_device *dev = napi->dev;
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
	struct net_device_stats *stats = &dev->stats;
	int npackets = 0;
	int ret = 1;
	struct sk_buff *skb;
	struct can_frame *frame;
	u8 canrflg;

	while (npackets < quota) {
		canrflg = in_8(&regs->canrflg);
		if (!(canrflg & (MSCAN_RXF | MSCAN_ERR_IF)))
			break;

		skb = alloc_can_skb(dev, &frame);
		if (!skb) {
			if (printk_ratelimit())
				dev_notice(dev->dev.parent, "packet dropped\n");
			stats->rx_dropped++;
			/* Ack the flags anyway so the hardware can go on. */
			out_8(&regs->canrflg, canrflg);
			continue;
		}

		if (canrflg & MSCAN_RXF)
			mscan_get_rx_frame(dev, frame);
		else if (canrflg & MSCAN_ERR_IF)
			mscan_get_err_frame(dev, frame, canrflg);

		stats->rx_packets++;
		stats->rx_bytes += frame->can_dlc;
		npackets++;
		netif_receive_skb(skb);
	}

	/* Nothing pending: complete and unmask, unless we went bus-off. */
	if (!(in_8(&regs->canrflg) & (MSCAN_RXF | MSCAN_ERR_IF))) {
		napi_complete(&priv->napi);
		clear_bit(F_RX_PROGRESS, &priv->flags);
		if (priv->can.state < CAN_STATE_BUS_OFF)
			out_8(&regs->canrier, priv->shadow_canrier);
		ret = 0;
	}
	return ret;
}
421
422static irqreturn_t mscan_isr(int irq, void *dev_id)
423{
424 struct net_device *dev = (struct net_device *)dev_id;
425 struct mscan_priv *priv = netdev_priv(dev);
426 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
427 struct net_device_stats *stats = &dev->stats;
428 u8 cantier, cantflg, canrflg;
429 irqreturn_t ret = IRQ_NONE;
430
431 cantier = in_8(&regs->cantier) & MSCAN_TXE;
432 cantflg = in_8(&regs->cantflg) & cantier;
433
434 if (cantier && cantflg) {
435 struct list_head *tmp, *pos;
436
437 list_for_each_safe(pos, tmp, &priv->tx_head) {
438 struct tx_queue_entry *entry =
439 list_entry(pos, struct tx_queue_entry, list);
440 u8 mask = entry->mask;
441
442 if (!(cantflg & mask))
443 continue;
444
445 out_8(&regs->cantbsel, mask);
446 stats->tx_bytes += in_8(&regs->tx.dlr);
447 stats->tx_packets++;
448 can_get_echo_skb(dev, entry->id);
449 priv->tx_active &= ~mask;
450 list_del(pos);
451 }
452
453 if (list_empty(&priv->tx_head)) {
454 clear_bit(F_TX_WAIT_ALL, &priv->flags);
455 clear_bit(F_TX_PROGRESS, &priv->flags);
456 priv->cur_pri = 0;
457 } else {
458 dev->trans_start = jiffies;
459 }
460
461 if (!test_bit(F_TX_WAIT_ALL, &priv->flags))
462 netif_wake_queue(dev);
463
464 out_8(&regs->cantier, priv->tx_active);
465 ret = IRQ_HANDLED;
466 }
467
468 canrflg = in_8(&regs->canrflg);
469 if ((canrflg & ~MSCAN_STAT_MSK) &&
470 !test_and_set_bit(F_RX_PROGRESS, &priv->flags)) {
471 if (canrflg & ~MSCAN_STAT_MSK) {
472 priv->shadow_canrier = in_8(&regs->canrier);
473 out_8(&regs->canrier, 0);
474 napi_schedule(&priv->napi);
475 ret = IRQ_HANDLED;
476 } else {
477 clear_bit(F_RX_PROGRESS, &priv->flags);
478 }
479 }
480 return ret;
481}
482
/*
 * CAN core do_set_mode callback. Only CAN_MODE_START is supported:
 * restarts the controller, e.g. after bus-off.
 */
static int mscan_do_set_mode(struct net_device *dev, enum can_mode mode)
{
	struct mscan_priv *priv = netdev_priv(dev);
	int ret = 0;

	/* Only valid while the device is open (open_time set in mscan_open). */
	if (!priv->open_time)
		return -EINVAL;

	switch (mode) {
	case CAN_MODE_START:
		/* Make sure the chip is stopped before restarting it. */
		if (priv->can.state <= CAN_STATE_BUS_OFF)
			mscan_set_mode(dev, MSCAN_INIT_MODE);
		ret = mscan_start(dev);
		if (ret)
			break;
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
		break;

	default:
		ret = -EOPNOTSUPP;
		break;
	}
	return ret;
}
508
/*
 * CAN core do_set_bittiming callback: translate the timing parameters
 * computed by the core into BTR0/BTR1 values and program them.
 */
static int mscan_do_set_bittiming(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
	struct can_bittiming *bt = &priv->can.bittiming;
	u8 btr0, btr1;

	btr0 = BTR0_SET_BRP(bt->brp) | BTR0_SET_SJW(bt->sjw);
	btr1 = (BTR1_SET_TSEG1(bt->prop_seg + bt->phase_seg1) |
		BTR1_SET_TSEG2(bt->phase_seg2) |
		BTR1_SET_SAM(priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES));

	dev_info(dev->dev.parent, "setting BTR0=0x%02x BTR1=0x%02x\n",
		 btr0, btr1);

	out_8(&regs->canbtr0, btr0);
	out_8(&regs->canbtr1, btr1);

	return 0;
}
529
/*
 * net_device open: enable NAPI, attach the interrupt handler, leave
 * listen-only mode and start the chip. Unwinds on every failure path.
 */
static int mscan_open(struct net_device *dev)
{
	int ret;
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;

	/* common open */
	ret = open_candev(dev);
	if (ret)
		return ret;

	napi_enable(&priv->napi);

	ret = request_irq(dev->irq, mscan_isr, 0, dev->name, dev);
	if (ret < 0) {
		dev_err(dev->dev.parent, "failed to attach interrupt\n");
		goto exit_napi_disable;
	}

	/* Marks the device "open" for mscan_do_set_mode(). */
	priv->open_time = jiffies;

	/* Leave listen-only mode. */
	clrbits8(&regs->canctl1, MSCAN_LISTEN);

	ret = mscan_start(dev);
	if (ret)
		goto exit_free_irq;

	netif_start_queue(dev);

	return 0;

exit_free_irq:
	priv->open_time = 0;
	free_irq(dev->irq, dev);
exit_napi_disable:
	napi_disable(&priv->napi);
	close_candev(dev);
	return ret;
}
569
/*
 * net_device stop: mask all interrupts, put the chip back into init
 * mode and release the resources taken in mscan_open().
 */
static int mscan_close(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;

	netif_stop_queue(dev);
	napi_disable(&priv->napi);

	/* Silence the chip before stopping it. */
	out_8(&regs->cantier, 0);
	out_8(&regs->canrier, 0);
	mscan_set_mode(dev, MSCAN_INIT_MODE);
	close_candev(dev);
	free_irq(dev->irq, dev);
	priv->open_time = 0;

	return 0;
}
587
/* net_device callbacks implemented by this driver. */
static const struct net_device_ops mscan_netdev_ops = {
	.ndo_open = mscan_open,
	.ndo_stop = mscan_close,
	.ndo_start_xmit = mscan_start_xmit,
};
593
/*
 * Enable the MSCAN cell with the given clock source (see mscan.h:
 * 1 = onchip bus clock, 0 = oscillator clock), configure the
 * acceptance filters to accept everything, stop the chip and register
 * the device with the CAN core. Returns the register_candev() result.
 */
int register_mscandev(struct net_device *dev, int clock_src)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
	u8 ctl1;

	ctl1 = in_8(&regs->canctl1);
	if (clock_src)
		ctl1 |= MSCAN_CLKSRC;
	else
		ctl1 &= ~MSCAN_CLKSRC;

	/* Enable the MSCAN module itself. */
	ctl1 |= MSCAN_CANE;
	out_8(&regs->canctl1, ctl1);
	udelay(100);

	/* acceptance mask/acceptance code (accept everything) */
	out_be16(&regs->canidar1_0, 0);
	out_be16(&regs->canidar3_2, 0);
	out_be16(&regs->canidar5_4, 0);
	out_be16(&regs->canidar7_6, 0);

	out_be16(&regs->canidmr1_0, 0xffff);
	out_be16(&regs->canidmr3_2, 0xffff);
	out_be16(&regs->canidmr5_4, 0xffff);
	out_be16(&regs->canidmr7_6, 0xffff);
	/* Two 32 bit Acceptance Filters */
	out_8(&regs->canidac, MSCAN_AF_32BIT);

	/* Leave the chip stopped until the interface is opened. */
	mscan_set_mode(dev, MSCAN_INIT_MODE);

	return register_candev(dev);
}
627
/* Stop the chip, disable the MSCAN cell and unregister the device. */
void unregister_mscandev(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
	mscan_set_mode(dev, MSCAN_INIT_MODE);
	clrbits8(&regs->canctl1, MSCAN_CANE);
	unregister_candev(dev);
}
636
/*
 * Allocate a CAN net_device with mscan private data and hook up the
 * netdev ops, NAPI (weight 8) and the CAN core callbacks.
 * priv->reg_base and dev->irq are presumably filled in by the
 * bus-specific probe code before register_mscandev() -- not visible
 * here, confirm against the caller.
 */
struct net_device *alloc_mscandev(void)
{
	struct net_device *dev;
	struct mscan_priv *priv;
	int i;

	dev = alloc_candev(sizeof(struct mscan_priv), MSCAN_ECHO_SKB_MAX);
	if (!dev)
		return NULL;
	priv = netdev_priv(dev);

	dev->netdev_ops = &mscan_netdev_ops;

	dev->flags |= IFF_ECHO;	/* we support local echo */

	netif_napi_add(dev, &priv->napi, mscan_rx_poll, 8);

	priv->can.bittiming_const = &mscan_bittiming_const;
	priv->can.do_set_bittiming = mscan_do_set_bittiming;
	priv->can.do_set_mode = mscan_do_set_mode;

	/* One queue entry per hardware TX buffer. */
	for (i = 0; i < TX_QUEUE_SIZE; i++) {
		priv->tx_queue[i].id = i;
		priv->tx_queue[i].mask = 1 << i;
	}

	return dev;
}
665
666MODULE_AUTHOR("Andrey Volkov <avolkov@varma-el.com>");
667MODULE_LICENSE("GPL v2");
668MODULE_DESCRIPTION("CAN port driver for a MSCAN based chips");
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
new file mode 100644
index 00000000000..00fc4aaf1ed
--- /dev/null
+++ b/drivers/net/can/mscan/mscan.h
@@ -0,0 +1,296 @@
1/*
2 * Definitions of consts/structs to drive the Freescale MSCAN.
3 *
4 * Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>,
5 * Varma Electronics Oy
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the version 2 of the GNU General Public License
9 * as published by the Free Software Foundation
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#ifndef __MSCAN_H__
22#define __MSCAN_H__
23
24#include <linux/types.h>
25
26/* MSCAN control register 0 (CANCTL0) bits */
27#define MSCAN_RXFRM 0x80
28#define MSCAN_RXACT 0x40
29#define MSCAN_CSWAI 0x20
30#define MSCAN_SYNCH 0x10
31#define MSCAN_TIME 0x08
32#define MSCAN_WUPE 0x04
33#define MSCAN_SLPRQ 0x02
34#define MSCAN_INITRQ 0x01
35
36/* MSCAN control register 1 (CANCTL1) bits */
37#define MSCAN_CANE 0x80
38#define MSCAN_CLKSRC 0x40
39#define MSCAN_LOOPB 0x20
40#define MSCAN_LISTEN 0x10
41#define MSCAN_WUPM 0x04
42#define MSCAN_SLPAK 0x02
43#define MSCAN_INITAK 0x01
44
45/* Use the MPC5200 MSCAN variant? */
46#ifdef CONFIG_PPC
47#define MSCAN_FOR_MPC5200
48#endif
49
50#ifdef MSCAN_FOR_MPC5200
51#define MSCAN_CLKSRC_BUS 0
52#define MSCAN_CLKSRC_XTAL MSCAN_CLKSRC
53#else
54#define MSCAN_CLKSRC_BUS MSCAN_CLKSRC
55#define MSCAN_CLKSRC_XTAL 0
56#endif
57
58/* MSCAN receiver flag register (CANRFLG) bits */
59#define MSCAN_WUPIF 0x80
60#define MSCAN_CSCIF 0x40
61#define MSCAN_RSTAT1 0x20
62#define MSCAN_RSTAT0 0x10
63#define MSCAN_TSTAT1 0x08
64#define MSCAN_TSTAT0 0x04
65#define MSCAN_OVRIF 0x02
66#define MSCAN_RXF 0x01
67#define MSCAN_ERR_IF (MSCAN_OVRIF | MSCAN_CSCIF)
68#define MSCAN_RSTAT_MSK (MSCAN_RSTAT1 | MSCAN_RSTAT0)
69#define MSCAN_TSTAT_MSK (MSCAN_TSTAT1 | MSCAN_TSTAT0)
70#define MSCAN_STAT_MSK (MSCAN_RSTAT_MSK | MSCAN_TSTAT_MSK)
71
72#define MSCAN_STATE_BUS_OFF (MSCAN_RSTAT1 | MSCAN_RSTAT0 | \
73 MSCAN_TSTAT1 | MSCAN_TSTAT0)
74#define MSCAN_STATE_TX(canrflg) (((canrflg)&MSCAN_TSTAT_MSK)>>2)
75#define MSCAN_STATE_RX(canrflg) (((canrflg)&MSCAN_RSTAT_MSK)>>4)
76#define MSCAN_STATE_ACTIVE 0
77#define MSCAN_STATE_WARNING 1
78#define MSCAN_STATE_PASSIVE 2
79#define MSCAN_STATE_BUSOFF 3
80
81/* MSCAN receiver interrupt enable register (CANRIER) bits */
82#define MSCAN_WUPIE 0x80
83#define MSCAN_CSCIE 0x40
84#define MSCAN_RSTATE1 0x20
85#define MSCAN_RSTATE0 0x10
86#define MSCAN_TSTATE1 0x08
87#define MSCAN_TSTATE0 0x04
88#define MSCAN_OVRIE 0x02
89#define MSCAN_RXFIE 0x01
90
91/* MSCAN transmitter flag register (CANTFLG) bits */
92#define MSCAN_TXE2 0x04
93#define MSCAN_TXE1 0x02
94#define MSCAN_TXE0 0x01
95#define MSCAN_TXE (MSCAN_TXE2 | MSCAN_TXE1 | MSCAN_TXE0)
96
97/* MSCAN transmitter interrupt enable register (CANTIER) bits */
98#define MSCAN_TXIE2 0x04
99#define MSCAN_TXIE1 0x02
100#define MSCAN_TXIE0 0x01
101#define MSCAN_TXIE (MSCAN_TXIE2 | MSCAN_TXIE1 | MSCAN_TXIE0)
102
103/* MSCAN transmitter message abort request (CANTARQ) bits */
104#define MSCAN_ABTRQ2 0x04
105#define MSCAN_ABTRQ1 0x02
106#define MSCAN_ABTRQ0 0x01
107
108/* MSCAN transmitter message abort ack (CANTAAK) bits */
109#define MSCAN_ABTAK2 0x04
110#define MSCAN_ABTAK1 0x02
111#define MSCAN_ABTAK0 0x01
112
113/* MSCAN transmit buffer selection (CANTBSEL) bits */
114#define MSCAN_TX2 0x04
115#define MSCAN_TX1 0x02
116#define MSCAN_TX0 0x01
117
118/* MSCAN ID acceptance control register (CANIDAC) bits */
119#define MSCAN_IDAM1 0x20
120#define MSCAN_IDAM0 0x10
121#define MSCAN_IDHIT2 0x04
122#define MSCAN_IDHIT1 0x02
123#define MSCAN_IDHIT0 0x01
124
125#define MSCAN_AF_32BIT 0x00
126#define MSCAN_AF_16BIT MSCAN_IDAM0
127#define MSCAN_AF_8BIT MSCAN_IDAM1
128#define MSCAN_AF_CLOSED (MSCAN_IDAM0|MSCAN_IDAM1)
129#define MSCAN_AF_MASK (~(MSCAN_IDAM0|MSCAN_IDAM1))
130
131/* MSCAN Miscellaneous Register (CANMISC) bits */
132#define MSCAN_BOHOLD 0x01
133
134/* MSCAN Identifier Register (IDR) bits */
135#define MSCAN_SFF_RTR_SHIFT 4
136#define MSCAN_EFF_RTR_SHIFT 0
137#define MSCAN_EFF_FLAGS 0x18 /* IDE + SRR */
138
139#ifdef MSCAN_FOR_MPC5200
140#define _MSCAN_RESERVED_(n, num) u8 _res##n[num]
141#define _MSCAN_RESERVED_DSR_SIZE 2
142#else
143#define _MSCAN_RESERVED_(n, num)
144#define _MSCAN_RESERVED_DSR_SIZE 0
145#endif
146
/* Structure of the hardware registers */
/*
 * The two offset columns in the field comments are: first the MPC5200
 * offset (each register pair padded with two reserved bytes, see
 * _MSCAN_RESERVED_), second the generic MSCAN offset per doc
 * S12MSCANV3/D. canmisc exists only on the generic variant.
 */
struct mscan_regs {
	/* (see doc S12MSCANV3/D) MPC5200 MSCAN */
	u8 canctl0;			/* + 0x00 0x00 */
	u8 canctl1;			/* + 0x01 0x01 */
	_MSCAN_RESERVED_(1, 2);		/* + 0x02 */
	u8 canbtr0;			/* + 0x04 0x02 */
	u8 canbtr1;			/* + 0x05 0x03 */
	_MSCAN_RESERVED_(2, 2);		/* + 0x06 */
	u8 canrflg;			/* + 0x08 0x04 */
	u8 canrier;			/* + 0x09 0x05 */
	_MSCAN_RESERVED_(3, 2);		/* + 0x0a */
	u8 cantflg;			/* + 0x0c 0x06 */
	u8 cantier;			/* + 0x0d 0x07 */
	_MSCAN_RESERVED_(4, 2);		/* + 0x0e */
	u8 cantarq;			/* + 0x10 0x08 */
	u8 cantaak;			/* + 0x11 0x09 */
	_MSCAN_RESERVED_(5, 2);		/* + 0x12 */
	u8 cantbsel;			/* + 0x14 0x0a */
	u8 canidac;			/* + 0x15 0x0b */
	u8 reserved;			/* + 0x16 0x0c */
	_MSCAN_RESERVED_(6, 5);		/* + 0x17 */
#ifndef MSCAN_FOR_MPC5200
	u8 canmisc;			/*        0x0d */
#endif
	u8 canrxerr;			/* + 0x1c 0x0e */
	u8 cantxerr;			/* + 0x1d 0x0f */
	_MSCAN_RESERVED_(7, 2);		/* + 0x1e */
	u16 canidar1_0;			/* + 0x20 0x10 */
	_MSCAN_RESERVED_(8, 2);		/* + 0x22 */
	u16 canidar3_2;			/* + 0x24 0x12 */
	_MSCAN_RESERVED_(9, 2);		/* + 0x26 */
	u16 canidmr1_0;			/* + 0x28 0x14 */
	_MSCAN_RESERVED_(10, 2);	/* + 0x2a */
	u16 canidmr3_2;			/* + 0x2c 0x16 */
	_MSCAN_RESERVED_(11, 2);	/* + 0x2e */
	u16 canidar5_4;			/* + 0x30 0x18 */
	_MSCAN_RESERVED_(12, 2);	/* + 0x32 */
	u16 canidar7_6;			/* + 0x34 0x1a */
	_MSCAN_RESERVED_(13, 2);	/* + 0x36 */
	u16 canidmr5_4;			/* + 0x38 0x1c */
	_MSCAN_RESERVED_(14, 2);	/* + 0x3a */
	u16 canidmr7_6;			/* + 0x3c 0x1e */
	_MSCAN_RESERVED_(15, 2);	/* + 0x3e */
	struct {
		u16 idr1_0;		/* + 0x40 0x20 */
		_MSCAN_RESERVED_(16, 2);	/* + 0x42 */
		u16 idr3_2;		/* + 0x44 0x22 */
		_MSCAN_RESERVED_(17, 2);	/* + 0x46 */
		u16 dsr1_0;		/* + 0x48 0x24 */
		_MSCAN_RESERVED_(18, 2);	/* + 0x4a */
		u16 dsr3_2;		/* + 0x4c 0x26 */
		_MSCAN_RESERVED_(19, 2);	/* + 0x4e */
		u16 dsr5_4;		/* + 0x50 0x28 */
		_MSCAN_RESERVED_(20, 2);	/* + 0x52 */
		u16 dsr7_6;		/* + 0x54 0x2a */
		_MSCAN_RESERVED_(21, 2);	/* + 0x56 */
		u8 dlr;			/* + 0x58 0x2c */
		u8:8;			/* + 0x59 0x2d */
		_MSCAN_RESERVED_(22, 2);	/* + 0x5a */
		u16 time;		/* + 0x5c 0x2e */
	} rx;
	_MSCAN_RESERVED_(23, 2);	/* + 0x5e */
	struct {
		u16 idr1_0;		/* + 0x60 0x30 */
		_MSCAN_RESERVED_(24, 2);	/* + 0x62 */
		u16 idr3_2;		/* + 0x64 0x32 */
		_MSCAN_RESERVED_(25, 2);	/* + 0x66 */
		u16 dsr1_0;		/* + 0x68 0x34 */
		_MSCAN_RESERVED_(26, 2);	/* + 0x6a */
		u16 dsr3_2;		/* + 0x6c 0x36 */
		_MSCAN_RESERVED_(27, 2);	/* + 0x6e */
		u16 dsr5_4;		/* + 0x70 0x38 */
		_MSCAN_RESERVED_(28, 2);	/* + 0x72 */
		u16 dsr7_6;		/* + 0x74 0x3a */
		_MSCAN_RESERVED_(29, 2);	/* + 0x76 */
		u8 dlr;			/* + 0x78 0x3c */
		u8 tbpr;		/* + 0x79 0x3d */
		_MSCAN_RESERVED_(30, 2);	/* + 0x7a */
		u16 time;		/* + 0x7c 0x3e */
	} tx;
	_MSCAN_RESERVED_(31, 2);	/* + 0x7e */
} __attribute__ ((packed));
230
#undef _MSCAN_RESERVED_
/*
 * Size of the register window to map/request. Fix vs. original: it
 * said sizeof(struct mscan), but no such struct exists (the structure
 * above is "mscan_regs"), so any use would fail to compile.
 */
#define MSCAN_REGION	sizeof(struct mscan_regs)
233
234#define MSCAN_NORMAL_MODE 0
235#define MSCAN_SLEEP_MODE MSCAN_SLPRQ
236#define MSCAN_INIT_MODE (MSCAN_INITRQ | MSCAN_SLPRQ)
237#define MSCAN_POWEROFF_MODE (MSCAN_CSWAI | MSCAN_SLPRQ)
238#define MSCAN_SET_MODE_RETRIES 255
239#define MSCAN_ECHO_SKB_MAX 3
240
241#define BTR0_BRP_MASK 0x3f
242#define BTR0_SJW_SHIFT 6
243#define BTR0_SJW_MASK (0x3 << BTR0_SJW_SHIFT)
244
245#define BTR1_TSEG1_MASK 0xf
246#define BTR1_TSEG2_SHIFT 4
247#define BTR1_TSEG2_MASK (0x7 << BTR1_TSEG2_SHIFT)
248#define BTR1_SAM_SHIFT 7
249
250#define BTR0_SET_BRP(brp) (((brp) - 1) & BTR0_BRP_MASK)
251#define BTR0_SET_SJW(sjw) ((((sjw) - 1) << BTR0_SJW_SHIFT) & \
252 BTR0_SJW_MASK)
253
254#define BTR1_SET_TSEG1(tseg1) (((tseg1) - 1) & BTR1_TSEG1_MASK)
255#define BTR1_SET_TSEG2(tseg2) ((((tseg2) - 1) << BTR1_TSEG2_SHIFT) & \
256 BTR1_TSEG2_MASK)
257#define BTR1_SET_SAM(sam) ((sam) ? 1 << BTR1_SAM_SHIFT : 0)
258
259#define F_RX_PROGRESS 0
260#define F_TX_PROGRESS 1
261#define F_TX_WAIT_ALL 2
262
263#define TX_QUEUE_SIZE 3
264
/* Bookkeeping for one hardware TX buffer, queued on mscan_priv.tx_head. */
struct tx_queue_entry {
	struct list_head list;	/* link in tx_head, submission order */
	u8 mask;		/* CANTFLG/CANTIER bit of this buffer */
	u8 id;			/* buffer index; also the echo-skb index */
};
270
/* Per-device private data, embedded in the net_device. */
struct mscan_priv {
	struct can_priv can;	/* must be the first member */
	long open_time;		/* jiffies at open; 0 while closed */
	unsigned long flags;	/* F_RX_PROGRESS / F_TX_* bits */
	void __iomem *reg_base;	/* ioremap'ed address to registers */
	u8 shadow_statflg;	/* last seen CANRFLG status bits */
	u8 shadow_canrier;	/* CANRIER saved while NAPI poll runs */
	u8 cur_pri;		/* current TX priority (tbpr value) */
	u8 prev_buf_id;		/* TX buffer used for the previous frame */
	u8 tx_active;		/* bitmask of in-flight TX buffers */

	struct list_head tx_head;	/* in-flight frames, submission order */
	struct tx_queue_entry tx_queue[TX_QUEUE_SIZE];
	struct napi_struct napi;
};
286
287extern struct net_device *alloc_mscandev(void);
288/*
289 * clock_src:
290 * 1 = The MSCAN clock source is the onchip Bus Clock.
291 * 0 = The MSCAN clock source is the chip Oscillator Clock.
292 */
293extern int register_mscandev(struct net_device *dev, int clock_src);
294extern void unregister_mscandev(struct net_device *dev);
295
296#endif /* __MSCAN_H__ */
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
new file mode 100644
index 00000000000..4c674927f24
--- /dev/null
+++ b/drivers/net/can/sja1000/Kconfig
@@ -0,0 +1,47 @@
1menuconfig CAN_SJA1000
2 tristate "Philips/NXP SJA1000 devices"
3 depends on CAN_DEV && HAS_IOMEM
4
5if CAN_SJA1000
6
7config CAN_SJA1000_ISA
8 tristate "ISA Bus based legacy SJA1000 driver"
9 depends on ISA
10 ---help---
11 This driver adds legacy support for SJA1000 chips connected to
12 the ISA bus using I/O port, memory mapped or indirect access.
13
14config CAN_SJA1000_PLATFORM
15 tristate "Generic Platform Bus based SJA1000 driver"
16 ---help---
17 This driver adds support for the SJA1000 chips connected to
18 the "platform bus" (Linux abstraction for directly to the
	  processor attached devices), which can be found on various
20 boards from Phytec (http://www.phytec.de) like the PCM027,
21 PCM038.
22
23config CAN_SJA1000_OF_PLATFORM
24 tristate "Generic OF Platform Bus based SJA1000 driver"
25 depends on PPC_OF
26 ---help---
27 This driver adds support for the SJA1000 chips connected to
28 the OpenFirmware "platform bus" found on embedded systems with
29 OpenFirmware bindings, e.g. if you have a PowerPC based system
30 you may want to enable this option.
31
32config CAN_EMS_PCI
33 tristate "EMS CPC-PCI, CPC-PCIe and CPC-104P Card"
34 depends on PCI
35 ---help---
36 This driver is for the one, two or four channel CPC-PCI,
37 CPC-PCIe and CPC-104P cards from EMS Dr. Thomas Wuensche
38 (http://www.ems-wuensche.de).
39
40config CAN_KVASER_PCI
41 tristate "Kvaser PCIcanx and Kvaser PCIcan PCI Cards"
42 depends on PCI
43 ---help---
	  This driver is for the PCIcanx and PCIcan cards (1, 2 or
45 4 channel) from Kvaser (http://www.kvaser.com).
46
47endif
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 782a47fabf2..b4ba88a3107 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -516,7 +516,7 @@ static int sja1000_open(struct net_device *dev)
516 516
517 /* register interrupt handler, if not done by the device driver */ 517 /* register interrupt handler, if not done by the device driver */
518 if (!(priv->flags & SJA1000_CUSTOM_IRQ_HANDLER)) { 518 if (!(priv->flags & SJA1000_CUSTOM_IRQ_HANDLER)) {
519 err = request_irq(dev->irq, &sja1000_interrupt, priv->irq_flags, 519 err = request_irq(dev->irq, sja1000_interrupt, priv->irq_flags,
520 dev->name, (void *)dev); 520 dev->name, (void *)dev);
521 if (err) { 521 if (err) {
522 close_candev(dev); 522 close_candev(dev);
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
new file mode 100644
index 00000000000..bbc78e0b8a1
--- /dev/null
+++ b/drivers/net/can/usb/Kconfig
@@ -0,0 +1,10 @@
1menu "CAN USB interfaces"
2 depends on USB && CAN_DEV
3
4config CAN_EMS_USB
5 tristate "EMS CPC-USB/ARM7 CAN/USB interface"
6 ---help---
7 This driver is for the one channel CPC-USB/ARM7 CAN/USB interface
	  from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
9
10endmenu
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index c3f75ba701b..0afd51d4c7a 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -3,3 +3,5 @@
3# 3#
4 4
5obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o 5obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
6
7ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 79ce8e857ea..8edac8915ea 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -2136,9 +2136,6 @@ static int emac_poll(struct napi_struct *napi, int budget)
2136 u32 status = 0; 2136 u32 status = 0;
2137 u32 num_pkts = 0; 2137 u32 num_pkts = 0;
2138 2138
2139 if (!netif_running(ndev))
2140 return 0;
2141
2142 /* Check interrupt vectors and call packet processing */ 2139 /* Check interrupt vectors and call packet processing */
2143 status = emac_read(EMAC_MACINVECTOR); 2140 status = emac_read(EMAC_MACINVECTOR);
2144 2141
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 31b8bef49d2..3aab2e46600 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -100,6 +100,7 @@ typedef struct board_info {
100 100
101 unsigned int flags; 101 unsigned int flags;
102 unsigned int in_suspend :1; 102 unsigned int in_suspend :1;
103 unsigned int wake_supported :1;
103 int debug_level; 104 int debug_level;
104 105
105 enum dm9000_type type; 106 enum dm9000_type type;
@@ -116,6 +117,8 @@ typedef struct board_info {
116 struct resource *data_req; 117 struct resource *data_req;
117 struct resource *irq_res; 118 struct resource *irq_res;
118 119
120 int irq_wake;
121
119 struct mutex addr_lock; /* phy and eeprom access lock */ 122 struct mutex addr_lock; /* phy and eeprom access lock */
120 123
121 struct delayed_work phy_poll; 124 struct delayed_work phy_poll;
@@ -125,6 +128,7 @@ typedef struct board_info {
125 128
126 struct mii_if_info mii; 129 struct mii_if_info mii;
127 u32 msg_enable; 130 u32 msg_enable;
131 u32 wake_state;
128 132
129 int rx_csum; 133 int rx_csum;
130 int can_csum; 134 int can_csum;
@@ -568,6 +572,54 @@ static int dm9000_set_eeprom(struct net_device *dev,
568 return 0; 572 return 0;
569} 573}
570 574
575static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
576{
577 board_info_t *dm = to_dm9000_board(dev);
578
579 memset(w, 0, sizeof(struct ethtool_wolinfo));
580
581 /* note, we could probably support wake-phy too */
582 w->supported = dm->wake_supported ? WAKE_MAGIC : 0;
583 w->wolopts = dm->wake_state;
584}
585
586static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
587{
588 board_info_t *dm = to_dm9000_board(dev);
589 unsigned long flags;
590 u32 opts = w->wolopts;
591 u32 wcr = 0;
592
593 if (!dm->wake_supported)
594 return -EOPNOTSUPP;
595
596 if (opts & ~WAKE_MAGIC)
597 return -EINVAL;
598
599 if (opts & WAKE_MAGIC)
600 wcr |= WCR_MAGICEN;
601
602 mutex_lock(&dm->addr_lock);
603
604 spin_lock_irqsave(&dm->lock, flags);
605 iow(dm, DM9000_WCR, wcr);
606 spin_unlock_irqrestore(&dm->lock, flags);
607
608 mutex_unlock(&dm->addr_lock);
609
610 if (dm->wake_state != opts) {
611 /* change in wol state, update IRQ state */
612
613 if (!dm->wake_state)
614 set_irq_wake(dm->irq_wake, 1);
615 else if (dm->wake_state & !opts)
616 set_irq_wake(dm->irq_wake, 0);
617 }
618
619 dm->wake_state = opts;
620 return 0;
621}
622
571static const struct ethtool_ops dm9000_ethtool_ops = { 623static const struct ethtool_ops dm9000_ethtool_ops = {
572 .get_drvinfo = dm9000_get_drvinfo, 624 .get_drvinfo = dm9000_get_drvinfo,
573 .get_settings = dm9000_get_settings, 625 .get_settings = dm9000_get_settings,
@@ -576,6 +628,8 @@ static const struct ethtool_ops dm9000_ethtool_ops = {
576 .set_msglevel = dm9000_set_msglevel, 628 .set_msglevel = dm9000_set_msglevel,
577 .nway_reset = dm9000_nway_reset, 629 .nway_reset = dm9000_nway_reset,
578 .get_link = dm9000_get_link, 630 .get_link = dm9000_get_link,
631 .get_wol = dm9000_get_wol,
632 .set_wol = dm9000_set_wol,
579 .get_eeprom_len = dm9000_get_eeprom_len, 633 .get_eeprom_len = dm9000_get_eeprom_len,
580 .get_eeprom = dm9000_get_eeprom, 634 .get_eeprom = dm9000_get_eeprom,
581 .set_eeprom = dm9000_set_eeprom, 635 .set_eeprom = dm9000_set_eeprom,
@@ -722,6 +776,7 @@ dm9000_init_dm9000(struct net_device *dev)
722{ 776{
723 board_info_t *db = netdev_priv(dev); 777 board_info_t *db = netdev_priv(dev);
724 unsigned int imr; 778 unsigned int imr;
779 unsigned int ncr;
725 780
726 dm9000_dbg(db, 1, "entering %s\n", __func__); 781 dm9000_dbg(db, 1, "entering %s\n", __func__);
727 782
@@ -736,8 +791,15 @@ dm9000_init_dm9000(struct net_device *dev)
736 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ 791 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
737 iow(db, DM9000_GPR, 0); /* Enable PHY */ 792 iow(db, DM9000_GPR, 0); /* Enable PHY */
738 793
739 if (db->flags & DM9000_PLATF_EXT_PHY) 794 ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
740 iow(db, DM9000_NCR, NCR_EXT_PHY); 795
796 /* if wol is needed, then always set NCR_WAKEEN otherwise we end
797 * up dumping the wake events if we disable this. There is already
798 * a wake-mask in DM9000_WCR */
799 if (db->wake_supported)
800 ncr |= NCR_WAKEEN;
801
802 iow(db, DM9000_NCR, ncr);
741 803
742 /* Program operating register */ 804 /* Program operating register */
743 iow(db, DM9000_TCR, 0); /* TX Polling clear */ 805 iow(db, DM9000_TCR, 0); /* TX Polling clear */
@@ -1045,6 +1107,41 @@ static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
1045 return IRQ_HANDLED; 1107 return IRQ_HANDLED;
1046} 1108}
1047 1109
1110static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id)
1111{
1112 struct net_device *dev = dev_id;
1113 board_info_t *db = netdev_priv(dev);
1114 unsigned long flags;
1115 unsigned nsr, wcr;
1116
1117 spin_lock_irqsave(&db->lock, flags);
1118
1119 nsr = ior(db, DM9000_NSR);
1120 wcr = ior(db, DM9000_WCR);
1121
1122 dev_dbg(db->dev, "%s: NSR=0x%02x, WCR=0x%02x\n", __func__, nsr, wcr);
1123
1124 if (nsr & NSR_WAKEST) {
1125 /* clear, so we can avoid */
1126 iow(db, DM9000_NSR, NSR_WAKEST);
1127
1128 if (wcr & WCR_LINKST)
1129 dev_info(db->dev, "wake by link status change\n");
1130 if (wcr & WCR_SAMPLEST)
1131 dev_info(db->dev, "wake by sample packet\n");
1132 if (wcr & WCR_MAGICST )
1133 dev_info(db->dev, "wake by magic packet\n");
1134 if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST)))
1135 dev_err(db->dev, "wake signalled with no reason? "
1136 "NSR=0x%02x, WSR=0x%02x\n", nsr, wcr);
1137
1138 }
1139
1140 spin_unlock_irqrestore(&db->lock, flags);
1141
1142 return (nsr & NSR_WAKEST) ? IRQ_HANDLED : IRQ_NONE;
1143}
1144
1048#ifdef CONFIG_NET_POLL_CONTROLLER 1145#ifdef CONFIG_NET_POLL_CONTROLLER
1049/* 1146/*
1050 *Used by netconsole 1147 *Used by netconsole
@@ -1299,6 +1396,29 @@ dm9000_probe(struct platform_device *pdev)
1299 goto out; 1396 goto out;
1300 } 1397 }
1301 1398
1399 db->irq_wake = platform_get_irq(pdev, 1);
1400 if (db->irq_wake >= 0) {
1401 dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);
1402
1403 ret = request_irq(db->irq_wake, dm9000_wol_interrupt,
1404 IRQF_SHARED, dev_name(db->dev), ndev);
1405 if (ret) {
1406 dev_err(db->dev, "cannot get wakeup irq (%d)\n", ret);
1407 } else {
1408
1409 /* test to see if irq is really wakeup capable */
1410 ret = set_irq_wake(db->irq_wake, 1);
1411 if (ret) {
1412 dev_err(db->dev, "irq %d cannot set wakeup (%d)\n",
1413 db->irq_wake, ret);
1414 ret = 0;
1415 } else {
1416 set_irq_wake(db->irq_wake, 0);
1417 db->wake_supported = 1;
1418 }
1419 }
1420 }
1421
1302 iosize = resource_size(db->addr_res); 1422 iosize = resource_size(db->addr_res);
1303 db->addr_req = request_mem_region(db->addr_res->start, iosize, 1423 db->addr_req = request_mem_region(db->addr_res->start, iosize,
1304 pdev->name); 1424 pdev->name);
@@ -1490,10 +1610,14 @@ dm9000_drv_suspend(struct device *dev)
1490 db = netdev_priv(ndev); 1610 db = netdev_priv(ndev);
1491 db->in_suspend = 1; 1611 db->in_suspend = 1;
1492 1612
1493 if (netif_running(ndev)) { 1613 if (!netif_running(ndev))
1494 netif_device_detach(ndev); 1614 return 0;
1615
1616 netif_device_detach(ndev);
1617
1618 /* only shutdown if not using WoL */
1619 if (!db->wake_state)
1495 dm9000_shutdown(ndev); 1620 dm9000_shutdown(ndev);
1496 }
1497 } 1621 }
1498 return 0; 1622 return 0;
1499} 1623}
@@ -1506,10 +1630,13 @@ dm9000_drv_resume(struct device *dev)
1506 board_info_t *db = netdev_priv(ndev); 1630 board_info_t *db = netdev_priv(ndev);
1507 1631
1508 if (ndev) { 1632 if (ndev) {
1509
1510 if (netif_running(ndev)) { 1633 if (netif_running(ndev)) {
1511 dm9000_reset(db); 1634 /* reset if we were not in wake mode to ensure if
1512 dm9000_init_dm9000(ndev); 1635 * the device was powered off it is in a known state */
1636 if (!db->wake_state) {
1637 dm9000_reset(db);
1638 dm9000_init_dm9000(ndev);
1639 }
1513 1640
1514 netif_device_attach(ndev); 1641 netif_device_attach(ndev);
1515 } 1642 }
diff --git a/drivers/net/dm9000.h b/drivers/net/dm9000.h
index fb1c924d79b..55688bd1a3e 100644
--- a/drivers/net/dm9000.h
+++ b/drivers/net/dm9000.h
@@ -111,6 +111,13 @@
111#define RSR_CE (1<<1) 111#define RSR_CE (1<<1)
112#define RSR_FOE (1<<0) 112#define RSR_FOE (1<<0)
113 113
114#define WCR_LINKEN (1 << 5)
115#define WCR_SAMPLEEN (1 << 4)
116#define WCR_MAGICEN (1 << 3)
117#define WCR_LINKST (1 << 2)
118#define WCR_SAMPLEST (1 << 1)
119#define WCR_MAGICST (1 << 0)
120
114#define FCTR_HWOT(ot) (( ot & 0xf ) << 4 ) 121#define FCTR_HWOT(ot) (( ot & 0xf ) << 4 )
115#define FCTR_LWOT(ot) ( ot & 0xf ) 122#define FCTR_LWOT(ot) ( ot & 0xf )
116 123
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index f1c565282d5..96b6dc42fc7 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -640,7 +640,7 @@ static int ethoc_mdio_probe(struct net_device *dev)
640 return -ENXIO; 640 return -ENXIO;
641 } 641 }
642 642
643 phy = phy_connect(dev, dev_name(&phy->dev), &ethoc_mdio_poll, 0, 643 phy = phy_connect(dev, dev_name(&phy->dev), ethoc_mdio_poll, 0,
644 PHY_INTERFACE_MODE_GMII); 644 PHY_INTERFACE_MODE_GMII);
645 if (IS_ERR(phy)) { 645 if (IS_ERR(phy)) {
646 dev_err(&dev->dev, "could not attach to PHY\n"); 646 dev_err(&dev->dev, "could not attach to PHY\n");
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 0a1c2bb27d4..73fe9777720 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -5820,10 +5820,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5820 dev->dev_addr); 5820 dev->dev_addr);
5821 dev_printk(KERN_ERR, &pci_dev->dev, 5821 dev_printk(KERN_ERR, &pci_dev->dev,
5822 "Please complain to your hardware vendor. Switching to a random MAC.\n"); 5822 "Please complain to your hardware vendor. Switching to a random MAC.\n");
5823 dev->dev_addr[0] = 0x00; 5823 random_ether_addr(dev->dev_addr);
5824 dev->dev_addr[1] = 0x00;
5825 dev->dev_addr[2] = 0x6c;
5826 get_random_bytes(&dev->dev_addr[3], 3);
5827 } 5824 }
5828 5825
5829 dprintk(KERN_DEBUG "%s: MAC Address %pM\n", 5826 dprintk(KERN_DEBUG "%s: MAC Address %pM\n",
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 197b358e636..16def131c39 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1246,7 +1246,7 @@ static int gfar_restore(struct device *dev)
1246 phy_start(priv->phydev); 1246 phy_start(priv->phydev);
1247 1247
1248 netif_device_attach(ndev); 1248 netif_device_attach(ndev);
1249 napi_enable(&priv->gfargrp.napi); 1249 enable_napi(priv);
1250 1250
1251 return 0; 1251 return 0;
1252} 1252}
@@ -1928,14 +1928,11 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1928 /* total number of fragments in the SKB */ 1928 /* total number of fragments in the SKB */
1929 nr_frags = skb_shinfo(skb)->nr_frags; 1929 nr_frags = skb_shinfo(skb)->nr_frags;
1930 1930
1931 spin_lock_irqsave(&tx_queue->txlock, flags);
1932
1933 /* check if there is space to queue this packet */ 1931 /* check if there is space to queue this packet */
1934 if ((nr_frags+1) > tx_queue->num_txbdfree) { 1932 if ((nr_frags+1) > tx_queue->num_txbdfree) {
1935 /* no space, stop the queue */ 1933 /* no space, stop the queue */
1936 netif_tx_stop_queue(txq); 1934 netif_tx_stop_queue(txq);
1937 dev->stats.tx_fifo_errors++; 1935 dev->stats.tx_fifo_errors++;
1938 spin_unlock_irqrestore(&tx_queue->txlock, flags);
1939 return NETDEV_TX_BUSY; 1936 return NETDEV_TX_BUSY;
1940 } 1937 }
1941 1938
@@ -1999,6 +1996,20 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1999 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); 1996 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
2000 1997
2001 /* 1998 /*
1999 * We can work in parallel with gfar_clean_tx_ring(), except
2000 * when modifying num_txbdfree. Note that we didn't grab the lock
2001 * when we were reading the num_txbdfree and checking for available
2002 * space, that's because outside of this function it can only grow,
2003 * and once we've got needed space, it cannot suddenly disappear.
2004 *
2005 * The lock also protects us from gfar_error(), which can modify
2006 * regs->tstat and thus retrigger the transfers, which is why we
2007 * also must grab the lock before setting ready bit for the first
2008 * to be transmitted BD.
2009 */
2010 spin_lock_irqsave(&tx_queue->txlock, flags);
2011
2012 /*
2002 * The powerpc-specific eieio() is used, as wmb() has too strong 2013 * The powerpc-specific eieio() is used, as wmb() has too strong
2003 * semantics (it requires synchronization between cacheable and 2014 * semantics (it requires synchronization between cacheable and
2004 * uncacheable mappings, which eieio doesn't provide and which we 2015 * uncacheable mappings, which eieio doesn't provide and which we
@@ -2225,6 +2236,8 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2225 skb_dirtytx = tx_queue->skb_dirtytx; 2236 skb_dirtytx = tx_queue->skb_dirtytx;
2226 2237
2227 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) { 2238 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2239 unsigned long flags;
2240
2228 frags = skb_shinfo(skb)->nr_frags; 2241 frags = skb_shinfo(skb)->nr_frags;
2229 lbdp = skip_txbd(bdp, frags, base, tx_ring_size); 2242 lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
2230 2243
@@ -2269,7 +2282,9 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2269 TX_RING_MOD_MASK(tx_ring_size); 2282 TX_RING_MOD_MASK(tx_ring_size);
2270 2283
2271 howmany++; 2284 howmany++;
2285 spin_lock_irqsave(&tx_queue->txlock, flags);
2272 tx_queue->num_txbdfree += frags + 1; 2286 tx_queue->num_txbdfree += frags + 1;
2287 spin_unlock_irqrestore(&tx_queue->txlock, flags);
2273 } 2288 }
2274 2289
2275 /* If we freed a buffer, we can restart transmission, if necessary */ 2290 /* If we freed a buffer, we can restart transmission, if necessary */
@@ -2504,8 +2519,6 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2504 skb_put(skb, pkt_len); 2519 skb_put(skb, pkt_len);
2505 dev->stats.rx_bytes += pkt_len; 2520 dev->stats.rx_bytes += pkt_len;
2506 2521
2507 if (in_irq() || irqs_disabled())
2508 printk("Interrupt problem!\n");
2509 gfar_process_frame(dev, skb, amount_pull); 2522 gfar_process_frame(dev, skb, amount_pull);
2510 2523
2511 } else { 2524 } else {
@@ -2550,7 +2563,6 @@ static int gfar_poll(struct napi_struct *napi, int budget)
2550 int tx_cleaned = 0, i, left_over_budget = budget; 2563 int tx_cleaned = 0, i, left_over_budget = budget;
2551 unsigned long serviced_queues = 0; 2564 unsigned long serviced_queues = 0;
2552 int num_queues = 0; 2565 int num_queues = 0;
2553 unsigned long flags;
2554 2566
2555 num_queues = gfargrp->num_rx_queues; 2567 num_queues = gfargrp->num_rx_queues;
2556 budget_per_queue = budget/num_queues; 2568 budget_per_queue = budget/num_queues;
@@ -2570,14 +2582,7 @@ static int gfar_poll(struct napi_struct *napi, int budget)
2570 rx_queue = priv->rx_queue[i]; 2582 rx_queue = priv->rx_queue[i];
2571 tx_queue = priv->tx_queue[rx_queue->qindex]; 2583 tx_queue = priv->tx_queue[rx_queue->qindex];
2572 2584
2573 /* If we fail to get the lock, 2585 tx_cleaned += gfar_clean_tx_ring(tx_queue);
2574 * don't bother with the TX BDs */
2575 if (spin_trylock_irqsave(&tx_queue->txlock, flags)) {
2576 tx_cleaned += gfar_clean_tx_ring(tx_queue);
2577 spin_unlock_irqrestore(&tx_queue->txlock,
2578 flags);
2579 }
2580
2581 rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue, 2586 rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
2582 budget_per_queue); 2587 budget_per_queue);
2583 rx_cleaned += rx_cleaned_per_queue; 2588 rx_cleaned += rx_cleaned_per_queue;
@@ -2945,14 +2950,22 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
2945 if (events & IEVENT_CRL) 2950 if (events & IEVENT_CRL)
2946 dev->stats.tx_aborted_errors++; 2951 dev->stats.tx_aborted_errors++;
2947 if (events & IEVENT_XFUN) { 2952 if (events & IEVENT_XFUN) {
2953 unsigned long flags;
2954
2948 if (netif_msg_tx_err(priv)) 2955 if (netif_msg_tx_err(priv))
2949 printk(KERN_DEBUG "%s: TX FIFO underrun, " 2956 printk(KERN_DEBUG "%s: TX FIFO underrun, "
2950 "packet dropped.\n", dev->name); 2957 "packet dropped.\n", dev->name);
2951 dev->stats.tx_dropped++; 2958 dev->stats.tx_dropped++;
2952 priv->extra_stats.tx_underrun++; 2959 priv->extra_stats.tx_underrun++;
2953 2960
2961 local_irq_save(flags);
2962 lock_tx_qs(priv);
2963
2954 /* Reactivate the Tx Queues */ 2964 /* Reactivate the Tx Queues */
2955 gfar_write(&regs->tstat, gfargrp->tstat); 2965 gfar_write(&regs->tstat, gfargrp->tstat);
2966
2967 unlock_tx_qs(priv);
2968 local_irq_restore(flags);
2956 } 2969 }
2957 if (netif_msg_tx_err(priv)) 2970 if (netif_msg_tx_err(priv))
2958 printk(KERN_DEBUG "%s: Transmit Error\n", dev->name); 2971 printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/gianfar_sysfs.c
index 3724835d285..b31c9c8876e 100644
--- a/drivers/net/gianfar_sysfs.c
+++ b/drivers/net/gianfar_sysfs.c
@@ -186,7 +186,7 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
186 temp = gfar_read(&regs->attreli); 186 temp = gfar_read(&regs->attreli);
187 temp &= ~ATTRELI_EI_MASK; 187 temp &= ~ATTRELI_EI_MASK;
188 temp |= ATTRELI_EI(index); 188 temp |= ATTRELI_EI(index);
189 gfar_write(&regs->attreli, flags); 189 gfar_write(&regs->attreli, temp);
190 190
191out: 191out:
192 unlock_rx_qs(priv); 192 unlock_rx_qs(priv);
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index fc9c57893f8..7db0a1c3216 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -903,7 +903,7 @@ static int mkiss_ioctl(struct tty_struct *tty, struct file *file,
903static long mkiss_compat_ioctl(struct tty_struct *tty, struct file *file, 903static long mkiss_compat_ioctl(struct tty_struct *tty, struct file *file,
904 unsigned int cmd, unsigned long arg) 904 unsigned int cmd, unsigned long arg)
905{ 905{
906 switch (arg) { 906 switch (cmd) {
907 case SIOCGIFNAME: 907 case SIOCGIFNAME:
908 case SIOCGIFENCAP: 908 case SIOCGIFENCAP:
909 case SIOCSIFENCAP: 909 case SIOCSIFENCAP:
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 3298f5a11da..63abd1c0d75 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -59,10 +59,10 @@ struct igb_adapter;
59#define MAX_Q_VECTORS 8 59#define MAX_Q_VECTORS 8
60 60
61/* Transmit and receive queues */ 61/* Transmit and receive queues */
62#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? \ 62#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? 2 : \
63 (adapter->vfs_allocated_count > 6 ? 1 : 2) : 4) 63 (hw->mac.type > e1000_82575 ? 8 : 4))
64#define IGB_MAX_TX_QUEUES IGB_MAX_RX_QUEUES 64#define IGB_ABS_MAX_TX_QUEUES 8
65#define IGB_ABS_MAX_TX_QUEUES 4 65#define IGB_MAX_TX_QUEUES IGB_MAX_RX_QUEUES
66 66
67#define IGB_MAX_VF_MC_ENTRIES 30 67#define IGB_MAX_VF_MC_ENTRIES 30
68#define IGB_MAX_VF_FUNCTIONS 8 68#define IGB_MAX_VF_FUNCTIONS 8
@@ -249,10 +249,6 @@ struct igb_adapter {
249 u16 link_speed; 249 u16 link_speed;
250 u16 link_duplex; 250 u16 link_duplex;
251 251
252 unsigned int total_tx_bytes;
253 unsigned int total_tx_packets;
254 unsigned int total_rx_bytes;
255 unsigned int total_rx_packets;
256 /* Interrupt Throttle Rate */ 252 /* Interrupt Throttle Rate */
257 u32 rx_itr_setting; 253 u32 rx_itr_setting;
258 u32 tx_itr_setting; 254 u32 tx_itr_setting;
@@ -315,6 +311,7 @@ struct igb_adapter {
315 u16 rx_ring_count; 311 u16 rx_ring_count;
316 unsigned int vfs_allocated_count; 312 unsigned int vfs_allocated_count;
317 struct vf_data_storage *vf_data; 313 struct vf_data_storage *vf_data;
314 u32 rss_queues;
318}; 315};
319 316
320#define IGB_FLAG_HAS_MSI (1 << 0) 317#define IGB_FLAG_HAS_MSI (1 << 0)
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 90b89a81f66..c1cde5b4490 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -37,77 +37,88 @@
37 37
38#include "igb.h" 38#include "igb.h"
39 39
40enum {NETDEV_STATS, IGB_STATS};
41
42struct igb_stats { 40struct igb_stats {
43 char stat_string[ETH_GSTRING_LEN]; 41 char stat_string[ETH_GSTRING_LEN];
44 int type;
45 int sizeof_stat; 42 int sizeof_stat;
46 int stat_offset; 43 int stat_offset;
47}; 44};
48 45
49#define IGB_STAT(m) IGB_STATS, \ 46#define IGB_STAT(_name, _stat) { \
50 FIELD_SIZEOF(struct igb_adapter, m), \ 47 .stat_string = _name, \
51 offsetof(struct igb_adapter, m) 48 .sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \
52#define IGB_NETDEV_STAT(m) NETDEV_STATS, \ 49 .stat_offset = offsetof(struct igb_adapter, _stat) \
53 FIELD_SIZEOF(struct net_device, m), \ 50}
54 offsetof(struct net_device, m)
55
56static const struct igb_stats igb_gstrings_stats[] = { 51static const struct igb_stats igb_gstrings_stats[] = {
57 { "rx_packets", IGB_STAT(stats.gprc) }, 52 IGB_STAT("rx_packets", stats.gprc),
58 { "tx_packets", IGB_STAT(stats.gptc) }, 53 IGB_STAT("tx_packets", stats.gptc),
59 { "rx_bytes", IGB_STAT(stats.gorc) }, 54 IGB_STAT("rx_bytes", stats.gorc),
60 { "tx_bytes", IGB_STAT(stats.gotc) }, 55 IGB_STAT("tx_bytes", stats.gotc),
61 { "rx_broadcast", IGB_STAT(stats.bprc) }, 56 IGB_STAT("rx_broadcast", stats.bprc),
62 { "tx_broadcast", IGB_STAT(stats.bptc) }, 57 IGB_STAT("tx_broadcast", stats.bptc),
63 { "rx_multicast", IGB_STAT(stats.mprc) }, 58 IGB_STAT("rx_multicast", stats.mprc),
64 { "tx_multicast", IGB_STAT(stats.mptc) }, 59 IGB_STAT("tx_multicast", stats.mptc),
65 { "rx_errors", IGB_NETDEV_STAT(stats.rx_errors) }, 60 IGB_STAT("multicast", stats.mprc),
66 { "tx_errors", IGB_NETDEV_STAT(stats.tx_errors) }, 61 IGB_STAT("collisions", stats.colc),
67 { "tx_dropped", IGB_NETDEV_STAT(stats.tx_dropped) }, 62 IGB_STAT("rx_crc_errors", stats.crcerrs),
68 { "multicast", IGB_STAT(stats.mprc) }, 63 IGB_STAT("rx_no_buffer_count", stats.rnbc),
69 { "collisions", IGB_STAT(stats.colc) }, 64 IGB_STAT("rx_missed_errors", stats.mpc),
70 { "rx_length_errors", IGB_NETDEV_STAT(stats.rx_length_errors) }, 65 IGB_STAT("tx_aborted_errors", stats.ecol),
71 { "rx_over_errors", IGB_NETDEV_STAT(stats.rx_over_errors) }, 66 IGB_STAT("tx_carrier_errors", stats.tncrs),
72 { "rx_crc_errors", IGB_STAT(stats.crcerrs) }, 67 IGB_STAT("tx_window_errors", stats.latecol),
73 { "rx_frame_errors", IGB_NETDEV_STAT(stats.rx_frame_errors) }, 68 IGB_STAT("tx_abort_late_coll", stats.latecol),
74 { "rx_no_buffer_count", IGB_STAT(stats.rnbc) }, 69 IGB_STAT("tx_deferred_ok", stats.dc),
75 { "rx_queue_drop_packet_count", IGB_NETDEV_STAT(stats.rx_fifo_errors) }, 70 IGB_STAT("tx_single_coll_ok", stats.scc),
76 { "rx_missed_errors", IGB_STAT(stats.mpc) }, 71 IGB_STAT("tx_multi_coll_ok", stats.mcc),
77 { "tx_aborted_errors", IGB_STAT(stats.ecol) }, 72 IGB_STAT("tx_timeout_count", tx_timeout_count),
78 { "tx_carrier_errors", IGB_STAT(stats.tncrs) }, 73 IGB_STAT("rx_long_length_errors", stats.roc),
79 { "tx_fifo_errors", IGB_NETDEV_STAT(stats.tx_fifo_errors) }, 74 IGB_STAT("rx_short_length_errors", stats.ruc),
80 { "tx_heartbeat_errors", IGB_NETDEV_STAT(stats.tx_heartbeat_errors) }, 75 IGB_STAT("rx_align_errors", stats.algnerrc),
81 { "tx_window_errors", IGB_STAT(stats.latecol) }, 76 IGB_STAT("tx_tcp_seg_good", stats.tsctc),
82 { "tx_abort_late_coll", IGB_STAT(stats.latecol) }, 77 IGB_STAT("tx_tcp_seg_failed", stats.tsctfc),
83 { "tx_deferred_ok", IGB_STAT(stats.dc) }, 78 IGB_STAT("rx_flow_control_xon", stats.xonrxc),
84 { "tx_single_coll_ok", IGB_STAT(stats.scc) }, 79 IGB_STAT("rx_flow_control_xoff", stats.xoffrxc),
85 { "tx_multi_coll_ok", IGB_STAT(stats.mcc) }, 80 IGB_STAT("tx_flow_control_xon", stats.xontxc),
86 { "tx_timeout_count", IGB_STAT(tx_timeout_count) }, 81 IGB_STAT("tx_flow_control_xoff", stats.xofftxc),
87 { "rx_long_length_errors", IGB_STAT(stats.roc) }, 82 IGB_STAT("rx_long_byte_count", stats.gorc),
88 { "rx_short_length_errors", IGB_STAT(stats.ruc) }, 83 IGB_STAT("tx_dma_out_of_sync", stats.doosync),
89 { "rx_align_errors", IGB_STAT(stats.algnerrc) }, 84 IGB_STAT("tx_smbus", stats.mgptc),
90 { "tx_tcp_seg_good", IGB_STAT(stats.tsctc) }, 85 IGB_STAT("rx_smbus", stats.mgprc),
91 { "tx_tcp_seg_failed", IGB_STAT(stats.tsctfc) }, 86 IGB_STAT("dropped_smbus", stats.mgpdc),
92 { "rx_flow_control_xon", IGB_STAT(stats.xonrxc) }, 87};
93 { "rx_flow_control_xoff", IGB_STAT(stats.xoffrxc) }, 88
94 { "tx_flow_control_xon", IGB_STAT(stats.xontxc) }, 89#define IGB_NETDEV_STAT(_net_stat) { \
95 { "tx_flow_control_xoff", IGB_STAT(stats.xofftxc) }, 90 .stat_string = __stringify(_net_stat), \
96 { "rx_long_byte_count", IGB_STAT(stats.gorc) }, 91 .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
97 { "tx_dma_out_of_sync", IGB_STAT(stats.doosync) }, 92 .stat_offset = offsetof(struct net_device_stats, _net_stat) \
98 { "tx_smbus", IGB_STAT(stats.mgptc) }, 93}
99 { "rx_smbus", IGB_STAT(stats.mgprc) }, 94static const struct igb_stats igb_gstrings_net_stats[] = {
100 { "dropped_smbus", IGB_STAT(stats.mgpdc) }, 95 IGB_NETDEV_STAT(rx_errors),
96 IGB_NETDEV_STAT(tx_errors),
97 IGB_NETDEV_STAT(tx_dropped),
98 IGB_NETDEV_STAT(rx_length_errors),
99 IGB_NETDEV_STAT(rx_over_errors),
100 IGB_NETDEV_STAT(rx_frame_errors),
101 IGB_NETDEV_STAT(rx_fifo_errors),
102 IGB_NETDEV_STAT(tx_fifo_errors),
103 IGB_NETDEV_STAT(tx_heartbeat_errors)
101}; 104};
102 105
106#define IGB_GLOBAL_STATS_LEN \
107 (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats))
108#define IGB_NETDEV_STATS_LEN \
109 (sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats))
110#define IGB_RX_QUEUE_STATS_LEN \
111 (sizeof(struct igb_rx_queue_stats) / sizeof(u64))
112#define IGB_TX_QUEUE_STATS_LEN \
113 (sizeof(struct igb_tx_queue_stats) / sizeof(u64))
103#define IGB_QUEUE_STATS_LEN \ 114#define IGB_QUEUE_STATS_LEN \
104 ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \ 115 ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \
105 (sizeof(struct igb_rx_queue_stats) / sizeof(u64))) + \ 116 IGB_RX_QUEUE_STATS_LEN) + \
106 (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \ 117 (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \
107 (sizeof(struct igb_tx_queue_stats) / sizeof(u64)))) 118 IGB_TX_QUEUE_STATS_LEN))
108#define IGB_GLOBAL_STATS_LEN \ 119#define IGB_STATS_LEN \
109 (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)) 120 (IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN)
110#define IGB_STATS_LEN (IGB_GLOBAL_STATS_LEN + IGB_QUEUE_STATS_LEN) 121
111static const char igb_gstrings_test[][ETH_GSTRING_LEN] = { 122static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
112 "Register test (offline)", "Eeprom test (offline)", 123 "Register test (offline)", "Eeprom test (offline)",
113 "Interrupt test (offline)", "Loopback test (offline)", 124 "Interrupt test (offline)", "Loopback test (offline)",
@@ -735,17 +746,17 @@ static int igb_set_ringparam(struct net_device *netdev,
735 struct igb_adapter *adapter = netdev_priv(netdev); 746 struct igb_adapter *adapter = netdev_priv(netdev);
736 struct igb_ring *temp_ring; 747 struct igb_ring *temp_ring;
737 int i, err = 0; 748 int i, err = 0;
738 u32 new_rx_count, new_tx_count; 749 u16 new_rx_count, new_tx_count;
739 750
740 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 751 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
741 return -EINVAL; 752 return -EINVAL;
742 753
743 new_rx_count = min(ring->rx_pending, (u32)IGB_MAX_RXD); 754 new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD);
744 new_rx_count = max(new_rx_count, (u32)IGB_MIN_RXD); 755 new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD);
745 new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); 756 new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
746 757
747 new_tx_count = min(ring->tx_pending, (u32)IGB_MAX_TXD); 758 new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD);
748 new_tx_count = max(new_tx_count, (u32)IGB_MIN_TXD); 759 new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD);
749 new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); 760 new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
750 761
751 if ((new_tx_count == adapter->tx_ring_count) && 762 if ((new_tx_count == adapter->tx_ring_count) &&
@@ -1922,43 +1933,32 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
1922 struct ethtool_stats *stats, u64 *data) 1933 struct ethtool_stats *stats, u64 *data)
1923{ 1934{
1924 struct igb_adapter *adapter = netdev_priv(netdev); 1935 struct igb_adapter *adapter = netdev_priv(netdev);
1936 struct net_device_stats *net_stats = &netdev->stats;
1925 u64 *queue_stat; 1937 u64 *queue_stat;
1926 int stat_count_tx = sizeof(struct igb_tx_queue_stats) / sizeof(u64); 1938 int i, j, k;
1927 int stat_count_rx = sizeof(struct igb_rx_queue_stats) / sizeof(u64); 1939 char *p;
1928 int j;
1929 int i;
1930 char *p = NULL;
1931 1940
1932 igb_update_stats(adapter); 1941 igb_update_stats(adapter);
1933 1942
1934 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { 1943 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
1935 switch (igb_gstrings_stats[i].type) { 1944 p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
1936 case NETDEV_STATS:
1937 p = (char *) netdev +
1938 igb_gstrings_stats[i].stat_offset;
1939 break;
1940 case IGB_STATS:
1941 p = (char *) adapter +
1942 igb_gstrings_stats[i].stat_offset;
1943 break;
1944 }
1945
1946 data[i] = (igb_gstrings_stats[i].sizeof_stat == 1945 data[i] = (igb_gstrings_stats[i].sizeof_stat ==
1947 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1946 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1948 } 1947 }
1948 for (j = 0; j < IGB_NETDEV_STATS_LEN; j++, i++) {
1949 p = (char *)net_stats + igb_gstrings_net_stats[j].stat_offset;
1950 data[i] = (igb_gstrings_net_stats[j].sizeof_stat ==
1951 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1952 }
1949 for (j = 0; j < adapter->num_tx_queues; j++) { 1953 for (j = 0; j < adapter->num_tx_queues; j++) {
1950 int k;
1951 queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats; 1954 queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats;
1952 for (k = 0; k < stat_count_tx; k++) 1955 for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++)
1953 data[i + k] = queue_stat[k]; 1956 data[i] = queue_stat[k];
1954 i += k;
1955 } 1957 }
1956 for (j = 0; j < adapter->num_rx_queues; j++) { 1958 for (j = 0; j < adapter->num_rx_queues; j++) {
1957 int k;
1958 queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats; 1959 queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats;
1959 for (k = 0; k < stat_count_rx; k++) 1960 for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++)
1960 data[i + k] = queue_stat[k]; 1961 data[i] = queue_stat[k];
1961 i += k;
1962 } 1962 }
1963} 1963}
1964 1964
@@ -1979,6 +1979,11 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
1979 ETH_GSTRING_LEN); 1979 ETH_GSTRING_LEN);
1980 p += ETH_GSTRING_LEN; 1980 p += ETH_GSTRING_LEN;
1981 } 1981 }
1982 for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) {
1983 memcpy(p, igb_gstrings_net_stats[i].stat_string,
1984 ETH_GSTRING_LEN);
1985 p += ETH_GSTRING_LEN;
1986 }
1982 for (i = 0; i < adapter->num_tx_queues; i++) { 1987 for (i = 0; i < adapter->num_tx_queues; i++) {
1983 sprintf(p, "tx_queue_%u_packets", i); 1988 sprintf(p, "tx_queue_%u_packets", i);
1984 p += ETH_GSTRING_LEN; 1989 p += ETH_GSTRING_LEN;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index b044c985df0..0cab5e2b089 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -296,10 +296,10 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
296 * and continue consuming queues in the same sequence 296 * and continue consuming queues in the same sequence
297 */ 297 */
298 if (adapter->vfs_allocated_count) { 298 if (adapter->vfs_allocated_count) {
299 for (; i < adapter->num_rx_queues; i++) 299 for (; i < adapter->rss_queues; i++)
300 adapter->rx_ring[i].reg_idx = rbase_offset + 300 adapter->rx_ring[i].reg_idx = rbase_offset +
301 Q_IDX_82576(i); 301 Q_IDX_82576(i);
302 for (; j < adapter->num_tx_queues; j++) 302 for (; j < adapter->rss_queues; j++)
303 adapter->tx_ring[j].reg_idx = rbase_offset + 303 adapter->tx_ring[j].reg_idx = rbase_offset +
304 Q_IDX_82576(j); 304 Q_IDX_82576(j);
305 } 305 }
@@ -618,14 +618,15 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
618 int numvecs, i; 618 int numvecs, i;
619 619
620 /* Number of supported queues. */ 620 /* Number of supported queues. */
621 adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus()); 621 adapter->num_rx_queues = adapter->rss_queues;
622 adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus()); 622 adapter->num_tx_queues = adapter->rss_queues;
623 623
624 /* start with one vector for every rx queue */ 624 /* start with one vector for every rx queue */
625 numvecs = adapter->num_rx_queues; 625 numvecs = adapter->num_rx_queues;
626 626
627 /* if tx handler is seperate add 1 for every tx queue */ 627 /* if tx handler is seperate add 1 for every tx queue */
628 numvecs += adapter->num_tx_queues; 628 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
629 numvecs += adapter->num_tx_queues;
629 630
630 /* store the number of vectors reserved for queues */ 631 /* store the number of vectors reserved for queues */
631 adapter->num_q_vectors = numvecs; 632 adapter->num_q_vectors = numvecs;
@@ -666,6 +667,7 @@ msi_only:
666 } 667 }
667#endif 668#endif
668 adapter->vfs_allocated_count = 0; 669 adapter->vfs_allocated_count = 0;
670 adapter->rss_queues = 1;
669 adapter->flags |= IGB_FLAG_QUEUE_PAIRS; 671 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
670 adapter->num_rx_queues = 1; 672 adapter->num_rx_queues = 1;
671 adapter->num_tx_queues = 1; 673 adapter->num_tx_queues = 1;
@@ -1566,56 +1568,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1566 } 1568 }
1567 1569
1568#endif 1570#endif
1569 switch (hw->mac.type) {
1570 case e1000_82576:
1571 /*
1572 * Initialize hardware timer: we keep it running just in case
1573 * that some program needs it later on.
1574 */
1575 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1576 adapter->cycles.read = igb_read_clock;
1577 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1578 adapter->cycles.mult = 1;
1579 /**
1580 * Scale the NIC clock cycle by a large factor so that
1581 * relatively small clock corrections can be added or
1582 * substracted at each clock tick. The drawbacks of a large
1583 * factor are a) that the clock register overflows more quickly
1584 * (not such a big deal) and b) that the increment per tick has
1585 * to fit into 24 bits. As a result we need to use a shift of
1586 * 19 so we can fit a value of 16 into the TIMINCA register.
1587 */
1588 adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
1589 wr32(E1000_TIMINCA,
1590 (1 << E1000_TIMINCA_16NS_SHIFT) |
1591 (16 << IGB_82576_TSYNC_SHIFT));
1592
1593 /* Set registers so that rollover occurs soon to test this. */
1594 wr32(E1000_SYSTIML, 0x00000000);
1595 wr32(E1000_SYSTIMH, 0xFF800000);
1596 wrfl();
1597
1598 timecounter_init(&adapter->clock,
1599 &adapter->cycles,
1600 ktime_to_ns(ktime_get_real()));
1601 /*
1602 * Synchronize our NIC clock against system wall clock. NIC
1603 * time stamp reading requires ~3us per sample, each sample
1604 * was pretty stable even under load => only require 10
1605 * samples for each offset comparison.
1606 */
1607 memset(&adapter->compare, 0, sizeof(adapter->compare));
1608 adapter->compare.source = &adapter->clock;
1609 adapter->compare.target = ktime_get_real;
1610 adapter->compare.num_samples = 10;
1611 timecompare_update(&adapter->compare, 0);
1612 break;
1613 case e1000_82575:
1614 /* 82575 does not support timesync */
1615 default:
1616 break;
1617 }
1618
1619 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); 1571 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
1620 /* print bus type/speed/width info */ 1572 /* print bus type/speed/width info */
1621 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", 1573 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
@@ -1781,6 +1733,70 @@ static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
1781#endif /* CONFIG_PCI_IOV */ 1733#endif /* CONFIG_PCI_IOV */
1782} 1734}
1783 1735
1736
1737/**
1738 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
1739 * @adapter: board private structure to initialize
1740 *
1741 * igb_init_hw_timer initializes the function pointer and values for the hw
1742 * timer found in hardware.
1743 **/
1744static void igb_init_hw_timer(struct igb_adapter *adapter)
1745{
1746 struct e1000_hw *hw = &adapter->hw;
1747
1748 switch (hw->mac.type) {
1749 case e1000_82576:
1750 /*
1751 * Initialize hardware timer: we keep it running just in case
1752 * that some program needs it later on.
1753 */
1754 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1755 adapter->cycles.read = igb_read_clock;
1756 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1757 adapter->cycles.mult = 1;
1758 /**
1759 * Scale the NIC clock cycle by a large factor so that
1760 * relatively small clock corrections can be added or
1761 * substracted at each clock tick. The drawbacks of a large
1762 * factor are a) that the clock register overflows more quickly
1763 * (not such a big deal) and b) that the increment per tick has
1764 * to fit into 24 bits. As a result we need to use a shift of
1765 * 19 so we can fit a value of 16 into the TIMINCA register.
1766 */
1767 adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
1768 wr32(E1000_TIMINCA,
1769 (1 << E1000_TIMINCA_16NS_SHIFT) |
1770 (16 << IGB_82576_TSYNC_SHIFT));
1771
1772 /* Set registers so that rollover occurs soon to test this. */
1773 wr32(E1000_SYSTIML, 0x00000000);
1774 wr32(E1000_SYSTIMH, 0xFF800000);
1775 wrfl();
1776
1777 timecounter_init(&adapter->clock,
1778 &adapter->cycles,
1779 ktime_to_ns(ktime_get_real()));
1780 /*
1781 * Synchronize our NIC clock against system wall clock. NIC
1782 * time stamp reading requires ~3us per sample, each sample
1783 * was pretty stable even under load => only require 10
1784 * samples for each offset comparison.
1785 */
1786 memset(&adapter->compare, 0, sizeof(adapter->compare));
1787 adapter->compare.source = &adapter->clock;
1788 adapter->compare.target = ktime_get_real;
1789 adapter->compare.num_samples = 10;
1790 timecompare_update(&adapter->compare, 0);
1791 break;
1792 case e1000_82575:
1793 /* 82575 does not support timesync */
1794 default:
1795 break;
1796 }
1797
1798}
1799
1784/** 1800/**
1785 * igb_sw_init - Initialize general software structures (struct igb_adapter) 1801 * igb_sw_init - Initialize general software structures (struct igb_adapter)
1786 * @adapter: board private structure to initialize 1802 * @adapter: board private structure to initialize
@@ -1810,12 +1826,24 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
1810 adapter->vfs_allocated_count = max_vfs; 1826 adapter->vfs_allocated_count = max_vfs;
1811 1827
1812#endif /* CONFIG_PCI_IOV */ 1828#endif /* CONFIG_PCI_IOV */
1829 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
1830
1831 /*
1832 * if rss_queues > 4 or vfs are going to be allocated with rss_queues
1833 * then we should combine the queues into a queue pair in order to
1834 * conserve interrupts due to limited supply
1835 */
1836 if ((adapter->rss_queues > 4) ||
1837 ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
1838 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
1839
1813 /* This call may decrease the number of queues */ 1840 /* This call may decrease the number of queues */
1814 if (igb_init_interrupt_scheme(adapter)) { 1841 if (igb_init_interrupt_scheme(adapter)) {
1815 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); 1842 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1816 return -ENOMEM; 1843 return -ENOMEM;
1817 } 1844 }
1818 1845
1846 igb_init_hw_timer(adapter);
1819 igb_probe_vfs(adapter); 1847 igb_probe_vfs(adapter);
1820 1848
1821 /* Explicitly disable IRQ since the NIC can be in any state. */ 1849 /* Explicitly disable IRQ since the NIC can be in any state. */
@@ -2000,7 +2028,7 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2000 } 2028 }
2001 } 2029 }
2002 2030
2003 for (i = 0; i < IGB_MAX_TX_QUEUES; i++) { 2031 for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
2004 int r_idx = i % adapter->num_tx_queues; 2032 int r_idx = i % adapter->num_tx_queues;
2005 adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx]; 2033 adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
2006 } 2034 }
@@ -2184,7 +2212,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
2184 array_wr32(E1000_RSSRK(0), j, rsskey); 2212 array_wr32(E1000_RSSRK(0), j, rsskey);
2185 } 2213 }
2186 2214
2187 num_rx_queues = adapter->num_rx_queues; 2215 num_rx_queues = adapter->rss_queues;
2188 2216
2189 if (adapter->vfs_allocated_count) { 2217 if (adapter->vfs_allocated_count) {
2190 /* 82575 and 82576 supports 2 RSS queues for VMDq */ 2218 /* 82575 and 82576 supports 2 RSS queues for VMDq */
@@ -2240,7 +2268,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
2240 E1000_VT_CTL_DEFAULT_POOL_SHIFT; 2268 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2241 wr32(E1000_VT_CTL, vtctl); 2269 wr32(E1000_VT_CTL, vtctl);
2242 } 2270 }
2243 if (adapter->num_rx_queues > 1) 2271 if (adapter->rss_queues > 1)
2244 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q; 2272 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2245 else 2273 else
2246 mrqc = E1000_MRQC_ENABLE_VMDQ; 2274 mrqc = E1000_MRQC_ENABLE_VMDQ;
@@ -2370,7 +2398,7 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn)
2370 /* clear all bits that might not be set */ 2398 /* clear all bits that might not be set */
2371 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE); 2399 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
2372 2400
2373 if (adapter->num_rx_queues > 1 && vfn == adapter->vfs_allocated_count) 2401 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
2374 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */ 2402 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
2375 /* 2403 /*
2376 * for VMDq only allow the VFs and pool 0 to accept broadcast and 2404 * for VMDq only allow the VFs and pool 0 to accept broadcast and
@@ -2915,7 +2943,6 @@ static void igb_watchdog_task(struct work_struct *work)
2915 watchdog_task); 2943 watchdog_task);
2916 struct e1000_hw *hw = &adapter->hw; 2944 struct e1000_hw *hw = &adapter->hw;
2917 struct net_device *netdev = adapter->netdev; 2945 struct net_device *netdev = adapter->netdev;
2918 struct igb_ring *tx_ring = adapter->tx_ring;
2919 u32 link; 2946 u32 link;
2920 int i; 2947 int i;
2921 2948
@@ -2985,22 +3012,24 @@ static void igb_watchdog_task(struct work_struct *work)
2985 igb_update_stats(adapter); 3012 igb_update_stats(adapter);
2986 igb_update_adaptive(hw); 3013 igb_update_adaptive(hw);
2987 3014
2988 if (!netif_carrier_ok(netdev)) { 3015 for (i = 0; i < adapter->num_tx_queues; i++) {
2989 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) { 3016 struct igb_ring *tx_ring = &adapter->tx_ring[i];
3017 if (!netif_carrier_ok(netdev)) {
2990 /* We've lost link, so the controller stops DMA, 3018 /* We've lost link, so the controller stops DMA,
2991 * but we've got queued Tx work that's never going 3019 * but we've got queued Tx work that's never going
2992 * to get done, so reset controller to flush Tx. 3020 * to get done, so reset controller to flush Tx.
2993 * (Do the reset outside of interrupt context). */ 3021 * (Do the reset outside of interrupt context). */
2994 adapter->tx_timeout_count++; 3022 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
2995 schedule_work(&adapter->reset_task); 3023 adapter->tx_timeout_count++;
2996 /* return immediately since reset is imminent */ 3024 schedule_work(&adapter->reset_task);
2997 return; 3025 /* return immediately since reset is imminent */
3026 return;
3027 }
2998 } 3028 }
2999 }
3000 3029
3001 /* Force detection of hung controller every watchdog period */ 3030 /* Force detection of hung controller every watchdog period */
3002 for (i = 0; i < adapter->num_tx_queues; i++) 3031 tx_ring->detect_tx_hung = true;
3003 adapter->tx_ring[i].detect_tx_hung = true; 3032 }
3004 3033
3005 /* Cause software interrupt to ensure rx ring is cleaned */ 3034 /* Cause software interrupt to ensure rx ring is cleaned */
3006 if (adapter->msix_entries) { 3035 if (adapter->msix_entries) {
@@ -3761,7 +3790,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3761 3790
3762void igb_update_stats(struct igb_adapter *adapter) 3791void igb_update_stats(struct igb_adapter *adapter)
3763{ 3792{
3764 struct net_device *netdev = adapter->netdev; 3793 struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
3765 struct e1000_hw *hw = &adapter->hw; 3794 struct e1000_hw *hw = &adapter->hw;
3766 struct pci_dev *pdev = adapter->pdev; 3795 struct pci_dev *pdev = adapter->pdev;
3767 u32 rnbc; 3796 u32 rnbc;
@@ -3785,13 +3814,13 @@ void igb_update_stats(struct igb_adapter *adapter)
3785 for (i = 0; i < adapter->num_rx_queues; i++) { 3814 for (i = 0; i < adapter->num_rx_queues; i++) {
3786 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF; 3815 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
3787 adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp; 3816 adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
3788 netdev->stats.rx_fifo_errors += rqdpc_tmp; 3817 net_stats->rx_fifo_errors += rqdpc_tmp;
3789 bytes += adapter->rx_ring[i].rx_stats.bytes; 3818 bytes += adapter->rx_ring[i].rx_stats.bytes;
3790 packets += adapter->rx_ring[i].rx_stats.packets; 3819 packets += adapter->rx_ring[i].rx_stats.packets;
3791 } 3820 }
3792 3821
3793 netdev->stats.rx_bytes = bytes; 3822 net_stats->rx_bytes = bytes;
3794 netdev->stats.rx_packets = packets; 3823 net_stats->rx_packets = packets;
3795 3824
3796 bytes = 0; 3825 bytes = 0;
3797 packets = 0; 3826 packets = 0;
@@ -3799,8 +3828,8 @@ void igb_update_stats(struct igb_adapter *adapter)
3799 bytes += adapter->tx_ring[i].tx_stats.bytes; 3828 bytes += adapter->tx_ring[i].tx_stats.bytes;
3800 packets += adapter->tx_ring[i].tx_stats.packets; 3829 packets += adapter->tx_ring[i].tx_stats.packets;
3801 } 3830 }
3802 netdev->stats.tx_bytes = bytes; 3831 net_stats->tx_bytes = bytes;
3803 netdev->stats.tx_packets = packets; 3832 net_stats->tx_packets = packets;
3804 3833
3805 /* read stats registers */ 3834 /* read stats registers */
3806 adapter->stats.crcerrs += rd32(E1000_CRCERRS); 3835 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
@@ -3837,7 +3866,7 @@ void igb_update_stats(struct igb_adapter *adapter)
3837 rd32(E1000_GOTCH); /* clear GOTCL */ 3866 rd32(E1000_GOTCH); /* clear GOTCL */
3838 rnbc = rd32(E1000_RNBC); 3867 rnbc = rd32(E1000_RNBC);
3839 adapter->stats.rnbc += rnbc; 3868 adapter->stats.rnbc += rnbc;
3840 netdev->stats.rx_fifo_errors += rnbc; 3869 net_stats->rx_fifo_errors += rnbc;
3841 adapter->stats.ruc += rd32(E1000_RUC); 3870 adapter->stats.ruc += rd32(E1000_RUC);
3842 adapter->stats.rfc += rd32(E1000_RFC); 3871 adapter->stats.rfc += rd32(E1000_RFC);
3843 adapter->stats.rjc += rd32(E1000_RJC); 3872 adapter->stats.rjc += rd32(E1000_RJC);
@@ -3878,29 +3907,29 @@ void igb_update_stats(struct igb_adapter *adapter)
3878 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC); 3907 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
3879 3908
3880 /* Fill out the OS statistics structure */ 3909 /* Fill out the OS statistics structure */
3881 netdev->stats.multicast = adapter->stats.mprc; 3910 net_stats->multicast = adapter->stats.mprc;
3882 netdev->stats.collisions = adapter->stats.colc; 3911 net_stats->collisions = adapter->stats.colc;
3883 3912
3884 /* Rx Errors */ 3913 /* Rx Errors */
3885 3914
3886 /* RLEC on some newer hardware can be incorrect so build 3915 /* RLEC on some newer hardware can be incorrect so build
3887 * our own version based on RUC and ROC */ 3916 * our own version based on RUC and ROC */
3888 netdev->stats.rx_errors = adapter->stats.rxerrc + 3917 net_stats->rx_errors = adapter->stats.rxerrc +
3889 adapter->stats.crcerrs + adapter->stats.algnerrc + 3918 adapter->stats.crcerrs + adapter->stats.algnerrc +
3890 adapter->stats.ruc + adapter->stats.roc + 3919 adapter->stats.ruc + adapter->stats.roc +
3891 adapter->stats.cexterr; 3920 adapter->stats.cexterr;
3892 netdev->stats.rx_length_errors = adapter->stats.ruc + 3921 net_stats->rx_length_errors = adapter->stats.ruc +
3893 adapter->stats.roc; 3922 adapter->stats.roc;
3894 netdev->stats.rx_crc_errors = adapter->stats.crcerrs; 3923 net_stats->rx_crc_errors = adapter->stats.crcerrs;
3895 netdev->stats.rx_frame_errors = adapter->stats.algnerrc; 3924 net_stats->rx_frame_errors = adapter->stats.algnerrc;
3896 netdev->stats.rx_missed_errors = adapter->stats.mpc; 3925 net_stats->rx_missed_errors = adapter->stats.mpc;
3897 3926
3898 /* Tx Errors */ 3927 /* Tx Errors */
3899 netdev->stats.tx_errors = adapter->stats.ecol + 3928 net_stats->tx_errors = adapter->stats.ecol +
3900 adapter->stats.latecol; 3929 adapter->stats.latecol;
3901 netdev->stats.tx_aborted_errors = adapter->stats.ecol; 3930 net_stats->tx_aborted_errors = adapter->stats.ecol;
3902 netdev->stats.tx_window_errors = adapter->stats.latecol; 3931 net_stats->tx_window_errors = adapter->stats.latecol;
3903 netdev->stats.tx_carrier_errors = adapter->stats.tncrs; 3932 net_stats->tx_carrier_errors = adapter->stats.tncrs;
3904 3933
3905 /* Tx Dropped needs to be maintained elsewhere */ 3934 /* Tx Dropped needs to be maintained elsewhere */
3906 3935
@@ -4923,6 +4952,7 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
4923 struct sk_buff *skb; 4952 struct sk_buff *skb;
4924 bool cleaned = false; 4953 bool cleaned = false;
4925 int cleaned_count = 0; 4954 int cleaned_count = 0;
4955 int current_node = numa_node_id();
4926 unsigned int total_bytes = 0, total_packets = 0; 4956 unsigned int total_bytes = 0, total_packets = 0;
4927 unsigned int i; 4957 unsigned int i;
4928 u32 staterr; 4958 u32 staterr;
@@ -4977,7 +5007,8 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
4977 buffer_info->page_offset, 5007 buffer_info->page_offset,
4978 length); 5008 length);
4979 5009
4980 if (page_count(buffer_info->page) != 1) 5010 if ((page_count(buffer_info->page) != 1) ||
5011 (page_to_nid(buffer_info->page) != current_node))
4981 buffer_info->page = NULL; 5012 buffer_info->page = NULL;
4982 else 5013 else
4983 get_page(buffer_info->page); 5014 get_page(buffer_info->page);
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index 63056e7b9e2..ba8d246d05a 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -1751,7 +1751,7 @@ static int ipg_nic_open(struct net_device *dev)
1751 /* Register the interrupt line to be used by the IPG within 1751 /* Register the interrupt line to be used by the IPG within
1752 * the Linux system. 1752 * the Linux system.
1753 */ 1753 */
1754 rc = request_irq(pdev->irq, &ipg_interrupt_handler, IRQF_SHARED, 1754 rc = request_irq(pdev->irq, ipg_interrupt_handler, IRQF_SHARED,
1755 dev->name, dev); 1755 dev->name, dev);
1756 if (rc < 0) { 1756 if (rc < 0) {
1757 printk(KERN_INFO "%s: Error when requesting interrupt.\n", 1757 printk(KERN_INFO "%s: Error when requesting interrupt.\n",
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 215adf6377d..ae6eab3e5ee 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -852,7 +852,7 @@ static void irda_usb_receive(struct urb *urb)
852 * hot unplug of the dongle... 852 * hot unplug of the dongle...
853 * Lowest effective timer is 10ms... 853 * Lowest effective timer is 10ms...
854 * Jean II */ 854 * Jean II */
855 self->rx_defer_timer.function = &irda_usb_rx_defer_expired; 855 self->rx_defer_timer.function = irda_usb_rx_defer_expired;
856 self->rx_defer_timer.data = (unsigned long) urb; 856 self->rx_defer_timer.data = (unsigned long) urb;
857 mod_timer(&self->rx_defer_timer, jiffies + (10 * HZ / 1000)); 857 mod_timer(&self->rx_defer_timer, jiffies + (10 * HZ / 1000));
858 return; 858 return;
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 448e84d5660..dceed80f16f 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1204,6 +1204,7 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
1204 adapter->link_check_timeout = jiffies; 1204 adapter->link_check_timeout = jiffies;
1205 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { 1205 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1206 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); 1206 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
1207 IXGBE_WRITE_FLUSH(hw);
1207 schedule_work(&adapter->watchdog_task); 1208 schedule_work(&adapter->watchdog_task);
1208 } 1209 }
1209} 1210}
@@ -1339,8 +1340,6 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
1339 if (!q_vector->rxr_count) 1340 if (!q_vector->rxr_count)
1340 return IRQ_HANDLED; 1341 return IRQ_HANDLED;
1341 1342
1342 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1343 rx_ring = &(adapter->rx_ring[r_idx]);
1344 /* disable interrupts on this vector only */ 1343 /* disable interrupts on this vector only */
1345 ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx)); 1344 ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
1346 napi_schedule(&q_vector->napi); 1345 napi_schedule(&q_vector->napi);
@@ -3627,10 +3626,10 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
3627 * It's easy to be greedy for MSI-X vectors, but it really 3626 * It's easy to be greedy for MSI-X vectors, but it really
3628 * doesn't do us much good if we have a lot more vectors 3627 * doesn't do us much good if we have a lot more vectors
3629 * than CPU's. So let's be conservative and only ask for 3628 * than CPU's. So let's be conservative and only ask for
3630 * (roughly) twice the number of vectors as there are CPU's. 3629 * (roughly) the same number of vectors as there are CPU's.
3631 */ 3630 */
3632 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues, 3631 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
3633 (int)(num_online_cpus() * 2)) + NON_Q_VECTORS; 3632 (int)num_online_cpus()) + NON_Q_VECTORS;
3634 3633
3635 /* 3634 /*
3636 * At the same time, hardware can only support a maximum of 3635 * At the same time, hardware can only support a maximum of
@@ -5989,6 +5988,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
5989 } else { 5988 } else {
5990 pci_set_master(pdev); 5989 pci_set_master(pdev);
5991 pci_restore_state(pdev); 5990 pci_restore_state(pdev);
5991 pci_save_state(pdev);
5992 5992
5993 pci_wake_from_d3(pdev, false); 5993 pci_wake_from_d3(pdev, false);
5994 5994
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index d7dba3f6f76..ae2b5c79c55 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -38,12 +38,27 @@ struct macvlan_port {
38 struct list_head vlans; 38 struct list_head vlans;
39}; 39};
40 40
41/**
42 * struct macvlan_rx_stats - MACVLAN percpu rx stats
43 * @rx_packets: number of received packets
44 * @rx_bytes: number of received bytes
45 * @multicast: number of received multicast packets
46 * @rx_errors: number of errors
47 */
48struct macvlan_rx_stats {
49 unsigned long rx_packets;
50 unsigned long rx_bytes;
51 unsigned long multicast;
52 unsigned long rx_errors;
53};
54
41struct macvlan_dev { 55struct macvlan_dev {
42 struct net_device *dev; 56 struct net_device *dev;
43 struct list_head list; 57 struct list_head list;
44 struct hlist_node hlist; 58 struct hlist_node hlist;
45 struct macvlan_port *port; 59 struct macvlan_port *port;
46 struct net_device *lowerdev; 60 struct net_device *lowerdev;
61 struct macvlan_rx_stats *rx_stats;
47}; 62};
48 63
49 64
@@ -110,6 +125,7 @@ static void macvlan_broadcast(struct sk_buff *skb,
110 struct net_device *dev; 125 struct net_device *dev;
111 struct sk_buff *nskb; 126 struct sk_buff *nskb;
112 unsigned int i; 127 unsigned int i;
128 struct macvlan_rx_stats *rx_stats;
113 129
114 if (skb->protocol == htons(ETH_P_PAUSE)) 130 if (skb->protocol == htons(ETH_P_PAUSE))
115 return; 131 return;
@@ -117,17 +133,17 @@ static void macvlan_broadcast(struct sk_buff *skb,
117 for (i = 0; i < MACVLAN_HASH_SIZE; i++) { 133 for (i = 0; i < MACVLAN_HASH_SIZE; i++) {
118 hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[i], hlist) { 134 hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[i], hlist) {
119 dev = vlan->dev; 135 dev = vlan->dev;
136 rx_stats = per_cpu_ptr(vlan->rx_stats, smp_processor_id());
120 137
121 nskb = skb_clone(skb, GFP_ATOMIC); 138 nskb = skb_clone(skb, GFP_ATOMIC);
122 if (nskb == NULL) { 139 if (nskb == NULL) {
123 dev->stats.rx_errors++; 140 rx_stats->rx_errors++;
124 dev->stats.rx_dropped++;
125 continue; 141 continue;
126 } 142 }
127 143
128 dev->stats.rx_bytes += skb->len + ETH_HLEN; 144 rx_stats->rx_bytes += skb->len + ETH_HLEN;
129 dev->stats.rx_packets++; 145 rx_stats->rx_packets++;
130 dev->stats.multicast++; 146 rx_stats->multicast++;
131 147
132 nskb->dev = dev; 148 nskb->dev = dev;
133 if (!compare_ether_addr_64bits(eth->h_dest, dev->broadcast)) 149 if (!compare_ether_addr_64bits(eth->h_dest, dev->broadcast))
@@ -147,6 +163,7 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
147 const struct macvlan_port *port; 163 const struct macvlan_port *port;
148 const struct macvlan_dev *vlan; 164 const struct macvlan_dev *vlan;
149 struct net_device *dev; 165 struct net_device *dev;
166 struct macvlan_rx_stats *rx_stats;
150 167
151 port = rcu_dereference(skb->dev->macvlan_port); 168 port = rcu_dereference(skb->dev->macvlan_port);
152 if (port == NULL) 169 if (port == NULL)
@@ -166,16 +183,15 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
166 kfree_skb(skb); 183 kfree_skb(skb);
167 return NULL; 184 return NULL;
168 } 185 }
169 186 rx_stats = per_cpu_ptr(vlan->rx_stats, smp_processor_id());
170 skb = skb_share_check(skb, GFP_ATOMIC); 187 skb = skb_share_check(skb, GFP_ATOMIC);
171 if (skb == NULL) { 188 if (skb == NULL) {
172 dev->stats.rx_errors++; 189 rx_stats->rx_errors++;
173 dev->stats.rx_dropped++;
174 return NULL; 190 return NULL;
175 } 191 }
176 192
177 dev->stats.rx_bytes += skb->len + ETH_HLEN; 193 rx_stats->rx_bytes += skb->len + ETH_HLEN;
178 dev->stats.rx_packets++; 194 rx_stats->rx_packets++;
179 195
180 skb->dev = dev; 196 skb->dev = dev;
181 skb->pkt_type = PACKET_HOST; 197 skb->pkt_type = PACKET_HOST;
@@ -202,7 +218,7 @@ static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
202 } else 218 } else
203 txq->tx_dropped++; 219 txq->tx_dropped++;
204 220
205 return NETDEV_TX_OK; 221 return ret;
206} 222}
207 223
208static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev, 224static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
@@ -365,9 +381,47 @@ static int macvlan_init(struct net_device *dev)
365 381
366 macvlan_set_lockdep_class(dev); 382 macvlan_set_lockdep_class(dev);
367 383
384 vlan->rx_stats = alloc_percpu(struct macvlan_rx_stats);
385 if (!vlan->rx_stats)
386 return -ENOMEM;
387
368 return 0; 388 return 0;
369} 389}
370 390
391static void macvlan_uninit(struct net_device *dev)
392{
393 struct macvlan_dev *vlan = netdev_priv(dev);
394
395 free_percpu(vlan->rx_stats);
396}
397
398static struct net_device_stats *macvlan_dev_get_stats(struct net_device *dev)
399{
400 struct net_device_stats *stats = &dev->stats;
401 struct macvlan_dev *vlan = netdev_priv(dev);
402
403 dev_txq_stats_fold(dev, stats);
404
405 if (vlan->rx_stats) {
406 struct macvlan_rx_stats *p, rx = {0};
407 int i;
408
409 for_each_possible_cpu(i) {
410 p = per_cpu_ptr(vlan->rx_stats, i);
411 rx.rx_packets += p->rx_packets;
412 rx.rx_bytes += p->rx_bytes;
413 rx.rx_errors += p->rx_errors;
414 rx.multicast += p->multicast;
415 }
416 stats->rx_packets = rx.rx_packets;
417 stats->rx_bytes = rx.rx_bytes;
418 stats->rx_errors = rx.rx_errors;
419 stats->rx_dropped = rx.rx_errors;
420 stats->multicast = rx.multicast;
421 }
422 return stats;
423}
424
371static void macvlan_ethtool_get_drvinfo(struct net_device *dev, 425static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
372 struct ethtool_drvinfo *drvinfo) 426 struct ethtool_drvinfo *drvinfo)
373{ 427{
@@ -404,6 +458,7 @@ static const struct ethtool_ops macvlan_ethtool_ops = {
404 458
405static const struct net_device_ops macvlan_netdev_ops = { 459static const struct net_device_ops macvlan_netdev_ops = {
406 .ndo_init = macvlan_init, 460 .ndo_init = macvlan_init,
461 .ndo_uninit = macvlan_uninit,
407 .ndo_open = macvlan_open, 462 .ndo_open = macvlan_open,
408 .ndo_stop = macvlan_stop, 463 .ndo_stop = macvlan_stop,
409 .ndo_start_xmit = macvlan_start_xmit, 464 .ndo_start_xmit = macvlan_start_xmit,
@@ -411,6 +466,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
411 .ndo_change_rx_flags = macvlan_change_rx_flags, 466 .ndo_change_rx_flags = macvlan_change_rx_flags,
412 .ndo_set_mac_address = macvlan_set_mac_address, 467 .ndo_set_mac_address = macvlan_set_mac_address,
413 .ndo_set_multicast_list = macvlan_set_multicast_list, 468 .ndo_set_multicast_list = macvlan_set_multicast_list,
469 .ndo_get_stats = macvlan_dev_get_stats,
414 .ndo_validate_addr = eth_validate_addr, 470 .ndo_validate_addr = eth_validate_addr,
415}; 471};
416 472
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 44558fcb56a..8ce58c4c7dd 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -8143,7 +8143,7 @@ static void __devinit niu_vpd_parse_version(struct niu *np)
8143 int i; 8143 int i;
8144 8144
8145 for (i = 0; i < len - 5; i++) { 8145 for (i = 0; i < len - 5; i++) {
8146 if (!strncmp(s + i, "FCode ", 5)) 8146 if (!strncmp(s + i, "FCode ", 6))
8147 break; 8147 break;
8148 } 8148 }
8149 if (i >= len - 5) 8149 if (i >= len - 5)
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 7e01fbdb87e..57e09616330 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -264,7 +264,7 @@ static int fmvj18x_probe(struct pcmcia_device *link)
264 /* Interrupt setup */ 264 /* Interrupt setup */
265 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_HANDLE_PRESENT; 265 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_HANDLE_PRESENT;
266 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 266 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
267 link->irq.Handler = &fjn_interrupt; 267 link->irq.Handler = fjn_interrupt;
268 link->irq.Instance = dev; 268 link->irq.Instance = dev;
269 269
270 /* General socket configuration */ 270 /* General socket configuration */
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 5ed6339c52b..b12e69592d1 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -479,7 +479,7 @@ static int nmclan_probe(struct pcmcia_device *link)
479 link->io.IOAddrLines = 5; 479 link->io.IOAddrLines = 5;
480 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; 480 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
481 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 481 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
482 link->irq.Handler = &mace_interrupt; 482 link->irq.Handler = mace_interrupt;
483 link->irq.Instance = dev; 483 link->irq.Instance = dev;
484 link->conf.Attributes = CONF_ENABLE_IRQ; 484 link->conf.Attributes = CONF_ENABLE_IRQ;
485 link->conf.IntType = INT_MEMORY_AND_IO; 485 link->conf.IntType = INT_MEMORY_AND_IO;
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index 30b1b332676..c311fa6597f 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -36,7 +36,7 @@
36 36
37#define PPP_VERSION "2.4.2" 37#define PPP_VERSION "2.4.2"
38 38
39#define OBUFSIZE 256 39#define OBUFSIZE 4096
40 40
41/* Structure for storing local state. */ 41/* Structure for storing local state. */
42struct asyncppp { 42struct asyncppp {
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 9bf2a6be903..0a56a778af0 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -184,7 +184,7 @@ static atomic_t ppp_unit_count = ATOMIC_INIT(0);
184static atomic_t channel_count = ATOMIC_INIT(0); 184static atomic_t channel_count = ATOMIC_INIT(0);
185 185
186/* per-net private data for this module */ 186/* per-net private data for this module */
187static int ppp_net_id; 187static int ppp_net_id __read_mostly;
188struct ppp_net { 188struct ppp_net {
189 /* units to ppp mapping */ 189 /* units to ppp mapping */
190 struct idr units_idr; 190 struct idr units_idr;
@@ -1944,8 +1944,15 @@ ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
1944 } 1944 }
1945 1945
1946 /* Pull completed packets off the queue and receive them. */ 1946 /* Pull completed packets off the queue and receive them. */
1947 while ((skb = ppp_mp_reconstruct(ppp))) 1947 while ((skb = ppp_mp_reconstruct(ppp))) {
1948 ppp_receive_nonmp_frame(ppp, skb); 1948 if (pskb_may_pull(skb, 2))
1949 ppp_receive_nonmp_frame(ppp, skb);
1950 else {
1951 ++ppp->dev->stats.rx_length_errors;
1952 kfree_skb(skb);
1953 ppp_receive_error(ppp);
1954 }
1955 }
1949 1956
1950 return; 1957 return;
1951 1958
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 60c8d233209..a1dcba255b0 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -97,7 +97,7 @@ static const struct proto_ops pppoe_ops;
97static struct ppp_channel_ops pppoe_chan_ops; 97static struct ppp_channel_ops pppoe_chan_ops;
98 98
99/* per-net private data for this module */ 99/* per-net private data for this module */
100static int pppoe_net_id; 100static int pppoe_net_id __read_mostly;
101struct pppoe_net { 101struct pppoe_net {
102 /* 102 /*
103 * we could use _single_ hash table for all 103 * we could use _single_ hash table for all
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index 849cc9c62c2..c58b50f8ba3 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -232,7 +232,7 @@ static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit , NULL };
232static const struct proto_ops pppol2tp_ops; 232static const struct proto_ops pppol2tp_ops;
233 233
234/* per-net private data for this module */ 234/* per-net private data for this module */
235static int pppol2tp_net_id; 235static int pppol2tp_net_id __read_mostly;
236struct pppol2tp_net { 236struct pppol2tp_net {
237 struct list_head pppol2tp_tunnel_list; 237 struct list_head pppol2tp_tunnel_list;
238 rwlock_t pppol2tp_tunnel_list_lock; 238 rwlock_t pppol2tp_tunnel_list_lock;
@@ -1537,7 +1537,7 @@ static struct sock *pppol2tp_prepare_tunnel_socket(struct net *net,
1537 * if the tunnel socket goes away. 1537 * if the tunnel socket goes away.
1538 */ 1538 */
1539 tunnel->old_sk_destruct = sk->sk_destruct; 1539 tunnel->old_sk_destruct = sk->sk_destruct;
1540 sk->sk_destruct = &pppol2tp_tunnel_destruct; 1540 sk->sk_destruct = pppol2tp_tunnel_destruct;
1541 1541
1542 tunnel->sock = sk; 1542 tunnel->sock = sk;
1543 sk->sk_allocation = GFP_ATOMIC; 1543 sk->sk_allocation = GFP_ATOMIC;
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 1f59f054452..862c1aaf386 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -16,7 +16,7 @@
16 */ 16 */
17#define DRV_NAME "qlge" 17#define DRV_NAME "qlge"
18#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " 18#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
19#define DRV_VERSION "v1.00.00-b3" 19#define DRV_VERSION "v1.00.00.23.00.00-01"
20 20
21#define PFX "qlge: " 21#define PFX "qlge: "
22#define QPRINTK(qdev, nlevel, klevel, fmt, args...) \ 22#define QPRINTK(qdev, nlevel, klevel, fmt, args...) \
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index bd8e164b121..7692299e782 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -69,9 +69,9 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
69#define MSIX_IRQ 0 69#define MSIX_IRQ 0
70#define MSI_IRQ 1 70#define MSI_IRQ 1
71#define LEG_IRQ 2 71#define LEG_IRQ 2
72static int irq_type = MSIX_IRQ; 72static int qlge_irq_type = MSIX_IRQ;
73module_param(irq_type, int, MSIX_IRQ); 73module_param(qlge_irq_type, int, MSIX_IRQ);
74MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy."); 74MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
75 75
76static struct pci_device_id qlge_pci_tbl[] __devinitdata = { 76static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
77 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)}, 77 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
@@ -2870,7 +2870,7 @@ static void ql_enable_msix(struct ql_adapter *qdev)
2870 int i, err; 2870 int i, err;
2871 2871
2872 /* Get the MSIX vectors. */ 2872 /* Get the MSIX vectors. */
2873 if (irq_type == MSIX_IRQ) { 2873 if (qlge_irq_type == MSIX_IRQ) {
2874 /* Try to alloc space for the msix struct, 2874 /* Try to alloc space for the msix struct,
2875 * if it fails then go to MSI/legacy. 2875 * if it fails then go to MSI/legacy.
2876 */ 2876 */
@@ -2878,7 +2878,7 @@ static void ql_enable_msix(struct ql_adapter *qdev)
2878 sizeof(struct msix_entry), 2878 sizeof(struct msix_entry),
2879 GFP_KERNEL); 2879 GFP_KERNEL);
2880 if (!qdev->msi_x_entry) { 2880 if (!qdev->msi_x_entry) {
2881 irq_type = MSI_IRQ; 2881 qlge_irq_type = MSI_IRQ;
2882 goto msi; 2882 goto msi;
2883 } 2883 }
2884 2884
@@ -2901,7 +2901,7 @@ static void ql_enable_msix(struct ql_adapter *qdev)
2901 QPRINTK(qdev, IFUP, WARNING, 2901 QPRINTK(qdev, IFUP, WARNING,
2902 "MSI-X Enable failed, trying MSI.\n"); 2902 "MSI-X Enable failed, trying MSI.\n");
2903 qdev->intr_count = 1; 2903 qdev->intr_count = 1;
2904 irq_type = MSI_IRQ; 2904 qlge_irq_type = MSI_IRQ;
2905 } else if (err == 0) { 2905 } else if (err == 0) {
2906 set_bit(QL_MSIX_ENABLED, &qdev->flags); 2906 set_bit(QL_MSIX_ENABLED, &qdev->flags);
2907 QPRINTK(qdev, IFUP, INFO, 2907 QPRINTK(qdev, IFUP, INFO,
@@ -2912,7 +2912,7 @@ static void ql_enable_msix(struct ql_adapter *qdev)
2912 } 2912 }
2913msi: 2913msi:
2914 qdev->intr_count = 1; 2914 qdev->intr_count = 1;
2915 if (irq_type == MSI_IRQ) { 2915 if (qlge_irq_type == MSI_IRQ) {
2916 if (!pci_enable_msi(qdev->pdev)) { 2916 if (!pci_enable_msi(qdev->pdev)) {
2917 set_bit(QL_MSI_ENABLED, &qdev->flags); 2917 set_bit(QL_MSI_ENABLED, &qdev->flags);
2918 QPRINTK(qdev, IFUP, INFO, 2918 QPRINTK(qdev, IFUP, INFO,
@@ -2920,7 +2920,7 @@ msi:
2920 return; 2920 return;
2921 } 2921 }
2922 } 2922 }
2923 irq_type = LEG_IRQ; 2923 qlge_irq_type = LEG_IRQ;
2924 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n"); 2924 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
2925} 2925}
2926 2926
@@ -3514,9 +3514,6 @@ int ql_wol(struct ql_adapter *qdev)
3514 } 3514 }
3515 3515
3516 if (qdev->wol) { 3516 if (qdev->wol) {
3517 /* Reroute all packets to Management Interface */
3518 ql_write32(qdev, MGMT_RCV_CFG, (MGMT_RCV_CFG_RM |
3519 (MGMT_RCV_CFG_RM << 16)));
3520 wol |= MB_WOL_MODE_ON; 3517 wol |= MB_WOL_MODE_ON;
3521 status = ql_mb_wol_mode(qdev, wol); 3518 status = ql_mb_wol_mode(qdev, wol);
3522 QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n", 3519 QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
@@ -3717,6 +3714,10 @@ static int qlge_open(struct net_device *ndev)
3717 int err = 0; 3714 int err = 0;
3718 struct ql_adapter *qdev = netdev_priv(ndev); 3715 struct ql_adapter *qdev = netdev_priv(ndev);
3719 3716
3717 err = ql_adapter_reset(qdev);
3718 if (err)
3719 return err;
3720
3720 err = ql_configure_rings(qdev); 3721 err = ql_configure_rings(qdev);
3721 if (err) 3722 if (err)
3722 return err; 3723 return err;
@@ -3950,9 +3951,6 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
3950 struct sockaddr *addr = p; 3951 struct sockaddr *addr = p;
3951 int status; 3952 int status;
3952 3953
3953 if (netif_running(ndev))
3954 return -EBUSY;
3955
3956 if (!is_valid_ether_addr(addr->sa_data)) 3954 if (!is_valid_ether_addr(addr->sa_data))
3957 return -EADDRNOTAVAIL; 3955 return -EADDRNOTAVAIL;
3958 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); 3956 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 7dfcb58b0eb..0f30ea4e97e 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -842,7 +842,7 @@ static int r6040_open(struct net_device *dev)
842 int ret; 842 int ret;
843 843
844 /* Request IRQ and Register interrupt handler */ 844 /* Request IRQ and Register interrupt handler */
845 ret = request_irq(dev->irq, &r6040_interrupt, 845 ret = request_irq(dev->irq, r6040_interrupt,
846 IRQF_SHARED, dev->name, dev); 846 IRQF_SHARED, dev->name, dev);
847 if (ret) 847 if (ret)
848 return ret; 848 return ret;
@@ -1085,7 +1085,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1085 int bar = 0; 1085 int bar = 0;
1086 u16 *adrp; 1086 u16 *adrp;
1087 1087
1088 printk(KERN_INFO "%s\n", version); 1088 printk("%s\n", version);
1089 1089
1090 err = pci_enable_device(pdev); 1090 err = pci_enable_device(pdev);
1091 if (err) 1091 if (err)
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 1b0aa4cf89b..98f6c51b760 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -3243,9 +3243,9 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
3243static void rtl8169_set_rxbufsize(struct rtl8169_private *tp, 3243static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
3244 struct net_device *dev) 3244 struct net_device *dev)
3245{ 3245{
3246 unsigned int mtu = dev->mtu; 3246 unsigned int max_frame = dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
3247 3247
3248 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE; 3248 tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE;
3249} 3249}
3250 3250
3251static int rtl8169_open(struct net_device *dev) 3251static int rtl8169_open(struct net_device *dev)
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index ddccf5fa56b..0dd7839322b 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -3494,6 +3494,7 @@ static void s2io_reset(struct s2io_nic *sp)
3494 3494
3495 /* Restore the PCI state saved during initialization. */ 3495 /* Restore the PCI state saved during initialization. */
3496 pci_restore_state(sp->pdev); 3496 pci_restore_state(sp->pdev);
3497 pci_save_state(sp->pdev);
3497 pci_read_config_word(sp->pdev, 0x2, &val16); 3498 pci_read_config_word(sp->pdev, 0x2, &val16);
3498 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID) 3499 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3499 break; 3500 break;
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 6a9f51d1d9f..7f01e60d517 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -986,7 +986,7 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
986 struct net_device *dev = pdata->dev; 986 struct net_device *dev = pdata->dev;
987 int npackets = 0; 987 int npackets = 0;
988 988
989 while (likely(netif_running(dev)) && (npackets < budget)) { 989 while (npackets < budget) {
990 unsigned int pktlength; 990 unsigned int pktlength;
991 unsigned int pktwords; 991 unsigned int pktwords;
992 struct sk_buff *skb; 992 struct sk_buff *skb;
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index b4909a2dec6..92e2bbe6b49 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -1161,7 +1161,7 @@ static int smsc9420_mii_probe(struct net_device *dev)
1161 phydev->phy_id); 1161 phydev->phy_id);
1162 1162
1163 phydev = phy_connect(dev, dev_name(&phydev->dev), 1163 phydev = phy_connect(dev, dev_name(&phydev->dev),
1164 &smsc9420_phy_adjust_link, 0, PHY_INTERFACE_MODE_MII); 1164 smsc9420_phy_adjust_link, 0, PHY_INTERFACE_MODE_MII);
1165 1165
1166 if (IS_ERR(phydev)) { 1166 if (IS_ERR(phydev)) {
1167 pr_err("%s: Could not attach to PHY\n", dev->name); 1167 pr_err("%s: Could not attach to PHY\n", dev->name);
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 47a4f094787..6e6db955b4a 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
68 68
69#define DRV_MODULE_NAME "tg3" 69#define DRV_MODULE_NAME "tg3"
70#define PFX DRV_MODULE_NAME ": " 70#define PFX DRV_MODULE_NAME ": "
71#define DRV_MODULE_VERSION "3.103" 71#define DRV_MODULE_VERSION "3.104"
72#define DRV_MODULE_RELDATE "November 2, 2009" 72#define DRV_MODULE_RELDATE "November 13, 2009"
73 73
74#define TG3_DEF_MAC_MODE 0 74#define TG3_DEF_MAC_MODE 0
75#define TG3_DEF_RX_MODE 0 75#define TG3_DEF_RX_MODE 0
@@ -137,6 +137,12 @@
137#define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ) 137#define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
138#define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ) 138#define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
139 139
140#define TG3_RX_STD_BUFF_RING_SIZE \
141 (sizeof(struct ring_info) * TG3_RX_RING_SIZE)
142
143#define TG3_RX_JMB_BUFF_RING_SIZE \
144 (sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)
145
140/* minimum number of free TX descriptors required to wake up TX process */ 146/* minimum number of free TX descriptors required to wake up TX process */
141#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4) 147#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
142 148
@@ -235,6 +241,9 @@ static struct pci_device_id tg3_pci_tbl[] = {
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)}, 241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)}, 242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)}, 243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
238 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, 247 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
239 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, 248 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
240 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, 249 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -396,7 +405,7 @@ static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
396 TG3_64BIT_REG_LOW, val); 405 TG3_64BIT_REG_LOW, val);
397 return; 406 return;
398 } 407 }
399 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) { 408 if (off == TG3_RX_STD_PROD_IDX_REG) {
400 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX + 409 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
401 TG3_64BIT_REG_LOW, val); 410 TG3_64BIT_REG_LOW, val);
402 return; 411 return;
@@ -2249,7 +2258,7 @@ static void tg3_nvram_unlock(struct tg3 *tp)
2249static void tg3_enable_nvram_access(struct tg3 *tp) 2258static void tg3_enable_nvram_access(struct tg3 *tp)
2250{ 2259{
2251 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && 2260 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2252 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) { 2261 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2253 u32 nvaccess = tr32(NVRAM_ACCESS); 2262 u32 nvaccess = tr32(NVRAM_ACCESS);
2254 2263
2255 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); 2264 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
@@ -2260,7 +2269,7 @@ static void tg3_enable_nvram_access(struct tg3 *tp)
2260static void tg3_disable_nvram_access(struct tg3 *tp) 2269static void tg3_disable_nvram_access(struct tg3 *tp)
2261{ 2270{
2262 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && 2271 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2263 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) { 2272 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2264 u32 nvaccess = tr32(NVRAM_ACCESS); 2273 u32 nvaccess = tr32(NVRAM_ACCESS);
2265 2274
2266 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); 2275 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
@@ -4397,6 +4406,17 @@ static void tg3_tx(struct tg3_napi *tnapi)
4397 } 4406 }
4398} 4407}
4399 4408
4409static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4410{
4411 if (!ri->skb)
4412 return;
4413
4414 pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping),
4415 map_sz, PCI_DMA_FROMDEVICE);
4416 dev_kfree_skb_any(ri->skb);
4417 ri->skb = NULL;
4418}
4419
4400/* Returns size of skb allocated or < 0 on error. 4420/* Returns size of skb allocated or < 0 on error.
4401 * 4421 *
4402 * We only need to fill in the address because the other members 4422 * We only need to fill in the address because the other members
@@ -4408,16 +4428,14 @@ static void tg3_tx(struct tg3_napi *tnapi)
4408 * buffers the cpu only reads the last cacheline of the RX descriptor 4428 * buffers the cpu only reads the last cacheline of the RX descriptor
4409 * (to fetch the error flags, vlan tag, checksum, and opaque cookie). 4429 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4410 */ 4430 */
4411static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key, 4431static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4412 int src_idx, u32 dest_idx_unmasked) 4432 u32 opaque_key, u32 dest_idx_unmasked)
4413{ 4433{
4414 struct tg3 *tp = tnapi->tp;
4415 struct tg3_rx_buffer_desc *desc; 4434 struct tg3_rx_buffer_desc *desc;
4416 struct ring_info *map, *src_map; 4435 struct ring_info *map, *src_map;
4417 struct sk_buff *skb; 4436 struct sk_buff *skb;
4418 dma_addr_t mapping; 4437 dma_addr_t mapping;
4419 int skb_size, dest_idx; 4438 int skb_size, dest_idx;
4420 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
4421 4439
4422 src_map = NULL; 4440 src_map = NULL;
4423 switch (opaque_key) { 4441 switch (opaque_key) {
@@ -4425,8 +4443,6 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
4425 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; 4443 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4426 desc = &tpr->rx_std[dest_idx]; 4444 desc = &tpr->rx_std[dest_idx];
4427 map = &tpr->rx_std_buffers[dest_idx]; 4445 map = &tpr->rx_std_buffers[dest_idx];
4428 if (src_idx >= 0)
4429 src_map = &tpr->rx_std_buffers[src_idx];
4430 skb_size = tp->rx_pkt_map_sz; 4446 skb_size = tp->rx_pkt_map_sz;
4431 break; 4447 break;
4432 4448
@@ -4434,8 +4450,6 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
4434 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; 4450 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4435 desc = &tpr->rx_jmb[dest_idx].std; 4451 desc = &tpr->rx_jmb[dest_idx].std;
4436 map = &tpr->rx_jmb_buffers[dest_idx]; 4452 map = &tpr->rx_jmb_buffers[dest_idx];
4437 if (src_idx >= 0)
4438 src_map = &tpr->rx_jmb_buffers[src_idx];
4439 skb_size = TG3_RX_JMB_MAP_SZ; 4453 skb_size = TG3_RX_JMB_MAP_SZ;
4440 break; 4454 break;
4441 4455
@@ -4465,9 +4479,6 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
4465 map->skb = skb; 4479 map->skb = skb;
4466 pci_unmap_addr_set(map, mapping, mapping); 4480 pci_unmap_addr_set(map, mapping, mapping);
4467 4481
4468 if (src_map != NULL)
4469 src_map->skb = NULL;
4470
4471 desc->addr_hi = ((u64)mapping >> 32); 4482 desc->addr_hi = ((u64)mapping >> 32);
4472 desc->addr_lo = ((u64)mapping & 0xffffffff); 4483 desc->addr_lo = ((u64)mapping & 0xffffffff);
4473 4484
@@ -4478,30 +4489,32 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
4478 * members of the RX descriptor are invariant. See notes above 4489 * members of the RX descriptor are invariant. See notes above
4479 * tg3_alloc_rx_skb for full details. 4490 * tg3_alloc_rx_skb for full details.
4480 */ 4491 */
4481static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key, 4492static void tg3_recycle_rx(struct tg3_napi *tnapi,
4482 int src_idx, u32 dest_idx_unmasked) 4493 struct tg3_rx_prodring_set *dpr,
4494 u32 opaque_key, int src_idx,
4495 u32 dest_idx_unmasked)
4483{ 4496{
4484 struct tg3 *tp = tnapi->tp; 4497 struct tg3 *tp = tnapi->tp;
4485 struct tg3_rx_buffer_desc *src_desc, *dest_desc; 4498 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4486 struct ring_info *src_map, *dest_map; 4499 struct ring_info *src_map, *dest_map;
4487 int dest_idx; 4500 int dest_idx;
4488 struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; 4501 struct tg3_rx_prodring_set *spr = &tp->prodring[0];
4489 4502
4490 switch (opaque_key) { 4503 switch (opaque_key) {
4491 case RXD_OPAQUE_RING_STD: 4504 case RXD_OPAQUE_RING_STD:
4492 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; 4505 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4493 dest_desc = &tpr->rx_std[dest_idx]; 4506 dest_desc = &dpr->rx_std[dest_idx];
4494 dest_map = &tpr->rx_std_buffers[dest_idx]; 4507 dest_map = &dpr->rx_std_buffers[dest_idx];
4495 src_desc = &tpr->rx_std[src_idx]; 4508 src_desc = &spr->rx_std[src_idx];
4496 src_map = &tpr->rx_std_buffers[src_idx]; 4509 src_map = &spr->rx_std_buffers[src_idx];
4497 break; 4510 break;
4498 4511
4499 case RXD_OPAQUE_RING_JUMBO: 4512 case RXD_OPAQUE_RING_JUMBO:
4500 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; 4513 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4501 dest_desc = &tpr->rx_jmb[dest_idx].std; 4514 dest_desc = &dpr->rx_jmb[dest_idx].std;
4502 dest_map = &tpr->rx_jmb_buffers[dest_idx]; 4515 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4503 src_desc = &tpr->rx_jmb[src_idx].std; 4516 src_desc = &spr->rx_jmb[src_idx].std;
4504 src_map = &tpr->rx_jmb_buffers[src_idx]; 4517 src_map = &spr->rx_jmb_buffers[src_idx];
4505 break; 4518 break;
4506 4519
4507 default: 4520 default:
@@ -4513,7 +4526,6 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key,
4513 pci_unmap_addr(src_map, mapping)); 4526 pci_unmap_addr(src_map, mapping));
4514 dest_desc->addr_hi = src_desc->addr_hi; 4527 dest_desc->addr_hi = src_desc->addr_hi;
4515 dest_desc->addr_lo = src_desc->addr_lo; 4528 dest_desc->addr_lo = src_desc->addr_lo;
4516
4517 src_map->skb = NULL; 4529 src_map->skb = NULL;
4518} 4530}
4519 4531
@@ -4545,10 +4557,11 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4545{ 4557{
4546 struct tg3 *tp = tnapi->tp; 4558 struct tg3 *tp = tnapi->tp;
4547 u32 work_mask, rx_std_posted = 0; 4559 u32 work_mask, rx_std_posted = 0;
4560 u32 std_prod_idx, jmb_prod_idx;
4548 u32 sw_idx = tnapi->rx_rcb_ptr; 4561 u32 sw_idx = tnapi->rx_rcb_ptr;
4549 u16 hw_idx; 4562 u16 hw_idx;
4550 int received; 4563 int received;
4551 struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; 4564 struct tg3_rx_prodring_set *tpr = tnapi->prodring;
4552 4565
4553 hw_idx = *(tnapi->rx_rcb_prod_idx); 4566 hw_idx = *(tnapi->rx_rcb_prod_idx);
4554 /* 4567 /*
@@ -4558,7 +4571,10 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4558 rmb(); 4571 rmb();
4559 work_mask = 0; 4572 work_mask = 0;
4560 received = 0; 4573 received = 0;
4574 std_prod_idx = tpr->rx_std_prod_idx;
4575 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4561 while (sw_idx != hw_idx && budget > 0) { 4576 while (sw_idx != hw_idx && budget > 0) {
4577 struct ring_info *ri;
4562 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx]; 4578 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4563 unsigned int len; 4579 unsigned int len;
4564 struct sk_buff *skb; 4580 struct sk_buff *skb;
@@ -4568,16 +4584,16 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4568 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 4584 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4569 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 4585 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4570 if (opaque_key == RXD_OPAQUE_RING_STD) { 4586 if (opaque_key == RXD_OPAQUE_RING_STD) {
4571 struct ring_info *ri = &tpr->rx_std_buffers[desc_idx]; 4587 ri = &tp->prodring[0].rx_std_buffers[desc_idx];
4572 dma_addr = pci_unmap_addr(ri, mapping); 4588 dma_addr = pci_unmap_addr(ri, mapping);
4573 skb = ri->skb; 4589 skb = ri->skb;
4574 post_ptr = &tpr->rx_std_ptr; 4590 post_ptr = &std_prod_idx;
4575 rx_std_posted++; 4591 rx_std_posted++;
4576 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { 4592 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4577 struct ring_info *ri = &tpr->rx_jmb_buffers[desc_idx]; 4593 ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
4578 dma_addr = pci_unmap_addr(ri, mapping); 4594 dma_addr = pci_unmap_addr(ri, mapping);
4579 skb = ri->skb; 4595 skb = ri->skb;
4580 post_ptr = &tpr->rx_jmb_ptr; 4596 post_ptr = &jmb_prod_idx;
4581 } else 4597 } else
4582 goto next_pkt_nopost; 4598 goto next_pkt_nopost;
4583 4599
@@ -4586,7 +4602,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4586 if ((desc->err_vlan & RXD_ERR_MASK) != 0 && 4602 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4587 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) { 4603 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4588 drop_it: 4604 drop_it:
4589 tg3_recycle_rx(tnapi, opaque_key, 4605 tg3_recycle_rx(tnapi, tpr, opaque_key,
4590 desc_idx, *post_ptr); 4606 desc_idx, *post_ptr);
4591 drop_it_no_recycle: 4607 drop_it_no_recycle:
4592 /* Other statistics kept track of by card. */ 4608 /* Other statistics kept track of by card. */
@@ -4606,11 +4622,13 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4606 ) { 4622 ) {
4607 int skb_size; 4623 int skb_size;
4608 4624
4609 skb_size = tg3_alloc_rx_skb(tnapi, opaque_key, 4625 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4610 desc_idx, *post_ptr); 4626 *post_ptr);
4611 if (skb_size < 0) 4627 if (skb_size < 0)
4612 goto drop_it; 4628 goto drop_it;
4613 4629
4630 ri->skb = NULL;
4631
4614 pci_unmap_single(tp->pdev, dma_addr, skb_size, 4632 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4615 PCI_DMA_FROMDEVICE); 4633 PCI_DMA_FROMDEVICE);
4616 4634
@@ -4618,7 +4636,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4618 } else { 4636 } else {
4619 struct sk_buff *copy_skb; 4637 struct sk_buff *copy_skb;
4620 4638
4621 tg3_recycle_rx(tnapi, opaque_key, 4639 tg3_recycle_rx(tnapi, tpr, opaque_key,
4622 desc_idx, *post_ptr); 4640 desc_idx, *post_ptr);
4623 4641
4624 copy_skb = netdev_alloc_skb(tp->dev, 4642 copy_skb = netdev_alloc_skb(tp->dev,
@@ -4669,9 +4687,7 @@ next_pkt:
4669 4687
4670 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { 4688 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4671 u32 idx = *post_ptr % TG3_RX_RING_SIZE; 4689 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4672 4690 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, idx);
4673 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4674 TG3_64BIT_REG_LOW, idx);
4675 work_mask &= ~RXD_OPAQUE_RING_STD; 4691 work_mask &= ~RXD_OPAQUE_RING_STD;
4676 rx_std_posted = 0; 4692 rx_std_posted = 0;
4677 } 4693 }
@@ -4691,33 +4707,45 @@ next_pkt_nopost:
4691 tw32_rx_mbox(tnapi->consmbox, sw_idx); 4707 tw32_rx_mbox(tnapi->consmbox, sw_idx);
4692 4708
4693 /* Refill RX ring(s). */ 4709 /* Refill RX ring(s). */
4694 if (work_mask & RXD_OPAQUE_RING_STD) { 4710 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) || tnapi == &tp->napi[1]) {
4695 sw_idx = tpr->rx_std_ptr % TG3_RX_RING_SIZE; 4711 if (work_mask & RXD_OPAQUE_RING_STD) {
4696 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW, 4712 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4697 sw_idx); 4713 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4698 } 4714 tpr->rx_std_prod_idx);
4699 if (work_mask & RXD_OPAQUE_RING_JUMBO) { 4715 }
4700 sw_idx = tpr->rx_jmb_ptr % TG3_RX_JUMBO_RING_SIZE; 4716 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4701 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW, 4717 tpr->rx_jmb_prod_idx = jmb_prod_idx %
4702 sw_idx); 4718 TG3_RX_JUMBO_RING_SIZE;
4719 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4720 tpr->rx_jmb_prod_idx);
4721 }
4722 mmiowb();
4723 } else if (work_mask) {
4724 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
4725 * updated before the producer indices can be updated.
4726 */
4727 smp_wmb();
4728
4729 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4730 tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
4731
4732 napi_schedule(&tp->napi[1].napi);
4703 } 4733 }
4704 mmiowb();
4705 4734
4706 return received; 4735 return received;
4707} 4736}
4708 4737
4709static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) 4738static void tg3_poll_link(struct tg3 *tp)
4710{ 4739{
4711 struct tg3 *tp = tnapi->tp;
4712 struct tg3_hw_status *sblk = tnapi->hw_status;
4713
4714 /* handle link change and other phy events */ 4740 /* handle link change and other phy events */
4715 if (!(tp->tg3_flags & 4741 if (!(tp->tg3_flags &
4716 (TG3_FLAG_USE_LINKCHG_REG | 4742 (TG3_FLAG_USE_LINKCHG_REG |
4717 TG3_FLAG_POLL_SERDES))) { 4743 TG3_FLAG_POLL_SERDES))) {
4744 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
4745
4718 if (sblk->status & SD_STATUS_LINK_CHG) { 4746 if (sblk->status & SD_STATUS_LINK_CHG) {
4719 sblk->status = SD_STATUS_UPDATED | 4747 sblk->status = SD_STATUS_UPDATED |
4720 (sblk->status & ~SD_STATUS_LINK_CHG); 4748 (sblk->status & ~SD_STATUS_LINK_CHG);
4721 spin_lock(&tp->lock); 4749 spin_lock(&tp->lock);
4722 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 4750 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4723 tw32_f(MAC_STATUS, 4751 tw32_f(MAC_STATUS,
@@ -4731,6 +4759,98 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4731 spin_unlock(&tp->lock); 4759 spin_unlock(&tp->lock);
4732 } 4760 }
4733 } 4761 }
4762}
4763
4764static void tg3_rx_prodring_xfer(struct tg3 *tp,
4765 struct tg3_rx_prodring_set *dpr,
4766 struct tg3_rx_prodring_set *spr)
4767{
4768 u32 si, di, cpycnt, src_prod_idx;
4769 int i;
4770
4771 while (1) {
4772 src_prod_idx = spr->rx_std_prod_idx;
4773
4774 /* Make sure updates to the rx_std_buffers[] entries and the
4775 * standard producer index are seen in the correct order.
4776 */
4777 smp_rmb();
4778
4779 if (spr->rx_std_cons_idx == src_prod_idx)
4780 break;
4781
4782 if (spr->rx_std_cons_idx < src_prod_idx)
4783 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
4784 else
4785 cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx;
4786
4787 cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx);
4788
4789 si = spr->rx_std_cons_idx;
4790 di = dpr->rx_std_prod_idx;
4791
4792 memcpy(&dpr->rx_std_buffers[di],
4793 &spr->rx_std_buffers[si],
4794 cpycnt * sizeof(struct ring_info));
4795
4796 for (i = 0; i < cpycnt; i++, di++, si++) {
4797 struct tg3_rx_buffer_desc *sbd, *dbd;
4798 sbd = &spr->rx_std[si];
4799 dbd = &dpr->rx_std[di];
4800 dbd->addr_hi = sbd->addr_hi;
4801 dbd->addr_lo = sbd->addr_lo;
4802 }
4803
4804 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) %
4805 TG3_RX_RING_SIZE;
4806 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) %
4807 TG3_RX_RING_SIZE;
4808 }
4809
4810 while (1) {
4811 src_prod_idx = spr->rx_jmb_prod_idx;
4812
4813 /* Make sure updates to the rx_jmb_buffers[] entries and
4814 * the jumbo producer index are seen in the correct order.
4815 */
4816 smp_rmb();
4817
4818 if (spr->rx_jmb_cons_idx == src_prod_idx)
4819 break;
4820
4821 if (spr->rx_jmb_cons_idx < src_prod_idx)
4822 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
4823 else
4824 cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx;
4825
4826 cpycnt = min(cpycnt,
4827 TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx);
4828
4829 si = spr->rx_jmb_cons_idx;
4830 di = dpr->rx_jmb_prod_idx;
4831
4832 memcpy(&dpr->rx_jmb_buffers[di],
4833 &spr->rx_jmb_buffers[si],
4834 cpycnt * sizeof(struct ring_info));
4835
4836 for (i = 0; i < cpycnt; i++, di++, si++) {
4837 struct tg3_rx_buffer_desc *sbd, *dbd;
4838 sbd = &spr->rx_jmb[si].std;
4839 dbd = &dpr->rx_jmb[di].std;
4840 dbd->addr_hi = sbd->addr_hi;
4841 dbd->addr_lo = sbd->addr_lo;
4842 }
4843
4844 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) %
4845 TG3_RX_JUMBO_RING_SIZE;
4846 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
4847 TG3_RX_JUMBO_RING_SIZE;
4848 }
4849}
4850
4851static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4852{
4853 struct tg3 *tp = tnapi->tp;
4734 4854
4735 /* run TX completion thread */ 4855 /* run TX completion thread */
4736 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { 4856 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
@@ -4746,6 +4866,74 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4746 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) 4866 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
4747 work_done += tg3_rx(tnapi, budget - work_done); 4867 work_done += tg3_rx(tnapi, budget - work_done);
4748 4868
4869 if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
4870 int i;
4871 u32 std_prod_idx = tp->prodring[0].rx_std_prod_idx;
4872 u32 jmb_prod_idx = tp->prodring[0].rx_jmb_prod_idx;
4873
4874 for (i = 2; i < tp->irq_cnt; i++)
4875 tg3_rx_prodring_xfer(tp, tnapi->prodring,
4876 tp->napi[i].prodring);
4877
4878 wmb();
4879
4880 if (std_prod_idx != tp->prodring[0].rx_std_prod_idx) {
4881 u32 mbox = TG3_RX_STD_PROD_IDX_REG;
4882 tw32_rx_mbox(mbox, tp->prodring[0].rx_std_prod_idx);
4883 }
4884
4885 if (jmb_prod_idx != tp->prodring[0].rx_jmb_prod_idx) {
4886 u32 mbox = TG3_RX_JMB_PROD_IDX_REG;
4887 tw32_rx_mbox(mbox, tp->prodring[0].rx_jmb_prod_idx);
4888 }
4889
4890 mmiowb();
4891 }
4892
4893 return work_done;
4894}
4895
4896static int tg3_poll_msix(struct napi_struct *napi, int budget)
4897{
4898 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
4899 struct tg3 *tp = tnapi->tp;
4900 int work_done = 0;
4901 struct tg3_hw_status *sblk = tnapi->hw_status;
4902
4903 while (1) {
4904 work_done = tg3_poll_work(tnapi, work_done, budget);
4905
4906 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4907 goto tx_recovery;
4908
4909 if (unlikely(work_done >= budget))
4910 break;
4911
4912 /* tp->last_tag is used in tg3_restart_ints() below
4913 * to tell the hw how much work has been processed,
4914 * so we must read it before checking for more work.
4915 */
4916 tnapi->last_tag = sblk->status_tag;
4917 tnapi->last_irq_tag = tnapi->last_tag;
4918 rmb();
4919
4920 /* check for RX/TX work to do */
4921 if (sblk->idx[0].tx_consumer == tnapi->tx_cons &&
4922 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr) {
4923 napi_complete(napi);
4924 /* Reenable interrupts. */
4925 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
4926 mmiowb();
4927 break;
4928 }
4929 }
4930
4931 return work_done;
4932
4933tx_recovery:
4934 /* work_done is guaranteed to be less than budget. */
4935 napi_complete(napi);
4936 schedule_work(&tp->reset_task);
4749 return work_done; 4937 return work_done;
4750} 4938}
4751 4939
@@ -4757,6 +4945,8 @@ static int tg3_poll(struct napi_struct *napi, int budget)
4757 struct tg3_hw_status *sblk = tnapi->hw_status; 4945 struct tg3_hw_status *sblk = tnapi->hw_status;
4758 4946
4759 while (1) { 4947 while (1) {
4948 tg3_poll_link(tp);
4949
4760 work_done = tg3_poll_work(tnapi, work_done, budget); 4950 work_done = tg3_poll_work(tnapi, work_done, budget);
4761 4951
4762 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) 4952 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
@@ -5119,11 +5309,11 @@ static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5119static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32); 5309static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5120 5310
5121/* Workaround 4GB and 40-bit hardware DMA bugs. */ 5311/* Workaround 4GB and 40-bit hardware DMA bugs. */
5122static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb, 5312static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5123 u32 last_plus_one, u32 *start, 5313 struct sk_buff *skb, u32 last_plus_one,
5124 u32 base_flags, u32 mss) 5314 u32 *start, u32 base_flags, u32 mss)
5125{ 5315{
5126 struct tg3_napi *tnapi = &tp->napi[0]; 5316 struct tg3 *tp = tnapi->tp;
5127 struct sk_buff *new_skb; 5317 struct sk_buff *new_skb;
5128 dma_addr_t new_addr = 0; 5318 dma_addr_t new_addr = 0;
5129 u32 entry = *start; 5319 u32 entry = *start;
@@ -5206,7 +5396,7 @@ static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5206} 5396}
5207 5397
5208/* hard_start_xmit for devices that don't have any bugs and 5398/* hard_start_xmit for devices that don't have any bugs and
5209 * support TG3_FLG2_HW_TSO_2 only. 5399 * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
5210 */ 5400 */
5211static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, 5401static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5212 struct net_device *dev) 5402 struct net_device *dev)
@@ -5265,7 +5455,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5265 hdrlen = ip_tcp_len + tcp_opt_len; 5455 hdrlen = ip_tcp_len + tcp_opt_len;
5266 } 5456 }
5267 5457
5268 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { 5458 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5269 mss |= (hdrlen & 0xc) << 12; 5459 mss |= (hdrlen & 0xc) << 12;
5270 if (hdrlen & 0x10) 5460 if (hdrlen & 0x10)
5271 base_flags |= 0x00000010; 5461 base_flags |= 0x00000010;
@@ -5392,9 +5582,13 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5392 struct skb_shared_info *sp; 5582 struct skb_shared_info *sp;
5393 int would_hit_hwbug; 5583 int would_hit_hwbug;
5394 dma_addr_t mapping; 5584 dma_addr_t mapping;
5395 struct tg3_napi *tnapi = &tp->napi[0]; 5585 struct tg3_napi *tnapi;
5586 struct netdev_queue *txq;
5396 5587
5397 len = skb_headlen(skb); 5588 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5589 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5590 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
5591 tnapi++;
5398 5592
5399 /* We are running in BH disabled context with netif_tx_lock 5593 /* We are running in BH disabled context with netif_tx_lock
5400 * and TX reclaim runs via tp->napi.poll inside of a software 5594 * and TX reclaim runs via tp->napi.poll inside of a software
@@ -5402,8 +5596,8 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5402 * no IRQ context deadlocks to worry about either. Rejoice! 5596 * no IRQ context deadlocks to worry about either. Rejoice!
5403 */ 5597 */
5404 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) { 5598 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5405 if (!netif_queue_stopped(dev)) { 5599 if (!netif_tx_queue_stopped(txq)) {
5406 netif_stop_queue(dev); 5600 netif_tx_stop_queue(txq);
5407 5601
5408 /* This is a hard error, log it. */ 5602 /* This is a hard error, log it. */
5409 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when " 5603 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
@@ -5416,7 +5610,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5416 base_flags = 0; 5610 base_flags = 0;
5417 if (skb->ip_summed == CHECKSUM_PARTIAL) 5611 if (skb->ip_summed == CHECKSUM_PARTIAL)
5418 base_flags |= TXD_FLAG_TCPUDP_CSUM; 5612 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5419 mss = 0; 5613
5420 if ((mss = skb_shinfo(skb)->gso_size) != 0) { 5614 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5421 struct iphdr *iph; 5615 struct iphdr *iph;
5422 u32 tcp_opt_len, ip_tcp_len, hdr_len; 5616 u32 tcp_opt_len, ip_tcp_len, hdr_len;
@@ -5450,7 +5644,12 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5450 IPPROTO_TCP, 5644 IPPROTO_TCP,
5451 0); 5645 0);
5452 5646
5453 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) 5647 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5648 mss |= (hdr_len & 0xc) << 12;
5649 if (hdr_len & 0x10)
5650 base_flags |= 0x00000010;
5651 base_flags |= (hdr_len & 0x3e0) << 5;
5652 } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
5454 mss |= hdr_len << 9; 5653 mss |= hdr_len << 9;
5455 else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) || 5654 else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
5456 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { 5655 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
@@ -5475,6 +5674,10 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5475 (vlan_tx_tag_get(skb) << 16)); 5674 (vlan_tx_tag_get(skb) << 16));
5476#endif 5675#endif
5477 5676
5677 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
5678 !mss && skb->len > ETH_DATA_LEN)
5679 base_flags |= TXD_FLAG_JMB_PKT;
5680
5478 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) { 5681 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5479 dev_kfree_skb(skb); 5682 dev_kfree_skb(skb);
5480 goto out_unlock; 5683 goto out_unlock;
@@ -5488,6 +5691,8 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5488 5691
5489 would_hit_hwbug = 0; 5692 would_hit_hwbug = 0;
5490 5693
5694 len = skb_headlen(skb);
5695
5491 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8) 5696 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
5492 would_hit_hwbug = 1; 5697 would_hit_hwbug = 1;
5493 5698
@@ -5553,7 +5758,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5553 /* If the workaround fails due to memory/mapping 5758 /* If the workaround fails due to memory/mapping
5554 * failure, silently drop this packet. 5759 * failure, silently drop this packet.
5555 */ 5760 */
5556 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one, 5761 if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one,
5557 &start, base_flags, mss)) 5762 &start, base_flags, mss))
5558 goto out_unlock; 5763 goto out_unlock;
5559 5764
@@ -5561,13 +5766,13 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5561 } 5766 }
5562 5767
5563 /* Packets are ready, update Tx producer idx local and on card. */ 5768 /* Packets are ready, update Tx producer idx local and on card. */
5564 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, entry); 5769 tw32_tx_mbox(tnapi->prodmbox, entry);
5565 5770
5566 tnapi->tx_prod = entry; 5771 tnapi->tx_prod = entry;
5567 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { 5772 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5568 netif_stop_queue(dev); 5773 netif_tx_stop_queue(txq);
5569 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) 5774 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5570 netif_wake_queue(tp->dev); 5775 netif_tx_wake_queue(txq);
5571 } 5776 }
5572 5777
5573out_unlock: 5778out_unlock:
@@ -5638,36 +5843,33 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
5638 struct tg3_rx_prodring_set *tpr) 5843 struct tg3_rx_prodring_set *tpr)
5639{ 5844{
5640 int i; 5845 int i;
5641 struct ring_info *rxp;
5642
5643 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5644 rxp = &tpr->rx_std_buffers[i];
5645 5846
5646 if (rxp->skb == NULL) 5847 if (tpr != &tp->prodring[0]) {
5647 continue; 5848 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
5849 i = (i + 1) % TG3_RX_RING_SIZE)
5850 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
5851 tp->rx_pkt_map_sz);
5852
5853 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5854 for (i = tpr->rx_jmb_cons_idx;
5855 i != tpr->rx_jmb_prod_idx;
5856 i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) {
5857 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
5858 TG3_RX_JMB_MAP_SZ);
5859 }
5860 }
5648 5861
5649 pci_unmap_single(tp->pdev, 5862 return;
5650 pci_unmap_addr(rxp, mapping),
5651 tp->rx_pkt_map_sz,
5652 PCI_DMA_FROMDEVICE);
5653 dev_kfree_skb_any(rxp->skb);
5654 rxp->skb = NULL;
5655 } 5863 }
5656 5864
5657 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { 5865 for (i = 0; i < TG3_RX_RING_SIZE; i++)
5658 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) { 5866 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
5659 rxp = &tpr->rx_jmb_buffers[i]; 5867 tp->rx_pkt_map_sz);
5660
5661 if (rxp->skb == NULL)
5662 continue;
5663 5868
5664 pci_unmap_single(tp->pdev, 5869 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5665 pci_unmap_addr(rxp, mapping), 5870 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++)
5666 TG3_RX_JMB_MAP_SZ, 5871 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
5667 PCI_DMA_FROMDEVICE); 5872 TG3_RX_JMB_MAP_SZ);
5668 dev_kfree_skb_any(rxp->skb);
5669 rxp->skb = NULL;
5670 }
5671 } 5873 }
5672} 5874}
5673 5875
@@ -5682,7 +5884,19 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
5682 struct tg3_rx_prodring_set *tpr) 5884 struct tg3_rx_prodring_set *tpr)
5683{ 5885{
5684 u32 i, rx_pkt_dma_sz; 5886 u32 i, rx_pkt_dma_sz;
5685 struct tg3_napi *tnapi = &tp->napi[0]; 5887
5888 tpr->rx_std_cons_idx = 0;
5889 tpr->rx_std_prod_idx = 0;
5890 tpr->rx_jmb_cons_idx = 0;
5891 tpr->rx_jmb_prod_idx = 0;
5892
5893 if (tpr != &tp->prodring[0]) {
5894 memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE);
5895 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)
5896 memset(&tpr->rx_jmb_buffers[0], 0,
5897 TG3_RX_JMB_BUFF_RING_SIZE);
5898 goto done;
5899 }
5686 5900
5687 /* Zero out all descriptors. */ 5901 /* Zero out all descriptors. */
5688 memset(tpr->rx_std, 0, TG3_RX_RING_BYTES); 5902 memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
@@ -5709,7 +5923,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
5709 5923
5710 /* Now allocate fresh SKBs for each rx ring. */ 5924 /* Now allocate fresh SKBs for each rx ring. */
5711 for (i = 0; i < tp->rx_pending; i++) { 5925 for (i = 0; i < tp->rx_pending; i++) {
5712 if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_STD, -1, i) < 0) { 5926 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
5713 printk(KERN_WARNING PFX 5927 printk(KERN_WARNING PFX
5714 "%s: Using a smaller RX standard ring, " 5928 "%s: Using a smaller RX standard ring, "
5715 "only %d out of %d buffers were allocated " 5929 "only %d out of %d buffers were allocated "
@@ -5740,8 +5954,8 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
5740 } 5954 }
5741 5955
5742 for (i = 0; i < tp->rx_jumbo_pending; i++) { 5956 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5743 if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_JUMBO, 5957 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO,
5744 -1, i) < 0) { 5958 i) < 0) {
5745 printk(KERN_WARNING PFX 5959 printk(KERN_WARNING PFX
5746 "%s: Using a smaller RX jumbo ring, " 5960 "%s: Using a smaller RX jumbo ring, "
5747 "only %d out of %d buffers were " 5961 "only %d out of %d buffers were "
@@ -5785,8 +5999,7 @@ static void tg3_rx_prodring_fini(struct tg3 *tp,
5785static int tg3_rx_prodring_init(struct tg3 *tp, 5999static int tg3_rx_prodring_init(struct tg3 *tp,
5786 struct tg3_rx_prodring_set *tpr) 6000 struct tg3_rx_prodring_set *tpr)
5787{ 6001{
5788 tpr->rx_std_buffers = kzalloc(sizeof(struct ring_info) * 6002 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE, GFP_KERNEL);
5789 TG3_RX_RING_SIZE, GFP_KERNEL);
5790 if (!tpr->rx_std_buffers) 6003 if (!tpr->rx_std_buffers)
5791 return -ENOMEM; 6004 return -ENOMEM;
5792 6005
@@ -5796,8 +6009,7 @@ static int tg3_rx_prodring_init(struct tg3 *tp,
5796 goto err_out; 6009 goto err_out;
5797 6010
5798 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { 6011 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5799 tpr->rx_jmb_buffers = kzalloc(sizeof(struct ring_info) * 6012 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE,
5800 TG3_RX_JUMBO_RING_SIZE,
5801 GFP_KERNEL); 6013 GFP_KERNEL);
5802 if (!tpr->rx_jmb_buffers) 6014 if (!tpr->rx_jmb_buffers)
5803 goto err_out; 6015 goto err_out;
@@ -5853,9 +6065,10 @@ static void tg3_free_rings(struct tg3 *tp)
5853 6065
5854 dev_kfree_skb_any(skb); 6066 dev_kfree_skb_any(skb);
5855 } 6067 }
5856 }
5857 6068
5858 tg3_rx_prodring_free(tp, &tp->prodring[0]); 6069 if (tp->irq_cnt == 1 || j != tp->irq_cnt - 1)
6070 tg3_rx_prodring_free(tp, &tp->prodring[j]);
6071 }
5859} 6072}
5860 6073
5861/* Initialize tx/rx rings for packet processing. 6074/* Initialize tx/rx rings for packet processing.
@@ -5889,9 +6102,13 @@ static int tg3_init_rings(struct tg3 *tp)
5889 tnapi->rx_rcb_ptr = 0; 6102 tnapi->rx_rcb_ptr = 0;
5890 if (tnapi->rx_rcb) 6103 if (tnapi->rx_rcb)
5891 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); 6104 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6105
6106 if ((tp->irq_cnt == 1 || i != tp->irq_cnt - 1) &&
6107 tg3_rx_prodring_alloc(tp, &tp->prodring[i]))
6108 return -ENOMEM;
5892 } 6109 }
5893 6110
5894 return tg3_rx_prodring_alloc(tp, &tp->prodring[0]); 6111 return 0;
5895} 6112}
5896 6113
5897/* 6114/*
@@ -5935,7 +6152,8 @@ static void tg3_free_consistent(struct tg3 *tp)
5935 tp->hw_stats = NULL; 6152 tp->hw_stats = NULL;
5936 } 6153 }
5937 6154
5938 tg3_rx_prodring_fini(tp, &tp->prodring[0]); 6155 for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++)
6156 tg3_rx_prodring_fini(tp, &tp->prodring[i]);
5939} 6157}
5940 6158
5941/* 6159/*
@@ -5946,8 +6164,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
5946{ 6164{
5947 int i; 6165 int i;
5948 6166
5949 if (tg3_rx_prodring_init(tp, &tp->prodring[0])) 6167 for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++) {
5950 return -ENOMEM; 6168 if (tg3_rx_prodring_init(tp, &tp->prodring[i]))
6169 goto err_out;
6170 }
5951 6171
5952 tp->hw_stats = pci_alloc_consistent(tp->pdev, 6172 tp->hw_stats = pci_alloc_consistent(tp->pdev,
5953 sizeof(struct tg3_hw_stats), 6173 sizeof(struct tg3_hw_stats),
@@ -5991,6 +6211,11 @@ static int tg3_alloc_consistent(struct tg3 *tp)
5991 break; 6211 break;
5992 } 6212 }
5993 6213
6214 if (tp->irq_cnt == 1)
6215 tnapi->prodring = &tp->prodring[0];
6216 else if (i)
6217 tnapi->prodring = &tp->prodring[i - 1];
6218
5994 /* 6219 /*
5995 * If multivector RSS is enabled, vector 0 does not handle 6220 * If multivector RSS is enabled, vector 0 does not handle
5996 * rx or tx interrupts. Don't allocate any resources for it. 6221 * rx or tx interrupts. Don't allocate any resources for it.
@@ -7279,9 +7504,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7279 if (err) 7504 if (err)
7280 return err; 7505 return err;
7281 7506
7282 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && 7507 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
7283 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 && 7508 val = tr32(TG3PCI_DMA_RW_CTRL) &
7284 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) { 7509 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
7510 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
7511 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7512 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
7285 /* This value is determined during the probe time DMA 7513 /* This value is determined during the probe time DMA
7286 * engine test, tg3_test_dma. 7514 * engine test, tg3_test_dma.
7287 */ 7515 */
@@ -7404,8 +7632,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7404 ((u64) tpr->rx_std_mapping >> 32)); 7632 ((u64) tpr->rx_std_mapping >> 32));
7405 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 7633 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7406 ((u64) tpr->rx_std_mapping & 0xffffffff)); 7634 ((u64) tpr->rx_std_mapping & 0xffffffff));
7407 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, 7635 if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
7408 NIC_SRAM_RX_BUFFER_DESC); 7636 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7637 NIC_SRAM_RX_BUFFER_DESC);
7409 7638
7410 /* Disable the mini ring */ 7639 /* Disable the mini ring */
7411 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 7640 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
@@ -7428,8 +7657,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7428 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 7657 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7429 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) | 7658 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
7430 BDINFO_FLAGS_USE_EXT_RECV); 7659 BDINFO_FLAGS_USE_EXT_RECV);
7431 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, 7660 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7432 NIC_SRAM_RX_JUMBO_BUFFER_DESC); 7661 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7662 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7433 } else { 7663 } else {
7434 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 7664 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7435 BDINFO_FLAGS_DISABLED); 7665 BDINFO_FLAGS_DISABLED);
@@ -7445,14 +7675,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7445 7675
7446 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); 7676 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
7447 7677
7448 tpr->rx_std_ptr = tp->rx_pending; 7678 tpr->rx_std_prod_idx = tp->rx_pending;
7449 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW, 7679 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
7450 tpr->rx_std_ptr);
7451 7680
7452 tpr->rx_jmb_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ? 7681 tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7453 tp->rx_jumbo_pending : 0; 7682 tp->rx_jumbo_pending : 0;
7454 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW, 7683 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
7455 tpr->rx_jmb_ptr);
7456 7684
7457 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { 7685 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
7458 tw32(STD_REPLENISH_LWM, 32); 7686 tw32(STD_REPLENISH_LWM, 32);
@@ -7515,7 +7743,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7515 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) 7743 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7516 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; 7744 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
7517 7745
7518 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 7746 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
7747 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7519 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) 7748 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7520 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; 7749 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
7521 7750
@@ -9505,15 +9734,16 @@ static int tg3_set_tso(struct net_device *dev, u32 value)
9505 return 0; 9734 return 0;
9506 } 9735 }
9507 if ((dev->features & NETIF_F_IPV6_CSUM) && 9736 if ((dev->features & NETIF_F_IPV6_CSUM) &&
9508 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)) { 9737 ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
9738 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) {
9509 if (value) { 9739 if (value) {
9510 dev->features |= NETIF_F_TSO6; 9740 dev->features |= NETIF_F_TSO6;
9511 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 9741 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
9742 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9512 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && 9743 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9513 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || 9744 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9514 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 9745 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9515 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || 9746 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9516 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
9517 dev->features |= NETIF_F_TSO_ECN; 9747 dev->features |= NETIF_F_TSO_ECN;
9518 } else 9748 } else
9519 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN); 9749 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
@@ -10962,7 +11192,7 @@ static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10962 11192
10963 /* NVRAM protection for TPM */ 11193 /* NVRAM protection for TPM */
10964 if (nvcfg1 & (1 << 27)) 11194 if (nvcfg1 & (1 << 27))
10965 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; 11195 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
10966 11196
10967 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 11197 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10968 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: 11198 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
@@ -11003,7 +11233,7 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11003 11233
11004 /* NVRAM protection for TPM */ 11234 /* NVRAM protection for TPM */
11005 if (nvcfg1 & (1 << 27)) { 11235 if (nvcfg1 & (1 << 27)) {
11006 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; 11236 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11007 protect = 1; 11237 protect = 1;
11008 } 11238 }
11009 11239
@@ -11097,7 +11327,7 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11097 11327
11098 /* NVRAM protection for TPM */ 11328 /* NVRAM protection for TPM */
11099 if (nvcfg1 & (1 << 27)) { 11329 if (nvcfg1 & (1 << 27)) {
11100 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; 11330 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11101 protect = 1; 11331 protect = 1;
11102 } 11332 }
11103 11333
@@ -11599,7 +11829,7 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11599 11829
11600 tg3_enable_nvram_access(tp); 11830 tg3_enable_nvram_access(tp);
11601 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && 11831 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11602 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) 11832 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM))
11603 tw32(NVRAM_WRITE1, 0x406); 11833 tw32(NVRAM_WRITE1, 0x406);
11604 11834
11605 grc_mode = tr32(GRC_MODE); 11835 grc_mode = tr32(GRC_MODE);
@@ -12475,10 +12705,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12475 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) { 12705 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
12476 u32 prod_id_asic_rev; 12706 u32 prod_id_asic_rev;
12477 12707
12478 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717C || 12708 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
12479 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717S || 12709 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
12480 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718C || 12710 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724)
12481 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718S)
12482 pci_read_config_dword(tp->pdev, 12711 pci_read_config_dword(tp->pdev,
12483 TG3PCI_GEN2_PRODID_ASICREV, 12712 TG3PCI_GEN2_PRODID_ASICREV,
12484 &prod_id_asic_rev); 12713 &prod_id_asic_rev);
@@ -12661,6 +12890,29 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12661 tp->dev->features |= NETIF_F_IPV6_CSUM; 12890 tp->dev->features |= NETIF_F_IPV6_CSUM;
12662 } 12891 }
12663 12892
12893 /* Determine TSO capabilities */
12894 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
12895 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
12896 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12897 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12898 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12899 else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12900 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12901 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
12902 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12903 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12904 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12905 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12906 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
12907 tp->tg3_flags2 |= TG3_FLG2_TSO_BUG;
12908 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
12909 tp->fw_needed = FIRMWARE_TG3TSO5;
12910 else
12911 tp->fw_needed = FIRMWARE_TG3TSO;
12912 }
12913
12914 tp->irq_max = 1;
12915
12664 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { 12916 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12665 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI; 12917 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12666 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX || 12918 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
@@ -12672,31 +12924,21 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12672 12924
12673 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || 12925 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12674 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 12926 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12675 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12676 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI; 12927 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12677 } else {
12678 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12679 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12680 ASIC_REV_5750 &&
12681 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12682 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12683 } 12928 }
12684 }
12685 12929
12686 tp->irq_max = 1; 12930 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
12687 12931 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
12688 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { 12932 tp->irq_max = TG3_IRQ_MAX_VECS;
12689 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX; 12933 }
12690 tp->irq_max = TG3_IRQ_MAX_VECS;
12691 } 12934 }
12692 12935
12693 if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) { 12936 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12694 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 12937 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12695 tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG; 12938 tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
12696 else { 12939 else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
12697 tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG; 12940 tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
12698 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG; 12941 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
12699 }
12700 } 12942 }
12701 12943
12702 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || 12944 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
@@ -13297,6 +13539,11 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
13297#endif 13539#endif
13298#endif 13540#endif
13299 13541
13542 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13543 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
13544 goto out;
13545 }
13546
13300 if (!goal) 13547 if (!goal)
13301 goto out; 13548 goto out;
13302 13549
@@ -13491,7 +13738,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
13491{ 13738{
13492 dma_addr_t buf_dma; 13739 dma_addr_t buf_dma;
13493 u32 *buf, saved_dma_rwctrl; 13740 u32 *buf, saved_dma_rwctrl;
13494 int ret; 13741 int ret = 0;
13495 13742
13496 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma); 13743 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
13497 if (!buf) { 13744 if (!buf) {
@@ -13504,6 +13751,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
13504 13751
13505 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); 13752 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
13506 13753
13754 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
13755 goto out;
13756
13507 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { 13757 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13508 /* DMA read watermark not used on PCIE */ 13758 /* DMA read watermark not used on PCIE */
13509 tp->dma_rwctrl |= 0x00180000; 13759 tp->dma_rwctrl |= 0x00180000;
@@ -13576,7 +13826,6 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
13576 tg3_switch_clocks(tp); 13826 tg3_switch_clocks(tp);
13577#endif 13827#endif
13578 13828
13579 ret = 0;
13580 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && 13829 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13581 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) 13830 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
13582 goto out; 13831 goto out;
@@ -13755,6 +14004,7 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
13755 case PHY_ID_BCM5756: return "5722/5756"; 14004 case PHY_ID_BCM5756: return "5722/5756";
13756 case PHY_ID_BCM5906: return "5906"; 14005 case PHY_ID_BCM5906: return "5906";
13757 case PHY_ID_BCM5761: return "5761"; 14006 case PHY_ID_BCM5761: return "5761";
14007 case PHY_ID_BCM5717: return "5717";
13758 case PHY_ID_BCM8002: return "8002/serdes"; 14008 case PHY_ID_BCM8002: return "8002/serdes";
13759 case 0: return "serdes"; 14009 case 0: return "serdes";
13760 default: return "unknown"; 14010 default: return "unknown";
@@ -13996,51 +14246,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
13996 tp->rx_pending = TG3_DEF_RX_RING_PENDING; 14246 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
13997 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; 14247 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
13998 14248
13999 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
14000 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
14001 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
14002 for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
14003 struct tg3_napi *tnapi = &tp->napi[i];
14004
14005 tnapi->tp = tp;
14006 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
14007
14008 tnapi->int_mbox = intmbx;
14009 if (i < 4)
14010 intmbx += 0x8;
14011 else
14012 intmbx += 0x4;
14013
14014 tnapi->consmbox = rcvmbx;
14015 tnapi->prodmbox = sndmbx;
14016
14017 if (i)
14018 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
14019 else
14020 tnapi->coal_now = HOSTCC_MODE_NOW;
14021
14022 if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
14023 break;
14024
14025 /*
14026 * If we support MSIX, we'll be using RSS. If we're using
14027 * RSS, the first vector only handles link interrupts and the
14028 * remaining vectors handle rx and tx interrupts. Reuse the
14029 * mailbox values for the next iteration. The values we setup
14030 * above are still useful for the single vectored mode.
14031 */
14032 if (!i)
14033 continue;
14034
14035 rcvmbx += 0x8;
14036
14037 if (sndmbx & 0x4)
14038 sndmbx -= 0x4;
14039 else
14040 sndmbx += 0xc;
14041 }
14042
14043 netif_napi_add(dev, &tp->napi[0].napi, tg3_poll, 64);
14044 dev->ethtool_ops = &tg3_ethtool_ops; 14249 dev->ethtool_ops = &tg3_ethtool_ops;
14045 dev->watchdog_timeo = TG3_TX_TIMEOUT; 14250 dev->watchdog_timeo = TG3_TX_TIMEOUT;
14046 dev->irq = pdev->irq; 14251 dev->irq = pdev->irq;
@@ -14052,7 +14257,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14052 goto err_out_iounmap; 14257 goto err_out_iounmap;
14053 } 14258 }
14054 14259
14055 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) 14260 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
14261 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
14056 dev->netdev_ops = &tg3_netdev_ops; 14262 dev->netdev_ops = &tg3_netdev_ops;
14057 else 14263 else
14058 dev->netdev_ops = &tg3_netdev_ops_dma_bug; 14264 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
@@ -14099,46 +14305,39 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14099 14305
14100 tg3_init_bufmgr_config(tp); 14306 tg3_init_bufmgr_config(tp);
14101 14307
14102 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) 14308 /* Selectively allow TSO based on operating conditions */
14103 tp->fw_needed = FIRMWARE_TG3; 14309 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
14104 14310 (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)))
14105 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
14106 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; 14311 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
14312 else {
14313 tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG);
14314 tp->fw_needed = NULL;
14107 } 14315 }
14108 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 14316
14109 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 || 14317 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14110 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 || 14318 tp->fw_needed = FIRMWARE_TG3;
14111 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14112 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
14113 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
14114 } else {
14115 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
14116 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14117 tp->fw_needed = FIRMWARE_TG3TSO5;
14118 else
14119 tp->fw_needed = FIRMWARE_TG3TSO;
14120 }
14121 14319
14122 /* TSO is on by default on chips that support hardware TSO. 14320 /* TSO is on by default on chips that support hardware TSO.
14123 * Firmware TSO on older chips gives lower performance, so it 14321 * Firmware TSO on older chips gives lower performance, so it
14124 * is off by default, but can be enabled using ethtool. 14322 * is off by default, but can be enabled using ethtool.
14125 */ 14323 */
14126 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { 14324 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) &&
14127 if (dev->features & NETIF_F_IP_CSUM) 14325 (dev->features & NETIF_F_IP_CSUM))
14128 dev->features |= NETIF_F_TSO; 14326 dev->features |= NETIF_F_TSO;
14129 if ((dev->features & NETIF_F_IPV6_CSUM) && 14327
14130 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)) 14328 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
14329 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) {
14330 if (dev->features & NETIF_F_IPV6_CSUM)
14131 dev->features |= NETIF_F_TSO6; 14331 dev->features |= NETIF_F_TSO6;
14132 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 14332 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
14333 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14133 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && 14334 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14134 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || 14335 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
14135 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 14336 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14136 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || 14337 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14137 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
14138 dev->features |= NETIF_F_TSO_ECN; 14338 dev->features |= NETIF_F_TSO_ECN;
14139 } 14339 }
14140 14340
14141
14142 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 && 14341 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
14143 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && 14342 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
14144 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) { 14343 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
@@ -14189,6 +14388,53 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14189 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; 14388 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
14190 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; 14389 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14191 14390
14391 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
14392 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
14393 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
14394 for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
14395 struct tg3_napi *tnapi = &tp->napi[i];
14396
14397 tnapi->tp = tp;
14398 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
14399
14400 tnapi->int_mbox = intmbx;
14401 if (i < 4)
14402 intmbx += 0x8;
14403 else
14404 intmbx += 0x4;
14405
14406 tnapi->consmbox = rcvmbx;
14407 tnapi->prodmbox = sndmbx;
14408
14409 if (i) {
14410 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
14411 netif_napi_add(dev, &tnapi->napi, tg3_poll_msix, 64);
14412 } else {
14413 tnapi->coal_now = HOSTCC_MODE_NOW;
14414 netif_napi_add(dev, &tnapi->napi, tg3_poll, 64);
14415 }
14416
14417 if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
14418 break;
14419
14420 /*
14421 * If we support MSIX, we'll be using RSS. If we're using
14422 * RSS, the first vector only handles link interrupts and the
14423 * remaining vectors handle rx and tx interrupts. Reuse the
14424 * mailbox values for the next iteration. The values we setup
14425 * above are still useful for the single vectored mode.
14426 */
14427 if (!i)
14428 continue;
14429
14430 rcvmbx += 0x8;
14431
14432 if (sndmbx & 0x4)
14433 sndmbx -= 0x4;
14434 else
14435 sndmbx += 0xc;
14436 }
14437
14192 tg3_init_coal(tp); 14438 tg3_init_coal(tp);
14193 14439
14194 pci_set_drvdata(pdev, dev); 14440 pci_set_drvdata(pdev, dev);
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index d770da124b8..453a34fb72b 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -46,10 +46,9 @@
46#define TG3PCI_DEVICE_TIGON3_57788 0x1691 46#define TG3PCI_DEVICE_TIGON3_57788 0x1691
47#define TG3PCI_DEVICE_TIGON3_5785_G 0x1699 /* GPHY */ 47#define TG3PCI_DEVICE_TIGON3_5785_G 0x1699 /* GPHY */
48#define TG3PCI_DEVICE_TIGON3_5785_F 0x16a0 /* 10/100 only */ 48#define TG3PCI_DEVICE_TIGON3_5785_F 0x16a0 /* 10/100 only */
49#define TG3PCI_DEVICE_TIGON3_5717C 0x1655 49#define TG3PCI_DEVICE_TIGON3_5717 0x1655
50#define TG3PCI_DEVICE_TIGON3_5717S 0x1656 50#define TG3PCI_DEVICE_TIGON3_5718 0x1656
51#define TG3PCI_DEVICE_TIGON3_5718C 0x1665 51#define TG3PCI_DEVICE_TIGON3_5724 0x165c
52#define TG3PCI_DEVICE_TIGON3_5718S 0x1666
53/* 0x04 --> 0x64 unused */ 52/* 0x04 --> 0x64 unused */
54#define TG3PCI_MSI_DATA 0x00000064 53#define TG3PCI_MSI_DATA 0x00000064
55/* 0x66 --> 0x68 unused */ 54/* 0x66 --> 0x68 unused */
@@ -103,6 +102,7 @@
103#define CHIPREV_ID_5906_A1 0xc001 102#define CHIPREV_ID_5906_A1 0xc001
104#define CHIPREV_ID_57780_A0 0x57780000 103#define CHIPREV_ID_57780_A0 0x57780000
105#define CHIPREV_ID_57780_A1 0x57780001 104#define CHIPREV_ID_57780_A1 0x57780001
105#define CHIPREV_ID_5717_A0 0x05717000
106#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12) 106#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12)
107#define ASIC_REV_5700 0x07 107#define ASIC_REV_5700 0x07
108#define ASIC_REV_5701 0x00 108#define ASIC_REV_5701 0x00
@@ -141,8 +141,7 @@
141#define METAL_REV_B1 0x01 141#define METAL_REV_B1 0x01
142#define METAL_REV_B2 0x02 142#define METAL_REV_B2 0x02
143#define TG3PCI_DMA_RW_CTRL 0x0000006c 143#define TG3PCI_DMA_RW_CTRL 0x0000006c
144#define DMA_RWCTRL_MIN_DMA 0x000000ff 144#define DMA_RWCTRL_DIS_CACHE_ALIGNMENT 0x00000001
145#define DMA_RWCTRL_MIN_DMA_SHIFT 0
146#define DMA_RWCTRL_READ_BNDRY_MASK 0x00000700 145#define DMA_RWCTRL_READ_BNDRY_MASK 0x00000700
147#define DMA_RWCTRL_READ_BNDRY_DISAB 0x00000000 146#define DMA_RWCTRL_READ_BNDRY_DISAB 0x00000000
148#define DMA_RWCTRL_READ_BNDRY_16 0x00000100 147#define DMA_RWCTRL_READ_BNDRY_16 0x00000100
@@ -242,7 +241,11 @@
242#define MAILBOX_GENERAL_7 0x00000258 /* 64-bit */ 241#define MAILBOX_GENERAL_7 0x00000258 /* 64-bit */
243#define MAILBOX_RELOAD_STAT 0x00000260 /* 64-bit */ 242#define MAILBOX_RELOAD_STAT 0x00000260 /* 64-bit */
244#define MAILBOX_RCV_STD_PROD_IDX 0x00000268 /* 64-bit */ 243#define MAILBOX_RCV_STD_PROD_IDX 0x00000268 /* 64-bit */
244#define TG3_RX_STD_PROD_IDX_REG (MAILBOX_RCV_STD_PROD_IDX + \
245 TG3_64BIT_REG_LOW)
245#define MAILBOX_RCV_JUMBO_PROD_IDX 0x00000270 /* 64-bit */ 246#define MAILBOX_RCV_JUMBO_PROD_IDX 0x00000270 /* 64-bit */
247#define TG3_RX_JMB_PROD_IDX_REG (MAILBOX_RCV_JUMBO_PROD_IDX + \
248 TG3_64BIT_REG_LOW)
246#define MAILBOX_RCV_MINI_PROD_IDX 0x00000278 /* 64-bit */ 249#define MAILBOX_RCV_MINI_PROD_IDX 0x00000278 /* 64-bit */
247#define MAILBOX_RCVRET_CON_IDX_0 0x00000280 /* 64-bit */ 250#define MAILBOX_RCVRET_CON_IDX_0 0x00000280 /* 64-bit */
248#define MAILBOX_RCVRET_CON_IDX_1 0x00000288 /* 64-bit */ 251#define MAILBOX_RCVRET_CON_IDX_1 0x00000288 /* 64-bit */
@@ -2570,8 +2573,10 @@ struct tg3_ethtool_stats {
2570}; 2573};
2571 2574
2572struct tg3_rx_prodring_set { 2575struct tg3_rx_prodring_set {
2573 u32 rx_std_ptr; 2576 u32 rx_std_prod_idx;
2574 u32 rx_jmb_ptr; 2577 u32 rx_std_cons_idx;
2578 u32 rx_jmb_prod_idx;
2579 u32 rx_jmb_cons_idx;
2575 struct tg3_rx_buffer_desc *rx_std; 2580 struct tg3_rx_buffer_desc *rx_std;
2576 struct tg3_ext_rx_buffer_desc *rx_jmb; 2581 struct tg3_ext_rx_buffer_desc *rx_jmb;
2577 struct ring_info *rx_std_buffers; 2582 struct ring_info *rx_std_buffers;
@@ -2599,6 +2604,7 @@ struct tg3_napi {
2599 u32 consmbox; 2604 u32 consmbox;
2600 u32 rx_rcb_ptr; 2605 u32 rx_rcb_ptr;
2601 u16 *rx_rcb_prod_idx; 2606 u16 *rx_rcb_prod_idx;
2607 struct tg3_rx_prodring_set *prodring;
2602 2608
2603 struct tg3_rx_buffer_desc *rx_rcb; 2609 struct tg3_rx_buffer_desc *rx_rcb;
2604 struct tg3_tx_buffer_desc *tx_ring; 2610 struct tg3_tx_buffer_desc *tx_ring;
@@ -2682,7 +2688,7 @@ struct tg3 {
2682 struct vlan_group *vlgrp; 2688 struct vlan_group *vlgrp;
2683#endif 2689#endif
2684 2690
2685 struct tg3_rx_prodring_set prodring[1]; 2691 struct tg3_rx_prodring_set prodring[TG3_IRQ_MAX_VECS - 1];
2686 2692
2687 2693
2688 /* begin "everything else" cacheline(s) section */ 2694 /* begin "everything else" cacheline(s) section */
@@ -2753,7 +2759,7 @@ struct tg3 {
2753#define TG3_FLG2_SERDES_PREEMPHASIS 0x00020000 2759#define TG3_FLG2_SERDES_PREEMPHASIS 0x00020000
2754#define TG3_FLG2_5705_PLUS 0x00040000 2760#define TG3_FLG2_5705_PLUS 0x00040000
2755#define TG3_FLG2_5750_PLUS 0x00080000 2761#define TG3_FLG2_5750_PLUS 0x00080000
2756#define TG3_FLG2_PROTECTED_NVRAM 0x00100000 2762#define TG3_FLG2_HW_TSO_3 0x00100000
2757#define TG3_FLG2_USING_MSI 0x00200000 2763#define TG3_FLG2_USING_MSI 0x00200000
2758#define TG3_FLG2_USING_MSIX 0x00400000 2764#define TG3_FLG2_USING_MSIX 0x00400000
2759#define TG3_FLG2_USING_MSI_OR_MSIX (TG3_FLG2_USING_MSI | \ 2765#define TG3_FLG2_USING_MSI_OR_MSIX (TG3_FLG2_USING_MSI | \
@@ -2765,7 +2771,9 @@ struct tg3 {
2765#define TG3_FLG2_ICH_WORKAROUND 0x02000000 2771#define TG3_FLG2_ICH_WORKAROUND 0x02000000
2766#define TG3_FLG2_5780_CLASS 0x04000000 2772#define TG3_FLG2_5780_CLASS 0x04000000
2767#define TG3_FLG2_HW_TSO_2 0x08000000 2773#define TG3_FLG2_HW_TSO_2 0x08000000
2768#define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2) 2774#define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | \
2775 TG3_FLG2_HW_TSO_2 | \
2776 TG3_FLG2_HW_TSO_3)
2769#define TG3_FLG2_1SHOT_MSI 0x10000000 2777#define TG3_FLG2_1SHOT_MSI 0x10000000
2770#define TG3_FLG2_PHY_JITTER_BUG 0x20000000 2778#define TG3_FLG2_PHY_JITTER_BUG 0x20000000
2771#define TG3_FLG2_NO_FWARE_REPORTED 0x40000000 2779#define TG3_FLG2_NO_FWARE_REPORTED 0x40000000
@@ -2773,6 +2781,7 @@ struct tg3 {
2773 u32 tg3_flags3; 2781 u32 tg3_flags3;
2774#define TG3_FLG3_NO_NVRAM_ADDR_TRANS 0x00000001 2782#define TG3_FLG3_NO_NVRAM_ADDR_TRANS 0x00000001
2775#define TG3_FLG3_ENABLE_APE 0x00000002 2783#define TG3_FLG3_ENABLE_APE 0x00000002
2784#define TG3_FLG3_PROTECTED_NVRAM 0x00000004
2776#define TG3_FLG3_5701_DMA_BUG 0x00000008 2785#define TG3_FLG3_5701_DMA_BUG 0x00000008
2777#define TG3_FLG3_USE_PHYLIB 0x00000010 2786#define TG3_FLG3_USE_PHYLIB 0x00000010
2778#define TG3_FLG3_MDIOBUS_INITED 0x00000020 2787#define TG3_FLG3_MDIOBUS_INITED 0x00000020
@@ -2855,6 +2864,7 @@ struct tg3 {
2855#define PHY_ID_BCM5756 0xbc050ed0 2864#define PHY_ID_BCM5756 0xbc050ed0
2856#define PHY_ID_BCM5784 0xbc050fa0 2865#define PHY_ID_BCM5784 0xbc050fa0
2857#define PHY_ID_BCM5761 0xbc050fd0 2866#define PHY_ID_BCM5761 0xbc050fd0
2867#define PHY_ID_BCM5717 0x5c0d8a00
2858#define PHY_ID_BCM5906 0xdc00ac40 2868#define PHY_ID_BCM5906 0xdc00ac40
2859#define PHY_ID_BCM8002 0x60010140 2869#define PHY_ID_BCM8002 0x60010140
2860#define PHY_ID_INVALID 0xffffffff 2870#define PHY_ID_INVALID 0xffffffff
@@ -2896,7 +2906,7 @@ struct tg3 {
2896 (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM5787 || \ 2906 (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM5787 || \
2897 (X) == PHY_ID_BCM5755 || (X) == PHY_ID_BCM5756 || \ 2907 (X) == PHY_ID_BCM5755 || (X) == PHY_ID_BCM5756 || \
2898 (X) == PHY_ID_BCM5906 || (X) == PHY_ID_BCM5761 || \ 2908 (X) == PHY_ID_BCM5906 || (X) == PHY_ID_BCM5761 || \
2899 (X) == PHY_ID_BCM8002) 2909 (X) == PHY_ID_BCM5717 || (X) == PHY_ID_BCM8002)
2900 2910
2901 struct tg3_hw_stats *hw_stats; 2911 struct tg3_hw_stats *hw_stats;
2902 dma_addr_t stats_mapping; 2912 dma_addr_t stats_mapping;
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index 724158966ec..cf552d1d962 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -610,9 +610,8 @@ static int xl_open(struct net_device *dev)
610 610
611 u16 switchsettings, switchsettings_eeprom ; 611 u16 switchsettings, switchsettings_eeprom ;
612 612
613 if(request_irq(dev->irq, &xl_interrupt, IRQF_SHARED , "3c359", dev)) { 613 if (request_irq(dev->irq, xl_interrupt, IRQF_SHARED , "3c359", dev))
614 return -EAGAIN; 614 return -EAGAIN;
615 }
616 615
617 /* 616 /*
618 * Read the information from the EEPROM that we need. 617 * Read the information from the EEPROM that we need.
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index d9ec7f0bbd0..df32025c513 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -445,9 +445,9 @@ static int olympic_open(struct net_device *dev)
445 445
446 olympic_init(dev); 446 olympic_init(dev);
447 447
448 if(request_irq(dev->irq, &olympic_interrupt, IRQF_SHARED , "olympic", dev)) { 448 if (request_irq(dev->irq, olympic_interrupt, IRQF_SHARED , "olympic",
449 dev))
449 return -EAGAIN; 450 return -EAGAIN;
450 }
451 451
452#if OLYMPIC_DEBUG 452#if OLYMPIC_DEBUG
453 printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM)); 453 printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index d6d345229fe..4b754102442 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -2150,7 +2150,7 @@ typhoon_open(struct net_device *dev)
2150 goto out_sleep; 2150 goto out_sleep;
2151 } 2151 }
2152 2152
2153 err = request_irq(dev->irq, &typhoon_interrupt, IRQF_SHARED, 2153 err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED,
2154 dev->name, dev); 2154 dev->name, dev);
2155 if(err < 0) 2155 if(err < 0)
2156 goto out_sleep; 2156 goto out_sleep;
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 4535e89dfff..ec94ddf01f5 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -1150,7 +1150,7 @@ static int rhine_open(struct net_device *dev)
1150 void __iomem *ioaddr = rp->base; 1150 void __iomem *ioaddr = rp->base;
1151 int rc; 1151 int rc;
1152 1152
1153 rc = request_irq(rp->pdev->irq, &rhine_interrupt, IRQF_SHARED, dev->name, 1153 rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name,
1154 dev); 1154 dev);
1155 if (rc) 1155 if (rc)
1156 return rc; 1156 return rc;
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 158f411bd55..1e6b395c555 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -2176,7 +2176,7 @@ static int velocity_open(struct net_device *dev)
2176 2176
2177 velocity_init_registers(vptr, VELOCITY_INIT_COLD); 2177 velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2178 2178
2179 ret = request_irq(vptr->pdev->irq, &velocity_intr, IRQF_SHARED, 2179 ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED,
2180 dev->name, dev); 2180 dev->name, dev);
2181 if (ret < 0) { 2181 if (ret < 0) {
2182 /* Power down the chip */ 2182 /* Power down the chip */
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index dc8ee4438a4..b4889e6c4a5 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -90,23 +90,60 @@ enum {
90 VMXNET3_CMD_GET_CONF_INTR 90 VMXNET3_CMD_GET_CONF_INTR
91}; 91};
92 92
93struct Vmxnet3_TxDesc { 93/*
94 u64 addr; 94 * Little Endian layout of bitfields -
95 * Byte 0 : 7.....len.....0
96 * Byte 1 : rsvd gen 13.len.8
97 * Byte 2 : 5.msscof.0 ext1 dtype
98 * Byte 3 : 13...msscof...6
99 *
100 * Big Endian layout of bitfields -
101 * Byte 0: 13...msscof...6
102 * Byte 1 : 5.msscof.0 ext1 dtype
103 * Byte 2 : rsvd gen 13.len.8
104 * Byte 3 : 7.....len.....0
105 *
106 * Thus, le32_to_cpu on the dword will allow the big endian driver to read
107 * the bit fields correctly. And cpu_to_le32 will convert bitfields
108 * bit fields written by big endian driver to format required by device.
109 */
95 110
96 u32 len:14; 111struct Vmxnet3_TxDesc {
97 u32 gen:1; /* generation bit */ 112 __le64 addr;
98 u32 rsvd:1; 113
99 u32 dtype:1; /* descriptor type */ 114#ifdef __BIG_ENDIAN_BITFIELD
100 u32 ext1:1; 115 u32 msscof:14; /* MSS, checksum offset, flags */
101 u32 msscof:14; /* MSS, checksum offset, flags */ 116 u32 ext1:1;
102 117 u32 dtype:1; /* descriptor type */
103 u32 hlen:10; /* header len */ 118 u32 rsvd:1;
104 u32 om:2; /* offload mode */ 119 u32 gen:1; /* generation bit */
105 u32 eop:1; /* End Of Packet */ 120 u32 len:14;
106 u32 cq:1; /* completion request */ 121#else
107 u32 ext2:1; 122 u32 len:14;
108 u32 ti:1; /* VLAN Tag Insertion */ 123 u32 gen:1; /* generation bit */
109 u32 tci:16; /* Tag to Insert */ 124 u32 rsvd:1;
125 u32 dtype:1; /* descriptor type */
126 u32 ext1:1;
127 u32 msscof:14; /* MSS, checksum offset, flags */
128#endif /* __BIG_ENDIAN_BITFIELD */
129
130#ifdef __BIG_ENDIAN_BITFIELD
131 u32 tci:16; /* Tag to Insert */
132 u32 ti:1; /* VLAN Tag Insertion */
133 u32 ext2:1;
134 u32 cq:1; /* completion request */
135 u32 eop:1; /* End Of Packet */
136 u32 om:2; /* offload mode */
137 u32 hlen:10; /* header len */
138#else
139 u32 hlen:10; /* header len */
140 u32 om:2; /* offload mode */
141 u32 eop:1; /* End Of Packet */
142 u32 cq:1; /* completion request */
143 u32 ext2:1;
144 u32 ti:1; /* VLAN Tag Insertion */
145 u32 tci:16; /* Tag to Insert */
146#endif /* __BIG_ENDIAN_BITFIELD */
110}; 147};
111 148
112/* TxDesc.OM values */ 149/* TxDesc.OM values */
@@ -118,6 +155,8 @@ struct Vmxnet3_TxDesc {
118#define VMXNET3_TXD_EOP_SHIFT 12 155#define VMXNET3_TXD_EOP_SHIFT 12
119#define VMXNET3_TXD_CQ_SHIFT 13 156#define VMXNET3_TXD_CQ_SHIFT 13
120#define VMXNET3_TXD_GEN_SHIFT 14 157#define VMXNET3_TXD_GEN_SHIFT 14
158#define VMXNET3_TXD_EOP_DWORD_SHIFT 3
159#define VMXNET3_TXD_GEN_DWORD_SHIFT 2
121 160
122#define VMXNET3_TXD_CQ (1 << VMXNET3_TXD_CQ_SHIFT) 161#define VMXNET3_TXD_CQ (1 << VMXNET3_TXD_CQ_SHIFT)
123#define VMXNET3_TXD_EOP (1 << VMXNET3_TXD_EOP_SHIFT) 162#define VMXNET3_TXD_EOP (1 << VMXNET3_TXD_EOP_SHIFT)
@@ -130,29 +169,40 @@ struct Vmxnet3_TxDataDesc {
130 u8 data[VMXNET3_HDR_COPY_SIZE]; 169 u8 data[VMXNET3_HDR_COPY_SIZE];
131}; 170};
132 171
172#define VMXNET3_TCD_GEN_SHIFT 31
173#define VMXNET3_TCD_GEN_SIZE 1
174#define VMXNET3_TCD_TXIDX_SHIFT 0
175#define VMXNET3_TCD_TXIDX_SIZE 12
176#define VMXNET3_TCD_GEN_DWORD_SHIFT 3
133 177
134struct Vmxnet3_TxCompDesc { 178struct Vmxnet3_TxCompDesc {
135 u32 txdIdx:12; /* Index of the EOP TxDesc */ 179 u32 txdIdx:12; /* Index of the EOP TxDesc */
136 u32 ext1:20; 180 u32 ext1:20;
137 181
138 u32 ext2; 182 __le32 ext2;
139 u32 ext3; 183 __le32 ext3;
140 184
141 u32 rsvd:24; 185 u32 rsvd:24;
142 u32 type:7; /* completion type */ 186 u32 type:7; /* completion type */
143 u32 gen:1; /* generation bit */ 187 u32 gen:1; /* generation bit */
144}; 188};
145 189
146
147struct Vmxnet3_RxDesc { 190struct Vmxnet3_RxDesc {
148 u64 addr; 191 __le64 addr;
149 192
193#ifdef __BIG_ENDIAN_BITFIELD
194 u32 gen:1; /* Generation bit */
195 u32 rsvd:15;
196 u32 dtype:1; /* Descriptor type */
197 u32 btype:1; /* Buffer Type */
198 u32 len:14;
199#else
150 u32 len:14; 200 u32 len:14;
151 u32 btype:1; /* Buffer Type */ 201 u32 btype:1; /* Buffer Type */
152 u32 dtype:1; /* Descriptor type */ 202 u32 dtype:1; /* Descriptor type */
153 u32 rsvd:15; 203 u32 rsvd:15;
154 u32 gen:1; /* Generation bit */ 204 u32 gen:1; /* Generation bit */
155 205#endif
156 u32 ext1; 206 u32 ext1;
157}; 207};
158 208
@@ -164,8 +214,17 @@ struct Vmxnet3_RxDesc {
164#define VMXNET3_RXD_BTYPE_SHIFT 14 214#define VMXNET3_RXD_BTYPE_SHIFT 14
165#define VMXNET3_RXD_GEN_SHIFT 31 215#define VMXNET3_RXD_GEN_SHIFT 31
166 216
167
168struct Vmxnet3_RxCompDesc { 217struct Vmxnet3_RxCompDesc {
218#ifdef __BIG_ENDIAN_BITFIELD
219 u32 ext2:1;
220 u32 cnc:1; /* Checksum Not Calculated */
221 u32 rssType:4; /* RSS hash type used */
222 u32 rqID:10; /* rx queue/ring ID */
223 u32 sop:1; /* Start of Packet */
224 u32 eop:1; /* End of Packet */
225 u32 ext1:2;
226 u32 rxdIdx:12; /* Index of the RxDesc */
227#else
169 u32 rxdIdx:12; /* Index of the RxDesc */ 228 u32 rxdIdx:12; /* Index of the RxDesc */
170 u32 ext1:2; 229 u32 ext1:2;
171 u32 eop:1; /* End of Packet */ 230 u32 eop:1; /* End of Packet */
@@ -174,14 +233,36 @@ struct Vmxnet3_RxCompDesc {
174 u32 rssType:4; /* RSS hash type used */ 233 u32 rssType:4; /* RSS hash type used */
175 u32 cnc:1; /* Checksum Not Calculated */ 234 u32 cnc:1; /* Checksum Not Calculated */
176 u32 ext2:1; 235 u32 ext2:1;
236#endif /* __BIG_ENDIAN_BITFIELD */
177 237
178 u32 rssHash; /* RSS hash value */ 238 __le32 rssHash; /* RSS hash value */
179 239
240#ifdef __BIG_ENDIAN_BITFIELD
241 u32 tci:16; /* Tag stripped */
242 u32 ts:1; /* Tag is stripped */
243 u32 err:1; /* Error */
244 u32 len:14; /* data length */
245#else
180 u32 len:14; /* data length */ 246 u32 len:14; /* data length */
181 u32 err:1; /* Error */ 247 u32 err:1; /* Error */
182 u32 ts:1; /* Tag is stripped */ 248 u32 ts:1; /* Tag is stripped */
183 u32 tci:16; /* Tag stripped */ 249 u32 tci:16; /* Tag stripped */
250#endif /* __BIG_ENDIAN_BITFIELD */
251
184 252
253#ifdef __BIG_ENDIAN_BITFIELD
254 u32 gen:1; /* generation bit */
255 u32 type:7; /* completion type */
256 u32 fcs:1; /* Frame CRC correct */
257 u32 frg:1; /* IP Fragment */
258 u32 v4:1; /* IPv4 */
259 u32 v6:1; /* IPv6 */
260 u32 ipc:1; /* IP Checksum Correct */
261 u32 tcp:1; /* TCP packet */
262 u32 udp:1; /* UDP packet */
263 u32 tuc:1; /* TCP/UDP Checksum Correct */
264 u32 csum:16;
265#else
185 u32 csum:16; 266 u32 csum:16;
186 u32 tuc:1; /* TCP/UDP Checksum Correct */ 267 u32 tuc:1; /* TCP/UDP Checksum Correct */
187 u32 udp:1; /* UDP packet */ 268 u32 udp:1; /* UDP packet */
@@ -193,6 +274,7 @@ struct Vmxnet3_RxCompDesc {
193 u32 fcs:1; /* Frame CRC correct */ 274 u32 fcs:1; /* Frame CRC correct */
194 u32 type:7; /* completion type */ 275 u32 type:7; /* completion type */
195 u32 gen:1; /* generation bit */ 276 u32 gen:1; /* generation bit */
277#endif /* __BIG_ENDIAN_BITFIELD */
196}; 278};
197 279
198/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.dword[3] */ 280/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.dword[3] */
@@ -206,6 +288,8 @@ struct Vmxnet3_RxCompDesc {
206/* csum OK for TCP/UDP pkts over IP */ 288/* csum OK for TCP/UDP pkts over IP */
207#define VMXNET3_RCD_CSUM_OK (1 << VMXNET3_RCD_TUC_SHIFT | \ 289#define VMXNET3_RCD_CSUM_OK (1 << VMXNET3_RCD_TUC_SHIFT | \
208 1 << VMXNET3_RCD_IPC_SHIFT) 290 1 << VMXNET3_RCD_IPC_SHIFT)
291#define VMXNET3_TXD_GEN_SIZE 1
292#define VMXNET3_TXD_EOP_SIZE 1
209 293
210/* value of RxCompDesc.rssType */ 294/* value of RxCompDesc.rssType */
211enum { 295enum {
@@ -219,9 +303,9 @@ enum {
219 303
220/* a union for accessing all cmd/completion descriptors */ 304/* a union for accessing all cmd/completion descriptors */
221union Vmxnet3_GenericDesc { 305union Vmxnet3_GenericDesc {
222 u64 qword[2]; 306 __le64 qword[2];
223 u32 dword[4]; 307 __le32 dword[4];
224 u16 word[8]; 308 __le16 word[8];
225 struct Vmxnet3_TxDesc txd; 309 struct Vmxnet3_TxDesc txd;
226 struct Vmxnet3_RxDesc rxd; 310 struct Vmxnet3_RxDesc rxd;
227 struct Vmxnet3_TxCompDesc tcd; 311 struct Vmxnet3_TxCompDesc tcd;
@@ -287,18 +371,24 @@ enum {
287 371
288 372
289struct Vmxnet3_GOSInfo { 373struct Vmxnet3_GOSInfo {
290 u32 gosBits:2; /* 32-bit or 64-bit? */ 374#ifdef __BIG_ENDIAN_BITFIELD
291 u32 gosType:4; /* which guest */ 375 u32 gosMisc:10; /* other info about gos */
292 u32 gosVer:16; /* gos version */ 376 u32 gosVer:16; /* gos version */
293 u32 gosMisc:10; /* other info about gos */ 377 u32 gosType:4; /* which guest */
378 u32 gosBits:2; /* 32-bit or 64-bit? */
379#else
380 u32 gosBits:2; /* 32-bit or 64-bit? */
381 u32 gosType:4; /* which guest */
382 u32 gosVer:16; /* gos version */
383 u32 gosMisc:10; /* other info about gos */
384#endif /* __BIG_ENDIAN_BITFIELD */
294}; 385};
295 386
296
297struct Vmxnet3_DriverInfo { 387struct Vmxnet3_DriverInfo {
298 u32 version; 388 __le32 version;
299 struct Vmxnet3_GOSInfo gos; 389 struct Vmxnet3_GOSInfo gos;
300 u32 vmxnet3RevSpt; 390 __le32 vmxnet3RevSpt;
301 u32 uptVerSpt; 391 __le32 uptVerSpt;
302}; 392};
303 393
304 394
@@ -315,42 +405,42 @@ struct Vmxnet3_DriverInfo {
315 405
316struct Vmxnet3_MiscConf { 406struct Vmxnet3_MiscConf {
317 struct Vmxnet3_DriverInfo driverInfo; 407 struct Vmxnet3_DriverInfo driverInfo;
318 u64 uptFeatures; 408 __le64 uptFeatures;
319 u64 ddPA; /* driver data PA */ 409 __le64 ddPA; /* driver data PA */
320 u64 queueDescPA; /* queue descriptor table PA */ 410 __le64 queueDescPA; /* queue descriptor table PA */
321 u32 ddLen; /* driver data len */ 411 __le32 ddLen; /* driver data len */
322 u32 queueDescLen; /* queue desc. table len in bytes */ 412 __le32 queueDescLen; /* queue desc. table len in bytes */
323 u32 mtu; 413 __le32 mtu;
324 u16 maxNumRxSG; 414 __le16 maxNumRxSG;
325 u8 numTxQueues; 415 u8 numTxQueues;
326 u8 numRxQueues; 416 u8 numRxQueues;
327 u32 reserved[4]; 417 __le32 reserved[4];
328}; 418};
329 419
330 420
331struct Vmxnet3_TxQueueConf { 421struct Vmxnet3_TxQueueConf {
332 u64 txRingBasePA; 422 __le64 txRingBasePA;
333 u64 dataRingBasePA; 423 __le64 dataRingBasePA;
334 u64 compRingBasePA; 424 __le64 compRingBasePA;
335 u64 ddPA; /* driver data */ 425 __le64 ddPA; /* driver data */
336 u64 reserved; 426 __le64 reserved;
337 u32 txRingSize; /* # of tx desc */ 427 __le32 txRingSize; /* # of tx desc */
338 u32 dataRingSize; /* # of data desc */ 428 __le32 dataRingSize; /* # of data desc */
339 u32 compRingSize; /* # of comp desc */ 429 __le32 compRingSize; /* # of comp desc */
340 u32 ddLen; /* size of driver data */ 430 __le32 ddLen; /* size of driver data */
341 u8 intrIdx; 431 u8 intrIdx;
342 u8 _pad[7]; 432 u8 _pad[7];
343}; 433};
344 434
345 435
346struct Vmxnet3_RxQueueConf { 436struct Vmxnet3_RxQueueConf {
347 u64 rxRingBasePA[2]; 437 __le64 rxRingBasePA[2];
348 u64 compRingBasePA; 438 __le64 compRingBasePA;
349 u64 ddPA; /* driver data */ 439 __le64 ddPA; /* driver data */
350 u64 reserved; 440 __le64 reserved;
351 u32 rxRingSize[2]; /* # of rx desc */ 441 __le32 rxRingSize[2]; /* # of rx desc */
352 u32 compRingSize; /* # of rx comp desc */ 442 __le32 compRingSize; /* # of rx comp desc */
353 u32 ddLen; /* size of driver data */ 443 __le32 ddLen; /* size of driver data */
354 u8 intrIdx; 444 u8 intrIdx;
355 u8 _pad[7]; 445 u8 _pad[7];
356}; 446};
@@ -381,7 +471,7 @@ struct Vmxnet3_IntrConf {
381 u8 eventIntrIdx; 471 u8 eventIntrIdx;
382 u8 modLevels[VMXNET3_MAX_INTRS]; /* moderation level for 472 u8 modLevels[VMXNET3_MAX_INTRS]; /* moderation level for
383 * each intr */ 473 * each intr */
384 u32 reserved[3]; 474 __le32 reserved[3];
385}; 475};
386 476
387/* one bit per VLAN ID, the size is in the units of u32 */ 477/* one bit per VLAN ID, the size is in the units of u32 */
@@ -391,21 +481,21 @@ struct Vmxnet3_IntrConf {
391struct Vmxnet3_QueueStatus { 481struct Vmxnet3_QueueStatus {
392 bool stopped; 482 bool stopped;
393 u8 _pad[3]; 483 u8 _pad[3];
394 u32 error; 484 __le32 error;
395}; 485};
396 486
397 487
398struct Vmxnet3_TxQueueCtrl { 488struct Vmxnet3_TxQueueCtrl {
399 u32 txNumDeferred; 489 __le32 txNumDeferred;
400 u32 txThreshold; 490 __le32 txThreshold;
401 u64 reserved; 491 __le64 reserved;
402}; 492};
403 493
404 494
405struct Vmxnet3_RxQueueCtrl { 495struct Vmxnet3_RxQueueCtrl {
406 bool updateRxProd; 496 bool updateRxProd;
407 u8 _pad[7]; 497 u8 _pad[7];
408 u64 reserved; 498 __le64 reserved;
409}; 499};
410 500
411enum { 501enum {
@@ -417,11 +507,11 @@ enum {
417}; 507};
418 508
419struct Vmxnet3_RxFilterConf { 509struct Vmxnet3_RxFilterConf {
420 u32 rxMode; /* VMXNET3_RXM_xxx */ 510 __le32 rxMode; /* VMXNET3_RXM_xxx */
421 u16 mfTableLen; /* size of the multicast filter table */ 511 __le16 mfTableLen; /* size of the multicast filter table */
422 u16 _pad1; 512 __le16 _pad1;
423 u64 mfTablePA; /* PA of the multicast filters table */ 513 __le64 mfTablePA; /* PA of the multicast filters table */
424 u32 vfTable[VMXNET3_VFT_SIZE]; /* vlan filter */ 514 __le32 vfTable[VMXNET3_VFT_SIZE]; /* vlan filter */
425}; 515};
426 516
427 517
@@ -444,7 +534,7 @@ struct Vmxnet3_PM_PktFilter {
444 534
445 535
446struct Vmxnet3_PMConf { 536struct Vmxnet3_PMConf {
447 u16 wakeUpEvents; /* VMXNET3_PM_WAKEUP_xxx */ 537 __le16 wakeUpEvents; /* VMXNET3_PM_WAKEUP_xxx */
448 u8 numFilters; 538 u8 numFilters;
449 u8 pad[5]; 539 u8 pad[5];
450 struct Vmxnet3_PM_PktFilter filters[VMXNET3_PM_MAX_FILTERS]; 540 struct Vmxnet3_PM_PktFilter filters[VMXNET3_PM_MAX_FILTERS];
@@ -452,9 +542,9 @@ struct Vmxnet3_PMConf {
452 542
453 543
454struct Vmxnet3_VariableLenConfDesc { 544struct Vmxnet3_VariableLenConfDesc {
455 u32 confVer; 545 __le32 confVer;
456 u32 confLen; 546 __le32 confLen;
457 u64 confPA; 547 __le64 confPA;
458}; 548};
459 549
460 550
@@ -491,12 +581,12 @@ struct Vmxnet3_DSDevRead {
491 581
492/* All structures in DriverShared are padded to multiples of 8 bytes */ 582/* All structures in DriverShared are padded to multiples of 8 bytes */
493struct Vmxnet3_DriverShared { 583struct Vmxnet3_DriverShared {
494 u32 magic; 584 __le32 magic;
495 /* make devRead start at 64bit boundaries */ 585 /* make devRead start at 64bit boundaries */
496 u32 pad; 586 __le32 pad;
497 struct Vmxnet3_DSDevRead devRead; 587 struct Vmxnet3_DSDevRead devRead;
498 u32 ecr; 588 __le32 ecr;
499 u32 reserved[5]; 589 __le32 reserved[5];
500}; 590};
501 591
502 592
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 004353a46af..a4c97e786ee 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -24,12 +24,13 @@
24 * 24 *
25 */ 25 */
26 26
27#include <net/ip6_checksum.h>
28
27#include "vmxnet3_int.h" 29#include "vmxnet3_int.h"
28 30
29char vmxnet3_driver_name[] = "vmxnet3"; 31char vmxnet3_driver_name[] = "vmxnet3";
30#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver" 32#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
31 33
32
33/* 34/*
34 * PCI Device ID Table 35 * PCI Device ID Table
35 * Last entry must be all 0s 36 * Last entry must be all 0s
@@ -151,11 +152,10 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter)
151 } 152 }
152} 153}
153 154
154
155static void 155static void
156vmxnet3_process_events(struct vmxnet3_adapter *adapter) 156vmxnet3_process_events(struct vmxnet3_adapter *adapter)
157{ 157{
158 u32 events = adapter->shared->ecr; 158 u32 events = le32_to_cpu(adapter->shared->ecr);
159 if (!events) 159 if (!events)
160 return; 160 return;
161 161
@@ -173,7 +173,7 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
173 if (adapter->tqd_start->status.stopped) { 173 if (adapter->tqd_start->status.stopped) {
174 printk(KERN_ERR "%s: tq error 0x%x\n", 174 printk(KERN_ERR "%s: tq error 0x%x\n",
175 adapter->netdev->name, 175 adapter->netdev->name,
176 adapter->tqd_start->status.error); 176 le32_to_cpu(adapter->tqd_start->status.error));
177 } 177 }
178 if (adapter->rqd_start->status.stopped) { 178 if (adapter->rqd_start->status.stopped) {
179 printk(KERN_ERR "%s: rq error 0x%x\n", 179 printk(KERN_ERR "%s: rq error 0x%x\n",
@@ -185,6 +185,106 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
185 } 185 }
186} 186}
187 187
188#ifdef __BIG_ENDIAN_BITFIELD
189/*
190 * The device expects the bitfields in shared structures to be written in
191 * little endian. When CPU is big endian, the following routines are used to
192 * correctly read and write into ABI.
193 * The general technique used here is : double word bitfields are defined in
194 * opposite order for big endian architecture. Then before reading them in
195 * driver the complete double word is translated using le32_to_cpu. Similarly
196 * After the driver writes into bitfields, cpu_to_le32 is used to translate the
197 * double words into required format.
198 * In order to avoid touching bits in shared structure more than once, temporary
199 * descriptors are used. These are passed as srcDesc to following functions.
200 */
201static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
202 struct Vmxnet3_RxDesc *dstDesc)
203{
204 u32 *src = (u32 *)srcDesc + 2;
205 u32 *dst = (u32 *)dstDesc + 2;
206 dstDesc->addr = le64_to_cpu(srcDesc->addr);
207 *dst = le32_to_cpu(*src);
208 dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
209}
210
211static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
212 struct Vmxnet3_TxDesc *dstDesc)
213{
214 int i;
215 u32 *src = (u32 *)(srcDesc + 1);
216 u32 *dst = (u32 *)(dstDesc + 1);
217
218 /* Working backwards so that the gen bit is set at the end. */
219 for (i = 2; i > 0; i--) {
220 src--;
221 dst--;
222 *dst = cpu_to_le32(*src);
223 }
224}
225
226
227static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
228 struct Vmxnet3_RxCompDesc *dstDesc)
229{
230 int i = 0;
231 u32 *src = (u32 *)srcDesc;
232 u32 *dst = (u32 *)dstDesc;
233 for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
234 *dst = le32_to_cpu(*src);
235 src++;
236 dst++;
237 }
238}
239
240
241/* Used to read bitfield values from double words. */
242static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
243{
244 u32 temp = le32_to_cpu(*bitfield);
245 u32 mask = ((1 << size) - 1) << pos;
246 temp &= mask;
247 temp >>= pos;
248 return temp;
249}
250
251
252
253#endif /* __BIG_ENDIAN_BITFIELD */
254
255#ifdef __BIG_ENDIAN_BITFIELD
256
257# define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
258 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
259 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
260# define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
261 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
262 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
263# define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
264 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
265 VMXNET3_TCD_GEN_SIZE)
266# define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
267 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
268# define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
269 (dstrcd) = (tmp); \
270 vmxnet3_RxCompToCPU((rcd), (tmp)); \
271 } while (0)
272# define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
273 (dstrxd) = (tmp); \
274 vmxnet3_RxDescToCPU((rxd), (tmp)); \
275 } while (0)
276
277#else
278
279# define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
280# define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
281# define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
282# define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
283# define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
284# define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
285
286#endif /* __BIG_ENDIAN_BITFIELD */
287
188 288
189static void 289static void
190vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi, 290vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
@@ -212,7 +312,7 @@ vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
212 312
213 /* no out of order completion */ 313 /* no out of order completion */
214 BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp); 314 BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
215 BUG_ON(tq->tx_ring.base[eop_idx].txd.eop != 1); 315 BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);
216 316
217 skb = tq->buf_info[eop_idx].skb; 317 skb = tq->buf_info[eop_idx].skb;
218 BUG_ON(skb == NULL); 318 BUG_ON(skb == NULL);
@@ -246,9 +346,10 @@ vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
246 union Vmxnet3_GenericDesc *gdesc; 346 union Vmxnet3_GenericDesc *gdesc;
247 347
248 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; 348 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
249 while (gdesc->tcd.gen == tq->comp_ring.gen) { 349 while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
250 completed += vmxnet3_unmap_pkt(gdesc->tcd.txdIdx, tq, 350 completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
251 adapter->pdev, adapter); 351 &gdesc->tcd), tq, adapter->pdev,
352 adapter);
252 353
253 vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring); 354 vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
254 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; 355 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
@@ -472,9 +573,9 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
472 } 573 }
473 574
474 BUG_ON(rbi->dma_addr == 0); 575 BUG_ON(rbi->dma_addr == 0);
475 gd->rxd.addr = rbi->dma_addr; 576 gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
476 gd->dword[2] = (ring->gen << VMXNET3_RXD_GEN_SHIFT) | val | 577 gd->dword[2] = cpu_to_le32((ring->gen << VMXNET3_RXD_GEN_SHIFT)
477 rbi->len; 578 | val | rbi->len);
478 579
479 num_allocated++; 580 num_allocated++;
480 vmxnet3_cmd_ring_adv_next2fill(ring); 581 vmxnet3_cmd_ring_adv_next2fill(ring);
@@ -531,10 +632,10 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
531 632
532 /* no need to map the buffer if headers are copied */ 633 /* no need to map the buffer if headers are copied */
533 if (ctx->copy_size) { 634 if (ctx->copy_size) {
534 ctx->sop_txd->txd.addr = tq->data_ring.basePA + 635 ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
535 tq->tx_ring.next2fill * 636 tq->tx_ring.next2fill *
536 sizeof(struct Vmxnet3_TxDataDesc); 637 sizeof(struct Vmxnet3_TxDataDesc));
537 ctx->sop_txd->dword[2] = dw2 | ctx->copy_size; 638 ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
538 ctx->sop_txd->dword[3] = 0; 639 ctx->sop_txd->dword[3] = 0;
539 640
540 tbi = tq->buf_info + tq->tx_ring.next2fill; 641 tbi = tq->buf_info + tq->tx_ring.next2fill;
@@ -542,7 +643,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
542 643
543 dev_dbg(&adapter->netdev->dev, 644 dev_dbg(&adapter->netdev->dev,
544 "txd[%u]: 0x%Lx 0x%x 0x%x\n", 645 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
545 tq->tx_ring.next2fill, ctx->sop_txd->txd.addr, 646 tq->tx_ring.next2fill,
647 le64_to_cpu(ctx->sop_txd->txd.addr),
546 ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]); 648 ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
547 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); 649 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
548 650
@@ -570,14 +672,14 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
570 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; 672 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
571 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); 673 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
572 674
573 gdesc->txd.addr = tbi->dma_addr; 675 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
574 gdesc->dword[2] = dw2 | buf_size; 676 gdesc->dword[2] = cpu_to_le32(dw2 | buf_size);
575 gdesc->dword[3] = 0; 677 gdesc->dword[3] = 0;
576 678
577 dev_dbg(&adapter->netdev->dev, 679 dev_dbg(&adapter->netdev->dev,
578 "txd[%u]: 0x%Lx 0x%x 0x%x\n", 680 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
579 tq->tx_ring.next2fill, gdesc->txd.addr, 681 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
580 gdesc->dword[2], gdesc->dword[3]); 682 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
581 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); 683 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
582 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; 684 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
583 685
@@ -599,14 +701,14 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
599 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; 701 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
600 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); 702 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
601 703
602 gdesc->txd.addr = tbi->dma_addr; 704 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
603 gdesc->dword[2] = dw2 | frag->size; 705 gdesc->dword[2] = cpu_to_le32(dw2 | frag->size);
604 gdesc->dword[3] = 0; 706 gdesc->dword[3] = 0;
605 707
606 dev_dbg(&adapter->netdev->dev, 708 dev_dbg(&adapter->netdev->dev,
607 "txd[%u]: 0x%llu %u %u\n", 709 "txd[%u]: 0x%llu %u %u\n",
608 tq->tx_ring.next2fill, gdesc->txd.addr, 710 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
609 gdesc->dword[2], gdesc->dword[3]); 711 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
610 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); 712 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
611 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; 713 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
612 } 714 }
@@ -751,6 +853,10 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
751 unsigned long flags; 853 unsigned long flags;
752 struct vmxnet3_tx_ctx ctx; 854 struct vmxnet3_tx_ctx ctx;
753 union Vmxnet3_GenericDesc *gdesc; 855 union Vmxnet3_GenericDesc *gdesc;
856#ifdef __BIG_ENDIAN_BITFIELD
857 /* Use temporary descriptor to avoid touching bits multiple times */
858 union Vmxnet3_GenericDesc tempTxDesc;
859#endif
754 860
755 /* conservatively estimate # of descriptors to use */ 861 /* conservatively estimate # of descriptors to use */
756 count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 862 count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
@@ -827,16 +933,22 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
827 vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter); 933 vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);
828 934
829 /* setup the EOP desc */ 935 /* setup the EOP desc */
830 ctx.eop_txd->dword[3] = VMXNET3_TXD_CQ | VMXNET3_TXD_EOP; 936 ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
831 937
832 /* setup the SOP desc */ 938 /* setup the SOP desc */
939#ifdef __BIG_ENDIAN_BITFIELD
940 gdesc = &tempTxDesc;
941 gdesc->dword[2] = ctx.sop_txd->dword[2];
942 gdesc->dword[3] = ctx.sop_txd->dword[3];
943#else
833 gdesc = ctx.sop_txd; 944 gdesc = ctx.sop_txd;
945#endif
834 if (ctx.mss) { 946 if (ctx.mss) {
835 gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size; 947 gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
836 gdesc->txd.om = VMXNET3_OM_TSO; 948 gdesc->txd.om = VMXNET3_OM_TSO;
837 gdesc->txd.msscof = ctx.mss; 949 gdesc->txd.msscof = ctx.mss;
838 tq->shared->txNumDeferred += (skb->len - gdesc->txd.hlen + 950 le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
839 ctx.mss - 1) / ctx.mss; 951 gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
840 } else { 952 } else {
841 if (skb->ip_summed == CHECKSUM_PARTIAL) { 953 if (skb->ip_summed == CHECKSUM_PARTIAL) {
842 gdesc->txd.hlen = ctx.eth_ip_hdr_size; 954 gdesc->txd.hlen = ctx.eth_ip_hdr_size;
@@ -847,7 +959,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
847 gdesc->txd.om = 0; 959 gdesc->txd.om = 0;
848 gdesc->txd.msscof = 0; 960 gdesc->txd.msscof = 0;
849 } 961 }
850 tq->shared->txNumDeferred++; 962 le32_add_cpu(&tq->shared->txNumDeferred, 1);
851 } 963 }
852 964
853 if (vlan_tx_tag_present(skb)) { 965 if (vlan_tx_tag_present(skb)) {
@@ -855,19 +967,27 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
855 gdesc->txd.tci = vlan_tx_tag_get(skb); 967 gdesc->txd.tci = vlan_tx_tag_get(skb);
856 } 968 }
857 969
858 wmb(); 970 /* finally flips the GEN bit of the SOP desc. */
859 971 gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
860 /* finally flips the GEN bit of the SOP desc */ 972 VMXNET3_TXD_GEN);
861 gdesc->dword[2] ^= VMXNET3_TXD_GEN; 973#ifdef __BIG_ENDIAN_BITFIELD
974 /* Finished updating in bitfields of Tx Desc, so write them in original
975 * place.
976 */
977 vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
978 (struct Vmxnet3_TxDesc *)ctx.sop_txd);
979 gdesc = ctx.sop_txd;
980#endif
862 dev_dbg(&adapter->netdev->dev, 981 dev_dbg(&adapter->netdev->dev,
863 "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n", 982 "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
864 (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd - 983 (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
865 tq->tx_ring.base), gdesc->txd.addr, gdesc->dword[2], 984 tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
866 gdesc->dword[3]); 985 le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
867 986
868 spin_unlock_irqrestore(&tq->tx_lock, flags); 987 spin_unlock_irqrestore(&tq->tx_lock, flags);
869 988
870 if (tq->shared->txNumDeferred >= tq->shared->txThreshold) { 989 if (le32_to_cpu(tq->shared->txNumDeferred) >=
990 le32_to_cpu(tq->shared->txThreshold)) {
871 tq->shared->txNumDeferred = 0; 991 tq->shared->txNumDeferred = 0;
872 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD, 992 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD,
873 tq->tx_ring.next2fill); 993 tq->tx_ring.next2fill);
@@ -889,9 +1009,8 @@ static netdev_tx_t
889vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 1009vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
890{ 1010{
891 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1011 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
892 struct vmxnet3_tx_queue *tq = &adapter->tx_queue;
893 1012
894 return vmxnet3_tq_xmit(skb, tq, adapter, netdev); 1013 return vmxnet3_tq_xmit(skb, &adapter->tx_queue, adapter, netdev);
895} 1014}
896 1015
897 1016
@@ -902,7 +1021,7 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
902{ 1021{
903 if (!gdesc->rcd.cnc && adapter->rxcsum) { 1022 if (!gdesc->rcd.cnc && adapter->rxcsum) {
904 /* typical case: TCP/UDP over IP and both csums are correct */ 1023 /* typical case: TCP/UDP over IP and both csums are correct */
905 if ((gdesc->dword[3] & VMXNET3_RCD_CSUM_OK) == 1024 if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
906 VMXNET3_RCD_CSUM_OK) { 1025 VMXNET3_RCD_CSUM_OK) {
907 skb->ip_summed = CHECKSUM_UNNECESSARY; 1026 skb->ip_summed = CHECKSUM_UNNECESSARY;
908 BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp)); 1027 BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
@@ -957,8 +1076,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
957 u32 num_rxd = 0; 1076 u32 num_rxd = 0;
958 struct Vmxnet3_RxCompDesc *rcd; 1077 struct Vmxnet3_RxCompDesc *rcd;
959 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx; 1078 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
960 1079#ifdef __BIG_ENDIAN_BITFIELD
961 rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd; 1080 struct Vmxnet3_RxDesc rxCmdDesc;
1081 struct Vmxnet3_RxCompDesc rxComp;
1082#endif
1083 vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
1084 &rxComp);
962 while (rcd->gen == rq->comp_ring.gen) { 1085 while (rcd->gen == rq->comp_ring.gen) {
963 struct vmxnet3_rx_buf_info *rbi; 1086 struct vmxnet3_rx_buf_info *rbi;
964 struct sk_buff *skb; 1087 struct sk_buff *skb;
@@ -976,11 +1099,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
976 1099
977 idx = rcd->rxdIdx; 1100 idx = rcd->rxdIdx;
978 ring_idx = rcd->rqID == rq->qid ? 0 : 1; 1101 ring_idx = rcd->rqID == rq->qid ? 0 : 1;
979 1102 vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
980 rxd = &rq->rx_ring[ring_idx].base[idx].rxd; 1103 &rxCmdDesc);
981 rbi = rq->buf_info[ring_idx] + idx; 1104 rbi = rq->buf_info[ring_idx] + idx;
982 1105
983 BUG_ON(rxd->addr != rbi->dma_addr || rxd->len != rbi->len); 1106 BUG_ON(rxd->addr != rbi->dma_addr ||
1107 rxd->len != rbi->len);
984 1108
985 if (unlikely(rcd->eop && rcd->err)) { 1109 if (unlikely(rcd->eop && rcd->err)) {
986 vmxnet3_rx_error(rq, rcd, ctx, adapter); 1110 vmxnet3_rx_error(rq, rcd, ctx, adapter);
@@ -1078,7 +1202,8 @@ rcd_done:
1078 } 1202 }
1079 1203
1080 vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring); 1204 vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
1081 rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd; 1205 vmxnet3_getRxComp(rcd,
1206 &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
1082 } 1207 }
1083 1208
1084 return num_rxd; 1209 return num_rxd;
@@ -1094,7 +1219,11 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
1094 1219
1095 for (ring_idx = 0; ring_idx < 2; ring_idx++) { 1220 for (ring_idx = 0; ring_idx < 2; ring_idx++) {
1096 for (i = 0; i < rq->rx_ring[ring_idx].size; i++) { 1221 for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
1097 rxd = &rq->rx_ring[ring_idx].base[i].rxd; 1222#ifdef __BIG_ENDIAN_BITFIELD
1223 struct Vmxnet3_RxDesc rxDesc;
1224#endif
1225 vmxnet3_getRxDesc(rxd,
1226 &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
1098 1227
1099 if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD && 1228 if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
1100 rq->buf_info[ring_idx][i].skb) { 1229 rq->buf_info[ring_idx][i].skb) {
@@ -1346,12 +1475,12 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
1346 err = request_irq(adapter->intr.msix_entries[0].vector, 1475 err = request_irq(adapter->intr.msix_entries[0].vector,
1347 vmxnet3_intr, 0, adapter->netdev->name, 1476 vmxnet3_intr, 0, adapter->netdev->name,
1348 adapter->netdev); 1477 adapter->netdev);
1349 } else 1478 } else if (adapter->intr.type == VMXNET3_IT_MSI) {
1350#endif
1351 if (adapter->intr.type == VMXNET3_IT_MSI) {
1352 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0, 1479 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
1353 adapter->netdev->name, adapter->netdev); 1480 adapter->netdev->name, adapter->netdev);
1354 } else { 1481 } else
1482#endif
1483 {
1355 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 1484 err = request_irq(adapter->pdev->irq, vmxnet3_intr,
1356 IRQF_SHARED, adapter->netdev->name, 1485 IRQF_SHARED, adapter->netdev->name,
1357 adapter->netdev); 1486 adapter->netdev);
@@ -1412,6 +1541,22 @@ vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
1412} 1541}
1413 1542
1414 1543
1544inline void set_flag_le16(__le16 *data, u16 flag)
1545{
1546 *data = cpu_to_le16(le16_to_cpu(*data) | flag);
1547}
1548
1549inline void set_flag_le64(__le64 *data, u64 flag)
1550{
1551 *data = cpu_to_le64(le64_to_cpu(*data) | flag);
1552}
1553
1554inline void reset_flag_le64(__le64 *data, u64 flag)
1555{
1556 *data = cpu_to_le64(le64_to_cpu(*data) & ~flag);
1557}
1558
1559
1415static void 1560static void
1416vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) 1561vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1417{ 1562{
@@ -1427,7 +1572,8 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1427 adapter->vlan_grp = grp; 1572 adapter->vlan_grp = grp;
1428 1573
1429 /* update FEATURES to device */ 1574 /* update FEATURES to device */
1430 devRead->misc.uptFeatures |= UPT1_F_RXVLAN; 1575 set_flag_le64(&devRead->misc.uptFeatures,
1576 UPT1_F_RXVLAN);
1431 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1577 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1432 VMXNET3_CMD_UPDATE_FEATURE); 1578 VMXNET3_CMD_UPDATE_FEATURE);
1433 /* 1579 /*
@@ -1450,7 +1596,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1450 struct Vmxnet3_DSDevRead *devRead = &shared->devRead; 1596 struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
1451 adapter->vlan_grp = NULL; 1597 adapter->vlan_grp = NULL;
1452 1598
1453 if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) { 1599 if (le64_to_cpu(devRead->misc.uptFeatures) & UPT1_F_RXVLAN) {
1454 int i; 1600 int i;
1455 1601
1456 for (i = 0; i < VMXNET3_VFT_SIZE; i++) { 1602 for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
@@ -1463,7 +1609,8 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1463 VMXNET3_CMD_UPDATE_VLAN_FILTERS); 1609 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1464 1610
1465 /* update FEATURES to device */ 1611 /* update FEATURES to device */
1466 devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN; 1612 reset_flag_le64(&devRead->misc.uptFeatures,
1613 UPT1_F_RXVLAN);
1467 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1614 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1468 VMXNET3_CMD_UPDATE_FEATURE); 1615 VMXNET3_CMD_UPDATE_FEATURE);
1469 } 1616 }
@@ -1565,9 +1712,10 @@ vmxnet3_set_mc(struct net_device *netdev)
1565 new_table = vmxnet3_copy_mc(netdev); 1712 new_table = vmxnet3_copy_mc(netdev);
1566 if (new_table) { 1713 if (new_table) {
1567 new_mode |= VMXNET3_RXM_MCAST; 1714 new_mode |= VMXNET3_RXM_MCAST;
1568 rxConf->mfTableLen = netdev->mc_count * 1715 rxConf->mfTableLen = cpu_to_le16(
1569 ETH_ALEN; 1716 netdev->mc_count * ETH_ALEN);
1570 rxConf->mfTablePA = virt_to_phys(new_table); 1717 rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
1718 new_table));
1571 } else { 1719 } else {
1572 printk(KERN_INFO "%s: failed to copy mcast list" 1720 printk(KERN_INFO "%s: failed to copy mcast list"
1573 ", setting ALL_MULTI\n", netdev->name); 1721 ", setting ALL_MULTI\n", netdev->name);
@@ -1582,7 +1730,7 @@ vmxnet3_set_mc(struct net_device *netdev)
1582 } 1730 }
1583 1731
1584 if (new_mode != rxConf->rxMode) { 1732 if (new_mode != rxConf->rxMode) {
1585 rxConf->rxMode = new_mode; 1733 rxConf->rxMode = cpu_to_le32(new_mode);
1586 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1734 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1587 VMXNET3_CMD_UPDATE_RX_MODE); 1735 VMXNET3_CMD_UPDATE_RX_MODE);
1588 } 1736 }
@@ -1610,63 +1758,69 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
1610 memset(shared, 0, sizeof(*shared)); 1758 memset(shared, 0, sizeof(*shared));
1611 1759
1612 /* driver settings */ 1760 /* driver settings */
1613 shared->magic = VMXNET3_REV1_MAGIC; 1761 shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
1614 devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM; 1762 devRead->misc.driverInfo.version = cpu_to_le32(
1763 VMXNET3_DRIVER_VERSION_NUM);
1615 devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ? 1764 devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
1616 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64); 1765 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
1617 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX; 1766 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
1618 devRead->misc.driverInfo.vmxnet3RevSpt = 1; 1767 *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
1619 devRead->misc.driverInfo.uptVerSpt = 1; 1768 *((u32 *)&devRead->misc.driverInfo.gos));
1769 devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
1770 devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
1620 1771
1621 devRead->misc.ddPA = virt_to_phys(adapter); 1772 devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter));
1622 devRead->misc.ddLen = sizeof(struct vmxnet3_adapter); 1773 devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
1623 1774
1624 /* set up feature flags */ 1775 /* set up feature flags */
1625 if (adapter->rxcsum) 1776 if (adapter->rxcsum)
1626 devRead->misc.uptFeatures |= UPT1_F_RXCSUM; 1777 set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXCSUM);
1627 1778
1628 if (adapter->lro) { 1779 if (adapter->lro) {
1629 devRead->misc.uptFeatures |= UPT1_F_LRO; 1780 set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_LRO);
1630 devRead->misc.maxNumRxSG = 1 + MAX_SKB_FRAGS; 1781 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
1631 } 1782 }
1632 if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) 1783 if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX)
1633 && adapter->vlan_grp) { 1784 && adapter->vlan_grp) {
1634 devRead->misc.uptFeatures |= UPT1_F_RXVLAN; 1785 set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXVLAN);
1635 } 1786 }
1636 1787
1637 devRead->misc.mtu = adapter->netdev->mtu; 1788 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
1638 devRead->misc.queueDescPA = adapter->queue_desc_pa; 1789 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
1639 devRead->misc.queueDescLen = sizeof(struct Vmxnet3_TxQueueDesc) + 1790 devRead->misc.queueDescLen = cpu_to_le32(
1640 sizeof(struct Vmxnet3_RxQueueDesc); 1791 sizeof(struct Vmxnet3_TxQueueDesc) +
1792 sizeof(struct Vmxnet3_RxQueueDesc));
1641 1793
1642 /* tx queue settings */ 1794 /* tx queue settings */
1643 BUG_ON(adapter->tx_queue.tx_ring.base == NULL); 1795 BUG_ON(adapter->tx_queue.tx_ring.base == NULL);
1644 1796
1645 devRead->misc.numTxQueues = 1; 1797 devRead->misc.numTxQueues = 1;
1646 tqc = &adapter->tqd_start->conf; 1798 tqc = &adapter->tqd_start->conf;
1647 tqc->txRingBasePA = adapter->tx_queue.tx_ring.basePA; 1799 tqc->txRingBasePA = cpu_to_le64(adapter->tx_queue.tx_ring.basePA);
1648 tqc->dataRingBasePA = adapter->tx_queue.data_ring.basePA; 1800 tqc->dataRingBasePA = cpu_to_le64(adapter->tx_queue.data_ring.basePA);
1649 tqc->compRingBasePA = adapter->tx_queue.comp_ring.basePA; 1801 tqc->compRingBasePA = cpu_to_le64(adapter->tx_queue.comp_ring.basePA);
1650 tqc->ddPA = virt_to_phys(adapter->tx_queue.buf_info); 1802 tqc->ddPA = cpu_to_le64(virt_to_phys(
1651 tqc->txRingSize = adapter->tx_queue.tx_ring.size; 1803 adapter->tx_queue.buf_info));
1652 tqc->dataRingSize = adapter->tx_queue.data_ring.size; 1804 tqc->txRingSize = cpu_to_le32(adapter->tx_queue.tx_ring.size);
1653 tqc->compRingSize = adapter->tx_queue.comp_ring.size; 1805 tqc->dataRingSize = cpu_to_le32(adapter->tx_queue.data_ring.size);
1654 tqc->ddLen = sizeof(struct vmxnet3_tx_buf_info) * 1806 tqc->compRingSize = cpu_to_le32(adapter->tx_queue.comp_ring.size);
1655 tqc->txRingSize; 1807 tqc->ddLen = cpu_to_le32(sizeof(struct vmxnet3_tx_buf_info) *
1808 tqc->txRingSize);
1656 tqc->intrIdx = adapter->tx_queue.comp_ring.intr_idx; 1809 tqc->intrIdx = adapter->tx_queue.comp_ring.intr_idx;
1657 1810
1658 /* rx queue settings */ 1811 /* rx queue settings */
1659 devRead->misc.numRxQueues = 1; 1812 devRead->misc.numRxQueues = 1;
1660 rqc = &adapter->rqd_start->conf; 1813 rqc = &adapter->rqd_start->conf;
1661 rqc->rxRingBasePA[0] = adapter->rx_queue.rx_ring[0].basePA; 1814 rqc->rxRingBasePA[0] = cpu_to_le64(adapter->rx_queue.rx_ring[0].basePA);
1662 rqc->rxRingBasePA[1] = adapter->rx_queue.rx_ring[1].basePA; 1815 rqc->rxRingBasePA[1] = cpu_to_le64(adapter->rx_queue.rx_ring[1].basePA);
1663 rqc->compRingBasePA = adapter->rx_queue.comp_ring.basePA; 1816 rqc->compRingBasePA = cpu_to_le64(adapter->rx_queue.comp_ring.basePA);
1664 rqc->ddPA = virt_to_phys(adapter->rx_queue.buf_info); 1817 rqc->ddPA = cpu_to_le64(virt_to_phys(
1665 rqc->rxRingSize[0] = adapter->rx_queue.rx_ring[0].size; 1818 adapter->rx_queue.buf_info));
1666 rqc->rxRingSize[1] = adapter->rx_queue.rx_ring[1].size; 1819 rqc->rxRingSize[0] = cpu_to_le32(adapter->rx_queue.rx_ring[0].size);
1667 rqc->compRingSize = adapter->rx_queue.comp_ring.size; 1820 rqc->rxRingSize[1] = cpu_to_le32(adapter->rx_queue.rx_ring[1].size);
1668 rqc->ddLen = sizeof(struct vmxnet3_rx_buf_info) * 1821 rqc->compRingSize = cpu_to_le32(adapter->rx_queue.comp_ring.size);
1669 (rqc->rxRingSize[0] + rqc->rxRingSize[1]); 1822 rqc->ddLen = cpu_to_le32(sizeof(struct vmxnet3_rx_buf_info) *
1823 (rqc->rxRingSize[0] + rqc->rxRingSize[1]));
1670 rqc->intrIdx = adapter->rx_queue.comp_ring.intr_idx; 1824 rqc->intrIdx = adapter->rx_queue.comp_ring.intr_idx;
1671 1825
1672 /* intr settings */ 1826 /* intr settings */
@@ -1715,11 +1869,10 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
1715 1869
1716 vmxnet3_setup_driver_shared(adapter); 1870 vmxnet3_setup_driver_shared(adapter);
1717 1871
1718 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 1872 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
1719 VMXNET3_GET_ADDR_LO(adapter->shared_pa)); 1873 adapter->shared_pa));
1720 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 1874 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
1721 VMXNET3_GET_ADDR_HI(adapter->shared_pa)); 1875 adapter->shared_pa));
1722
1723 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1876 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1724 VMXNET3_CMD_ACTIVATE_DEV); 1877 VMXNET3_CMD_ACTIVATE_DEV);
1725 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); 1878 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
@@ -2425,7 +2578,7 @@ vmxnet3_suspend(struct device *device)
2425 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN); 2578 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
2426 pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */ 2579 pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
2427 2580
2428 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER; 2581 set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
2429 i++; 2582 i++;
2430 } 2583 }
2431 2584
@@ -2467,19 +2620,21 @@ vmxnet3_suspend(struct device *device)
2467 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */ 2620 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
2468 in_dev_put(in_dev); 2621 in_dev_put(in_dev);
2469 2622
2470 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER; 2623 set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
2471 i++; 2624 i++;
2472 } 2625 }
2473 2626
2474skip_arp: 2627skip_arp:
2475 if (adapter->wol & WAKE_MAGIC) 2628 if (adapter->wol & WAKE_MAGIC)
2476 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC; 2629 set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_MAGIC);
2477 2630
2478 pmConf->numFilters = i; 2631 pmConf->numFilters = i;
2479 2632
2480 adapter->shared->devRead.pmConfDesc.confVer = 1; 2633 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
2481 adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf); 2634 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
2482 adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf); 2635 *pmConf));
2636 adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
2637 pmConf));
2483 2638
2484 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2639 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2485 VMXNET3_CMD_UPDATE_PMCFG); 2640 VMXNET3_CMD_UPDATE_PMCFG);
@@ -2510,9 +2665,11 @@ vmxnet3_resume(struct device *device)
2510 pmConf = adapter->pm_conf; 2665 pmConf = adapter->pm_conf;
2511 memset(pmConf, 0, sizeof(*pmConf)); 2666 memset(pmConf, 0, sizeof(*pmConf));
2512 2667
2513 adapter->shared->devRead.pmConfDesc.confVer = 1; 2668 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
2514 adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf); 2669 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
2515 adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf); 2670 *pmConf));
2671 adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le32(virt_to_phys(
2672 pmConf));
2516 2673
2517 netif_device_attach(netdev); 2674 netif_device_attach(netdev);
2518 pci_set_power_state(pdev, PCI_D0); 2675 pci_set_power_state(pdev, PCI_D0);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index c2c15e4cafc..3935c4493fb 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -50,11 +50,13 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
50 adapter->rxcsum = val; 50 adapter->rxcsum = val;
51 if (netif_running(netdev)) { 51 if (netif_running(netdev)) {
52 if (val) 52 if (val)
53 adapter->shared->devRead.misc.uptFeatures |= 53 set_flag_le64(
54 UPT1_F_RXCSUM; 54 &adapter->shared->devRead.misc.uptFeatures,
55 UPT1_F_RXCSUM);
55 else 56 else
56 adapter->shared->devRead.misc.uptFeatures &= 57 reset_flag_le64(
57 ~UPT1_F_RXCSUM; 58 &adapter->shared->devRead.misc.uptFeatures,
59 UPT1_F_RXCSUM);
58 60
59 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 61 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
60 VMXNET3_CMD_UPDATE_FEATURE); 62 VMXNET3_CMD_UPDATE_FEATURE);
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 445081686d5..34f392f46fb 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -330,14 +330,14 @@ struct vmxnet3_adapter {
330}; 330};
331 331
332#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \ 332#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \
333 writel((val), (adapter)->hw_addr0 + (reg)) 333 writel(cpu_to_le32(val), (adapter)->hw_addr0 + (reg))
334#define VMXNET3_READ_BAR0_REG(adapter, reg) \ 334#define VMXNET3_READ_BAR0_REG(adapter, reg) \
335 readl((adapter)->hw_addr0 + (reg)) 335 le32_to_cpu(readl((adapter)->hw_addr0 + (reg)))
336 336
337#define VMXNET3_WRITE_BAR1_REG(adapter, reg, val) \ 337#define VMXNET3_WRITE_BAR1_REG(adapter, reg, val) \
338 writel((val), (adapter)->hw_addr1 + (reg)) 338 writel(cpu_to_le32(val), (adapter)->hw_addr1 + (reg))
339#define VMXNET3_READ_BAR1_REG(adapter, reg) \ 339#define VMXNET3_READ_BAR1_REG(adapter, reg) \
340 readl((adapter)->hw_addr1 + (reg)) 340 le32_to_cpu(readl((adapter)->hw_addr1 + (reg)))
341 341
342#define VMXNET3_WAKE_QUEUE_THRESHOLD(tq) (5) 342#define VMXNET3_WAKE_QUEUE_THRESHOLD(tq) (5)
343#define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \ 343#define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \
@@ -353,6 +353,10 @@ struct vmxnet3_adapter {
353#define VMXNET3_MAX_ETH_HDR_SIZE 22 353#define VMXNET3_MAX_ETH_HDR_SIZE 22
354#define VMXNET3_MAX_SKB_BUF_SIZE (3*1024) 354#define VMXNET3_MAX_SKB_BUF_SIZE (3*1024)
355 355
356void set_flag_le16(__le16 *data, u16 flag);
357void set_flag_le64(__le64 *data, u64 flag);
358void reset_flag_le64(__le64 *data, u64 flag);
359
356int 360int
357vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter); 361vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);
358 362
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 81c8aec9df9..63a010252a3 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -1127,7 +1127,7 @@ done:
1127 init_timer(&dpriv->timer); 1127 init_timer(&dpriv->timer);
1128 dpriv->timer.expires = jiffies + 10*HZ; 1128 dpriv->timer.expires = jiffies + 10*HZ;
1129 dpriv->timer.data = (unsigned long)dev; 1129 dpriv->timer.data = (unsigned long)dev;
1130 dpriv->timer.function = &dscc4_timer; 1130 dpriv->timer.function = dscc4_timer;
1131 add_timer(&dpriv->timer); 1131 add_timer(&dpriv->timer);
1132 netif_carrier_on(dev); 1132 netif_carrier_on(dev);
1133 1133
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index b80f514877d..39410016b4f 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1538,7 +1538,7 @@ static int adm8211_start(struct ieee80211_hw *dev)
1538 adm8211_hw_init(dev); 1538 adm8211_hw_init(dev);
1539 adm8211_rf_set_channel(dev, priv->channel); 1539 adm8211_rf_set_channel(dev, priv->channel);
1540 1540
1541 retval = request_irq(priv->pdev->irq, &adm8211_interrupt, 1541 retval = request_irq(priv->pdev->irq, adm8211_interrupt,
1542 IRQF_SHARED, "adm8211", dev); 1542 IRQF_SHARED, "adm8211", dev);
1543 if (retval) { 1543 if (retval) {
1544 printk(KERN_ERR "%s: failed to register IRQ handler\n", 1544 printk(KERN_ERR "%s: failed to register IRQ handler\n",
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index a9bc8a97c4e..b7408370cf8 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -6029,7 +6029,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
6029 struct ipw2100_priv *priv; 6029 struct ipw2100_priv *priv;
6030 struct net_device *dev; 6030 struct net_device *dev;
6031 6031
6032 dev = alloc_ieee80211(sizeof(struct ipw2100_priv), 0); 6032 dev = alloc_ieee80211(sizeof(struct ipw2100_priv));
6033 if (!dev) 6033 if (!dev)
6034 return NULL; 6034 return NULL;
6035 priv = libipw_priv(dev); 6035 priv = libipw_priv(dev);
@@ -6342,7 +6342,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6342 sysfs_remove_group(&pci_dev->dev.kobj, 6342 sysfs_remove_group(&pci_dev->dev.kobj,
6343 &ipw2100_attribute_group); 6343 &ipw2100_attribute_group);
6344 6344
6345 free_ieee80211(dev, 0); 6345 free_ieee80211(dev);
6346 pci_set_drvdata(pci_dev, NULL); 6346 pci_set_drvdata(pci_dev, NULL);
6347 } 6347 }
6348 6348
@@ -6400,7 +6400,7 @@ static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev)
6400 if (dev->base_addr) 6400 if (dev->base_addr)
6401 iounmap((void __iomem *)dev->base_addr); 6401 iounmap((void __iomem *)dev->base_addr);
6402 6402
6403 free_ieee80211(dev, 0); 6403 free_ieee80211(dev);
6404 } 6404 }
6405 6405
6406 pci_release_regions(pci_dev); 6406 pci_release_regions(pci_dev);
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 39808e9378b..9b398db2d74 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -108,25 +108,6 @@ static int antenna = CFG_SYS_ANTENNA_BOTH;
108static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */ 108static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */
109#endif 109#endif
110 110
111static struct ieee80211_rate ipw2200_rates[] = {
112 { .bitrate = 10 },
113 { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
114 { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
115 { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
116 { .bitrate = 60 },
117 { .bitrate = 90 },
118 { .bitrate = 120 },
119 { .bitrate = 180 },
120 { .bitrate = 240 },
121 { .bitrate = 360 },
122 { .bitrate = 480 },
123 { .bitrate = 540 }
124};
125
126#define ipw2200_a_rates (ipw2200_rates + 4)
127#define ipw2200_num_a_rates 8
128#define ipw2200_bg_rates (ipw2200_rates + 0)
129#define ipw2200_num_bg_rates 12
130 111
131#ifdef CONFIG_IPW2200_QOS 112#ifdef CONFIG_IPW2200_QOS
132static int qos_enable = 0; 113static int qos_enable = 0;
@@ -8678,6 +8659,24 @@ static int ipw_sw_reset(struct ipw_priv *priv, int option)
8678 * 8659 *
8679 */ 8660 */
8680 8661
8662static int ipw_wx_get_name(struct net_device *dev,
8663 struct iw_request_info *info,
8664 union iwreq_data *wrqu, char *extra)
8665{
8666 struct ipw_priv *priv = libipw_priv(dev);
8667 mutex_lock(&priv->mutex);
8668 if (priv->status & STATUS_RF_KILL_MASK)
8669 strcpy(wrqu->name, "radio off");
8670 else if (!(priv->status & STATUS_ASSOCIATED))
8671 strcpy(wrqu->name, "unassociated");
8672 else
8673 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
8674 ipw_modes[priv->assoc_request.ieee_mode]);
8675 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
8676 mutex_unlock(&priv->mutex);
8677 return 0;
8678}
8679
8681static int ipw_set_channel(struct ipw_priv *priv, u8 channel) 8680static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8682{ 8681{
8683 if (channel == 0) { 8682 if (channel == 0) {
@@ -9977,7 +9976,7 @@ static int ipw_wx_sw_reset(struct net_device *dev,
9977/* Rebase the WE IOCTLs to zero for the handler array */ 9976/* Rebase the WE IOCTLs to zero for the handler array */
9978#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT] 9977#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
9979static iw_handler ipw_wx_handlers[] = { 9978static iw_handler ipw_wx_handlers[] = {
9980 IW_IOCTL(SIOCGIWNAME) = (iw_handler) cfg80211_wext_giwname, 9979 IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
9981 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq, 9980 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
9982 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq, 9981 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
9983 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode, 9982 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
@@ -11422,100 +11421,16 @@ static void ipw_bg_down(struct work_struct *work)
11422/* Called by register_netdev() */ 11421/* Called by register_netdev() */
11423static int ipw_net_init(struct net_device *dev) 11422static int ipw_net_init(struct net_device *dev)
11424{ 11423{
11425 int i, rc = 0;
11426 struct ipw_priv *priv = libipw_priv(dev); 11424 struct ipw_priv *priv = libipw_priv(dev);
11427 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
11428 struct wireless_dev *wdev = &priv->ieee->wdev;
11429 mutex_lock(&priv->mutex); 11425 mutex_lock(&priv->mutex);
11430 11426
11431 if (ipw_up(priv)) { 11427 if (ipw_up(priv)) {
11432 rc = -EIO; 11428 mutex_unlock(&priv->mutex);
11433 goto out; 11429 return -EIO;
11434 }
11435
11436 memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
11437
11438 /* fill-out priv->ieee->bg_band */
11439 if (geo->bg_channels) {
11440 struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
11441
11442 bg_band->band = IEEE80211_BAND_2GHZ;
11443 bg_band->n_channels = geo->bg_channels;
11444 bg_band->channels =
11445 kzalloc(geo->bg_channels *
11446 sizeof(struct ieee80211_channel), GFP_KERNEL);
11447 /* translate geo->bg to bg_band.channels */
11448 for (i = 0; i < geo->bg_channels; i++) {
11449 bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
11450 bg_band->channels[i].center_freq = geo->bg[i].freq;
11451 bg_band->channels[i].hw_value = geo->bg[i].channel;
11452 bg_band->channels[i].max_power = geo->bg[i].max_power;
11453 if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11454 bg_band->channels[i].flags |=
11455 IEEE80211_CHAN_PASSIVE_SCAN;
11456 if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
11457 bg_band->channels[i].flags |=
11458 IEEE80211_CHAN_NO_IBSS;
11459 if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
11460 bg_band->channels[i].flags |=
11461 IEEE80211_CHAN_RADAR;
11462 /* No equivalent for LIBIPW_CH_80211H_RULES,
11463 LIBIPW_CH_UNIFORM_SPREADING, or
11464 LIBIPW_CH_B_ONLY... */
11465 }
11466 /* point at bitrate info */
11467 bg_band->bitrates = ipw2200_bg_rates;
11468 bg_band->n_bitrates = ipw2200_num_bg_rates;
11469
11470 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
11471 }
11472
11473 /* fill-out priv->ieee->a_band */
11474 if (geo->a_channels) {
11475 struct ieee80211_supported_band *a_band = &priv->ieee->a_band;
11476
11477 a_band->band = IEEE80211_BAND_5GHZ;
11478 a_band->n_channels = geo->a_channels;
11479 a_band->channels =
11480 kzalloc(geo->a_channels *
11481 sizeof(struct ieee80211_channel), GFP_KERNEL);
11482 /* translate geo->bg to a_band.channels */
11483 for (i = 0; i < geo->a_channels; i++) {
11484 a_band->channels[i].band = IEEE80211_BAND_2GHZ;
11485 a_band->channels[i].center_freq = geo->a[i].freq;
11486 a_band->channels[i].hw_value = geo->a[i].channel;
11487 a_band->channels[i].max_power = geo->a[i].max_power;
11488 if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11489 a_band->channels[i].flags |=
11490 IEEE80211_CHAN_PASSIVE_SCAN;
11491 if (geo->a[i].flags & LIBIPW_CH_NO_IBSS)
11492 a_band->channels[i].flags |=
11493 IEEE80211_CHAN_NO_IBSS;
11494 if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)
11495 a_band->channels[i].flags |=
11496 IEEE80211_CHAN_RADAR;
11497 /* No equivalent for LIBIPW_CH_80211H_RULES,
11498 LIBIPW_CH_UNIFORM_SPREADING, or
11499 LIBIPW_CH_B_ONLY... */
11500 }
11501 /* point at bitrate info */
11502 a_band->bitrates = ipw2200_a_rates;
11503 a_band->n_bitrates = ipw2200_num_a_rates;
11504
11505 wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band;
11506 }
11507
11508 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
11509
11510 /* With that information in place, we can now register the wiphy... */
11511 if (wiphy_register(wdev->wiphy)) {
11512 rc = -EIO;
11513 goto out;
11514 } 11430 }
11515 11431
11516out:
11517 mutex_unlock(&priv->mutex); 11432 mutex_unlock(&priv->mutex);
11518 return rc; 11433 return 0;
11519} 11434}
11520 11435
11521/* PCI driver stuff */ 11436/* PCI driver stuff */
@@ -11646,7 +11561,7 @@ static int ipw_prom_alloc(struct ipw_priv *priv)
11646 if (priv->prom_net_dev) 11561 if (priv->prom_net_dev)
11647 return -EPERM; 11562 return -EPERM;
11648 11563
11649 priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv), 1); 11564 priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv));
11650 if (priv->prom_net_dev == NULL) 11565 if (priv->prom_net_dev == NULL)
11651 return -ENOMEM; 11566 return -ENOMEM;
11652 11567
@@ -11665,7 +11580,7 @@ static int ipw_prom_alloc(struct ipw_priv *priv)
11665 11580
11666 rc = register_netdev(priv->prom_net_dev); 11581 rc = register_netdev(priv->prom_net_dev);
11667 if (rc) { 11582 if (rc) {
11668 free_ieee80211(priv->prom_net_dev, 1); 11583 free_ieee80211(priv->prom_net_dev);
11669 priv->prom_net_dev = NULL; 11584 priv->prom_net_dev = NULL;
11670 return rc; 11585 return rc;
11671 } 11586 }
@@ -11679,7 +11594,7 @@ static void ipw_prom_free(struct ipw_priv *priv)
11679 return; 11594 return;
11680 11595
11681 unregister_netdev(priv->prom_net_dev); 11596 unregister_netdev(priv->prom_net_dev);
11682 free_ieee80211(priv->prom_net_dev, 1); 11597 free_ieee80211(priv->prom_net_dev);
11683 11598
11684 priv->prom_net_dev = NULL; 11599 priv->prom_net_dev = NULL;
11685} 11600}
@@ -11707,7 +11622,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11707 struct ipw_priv *priv; 11622 struct ipw_priv *priv;
11708 int i; 11623 int i;
11709 11624
11710 net_dev = alloc_ieee80211(sizeof(struct ipw_priv), 0); 11625 net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
11711 if (net_dev == NULL) { 11626 if (net_dev == NULL) {
11712 err = -ENOMEM; 11627 err = -ENOMEM;
11713 goto out; 11628 goto out;
@@ -11855,7 +11770,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11855 pci_disable_device(pdev); 11770 pci_disable_device(pdev);
11856 pci_set_drvdata(pdev, NULL); 11771 pci_set_drvdata(pdev, NULL);
11857 out_free_ieee80211: 11772 out_free_ieee80211:
11858 free_ieee80211(priv->net_dev, 0); 11773 free_ieee80211(priv->net_dev);
11859 out: 11774 out:
11860 return err; 11775 return err;
11861} 11776}
@@ -11922,7 +11837,7 @@ static void __devexit ipw_pci_remove(struct pci_dev *pdev)
11922 pci_release_regions(pdev); 11837 pci_release_regions(pdev);
11923 pci_disable_device(pdev); 11838 pci_disable_device(pdev);
11924 pci_set_drvdata(pdev, NULL); 11839 pci_set_drvdata(pdev, NULL);
11925 free_ieee80211(priv->net_dev, 0); 11840 free_ieee80211(priv->net_dev);
11926 free_firmware(); 11841 free_firmware();
11927} 11842}
11928 11843
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index bf45391172f..1e334ff6bd5 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -31,7 +31,6 @@
31#include <linux/ieee80211.h> 31#include <linux/ieee80211.h>
32 32
33#include <net/lib80211.h> 33#include <net/lib80211.h>
34#include <net/cfg80211.h>
35 34
36#define LIBIPW_VERSION "git-1.1.13" 35#define LIBIPW_VERSION "git-1.1.13"
37 36
@@ -784,15 +783,12 @@ struct libipw_geo {
784 783
785struct libipw_device { 784struct libipw_device {
786 struct net_device *dev; 785 struct net_device *dev;
787 struct wireless_dev wdev;
788 struct libipw_security sec; 786 struct libipw_security sec;
789 787
790 /* Bookkeeping structures */ 788 /* Bookkeeping structures */
791 struct libipw_stats ieee_stats; 789 struct libipw_stats ieee_stats;
792 790
793 struct libipw_geo geo; 791 struct libipw_geo geo;
794 struct ieee80211_supported_band bg_band;
795 struct ieee80211_supported_band a_band;
796 792
797 /* Probe / Beacon management */ 793 /* Probe / Beacon management */
798 struct list_head network_free_list; 794 struct list_head network_free_list;
@@ -1018,8 +1014,8 @@ static inline int libipw_is_cck_rate(u8 rate)
1018} 1014}
1019 1015
1020/* ieee80211.c */ 1016/* ieee80211.c */
1021extern void free_ieee80211(struct net_device *dev, int monitor); 1017extern void free_ieee80211(struct net_device *dev);
1022extern struct net_device *alloc_ieee80211(int sizeof_priv, int monitor); 1018extern struct net_device *alloc_ieee80211(int sizeof_priv);
1023extern int libipw_change_mtu(struct net_device *dev, int new_mtu); 1019extern int libipw_change_mtu(struct net_device *dev, int new_mtu);
1024 1020
1025extern void libipw_networks_age(struct libipw_device *ieee, 1021extern void libipw_networks_age(struct libipw_device *ieee,
diff --git a/drivers/net/wireless/ipw2x00/libipw_module.c b/drivers/net/wireless/ipw2x00/libipw_module.c
index a0e9f6aed7d..eb2b60834c1 100644
--- a/drivers/net/wireless/ipw2x00/libipw_module.c
+++ b/drivers/net/wireless/ipw2x00/libipw_module.c
@@ -62,9 +62,6 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION);
62MODULE_AUTHOR(DRV_COPYRIGHT); 62MODULE_AUTHOR(DRV_COPYRIGHT);
63MODULE_LICENSE("GPL"); 63MODULE_LICENSE("GPL");
64 64
65struct cfg80211_ops libipw_config_ops = { };
66void *libipw_wiphy_privid = &libipw_wiphy_privid;
67
68static int libipw_networks_allocate(struct libipw_device *ieee) 65static int libipw_networks_allocate(struct libipw_device *ieee)
69{ 66{
70 if (ieee->networks) 67 if (ieee->networks)
@@ -143,7 +140,7 @@ int libipw_change_mtu(struct net_device *dev, int new_mtu)
143} 140}
144EXPORT_SYMBOL(libipw_change_mtu); 141EXPORT_SYMBOL(libipw_change_mtu);
145 142
146struct net_device *alloc_ieee80211(int sizeof_priv, int monitor) 143struct net_device *alloc_ieee80211(int sizeof_priv)
147{ 144{
148 struct libipw_device *ieee; 145 struct libipw_device *ieee;
149 struct net_device *dev; 146 struct net_device *dev;
@@ -160,31 +157,10 @@ struct net_device *alloc_ieee80211(int sizeof_priv, int monitor)
160 157
161 ieee->dev = dev; 158 ieee->dev = dev;
162 159
163 if (!monitor) {
164 ieee->wdev.wiphy = wiphy_new(&libipw_config_ops, 0);
165 if (!ieee->wdev.wiphy) {
166 LIBIPW_ERROR("Unable to allocate wiphy.\n");
167 goto failed_free_netdev;
168 }
169
170 ieee->dev->ieee80211_ptr = &ieee->wdev;
171 ieee->wdev.iftype = NL80211_IFTYPE_STATION;
172
173 /* Fill-out wiphy structure bits we know... Not enough info
174 here to call set_wiphy_dev or set MAC address or channel info
175 -- have to do that in ->ndo_init... */
176 ieee->wdev.wiphy->privid = libipw_wiphy_privid;
177
178 ieee->wdev.wiphy->max_scan_ssids = 1;
179 ieee->wdev.wiphy->max_scan_ie_len = 0;
180 ieee->wdev.wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION)
181 | BIT(NL80211_IFTYPE_ADHOC);
182 }
183
184 err = libipw_networks_allocate(ieee); 160 err = libipw_networks_allocate(ieee);
185 if (err) { 161 if (err) {
186 LIBIPW_ERROR("Unable to allocate beacon storage: %d\n", err); 162 LIBIPW_ERROR("Unable to allocate beacon storage: %d\n", err);
187 goto failed_free_wiphy; 163 goto failed_free_netdev;
188 } 164 }
189 libipw_networks_initialize(ieee); 165 libipw_networks_initialize(ieee);
190 166
@@ -217,31 +193,19 @@ struct net_device *alloc_ieee80211(int sizeof_priv, int monitor)
217 193
218 return dev; 194 return dev;
219 195
220failed_free_wiphy:
221 if (!monitor)
222 wiphy_free(ieee->wdev.wiphy);
223failed_free_netdev: 196failed_free_netdev:
224 free_netdev(dev); 197 free_netdev(dev);
225failed: 198failed:
226 return NULL; 199 return NULL;
227} 200}
228 201
229void free_ieee80211(struct net_device *dev, int monitor) 202void free_ieee80211(struct net_device *dev)
230{ 203{
231 struct libipw_device *ieee = netdev_priv(dev); 204 struct libipw_device *ieee = netdev_priv(dev);
232 205
233 lib80211_crypt_info_free(&ieee->crypt_info); 206 lib80211_crypt_info_free(&ieee->crypt_info);
234 207
235 libipw_networks_free(ieee); 208 libipw_networks_free(ieee);
236
237 /* free cfg80211 resources */
238 if (!monitor) {
239 wiphy_unregister(ieee->wdev.wiphy);
240 kfree(ieee->a_band.channels);
241 kfree(ieee->bg_band.channels);
242 wiphy_free(ieee->wdev.wiphy);
243 }
244
245 free_netdev(dev); 209 free_netdev(dev);
246} 210}
247 211
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index dc81e19674f..d4b49883b30 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -355,7 +355,7 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
355 355
356 init_timer(&rs_sta->rate_scale_flush); 356 init_timer(&rs_sta->rate_scale_flush);
357 rs_sta->rate_scale_flush.data = (unsigned long)rs_sta; 357 rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
358 rs_sta->rate_scale_flush.function = &iwl3945_bg_rate_scale_flush; 358 rs_sta->rate_scale_flush.function = iwl3945_bg_rate_scale_flush;
359 359
360 for (i = 0; i < IWL_RATE_COUNT_3945; i++) 360 for (i = 0; i < IWL_RATE_COUNT_3945; i++)
361 iwl3945_clear_window(&rs_sta->win[i]); 361 iwl3945_clear_window(&rs_sta->win[i]);
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index d348c265e86..a15962a19b2 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -411,7 +411,7 @@ static int p54p_open(struct ieee80211_hw *dev)
411 int err; 411 int err;
412 412
413 init_completion(&priv->boot_comp); 413 init_completion(&priv->boot_comp);
414 err = request_irq(priv->pdev->irq, &p54p_interrupt, 414 err = request_irq(priv->pdev->irq, p54p_interrupt,
415 IRQF_SHARED, "p54pci", dev); 415 IRQF_SHARED, "p54pci", dev);
416 if (err) { 416 if (err) {
417 dev_err(&priv->pdev->dev, "failed to register IRQ handler\n"); 417 dev_err(&priv->pdev->dev, "failed to register IRQ handler\n");
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 17e199546ee..92af9b96bb7 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -426,12 +426,16 @@ static const char p54u_romboot_3887[] = "~~~~";
426static int p54u_firmware_reset_3887(struct ieee80211_hw *dev) 426static int p54u_firmware_reset_3887(struct ieee80211_hw *dev)
427{ 427{
428 struct p54u_priv *priv = dev->priv; 428 struct p54u_priv *priv = dev->priv;
429 u8 buf[4]; 429 u8 *buf;
430 int ret; 430 int ret;
431 431
432 memcpy(&buf, p54u_romboot_3887, sizeof(buf)); 432 buf = kmalloc(4, GFP_KERNEL);
433 if (!buf)
434 return -ENOMEM;
435 memcpy(buf, p54u_romboot_3887, 4);
433 ret = p54u_bulk_msg(priv, P54U_PIPE_DATA, 436 ret = p54u_bulk_msg(priv, P54U_PIPE_DATA,
434 buf, sizeof(buf)); 437 buf, 4);
438 kfree(buf);
435 if (ret) 439 if (ret)
436 dev_err(&priv->udev->dev, "(p54usb) unable to jump to " 440 dev_err(&priv->udev->dev, "(p54usb) unable to jump to "
437 "boot ROM (%d)!\n", ret); 441 "boot ROM (%d)!\n", ret);
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index 16429c49139..a1a3dd15c66 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -548,7 +548,7 @@ static int rtl8180_start(struct ieee80211_hw *dev)
548 rtl818x_iowrite32(priv, &priv->map->TNPDA, priv->tx_ring[1].dma); 548 rtl818x_iowrite32(priv, &priv->map->TNPDA, priv->tx_ring[1].dma);
549 rtl818x_iowrite32(priv, &priv->map->TLPDA, priv->tx_ring[0].dma); 549 rtl818x_iowrite32(priv, &priv->map->TLPDA, priv->tx_ring[0].dma);
550 550
551 ret = request_irq(priv->pdev->irq, &rtl8180_interrupt, 551 ret = request_irq(priv->pdev->irq, rtl8180_interrupt,
552 IRQF_SHARED, KBUILD_MODNAME, dev); 552 IRQF_SHARED, KBUILD_MODNAME, dev);
553 if (ret) { 553 if (ret) {
554 printk(KERN_ERR "%s: failed to register IRQ handler\n", 554 printk(KERN_ERR "%s: failed to register IRQ handler\n",
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
index 96eddb3b1d0..6cab5a62f99 100644
--- a/drivers/s390/net/Makefile
+++ b/drivers/s390/net/Makefile
@@ -3,11 +3,11 @@
3# 3#
4 4
5ctcm-y += ctcm_main.o ctcm_fsms.o ctcm_mpc.o ctcm_sysfs.o ctcm_dbug.o 5ctcm-y += ctcm_main.o ctcm_fsms.o ctcm_mpc.o ctcm_sysfs.o ctcm_dbug.o
6obj-$(CONFIG_CTCM) += ctcm.o fsm.o cu3088.o 6obj-$(CONFIG_CTCM) += ctcm.o fsm.o
7obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o 7obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
8obj-$(CONFIG_SMSGIUCV) += smsgiucv.o 8obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
9obj-$(CONFIG_LCS) += lcs.o cu3088.o 9obj-$(CONFIG_LCS) += lcs.o
10obj-$(CONFIG_CLAW) += claw.o cu3088.o 10obj-$(CONFIG_CLAW) += claw.o
11qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o 11qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o
12obj-$(CONFIG_QETH) += qeth.o 12obj-$(CONFIG_QETH) += qeth.o
13qeth_l2-y += qeth_l2_main.o 13qeth_l2-y += qeth_l2_main.o
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index c63babefb69..3c77bfe0764 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -90,7 +90,6 @@
90#include <linux/timer.h> 90#include <linux/timer.h>
91#include <linux/types.h> 91#include <linux/types.h>
92 92
93#include "cu3088.h"
94#include "claw.h" 93#include "claw.h"
95 94
96/* 95/*
@@ -258,6 +257,9 @@ static int claw_pm_prepare(struct ccwgroup_device *gdev)
258 return -EPERM; 257 return -EPERM;
259} 258}
260 259
260/* the root device for claw group devices */
261static struct device *claw_root_dev;
262
261/* ccwgroup table */ 263/* ccwgroup table */
262 264
263static struct ccwgroup_driver claw_group_driver = { 265static struct ccwgroup_driver claw_group_driver = {
@@ -272,6 +274,47 @@ static struct ccwgroup_driver claw_group_driver = {
272 .prepare = claw_pm_prepare, 274 .prepare = claw_pm_prepare,
273}; 275};
274 276
277static struct ccw_device_id claw_ids[] = {
278 {CCW_DEVICE(0x3088, 0x61), .driver_info = claw_channel_type_claw},
279 {},
280};
281MODULE_DEVICE_TABLE(ccw, claw_ids);
282
283static struct ccw_driver claw_ccw_driver = {
284 .owner = THIS_MODULE,
285 .name = "claw",
286 .ids = claw_ids,
287 .probe = ccwgroup_probe_ccwdev,
288 .remove = ccwgroup_remove_ccwdev,
289};
290
291static ssize_t
292claw_driver_group_store(struct device_driver *ddrv, const char *buf,
293 size_t count)
294{
295 int err;
296 err = ccwgroup_create_from_string(claw_root_dev,
297 claw_group_driver.driver_id,
298 &claw_ccw_driver, 3, buf);
299 return err ? err : count;
300}
301
302static DRIVER_ATTR(group, 0200, NULL, claw_driver_group_store);
303
304static struct attribute *claw_group_attrs[] = {
305 &driver_attr_group.attr,
306 NULL,
307};
308
309static struct attribute_group claw_group_attr_group = {
310 .attrs = claw_group_attrs,
311};
312
313static const struct attribute_group *claw_group_attr_groups[] = {
314 &claw_group_attr_group,
315 NULL,
316};
317
275/* 318/*
276* Key functions 319* Key functions
277*/ 320*/
@@ -3326,7 +3369,11 @@ claw_remove_files(struct device *dev)
3326static void __exit 3369static void __exit
3327claw_cleanup(void) 3370claw_cleanup(void)
3328{ 3371{
3329 unregister_cu3088_discipline(&claw_group_driver); 3372 driver_remove_file(&claw_group_driver.driver,
3373 &driver_attr_group);
3374 ccwgroup_driver_unregister(&claw_group_driver);
3375 ccw_driver_unregister(&claw_ccw_driver);
3376 root_device_unregister(claw_root_dev);
3330 claw_unregister_debug_facility(); 3377 claw_unregister_debug_facility();
3331 pr_info("Driver unloaded\n"); 3378 pr_info("Driver unloaded\n");
3332 3379
@@ -3348,16 +3395,31 @@ claw_init(void)
3348 if (ret) { 3395 if (ret) {
3349 pr_err("Registering with the S/390 debug feature" 3396 pr_err("Registering with the S/390 debug feature"
3350 " failed with error code %d\n", ret); 3397 " failed with error code %d\n", ret);
3351 return ret; 3398 goto out_err;
3352 } 3399 }
3353 CLAW_DBF_TEXT(2, setup, "init_mod"); 3400 CLAW_DBF_TEXT(2, setup, "init_mod");
3354 ret = register_cu3088_discipline(&claw_group_driver); 3401 claw_root_dev = root_device_register("qeth");
3355 if (ret) { 3402 ret = IS_ERR(claw_root_dev) ? PTR_ERR(claw_root_dev) : 0;
3356 CLAW_DBF_TEXT(2, setup, "init_bad"); 3403 if (ret)
3357 claw_unregister_debug_facility(); 3404 goto register_err;
3358 pr_err("Registering with the cu3088 device driver failed " 3405 ret = ccw_driver_register(&claw_ccw_driver);
3359 "with error code %d\n", ret); 3406 if (ret)
3360 } 3407 goto ccw_err;
3408 claw_group_driver.driver.groups = claw_group_attr_groups;
3409 ret = ccwgroup_driver_register(&claw_group_driver);
3410 if (ret)
3411 goto ccwgroup_err;
3412 return 0;
3413
3414ccwgroup_err:
3415 ccw_driver_unregister(&claw_ccw_driver);
3416ccw_err:
3417 root_device_unregister(claw_root_dev);
3418register_err:
3419 CLAW_DBF_TEXT(2, setup, "init_bad");
3420 claw_unregister_debug_facility();
3421out_err:
3422 pr_err("Initializing the claw device driver failed\n");
3361 return ret; 3423 return ret;
3362} 3424}
3363 3425
diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h
index 005072c420d..46d59a13db1 100644
--- a/drivers/s390/net/claw.h
+++ b/drivers/s390/net/claw.h
@@ -129,6 +129,18 @@ static inline int claw_dbf_passes(debug_info_t *dbf_grp, int level)
129 } \ 129 } \
130 } while (0) 130 } while (0)
131 131
132/**
133 * Enum for classifying detected devices.
134 */
135enum claw_channel_types {
136 /* Device is not a channel */
137 claw_channel_type_none,
138
139 /* Device is a CLAW channel device */
140 claw_channel_type_claw
141};
142
143
132/******************************************************* 144/*******************************************************
133* Define Control Blocks * 145* Define Control Blocks *
134* * 146* *
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 4ded9ac2c5e..70eb7f13841 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -44,7 +44,6 @@
44#include <asm/idals.h> 44#include <asm/idals.h>
45 45
46#include "fsm.h" 46#include "fsm.h"
47#include "cu3088.h"
48 47
49#include "ctcm_dbug.h" 48#include "ctcm_dbug.h"
50#include "ctcm_main.h" 49#include "ctcm_main.h"
diff --git a/drivers/s390/net/ctcm_fsms.h b/drivers/s390/net/ctcm_fsms.h
index 2326aba9807..046d077fabb 100644
--- a/drivers/s390/net/ctcm_fsms.h
+++ b/drivers/s390/net/ctcm_fsms.h
@@ -39,7 +39,6 @@
39#include <asm/idals.h> 39#include <asm/idals.h>
40 40
41#include "fsm.h" 41#include "fsm.h"
42#include "cu3088.h"
43#include "ctcm_main.h" 42#include "ctcm_main.h"
44 43
45/* 44/*
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index db054ed1a8c..e35713dd050 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -51,12 +51,16 @@
51 51
52#include <asm/idals.h> 52#include <asm/idals.h>
53 53
54#include "cu3088.h"
55#include "ctcm_fsms.h" 54#include "ctcm_fsms.h"
56#include "ctcm_main.h" 55#include "ctcm_main.h"
57 56
58/* Some common global variables */ 57/* Some common global variables */
59 58
59/**
60 * The root device for ctcm group devices
61 */
62static struct device *ctcm_root_dev;
63
60/* 64/*
61 * Linked list of all detected channels. 65 * Linked list of all detected channels.
62 */ 66 */
@@ -246,7 +250,7 @@ static void channel_remove(struct channel *ch)
246 * 250 *
247 * returns Pointer to a channel or NULL if no matching channel available. 251 * returns Pointer to a channel or NULL if no matching channel available.
248 */ 252 */
249static struct channel *channel_get(enum channel_types type, 253static struct channel *channel_get(enum ctcm_channel_types type,
250 char *id, int direction) 254 char *id, int direction)
251{ 255{
252 struct channel *ch = channels; 256 struct channel *ch = channels;
@@ -1342,7 +1346,7 @@ static int ctcm_probe_device(struct ccwgroup_device *cgdev)
1342 * 1346 *
1343 * returns 0 on success, !0 on error. 1347 * returns 0 on success, !0 on error.
1344 */ 1348 */
1345static int add_channel(struct ccw_device *cdev, enum channel_types type, 1349static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type,
1346 struct ctcm_priv *priv) 1350 struct ctcm_priv *priv)
1347{ 1351{
1348 struct channel **c = &channels; 1352 struct channel **c = &channels;
@@ -1501,13 +1505,13 @@ free_return: /* note that all channel pointers are 0 or valid */
1501/* 1505/*
1502 * Return type of a detected device. 1506 * Return type of a detected device.
1503 */ 1507 */
1504static enum channel_types get_channel_type(struct ccw_device_id *id) 1508static enum ctcm_channel_types get_channel_type(struct ccw_device_id *id)
1505{ 1509{
1506 enum channel_types type; 1510 enum ctcm_channel_types type;
1507 type = (enum channel_types)id->driver_info; 1511 type = (enum ctcm_channel_types)id->driver_info;
1508 1512
1509 if (type == channel_type_ficon) 1513 if (type == ctcm_channel_type_ficon)
1510 type = channel_type_escon; 1514 type = ctcm_channel_type_escon;
1511 1515
1512 return type; 1516 return type;
1513} 1517}
@@ -1525,7 +1529,7 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
1525 char read_id[CTCM_ID_SIZE]; 1529 char read_id[CTCM_ID_SIZE];
1526 char write_id[CTCM_ID_SIZE]; 1530 char write_id[CTCM_ID_SIZE];
1527 int direction; 1531 int direction;
1528 enum channel_types type; 1532 enum ctcm_channel_types type;
1529 struct ctcm_priv *priv; 1533 struct ctcm_priv *priv;
1530 struct net_device *dev; 1534 struct net_device *dev;
1531 struct ccw_device *cdev0; 1535 struct ccw_device *cdev0;
@@ -1720,6 +1724,11 @@ static int ctcm_pm_suspend(struct ccwgroup_device *gdev)
1720 return 0; 1724 return 0;
1721 netif_device_detach(priv->channel[READ]->netdev); 1725 netif_device_detach(priv->channel[READ]->netdev);
1722 ctcm_close(priv->channel[READ]->netdev); 1726 ctcm_close(priv->channel[READ]->netdev);
1727 if (!wait_event_timeout(priv->fsm->wait_q,
1728 fsm_getstate(priv->fsm) == DEV_STATE_STOPPED, CTCM_TIME_5_SEC)) {
1729 netif_device_attach(priv->channel[READ]->netdev);
1730 return -EBUSY;
1731 }
1723 ccw_device_set_offline(gdev->cdev[1]); 1732 ccw_device_set_offline(gdev->cdev[1]);
1724 ccw_device_set_offline(gdev->cdev[0]); 1733 ccw_device_set_offline(gdev->cdev[0]);
1725 return 0; 1734 return 0;
@@ -1744,6 +1753,22 @@ err_out:
1744 return rc; 1753 return rc;
1745} 1754}
1746 1755
1756static struct ccw_device_id ctcm_ids[] = {
1757 {CCW_DEVICE(0x3088, 0x08), .driver_info = ctcm_channel_type_parallel},
1758 {CCW_DEVICE(0x3088, 0x1e), .driver_info = ctcm_channel_type_ficon},
1759 {CCW_DEVICE(0x3088, 0x1f), .driver_info = ctcm_channel_type_escon},
1760 {},
1761};
1762MODULE_DEVICE_TABLE(ccw, ctcm_ids);
1763
1764static struct ccw_driver ctcm_ccw_driver = {
1765 .owner = THIS_MODULE,
1766 .name = "ctcm",
1767 .ids = ctcm_ids,
1768 .probe = ccwgroup_probe_ccwdev,
1769 .remove = ccwgroup_remove_ccwdev,
1770};
1771
1747static struct ccwgroup_driver ctcm_group_driver = { 1772static struct ccwgroup_driver ctcm_group_driver = {
1748 .owner = THIS_MODULE, 1773 .owner = THIS_MODULE,
1749 .name = CTC_DRIVER_NAME, 1774 .name = CTC_DRIVER_NAME,
@@ -1758,6 +1783,33 @@ static struct ccwgroup_driver ctcm_group_driver = {
1758 .restore = ctcm_pm_resume, 1783 .restore = ctcm_pm_resume,
1759}; 1784};
1760 1785
1786static ssize_t
1787ctcm_driver_group_store(struct device_driver *ddrv, const char *buf,
1788 size_t count)
1789{
1790 int err;
1791
1792 err = ccwgroup_create_from_string(ctcm_root_dev,
1793 ctcm_group_driver.driver_id,
1794 &ctcm_ccw_driver, 2, buf);
1795 return err ? err : count;
1796}
1797
1798static DRIVER_ATTR(group, 0200, NULL, ctcm_driver_group_store);
1799
1800static struct attribute *ctcm_group_attrs[] = {
1801 &driver_attr_group.attr,
1802 NULL,
1803};
1804
1805static struct attribute_group ctcm_group_attr_group = {
1806 .attrs = ctcm_group_attrs,
1807};
1808
1809static const struct attribute_group *ctcm_group_attr_groups[] = {
1810 &ctcm_group_attr_group,
1811 NULL,
1812};
1761 1813
1762/* 1814/*
1763 * Module related routines 1815 * Module related routines
@@ -1771,7 +1823,10 @@ static struct ccwgroup_driver ctcm_group_driver = {
1771 */ 1823 */
1772static void __exit ctcm_exit(void) 1824static void __exit ctcm_exit(void)
1773{ 1825{
1774 unregister_cu3088_discipline(&ctcm_group_driver); 1826 driver_remove_file(&ctcm_group_driver.driver, &driver_attr_group);
1827 ccwgroup_driver_unregister(&ctcm_group_driver);
1828 ccw_driver_unregister(&ctcm_ccw_driver);
1829 root_device_unregister(ctcm_root_dev);
1775 ctcm_unregister_dbf_views(); 1830 ctcm_unregister_dbf_views();
1776 pr_info("CTCM driver unloaded\n"); 1831 pr_info("CTCM driver unloaded\n");
1777} 1832}
@@ -1797,17 +1852,31 @@ static int __init ctcm_init(void)
1797 channels = NULL; 1852 channels = NULL;
1798 1853
1799 ret = ctcm_register_dbf_views(); 1854 ret = ctcm_register_dbf_views();
1800 if (ret) { 1855 if (ret)
1801 return ret; 1856 goto out_err;
1802 } 1857 ctcm_root_dev = root_device_register("ctcm");
1803 ret = register_cu3088_discipline(&ctcm_group_driver); 1858 ret = IS_ERR(ctcm_root_dev) ? PTR_ERR(ctcm_root_dev) : 0;
1804 if (ret) { 1859 if (ret)
1805 ctcm_unregister_dbf_views(); 1860 goto register_err;
1806 pr_err("%s / register_cu3088_discipline failed, ret = %d\n", 1861 ret = ccw_driver_register(&ctcm_ccw_driver);
1807 __func__, ret); 1862 if (ret)
1808 return ret; 1863 goto ccw_err;
1809 } 1864 ctcm_group_driver.driver.groups = ctcm_group_attr_groups;
1865 ret = ccwgroup_driver_register(&ctcm_group_driver);
1866 if (ret)
1867 goto ccwgroup_err;
1810 print_banner(); 1868 print_banner();
1869 return 0;
1870
1871ccwgroup_err:
1872 ccw_driver_unregister(&ctcm_ccw_driver);
1873ccw_err:
1874 root_device_unregister(ctcm_root_dev);
1875register_err:
1876 ctcm_unregister_dbf_views();
1877out_err:
1878 pr_err("%s / Initializing the ctcm device driver failed, ret = %d\n",
1879 __func__, ret);
1811 return ret; 1880 return ret;
1812} 1881}
1813 1882
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h
index d925e732b7d..d34fa14f44e 100644
--- a/drivers/s390/net/ctcm_main.h
+++ b/drivers/s390/net/ctcm_main.h
@@ -16,7 +16,6 @@
16#include <linux/netdevice.h> 16#include <linux/netdevice.h>
17 17
18#include "fsm.h" 18#include "fsm.h"
19#include "cu3088.h"
20#include "ctcm_dbug.h" 19#include "ctcm_dbug.h"
21#include "ctcm_mpc.h" 20#include "ctcm_mpc.h"
22 21
@@ -66,6 +65,23 @@
66 ctcmpc_dumpit(buf, len); \ 65 ctcmpc_dumpit(buf, len); \
67 } while (0) 66 } while (0)
68 67
68/**
69 * Enum for classifying detected devices
70 */
71enum ctcm_channel_types {
72 /* Device is not a channel */
73 ctcm_channel_type_none,
74
75 /* Device is a CTC/A */
76 ctcm_channel_type_parallel,
77
78 /* Device is a FICON channel */
79 ctcm_channel_type_ficon,
80
81 /* Device is a ESCON channel */
82 ctcm_channel_type_escon
83};
84
69/* 85/*
70 * CCW commands, used in this driver. 86 * CCW commands, used in this driver.
71 */ 87 */
@@ -121,7 +137,7 @@ struct channel {
121 * Type of this channel. 137 * Type of this channel.
122 * CTC/A or Escon for valid channels. 138 * CTC/A or Escon for valid channels.
123 */ 139 */
124 enum channel_types type; 140 enum ctcm_channel_types type;
125 /* 141 /*
126 * Misc. flags. See CHANNEL_FLAGS_... below 142 * Misc. flags. See CHANNEL_FLAGS_... below
127 */ 143 */
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 781e18be7e8..5978b390153 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -53,7 +53,6 @@
53#include <linux/moduleparam.h> 53#include <linux/moduleparam.h>
54#include <asm/idals.h> 54#include <asm/idals.h>
55 55
56#include "cu3088.h"
57#include "ctcm_mpc.h" 56#include "ctcm_mpc.h"
58#include "ctcm_main.h" 57#include "ctcm_main.h"
59#include "ctcm_fsms.h" 58#include "ctcm_fsms.h"
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
index 8452bb052d6..738ad26c74a 100644
--- a/drivers/s390/net/ctcm_sysfs.c
+++ b/drivers/s390/net/ctcm_sysfs.c
@@ -158,6 +158,15 @@ static ssize_t ctcm_proto_store(struct device *dev,
158 return count; 158 return count;
159} 159}
160 160
161const char *ctcm_type[] = {
162 "not a channel",
163 "CTC/A",
164 "FICON channel",
165 "ESCON channel",
166 "unknown channel type",
167 "unsupported channel type",
168};
169
161static ssize_t ctcm_type_show(struct device *dev, 170static ssize_t ctcm_type_show(struct device *dev,
162 struct device_attribute *attr, char *buf) 171 struct device_attribute *attr, char *buf)
163{ 172{
@@ -168,7 +177,7 @@ static ssize_t ctcm_type_show(struct device *dev,
168 return -ENODEV; 177 return -ENODEV;
169 178
170 return sprintf(buf, "%s\n", 179 return sprintf(buf, "%s\n",
171 cu3088_type[cgdev->cdev[0]->id.driver_info]); 180 ctcm_type[cgdev->cdev[0]->id.driver_info]);
172} 181}
173 182
174static DEVICE_ATTR(buffer, 0644, ctcm_buffer_show, ctcm_buffer_write); 183static DEVICE_ATTR(buffer, 0644, ctcm_buffer_show, ctcm_buffer_write);
diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c
deleted file mode 100644
index 48383459e99..00000000000
--- a/drivers/s390/net/cu3088.c
+++ /dev/null
@@ -1,148 +0,0 @@
1/*
2 * CTC / LCS ccw_device driver
3 *
4 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Arnd Bergmann <arndb@de.ibm.com>
6 * Cornelia Huck <cornelia.huck@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 */
23
24#include <linux/init.h>
25#include <linux/module.h>
26#include <linux/err.h>
27
28#include <asm/ccwdev.h>
29#include <asm/ccwgroup.h>
30
31#include "cu3088.h"
32
33const char *cu3088_type[] = {
34 "not a channel",
35 "CTC/A",
36 "ESCON channel",
37 "FICON channel",
38 "OSA LCS card",
39 "CLAW channel device",
40 "unknown channel type",
41 "unsupported channel type",
42};
43
44/* static definitions */
45
46static struct ccw_device_id cu3088_ids[] = {
47 { CCW_DEVICE(0x3088, 0x08), .driver_info = channel_type_parallel },
48 { CCW_DEVICE(0x3088, 0x1f), .driver_info = channel_type_escon },
49 { CCW_DEVICE(0x3088, 0x1e), .driver_info = channel_type_ficon },
50 { CCW_DEVICE(0x3088, 0x60), .driver_info = channel_type_osa2 },
51 { CCW_DEVICE(0x3088, 0x61), .driver_info = channel_type_claw },
52 { /* end of list */ }
53};
54
55static struct ccw_driver cu3088_driver;
56
57static struct device *cu3088_root_dev;
58
59static ssize_t
60group_write(struct device_driver *drv, const char *buf, size_t count)
61{
62 int ret;
63 struct ccwgroup_driver *cdrv;
64
65 cdrv = to_ccwgroupdrv(drv);
66 if (!cdrv)
67 return -EINVAL;
68 ret = ccwgroup_create_from_string(cu3088_root_dev, cdrv->driver_id,
69 &cu3088_driver, 2, buf);
70
71 return (ret == 0) ? count : ret;
72}
73
74static DRIVER_ATTR(group, 0200, NULL, group_write);
75
76/* Register-unregister for ctc&lcs */
77int
78register_cu3088_discipline(struct ccwgroup_driver *dcp)
79{
80 int rc;
81
82 if (!dcp)
83 return -EINVAL;
84
85 /* Register discipline.*/
86 rc = ccwgroup_driver_register(dcp);
87 if (rc)
88 return rc;
89
90 rc = driver_create_file(&dcp->driver, &driver_attr_group);
91 if (rc)
92 ccwgroup_driver_unregister(dcp);
93
94 return rc;
95
96}
97
98void
99unregister_cu3088_discipline(struct ccwgroup_driver *dcp)
100{
101 if (!dcp)
102 return;
103
104 driver_remove_file(&dcp->driver, &driver_attr_group);
105 ccwgroup_driver_unregister(dcp);
106}
107
108static struct ccw_driver cu3088_driver = {
109 .owner = THIS_MODULE,
110 .ids = cu3088_ids,
111 .name = "cu3088",
112 .probe = ccwgroup_probe_ccwdev,
113 .remove = ccwgroup_remove_ccwdev,
114};
115
116/* module setup */
117static int __init
118cu3088_init (void)
119{
120 int rc;
121
122 cu3088_root_dev = root_device_register("cu3088");
123 if (IS_ERR(cu3088_root_dev))
124 return PTR_ERR(cu3088_root_dev);
125 rc = ccw_driver_register(&cu3088_driver);
126 if (rc)
127 root_device_unregister(cu3088_root_dev);
128
129 return rc;
130}
131
132static void __exit
133cu3088_exit (void)
134{
135 ccw_driver_unregister(&cu3088_driver);
136 root_device_unregister(cu3088_root_dev);
137}
138
139MODULE_DEVICE_TABLE(ccw,cu3088_ids);
140MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
141MODULE_LICENSE("GPL");
142
143module_init(cu3088_init);
144module_exit(cu3088_exit);
145
146EXPORT_SYMBOL_GPL(cu3088_type);
147EXPORT_SYMBOL_GPL(register_cu3088_discipline);
148EXPORT_SYMBOL_GPL(unregister_cu3088_discipline);
diff --git a/drivers/s390/net/cu3088.h b/drivers/s390/net/cu3088.h
deleted file mode 100644
index d8558a7105a..00000000000
--- a/drivers/s390/net/cu3088.h
+++ /dev/null
@@ -1,41 +0,0 @@
1#ifndef _CU3088_H
2#define _CU3088_H
3
4/**
5 * Enum for classifying detected devices.
6 */
7enum channel_types {
8 /* Device is not a channel */
9 channel_type_none,
10
11 /* Device is a CTC/A */
12 channel_type_parallel,
13
14 /* Device is a ESCON channel */
15 channel_type_escon,
16
17 /* Device is a FICON channel */
18 channel_type_ficon,
19
20 /* Device is a OSA2 card */
21 channel_type_osa2,
22
23 /* Device is a CLAW channel device */
24 channel_type_claw,
25
26 /* Device is a channel, but we don't know
27 * anything about it */
28 channel_type_unknown,
29
30 /* Device is an unsupported model */
31 channel_type_unsupported,
32
33 /* number of type entries */
34 num_channel_types
35};
36
37extern const char *cu3088_type[num_channel_types];
38extern int register_cu3088_discipline(struct ccwgroup_driver *);
39extern void unregister_cu3088_discipline(struct ccwgroup_driver *);
40
41#endif
diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c
index 2c1db8036b7..cae48cbc5e9 100644
--- a/drivers/s390/net/fsm.c
+++ b/drivers/s390/net/fsm.c
@@ -27,6 +27,7 @@ init_fsm(char *name, const char **state_names, const char **event_names, int nr_
27 return NULL; 27 return NULL;
28 } 28 }
29 strlcpy(this->name, name, sizeof(this->name)); 29 strlcpy(this->name, name, sizeof(this->name));
30 init_waitqueue_head(&this->wait_q);
30 31
31 f = kzalloc(sizeof(fsm), order); 32 f = kzalloc(sizeof(fsm), order);
32 if (f == NULL) { 33 if (f == NULL) {
diff --git a/drivers/s390/net/fsm.h b/drivers/s390/net/fsm.h
index af679c10f1b..1e8b235d95b 100644
--- a/drivers/s390/net/fsm.h
+++ b/drivers/s390/net/fsm.h
@@ -66,6 +66,7 @@ typedef struct fsm_instance_t {
66 char name[16]; 66 char name[16];
67 void *userdata; 67 void *userdata;
68 int userint; 68 int userint;
69 wait_queue_head_t wait_q;
69#if FSM_DEBUG_HISTORY 70#if FSM_DEBUG_HISTORY
70 int history_index; 71 int history_index;
71 int history_size; 72 int history_size;
@@ -197,6 +198,7 @@ fsm_newstate(fsm_instance *fi, int newstate)
197 printk(KERN_DEBUG "fsm(%s): New state %s\n", fi->name, 198 printk(KERN_DEBUG "fsm(%s): New state %s\n", fi->name,
198 fi->f->state_names[newstate]); 199 fi->f->state_names[newstate]);
199#endif 200#endif
201 wake_up(&fi->wait_q);
200} 202}
201 203
202/** 204/**
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 5e46415d3e1..f6cc46dc050 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -47,7 +47,6 @@
47#include <asm/ccwgroup.h> 47#include <asm/ccwgroup.h>
48 48
49#include "lcs.h" 49#include "lcs.h"
50#include "cu3088.h"
51 50
52 51
53#if !defined(CONFIG_NET_ETHERNET) && \ 52#if !defined(CONFIG_NET_ETHERNET) && \
@@ -60,7 +59,11 @@
60 */ 59 */
61 60
62static char version[] __initdata = "LCS driver"; 61static char version[] __initdata = "LCS driver";
63static char debug_buffer[255]; 62
63/**
64 * the root device for lcs group devices
65 */
66static struct device *lcs_root_dev;
64 67
65/** 68/**
66 * Some prototypes. 69 * Some prototypes.
@@ -76,6 +79,7 @@ static int lcs_recovery(void *ptr);
76/** 79/**
77 * Debug Facility Stuff 80 * Debug Facility Stuff
78 */ 81 */
82static char debug_buffer[255];
79static debug_info_t *lcs_dbf_setup; 83static debug_info_t *lcs_dbf_setup;
80static debug_info_t *lcs_dbf_trace; 84static debug_info_t *lcs_dbf_trace;
81 85
@@ -1968,6 +1972,15 @@ lcs_portno_store (struct device *dev, struct device_attribute *attr, const char
1968 1972
1969static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store); 1973static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store);
1970 1974
1975const char *lcs_type[] = {
1976 "not a channel",
1977 "2216 parallel",
1978 "2216 channel",
1979 "OSA LCS card",
1980 "unknown channel type",
1981 "unsupported channel type",
1982};
1983
1971static ssize_t 1984static ssize_t
1972lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf) 1985lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf)
1973{ 1986{
@@ -1977,7 +1990,7 @@ lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf)
1977 if (!cgdev) 1990 if (!cgdev)
1978 return -ENODEV; 1991 return -ENODEV;
1979 1992
1980 return sprintf(buf, "%s\n", cu3088_type[cgdev->cdev[0]->id.driver_info]); 1993 return sprintf(buf, "%s\n", lcs_type[cgdev->cdev[0]->id.driver_info]);
1981} 1994}
1982 1995
1983static DEVICE_ATTR(type, 0444, lcs_type_show, NULL); 1996static DEVICE_ATTR(type, 0444, lcs_type_show, NULL);
@@ -2370,6 +2383,22 @@ static int lcs_restore(struct ccwgroup_device *gdev)
2370 return lcs_pm_resume(card); 2383 return lcs_pm_resume(card);
2371} 2384}
2372 2385
2386static struct ccw_device_id lcs_ids[] = {
2387 {CCW_DEVICE(0x3088, 0x08), .driver_info = lcs_channel_type_parallel},
2388 {CCW_DEVICE(0x3088, 0x1f), .driver_info = lcs_channel_type_2216},
2389 {CCW_DEVICE(0x3088, 0x60), .driver_info = lcs_channel_type_osa2},
2390 {},
2391};
2392MODULE_DEVICE_TABLE(ccw, lcs_ids);
2393
2394static struct ccw_driver lcs_ccw_driver = {
2395 .owner = THIS_MODULE,
2396 .name = "lcs",
2397 .ids = lcs_ids,
2398 .probe = ccwgroup_probe_ccwdev,
2399 .remove = ccwgroup_remove_ccwdev,
2400};
2401
2373/** 2402/**
2374 * LCS ccwgroup driver registration 2403 * LCS ccwgroup driver registration
2375 */ 2404 */
@@ -2389,6 +2418,33 @@ static struct ccwgroup_driver lcs_group_driver = {
2389 .restore = lcs_restore, 2418 .restore = lcs_restore,
2390}; 2419};
2391 2420
2421static ssize_t
2422lcs_driver_group_store(struct device_driver *ddrv, const char *buf,
2423 size_t count)
2424{
2425 int err;
2426 err = ccwgroup_create_from_string(lcs_root_dev,
2427 lcs_group_driver.driver_id,
2428 &lcs_ccw_driver, 2, buf);
2429 return err ? err : count;
2430}
2431
2432static DRIVER_ATTR(group, 0200, NULL, lcs_driver_group_store);
2433
2434static struct attribute *lcs_group_attrs[] = {
2435 &driver_attr_group.attr,
2436 NULL,
2437};
2438
2439static struct attribute_group lcs_group_attr_group = {
2440 .attrs = lcs_group_attrs,
2441};
2442
2443static const struct attribute_group *lcs_group_attr_groups[] = {
2444 &lcs_group_attr_group,
2445 NULL,
2446};
2447
2392/** 2448/**
2393 * LCS Module/Kernel initialization function 2449 * LCS Module/Kernel initialization function
2394 */ 2450 */
@@ -2400,17 +2456,30 @@ __init lcs_init_module(void)
2400 pr_info("Loading %s\n", version); 2456 pr_info("Loading %s\n", version);
2401 rc = lcs_register_debug_facility(); 2457 rc = lcs_register_debug_facility();
2402 LCS_DBF_TEXT(0, setup, "lcsinit"); 2458 LCS_DBF_TEXT(0, setup, "lcsinit");
2403 if (rc) { 2459 if (rc)
2404 pr_err("Initialization failed\n"); 2460 goto out_err;
2405 return rc; 2461 lcs_root_dev = root_device_register("lcs");
2406 } 2462 rc = IS_ERR(lcs_root_dev) ? PTR_ERR(lcs_root_dev) : 0;
2407 2463 if (rc)
2408 rc = register_cu3088_discipline(&lcs_group_driver); 2464 goto register_err;
2409 if (rc) { 2465 rc = ccw_driver_register(&lcs_ccw_driver);
2410 pr_err("Initialization failed\n"); 2466 if (rc)
2411 return rc; 2467 goto ccw_err;
2412 } 2468 lcs_group_driver.driver.groups = lcs_group_attr_groups;
2469 rc = ccwgroup_driver_register(&lcs_group_driver);
2470 if (rc)
2471 goto ccwgroup_err;
2413 return 0; 2472 return 0;
2473
2474ccwgroup_err:
2475 ccw_driver_unregister(&lcs_ccw_driver);
2476ccw_err:
2477 root_device_unregister(lcs_root_dev);
2478register_err:
2479 lcs_unregister_debug_facility();
2480out_err:
2481 pr_err("Initializing the lcs device driver failed\n");
2482 return rc;
2414} 2483}
2415 2484
2416 2485
@@ -2422,7 +2491,11 @@ __exit lcs_cleanup_module(void)
2422{ 2491{
2423 pr_info("Terminating lcs module.\n"); 2492 pr_info("Terminating lcs module.\n");
2424 LCS_DBF_TEXT(0, trace, "cleanup"); 2493 LCS_DBF_TEXT(0, trace, "cleanup");
2425 unregister_cu3088_discipline(&lcs_group_driver); 2494 driver_remove_file(&lcs_group_driver.driver,
2495 &driver_attr_group);
2496 ccwgroup_driver_unregister(&lcs_group_driver);
2497 ccw_driver_unregister(&lcs_ccw_driver);
2498 root_device_unregister(lcs_root_dev);
2426 lcs_unregister_debug_facility(); 2499 lcs_unregister_debug_facility();
2427} 2500}
2428 2501
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
index 6d668642af2..8c03392ac83 100644
--- a/drivers/s390/net/lcs.h
+++ b/drivers/s390/net/lcs.h
@@ -36,6 +36,24 @@ static inline int lcs_dbf_passes(debug_info_t *dbf_grp, int level)
36#define CARD_FROM_DEV(cdev) \ 36#define CARD_FROM_DEV(cdev) \
37 (struct lcs_card *) dev_get_drvdata( \ 37 (struct lcs_card *) dev_get_drvdata( \
38 &((struct ccwgroup_device *)dev_get_drvdata(&cdev->dev))->dev); 38 &((struct ccwgroup_device *)dev_get_drvdata(&cdev->dev))->dev);
39
40/**
41 * Enum for classifying detected devices.
42 */
43enum lcs_channel_types {
44 /* Device is not a channel */
45 lcs_channel_type_none,
46
47 /* Device is a 2216 channel */
48 lcs_channel_type_parallel,
49
50 /* Device is a 2216 channel */
51 lcs_channel_type_2216,
52
53 /* Device is a OSA2 card */
54 lcs_channel_type_osa2
55};
56
39/** 57/**
40 * CCW commands used in this driver 58 * CCW commands used in this driver
41 */ 59 */
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index c84eadd3602..395c04c2b00 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -741,13 +741,13 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
741 if (single_flag) { 741 if (single_flag) {
742 if ((skb = skb_dequeue(&conn->commit_queue))) { 742 if ((skb = skb_dequeue(&conn->commit_queue))) {
743 atomic_dec(&skb->users); 743 atomic_dec(&skb->users);
744 dev_kfree_skb_any(skb);
745 if (privptr) { 744 if (privptr) {
746 privptr->stats.tx_packets++; 745 privptr->stats.tx_packets++;
747 privptr->stats.tx_bytes += 746 privptr->stats.tx_bytes +=
748 (skb->len - NETIUCV_HDRLEN 747 (skb->len - NETIUCV_HDRLEN
749 - NETIUCV_HDRLEN); 748 - NETIUCV_HDRLEN);
750 } 749 }
750 dev_kfree_skb_any(skb);
751 } 751 }
752 } 752 }
753 conn->tx_buff->data = conn->tx_buff->head; 753 conn->tx_buff->data = conn->tx_buff->head;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index e8f72d715eb..b232693378c 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -122,7 +122,6 @@ struct qeth_perf_stats {
122 __u64 outbound_do_qdio_start_time; 122 __u64 outbound_do_qdio_start_time;
123 unsigned int outbound_do_qdio_cnt; 123 unsigned int outbound_do_qdio_cnt;
124 unsigned int outbound_do_qdio_time; 124 unsigned int outbound_do_qdio_time;
125 /* eddp data */
126 unsigned int large_send_bytes; 125 unsigned int large_send_bytes;
127 unsigned int large_send_cnt; 126 unsigned int large_send_cnt;
128 unsigned int sg_skbs_sent; 127 unsigned int sg_skbs_sent;
@@ -135,6 +134,7 @@ struct qeth_perf_stats {
135 unsigned int sg_frags_rx; 134 unsigned int sg_frags_rx;
136 unsigned int sg_alloc_page_rx; 135 unsigned int sg_alloc_page_rx;
137 unsigned int tx_csum; 136 unsigned int tx_csum;
137 unsigned int tx_lin;
138}; 138};
139 139
140/* Routing stuff */ 140/* Routing stuff */
@@ -648,6 +648,7 @@ struct qeth_card_options {
648 enum qeth_large_send_types large_send; 648 enum qeth_large_send_types large_send;
649 int performance_stats; 649 int performance_stats;
650 int rx_sg_cb; 650 int rx_sg_cb;
651 enum qeth_ipa_isolation_modes isolation;
651}; 652};
652 653
653/* 654/*
@@ -776,7 +777,6 @@ static inline void qeth_put_buffer_pool_entry(struct qeth_card *card,
776 list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list); 777 list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
777} 778}
778 779
779struct qeth_eddp_context;
780extern struct ccwgroup_driver qeth_l2_ccwgroup_driver; 780extern struct ccwgroup_driver qeth_l2_ccwgroup_driver;
781extern struct ccwgroup_driver qeth_l3_ccwgroup_driver; 781extern struct ccwgroup_driver qeth_l3_ccwgroup_driver;
782const char *qeth_get_cardname_short(struct qeth_card *); 782const char *qeth_get_cardname_short(struct qeth_card *);
@@ -836,7 +836,6 @@ void qeth_prepare_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, char);
836struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *); 836struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *);
837int qeth_mdio_read(struct net_device *, int, int); 837int qeth_mdio_read(struct net_device *, int, int);
838int qeth_snmp_command(struct qeth_card *, char __user *); 838int qeth_snmp_command(struct qeth_card *, char __user *);
839int qeth_set_large_send(struct qeth_card *, enum qeth_large_send_types);
840struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *, __u32, __u32); 839struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *, __u32, __u32);
841int qeth_default_setadapterparms_cb(struct qeth_card *, struct qeth_reply *, 840int qeth_default_setadapterparms_cb(struct qeth_card *, struct qeth_reply *,
842 unsigned long); 841 unsigned long);
@@ -856,6 +855,7 @@ void qeth_core_get_strings(struct net_device *, u32, u8 *);
856void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *); 855void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
857void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *text, ...); 856void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *text, ...);
858int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *); 857int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *);
858int qeth_set_access_ctrl_online(struct qeth_card *card);
859 859
860/* exports for OSN */ 860/* exports for OSN */
861int qeth_osn_assist(struct net_device *, void *, int); 861int qeth_osn_assist(struct net_device *, void *, int);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index edee4dc6430..d34804d5ece 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -270,41 +270,6 @@ int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
270 return qeth_alloc_buffer_pool(card); 270 return qeth_alloc_buffer_pool(card);
271} 271}
272 272
273int qeth_set_large_send(struct qeth_card *card,
274 enum qeth_large_send_types type)
275{
276 int rc = 0;
277
278 if (card->dev == NULL) {
279 card->options.large_send = type;
280 return 0;
281 }
282 if (card->state == CARD_STATE_UP)
283 netif_tx_disable(card->dev);
284 card->options.large_send = type;
285 switch (card->options.large_send) {
286 case QETH_LARGE_SEND_TSO:
287 if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
288 card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
289 NETIF_F_HW_CSUM;
290 } else {
291 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
292 NETIF_F_HW_CSUM);
293 card->options.large_send = QETH_LARGE_SEND_NO;
294 rc = -EOPNOTSUPP;
295 }
296 break;
297 default: /* includes QETH_LARGE_SEND_NO */
298 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
299 NETIF_F_HW_CSUM);
300 break;
301 }
302 if (card->state == CARD_STATE_UP)
303 netif_wake_queue(card->dev);
304 return rc;
305}
306EXPORT_SYMBOL_GPL(qeth_set_large_send);
307
308static int qeth_issue_next_read(struct qeth_card *card) 273static int qeth_issue_next_read(struct qeth_card *card)
309{ 274{
310 int rc; 275 int rc;
@@ -1079,6 +1044,7 @@ static void qeth_set_intial_options(struct qeth_card *card)
1079 card->options.add_hhlen = DEFAULT_ADD_HHLEN; 1044 card->options.add_hhlen = DEFAULT_ADD_HHLEN;
1080 card->options.performance_stats = 0; 1045 card->options.performance_stats = 0;
1081 card->options.rx_sg_cb = QETH_RX_SG_CB; 1046 card->options.rx_sg_cb = QETH_RX_SG_CB;
1047 card->options.isolation = ISOLATION_MODE_NONE;
1082} 1048}
1083 1049
1084static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread) 1050static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
@@ -3389,6 +3355,156 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card)
3389} 3355}
3390EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr); 3356EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
3391 3357
3358static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
3359 struct qeth_reply *reply, unsigned long data)
3360{
3361 struct qeth_ipa_cmd *cmd;
3362 struct qeth_set_access_ctrl *access_ctrl_req;
3363 int rc;
3364
3365 QETH_DBF_TEXT(TRACE, 4, "setaccb");
3366
3367 cmd = (struct qeth_ipa_cmd *) data;
3368 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
3369 QETH_DBF_TEXT_(SETUP, 2, "setaccb");
3370 QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
3371 QETH_DBF_TEXT_(SETUP, 2, "rc=%d",
3372 cmd->data.setadapterparms.hdr.return_code);
3373 switch (cmd->data.setadapterparms.hdr.return_code) {
3374 case SET_ACCESS_CTRL_RC_SUCCESS:
3375 case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
3376 case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
3377 {
3378 card->options.isolation = access_ctrl_req->subcmd_code;
3379 if (card->options.isolation == ISOLATION_MODE_NONE) {
3380 dev_info(&card->gdev->dev,
3381 "QDIO data connection isolation is deactivated\n");
3382 } else {
3383 dev_info(&card->gdev->dev,
3384 "QDIO data connection isolation is activated\n");
3385 }
3386 QETH_DBF_MESSAGE(3, "OK:SET_ACCESS_CTRL(%s, %d)==%d\n",
3387 card->gdev->dev.kobj.name,
3388 access_ctrl_req->subcmd_code,
3389 cmd->data.setadapterparms.hdr.return_code);
3390 rc = 0;
3391 break;
3392 }
3393 case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
3394 {
3395 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%s,%d)==%d\n",
3396 card->gdev->dev.kobj.name,
3397 access_ctrl_req->subcmd_code,
3398 cmd->data.setadapterparms.hdr.return_code);
3399 dev_err(&card->gdev->dev, "Adapter does not "
3400 "support QDIO data connection isolation\n");
3401
3402 /* ensure isolation mode is "none" */
3403 card->options.isolation = ISOLATION_MODE_NONE;
3404 rc = -EOPNOTSUPP;
3405 break;
3406 }
3407 case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
3408 {
3409 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d\n",
3410 card->gdev->dev.kobj.name,
3411 access_ctrl_req->subcmd_code,
3412 cmd->data.setadapterparms.hdr.return_code);
3413 dev_err(&card->gdev->dev,
3414 "Adapter is dedicated. "
3415 "QDIO data connection isolation not supported\n");
3416
3417 /* ensure isolation mode is "none" */
3418 card->options.isolation = ISOLATION_MODE_NONE;
3419 rc = -EOPNOTSUPP;
3420 break;
3421 }
3422 case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
3423 {
3424 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d\n",
3425 card->gdev->dev.kobj.name,
3426 access_ctrl_req->subcmd_code,
3427 cmd->data.setadapterparms.hdr.return_code);
3428 dev_err(&card->gdev->dev,
3429 "TSO does not permit QDIO data connection isolation\n");
3430
3431 /* ensure isolation mode is "none" */
3432 card->options.isolation = ISOLATION_MODE_NONE;
3433 rc = -EPERM;
3434 break;
3435 }
3436 default:
3437 {
3438 /* this should never happen */
3439 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d"
3440 "==UNKNOWN\n",
3441 card->gdev->dev.kobj.name,
3442 access_ctrl_req->subcmd_code,
3443 cmd->data.setadapterparms.hdr.return_code);
3444
3445 /* ensure isolation mode is "none" */
3446 card->options.isolation = ISOLATION_MODE_NONE;
3447 rc = 0;
3448 break;
3449 }
3450 }
3451 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
3452 return rc;
3453}
3454
3455static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
3456 enum qeth_ipa_isolation_modes isolation)
3457{
3458 int rc;
3459 struct qeth_cmd_buffer *iob;
3460 struct qeth_ipa_cmd *cmd;
3461 struct qeth_set_access_ctrl *access_ctrl_req;
3462
3463 QETH_DBF_TEXT(TRACE, 4, "setacctl");
3464
3465 QETH_DBF_TEXT_(SETUP, 2, "setacctl");
3466 QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
3467
3468 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
3469 sizeof(struct qeth_ipacmd_setadpparms_hdr) +
3470 sizeof(struct qeth_set_access_ctrl));
3471 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
3472 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
3473 access_ctrl_req->subcmd_code = isolation;
3474
3475 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
3476 NULL);
3477 QETH_DBF_TEXT_(SETUP, 2, "rc=%d", rc);
3478 return rc;
3479}
3480
3481int qeth_set_access_ctrl_online(struct qeth_card *card)
3482{
3483 int rc = 0;
3484
3485 QETH_DBF_TEXT(TRACE, 4, "setactlo");
3486
3487 if (card->info.type == QETH_CARD_TYPE_OSAE &&
3488 qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
3489 rc = qeth_setadpparms_set_access_ctrl(card,
3490 card->options.isolation);
3491 if (rc) {
3492 QETH_DBF_MESSAGE(3,
3493 "IPA(SET_ACCESS_CTRL,%s,%d) sent failed",
3494 card->gdev->dev.kobj.name,
3495 rc);
3496 }
3497 } else if (card->options.isolation != ISOLATION_MODE_NONE) {
3498 card->options.isolation = ISOLATION_MODE_NONE;
3499
3500 dev_err(&card->gdev->dev, "Adapter does not "
3501 "support QDIO data connection isolation\n");
3502 rc = -EOPNOTSUPP;
3503 }
3504 return rc;
3505}
3506EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online);
3507
3392void qeth_tx_timeout(struct net_device *dev) 3508void qeth_tx_timeout(struct net_device *dev)
3393{ 3509{
3394 struct qeth_card *card; 3510 struct qeth_card *card;
@@ -3732,30 +3848,36 @@ static int qeth_core_driver_group(const char *buf, struct device *root_dev,
3732int qeth_core_hardsetup_card(struct qeth_card *card) 3848int qeth_core_hardsetup_card(struct qeth_card *card)
3733{ 3849{
3734 struct qdio_ssqd_desc *ssqd; 3850 struct qdio_ssqd_desc *ssqd;
3735 int retries = 3; 3851 int retries = 0;
3736 int mpno = 0; 3852 int mpno = 0;
3737 int rc; 3853 int rc;
3738 3854
3739 QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); 3855 QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
3740 atomic_set(&card->force_alloc_skb, 0); 3856 atomic_set(&card->force_alloc_skb, 0);
3741retry: 3857retry:
3742 if (retries < 3) { 3858 if (retries)
3743 QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n", 3859 QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
3744 dev_name(&card->gdev->dev)); 3860 dev_name(&card->gdev->dev));
3745 ccw_device_set_offline(CARD_DDEV(card)); 3861 ccw_device_set_offline(CARD_DDEV(card));
3746 ccw_device_set_offline(CARD_WDEV(card)); 3862 ccw_device_set_offline(CARD_WDEV(card));
3747 ccw_device_set_offline(CARD_RDEV(card)); 3863 ccw_device_set_offline(CARD_RDEV(card));
3748 ccw_device_set_online(CARD_RDEV(card)); 3864 rc = ccw_device_set_online(CARD_RDEV(card));
3749 ccw_device_set_online(CARD_WDEV(card)); 3865 if (rc)
3750 ccw_device_set_online(CARD_DDEV(card)); 3866 goto retriable;
3751 } 3867 rc = ccw_device_set_online(CARD_WDEV(card));
3868 if (rc)
3869 goto retriable;
3870 rc = ccw_device_set_online(CARD_DDEV(card));
3871 if (rc)
3872 goto retriable;
3752 rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); 3873 rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
3874retriable:
3753 if (rc == -ERESTARTSYS) { 3875 if (rc == -ERESTARTSYS) {
3754 QETH_DBF_TEXT(SETUP, 2, "break1"); 3876 QETH_DBF_TEXT(SETUP, 2, "break1");
3755 return rc; 3877 return rc;
3756 } else if (rc) { 3878 } else if (rc) {
3757 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 3879 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
3758 if (--retries < 0) 3880 if (++retries > 3)
3759 goto out; 3881 goto out;
3760 else 3882 else
3761 goto retry; 3883 goto retry;
@@ -4303,6 +4425,7 @@ static struct {
4303 {"tx do_QDIO time"}, 4425 {"tx do_QDIO time"},
4304 {"tx do_QDIO count"}, 4426 {"tx do_QDIO count"},
4305 {"tx csum"}, 4427 {"tx csum"},
4428 {"tx lin"},
4306}; 4429};
4307 4430
4308int qeth_core_get_sset_count(struct net_device *dev, int stringset) 4431int qeth_core_get_sset_count(struct net_device *dev, int stringset)
@@ -4360,6 +4483,7 @@ void qeth_core_get_ethtool_stats(struct net_device *dev,
4360 data[31] = card->perf_stats.outbound_do_qdio_time; 4483 data[31] = card->perf_stats.outbound_do_qdio_time;
4361 data[32] = card->perf_stats.outbound_do_qdio_cnt; 4484 data[32] = card->perf_stats.outbound_do_qdio_cnt;
4362 data[33] = card->perf_stats.tx_csum; 4485 data[33] = card->perf_stats.tx_csum;
4486 data[34] = card->perf_stats.tx_lin;
4363} 4487}
4364EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats); 4488EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);
4365 4489
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index eecb2ee62e8..52c03438dbe 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -234,18 +234,19 @@ enum qeth_ipa_setdelip_flags {
234 234
235/* SETADAPTER IPA Command: ****************************************************/ 235/* SETADAPTER IPA Command: ****************************************************/
236enum qeth_ipa_setadp_cmd { 236enum qeth_ipa_setadp_cmd {
237 IPA_SETADP_QUERY_COMMANDS_SUPPORTED = 0x0001, 237 IPA_SETADP_QUERY_COMMANDS_SUPPORTED = 0x00000001L,
238 IPA_SETADP_ALTER_MAC_ADDRESS = 0x0002, 238 IPA_SETADP_ALTER_MAC_ADDRESS = 0x00000002L,
239 IPA_SETADP_ADD_DELETE_GROUP_ADDRESS = 0x0004, 239 IPA_SETADP_ADD_DELETE_GROUP_ADDRESS = 0x00000004L,
240 IPA_SETADP_ADD_DELETE_FUNCTIONAL_ADDR = 0x0008, 240 IPA_SETADP_ADD_DELETE_FUNCTIONAL_ADDR = 0x00000008L,
241 IPA_SETADP_SET_ADDRESSING_MODE = 0x0010, 241 IPA_SETADP_SET_ADDRESSING_MODE = 0x00000010L,
242 IPA_SETADP_SET_CONFIG_PARMS = 0x0020, 242 IPA_SETADP_SET_CONFIG_PARMS = 0x00000020L,
243 IPA_SETADP_SET_CONFIG_PARMS_EXTENDED = 0x0040, 243 IPA_SETADP_SET_CONFIG_PARMS_EXTENDED = 0x00000040L,
244 IPA_SETADP_SET_BROADCAST_MODE = 0x0080, 244 IPA_SETADP_SET_BROADCAST_MODE = 0x00000080L,
245 IPA_SETADP_SEND_OSA_MESSAGE = 0x0100, 245 IPA_SETADP_SEND_OSA_MESSAGE = 0x00000100L,
246 IPA_SETADP_SET_SNMP_CONTROL = 0x0200, 246 IPA_SETADP_SET_SNMP_CONTROL = 0x00000200L,
247 IPA_SETADP_QUERY_CARD_INFO = 0x0400, 247 IPA_SETADP_QUERY_CARD_INFO = 0x00000400L,
248 IPA_SETADP_SET_PROMISC_MODE = 0x0800, 248 IPA_SETADP_SET_PROMISC_MODE = 0x00000800L,
249 IPA_SETADP_SET_ACCESS_CONTROL = 0x00010000L,
249}; 250};
250enum qeth_ipa_mac_ops { 251enum qeth_ipa_mac_ops {
251 CHANGE_ADDR_READ_MAC = 0, 252 CHANGE_ADDR_READ_MAC = 0,
@@ -264,6 +265,20 @@ enum qeth_ipa_promisc_modes {
264 SET_PROMISC_MODE_OFF = 0, 265 SET_PROMISC_MODE_OFF = 0,
265 SET_PROMISC_MODE_ON = 1, 266 SET_PROMISC_MODE_ON = 1,
266}; 267};
268enum qeth_ipa_isolation_modes {
269 ISOLATION_MODE_NONE = 0x00000000L,
270 ISOLATION_MODE_FWD = 0x00000001L,
271 ISOLATION_MODE_DROP = 0x00000002L,
272};
273enum qeth_ipa_set_access_mode_rc {
274 SET_ACCESS_CTRL_RC_SUCCESS = 0x0000,
275 SET_ACCESS_CTRL_RC_NOT_SUPPORTED = 0x0004,
276 SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED = 0x0008,
277 SET_ACCESS_CTRL_RC_ALREADY_ISOLATED = 0x0010,
278 SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER = 0x0014,
279 SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF = 0x0018,
280};
281
267 282
268/* (SET)DELIP(M) IPA stuff ***************************************************/ 283/* (SET)DELIP(M) IPA stuff ***************************************************/
269struct qeth_ipacmd_setdelip4 { 284struct qeth_ipacmd_setdelip4 {
@@ -376,6 +391,11 @@ struct qeth_snmp_ureq {
376 struct qeth_snmp_cmd cmd; 391 struct qeth_snmp_cmd cmd;
377} __attribute__((packed)); 392} __attribute__((packed));
378 393
394/* SET_ACCESS_CONTROL: same format for request and reply */
395struct qeth_set_access_ctrl {
396 __u32 subcmd_code;
397} __attribute__((packed));
398
379struct qeth_ipacmd_setadpparms_hdr { 399struct qeth_ipacmd_setadpparms_hdr {
380 __u32 supp_hw_cmds; 400 __u32 supp_hw_cmds;
381 __u32 reserved1; 401 __u32 reserved1;
@@ -394,6 +414,7 @@ struct qeth_ipacmd_setadpparms {
394 struct qeth_query_cmds_supp query_cmds_supp; 414 struct qeth_query_cmds_supp query_cmds_supp;
395 struct qeth_change_addr change_addr; 415 struct qeth_change_addr change_addr;
396 struct qeth_snmp_cmd snmp; 416 struct qeth_snmp_cmd snmp;
417 struct qeth_set_access_ctrl set_access_ctrl;
397 __u32 mode; 418 __u32 mode;
398 } data; 419 } data;
399} __attribute__ ((packed)); 420} __attribute__ ((packed));
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 33505c2a0e3..9ff2b36fdc4 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -416,7 +416,11 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
416static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show, 416static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show,
417 qeth_dev_layer2_store); 417 qeth_dev_layer2_store);
418 418
419static ssize_t qeth_dev_large_send_show(struct device *dev, 419#define ATTR_QETH_ISOLATION_NONE ("none")
420#define ATTR_QETH_ISOLATION_FWD ("forward")
421#define ATTR_QETH_ISOLATION_DROP ("drop")
422
423static ssize_t qeth_dev_isolation_show(struct device *dev,
420 struct device_attribute *attr, char *buf) 424 struct device_attribute *attr, char *buf)
421{ 425{
422 struct qeth_card *card = dev_get_drvdata(dev); 426 struct qeth_card *card = dev_get_drvdata(dev);
@@ -424,44 +428,69 @@ static ssize_t qeth_dev_large_send_show(struct device *dev,
424 if (!card) 428 if (!card)
425 return -EINVAL; 429 return -EINVAL;
426 430
427 switch (card->options.large_send) { 431 switch (card->options.isolation) {
428 case QETH_LARGE_SEND_NO: 432 case ISOLATION_MODE_NONE:
429 return sprintf(buf, "%s\n", "no"); 433 return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_NONE);
430 case QETH_LARGE_SEND_TSO: 434 case ISOLATION_MODE_FWD:
431 return sprintf(buf, "%s\n", "TSO"); 435 return snprintf(buf, 9, "%s\n", ATTR_QETH_ISOLATION_FWD);
436 case ISOLATION_MODE_DROP:
437 return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_DROP);
432 default: 438 default:
433 return sprintf(buf, "%s\n", "N/A"); 439 return snprintf(buf, 5, "%s\n", "N/A");
434 } 440 }
435} 441}
436 442
437static ssize_t qeth_dev_large_send_store(struct device *dev, 443static ssize_t qeth_dev_isolation_store(struct device *dev,
438 struct device_attribute *attr, const char *buf, size_t count) 444 struct device_attribute *attr, const char *buf, size_t count)
439{ 445{
440 struct qeth_card *card = dev_get_drvdata(dev); 446 struct qeth_card *card = dev_get_drvdata(dev);
441 enum qeth_large_send_types type; 447 enum qeth_ipa_isolation_modes isolation;
442 int rc = 0; 448 int rc = 0;
443 char *tmp; 449 char *tmp, *curtoken;
450 curtoken = (char *) buf;
444 451
445 if (!card) 452 if (!card) {
446 return -EINVAL; 453 rc = -EINVAL;
447 tmp = strsep((char **) &buf, "\n"); 454 goto out;
448 if (!strcmp(tmp, "no")) { 455 }
449 type = QETH_LARGE_SEND_NO; 456
450 } else if (!strcmp(tmp, "TSO")) { 457 /* check for unknown, too, in case we do not yet know who we are */
451 type = QETH_LARGE_SEND_TSO; 458 if (card->info.type != QETH_CARD_TYPE_OSAE &&
459 card->info.type != QETH_CARD_TYPE_UNKNOWN) {
460 rc = -EOPNOTSUPP;
461 dev_err(&card->gdev->dev, "Adapter does not "
462 "support QDIO data connection isolation\n");
463 goto out;
464 }
465
466 /* parse input into isolation mode */
467 tmp = strsep(&curtoken, "\n");
468 if (!strcmp(tmp, ATTR_QETH_ISOLATION_NONE)) {
469 isolation = ISOLATION_MODE_NONE;
470 } else if (!strcmp(tmp, ATTR_QETH_ISOLATION_FWD)) {
471 isolation = ISOLATION_MODE_FWD;
472 } else if (!strcmp(tmp, ATTR_QETH_ISOLATION_DROP)) {
473 isolation = ISOLATION_MODE_DROP;
452 } else { 474 } else {
453 return -EINVAL; 475 rc = -EINVAL;
476 goto out;
454 } 477 }
455 if (card->options.large_send == type) 478 rc = count;
456 return count; 479
457 rc = qeth_set_large_send(card, type); 480 /* defer IP assist if device is offline (until discipline->set_online)*/
458 if (rc) 481 card->options.isolation = isolation;
459 return rc; 482 if (card->state == CARD_STATE_SOFTSETUP ||
460 return count; 483 card->state == CARD_STATE_UP) {
484 int ipa_rc = qeth_set_access_ctrl_online(card);
485 if (ipa_rc != 0)
486 rc = ipa_rc;
487 }
488out:
489 return rc;
461} 490}
462 491
463static DEVICE_ATTR(large_send, 0644, qeth_dev_large_send_show, 492static DEVICE_ATTR(isolation, 0644, qeth_dev_isolation_show,
464 qeth_dev_large_send_store); 493 qeth_dev_isolation_store);
465 494
466static ssize_t qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value) 495static ssize_t qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value)
467{ 496{
@@ -582,7 +611,7 @@ static struct attribute *qeth_device_attrs[] = {
582 &dev_attr_recover.attr, 611 &dev_attr_recover.attr,
583 &dev_attr_performance_stats.attr, 612 &dev_attr_performance_stats.attr,
584 &dev_attr_layer2.attr, 613 &dev_attr_layer2.attr,
585 &dev_attr_large_send.attr, 614 &dev_attr_isolation.attr,
586 NULL, 615 NULL,
587}; 616};
588 617
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index b61d5c723c5..0b763396d5d 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -940,30 +940,17 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
940 940
941 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1); 941 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
942 recover_flag = card->state; 942 recover_flag = card->state;
943 rc = ccw_device_set_online(CARD_RDEV(card));
944 if (rc) {
945 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
946 return -EIO;
947 }
948 rc = ccw_device_set_online(CARD_WDEV(card));
949 if (rc) {
950 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
951 return -EIO;
952 }
953 rc = ccw_device_set_online(CARD_DDEV(card));
954 if (rc) {
955 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
956 return -EIO;
957 }
958
959 rc = qeth_core_hardsetup_card(card); 943 rc = qeth_core_hardsetup_card(card);
960 if (rc) { 944 if (rc) {
961 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 945 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
946 rc = -ENODEV;
962 goto out_remove; 947 goto out_remove;
963 } 948 }
964 949
965 if (!card->dev && qeth_l2_setup_netdev(card)) 950 if (!card->dev && qeth_l2_setup_netdev(card)) {
951 rc = -ENODEV;
966 goto out_remove; 952 goto out_remove;
953 }
967 954
968 if (card->info.type != QETH_CARD_TYPE_OSN) 955 if (card->info.type != QETH_CARD_TYPE_OSN)
969 qeth_l2_send_setmac(card, &card->dev->dev_addr[0]); 956 qeth_l2_send_setmac(card, &card->dev->dev_addr[0]);
@@ -983,12 +970,14 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
983 card->lan_online = 0; 970 card->lan_online = 0;
984 return 0; 971 return 0;
985 } 972 }
973 rc = -ENODEV;
986 goto out_remove; 974 goto out_remove;
987 } else 975 } else
988 card->lan_online = 1; 976 card->lan_online = 1;
989 977
990 if (card->info.type != QETH_CARD_TYPE_OSN) { 978 if (card->info.type != QETH_CARD_TYPE_OSN) {
991 qeth_set_large_send(card, card->options.large_send); 979 /* configure isolation level */
980 qeth_set_access_ctrl_online(card);
992 qeth_l2_process_vlans(card, 0); 981 qeth_l2_process_vlans(card, 0);
993 } 982 }
994 983
@@ -997,6 +986,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
997 rc = qeth_init_qdio_queues(card); 986 rc = qeth_init_qdio_queues(card);
998 if (rc) { 987 if (rc) {
999 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); 988 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
989 rc = -ENODEV;
1000 goto out_remove; 990 goto out_remove;
1001 } 991 }
1002 card->state = CARD_STATE_SOFTSETUP; 992 card->state = CARD_STATE_SOFTSETUP;
@@ -1018,6 +1008,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
1018 /* let user_space know that device is online */ 1008 /* let user_space know that device is online */
1019 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); 1009 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
1020 return 0; 1010 return 0;
1011
1021out_remove: 1012out_remove:
1022 card->use_hard_stop = 1; 1013 card->use_hard_stop = 1;
1023 qeth_l2_stop_card(card, 0); 1014 qeth_l2_stop_card(card, 0);
@@ -1028,7 +1019,7 @@ out_remove:
1028 card->state = CARD_STATE_RECOVER; 1019 card->state = CARD_STATE_RECOVER;
1029 else 1020 else
1030 card->state = CARD_STATE_DOWN; 1021 card->state = CARD_STATE_DOWN;
1031 return -ENODEV; 1022 return rc;
1032} 1023}
1033 1024
1034static int qeth_l2_set_online(struct ccwgroup_device *gdev) 1025static int qeth_l2_set_online(struct ccwgroup_device *gdev)
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 9f143c83bba..321988fa9f7 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -60,5 +60,7 @@ void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
60int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *); 60int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
61void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions, 61void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
62 const u8 *); 62 const u8 *);
63int qeth_l3_set_large_send(struct qeth_card *, enum qeth_large_send_types);
64int qeth_l3_set_rx_csum(struct qeth_card *, enum qeth_checksum_types);
63 65
64#endif /* __QETH_L3_H__ */ 66#endif /* __QETH_L3_H__ */
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 4ca28c16ca8..fd1b6ed3721 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -41,6 +41,32 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *,
41static int __qeth_l3_set_online(struct ccwgroup_device *, int); 41static int __qeth_l3_set_online(struct ccwgroup_device *, int);
42static int __qeth_l3_set_offline(struct ccwgroup_device *, int); 42static int __qeth_l3_set_offline(struct ccwgroup_device *, int);
43 43
44int qeth_l3_set_large_send(struct qeth_card *card,
45 enum qeth_large_send_types type)
46{
47 int rc = 0;
48
49 card->options.large_send = type;
50 if (card->dev == NULL)
51 return 0;
52
53 if (card->options.large_send == QETH_LARGE_SEND_TSO) {
54 if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
55 card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
56 NETIF_F_HW_CSUM;
57 } else {
58 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
59 NETIF_F_HW_CSUM);
60 card->options.large_send = QETH_LARGE_SEND_NO;
61 rc = -EOPNOTSUPP;
62 }
63 } else {
64 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
65 NETIF_F_HW_CSUM);
66 card->options.large_send = QETH_LARGE_SEND_NO;
67 }
68 return rc;
69}
44 70
45static int qeth_l3_isxdigit(char *buf) 71static int qeth_l3_isxdigit(char *buf)
46{ 72{
@@ -1439,6 +1465,35 @@ static int qeth_l3_send_checksum_command(struct qeth_card *card)
1439 return 0; 1465 return 0;
1440} 1466}
1441 1467
1468int qeth_l3_set_rx_csum(struct qeth_card *card,
1469 enum qeth_checksum_types csum_type)
1470{
1471 int rc = 0;
1472
1473 if (card->options.checksum_type == HW_CHECKSUMMING) {
1474 if ((csum_type != HW_CHECKSUMMING) &&
1475 (card->state != CARD_STATE_DOWN)) {
1476 rc = qeth_l3_send_simple_setassparms(card,
1477 IPA_INBOUND_CHECKSUM, IPA_CMD_ASS_STOP, 0);
1478 if (rc)
1479 return -EIO;
1480 }
1481 } else {
1482 if (csum_type == HW_CHECKSUMMING) {
1483 if (card->state != CARD_STATE_DOWN) {
1484 if (!qeth_is_supported(card,
1485 IPA_INBOUND_CHECKSUM))
1486 return -EPERM;
1487 rc = qeth_l3_send_checksum_command(card);
1488 if (rc)
1489 return -EIO;
1490 }
1491 }
1492 }
1493 card->options.checksum_type = csum_type;
1494 return rc;
1495}
1496
1442static int qeth_l3_start_ipa_checksum(struct qeth_card *card) 1497static int qeth_l3_start_ipa_checksum(struct qeth_card *card)
1443{ 1498{
1444 int rc = 0; 1499 int rc = 0;
@@ -1506,6 +1561,8 @@ static int qeth_l3_start_ipa_tso(struct qeth_card *card)
1506static int qeth_l3_start_ipassists(struct qeth_card *card) 1561static int qeth_l3_start_ipassists(struct qeth_card *card)
1507{ 1562{
1508 QETH_DBF_TEXT(TRACE, 3, "strtipas"); 1563 QETH_DBF_TEXT(TRACE, 3, "strtipas");
1564
1565 qeth_set_access_ctrl_online(card); /* go on*/
1509 qeth_l3_start_ipa_arp_processing(card); /* go on*/ 1566 qeth_l3_start_ipa_arp_processing(card); /* go on*/
1510 qeth_l3_start_ipa_ip_fragmentation(card); /* go on*/ 1567 qeth_l3_start_ipa_ip_fragmentation(card); /* go on*/
1511 qeth_l3_start_ipa_source_mac(card); /* go on*/ 1568 qeth_l3_start_ipa_source_mac(card); /* go on*/
@@ -2684,6 +2741,24 @@ static void qeth_tx_csum(struct sk_buff *skb)
2684 *(__sum16 *)(skb->data + offset) = csum_fold(csum); 2741 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
2685} 2742}
2686 2743
2744static inline int qeth_l3_tso_elements(struct sk_buff *skb)
2745{
2746 unsigned long tcpd = (unsigned long)tcp_hdr(skb) +
2747 tcp_hdr(skb)->doff * 4;
2748 int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data);
2749 int elements = PFN_UP(tcpd + tcpd_len) - PFN_DOWN(tcpd);
2750 elements += skb_shinfo(skb)->nr_frags;
2751 return elements;
2752}
2753
2754static inline int qeth_l3_tso_check(struct sk_buff *skb)
2755{
2756 int len = ((unsigned long)tcp_hdr(skb) + tcp_hdr(skb)->doff * 4) -
2757 (unsigned long)skb->data;
2758 return (((unsigned long)skb->data & PAGE_MASK) !=
2759 (((unsigned long)skb->data + len) & PAGE_MASK));
2760}
2761
2687static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) 2762static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2688{ 2763{
2689 int rc; 2764 int rc;
@@ -2777,16 +2852,21 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2777 /* fix hardware limitation: as long as we do not have sbal 2852 /* fix hardware limitation: as long as we do not have sbal
2778 * chaining we can not send long frag lists 2853 * chaining we can not send long frag lists
2779 */ 2854 */
2780 if ((large_send == QETH_LARGE_SEND_TSO) && 2855 if (large_send == QETH_LARGE_SEND_TSO) {
2781 ((skb_shinfo(new_skb)->nr_frags + 2) > 16)) { 2856 if (qeth_l3_tso_elements(new_skb) + 1 > 16) {
2782 if (skb_linearize(new_skb)) 2857 if (skb_linearize(new_skb))
2783 goto tx_drop; 2858 goto tx_drop;
2859 if (card->options.performance_stats)
2860 card->perf_stats.tx_lin++;
2861 }
2784 } 2862 }
2785 2863
2786 if ((large_send == QETH_LARGE_SEND_TSO) && 2864 if ((large_send == QETH_LARGE_SEND_TSO) &&
2787 (cast_type == RTN_UNSPEC)) { 2865 (cast_type == RTN_UNSPEC)) {
2788 hdr = (struct qeth_hdr *)skb_push(new_skb, 2866 hdr = (struct qeth_hdr *)skb_push(new_skb,
2789 sizeof(struct qeth_hdr_tso)); 2867 sizeof(struct qeth_hdr_tso));
2868 if (qeth_l3_tso_check(new_skb))
2869 QETH_DBF_MESSAGE(2, "tso skb misaligned\n");
2790 memset(hdr, 0, sizeof(struct qeth_hdr_tso)); 2870 memset(hdr, 0, sizeof(struct qeth_hdr_tso));
2791 qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type); 2871 qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type);
2792 qeth_tso_fill_header(card, hdr, new_skb); 2872 qeth_tso_fill_header(card, hdr, new_skb);
@@ -2903,46 +2983,28 @@ static u32 qeth_l3_ethtool_get_rx_csum(struct net_device *dev)
2903static int qeth_l3_ethtool_set_rx_csum(struct net_device *dev, u32 data) 2983static int qeth_l3_ethtool_set_rx_csum(struct net_device *dev, u32 data)
2904{ 2984{
2905 struct qeth_card *card = dev->ml_priv; 2985 struct qeth_card *card = dev->ml_priv;
2906 enum qeth_card_states old_state;
2907 enum qeth_checksum_types csum_type; 2986 enum qeth_checksum_types csum_type;
2908 2987
2909 if ((card->state != CARD_STATE_UP) &&
2910 (card->state != CARD_STATE_DOWN))
2911 return -EPERM;
2912
2913 if (data) 2988 if (data)
2914 csum_type = HW_CHECKSUMMING; 2989 csum_type = HW_CHECKSUMMING;
2915 else 2990 else
2916 csum_type = SW_CHECKSUMMING; 2991 csum_type = SW_CHECKSUMMING;
2917 2992
2918 if (card->options.checksum_type != csum_type) { 2993 return qeth_l3_set_rx_csum(card, csum_type);
2919 old_state = card->state;
2920 if (card->state == CARD_STATE_UP)
2921 __qeth_l3_set_offline(card->gdev, 1);
2922 card->options.checksum_type = csum_type;
2923 if (old_state == CARD_STATE_UP)
2924 __qeth_l3_set_online(card->gdev, 1);
2925 }
2926 return 0;
2927} 2994}
2928 2995
2929static int qeth_l3_ethtool_set_tso(struct net_device *dev, u32 data) 2996static int qeth_l3_ethtool_set_tso(struct net_device *dev, u32 data)
2930{ 2997{
2931 struct qeth_card *card = dev->ml_priv; 2998 struct qeth_card *card = dev->ml_priv;
2999 int rc = 0;
2932 3000
2933 if (data) { 3001 if (data) {
2934 if (card->options.large_send == QETH_LARGE_SEND_NO) { 3002 rc = qeth_l3_set_large_send(card, QETH_LARGE_SEND_TSO);
2935 if (card->info.type == QETH_CARD_TYPE_IQD)
2936 return -EPERM;
2937 else
2938 card->options.large_send = QETH_LARGE_SEND_TSO;
2939 dev->features |= NETIF_F_TSO;
2940 }
2941 } else { 3003 } else {
2942 dev->features &= ~NETIF_F_TSO; 3004 dev->features &= ~NETIF_F_TSO;
2943 card->options.large_send = QETH_LARGE_SEND_NO; 3005 card->options.large_send = QETH_LARGE_SEND_NO;
2944 } 3006 }
2945 return 0; 3007 return rc;
2946} 3008}
2947 3009
2948static const struct ethtool_ops qeth_l3_ethtool_ops = { 3010static const struct ethtool_ops qeth_l3_ethtool_ops = {
@@ -3058,6 +3120,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
3058 NETIF_F_HW_VLAN_RX | 3120 NETIF_F_HW_VLAN_RX |
3059 NETIF_F_HW_VLAN_FILTER; 3121 NETIF_F_HW_VLAN_FILTER;
3060 card->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 3122 card->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
3123 card->dev->gso_max_size = 15 * PAGE_SIZE;
3061 3124
3062 SET_NETDEV_DEV(card->dev, &card->gdev->dev); 3125 SET_NETDEV_DEV(card->dev, &card->gdev->dev);
3063 return register_netdev(card->dev); 3126 return register_netdev(card->dev);
@@ -3154,32 +3217,19 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3154 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1); 3217 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
3155 3218
3156 recover_flag = card->state; 3219 recover_flag = card->state;
3157 rc = ccw_device_set_online(CARD_RDEV(card));
3158 if (rc) {
3159 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
3160 return -EIO;
3161 }
3162 rc = ccw_device_set_online(CARD_WDEV(card));
3163 if (rc) {
3164 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
3165 return -EIO;
3166 }
3167 rc = ccw_device_set_online(CARD_DDEV(card));
3168 if (rc) {
3169 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
3170 return -EIO;
3171 }
3172
3173 rc = qeth_core_hardsetup_card(card); 3220 rc = qeth_core_hardsetup_card(card);
3174 if (rc) { 3221 if (rc) {
3175 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 3222 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
3223 rc = -ENODEV;
3176 goto out_remove; 3224 goto out_remove;
3177 } 3225 }
3178 3226
3179 qeth_l3_query_ipassists(card, QETH_PROT_IPV4); 3227 qeth_l3_query_ipassists(card, QETH_PROT_IPV4);
3180 3228
3181 if (!card->dev && qeth_l3_setup_netdev(card)) 3229 if (!card->dev && qeth_l3_setup_netdev(card)) {
3230 rc = -ENODEV;
3182 goto out_remove; 3231 goto out_remove;
3232 }
3183 3233
3184 card->state = CARD_STATE_HARDSETUP; 3234 card->state = CARD_STATE_HARDSETUP;
3185 qeth_print_status_message(card); 3235 qeth_print_status_message(card);
@@ -3196,10 +3246,11 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3196 card->lan_online = 0; 3246 card->lan_online = 0;
3197 return 0; 3247 return 0;
3198 } 3248 }
3249 rc = -ENODEV;
3199 goto out_remove; 3250 goto out_remove;
3200 } else 3251 } else
3201 card->lan_online = 1; 3252 card->lan_online = 1;
3202 qeth_set_large_send(card, card->options.large_send); 3253 qeth_l3_set_large_send(card, card->options.large_send);
3203 3254
3204 rc = qeth_l3_setadapter_parms(card); 3255 rc = qeth_l3_setadapter_parms(card);
3205 if (rc) 3256 if (rc)
@@ -3218,6 +3269,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3218 rc = qeth_init_qdio_queues(card); 3269 rc = qeth_init_qdio_queues(card);
3219 if (rc) { 3270 if (rc) {
3220 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); 3271 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
3272 rc = -ENODEV;
3221 goto out_remove; 3273 goto out_remove;
3222 } 3274 }
3223 card->state = CARD_STATE_SOFTSETUP; 3275 card->state = CARD_STATE_SOFTSETUP;
@@ -3248,7 +3300,7 @@ out_remove:
3248 card->state = CARD_STATE_RECOVER; 3300 card->state = CARD_STATE_RECOVER;
3249 else 3301 else
3250 card->state = CARD_STATE_DOWN; 3302 card->state = CARD_STATE_DOWN;
3251 return -ENODEV; 3303 return rc;
3252} 3304}
3253 3305
3254static int qeth_l3_set_online(struct ccwgroup_device *gdev) 3306static int qeth_l3_set_online(struct ccwgroup_device *gdev)
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index c144b9924d5..3360b0941aa 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -293,31 +293,79 @@ static ssize_t qeth_l3_dev_checksum_store(struct device *dev,
293 struct device_attribute *attr, const char *buf, size_t count) 293 struct device_attribute *attr, const char *buf, size_t count)
294{ 294{
295 struct qeth_card *card = dev_get_drvdata(dev); 295 struct qeth_card *card = dev_get_drvdata(dev);
296 enum qeth_checksum_types csum_type;
296 char *tmp; 297 char *tmp;
298 int rc;
297 299
298 if (!card) 300 if (!card)
299 return -EINVAL; 301 return -EINVAL;
300 302
301 if ((card->state != CARD_STATE_DOWN) &&
302 (card->state != CARD_STATE_RECOVER))
303 return -EPERM;
304
305 tmp = strsep((char **) &buf, "\n"); 303 tmp = strsep((char **) &buf, "\n");
306 if (!strcmp(tmp, "sw_checksumming")) 304 if (!strcmp(tmp, "sw_checksumming"))
307 card->options.checksum_type = SW_CHECKSUMMING; 305 csum_type = SW_CHECKSUMMING;
308 else if (!strcmp(tmp, "hw_checksumming")) 306 else if (!strcmp(tmp, "hw_checksumming"))
309 card->options.checksum_type = HW_CHECKSUMMING; 307 csum_type = HW_CHECKSUMMING;
310 else if (!strcmp(tmp, "no_checksumming")) 308 else if (!strcmp(tmp, "no_checksumming"))
311 card->options.checksum_type = NO_CHECKSUMMING; 309 csum_type = NO_CHECKSUMMING;
312 else { 310 else
313 return -EINVAL; 311 return -EINVAL;
314 } 312
313 rc = qeth_l3_set_rx_csum(card, csum_type);
314 if (rc)
315 return rc;
315 return count; 316 return count;
316} 317}
317 318
318static DEVICE_ATTR(checksumming, 0644, qeth_l3_dev_checksum_show, 319static DEVICE_ATTR(checksumming, 0644, qeth_l3_dev_checksum_show,
319 qeth_l3_dev_checksum_store); 320 qeth_l3_dev_checksum_store);
320 321
322static ssize_t qeth_l3_dev_large_send_show(struct device *dev,
323 struct device_attribute *attr, char *buf)
324{
325 struct qeth_card *card = dev_get_drvdata(dev);
326
327 if (!card)
328 return -EINVAL;
329
330 switch (card->options.large_send) {
331 case QETH_LARGE_SEND_NO:
332 return sprintf(buf, "%s\n", "no");
333 case QETH_LARGE_SEND_TSO:
334 return sprintf(buf, "%s\n", "TSO");
335 default:
336 return sprintf(buf, "%s\n", "N/A");
337 }
338}
339
340static ssize_t qeth_l3_dev_large_send_store(struct device *dev,
341 struct device_attribute *attr, const char *buf, size_t count)
342{
343 struct qeth_card *card = dev_get_drvdata(dev);
344 enum qeth_large_send_types type;
345 int rc = 0;
346 char *tmp;
347
348 if (!card)
349 return -EINVAL;
350 tmp = strsep((char **) &buf, "\n");
351 if (!strcmp(tmp, "no"))
352 type = QETH_LARGE_SEND_NO;
353 else if (!strcmp(tmp, "TSO"))
354 type = QETH_LARGE_SEND_TSO;
355 else
356 return -EINVAL;
357
358 if (card->options.large_send == type)
359 return count;
360 rc = qeth_l3_set_large_send(card, type);
361 if (rc)
362 return rc;
363 return count;
364}
365
366static DEVICE_ATTR(large_send, 0644, qeth_l3_dev_large_send_show,
367 qeth_l3_dev_large_send_store);
368
321static struct attribute *qeth_l3_device_attrs[] = { 369static struct attribute *qeth_l3_device_attrs[] = {
322 &dev_attr_route4.attr, 370 &dev_attr_route4.attr,
323 &dev_attr_route6.attr, 371 &dev_attr_route6.attr,
@@ -325,6 +373,7 @@ static struct attribute *qeth_l3_device_attrs[] = {
325 &dev_attr_broadcast_mode.attr, 373 &dev_attr_broadcast_mode.attr,
326 &dev_attr_canonical_macaddr.attr, 374 &dev_attr_canonical_macaddr.attr,
327 &dev_attr_checksumming.attr, 375 &dev_attr_checksumming.attr,
376 &dev_attr_large_send.attr,
328 NULL, 377 NULL,
329}; 378};
330 379