path: root/drivers/net/cassini.c
author    Linus Torvalds <torvalds@g5.osdl.org>  2006-09-24 13:15:13 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-09-24 13:15:13 -0400
commit    a319a2773a13bab56a0d0b3744ba8703324313b5 (patch)
tree      f02c86acabd1031439fd422a167784007e84ebb1 /drivers/net/cassini.c
parent    e18fa700c9a31360bc8f193aa543b7ef7b39a06b (diff)
parent    183798799216fad36c7219fe8d4d6dee6b8fa755 (diff)
Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6
* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6: (217 commits)
  net/ieee80211: fix more crypto-related build breakage
  [PATCH] Spidernet: add ethtool -S (show statistics)
  [NET] GT96100: Delete bitrotting ethernet driver
  [PATCH] mv643xx_eth: restrict to 32-bit PPC_MULTIPLATFORM
  [PATCH] Cirrus Logic ep93xx ethernet driver
  r8169: the MMIO region of the 8167 stands behind BAR#1
  e1000, ixgb: Remove pointless wrappers
  [PATCH] Remove powerpc specific parts of 3c509 driver
  [PATCH] s2io: Switch to pci_get_device
  [PATCH] gt96100: move to pci_get_device API
  [PATCH] ehea: bugfix for register access functions
  [PATCH] e1000 disable device on PCI error
  drivers/net/phy/fixed: #if 0 some incomplete code
  drivers/net: const-ify ethtool_ops declarations
  [PATCH] ethtool: allow const ethtool_ops
  [PATCH] sky2: big endian
  [PATCH] sky2: fiber support
  [PATCH] sky2: tx pause bug fix
  drivers/net: Trim trailing whitespace
  [PATCH] ehea: IBM eHEA Ethernet Device Driver
  ...

Manually resolved conflicts in drivers/net/ixgb/ixgb_main.c and drivers/net/sky2.c related to CHECKSUM_HW/CHECKSUM_PARTIAL changes by commit 84fa7933a33f806bbbaae6775e87459b1ec584c0 that just happened to be next to unrelated changes in this update.
Diffstat (limited to 'drivers/net/cassini.c')
-rw-r--r--  drivers/net/cassini.c  |  540
1 file changed, 270 insertions(+), 270 deletions(-)
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 558fdb8ad2dc..7694365092f8 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -43,7 +43,7 @@
 * -- on page reclamation, the driver swaps the page with a spare page.
 * if that page is still in use, it frees its reference to that page,
 * and allocates a new page for use. otherwise, it just recycles the
 * the page.
 *
 * NOTE: cassini can parse the header. however, it's not worth it
 * as long as the network stack requires a header copy.
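The reclamation policy described in that comment boils down to a refcount check: if the page the hardware just consumed is still referenced elsewhere (an skb holds a fragment into it), the ring drops its own reference and takes a fresh page; otherwise the same page goes straight back on the ring. A userspace sketch of that decision, with hypothetical names and sizes (the driver's real helpers, cas_page_alloc() and cas_page_free(), appear later in this diff):

        #include <stdlib.h>

        struct rx_page {
                int refcnt;             /* 1 == only the ring holds it */
                void *buf;
        };

        /* Return the page the ring should use next: recycle 'page' if
         * nothing else references it, else drop our reference and
         * allocate a replacement. NULL means "no memory, retry later". */
        static struct rx_page *reclaim_page(struct rx_page *page)
        {
                struct rx_page *fresh;

                if (page->refcnt == 1)
                        return page;    /* unreferenced: recycle as-is */

                /* still in use by the stack: release our reference and
                 * give the ring a freshly allocated page instead */
                page->refcnt--;
                fresh = malloc(sizeof(*fresh));
                if (fresh) {
                        fresh->refcnt = 1;
                        fresh->buf = malloc(4096);
                }
                return fresh;
        }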
@@ -60,10 +60,10 @@
 * interrupts, but the INT# assignment needs to be set up properly by
 * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
 * that. also, the two descriptor rings are designed to distinguish between
 * encrypted and non-encrypted packets, but we use them for buffering
 * instead.
 *
 * by default, the selective clear mask is set up to process rx packets.
 */


@@ -112,7 +112,7 @@
#endif

/* select which firmware to use */
#define USE_HP_WORKAROUND
#define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */
#define CAS_HP_ALT_FIRMWARE cas_prog_null /* alternate firmware */

@@ -168,7 +168,7 @@
#define STOP_TRIES_PHY 1000
#define STOP_TRIES 5000

/* specify a minimum frame size to deal with some fifo issues
 * max mtu == 2 * page size - ethernet header - 64 - swivel =
 * 2 * page_size - 0x50
 */
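As a quick check on that arithmetic (assuming the standard 14-byte Ethernet header and a 2-byte swivel offset, which is what RX_SWIVEL_OFF_VAL elsewhere in the driver suggests): 14 + 64 + 2 = 80 = 0x50, which is how "2 * page size - ethernet header - 64 - swivel" collapses to "2 * page_size - 0x50".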
@@ -207,7 +207,7 @@ MODULE_PARM_DESC(link_mode, "default link mode");
 * being confused and never showing a link status of "up."
 */
#define DEFAULT_LINKDOWN_TIMEOUT 5
/*
 * Value in seconds, for user input.
 */
static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
@@ -249,7 +249,7 @@ static inline void cas_lock_tx(struct cas *cp)
{
        int i;

        for (i = 0; i < N_TX_RINGS; i++)
                spin_lock(&cp->tx_lock[i]);
}

@@ -278,8 +278,8 @@ static inline void cas_unlock_tx(struct cas *cp)
{
        int i;

        for (i = N_TX_RINGS; i > 0; i--)
                spin_unlock(&cp->tx_lock[i - 1]);
}

static inline void cas_unlock_all(struct cas *cp)
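cas_lock_tx()/cas_unlock_tx() above are an instance of the classic ordered-acquisition pattern: every path takes the per-ring locks in ascending index order and drops them in reverse, so no two CPUs can ever hold complementary subsets of the locks and wait on each other. A minimal userspace sketch of the same discipline (the demo_* names are ours, not the driver's):

        #include <pthread.h>

        #define N_RINGS 4

        static pthread_mutex_t ring_lock[N_RINGS] = {
                PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
                PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
        };

        /* Take every ring lock in ascending index order; one global
         * ordering makes a lock-ordering deadlock impossible. */
        static void demo_lock_all(void)
        {
                int i;

                for (i = 0; i < N_RINGS; i++)
                        pthread_mutex_lock(&ring_lock[i]);
        }

        /* Release in the reverse order of acquisition. */
        static void demo_unlock_all(void)
        {
                int i;

                for (i = N_RINGS; i > 0; i--)
                        pthread_mutex_unlock(&ring_lock[i - 1]);
        }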
@@ -316,7 +316,7 @@ static void cas_disable_irq(struct cas *cp, const int ring)
#ifdef USE_PCI_INTD
        case 3:
#endif
                writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
                       cp->regs + REG_PLUS_INTRN_MASK(ring));
                break;
#endif
@@ -415,7 +415,7 @@ static inline void cas_entropy_reset(struct cas *cp)
        if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
                return;

        writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
               cp->regs + REG_BIM_LOCAL_DEV_EN);
        writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
        writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);
@@ -426,7 +426,7 @@ static inline void cas_entropy_reset(struct cas *cp)
#endif
}

/* access to the phy. the following assumes that we've initialized the MIF to
 * be in frame rather than bit-bang mode
 */
static u16 cas_phy_read(struct cas *cp, int reg)
@@ -439,7 +439,7 @@ static u16 cas_phy_read(struct cas *cp, int reg)
        cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
        cmd |= MIF_FRAME_TURN_AROUND_MSB;
        writel(cmd, cp->regs + REG_MIF_FRAME);

        /* poll for completion */
        while (limit-- > 0) {
                udelay(10);
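The read path (and cas_phy_write() in the next hunk) follows the bounded-poll idiom: kick off an MDIO frame by writing REG_MIF_FRAME, then spin until the PHY flips the turn-around/completion bit, giving up after a fixed number of tries rather than hanging on a dead PHY. A self-contained sketch of that idiom (userspace; usleep() stands in for the kernel's udelay(), and the status word and done bit are placeholders):

        #include <stdbool.h>
        #include <stdint.h>
        #include <unistd.h>

        /* Spin until 'done_bit' appears in *status, at most 'limit'
         * tries with a 10us pause between reads. */
        static bool poll_for_completion(const volatile uint32_t *status,
                                        uint32_t done_bit, int limit)
        {
                while (limit-- > 0) {
                        usleep(10);
                        if (*status & done_bit)
                                return true;    /* device finished the frame */
                }
                return false;   /* timed out; caller reports the failure */
        }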
@@ -461,7 +461,7 @@ static int cas_phy_write(struct cas *cp, int reg, u16 val)
        cmd |= MIF_FRAME_TURN_AROUND_MSB;
        cmd |= val & MIF_FRAME_DATA_MASK;
        writel(cmd, cp->regs + REG_MIF_FRAME);

        /* poll for completion */
        while (limit-- > 0) {
                udelay(10);
@@ -474,7 +474,7 @@ static int cas_phy_write(struct cas *cp, int reg, u16 val)

static void cas_phy_powerup(struct cas *cp)
{
        u16 ctl = cas_phy_read(cp, MII_BMCR);

        if ((ctl & BMCR_PDOWN) == 0)
                return;
@@ -484,7 +484,7 @@ static void cas_phy_powerup(struct cas *cp)

static void cas_phy_powerdown(struct cas *cp)
{
        u16 ctl = cas_phy_read(cp, MII_BMCR);

        if (ctl & BMCR_PDOWN)
                return;
@@ -495,7 +495,7 @@ static void cas_phy_powerdown(struct cas *cp)
/* cp->lock held. note: the last put_page will free the buffer */
static int cas_page_free(struct cas *cp, cas_page_t *page)
{
        pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
                       PCI_DMA_FROMDEVICE);
        cas_buffer_dec(page);
        __free_pages(page->buffer, cp->page_order);
@@ -507,7 +507,7 @@ static int cas_page_free(struct cas *cp, cas_page_t *page)
#define RX_USED_ADD(x, y) ((x)->used += (y))
#define RX_USED_SET(x, y) ((x)->used = (y))
#else
#define RX_USED_ADD(x, y)
#define RX_USED_SET(x, y)
#endif

@@ -602,7 +602,7 @@ static void cas_spare_recover(struct cas *cp, const gfp_t flags)
        list_splice(&cp->rx_inuse_list, &list);
        INIT_LIST_HEAD(&cp->rx_inuse_list);
        spin_unlock(&cp->rx_inuse_lock);

        list_for_each_safe(elem, tmp, &list) {
                cas_page_t *page = list_entry(elem, cas_page_t, list);

@@ -627,7 +627,7 @@ static void cas_spare_recover(struct cas *cp, const gfp_t flags)
                list_splice(&list, &cp->rx_inuse_list);
                spin_unlock(&cp->rx_inuse_lock);
        }

        spin_lock(&cp->rx_spare_lock);
        needed = cp->rx_spares_needed;
        spin_unlock(&cp->rx_spare_lock);
@@ -639,7 +639,7 @@ static void cas_spare_recover(struct cas *cp, const gfp_t flags)
        i = 0;
        while (i < needed) {
                cas_page_t *spare = cas_page_alloc(cp, flags);
                if (!spare)
                        break;
                list_add(&spare->list, &list);
                i++;
@@ -695,12 +695,12 @@ static cas_page_t *cas_page_dequeue(struct cas *cp)
static void cas_mif_poll(struct cas *cp, const int enable)
{
        u32 cfg;

        cfg = readl(cp->regs + REG_MIF_CFG);
        cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);

        if (cp->phy_type & CAS_PHY_MII_MDIO1)
                cfg |= MIF_CFG_PHY_SELECT;

        /* poll and interrupt on link status change. */
        if (enable) {
@@ -708,8 +708,8 @@ static void cas_mif_poll(struct cas *cp, const int enable)
                cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
                cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
        }
        writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
               cp->regs + REG_MIF_MASK);
        writel(cfg, cp->regs + REG_MIF_CFG);
}

@@ -759,7 +759,7 @@ start_aneg:
        /*
         * WTZ: If the old state was link_up, we turn off the carrier
         * to replicate everything we do elsewhere on a link-down
         * event when we were already in a link-up state..
         */
        if (oldstate == link_up)
                netif_carrier_off(cp->dev);
@@ -767,7 +767,7 @@ start_aneg:
        /*
         * WTZ: This branch will simply schedule a full reset after
         * we explicitly changed link modes in an ioctl. See if this
         * fixes the link-problems we were having for forced mode.
         */
        atomic_inc(&cp->reset_task_pending);
        atomic_inc(&cp->reset_task_pending_all);
@@ -795,7 +795,7 @@ start_aneg:
        } else {
                cas_mif_poll(cp, 0);
                ctl = cas_phy_read(cp, MII_BMCR);
                ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
                         CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
                ctl |= cp->link_cntl;
                if (ctl & BMCR_ANENABLE) {
@@ -818,7 +818,7 @@ static int cas_reset_mii_phy(struct cas *cp)
{
        int limit = STOP_TRIES_PHY;
        u16 val;

        cas_phy_write(cp, MII_BMCR, BMCR_RESET);
        udelay(100);
        while (limit--) {
@@ -901,17 +901,17 @@ static void cas_phy_init(struct cas *cp)
                        val = cas_phy_read(cp, BROADCOM_MII_REG4);
                        if (val & 0x0080) {
                                /* link workaround */
                                cas_phy_write(cp, BROADCOM_MII_REG4,
                                              val & ~0x0080);
                        }

                } else if (cp->cas_flags & CAS_FLAG_SATURN) {
                        writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
                               SATURN_PCFG_FSI : 0x0,
                               cp->regs + REG_SATURN_PCFG);

                        /* load firmware to address 10Mbps auto-negotiation
                         * issue. NOTE: this will need to be changed if the
                         * default firmware gets fixed.
                         */
                        if (PHY_NS_DP83065 == cp->phy_id) {
@@ -930,9 +930,9 @@ static void cas_phy_init(struct cas *cp)
                              cas_phy_read(cp, MII_ADVERTISE) |
                              (ADVERTISE_10HALF | ADVERTISE_10FULL |
                               ADVERTISE_100HALF | ADVERTISE_100FULL |
                               CAS_ADVERTISE_PAUSE |
                               CAS_ADVERTISE_ASYM_PAUSE));

                if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
                        /* make sure that we don't advertise half
                         * duplex to avoid a chip issue
@@ -963,7 +963,7 @@ static void cas_phy_init(struct cas *cp)
                limit = STOP_TRIES;
                while (limit-- > 0) {
                        udelay(10);
                        if ((readl(cp->regs + REG_PCS_MII_CTRL) &
                             PCS_MII_RESET) == 0)
                                break;
                }
@@ -980,7 +980,7 @@ static void cas_phy_init(struct cas *cp)
                /* Advertise all capabilities except half-duplex. */
                val = readl(cp->regs + REG_PCS_MII_ADVERT);
                val &= ~PCS_MII_ADVERT_HD;
                val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
                        PCS_MII_ADVERT_ASYM_PAUSE);
                writel(val, cp->regs + REG_PCS_MII_ADVERT);

@@ -1014,7 +1014,7 @@ static int cas_pcs_link_check(struct cas *cp)
                     PCS_MII_STATUS_REMOTE_FAULT)) ==
                    (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT)) {
                        if (netif_msg_link(cp))
                                printk(KERN_INFO "%s: PCS RemoteFault\n",
                                       cp->dev->name);
                }

@@ -1033,7 +1033,7 @@ static int cas_pcs_link_check(struct cas *cp)
                if (cp->opened) {
                        cp->lstate = link_up;
                        cp->link_transition = LINK_TRANSITION_LINK_UP;

                        cas_set_link_modes(cp);
                        netif_carrier_on(cp->dev);
                }
@@ -1044,8 +1044,8 @@ static int cas_pcs_link_check(struct cas *cp)
                    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
                    !cp->link_transition_jiffies_valid) {
                        /*
                         * force a reset, as a workaround for the
                         * link-failure problem. May want to move this to a
                         * point a bit earlier in the sequence. If we had
                         * generated a reset a short time ago, we'll wait for
                         * the link timer to check the status until a
@@ -1103,17 +1103,17 @@ static int cas_pcs_link_check(struct cas *cp)
        return retval;
}

static int cas_pcs_interrupt(struct net_device *dev,
                             struct cas *cp, u32 status)
{
        u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);

        if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
                return 0;
        return cas_pcs_link_check(cp);
}

static int cas_txmac_interrupt(struct net_device *dev,
                               struct cas *cp, u32 status)
{
        u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);
@@ -1168,7 +1168,7 @@ static int cas_txmac_interrupt(struct net_device *dev,
        return 0;
}

static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
{
        cas_hp_inst_t *inst;
        u32 val;
@@ -1203,12 +1203,12 @@ static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)

static void cas_init_rx_dma(struct cas *cp)
{
        u64 desc_dma = cp->block_dvma;
        u32 val;
        int i, size;

        /* rx free descriptors */
        val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
        val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
        val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
        if ((N_RX_DESC_RINGS > 1) &&
@@ -1216,27 +1216,27 @@ static void cas_init_rx_dma(struct cas *cp)
                val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
        writel(val, cp->regs + REG_RX_CFG);

        val = (unsigned long) cp->init_rxds[0] -
                (unsigned long) cp->init_block;
        writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
        writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
        writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);

        if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
                /* rx desc 2 is for IPSEC packets. however,
                 * we don't it that for that purpose.
                 */
                val = (unsigned long) cp->init_rxds[1] -
                        (unsigned long) cp->init_block;
                writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
                writel((desc_dma + val) & 0xffffffff, cp->regs +
                       REG_PLUS_RX_DB1_LOW);
                writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
                       REG_PLUS_RX_KICK1);
        }

        /* rx completion registers */
        val = (unsigned long) cp->init_rxcs[0] -
                (unsigned long) cp->init_block;
        writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
        writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);
@@ -1244,11 +1244,11 @@ static void cas_init_rx_dma(struct cas *cp)
        if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
                /* rx comp 2-4 */
                for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
                        val = (unsigned long) cp->init_rxcs[i] -
                                (unsigned long) cp->init_block;
                        writel((desc_dma + val) >> 32, cp->regs +
                               REG_PLUS_RX_CBN_HI(i));
                        writel((desc_dma + val) & 0xffffffff, cp->regs +
                               REG_PLUS_RX_CBN_LOW(i));
                }
        }
@@ -1265,21 +1265,21 @@ static void cas_init_rx_dma(struct cas *cp)

                /* 2 is different from 3 and 4 */
                if (N_RX_COMP_RINGS > 1)
                        writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
                               cp->regs + REG_PLUS_ALIASN_CLEAR(1));

                for (i = 2; i < N_RX_COMP_RINGS; i++)
                        writel(INTR_RX_DONE_ALT,
                               cp->regs + REG_PLUS_ALIASN_CLEAR(i));
        }

        /* set up pause thresholds */
        val = CAS_BASE(RX_PAUSE_THRESH_OFF,
                       cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
        val |= CAS_BASE(RX_PAUSE_THRESH_ON,
                        cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
        writel(val, cp->regs + REG_RX_PAUSE_THRESH);

        /* zero out dma reassembly buffers */
        for (i = 0; i < 64; i++) {
                writel(i, cp->regs + REG_RX_TABLE_ADDR);
@@ -1318,7 +1318,7 @@ static void cas_init_rx_dma(struct cas *cp)
         * this should be tunable.
         */
        writel(0x0, cp->regs + REG_RX_RED);

        /* receive page sizes. default == 2K (0x800) */
        val = 0;
        if (cp->page_size == 0x1000)
@@ -1327,7 +1327,7 @@ static void cas_init_rx_dma(struct cas *cp)
                val = 0x2;
        else if (cp->page_size == 0x4000)
                val = 0x3;

        /* round mtu + offset. constrain to page size. */
        size = cp->dev->mtu + 64;
        if (size > cp->page_size)
@@ -1344,11 +1344,11 @@ static void cas_init_rx_dma(struct cas *cp)

        cp->mtu_stride = 1 << (i + 10);
        val = CAS_BASE(RX_PAGE_SIZE, val);
        val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
        val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
        val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
        writel(val, cp->regs + REG_RX_PAGE_SIZE);

        /* enable the header parser if desired */
        if (CAS_HP_FIRMWARE == cas_prog_null)
                return;
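As a worked example of the stride math (assuming a 1500-byte MTU, 8 KiB pages, and that the rounding step elided between these hunks picks the smallest power of two of at least 1 KiB that holds size): size = 1500 + 64 = 1564 rounds up to 2048 = 1 << 11, i.e. i = 1, so mtu_stride = 1 << (1 + 10) = 2048 and RX_PAGE_SIZE_MTU_COUNT = 0x2000 >> 11 = 4 MTU-sized buffers per page.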
@@ -1362,7 +1362,7 @@ static void cas_init_rx_dma(struct cas *cp)
static inline void cas_rxc_init(struct cas_rx_comp *rxc)
{
        memset(rxc, 0, sizeof(*rxc));
        rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
}

/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
@@ -1385,9 +1385,9 @@ static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
        }
        return new;
}

/* this needs to be changed if we actually use the ENC RX DESC ring */
static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
                                 const int index)
{
        cas_page_t **page0 = cp->rx_pages[0];
@@ -1400,7 +1400,7 @@ static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
                        page1[index] = page0[index];
                        page0[index] = new;
                }
        }
        RX_USED_SET(page0[index], 0);
        return page0[index];
}
@@ -1424,11 +1424,11 @@ static void cas_clean_rxds(struct cas *cp)
        for (i = 0; i < size; i++) {
                cas_page_t *page = cas_page_swap(cp, 0, i);
                rxd[i].buffer = cpu_to_le64(page->dma_addr);
                rxd[i].index = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
                                           CAS_BASE(RX_INDEX_RING, 0));
        }

        cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4;
        cp->rx_last[0] = 0;
        cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
}
@@ -1533,7 +1533,7 @@ static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,

        /* these are all rollovers */
        spin_lock(&cp->stat_lock[0]);
        if (stat & MAC_RX_ALIGN_ERR)
                cp->net_stats[0].rx_frame_errors += 0x10000;

        if (stat & MAC_RX_CRC_ERR)
@@ -1579,12 +1579,12 @@ static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
        return 0;
}


/* Must be invoked under cp->lock. */
static inline int cas_mdio_link_not_up(struct cas *cp)
{
        u16 val;

        switch (cp->lstate) {
        case link_force_ret:
                if (netif_msg_link(cp))
@@ -1595,7 +1595,7 @@ static inline int cas_mdio_link_not_up(struct cas *cp)
                cp->lstate = link_force_ok;
                cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
                break;

        case link_aneg:
                val = cas_phy_read(cp, MII_BMCR);

@@ -1604,7 +1604,7 @@ static inline int cas_mdio_link_not_up(struct cas *cp)
                 */
                val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
                val |= BMCR_FULLDPLX;
                val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
                        CAS_BMCR_SPEED1000 : BMCR_SPEED100;
                cas_phy_write(cp, MII_BMCR, val);
                cp->timer_ticks = 5;
@@ -1646,11 +1646,11 @@ static int cas_mii_link_check(struct cas *cp, const u16 bmsr)

        if (bmsr & BMSR_LSTATUS) {
                /* Ok, here we got a link. If we had it due to a forced
                 * fallback, and we were configured for autoneg, we
                 * retry a short autoneg pass. If you know your hub is
                 * broken, use ethtool ;)
                 */
                if ((cp->lstate == link_force_try) &&
                    (cp->link_cntl & BMCR_ANENABLE)) {
                        cp->lstate = link_force_ret;
                        cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
@@ -1690,10 +1690,10 @@ static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
                        printk(KERN_INFO "%s: Link down\n",
                               cp->dev->name);
                restart = 1;

        } else if (++cp->timer_ticks > 10)
                cas_mdio_link_not_up(cp);

        return restart;
}

@@ -1908,7 +1908,7 @@ static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)

                skbs[entry] = NULL;
                cp->tx_tiny_use[ring][entry].nbufs = 0;

                for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
                        struct cas_tx_desc *txd = txds + entry;

@@ -1923,7 +1923,7 @@ static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
                        if (cp->tx_tiny_use[ring][entry].used) {
                                cp->tx_tiny_use[ring][entry].used = 0;
                                entry = TX_DESC_NEXT(ring, entry);
                        }
                }

                spin_lock(&cp->stat_lock[ring]);
@@ -1964,14 +1964,14 @@ static void cas_tx(struct net_device *dev, struct cas *cp,
#else
                limit = readl(cp->regs + REG_TX_COMPN(ring));
#endif
                if (cp->tx_old[ring] != limit)
                        cas_tx_ringN(cp, ring, limit);
        }
}


static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
                              int entry, const u64 *words,
                              struct sk_buff **skbref)
{
        int dlen, hlen, len, i, alloclen;
@@ -1979,19 +1979,19 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
        struct cas_page *page;
        struct sk_buff *skb;
        void *addr, *crcaddr;
        char *p;

        hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
        dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
        len = hlen + dlen;

        if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
                alloclen = len;
        else
                alloclen = max(hlen, RX_COPY_MIN);

        skb = dev_alloc_skb(alloclen + swivel + cp->crc_size);
        if (skb == NULL)
                return -1;

        *skbref = skb;
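Throughout the driver, hardware words are packed with CAS_BASE() and unpacked with CAS_VAL(), as in the hlen/dlen extraction above. Judging from their use, they are conventional shift-and-mask field helpers; a self-contained sketch of how such macros are typically defined (illustrative field and constants, not the driver's exact definitions, which live in cassini.h):

        #include <stdint.h>
        #include <stdio.h>

        /* Hypothetical field occupying bits 15..8 of a word. */
        #define HDR_SIZE_SHIFT  8
        #define HDR_SIZE_MASK   (0xffULL << HDR_SIZE_SHIFT)

        /* Pack a value into its field position (CAS_BASE-style). */
        #define FIELD_BASE(name, val) \
                (((uint64_t)(val) << name##_SHIFT) & name##_MASK)

        /* Extract a field from a packed word (CAS_VAL-style). */
        #define FIELD_VAL(name, word) \
                (((uint64_t)(word) & name##_MASK) >> name##_SHIFT)

        int main(void)
        {
                uint64_t word = FIELD_BASE(HDR_SIZE, 0x42);

                /* prints "42" */
                printf("%llx\n", (unsigned long long)FIELD_VAL(HDR_SIZE, word));
                return 0;
        }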
@@ -2003,7 +2003,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
        if (hlen) { /* always copy header pages */
                i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
                page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
                off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
                        swivel;

                i = hlen;
@@ -2019,7 +2019,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
                RX_USED_ADD(page, 0x100);
                p += hlen;
                swivel = 0;
        }


        if (alloclen < (hlen + dlen)) {
@@ -2070,7 +2070,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
                frag->page = page->buffer;
                frag->page_offset = off;
                frag->size = hlen - swivel;

                /* any more data? */
                if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
                        hlen = dlen;
@@ -2078,8 +2078,8 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,

                        i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
                        page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
                        pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
                                                    hlen + cp->crc_size,
                                                    PCI_DMA_FROMDEVICE);
                        pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
                                                       hlen + cp->crc_size,
@@ -2087,7 +2087,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,

                        skb_shinfo(skb)->nr_frags++;
                        skb->data_len += hlen;
                        skb->len += hlen;
                        frag++;

                        get_page(page->buffer);
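The pci_dma_sync_single_for_cpu()/pci_dma_sync_single_for_device() pair seen here is the streaming-DMA ownership handoff: the first call makes device-written bytes visible to the CPU before the CPU touches (or hands off) the buffer, and the second returns ownership to the device. In the copy path further down, the same pair brackets a memcpy() out of the DMA page. A sketch of the idiom in the modern dma_* spelling (dev, paddr, dst, va and len are placeholders; the driver uses the older pci_dma_* wrappers with PCI_DMA_FROMDEVICE):

        /* Streaming-DMA read idiom (kernel context sketch): */
        dma_sync_single_for_cpu(dev, paddr, len, DMA_FROM_DEVICE);
        memcpy(dst, va, len);   /* CPU may now safely read the DMA'd bytes */
        dma_sync_single_for_device(dev, paddr, len, DMA_FROM_DEVICE);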
@@ -2134,14 +2134,14 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
                        RX_USED_ADD(page, cp->mtu_stride);
                else
                        RX_USED_ADD(page, i);

                /* any more data? */
                if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
                        p += hlen;
                        i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
                        page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
                        pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
                                                    dlen + cp->crc_size,
                                                    PCI_DMA_FROMDEVICE);
                        addr = cas_page_map(page->buffer);
                        memcpy(p, addr, dlen + cp->crc_size);
@@ -2149,7 +2149,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
                                                       dlen + cp->crc_size,
                                                       PCI_DMA_FROMDEVICE);
                        cas_page_unmap(addr);
                        RX_USED_ADD(page, dlen + cp->crc_size);
                }
end_copy_pkt:
                if (cp->crc_size) {
@@ -2174,7 +2174,7 @@ end_copy_pkt:


/* we can handle up to 64 rx flows at a time. we do the same thing
 * as nonreassm except that we batch up the buffers.
 * NOTE: we currently just treat each flow as a bunch of packets that
 * we pass up. a better way would be to coalesce the packets
 * into a jumbo packet. to do that, we need to do the following:
@@ -2184,7 +2184,7 @@ end_copy_pkt:
 *      data length and merge the checksums.
 *   3) on flow release, fix up the header.
 *   4) make sure the higher layer doesn't care.
 * because packets get coalesced, we shouldn't run into fragment count
 * issues.
 */
static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
@@ -2192,8 +2192,8 @@ static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
{
        int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
        struct sk_buff_head *flow = &cp->rx_flows[flowid];

        /* this is protected at a higher layer, so no need to
         * do any additional locking here. stick the buffer
         * at the end.
         */
@@ -2218,19 +2218,19 @@ static void cas_post_page(struct cas *cp, const int ring, const int index)
        new = cas_page_swap(cp, ring, index);
        cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
        cp->init_rxds[ring][entry].index =
                cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
                            CAS_BASE(RX_INDEX_RING, ring));

        entry = RX_DESC_ENTRY(ring, entry + 1);
        cp->rx_old[ring] = entry;

        if (entry % 4)
                return;

        if (ring == 0)
                writel(entry, cp->regs + REG_RX_KICK);
        else if ((N_RX_DESC_RINGS > 1) &&
                 (cp->cas_flags & CAS_FLAG_REG_PLUS))
                writel(entry, cp->regs + REG_PLUS_RX_KICK1);
}

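The `entry % 4` early return above means the RX kick (doorbell) register is written only when the producer index crosses a multiple of four, batching descriptor posts and saving MMIO writes; presumably this matches the hardware's descriptor-fetch granularity, and the same rule shows up just below in cas_post_rxds_ringN(), which also kicks in clusters of four. A generic sketch of the batched-doorbell idiom (names are illustrative):

        #define KICK_BATCH 4

        /* Post one descriptor; only ring the doorbell every KICK_BATCH
         * posts so the device fetches descriptors in aligned groups. */
        static void post_one(unsigned int *producer,
                             void (*ring_doorbell)(unsigned int))
        {
                *producer += 1;
                if (*producer % KICK_BATCH == 0)
                        ring_doorbell(*producer);
        }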
@@ -2249,7 +2249,7 @@ static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
                       cp->dev->name, ring, entry);

        cluster = -1;
        count = entry & 0x3;
        last = RX_DESC_ENTRY(ring, num ? entry + num - 4: entry - 4);
        released = 0;
        while (entry != last) {
@@ -2257,12 +2257,12 @@ static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
                if (cas_buffer_count(page[entry]) > 1) {
                        cas_page_t *new = cas_page_dequeue(cp);
                        if (!new) {
                                /* let the timer know that we need to
                                 * do this again
                                 */
                                cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
                                if (!timer_pending(&cp->link_timer))
                                        mod_timer(&cp->link_timer, jiffies +
                                                  CAS_LINK_FAST_TIMEOUT);
                                cp->rx_old[ring] = entry;
                                cp->rx_last[ring] = num ? num - released : 0;
@@ -2271,10 +2271,10 @@ static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
                        spin_lock(&cp->rx_inuse_lock);
                        list_add(&page[entry]->list, &cp->rx_inuse_list);
                        spin_unlock(&cp->rx_inuse_lock);
                        cp->init_rxds[ring][entry].buffer =
                                cpu_to_le64(new->dma_addr);
                        page[entry] = new;

                }

                if (++count == 4) {
@@ -2286,13 +2286,13 @@ static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
        }
        cp->rx_old[ring] = entry;

        if (cluster < 0)
                return 0;

        if (ring == 0)
                writel(cluster, cp->regs + REG_RX_KICK);
        else if ((N_RX_DESC_RINGS > 1) &&
                 (cp->cas_flags & CAS_FLAG_REG_PLUS))
                writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
        return 0;
}
@@ -2301,14 +2301,14 @@ static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)

/* process a completion ring. packets are set up in three basic ways:
 * small packets: should be copied header + data in single buffer.
 * large packets: header and data in a single buffer.
 * split packets: header in a separate buffer from data.
 *                data may be in multiple pages. data may be > 256
 *                bytes but in a single page.
 *
 * NOTE: RX page posting is done in this routine as well. while there's
 *       the capability of using multiple RX completion rings, it isn't
 *       really worthwhile due to the fact that the page posting will
 *       force serialization on the single descriptor ring.
 */
static int cas_rx_ringN(struct cas *cp, int ring, int budget)
{
@@ -2319,7 +2319,7 @@ static int cas_rx_ringN(struct cas *cp, int ring, int budget)
        if (netif_msg_intr(cp))
                printk(KERN_DEBUG "%s: rx[%d] interrupt, done: %d/%d\n",
                       cp->dev->name, ring,
                       readl(cp->regs + REG_RX_COMP_HEAD),
                       cp->rx_new[ring]);

        entry = cp->rx_new[ring];
@@ -2375,7 +2375,7 @@ static int cas_rx_ringN(struct cas *cp, int ring, int budget)
                 */
                if (RX_DONT_BATCH || (type == 0x2)) {
                        /* non-reassm: these always get released */
                        cas_skb_release(skb);
                } else {
                        cas_rx_flow_pkt(cp, words, skb);
                }
@@ -2396,7 +2396,7 @@ static int cas_rx_ringN(struct cas *cp, int ring, int budget)
                        i = CAS_VAL(RX_INDEX_NUM, i);
                        cas_post_page(cp, dring, i);
                }

                if (words[0] & RX_COMP1_RELEASE_DATA) {
                        i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
                        dring = CAS_VAL(RX_INDEX_RING, i);
@@ -2412,7 +2412,7 @@ static int cas_rx_ringN(struct cas *cp, int ring, int budget)
                }

                /* skip to the next entry */
                entry = RX_COMP_ENTRY(ring, entry + 1 +
                                      CAS_VAL(RX_COMP1_SKIP, words[0]));
#ifdef USE_NAPI
                if (budget && (npackets >= budget))
@@ -2436,12 +2436,12 @@ static void cas_post_rxcs_ringN(struct net_device *dev,
        int last, entry;

        last = cp->rx_cur[ring];
        entry = cp->rx_new[ring];
        if (netif_msg_intr(cp))
                printk(KERN_DEBUG "%s: rxc[%d] interrupt, done: %d/%d\n",
                       dev->name, ring, readl(cp->regs + REG_RX_COMP_HEAD),
                       entry);

        /* zero and re-mark descriptors */
        while (last != entry) {
                cas_rxc_init(rxc + last);
@@ -2451,21 +2451,21 @@ static void cas_post_rxcs_ringN(struct net_device *dev,

        if (ring == 0)
                writel(last, cp->regs + REG_RX_COMP_TAIL);
        else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
                writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
}



/* cassini can use all four PCI interrupts for the completion ring.
 * rings 3 and 4 are identical
 */
#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
static inline void cas_handle_irqN(struct net_device *dev,
                                   struct cas *cp, const u32 status,
                                   const int ring)
{
        if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
                cas_post_rxcs_ringN(dev, cp, ring);
}

@@ -2505,7 +2505,7 @@ static irqreturn_t cas_interruptN(int irq, void *dev_id, struct pt_regs *regs)
static inline void cas_handle_irq1(struct cas *cp, const u32 status)
{
        if (status & INTR_RX_BUF_UNAVAIL_1) {
                /* Frame arrived, no free RX buffers available.
                 * NOTE: we can get this on a link transition. */
                cas_post_rxds_ringN(cp, 1, 0);
                spin_lock(&cp->stat_lock[1]);
@@ -2513,8 +2513,8 @@ static inline void cas_handle_irq1(struct cas *cp, const u32 status)
                spin_unlock(&cp->stat_lock[1]);
        }

        if (status & INTR_RX_BUF_AE_1)
                cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
                                    RX_AE_FREEN_VAL(1));

        if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
@@ -2558,7 +2558,7 @@ static inline void cas_handle_irq(struct net_device *dev,
                cas_abnormal_irq(dev, cp, status);

        if (status & INTR_RX_BUF_UNAVAIL) {
                /* Frame arrived, no free RX buffers available.
                 * NOTE: we can get this on a link transition.
                 */
                cas_post_rxds_ringN(cp, 0, 0);
@@ -2625,7 +2625,7 @@ static int cas_poll(struct net_device *dev, int *budget)
        todo = min(*budget, dev->quota);

        /* to make sure we're fair with the work we loop through each
         * ring N_RX_COMP_RING times with a request of
         * todo / N_RX_COMP_RINGS
         */
        enable_intr = 1;
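The comment spells out the fairness scheme cas_poll() uses: rather than letting ring 0 consume the whole NAPI budget, the budget is sliced into todo / N_RX_COMP_RINGS chunks and the rings are visited round-robin, N_RX_COMP_RINGS passes in all. A compact, self-contained sketch of that loop structure (ring_work() is a stand-in for cas_rx_ringN()):

        #define N_RINGS 4

        /* Stand-in for cas_rx_ringN(): process up to 'budget' packets
         * on 'ring' and report how many were handled. */
        static int ring_work(int ring, int budget)
        {
                (void)ring;
                return budget;  /* pretend the ring was fully busy */
        }

        /* Visit every ring N_RINGS times, handing each visit an equal
         * slice of the budget, and stop once the budget is spent. */
        static int poll_fairly(int todo)
        {
                int pass, ring, done = 0;

                for (pass = 0; pass < N_RINGS; pass++)
                        for (ring = 0; ring < N_RINGS; ring++) {
                                done += ring_work(ring, todo / N_RINGS);
                                if (done >= todo)
                                        return done;
                        }
                return done;
        }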
@@ -2784,13 +2784,13 @@ static void cas_write_txd(struct cas *cp, int ring, int entry,
2784 txd->buffer = cpu_to_le64(mapping); 2784 txd->buffer = cpu_to_le64(mapping);
2785} 2785}
2786 2786
2787static inline void *tx_tiny_buf(struct cas *cp, const int ring, 2787static inline void *tx_tiny_buf(struct cas *cp, const int ring,
2788 const int entry) 2788 const int entry)
2789{ 2789{
2790 return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry; 2790 return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
2791} 2791}
2792 2792
2793static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring, 2793static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
2794 const int entry, const int tentry) 2794 const int entry, const int tentry)
2795{ 2795{
2796 cp->tx_tiny_use[ring][tentry].nbufs++; 2796 cp->tx_tiny_use[ring][tentry].nbufs++;
@@ -2798,7 +2798,7 @@ static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
2798 return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry; 2798 return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
2799} 2799}
2800 2800
2801static inline int cas_xmit_tx_ringN(struct cas *cp, int ring, 2801static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
2802 struct sk_buff *skb) 2802 struct sk_buff *skb)
2803{ 2803{
2804 struct net_device *dev = cp->dev; 2804 struct net_device *dev = cp->dev;
@@ -2811,7 +2811,7 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
2811 spin_lock_irqsave(&cp->tx_lock[ring], flags); 2811 spin_lock_irqsave(&cp->tx_lock[ring], flags);
2812 2812
2813 /* This is a hard error, log it. */ 2813 /* This is a hard error, log it. */
2814 if (TX_BUFFS_AVAIL(cp, ring) <= 2814 if (TX_BUFFS_AVAIL(cp, ring) <=
2815 CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) { 2815 CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
2816 netif_stop_queue(dev); 2816 netif_stop_queue(dev);
2817 spin_unlock_irqrestore(&cp->tx_lock[ring], flags); 2817 spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
@@ -2827,7 +2827,7 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
2827		csum_start_off = (u64) (skb->h.raw - skb->data);
2828		csum_stuff_off = (u64) ((skb->h.raw + skb->csum) - skb->data);
2829
2830		ctrl = TX_DESC_CSUM_EN |
2831			CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
2832			CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
2833	}
@@ -2845,17 +2845,17 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
2845	tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
2846	if (unlikely(tabort)) {
2847		/* NOTE: len is always > tabort */
2848		cas_write_txd(cp, ring, entry, mapping, len - tabort,
2849			      ctrl | TX_DESC_SOF, 0);
2850		entry = TX_DESC_NEXT(ring, entry);
2851
2852		memcpy(tx_tiny_buf(cp, ring, entry), skb->data +
2853		       len - tabort, tabort);
2854		mapping = tx_tiny_map(cp, ring, entry, tentry);
2855		cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
2856			      (nr_frags == 0));
2857	} else {
2858		cas_write_txd(cp, ring, entry, mapping, len, ctrl |
2859			      TX_DESC_SOF, (nr_frags == 0));
2860	}
2861	entry = TX_DESC_NEXT(ring, entry);
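When cas_calc_tabort reports a risky buffer, the hunk above splits the transmit into two descriptors: the first len - tabort bytes are DMA-mapped straight from the skb, and the trailing tabort bytes are copied through a tiny bounce buffer so the device never DMAs across the problem boundary. The exact rule inside cas_calc_tabort is not visible in this hunk, so the sketch below only models the split itself, with a made-up tabort value:

#include <stdio.h>
#include <string.h>

/* hypothetical split mirroring the len - tabort / tabort logic above */
static void xmit_split(const unsigned char *data, int len, int tabort)
{
	static unsigned char bounce[64];	/* stand-in tiny buffer */

	if (tabort == 0) {
		printf("one descriptor: %d bytes direct\n", len);
		return;
	}
	/* head: mapped straight from the packet (len is always > tabort) */
	printf("descriptor 1: %d bytes direct\n", len - tabort);
	/* tail: bounced, so the device never DMAs the risky region */
	memcpy(bounce, data + len - tabort, tabort);
	printf("descriptor 2: %d bytes via bounce buffer\n", tabort);
}

int main(void)
{
	unsigned char pkt[100] = {0};

	xmit_split(pkt, sizeof(pkt), 12);	/* 12 is a made-up tabort */
	return 0;
}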
@@ -2876,10 +2876,10 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
2876		cas_write_txd(cp, ring, entry, mapping, len - tabort,
2877			      ctrl, 0);
2878		entry = TX_DESC_NEXT(ring, entry);
2879
2880		addr = cas_page_map(fragp->page);
2881		memcpy(tx_tiny_buf(cp, ring, entry),
2882		       addr + fragp->page_offset + len - tabort,
2883		       tabort);
2884		cas_page_unmap(addr);
2885		mapping = tx_tiny_map(cp, ring, entry, tentry);
@@ -2898,12 +2898,12 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
2898	if (netif_msg_tx_queued(cp))
2899		printk(KERN_DEBUG "%s: tx[%d] queued, slot %d, skblen %d, "
2900		       "avail %d\n",
2901		       dev->name, ring, entry, skb->len,
2902		       TX_BUFFS_AVAIL(cp, ring));
2903	writel(entry, cp->regs + REG_TX_KICKN(ring));
2904	spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2905	return 0;
2906	}
2907
2908	static int cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
2909	{
@@ -2912,7 +2912,7 @@ static int cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
2912	/* this is only used as a load-balancing hint, so it doesn't
2913	 * need to be SMP safe
2914	 */
2915	static int ring;
2916
2917	if (skb_padto(skb, cp->min_frame_size))
2918		return 0;
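The deliberately non-atomic static counter above is just a hint: a stale or torn read only skews which TX ring a packet lands on, it cannot corrupt state, so no locking is justified. How the driver advances the counter is not shown in this hunk; one plausible scheme is a plain round-robin over a power-of-two ring count, sketched here with assumed names:

#include <stdio.h>

#define N_TX_RINGS 2	/* illustrative; must be a power of two for the mask */

/* intentionally racy, like the driver's hint: worst case is a
 * slightly uneven spread of packets across rings */
static int pick_ring(void)
{
	static int ring;

	return ring++ & (N_TX_RINGS - 1);
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("packet %d -> ring %d\n", i, pick_ring());
	return 0;	/* 0, 1, 0, 1, 0 */
}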
@@ -2943,14 +2943,14 @@ static void cas_init_tx_dma(struct cas *cp)
2943	/* enable completion writebacks, enable paced mode,
2944	 * disable read pipe, and disable pre-interrupt compwbs
2945	 */
2946	val =   TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
2947		TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
2948		TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
2949		TX_CFG_INTR_COMPWB_DIS;
2950
2951	/* write out tx ring info and tx desc bases */
2952	for (i = 0; i < MAX_TX_RINGS; i++) {
2953		off = (unsigned long) cp->init_txds[i] -
2954			(unsigned long) cp->init_block;
2955
2956		val |= CAS_TX_RINGN_BASE(i);
@@ -2991,7 +2991,7 @@ static u32 cas_setup_multicast(struct cas *cp)
2991	{
2992		u32 rxcfg = 0;
2993		int i;
2994
2995		if (cp->dev->flags & IFF_PROMISC) {
2996			rxcfg |= MAC_RX_CFG_PROMISC_EN;
2997
@@ -3016,16 +3016,16 @@ static u32 cas_setup_multicast(struct cas *cp)
3016			writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 2));
3017			continue;
3018		}
3019		writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5],
3020		       cp->regs + REG_MAC_ADDRN(i*3 + 0));
3021		writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3],
3022		       cp->regs + REG_MAC_ADDRN(i*3 + 1));
3023		writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1],
3024		       cp->regs + REG_MAC_ADDRN(i*3 + 2));
3025		dmi = dmi->next;
3026	}
3027
3028	/* use hw hash table for the next series of
3029	 * multicast addresses
3030	 */
3031	memset(hash_table, 0, sizeof(hash_table));
@@ -3036,7 +3036,7 @@ static u32 cas_setup_multicast(struct cas *cp)
3036		dmi = dmi->next;
3037	}
3038	for (i=0; i < 16; i++)
3039		writel(hash_table[i], cp->regs +
3040		       REG_MAC_HASH_TABLEN(i));
3041	rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
3042	}
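Multicast addresses beyond the exact-match slots are filtered through a 256-bit hash table spread across sixteen 16-bit HASH_TABLE registers, as the writel loop above shows. The per-address bit computation is elided by these hunks; the GEM-family Sun drivers conventionally take the top byte of a little-endian Ethernet CRC-32 (the kernel's ether_crc_le: seed ~0, no final XOR) and map it to one of the 256 bits. A standalone sketch under that assumption, with a local bitwise CRC in place of the kernel helper:

#include <stdint.h>
#include <stdio.h>

/* bitwise little-endian CRC-32, intended to mirror ether_crc_le():
 * seed 0xffffffff, polynomial 0xedb88320, no final inversion */
static uint32_t crc32_le(int len, const uint8_t *p)
{
	uint32_t crc = ~0u;

	for (int i = 0; i < len; i++) {
		crc ^= p[i];
		for (int b = 0; b < 8; b++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
	}
	return crc;
}

int main(void)
{
	uint16_t hash_table[16] = {0};
	const uint8_t mac[6] = {0x01, 0x00, 0x5e, 0x00, 0x00, 0x01};

	/* assumed mapping: top 8 CRC bits select 1 of 256 filter bits */
	uint32_t crc = crc32_le(6, mac) >> 24;

	hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
	for (int i = 0; i < 16; i++)
		printf("HASH_TABLE%d = 0x%04x\n", i, hash_table[i]);
	return 0;
}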
@@ -3121,23 +3121,23 @@ static void cas_init_mac(struct cas *cp)
3121	writel(0x00, cp->regs + REG_MAC_IPG0);
3122	writel(0x08, cp->regs + REG_MAC_IPG1);
3123	writel(0x04, cp->regs + REG_MAC_IPG2);
3124
3125	/* change later for 802.3z */
3126	writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3127
3128	/* min frame + FCS */
3129	writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);
3130
3131	/* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we
3132	 * specify the maximum frame size to prevent RX tag errors on
3133	 * oversized frames.
3134	 */
3135	writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
3136	       CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
3137			(CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
3138	       cp->regs + REG_MAC_FRAMESIZE_MAX);
3139
3140	/* NOTE: crc_size is used as a surrogate for half-duplex.
3141	 * workaround saturn half-duplex issue by increasing preamble
3142	 * size to 65 bytes.
3143	 */
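The MAX_FRAME field above is CAS_MAX_MTU + ETH_HLEN + 4 + 4: payload, 14-byte Ethernet header, 4-byte FCS, and room for a 4-byte 802.1Q tag. A worked example with a standard 1500-byte MTU (CAS_MAX_MTU itself is larger, since Cassini supports jumbo frames; 1500 here is only to show the arithmetic):

#include <stdio.h>

int main(void)
{
	int mtu      = 1500;	/* illustrative; CAS_MAX_MTU is jumbo-sized */
	int eth_hlen = 14;	/* dst(6) + src(6) + ethertype(2) = ETH_HLEN */
	int fcs      = 4;	/* CRC trailer */
	int vlan     = 4;	/* optional 802.1Q tag */

	/* same formula as the writel() above */
	printf("MAC_FRAMESIZE_MAX_FRAME = %d\n", mtu + eth_hlen + fcs + vlan);
	return 0;	/* prints 1522 for a standard MTU */
}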
@@ -3180,7 +3180,7 @@ static void cas_init_mac(struct cas *cp)
3180	 * spin_lock_irqsave, but we are called only in cas_init_hw and
3181	 * cas_init_hw is protected by cas_lock_all, which calls
3182	 * spin_lock_irq (so it doesn't need to save the flags, and
3183	 * we should be OK for the writel, as that is the only
3184	 * difference).
3185	 */
3186	cp->mac_rx_cfg = rxcfg = cas_setup_multicast(cp);
@@ -3229,7 +3229,7 @@ static int cas_vpd_match(const void __iomem *p, const char *str)
3229	{
3230		int len = strlen(str) + 1;
3231		int i;
3232
3233		for (i = 0; i < len; i++) {
3234			if (readb(p + i) != str[i])
3235				return 0;
@@ -3246,7 +3246,7 @@ static int cas_vpd_match(const void __iomem *p, const char *str)
3246	 *    number.
3247	 * 3) fiber cards don't have bridges, so their slot numbers don't
3248	 *    mean anything.
3249	 * 4) we don't actually know we have a fiber card until after
3250	 *    the mac addresses are parsed.
3251	 */
3252	static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
@@ -3278,15 +3278,15 @@ static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
3278		    (readb(p + i + 1) == 0x43) &&
3279		    (readb(p + i + 2) == 0x49) &&
3280		    (readb(p + i + 3) == 0x52)) {
3281			base = p + (readb(p + i + 8) |
3282				    (readb(p + i + 9) << 8));
3283			break;
3284		}
3285	}
3286
3287	if (!base || (readb(base) != 0x82))
3288		goto use_random_mac_addr;
3289
3290	i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
3291	while (i < EXPANSION_ROM_SIZE) {
3292		if (readb(base + i) != 0x90) /* no vpd found */
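The byte comparisons above (0x50 0x43 0x49 0x52) match the ASCII signature "PCIR" of the PCI data structure inside the expansion ROM; the 16-bit little-endian value at offset 8 of that structure is then followed as a pointer, and 0x82 is the descriptor tag the code insists on before parsing further. A sketch of the same scan over an in-memory buffer (the ROM contents and sizes here are fabricated purely for the demo):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ROM_SIZE 1024	/* stand-in for EXPANSION_ROM_SIZE */

/* find "PCIR" and follow its 16-bit LE pointer at offset 8,
 * mirroring the readb() arithmetic in the hunk above */
static int find_vpd_base(const uint8_t *rom, size_t len)
{
	for (size_t i = 0; i + 9 < len; i++) {
		if (memcmp(rom + i, "PCIR", 4) == 0)
			return rom[i + 8] | (rom[i + 9] << 8);
	}
	return -1;
}

int main(void)
{
	uint8_t rom[ROM_SIZE] = {0};

	memcpy(rom + 64, "PCIR", 4);	/* fake signature */
	rom[64 + 8] = 0x00;		/* fake pointer: 0x0200 */
	rom[64 + 9] = 0x02;
	printf("vpd base = 0x%x\n", find_vpd_base(rom, sizeof(rom)));
	return 0;	/* prints 0x200 */
}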
@@ -3304,20 +3304,20 @@ static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
3304		char type;
3305
3306		p += 3;
3307
3308		/* look for the following things:
3309		 * -- correct length == 29
3310		 *        3 (type) + 2 (size) +
3311		 *        18 (strlen("local-mac-address") + 1) +
3312		 *        6 (mac addr)
3313		 * -- VPD Instance 'I'
3314		 * -- VPD Type Bytes 'B'
3315		 * -- VPD data length == 6
3316		 * -- property string == local-mac-address
3317		 *
3318		 * -- correct length == 24
3319		 *        3 (type) + 2 (size) +
3320		 *        12 (strlen("entropy-dev") + 1) +
3321		 *        7 (strlen("vms110") + 1)
3322		 * -- VPD Instance 'I'
3323		 * -- VPD Type String 'B'
@@ -3325,17 +3325,17 @@ static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
3325		 * -- property string == entropy-dev
3326		 *
3327		 * -- correct length == 18
3328		 *        3 (type) + 2 (size) +
3329		 *        9 (strlen("phy-type") + 1) +
3330		 *        4 (strlen("pcs") + 1)
3331		 * -- VPD Instance 'I'
3332		 * -- VPD Type String 'S'
3333		 * -- VPD data length == 4
3334		 * -- property string == phy-type
3335		 *
3336		 * -- correct length == 23
3337		 *        3 (type) + 2 (size) +
3338		 *        14 (strlen("phy-interface") + 1) +
3339		 *        4 (strlen("pcs") + 1)
3340		 * -- VPD Instance 'I'
3341		 * -- VPD Type String 'S'
@@ -3349,14 +3349,14 @@ static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
3349	type = readb(p + 3);
3350	if (type == 'B') {
3351		if ((klen == 29) && readb(p + 4) == 6 &&
3352		    cas_vpd_match(p + 5,
3353				  "local-mac-address")) {
3354			if (mac_off++ > offset)
3355				goto next;
3356
3357			/* set mac address */
3358			for (j = 0; j < 6; j++)
3359				dev_addr[j] =
3360					readb(p + 23 + j);
3361			goto found_mac;
3362		}
@@ -3366,7 +3366,7 @@ static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
3366			goto next;
3367
3368	#ifdef USE_ENTROPY_DEV
3369			if ((klen == 24) &&
3370			    cas_vpd_match(p + 5, "entropy-dev") &&
3371			    cas_vpd_match(p + 17, "vms110")) {
3372				cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
@@ -3384,7 +3384,7 @@ static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
3384				goto found_phy;
3385			}
3386		}
3387
3388		if ((klen == 23) && readb(p + 4) == 4 &&
3389		    cas_vpd_match(p + 5, "phy-interface")) {
3390			if (cas_vpd_match(p + 19, "pcs")) {
@@ -3462,12 +3462,12 @@ static int cas_check_invariants(struct cas *cp)
3462	int i;
3463
3464	/* get page size for rx buffers. */
3465	cp->page_order = 0;
3466	#ifdef USE_PAGE_ORDER
3467	if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
3468		/* see if we can allocate larger pages */
3469		struct page *page = alloc_pages(GFP_ATOMIC,
3470						CAS_JUMBO_PAGE_SHIFT -
3471						PAGE_SHIFT);
3472		if (page) {
3473			__free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
@@ -3483,15 +3483,15 @@ static int cas_check_invariants(struct cas *cp)
3483	cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
3484	cp->rx_fifo_size = RX_FIFO_SIZE;
3485
3486	/* finish phy determination. MDIO1 takes precedence over MDIO0 if
3487	 * they're both connected.
3488	 */
3489	cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
3490					PCI_SLOT(pdev->devfn));
3491	if (cp->phy_type & CAS_PHY_SERDES) {
3492		cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3493		return 0; /* no more checking needed */
3494	}
3495
3496	/* MII */
3497	cfg = readl(cp->regs + REG_MIF_CFG);
@@ -3525,7 +3525,7 @@ static int cas_check_invariants(struct cas *cp)
3525	done:
3526	/* see if we can do gigabit */
3527	cfg = cas_phy_read(cp, MII_BMSR);
3528	if ((cfg & CAS_BMSR_1000_EXTEND) &&
3529	    cas_phy_read(cp, CAS_MII_1000_EXTEND))
3530		cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3531	return 0;
@@ -3537,7 +3537,7 @@ static inline void cas_start_dma(struct cas *cp)
3537	int i;
3538	u32 val;
3539	int txfailed = 0;
3540
3541	/* enable dma */
3542	val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
3543	writel(val, cp->regs + REG_TX_CFG);
@@ -3563,8 +3563,8 @@ static inline void cas_start_dma(struct cas *cp)
3563	val = readl(cp->regs + REG_MAC_RX_CFG);
3564	if ((val & MAC_RX_CFG_EN)) {
3565		if (txfailed) {
3566			printk(KERN_ERR
3567			       "%s: enabling mac failed [tx:%08x:%08x].\n",
3568			       cp->dev->name,
3569			       readl(cp->regs + REG_MIF_STATE_MACHINE),
3570			       readl(cp->regs + REG_MAC_STATE_MACHINE));
@@ -3573,7 +3573,7 @@ static inline void cas_start_dma(struct cas *cp)
3573		}
3574		udelay(10);
3575	}
3576	printk(KERN_ERR "%s: enabling mac failed [%s:%08x:%08x].\n",
3577	       cp->dev->name,
3578	       (txfailed? "tx,rx":"rx"),
3579	       readl(cp->regs + REG_MIF_STATE_MACHINE),
@@ -3585,11 +3585,11 @@ enable_rx_done:
3585	writel(0, cp->regs + REG_RX_COMP_TAIL);
3586
3587	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
3588		if (N_RX_DESC_RINGS > 1)
3589			writel(RX_DESC_RINGN_SIZE(1) - 4,
3590			       cp->regs + REG_PLUS_RX_KICK1);
3591
3592		for (i = 1; i < N_RX_COMP_RINGS; i++)
3593			writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
3594	}
3595	}
@@ -3615,7 +3615,7 @@ static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
3615	*fd = 0;
3616	*spd = 10;
3617	*pause = 0;
3618
3619	/* use GMII registers */
3620	val = cas_phy_read(cp, MII_LPA);
3621	if (val & CAS_LPA_PAUSE)
@@ -3656,7 +3656,7 @@ static void cas_set_link_modes(struct cas *cp)
3656	cas_mif_poll(cp, 0);
3657	val = cas_phy_read(cp, MII_BMCR);
3658	if (val & BMCR_ANENABLE) {
3659		cas_read_mii_link_mode(cp, &full_duplex, &speed,
3660				       &pause);
3661	} else {
3662		if (val & BMCR_FULLDPLX)
@@ -3689,7 +3689,7 @@ static void cas_set_link_modes(struct cas *cp)
3689		if (!full_duplex)
3690			val |= MAC_XIF_DISABLE_ECHO;
3691	}
3692	if (full_duplex)
3693		val |= MAC_XIF_FDPLX_LED;
3694	if (speed == 1000)
3695		val |= MAC_XIF_GMII_MODE;
@@ -3709,17 +3709,17 @@ static void cas_set_link_modes(struct cas *cp)
3709	/* val now set up for REG_MAC_TX_CFG */
3710
3711	/* If gigabit and half-duplex, enable carrier extension
3712	 * mode. increase slot time to 512 bytes as well.
3713	 * else, disable it and make sure slot time is 64 bytes.
3714	 * also activate checksum bug workaround
3715	 */
3716	if ((speed == 1000) && !full_duplex) {
3717		writel(val | MAC_TX_CFG_CARRIER_EXTEND,
3718		       cp->regs + REG_MAC_TX_CFG);
3719
3720		val = readl(cp->regs + REG_MAC_RX_CFG);
3721		val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */
3722		writel(val | MAC_RX_CFG_CARRIER_EXTEND,
3723		       cp->regs + REG_MAC_RX_CFG);
3724
3725		writel(0x200, cp->regs + REG_MAC_SLOT_TIME);
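The slot-time values written above are byte counts: 0x200 = 512 bytes when carrier extension is on for half-duplex gigabit, 0x40 = 64 bytes otherwise (the value restored in the else branch below). A tiny sketch of that decision, using only the two constants visible in these hunks:

#include <stdio.h>

/* pick slot time the way the branch above does: 512-byte slots only
 * for gigabit half-duplex (carrier extension), 64-byte slots otherwise */
static int slot_time(int speed, int full_duplex)
{
	return (speed == 1000 && !full_duplex) ? 0x200 : 0x40;
}

int main(void)
{
	printf("1000/half -> %d bytes\n", slot_time(1000, 0));	/* 512 */
	printf("1000/full -> %d bytes\n", slot_time(1000, 1));	/* 64 */
	printf("100/half  -> %d bytes\n", slot_time(100, 0));	/* 64 */
	return 0;
}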
@@ -3731,7 +3731,7 @@ static void cas_set_link_modes(struct cas *cp)
3731	} else {
3732		writel(val, cp->regs + REG_MAC_TX_CFG);
3733
3734		/* checksum bug workaround. don't strip FCS when in
3735		 * half-duplex mode
3736		 */
3737		val = readl(cp->regs + REG_MAC_RX_CFG);
@@ -3744,7 +3744,7 @@ static void cas_set_link_modes(struct cas *cp)
3744			cp->crc_size = 4;
3745			cp->min_frame_size = CAS_MIN_FRAME;
3746		}
3747		writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
3748		       cp->regs + REG_MAC_RX_CFG);
3749		writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3750	}
@@ -3772,7 +3772,7 @@ static void cas_set_link_modes(struct cas *cp)
3772		val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
3773		if (pause & 0x01) { /* symmetric pause */
3774			val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
3775		}
3776	}
3777	writel(val, cp->regs + REG_MAC_CTRL_CFG);
3778	cas_start_dma(cp);
@@ -3804,7 +3804,7 @@ static void cas_init_hw(struct cas *cp, int restart_link)
3804	 */
3805	static void cas_hard_reset(struct cas *cp)
3806	{
3807		writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3808		udelay(20);
3809		pci_restore_state(cp->pdev);
3810	}
@@ -3822,7 +3822,7 @@ static void cas_global_reset(struct cas *cp, int blkflag)
3822		 * need some special handling if the chip is set into a
3823		 * loopback mode.
3824		 */
3825		writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
3826		       cp->regs + REG_SW_RESET);
3827	} else {
3828		writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
@@ -3842,16 +3842,16 @@ static void cas_global_reset(struct cas *cp, int blkflag)
3842
3843	done:
3844	/* enable various BIM interrupts */
3845	writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
3846	       BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);
3847
3848	/* clear out pci error status mask for handled errors.
3849	 * we don't deal with DMA counter overflows as they happen
3850	 * all the time.
3851	 */
3852	writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
3853			       PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
3854			       PCI_ERR_BIM_DMA_READ), cp->regs +
3855	       REG_PCI_ERR_STATUS_MASK);
3856
3857	/* set up for MII by default to address mac rx reset timeout
@@ -3912,7 +3912,7 @@ static void cas_shutdown(struct cas *cp)
3912	#else
3913	while (atomic_read(&cp->reset_task_pending))
3914		schedule();
3915	#endif
3916	/* Actually stop the chip */
3917	cas_lock_all_save(cp, flags);
3918	cas_reset(cp, 0);
@@ -3942,7 +3942,7 @@ static int cas_change_mtu(struct net_device *dev, int new_mtu)
3942	}
3943	schedule_work(&cp->reset_task);
3944	#else
3945	atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
3946		   CAS_RESET_ALL : CAS_RESET_MTU);
3947	printk(KERN_ERR "reset called in cas_change_mtu\n");
3948	schedule_work(&cp->reset_task);
@@ -3976,7 +3976,7 @@ static void cas_clean_txd(struct cas *cp, int ring)
3976			 * needs to be unmapped.
3977			 */
3978			daddr = le64_to_cpu(txd[ent].buffer);
3979			dlen = CAS_VAL(TX_DESC_BUFLEN,
3980				       le64_to_cpu(txd[ent].control));
3981			pci_unmap_page(cp->pdev, daddr, dlen,
3982				       PCI_DMA_TODEVICE);
@@ -4047,7 +4047,7 @@ static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
4047
4048	size = RX_DESC_RINGN_SIZE(ring);
4049	for (i = 0; i < size; i++) {
4050		if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
4051			return -1;
4052	}
4053	return 0;
@@ -4114,7 +4114,7 @@ static void cas_reset_task(void *data)
4114		 * call to cas_init_hw will restart auto negotiation.
4115		 * Setting the second argument of cas_reset to
4116		 * !(pending == CAS_RESET_ALL) will set this argument
4117		 * to 1 (avoiding reinitializing the PHY for the normal
4118		 * PCS case) when auto negotiation is not restarted.
4119		 */
4120	#if 1
@@ -4151,9 +4151,9 @@ static void cas_link_timer(unsigned long data)
4151
4152	if (link_transition_timeout != 0 &&
4153	    cp->link_transition_jiffies_valid &&
4154	    ((jiffies - cp->link_transition_jiffies) >
4155	     (link_transition_timeout))) {
4156		/* One-second counter so link-down workaround doesn't
4157		 * cause resets to occur so fast as to fool the switch
4158		 * into thinking the link is down.
4159		 */
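The timeout test above, (jiffies - cp->link_transition_jiffies) > link_transition_timeout, relies on unsigned wraparound arithmetic: the subtraction yields the correct elapsed tick count even after the jiffies counter wraps, which is the same idiom the kernel packages as time_after(). A standalone demonstration with a 32-bit counter and fabricated values:

#include <stdint.h>
#include <stdio.h>

/* unsigned subtraction gives the correct elapsed ticks even when the
 * counter has wrapped between 'then' and 'now' */
static int timed_out(uint32_t now, uint32_t then, uint32_t timeout)
{
	return (now - then) > timeout;
}

int main(void)
{
	uint32_t then = 0xfffffff0u;	/* just before wrap */
	uint32_t now  = 0x00000020u;	/* just after wrap: 0x30 ticks later */

	printf("elapsed = %u, timed out (>0x100)? %d\n",
	       now - then, timed_out(now, then, 0x100));
	return 0;	/* elapsed = 48, not timed out */
}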
@@ -4173,10 +4173,10 @@ static void cas_link_timer(unsigned long data)
4173	#if 1
4174	if (atomic_read(&cp->reset_task_pending_all) ||
4175	    atomic_read(&cp->reset_task_pending_spare) ||
4176	    atomic_read(&cp->reset_task_pending_mtu))
4177		goto done;
4178	#else
4179	if (atomic_read(&cp->reset_task_pending))
4180		goto done;
4181	#endif
4182
@@ -4268,7 +4268,7 @@ done:
4268	spin_unlock_irqrestore(&cp->lock, flags);
4269	}
4270
4271	/* tiny buffers are used to avoid target abort issues with
4272	 * older cassini's
4273	 */
4274	static void cas_tx_tiny_free(struct cas *cp)
@@ -4280,7 +4280,7 @@ static void cas_tx_tiny_free(struct cas *cp)
4280		if (!cp->tx_tiny_bufs[i])
4281			continue;
4282
4283		pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
4284				    cp->tx_tiny_bufs[i],
4285				    cp->tx_tiny_dvma[i]);
4286		cp->tx_tiny_bufs[i] = NULL;
@@ -4293,7 +4293,7 @@ static int cas_tx_tiny_alloc(struct cas *cp)
4293	int i;
4294
4295	for (i = 0; i < N_TX_RINGS; i++) {
4296		cp->tx_tiny_bufs[i] =
4297			pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK,
4298					     &cp->tx_tiny_dvma[i]);
4299		if (!cp->tx_tiny_bufs[i]) {
@@ -4322,7 +4322,7 @@ static int cas_open(struct net_device *dev)
4322	/* Reset the chip */
4323	cas_lock_all_save(cp, flags);
4324	/* We set the second arg to cas_reset to zero
4325	 * because cas_init_hw below will have its second
4326	 * argument set to non-zero, which will force
4327	 * autonegotiation to start.
4328	 */
@@ -4338,19 +4338,19 @@ static int cas_open(struct net_device *dev)
4338	err = -ENOMEM;
4339	if (cas_alloc_rxds(cp) < 0)
4340		goto err_tx_tiny;
4341
4342	/* allocate spares */
4343	cas_spare_init(cp);
4344	cas_spare_recover(cp, GFP_KERNEL);
4345
4346	/* We can now request the interrupt as we know it's masked
4347	 * on the controller. cassini+ has up to 4 interrupts
4348	 * that can be used, but you need to do explicit pci interrupt
4349	 * mapping to expose them
4350	 */
4351	if (request_irq(cp->pdev->irq, cas_interrupt,
4352			IRQF_SHARED, dev->name, (void *) dev)) {
4353		printk(KERN_ERR "%s: failed to request irq !\n",
4354		       cp->dev->name);
4355		err = -EAGAIN;
4356		goto err_spare;
@@ -4388,9 +4388,9 @@ static int cas_close(struct net_device *dev)
4388
4389	/* Stop traffic, mark us closed */
4390	cas_lock_all_save(cp, flags);
4391	cp->opened = 0;
4392	cas_reset(cp, 0);
4393	cas_phy_init(cp);
4394	cas_begin_auto_negotiation(cp, NULL);
4395	cas_clean_rings(cp);
4396	cas_unlock_all_restore(cp, flags);
@@ -4483,7 +4483,7 @@ static struct net_device_stats *cas_get_stats(struct net_device *dev)
4483	/* we collate all of the stats into net_stats[N_TX_RING] */
4484	if (!cp->hw_running)
4485		return stats + N_TX_RINGS;
4486
4487	/* collect outstanding stats */
4488	/* WTZ: the Cassini spec gives these as 16 bit counters but
4489	 * stored in 32-bit words. Added a mask of 0xffff to be safe,
@@ -4493,11 +4493,11 @@ static struct net_device_stats *cas_get_stats(struct net_device *dev)
4493	 * that consistent.
4494	 */
4495	spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
4496	stats[N_TX_RINGS].rx_crc_errors +=
4497		readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
4498	stats[N_TX_RINGS].rx_frame_errors +=
4499		readl(cp->regs + REG_MAC_ALIGN_ERR) & 0xffff;
4500	stats[N_TX_RINGS].rx_length_errors +=
4501		readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
4502	#if 1
4503	tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
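Per the WTZ comment, the hardware error counters are 16-bit values held in 32-bit registers, so each readl() is masked with 0xffff before it is accumulated into the software counter; any garbage in the upper half-word is discarded rather than summed. A minimal illustration with a fabricated register value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned long rx_crc_errors = 0;

	/* pretend register read: only the low 16 bits are meaningful */
	uint32_t reg = 0xdead0007u;

	rx_crc_errors += reg & 0xffff;	/* same mask as the driver */
	printf("rx_crc_errors = %lu\n", rx_crc_errors);	/* 7, not 3735879687 */
	return 0;
}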
@@ -4506,7 +4506,7 @@ static struct net_device_stats *cas_get_stats(struct net_device *dev)
4506	stats[N_TX_RINGS].collisions +=
4507		tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
4508	#else
4509	stats[N_TX_RINGS].tx_aborted_errors +=
4510		readl(cp->regs + REG_MAC_COLL_EXCESS);
4511	stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
4512		readl(cp->regs + REG_MAC_COLL_LATE);
@@ -4525,7 +4525,7 @@ static struct net_device_stats *cas_get_stats(struct net_device *dev)
4525
4526	for (i = 0; i < N_TX_RINGS; i++) {
4527		spin_lock(&cp->stat_lock[i]);
4528		stats[N_TX_RINGS].rx_length_errors +=
4529			stats[i].rx_length_errors;
4530		stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
4531		stats[N_TX_RINGS].rx_packets += stats[i].rx_packets;
@@ -4550,10 +4550,10 @@ static void cas_set_multicast(struct net_device *dev)
4550	u32 rxcfg, rxcfg_new;
4551	unsigned long flags;
4552	int limit = STOP_TRIES;
4553
4554	if (!cp->hw_running)
4555		return;
4556
4557	spin_lock_irqsave(&cp->lock, flags);
4558	rxcfg = readl(cp->regs + REG_MAC_RX_CFG);
4559
@@ -4619,22 +4619,22 @@ static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4619			XCVR_INTERNAL : XCVR_EXTERNAL;
4620		cmd->phy_address = cp->phy_addr;
4621		cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII |
4622			ADVERTISED_10baseT_Half |
4623			ADVERTISED_10baseT_Full |
4624			ADVERTISED_100baseT_Half |
4625			ADVERTISED_100baseT_Full;
4626
4627		cmd->supported |=
4628			(SUPPORTED_10baseT_Half |
4629			 SUPPORTED_10baseT_Full |
4630			 SUPPORTED_100baseT_Half |
4631			 SUPPORTED_100baseT_Full |
4632			 SUPPORTED_TP | SUPPORTED_MII);
4633
4634		if (cp->hw_running) {
4635			cas_mif_poll(cp, 0);
4636			bmcr = cas_phy_read(cp, MII_BMCR);
4637			cas_read_mii_link_mode(cp, &full_duplex,
4638					       &speed, &pause);
4639			cas_mif_poll(cp, 1);
4640		}
@@ -4647,9 +4647,9 @@ static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4647		cmd->advertising |= ADVERTISED_FIBRE;
4648
4649		if (cp->hw_running) {
4650			/* pcs uses the same bits as mii */
4651			bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
4652			cas_read_pcs_link_mode(cp, &full_duplex,
4653					       &speed, &pause);
4654		}
4655	}
@@ -4667,8 +4667,8 @@ static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4667		cmd->autoneg = AUTONEG_DISABLE;
4668		cmd->speed =
4669			(bmcr & CAS_BMCR_SPEED1000) ?
4670			SPEED_1000 :
4671			((bmcr & BMCR_SPEED100) ? SPEED_100 :
4672			 SPEED_10);
4673		cmd->duplex =
4674			(bmcr & BMCR_FULLDPLX) ?
@@ -4676,7 +4676,7 @@ static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4676	}
4677	if (linkstate != link_up) {
4678		/* Force these to "unknown" if the link is not up and
4679		 * autonegotiation is enabled. We can set the link
4680		 * speed to 0, but not cmd->duplex,
4681		 * because its legal values are 0 and 1. Ethtool will
4682		 * print the value reported in parentheses after the
@@ -4783,7 +4783,7 @@ static int cas_get_stats_count(struct net_device *dev)
4783
4784	static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4785	{
4786		memcpy(data, &ethtool_cassini_statnames,
4787		       CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
4788	}
4789
@@ -4812,7 +4812,7 @@ static void cas_get_ethtool_stats(struct net_device *dev,
4812	BUG_ON(i != CAS_NUM_STAT_KEYS);
4813	}
4814
4815	-static struct ethtool_ops cas_ethtool_ops = {
4815	+static const struct ethtool_ops cas_ethtool_ops = {
4816		.get_drvinfo		= cas_get_drvinfo,
4817		.get_settings		= cas_get_settings,
4818		.set_settings		= cas_set_settings,
@@ -4833,7 +4833,7 @@ static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4833	struct mii_ioctl_data *data = if_mii(ifr);
4834	unsigned long flags;
4835	int rc = -EOPNOTSUPP;
4836
4837	/* Hold the PM mutex while doing ioctl's or we may collide
4838	 * with open/close and power management and oops.
4839	 */
@@ -4933,11 +4933,11 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
4933	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
4934			     &orig_cacheline_size);
4935	if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
4936		cas_cacheline_size =
4937			(CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
4938			CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
4939		if (pci_write_config_byte(pdev,
4940					  PCI_CACHE_LINE_SIZE,
4941					  cas_cacheline_size)) {
4942			dev_err(&pdev->dev, "Could not set PCI cache "
4943				"line size\n");
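The logic above only ever raises the BIOS-programmed cache line value: if it is already at least CAS_PREF_CACHELINE_SIZE it is left alone, otherwise it is raised to the smaller of CAS_PREF_CACHELINE_SIZE and SMP_CACHE_BYTES so it never exceeds the CPU cache line. (The PCI_CACHE_LINE_SIZE config register is specified in 32-bit dword units; the driver's constants are assumed to already be in the right units.) A sketch of the clamp with stand-in constants:

#include <stdio.h>

/* hypothetical stand-ins; the real values come from the driver and arch */
#define PREF_CACHELINE_SIZE	16
#define SMP_CACHE_BYTES_DEMO	32

/* upgrade only: never lower an already-large value, and never exceed
 * the CPU cache line, mirroring the two tests above */
static int pick_cacheline(int current_size)
{
	int want = (PREF_CACHELINE_SIZE < SMP_CACHE_BYTES_DEMO) ?
		   PREF_CACHELINE_SIZE : SMP_CACHE_BYTES_DEMO;

	return (current_size < PREF_CACHELINE_SIZE) ? want : current_size;
}

int main(void)
{
	printf("BIOS set 8  -> %d\n", pick_cacheline(8));	/* raised to 16 */
	printf("BIOS set 32 -> %d\n", pick_cacheline(32));	/* left alone */
	return 0;
}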
@@ -4977,7 +4977,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
4977	cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size : 0;
4978	#endif
4979	cp->dev = dev;
4980	cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
4981		cassini_debug;
4982
4983	cp->link_transition = LINK_TRANSITION_UNKNOWN;
@@ -5041,13 +5041,13 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
5041		goto err_out_iounmap;
5042	}
5043
5044	for (i = 0; i < N_TX_RINGS; i++)
5045		cp->init_txds[i] = cp->init_block->txds[i];
5046
5047	for (i = 0; i < N_RX_DESC_RINGS; i++)
5048		cp->init_rxds[i] = cp->init_block->rxds[i];
5049
5050	for (i = 0; i < N_RX_COMP_RINGS; i++)
5051		cp->init_rxcs[i] = cp->init_block->rxcs[i];
5052
5053	for (i = 0; i < N_RX_FLOWS; i++)
@@ -5087,11 +5087,11 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
5087
5088	i = readl(cp->regs + REG_BIM_CFG);
5089	printk(KERN_INFO "%s: Sun Cassini%s (%sbit/%sMHz PCI/%s) "
5090	       "Ethernet[%d] ", dev->name,
5091	       (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
5092	       (i & BIM_CFG_32BIT) ? "32" : "64",
5093	       (i & BIM_CFG_66MHZ) ? "66" : "33",
5094	       (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq);
5095
5096	for (i = 0; i < 6; i++)
5097		printk("%2.2x%c", dev->dev_addr[i],
@@ -5123,7 +5123,7 @@ err_out_free_res:
5123
5124	err_write_cacheline:
5125	/* Try to restore it in case the error occurred after we
5126	 * set it.
5127	 */
5128	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);
5129
@@ -5157,7 +5157,7 @@ static void __devexit cas_remove_one(struct pci_dev *pdev)
5157		/* Restore the cache line size if we had modified
5158		 * it.
5159		 */
5160		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
5161				      cp->orig_cacheline_size);
5162	}
5163	#endif
@@ -5178,7 +5178,7 @@ static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
5178	unsigned long flags;
5179
5180	mutex_lock(&cp->pm_mutex);
5181
5182	/* If the driver is opened, we stop the DMA */
5183	if (cp->opened) {
5184		netif_device_detach(dev);
@@ -5245,7 +5245,7 @@ static int __init cas_init(void)
5245	else
5246		link_transition_timeout = 0;
5247
5248	-	return pci_module_init(&cas_driver);
5248	+	return pci_register_driver(&cas_driver);
5249	}
5250
5251	static void __exit cas_cleanup(void)
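The one substantive change in this final hunk swaps the deprecated pci_module_init() wrapper for a direct pci_register_driver() call; the init/exit pairing around it is the standard PCI driver skeleton. A minimal modern-style sketch of that skeleton, with a placeholder name and a hypothetical device ID (not the Cassini ID table):

#include <linux/module.h>
#include <linux/pci.h>

/* placeholder ID table; the real driver matches Sun Cassini parts */
static struct pci_device_id demo_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* hypothetical vendor/device */
	{ }
};
MODULE_DEVICE_TABLE(pci, demo_ids);

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return pci_enable_device(pdev);
}

static void demo_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static struct pci_driver demo_driver = {
	.name     = "demo",
	.id_table = demo_ids,
	.probe    = demo_probe,
	.remove   = demo_remove,
};

static int __init demo_init(void)
{
	/* pci_module_init() was just a wrapper around this call */
	return pci_register_driver(&demo_driver);
}

static void __exit demo_exit(void)
{
	pci_unregister_driver(&demo_driver);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");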