author		Alan Cox <alan@linux.intel.com>		2009-08-27 06:03:09 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2009-09-15 15:02:29 -0400
commit		15700039b108fccc36507bcabdd4dda93f7c4c61 (patch)
tree		31629674e011fb606072d89b913aa169cc689fbc /drivers
parent		bc7f9c597fa55814548845a7c43f53d6bbbce94b (diff)
Staging: et131x: prune all the debug code
We don't need it; we have a perfectly good set of debug tools. For this
pass, keep a few debug printks around which are "should not happen" items.
Signed-off-by: Alan Cox <alan@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
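
The conversion applied below is largely mechanical: the DBG_ENTER/DBG_LEAVE/DBG_TRACE/DBG_VERBOSE tracing calls are deleted outright, the handful of messages worth keeping move from DBG_ERROR/DBG_WARNING to the standard dev_err()/dev_warn() helpers, and DBG_ASSERT becomes WARN_ON with the condition negated. A rough before/after sketch of the pattern (a fragment only, not a compilable unit; the register and field names are lifted from the hunks below):

/* Before: driver-private facility from et131x_debug.h */
DBG_ENTER(et131x_dbginfo);
if (csr.bits.halt_status != 1)
	DBG_ERROR(et131x_dbginfo,
		  "RX Dma failed to enter halt state. CSR 0x%08x\n",
		  csr.value);
DBG_ASSERT(rx_local->nReadyRecv <= rx_local->NumRfd);
DBG_LEAVE(et131x_dbginfo);

/* After: standard kernel logging; entry/exit tracing simply dropped */
if (csr.bits.halt_status != 1)
	dev_err(&etdev->pdev->dev,
		"RX Dma failed to enter halt state. CSR 0x%08x\n",
		csr.value);
WARN_ON(rx_local->nReadyRecv > rx_local->NumRfd);
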
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/staging/et131x/Makefile         |    1
-rw-r--r--	drivers/staging/et131x/et1310_eeprom.c  |    1
-rw-r--r--	drivers/staging/et131x/et1310_mac.c     |   64
-rw-r--r--	drivers/staging/et131x/et1310_phy.c     |   88
-rw-r--r--	drivers/staging/et131x/et1310_pm.c      |   14
-rw-r--r--	drivers/staging/et131x/et1310_rx.c      |  138
-rw-r--r--	drivers/staging/et131x/et1310_tx.c      |  592
-rw-r--r--	drivers/staging/et131x/et131x_adapter.h |    7
-rw-r--r--	drivers/staging/et131x/et131x_debug.c   |  208
-rw-r--r--	drivers/staging/et131x/et131x_debug.h   |  255
-rw-r--r--	drivers/staging/et131x/et131x_initpci.c |  236
-rw-r--r--	drivers/staging/et131x/et131x_isr.c     |   58
-rw-r--r--	drivers/staging/et131x/et131x_netdev.c  |  176
13 files changed, 117 insertions(+), 1721 deletions(-)
diff --git a/drivers/staging/et131x/Makefile b/drivers/staging/et131x/Makefile
index 111049405cb3..95c645d8af39 100644
--- a/drivers/staging/et131x/Makefile
+++ b/drivers/staging/et131x/Makefile
@@ -10,7 +10,6 @@ et131x-objs := et1310_eeprom.o \
 		et1310_pm.o \
 		et1310_rx.o \
 		et1310_tx.o \
-		et131x_debug.o \
 		et131x_initpci.o \
 		et131x_isr.o \
 		et131x_netdev.o
diff --git a/drivers/staging/et131x/et1310_eeprom.c b/drivers/staging/et131x/et1310_eeprom.c
index 7b2e4ea2ad09..c853a2c243a8 100644
--- a/drivers/staging/et131x/et1310_eeprom.c
+++ b/drivers/staging/et131x/et1310_eeprom.c
@@ -56,7 +56,6 @@
  */
 
 #include "et131x_version.h"
-#include "et131x_debug.h"
 #include "et131x_defs.h"
 
 #include <linux/pci.h>
diff --git a/drivers/staging/et131x/et1310_mac.c b/drivers/staging/et131x/et1310_mac.c
index c94d66194ac0..f81e1cba8547 100644
--- a/drivers/staging/et131x/et1310_mac.c
+++ b/drivers/staging/et131x/et1310_mac.c
@@ -56,7 +56,6 @@
  */
 
 #include "et131x_version.h"
-#include "et131x_debug.h"
 #include "et131x_defs.h"
 
 #include <linux/init.h>
@@ -75,6 +74,7 @@
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/bitops.h>
+#include <linux/pci.h>
 #include <asm/system.h>
 
 #include <linux/netdevice.h>
@@ -92,11 +92,6 @@
 #include "et131x_adapter.h"
 #include "et131x_initpci.h"
 
-/* Data for debugging facilities */
-#ifdef CONFIG_ET131X_DEBUG
-extern dbg_info_t *et131x_dbginfo;
-#endif /* CONFIG_ET131X_DEBUG */
-
 /**
  * ConfigMacRegs1 - Initialize the first part of MAC regs
  * @pAdpater: pointer to our adapter structure
@@ -110,8 +105,6 @@ void ConfigMACRegs1(struct et131x_adapter *etdev)
 	MAC_HFDP_t hfdp;
 	MII_MGMT_CFG_t mii_mgmt_cfg;
 
-	DBG_ENTER(et131x_dbginfo);
-
 	/* First we need to reset everything. Write to MAC configuration
 	 * register 1 to perform reset.
 	 */
@@ -171,8 +164,6 @@ void ConfigMACRegs1(struct et131x_adapter *etdev)
 
 	/* clear out MAC config reset */
 	writel(0, &pMac->cfg1.value);
-
-	DBG_LEAVE(et131x_dbginfo);
 }
 
 /**
@@ -188,8 +179,6 @@ void ConfigMACRegs2(struct et131x_adapter *etdev)
 	MAC_IF_CTRL_t ifctrl;
 	TXMAC_CTL_t ctl;
 
-	DBG_ENTER(et131x_dbginfo);
-
 	ctl.value = readl(&etdev->regs->txmac.ctl.value);
 	cfg1.value = readl(&pMac->cfg1.value);
 	cfg2.value = readl(&pMac->cfg2.value);
@@ -255,17 +244,11 @@ void ConfigMACRegs2(struct et131x_adapter *etdev)
 		 delay < 100);
 
 	if (delay == 100) {
-		DBG_ERROR(et131x_dbginfo,
+		dev_warn(&etdev->pdev->dev,
 		    "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
 		    cfg1.value);
 	}
 
-	DBG_TRACE(et131x_dbginfo,
-		  "Speed %d, Dup %d, CFG1 0x%08x, CFG2 0x%08x, if_ctrl 0x%08x\n",
-		  etdev->linkspeed, etdev->duplex_mode,
-		  readl(&pMac->cfg1.value), readl(&pMac->cfg2.value),
-		  readl(&pMac->if_ctrl.value));
-
 	/* Enable TXMAC */
 	ctl.bits.txmac_en = 0x1;
 	ctl.bits.fc_disable = 0x1;
@@ -275,12 +258,7 @@ void ConfigMACRegs2(struct et131x_adapter *etdev)
 	if (etdev->Flags & fMP_ADAPTER_LOWER_POWER) {
 		et131x_rx_dma_enable(etdev);
 		et131x_tx_dma_enable(etdev);
-	} else {
-		DBG_WARNING(et131x_dbginfo,
-			    "Didn't enable Rx/Tx due to low-power mode\n");
 	}
-
-	DBG_LEAVE(et131x_dbginfo);
 }
 
 void ConfigRxMacRegs(struct et131x_adapter *etdev)
@@ -290,8 +268,6 @@ void ConfigRxMacRegs(struct et131x_adapter *etdev)
 	RXMAC_WOL_SA_HI_t sa_hi;
 	RXMAC_PF_CTRL_t pf_ctrl = { 0 };
 
-	DBG_ENTER(et131x_dbginfo);
-
 	/* Disable the MAC while it is being configured (also disable WOL) */
 	writel(0x8, &pRxMac->ctrl.value);
 
@@ -421,8 +397,6 @@ void ConfigRxMacRegs(struct et131x_adapter *etdev)
 	 */
 	writel(pf_ctrl.value, &pRxMac->pf_ctrl.value);
 	writel(0x9, &pRxMac->ctrl.value);
-
-	DBG_LEAVE(et131x_dbginfo);
 }
 
 void ConfigTxMacRegs(struct et131x_adapter *etdev)
@@ -430,8 +404,6 @@ void ConfigTxMacRegs(struct et131x_adapter *etdev)
 	struct _TXMAC_t __iomem *pTxMac = &etdev->regs->txmac;
 	TXMAC_CF_PARAM_t Local;
 
-	DBG_ENTER(et131x_dbginfo);
-
 	/* We need to update the Control Frame Parameters
 	 * cfpt - control frame pause timer set to 64 (0x40)
 	 * cfep - control frame extended pause timer set to 0x0
@@ -443,8 +415,6 @@ void ConfigTxMacRegs(struct et131x_adapter *etdev)
 		Local.bits.cfep = 0x0;
 		writel(Local.value, &pTxMac->cf_param.value);
 	}
-
-	DBG_LEAVE(et131x_dbginfo);
 }
 
 void ConfigMacStatRegs(struct et131x_adapter *etdev)
@@ -452,8 +422,6 @@ void ConfigMacStatRegs(struct et131x_adapter *etdev)
 	struct _MAC_STAT_t __iomem *pDevMacStat =
 		&etdev->regs->macStat;
 
-	DBG_ENTER(et131x_dbginfo);
-
 	/* Next we need to initialize all the MAC_STAT registers to zero on
 	 * the device.
 	 */
@@ -534,8 +502,6 @@ void ConfigMacStatRegs(struct et131x_adapter *etdev)
 
 		writel(Carry2M.value, &pDevMacStat->Carry2M.value);
 	}
-
-	DBG_LEAVE(et131x_dbginfo);
 }
 
 void ConfigFlowControl(struct et131x_adapter *etdev)
@@ -614,8 +580,6 @@ void HandleMacStatInterrupt(struct et131x_adapter *etdev)
 	MAC_STAT_REG_1_t Carry1;
 	MAC_STAT_REG_2_t Carry2;
 
-	DBG_ENTER(et131x_dbginfo);
-
 	/* Read the interrupt bits from the register(s). These are Clear On
 	 * Write.
 	 */
@@ -659,8 +623,6 @@ void HandleMacStatInterrupt(struct et131x_adapter *etdev)
 		etdev->Stats.late_collisions += COUNTER_WRAP_12_BIT;
 	if (Carry2.bits.tncl)
 		etdev->Stats.collisions += COUNTER_WRAP_12_BIT;
-
-	DBG_LEAVE(et131x_dbginfo);
 }
 
 void SetupDeviceForMulticast(struct et131x_adapter *etdev)
@@ -674,30 +636,14 @@ void SetupDeviceForMulticast(struct et131x_adapter *etdev)
 	uint32_t hash4 = 0;
 	u32 pm_csr;
 
-	DBG_ENTER(et131x_dbginfo);
-
 	/* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision
 	 * the multi-cast LIST. If it is NOT specified, (and "ALL" is not
 	 * specified) then we should pass NO multi-cast addresses to the
 	 * driver.
 	 */
 	if (etdev->PacketFilter & ET131X_PACKET_TYPE_MULTICAST) {
-		DBG_VERBOSE(et131x_dbginfo,
-			    "MULTICAST flag is set, MCCount: %d\n",
-			    etdev->MCAddressCount);
-
 		/* Loop through our multicast array and set up the device */
 		for (nIndex = 0; nIndex < etdev->MCAddressCount; nIndex++) {
-			DBG_VERBOSE(et131x_dbginfo,
-				    "MCList[%d]: %02x:%02x:%02x:%02x:%02x:%02x\n",
-				    nIndex,
-				    etdev->MCList[nIndex][0],
-				    etdev->MCList[nIndex][1],
-				    etdev->MCList[nIndex][2],
-				    etdev->MCList[nIndex][3],
-				    etdev->MCList[nIndex][4],
-				    etdev->MCList[nIndex][5]);
-
 			result = ether_crc(6, etdev->MCList[nIndex]);
 
 			result = (result & 0x3F800000) >> 23;
@@ -725,8 +671,6 @@ void SetupDeviceForMulticast(struct et131x_adapter *etdev)
 		writel(hash3, &rxmac->multi_hash3);
 		writel(hash4, &rxmac->multi_hash4);
 	}
-
-	DBG_LEAVE(et131x_dbginfo);
 }
 
 void SetupDeviceForUnicast(struct et131x_adapter *etdev)
@@ -737,8 +681,6 @@ void SetupDeviceForUnicast(struct et131x_adapter *etdev)
 	RXMAC_UNI_PF_ADDR3_t uni_pf3;
 	u32 pm_csr;
 
-	DBG_ENTER(et131x_dbginfo);
-
 	/* Set up unicast packet filter reg 3 to be the first two octets of
 	 * the MAC address for both address
 	 *
@@ -769,6 +711,4 @@ void SetupDeviceForUnicast(struct et131x_adapter *etdev)
 		writel(uni_pf2.value, &rxmac->uni_pf_addr2.value);
 		writel(uni_pf3.value, &rxmac->uni_pf_addr3.value);
 	}
-
-	DBG_LEAVE(et131x_dbginfo);
 }
diff --git a/drivers/staging/et131x/et1310_phy.c b/drivers/staging/et131x/et1310_phy.c
index 98055e6b316c..dd199bdb9eff 100644
--- a/drivers/staging/et131x/et1310_phy.c
+++ b/drivers/staging/et131x/et1310_phy.c
@@ -56,7 +56,6 @@
  */
 
 #include "et131x_version.h"
-#include "et131x_debug.h"
 #include "et131x_defs.h"
 
 #include <linux/pci.h>
@@ -98,11 +97,6 @@
 #include "et1310_rx.h"
 #include "et1310_mac.h"
 
-/* Data for debugging facilities */
-#ifdef CONFIG_ET131X_DEBUG
-extern dbg_info_t *et131x_dbginfo;
-#endif /* CONFIG_ET131X_DEBUG */
-
 /* Prototypes for functions with local scope */
 static int et131x_xcvr_init(struct et131x_adapter *adapter);
 
@@ -157,9 +151,9 @@ int PhyMiRead(struct et131x_adapter *adapter, uint8_t xcvrAddr,
 
 	/* If we hit the max delay, we could not read the register */
 	if (delay >= 50) {
-		DBG_WARNING(et131x_dbginfo,
+		dev_warn(&adapter->pdev->dev,
 			    "xcvrReg 0x%08x could not be read\n", xcvrReg);
-		DBG_WARNING(et131x_dbginfo, "status is 0x%08x\n",
+		dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
 			    miiIndicator.value);
 
 		status = -EIO;
@@ -179,10 +173,6 @@ int PhyMiRead(struct et131x_adapter *adapter, uint8_t xcvrAddr,
 	/* Stop the read operation */
 	writel(0, &mac->mii_mgmt_cmd.value);
 
-	DBG_VERBOSE(et131x_dbginfo, " xcvr_addr = 0x%02x, "
-		    "xcvr_reg = 0x%02x, "
-		    "value = 0x%04x.\n", xcvrAddr, xcvrReg, *value);
-
 	/* set the registers we touched back to the state at which we entered
 	 * this function
 	 */
@@ -242,11 +232,11 @@ int MiWrite(struct et131x_adapter *adapter, uint8_t xcvrReg, uint16_t value)
 	if (delay == 100) {
 		uint16_t TempValue;
 
-		DBG_WARNING(et131x_dbginfo,
+		dev_warn(&adapter->pdev->dev,
 			    "xcvrReg 0x%08x could not be written", xcvrReg);
-		DBG_WARNING(et131x_dbginfo, "status is 0x%08x\n",
+		dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
 			    miiIndicator.value);
-		DBG_WARNING(et131x_dbginfo, "command is 0x%08x\n",
+		dev_warn(&adapter->pdev->dev, "command is 0x%08x\n",
 			    readl(&mac->mii_mgmt_cmd.value));
 
 		MiRead(adapter, xcvrReg, &TempValue);
@@ -263,10 +253,6 @@ int MiWrite(struct et131x_adapter *adapter, uint8_t xcvrReg, uint16_t value)
 	writel(miiAddr.value, &mac->mii_mgmt_addr.value);
 	writel(miiCmd.value, &mac->mii_mgmt_cmd.value);
 
-	DBG_VERBOSE(et131x_dbginfo, " xcvr_addr = 0x%02x, "
-		    "xcvr_reg = 0x%02x, "
-		    "value = 0x%04x.\n", xcvrAddr, xcvrReg, value);
-
 	return status;
 }
 
@@ -284,8 +270,6 @@ int et131x_xcvr_find(struct et131x_adapter *adapter)
 	MI_IDR2_t idr2;
 	uint32_t xcvr_id;
 
-	DBG_ENTER(et131x_dbginfo);
-
 	/* We need to get xcvr id and address we just get the first one */
 	for (xcvr_addr = 0; xcvr_addr < 32; xcvr_addr++) {
 		/* Read the ID from the PHY */
@@ -299,10 +283,6 @@ int et131x_xcvr_find(struct et131x_adapter *adapter)
 		xcvr_id = (uint32_t) ((idr1.value << 16) | idr2.value);
 
 		if ((idr1.value != 0) && (idr1.value != 0xffff)) {
-			DBG_TRACE(et131x_dbginfo,
-				  "Xcvr addr: 0x%02x\tXcvr_id: 0x%08x\n",
-				  xcvr_addr, xcvr_id);
-
 			adapter->Stats.xcvr_id = xcvr_id;
 			adapter->Stats.xcvr_addr = xcvr_addr;
 
@@ -310,8 +290,6 @@ int et131x_xcvr_find(struct et131x_adapter *adapter)
 			break;
 		}
 	}
-
-	DBG_LEAVE(et131x_dbginfo);
 	return status;
 }
 
@@ -327,13 +305,9 @@ int et131x_setphy_normal(struct et131x_adapter *adapter)
 {
 	int status;
 
-	DBG_ENTER(et131x_dbginfo);
-
 	/* Make sure the PHY is powered up */
 	ET1310_PhyPowerDown(adapter, 0);
 	status = et131x_xcvr_init(adapter);
-
-	DBG_LEAVE(et131x_dbginfo);
 	return status;
 }
 
@@ -350,8 +324,6 @@ static int et131x_xcvr_init(struct et131x_adapter *adapter)
 	MI_ISR_t isr;
 	MI_LCR2_t lcr2;
 
-	DBG_ENTER(et131x_dbginfo);
-
 	/* Zero out the adapter structure variable representing BMSR */
 	adapter->Bmsr.value = 0;
 
@@ -412,8 +384,6 @@ static int et131x_xcvr_init(struct et131x_adapter *adapter)
 
 		/* NOTE - Do we need this? */
 		ET1310_PhyAccessMiBit(adapter, TRUEPHY_BIT_SET, 0, 9, NULL);
-
-		DBG_LEAVE(et131x_dbginfo);
 		return status;
 	} else {
 		ET1310_PhyAutoNeg(adapter, false);
@@ -469,7 +439,6 @@ static int et131x_xcvr_init(struct et131x_adapter *adapter)
 			break;
 		}
 
-		DBG_LEAVE(et131x_dbginfo);
 		return status;
 	}
 }
@@ -486,8 +455,6 @@ void et131x_Mii_check(struct et131x_adapter *etdev,
 	uint32_t polarity;
 	unsigned long flags;
 
-	DBG_ENTER(et131x_dbginfo);
-
 	if (bmsr_ints.bits.link_status) {
 		if (bmsr.bits.link_status) {
 			etdev->PoMgmt.TransPhyComaModeOnBoot = 20;
@@ -506,8 +473,8 @@ void et131x_Mii_check(struct et131x_adapter *etdev,
 			if (etdev->RegistryPhyLoopbk == false)
 				netif_carrier_on(etdev->netdev);
 		} else {
-			DBG_WARNING(et131x_dbginfo,
-				    "Link down cable problem\n");
+			dev_warn(&etdev->pdev->dev,
+				"Link down - cable problem ?\n");
 
 			if (etdev->linkspeed == TRUEPHY_SPEED_10MBPS) {
 				/* NOTE - Is there a way to query this without
@@ -586,11 +553,6 @@ void et131x_Mii_check(struct et131x_adapter *etdev,
 		etdev->linkspeed = speed;
 		etdev->duplex_mode = duplex;
 
-		DBG_TRACE(et131x_dbginfo,
-			  "etdev->linkspeed 0x%04x, etdev->duplex_mode 0x%08x\n",
-			  etdev->linkspeed,
-			  etdev->duplex_mode);
-
 		etdev->PoMgmt.TransPhyComaModeOnBoot = 20;
 
 		if (etdev->linkspeed == TRUEPHY_SPEED_10MBPS) {
@@ -619,8 +581,6 @@ void et131x_Mii_check(struct et131x_adapter *etdev,
 			ConfigMACRegs2(etdev);
 		}
 	}
-
-	DBG_LEAVE(et131x_dbginfo);
 }
 
 /**
@@ -631,8 +591,6 @@ void et131x_Mii_check(struct et131x_adapter *etdev,
  */
 void TPAL_SetPhy10HalfDuplex(struct et131x_adapter *etdev)
 {
-	DBG_ENTER(et131x_dbginfo);
-
 	/* Power down PHY */
 	ET1310_PhyPowerDown(etdev, 1);
 
@@ -646,8 +604,6 @@ void TPAL_SetPhy10HalfDuplex(struct et131x_adapter *etdev)
 
 	/* Power up PHY */
 	ET1310_PhyPowerDown(etdev, 0);
-
-	DBG_LEAVE(et131x_dbginfo);
 }
 
 /**
@@ -658,8 +614,6 @@ void TPAL_SetPhy10HalfDuplex(struct et131x_adapter *etdev)
  */
 void TPAL_SetPhy10FullDuplex(struct et131x_adapter *etdev)
 {
-	DBG_ENTER(et131x_dbginfo);
-
 	/* Power down PHY */
 	ET1310_PhyPowerDown(etdev, 1);
 
@@ -673,8 +627,6 @@ void TPAL_SetPhy10FullDuplex(struct et131x_adapter *etdev)
 
 	/* Power up PHY */
 	ET1310_PhyPowerDown(etdev, 0);
-
-	DBG_LEAVE(et131x_dbginfo);
 }
 
 /**
@@ -683,8 +635,6 @@ void TPAL_SetPhy10FullDuplex(struct et131x_adapter *etdev)
 */
 void TPAL_SetPhy10Force(struct et131x_adapter *etdev)
 {
-	DBG_ENTER(et131x_dbginfo);
-
 	/* Power down PHY */
 	ET1310_PhyPowerDown(etdev, 1);
 
@@ -704,8 +654,6 @@ void TPAL_SetPhy10Force(struct et131x_adapter *etdev)
 
 	/* Power up PHY */
 	ET1310_PhyPowerDown(etdev, 0);
-
-	DBG_LEAVE(et131x_dbginfo);
 }
 
 /**
@@ -716,8 +664,6 @@ void TPAL_SetPhy10Force(struct et131x_adapter *etdev)
 */
 void TPAL_SetPhy100HalfDuplex(struct et131x_adapter *etdev)
 {
-	DBG_ENTER(et131x_dbginfo);
-
 	/* Power down PHY */
 	ET1310_PhyPowerDown(etdev, 1);
 
@@ -734,8 +680,6 @@ void TPAL_SetPhy100HalfDuplex(struct et131x_adapter *etdev)
 
 	/* Power up PHY */
 	ET1310_PhyPowerDown(etdev, 0);
-
-	DBG_LEAVE(et131x_dbginfo);
 }
 
 /**
@@ -746,8 +690,6 @@ void TPAL_SetPhy100HalfDuplex(struct et131x_adapter *etdev)
 */
 void TPAL_SetPhy100FullDuplex(struct et131x_adapter *etdev)
 {
-	DBG_ENTER(et131x_dbginfo);
-
 	/* Power down PHY */
 	ET1310_PhyPowerDown(etdev, 1);
 
@@ -761,8 +703,6 @@ void TPAL_SetPhy100FullDuplex(struct et131x_adapter *etdev)
 
 	/* Power up PHY */
 	ET1310_PhyPowerDown(etdev, 0);
-
-	DBG_LEAVE(et131x_dbginfo);
 }
 
 /**
@@ -771,8 +711,6 @@ void TPAL_SetPhy100FullDuplex(struct et131x_adapter *etdev)
 */
 void TPAL_SetPhy100Force(struct et131x_adapter *etdev)
 {
-	DBG_ENTER(et131x_dbginfo);
-
 	/* Power down PHY */
 	ET1310_PhyPowerDown(etdev, 1);
 
@@ -792,8 +730,6 @@ void TPAL_SetPhy100Force(struct et131x_adapter *etdev)
 
 	/* Power up PHY */
 	ET1310_PhyPowerDown(etdev, 0);
-
-	DBG_LEAVE(et131x_dbginfo);
 }
 
 /**
@@ -804,8 +740,6 @@ void TPAL_SetPhy100Force(struct et131x_adapter *etdev)
 */
 void TPAL_SetPhy1000FullDuplex(struct et131x_adapter *etdev)
 {
-	DBG_ENTER(et131x_dbginfo);
-
 	/* Power down PHY */
 	ET1310_PhyPowerDown(etdev, 1);
 
@@ -819,8 +753,6 @@ void TPAL_SetPhy1000FullDuplex(struct et131x_adapter *etdev)
 
 	/* power up PHY */
 	ET1310_PhyPowerDown(etdev, 0);
-
-	DBG_LEAVE(et131x_dbginfo);
 }
 
 /**
@@ -829,8 +761,6 @@ void TPAL_SetPhy1000FullDuplex(struct et131x_adapter *etdev)
 */
 void TPAL_SetPhyAutoNeg(struct et131x_adapter *etdev)
 {
-	DBG_ENTER(et131x_dbginfo);
-
 	/* Power down PHY */
 	ET1310_PhyPowerDown(etdev, 1);
 
@@ -849,8 +779,6 @@ void TPAL_SetPhyAutoNeg(struct et131x_adapter *etdev)
 
 	/* Power up PHY */
 	ET1310_PhyPowerDown(etdev, 0);
-
-	DBG_LEAVE(et131x_dbginfo);
 }
 
 
diff --git a/drivers/staging/et131x/et1310_pm.c b/drivers/staging/et131x/et1310_pm.c
index f4c942c7b881..7d0772359291 100644
--- a/drivers/staging/et131x/et1310_pm.c
+++ b/drivers/staging/et131x/et1310_pm.c
@@ -56,7 +56,6 @@
  */
 
 #include "et131x_version.h"
-#include "et131x_debug.h"
 #include "et131x_defs.h"
 
 #include <linux/init.h>
@@ -92,11 +91,6 @@
 #include "et131x_adapter.h"
 #include "et131x_initpci.h"
 
-/* Data for debugging facilities */
-#ifdef CONFIG_ET131X_DEBUG
-extern dbg_info_t *et131x_dbginfo;
-#endif /* CONFIG_ET131X_DEBUG */
-
 /**
  * EnablePhyComa - called when network cable is unplugged
  * @etdev: pointer to our adapter structure
@@ -122,8 +116,6 @@ void EnablePhyComa(struct et131x_adapter *etdev)
 	unsigned long flags;
 	u32 GlobalPmCSR;
 
-	DBG_ENTER(et131x_dbginfo);
-
 	GlobalPmCSR = readl(&etdev->regs->global.pm_csr);
 
 	/* Save the GbE PHY speed and duplex modes. Need to restore this
@@ -146,8 +138,6 @@ void EnablePhyComa(struct et131x_adapter *etdev)
 	/* Program gigE PHY in to Coma mode */
 	GlobalPmCSR |= ET_PM_PHY_SW_COMA;
 	writel(GlobalPmCSR, &etdev->regs->global.pm_csr);
-
-	DBG_LEAVE(et131x_dbginfo);
 }
 
 /**
@@ -158,8 +148,6 @@ void DisablePhyComa(struct et131x_adapter *etdev)
 {
 	u32 GlobalPmCSR;
 
-	DBG_ENTER(et131x_dbginfo);
-
 	GlobalPmCSR = readl(&etdev->regs->global.pm_csr);
 
 	/* Disable phy_sw_coma register and re-enable JAGCore clocks */
@@ -193,7 +181,5 @@ void DisablePhyComa(struct et131x_adapter *etdev)
 
 	/* Need to re-enable Rx. */
 	et131x_rx_dma_enable(etdev);
-
-	DBG_LEAVE(et131x_dbginfo);
 }
 
diff --git a/drivers/staging/et131x/et1310_rx.c b/drivers/staging/et131x/et1310_rx.c
index 54a7ecfd4901..8f2e91fa0a86 100644
--- a/drivers/staging/et131x/et1310_rx.c
+++ b/drivers/staging/et131x/et1310_rx.c
@@ -56,7 +56,6 @@
  */
 
 #include "et131x_version.h"
-#include "et131x_debug.h"
 #include "et131x_defs.h"
 
 #include <linux/pci.h>
@@ -93,11 +92,6 @@
 
 #include "et1310_rx.h"
 
-/* Data for debugging facilities */
-#ifdef CONFIG_ET131X_DEBUG
-extern dbg_info_t *et131x_dbginfo;
-#endif /* CONFIG_ET131X_DEBUG */
-
 
 void nic_return_rfd(struct et131x_adapter *etdev, PMP_RFD pMpRfd);
 
@@ -117,8 +111,6 @@ int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
 	uint32_t pktStatRingSize, FBRChunkSize;
 	RX_RING_t *rx_ring;
 
-	DBG_ENTER(et131x_dbginfo);
-
 	/* Setup some convenience pointers */
 	rx_ring = (RX_RING_t *) &adapter->RxRing;
 
@@ -183,9 +175,8 @@ int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
 						  bufsize,
 						  &rx_ring->pFbr1RingPa);
 	if (!rx_ring->pFbr1RingVa) {
-		DBG_ERROR(et131x_dbginfo,
+		dev_err(&adapter->pdev->dev,
 			  "Cannot alloc memory for Free Buffer Ring 1\n");
-		DBG_LEAVE(et131x_dbginfo);
 		return -ENOMEM;
 	}
 
@@ -213,9 +204,8 @@ int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
 						  bufsize,
 						  &rx_ring->pFbr0RingPa);
 	if (!rx_ring->pFbr0RingVa) {
-		DBG_ERROR(et131x_dbginfo,
+		dev_err(&adapter->pdev->dev,
 			  "Cannot alloc memory for Free Buffer Ring 0\n");
-		DBG_LEAVE(et131x_dbginfo);
 		return -ENOMEM;
 	}
 
@@ -262,8 +252,8 @@ int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
 					&rx_ring->Fbr1MemPa[OuterLoop]);
 
 		if (!rx_ring->Fbr1MemVa[OuterLoop]) {
-			DBG_ERROR(et131x_dbginfo, "Could not alloc memory\n");
-			DBG_LEAVE(et131x_dbginfo);
+			dev_err(&adapter->pdev->dev,
+				"Could not alloc memory\n");
 			return -ENOMEM;
 		}
 
@@ -313,8 +303,8 @@ int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
 					&rx_ring->Fbr0MemPa[OuterLoop]);
 
 		if (!rx_ring->Fbr0MemVa[OuterLoop]) {
-			DBG_ERROR(et131x_dbginfo, "Could not alloc memory\n");
-			DBG_LEAVE(et131x_dbginfo);
+			dev_err(&adapter->pdev->dev,
+				"Could not alloc memory\n");
 			return -ENOMEM;
 		}
 
@@ -356,9 +346,8 @@ int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
 					  &rx_ring->pPSRingPa);
 
 	if (!rx_ring->pPSRingVa) {
-		DBG_ERROR(et131x_dbginfo,
+		dev_err(&adapter->pdev->dev,
 			  "Cannot alloc memory for Packet Status Ring\n");
-		DBG_LEAVE(et131x_dbginfo);
 		return -ENOMEM;
 	}
 
@@ -384,9 +373,8 @@ int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
 						 sizeof(RX_STATUS_BLOCK_t) +
 						 0x7, &rx_ring->pRxStatusPa);
 	if (!rx_ring->pRxStatusVa) {
-		DBG_ERROR(et131x_dbginfo,
+		dev_err(&adapter->pdev->dev,
 			  "Cannot alloc memory for Status Block\n");
-		DBG_LEAVE(et131x_dbginfo);
 		return -ENOMEM;
 	}
 
@@ -422,8 +410,6 @@ int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
 	 */
 	INIT_LIST_HEAD(&rx_ring->RecvList);
 	INIT_LIST_HEAD(&rx_ring->RecvPendingList);
-
-	DBG_LEAVE(et131x_dbginfo);
 	return 0;
 }
 
@@ -439,13 +425,11 @@ void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
 	PMP_RFD pMpRfd;
 	RX_RING_t *rx_ring;
 
-	DBG_ENTER(et131x_dbginfo);
-
 	/* Setup some convenience pointers */
 	rx_ring = (RX_RING_t *) &adapter->RxRing;
 
 	/* Free RFDs and associated packet descriptors */
-	DBG_ASSERT(rx_ring->nReadyRecv == rx_ring->NumRfd);
+	WARN_ON(rx_ring->nReadyRecv != rx_ring->NumRfd);
 
 	while (!list_empty(&rx_ring->RecvList)) {
 		pMpRfd = (MP_RFD *) list_entry(rx_ring->RecvList.next,
@@ -583,8 +567,6 @@ void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
 
 	/* Reset Counters */
 	rx_ring->nReadyRecv = 0;
-
-	DBG_LEAVE(et131x_dbginfo);
 }
 
 /**
@@ -601,8 +583,6 @@ int et131x_init_recv(struct et131x_adapter *adapter)
 	uint32_t TotalNumRfd = 0;
 	RX_RING_t *rx_ring = NULL;
 
-	DBG_ENTER(et131x_dbginfo);
-
 	/* Setup some convenience pointers */
 	rx_ring = (RX_RING_t *) &adapter->RxRing;
 
@@ -612,7 +592,7 @@ int et131x_init_recv(struct et131x_adapter *adapter)
 				       GFP_ATOMIC | GFP_DMA);
 
 		if (!pMpRfd) {
-			DBG_ERROR(et131x_dbginfo,
+			dev_err(&adapter->pdev->dev,
 				  "Couldn't alloc RFD out of kmem_cache\n");
 			status = -ENOMEM;
 			continue;
@@ -620,7 +600,7 @@ int et131x_init_recv(struct et131x_adapter *adapter)
 
 		status = et131x_rfd_resources_alloc(adapter, pMpRfd);
 		if (status != 0) {
-			DBG_ERROR(et131x_dbginfo,
+			dev_err(&adapter->pdev->dev,
 				  "Couldn't alloc packet for RFD\n");
 			kmem_cache_free(rx_ring->RecvLookaside, pMpRfd);
 			continue;
@@ -641,11 +621,9 @@ int et131x_init_recv(struct et131x_adapter *adapter)
 
 	if (status != 0) {
 		kmem_cache_free(rx_ring->RecvLookaside, pMpRfd);
-		DBG_ERROR(et131x_dbginfo,
+		dev_err(&adapter->pdev->dev,
 			  "Allocation problems in et131x_init_recv\n");
 	}
-
-	DBG_LEAVE(et131x_dbginfo);
 	return status;
 }
 
@@ -687,8 +665,6 @@ void ConfigRxDmaRegs(struct et131x_adapter *etdev)
 	RXDMA_PSR_NUM_DES_t psr_num_des;
 	unsigned long flags;
 
-	DBG_ENTER(et131x_dbginfo);
-
 	/* Halt RXDMA to perform the reconfigure. */
 	et131x_rx_dma_disable(etdev);
 
@@ -786,8 +762,6 @@ void ConfigRxDmaRegs(struct et131x_adapter *etdev)
 	writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time.value);
 
 	spin_unlock_irqrestore(&etdev->RcvLock, flags);
-
-	DBG_LEAVE(et131x_dbginfo);
 }
 
 /**
@@ -814,8 +788,6 @@ void et131x_rx_dma_disable(struct et131x_adapter *etdev)
 {
 	RXDMA_CSR_t csr;
 
-	DBG_ENTER(et131x_dbginfo);
-
 	/* Setup the receive dma configuration register */
 	writel(0x00002001, &etdev->regs->rxdma.csr.value);
 	csr.value = readl(&etdev->regs->rxdma.csr.value);
@@ -823,12 +795,10 @@ void et131x_rx_dma_disable(struct et131x_adapter *etdev)
 		udelay(5);
 		csr.value = readl(&etdev->regs->rxdma.csr.value);
 		if (csr.bits.halt_status != 1)
-			DBG_ERROR(et131x_dbginfo,
+			dev_err(&etdev->pdev->dev,
 				  "RX Dma failed to enter halt state. CSR 0x%08x\n",
 				  csr.value);
 	}
-
-	DBG_LEAVE(et131x_dbginfo);
 }
 
 /**
@@ -837,8 +807,6 @@ void et131x_rx_dma_disable(struct et131x_adapter *etdev)
 */
 void et131x_rx_dma_enable(struct et131x_adapter *etdev)
 {
-	DBG_RX_ENTER(et131x_dbginfo);
-
 	if (etdev->RegistryPhyLoopbk)
 		/* RxDMA is disabled for loopback operation. */
 		writel(0x1, &etdev->regs->rxdma.csr.value);
@@ -869,14 +837,12 @@ void et131x_rx_dma_enable(struct et131x_adapter *etdev)
 			udelay(5);
 			csr.value = readl(&etdev->regs->rxdma.csr.value);
 			if (csr.bits.halt_status != 0) {
-				DBG_ERROR(et131x_dbginfo,
+				dev_err(&etdev->pdev->dev,
 					  "RX Dma failed to exit halt state. CSR 0x%08x\n",
 					  csr.value);
 			}
 		}
 	}
-
-	DBG_RX_LEAVE(et131x_dbginfo);
 }
 
 /**
@@ -905,9 +871,6 @@ PMP_RFD nic_rx_pkts(struct et131x_adapter *etdev)
 	uint32_t localLen;
 	PKT_STAT_DESC_WORD0_t Word0;
 
-
-	DBG_RX_ENTER(et131x_dbginfo);
-
 	/* RX Status block is written by the DMA engine prior to every
 	 * interrupt. It contains the next to be used entry in the Packet
 	 * Status Ring, and also the two Free Buffer rings.
@@ -919,8 +882,6 @@ PMP_RFD nic_rx_pkts(struct et131x_adapter *etdev)
 	    pRxStatusBlock->Word1.bits.PSRwrap ==
 	    pRxLocal->local_psr_full.bits.psr_full_wrap) {
 		/* Looks like this ring is not updated yet */
-		DBG_RX(et131x_dbginfo, "(0)\n");
-		DBG_RX_LEAVE(et131x_dbginfo);
 		return NULL;
 	}
 
@@ -937,23 +898,6 @@ PMP_RFD nic_rx_pkts(struct et131x_adapter *etdev)
 	bufferIndex = (uint16_t) pPSREntry->word1.bits.bi;
 	Word0 = pPSREntry->word0;
 
-	DBG_RX(et131x_dbginfo, "RX PACKET STATUS\n");
-	DBG_RX(et131x_dbginfo, "\tlength : %d\n", localLen);
-	DBG_RX(et131x_dbginfo, "\tringIndex : %d\n", ringIndex);
-	DBG_RX(et131x_dbginfo, "\tbufferIndex : %d\n", bufferIndex);
-	DBG_RX(et131x_dbginfo, "\tword0 : 0x%08x\n", Word0.value);
-
-#if 0
-	/* Check the Status Word that the MAC has appended to the PSR
-	 * entry in case the MAC has detected errors.
-	 */
-	if (Word0.value & ALCATEL_BAD_STATUS) {
-		DBG_ERROR(et131x_dbginfo,
-			  "NICRxPkts >> Alcatel Status Word error."
-			  "Value 0x%08x\n", pPSREntry->word0.value);
-	}
-#endif
-
 	/* Indicate that we have used this PSR entry. */
 	if (++pRxLocal->local_psr_full.bits.psr_full >
 	    pRxLocal->PsrNumEntries - 1) {
@@ -966,11 +910,6 @@ PMP_RFD nic_rx_pkts(struct et131x_adapter *etdev)
 
 #ifndef USE_FBR0
 	if (ringIndex != 1) {
-		DBG_ERROR(et131x_dbginfo,
-			  "NICRxPkts PSR Entry %d indicates "
-			  "Buffer Ring 0 in use\n",
-			  pRxLocal->local_psr_full.bits.psr_full);
-		DBG_RX_LEAVE(et131x_dbginfo);
 		return NULL;
 	}
 #endif
@@ -987,12 +926,11 @@ PMP_RFD nic_rx_pkts(struct et131x_adapter *etdev)
 #endif
 	{
 		/* Illegal buffer or ring index cannot be used by S/W*/
-		DBG_ERROR(et131x_dbginfo,
+		dev_err(&etdev->pdev->dev,
 			  "NICRxPkts PSR Entry %d indicates "
 			  "length of %d and/or bad bi(%d)\n",
 			  pRxLocal->local_psr_full.bits.psr_full,
 			  localLen, bufferIndex);
-		DBG_RX_LEAVE(et131x_dbginfo);
 		return NULL;
 	}
 
@@ -1004,9 +942,6 @@ PMP_RFD nic_rx_pkts(struct et131x_adapter *etdev)
 	pMpRfd = (PMP_RFD) list_entry(element, MP_RFD, list_node);
 
 	if (pMpRfd == NULL) {
-		DBG_RX(et131x_dbginfo,
-		       "NULL RFD returned from RecvList via list_entry()\n");
-		DBG_RX_LEAVE(et131x_dbginfo);
 		spin_unlock_irqrestore(&etdev->RcvLock, flags);
 		return NULL;
 	}
@@ -1040,19 +975,6 @@ PMP_RFD nic_rx_pkts(struct et131x_adapter *etdev)
 				etdev->ReplicaPhyLoopbkPF = 1;
 			}
 		}
-		DBG_WARNING(et131x_dbginfo,
-			    "pBufVa:\t%02x:%02x:%02x:%02x:%02x:%02x\n",
-			    pBufVa[6], pBufVa[7], pBufVa[8],
-			    pBufVa[9], pBufVa[10], pBufVa[11]);
-
-		DBG_WARNING(et131x_dbginfo,
-			    "CurrentAddr:\t%02x:%02x:%02x:%02x:%02x:%02x\n",
-			    etdev->CurrentAddress[0],
-			    etdev->CurrentAddress[1],
-			    etdev->CurrentAddress[2],
-			    etdev->CurrentAddress[3],
-			    etdev->CurrentAddress[4],
-			    etdev->CurrentAddress[5]);
 	}
 
 	/* Determine if this is a multicast packet coming in */
@@ -1127,9 +1049,8 @@ PMP_RFD nic_rx_pkts(struct et131x_adapter *etdev)
 
 		skb = dev_alloc_skb(pMpRfd->PacketSize + 2);
 		if (!skb) {
-			DBG_ERROR(et131x_dbginfo,
+			dev_err(&etdev->pdev->dev,
 				  "Couldn't alloc an SKB for Rx\n");
-			DBG_RX_LEAVE(et131x_dbginfo);
 			return NULL;
 		}
 
@@ -1149,9 +1070,6 @@ PMP_RFD nic_rx_pkts(struct et131x_adapter *etdev)
 	}
 
 	nic_return_rfd(etdev, pMpRfd);
-
-	DBG_RX(et131x_dbginfo, "(1)\n");
-	DBG_RX_LEAVE(et131x_dbginfo);
 	return pMpRfd;
 }
 
@@ -1166,9 +1084,7 @@ void et131x_reset_recv(struct et131x_adapter *etdev)
 	PMP_RFD pMpRfd;
 	struct list_head *element;
 
-	DBG_ENTER(et131x_dbginfo);
-
-	DBG_ASSERT(!list_empty(&etdev->RxRing.RecvList));
+	WARN_ON(list_empty(&etdev->RxRing.RecvList));
 
 	/* Take all the RFD's from the pending list, and stick them on the
 	 * RecvList.
@@ -1180,8 +1096,6 @@ void et131x_reset_recv(struct et131x_adapter *etdev)
 
 		list_move_tail(&pMpRfd->list_node, &etdev->RxRing.RecvList);
 	}
-
-	DBG_LEAVE(et131x_dbginfo);
 }
 
 /**
@@ -1200,15 +1114,12 @@ void et131x_handle_recv_interrupt(struct et131x_adapter *etdev)
 	uint32_t PacketFreeCount = 0;
 	bool TempUnfinishedRec = false;
 
-	DBG_RX_ENTER(et131x_dbginfo);
-
 	PacketsToHandle = NUM_PACKETS_HANDLED;
 
 	/* Process up to available RFD's */
 	while (PacketArrayCount < PacketsToHandle) {
 		if (list_empty(&etdev->RxRing.RecvList)) {
-			DBG_ASSERT(etdev->RxRing.nReadyRecv == 0);
-			DBG_ERROR(et131x_dbginfo, "NO RFD's !!!!!!!!!!!!!\n");
+			WARN_ON(etdev->RxRing.nReadyRecv != 0);
 			TempUnfinishedRec = true;
 			break;
 		}
@@ -1246,8 +1157,8 @@ void et131x_handle_recv_interrupt(struct et131x_adapter *etdev)
 			RFDFreeArray[PacketFreeCount] = pMpRfd;
 			PacketFreeCount++;
 
-			DBG_WARNING(et131x_dbginfo,
-				    "RFD's are running out !!!!!!!!!!!!!\n");
+			dev_warn(&etdev->pdev->dev,
+				"RFD's are running out\n");
 		}
 
 		PacketArray[PacketArrayCount] = pMpRfd->Packet;
@@ -1262,8 +1173,6 @@ void et131x_handle_recv_interrupt(struct et131x_adapter *etdev)
 		/* Watchdog timer will disable itself if appropriate. */
 		etdev->RxRing.UnfinishedReceives = false;
 	}
-
-	DBG_RX_LEAVE(et131x_dbginfo);
 }
 
 static inline u32 bump_fbr(u32 *fbr, u32 limit)
@@ -1289,8 +1198,6 @@ void nic_return_rfd(struct et131x_adapter *etdev, PMP_RFD pMpRfd)
 	uint8_t ri = pMpRfd->ringindex;
 	unsigned long flags;
 
-	DBG_RX_ENTER(et131x_dbginfo);
-
 	/* We don't use any of the OOB data besides status. Otherwise, we
 	 * need to clean up OOB data
 	 */
@@ -1339,7 +1246,7 @@ void nic_return_rfd(struct et131x_adapter *etdev, PMP_RFD pMpRfd)
 #endif
 		spin_unlock_irqrestore(&etdev->FbrLock, flags);
 	} else {
-		DBG_ERROR(et131x_dbginfo,
+		dev_err(&etdev->pdev->dev,
 			  "NICReturnRFD illegal Buffer Index returned\n");
 	}
 
@@ -1351,6 +1258,5 @@ void nic_return_rfd(struct et131x_adapter *etdev, PMP_RFD pMpRfd)
 	rx_local->nReadyRecv++;
 	spin_unlock_irqrestore(&etdev->RcvLock, flags);
 
-	DBG_ASSERT(rx_local->nReadyRecv <= rx_local->NumRfd);
-	DBG_RX_LEAVE(et131x_dbginfo);
+	WARN_ON(rx_local->nReadyRecv > rx_local->NumRfd);
 }
diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
index 4c18e9a2d2e8..387a697c4af8 100644
--- a/drivers/staging/et131x/et1310_tx.c
+++ b/drivers/staging/et131x/et1310_tx.c
@@ -56,7 +56,6 @@
  */
 
 #include "et131x_version.h"
-#include "et131x_debug.h"
 #include "et131x_defs.h"
 
 #include <linux/pci.h>
@@ -95,11 +94,6 @@
 #include "et1310_tx.h"
 
 
-/* Data for debugging facilities */
-#ifdef CONFIG_ET131X_DEBUG
-extern dbg_info_t *et131x_dbginfo;
-#endif /* CONFIG_ET131X_DEBUG */
-
 static void et131x_update_tcb_list(struct et131x_adapter *etdev);
 static void et131x_check_send_wait_list(struct et131x_adapter *etdev);
 static inline void et131x_free_send_packet(struct et131x_adapter *etdev,
@@ -125,14 +119,11 @@ int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
 	int desc_size = 0;
 	TX_RING_t *tx_ring = &adapter->TxRing;
 
-	DBG_ENTER(et131x_dbginfo);
-
 	/* Allocate memory for the TCB's (Transmit Control Block) */
 	adapter->TxRing.MpTcbMem = (MP_TCB *)kcalloc(NUM_TCB, sizeof(MP_TCB),
 						     GFP_ATOMIC | GFP_DMA);
 	if (!adapter->TxRing.MpTcbMem) {
-		DBG_ERROR(et131x_dbginfo, "Cannot alloc memory for TCBs\n");
-		DBG_LEAVE(et131x_dbginfo);
+		dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
 		return -ENOMEM;
 	}
 
@@ -144,8 +135,7 @@ int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
 	    (PTX_DESC_ENTRY_t) pci_alloc_consistent(adapter->pdev, desc_size,
 						    &tx_ring->pTxDescRingPa);
 	if (!adapter->TxRing.pTxDescRingVa) {
-		DBG_ERROR(et131x_dbginfo, "Cannot alloc memory for Tx Ring\n");
-		DBG_LEAVE(et131x_dbginfo);
+		dev_err(&adapter->pdev->dev, "Cannot alloc memory for Tx Ring\n");
 		return -ENOMEM;
 	}
 
@@ -170,9 +160,8 @@ int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
 						  sizeof(TX_STATUS_BLOCK_t),
 						  &tx_ring->pTxStatusPa);
 	if (!adapter->TxRing.pTxStatusPa) {
-		DBG_ERROR(et131x_dbginfo,
+		dev_err(&adapter->pdev->dev,
 			  "Cannot alloc memory for Tx status block\n");
-		DBG_LEAVE(et131x_dbginfo);
 		return -ENOMEM;
 	}
 
@@ -181,13 +170,11 @@ int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
 						  NIC_MIN_PACKET_SIZE,
 						  &tx_ring->pTxDummyBlkPa);
 	if (!adapter->TxRing.pTxDummyBlkPa) {
-		DBG_ERROR(et131x_dbginfo,
+		dev_err(&adapter->pdev->dev,
 			  "Cannot alloc memory for Tx dummy buffer\n");
186 | DBG_LEAVE(et131x_dbginfo); | ||
187 | return -ENOMEM; | 175 | return -ENOMEM; |
188 | } | 176 | } |
189 | 177 | ||
190 | DBG_LEAVE(et131x_dbginfo); | ||
191 | return 0; | 178 | return 0; |
192 | } | 179 | } |
193 | 180 | ||
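Each failure leg in et131x_tx_dma_memory_alloc() now reports through dev_err() on the PCI device and returns -ENOMEM straight away, since there is no longer a trace-exit macro to invoke first. A hedged sketch of that shape, with a stand-in helper rather than the driver's real allocation sequence:

#include <linux/device.h>
#include <linux/pci.h>
#include <linux/slab.h>

/*
 * Simplified illustration, not the driver's exact structures: allocate a
 * control-block array and report failure via dev_err(); the caller maps a
 * NULL result to -ENOMEM.
 */
static void *alloc_tcb_array(struct pci_dev *pdev, size_t count, size_t size)
{
	void *mem = kcalloc(count, size, GFP_ATOMIC | GFP_DMA);

	if (!mem)
		dev_err(&pdev->dev, "Cannot alloc memory for TCBs\n");

	return mem;
}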
@@ -201,8 +188,6 @@ void et131x_tx_dma_memory_free(struct et131x_adapter *adapter) | |||
201 | { | 188 | { |
202 | int desc_size = 0; | 189 | int desc_size = 0; |
203 | 190 | ||
204 | DBG_ENTER(et131x_dbginfo); | ||
205 | |||
206 | if (adapter->TxRing.pTxDescRingVa) { | 191 | if (adapter->TxRing.pTxDescRingVa) { |
207 | /* Free memory relating to Tx rings here */ | 192 | /* Free memory relating to Tx rings here */ |
208 | adapter->TxRing.pTxDescRingVa -= adapter->TxRing.TxDescOffset; | 193 | adapter->TxRing.pTxDescRingVa -= adapter->TxRing.TxDescOffset; |
@@ -240,8 +225,6 @@ void et131x_tx_dma_memory_free(struct et131x_adapter *adapter) | |||
240 | 225 | ||
241 | /* Free the memory for MP_TCB structures */ | 226 | /* Free the memory for MP_TCB structures */ |
242 | kfree(adapter->TxRing.MpTcbMem); | 227 | kfree(adapter->TxRing.MpTcbMem); |
243 | |||
244 | DBG_LEAVE(et131x_dbginfo); | ||
245 | } | 228 | } |
246 | 229 | ||
247 | /** | 230 | /** |
@@ -252,8 +235,6 @@ void ConfigTxDmaRegs(struct et131x_adapter *etdev) | |||
252 | { | 235 | { |
253 | struct _TXDMA_t __iomem *txdma = &etdev->regs->txdma; | 236 | struct _TXDMA_t __iomem *txdma = &etdev->regs->txdma; |
254 | 237 | ||
255 | DBG_ENTER(et131x_dbginfo); | ||
256 | |||
257 | /* Load the hardware with the start of the transmit descriptor ring. */ | 238 | /* Load the hardware with the start of the transmit descriptor ring. */ |
258 | writel((uint32_t) (etdev->TxRing.pTxDescRingAdjustedPa >> 32), | 239 | writel((uint32_t) (etdev->TxRing.pTxDescRingAdjustedPa >> 32), |
259 | &txdma->pr_base_hi); | 240 | &txdma->pr_base_hi); |
@@ -277,8 +258,6 @@ void ConfigTxDmaRegs(struct et131x_adapter *etdev) | |||
277 | 258 | ||
278 | writel(0, &txdma->service_request); | 259 | writel(0, &txdma->service_request); |
279 | etdev->TxRing.txDmaReadyToSend = 0; | 260 | etdev->TxRing.txDmaReadyToSend = 0; |
280 | |||
281 | DBG_LEAVE(et131x_dbginfo); | ||
282 | } | 261 | } |
283 | 262 | ||
284 | /** | 263 | /** |
@@ -287,12 +266,8 @@ void ConfigTxDmaRegs(struct et131x_adapter *etdev) | |||
287 | */ | 266 | */ |
288 | void et131x_tx_dma_disable(struct et131x_adapter *etdev) | 267 | void et131x_tx_dma_disable(struct et131x_adapter *etdev) |
289 | { | 268 | { |
290 | DBG_ENTER(et131x_dbginfo); | ||
291 | |||
292 | /* Setup the transmit dma configuration register */ | 269 |
293 | writel(0x101, &etdev->regs->txdma.csr.value); | 270 | writel(0x101, &etdev->regs->txdma.csr.value); |
294 | |||
295 | DBG_LEAVE(et131x_dbginfo); | ||
296 | } | 271 | } |
297 | 272 | ||
298 | /** | 273 | /** |
@@ -303,8 +278,6 @@ void et131x_tx_dma_disable(struct et131x_adapter *etdev) | |||
303 | */ | 278 | */ |
304 | void et131x_tx_dma_enable(struct et131x_adapter *etdev) | 279 | void et131x_tx_dma_enable(struct et131x_adapter *etdev) |
305 | { | 280 | { |
306 | DBG_ENTER(et131x_dbginfo); | ||
307 | |||
308 | if (etdev->RegistryPhyLoopbk) { | 281 | if (etdev->RegistryPhyLoopbk) { |
309 | /* TxDMA is disabled for loopback operation. */ | 282 | /* TxDMA is disabled for loopback operation. */ |
310 | writel(0x101, &etdev->regs->txdma.csr.value); | 283 | writel(0x101, &etdev->regs->txdma.csr.value); |
@@ -319,8 +292,6 @@ void et131x_tx_dma_enable(struct et131x_adapter *etdev) | |||
319 | csr.bits.cache_thrshld = PARM_DMA_CACHE_DEF; | 292 | csr.bits.cache_thrshld = PARM_DMA_CACHE_DEF; |
320 | writel(csr.value, &etdev->regs->txdma.csr.value); | 293 | writel(csr.value, &etdev->regs->txdma.csr.value); |
321 | } | 294 | } |
322 | |||
323 | DBG_LEAVE(et131x_dbginfo); | ||
324 | } | 295 | } |
325 | 296 | ||
326 | /** | 297 | /** |
@@ -333,8 +304,6 @@ void et131x_init_send(struct et131x_adapter *adapter) | |||
333 | uint32_t TcbCount; | 304 | uint32_t TcbCount; |
334 | TX_RING_t *tx_ring; | 305 | TX_RING_t *tx_ring; |
335 | 306 | ||
336 | DBG_ENTER(et131x_dbginfo); | ||
337 | |||
338 | /* Setup some convenience pointers */ | 307 | /* Setup some convenience pointers */ |
339 | tx_ring = &adapter->TxRing; | 308 | tx_ring = &adapter->TxRing; |
340 | pMpTcb = adapter->TxRing.MpTcbMem; | 309 | pMpTcb = adapter->TxRing.MpTcbMem; |
@@ -364,8 +333,6 @@ void et131x_init_send(struct et131x_adapter *adapter) | |||
364 | tx_ring->CurrSendTail = (PMP_TCB) NULL; | 333 | tx_ring->CurrSendTail = (PMP_TCB) NULL; |
365 | 334 | ||
366 | INIT_LIST_HEAD(&adapter->TxRing.SendWaitQueue); | 335 | INIT_LIST_HEAD(&adapter->TxRing.SendWaitQueue); |
367 | |||
368 | DBG_LEAVE(et131x_dbginfo); | ||
369 | } | 336 | } |
370 | 337 | ||
371 | /** | 338 | /** |
@@ -380,8 +347,6 @@ int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev) | |||
380 | int status = 0; | 347 | int status = 0; |
381 | struct et131x_adapter *etdev = NULL; | 348 | struct et131x_adapter *etdev = NULL; |
382 | 349 | ||
383 | DBG_TX_ENTER(et131x_dbginfo); | ||
384 | |||
385 | etdev = netdev_priv(netdev); | 350 | etdev = netdev_priv(netdev); |
386 | 351 | ||
387 | /* Send these packets | 352 | /* Send these packets |
@@ -397,7 +362,6 @@ int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev) | |||
397 | * packet under Linux; if we just send an error up to the | 362 | * packet under Linux; if we just send an error up to the |
398 | * netif layer, it will resend the skb to us. | 363 | * netif layer, it will resend the skb to us. |
399 | */ | 364 | */ |
400 | DBG_VERBOSE(et131x_dbginfo, "TCB Resources Not Available\n"); | ||
401 | status = -ENOMEM; | 365 | status = -ENOMEM; |
402 | } else { | 366 | } else { |
403 | /* We need to see if the link is up; if it's not, make the | 367 | /* We need to see if the link is up; if it's not, make the |
@@ -409,9 +373,6 @@ int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev) | |||
409 | */ | 373 | */ |
410 | if (MP_SHOULD_FAIL_SEND(etdev) || etdev->DriverNoPhyAccess | 374 | if (MP_SHOULD_FAIL_SEND(etdev) || etdev->DriverNoPhyAccess |
411 | || !netif_carrier_ok(netdev)) { | 375 | || !netif_carrier_ok(netdev)) { |
412 | DBG_VERBOSE(et131x_dbginfo, | ||
413 | "Can't Tx, Link is DOWN; drop the packet\n"); | ||
414 | |||
415 | dev_kfree_skb_any(skb); | 376 | dev_kfree_skb_any(skb); |
416 | skb = NULL; | 377 | skb = NULL; |
417 | 378 | ||
@@ -426,24 +387,16 @@ int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev) | |||
426 | * send an error up to the netif layer, it | 387 | * send an error up to the netif layer, it |
427 | * will resend the skb to us. | 388 | * will resend the skb to us. |
428 | */ | 389 | */ |
429 | DBG_WARNING(et131x_dbginfo, | ||
430 | "Resources problem, Queue tx packet\n"); | ||
431 | } else if (status != 0) { | 390 | } else if (status != 0) { |
432 | /* On any other error, make netif think we're | 391 | /* On any other error, make netif think we're |
433 | * OK and drop the packet | 392 | * OK and drop the packet |
434 | */ | 393 | */ |
435 | DBG_WARNING(et131x_dbginfo, | ||
436 | "General error, drop packet\n"); | ||
437 | |||
438 | dev_kfree_skb_any(skb); | 394 | dev_kfree_skb_any(skb); |
439 | skb = NULL; | 395 | skb = NULL; |
440 | |||
441 | etdev->net_stats.tx_dropped++; | 396 | etdev->net_stats.tx_dropped++; |
442 | } | 397 | } |
443 | } | 398 | } |
444 | } | 399 | } |
445 | |||
446 | DBG_TX_LEAVE(et131x_dbginfo); | ||
447 | return status; | 400 | return status; |
448 | } | 401 | } |
449 | 402 | ||
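The status handling in et131x_send_packets() follows the comments retained above: a TCB shortage is reported back as -ENOMEM so the network layer will requeue the skb, whereas a dead link or any other failure consumes the skb and is counted as a drop, because bubbling the error upward would only cause the same packet to be resubmitted. A rough sketch of that decision, using a placeholder helper rather than the driver's actual flow:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative only: how the two classes of send failure are treated. */
static int complete_send_attempt(struct sk_buff *skb, int status,
				 struct net_device_stats *stats)
{
	if (status == -ENOMEM)
		return -ENOMEM;		/* resources: let the stack retry the skb */

	if (status != 0) {
		dev_kfree_skb_any(skb);	/* link down / other error: drop it */
		stats->tx_dropped++;
	}

	return 0;
}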
@@ -464,21 +417,8 @@ static int et131x_send_packet(struct sk_buff *skb, | |||
464 | uint16_t *shbufva; | 417 | uint16_t *shbufva; |
465 | unsigned long flags; | 418 | unsigned long flags; |
466 | 419 | ||
467 | DBG_TX_ENTER(et131x_dbginfo); | ||
468 | |||
469 | /* Is our buffer scattered, or continuous? */ | ||
470 | if (skb_shinfo(skb)->nr_frags == 0) { | ||
471 | DBG_TX(et131x_dbginfo, "Scattered buffer: NO\n"); | ||
472 | } else { | ||
473 | DBG_TX(et131x_dbginfo, "Scattered buffer: YES, Num Frags: %d\n", | ||
474 | skb_shinfo(skb)->nr_frags); | ||
475 | } | ||
476 | |||
477 | /* All packets must have at least a MAC address and a protocol type */ | 420 | /* All packets must have at least a MAC address and a protocol type */ |
478 | if (skb->len < ETH_HLEN) { | 421 | if (skb->len < ETH_HLEN) { |
479 | DBG_ERROR(et131x_dbginfo, | ||
480 | "Packet size < ETH_HLEN (14 bytes)\n"); | ||
481 | DBG_LEAVE(et131x_dbginfo); | ||
482 | return -EIO; | 422 | return -EIO; |
483 | } | 423 | } |
484 | 424 | ||
@@ -489,9 +429,6 @@ static int et131x_send_packet(struct sk_buff *skb, | |||
489 | 429 | ||
490 | if (pMpTcb == NULL) { | 430 | if (pMpTcb == NULL) { |
491 | spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags); | 431 | spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags); |
492 | |||
493 | DBG_WARNING(et131x_dbginfo, "Can't obtain a TCB\n"); | ||
494 | DBG_TX_LEAVE(et131x_dbginfo); | ||
495 | return -ENOMEM; | 432 | return -ENOMEM; |
496 | } | 433 | } |
497 | 434 | ||
@@ -533,16 +470,10 @@ static int et131x_send_packet(struct sk_buff *skb, | |||
533 | } | 470 | } |
534 | 471 | ||
535 | etdev->TxRing.TCBReadyQueueTail = pMpTcb; | 472 | etdev->TxRing.TCBReadyQueueTail = pMpTcb; |
536 | |||
537 | spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags); | 473 | spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags); |
538 | |||
539 | DBG_TX_LEAVE(et131x_dbginfo); | ||
540 | return status; | 474 | return status; |
541 | } | 475 | } |
542 | 476 | WARN_ON(etdev->TxRing.nBusySend > NUM_TCB); | |
543 | DBG_ASSERT(etdev->TxRing.nBusySend <= NUM_TCB); | ||
544 | |||
545 | DBG_TX_LEAVE(et131x_dbginfo); | ||
546 | return 0; | 477 | return 0; |
547 | } | 478 | } |
548 | 479 | ||
@@ -564,8 +495,6 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb) | |||
564 | struct skb_frag_struct *pFragList = &skb_shinfo(pPacket)->frags[0]; | 495 | struct skb_frag_struct *pFragList = &skb_shinfo(pPacket)->frags[0]; |
565 | unsigned long flags; | 496 | unsigned long flags; |
566 | 497 | ||
567 | DBG_TX_ENTER(et131x_dbginfo); | ||
568 | |||
569 | /* Part of the optimizations of this send routine restrict us to | 498 | /* Part of the optimizations of this send routine restrict us to |
570 | * sending 24 fragments at a pass. In practice we should never see | 499 | * sending 24 fragments at a pass. In practice we should never see |
571 | * more than 5 fragments. | 500 | * more than 5 fragments. |
@@ -575,7 +504,6 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb) | |||
575 | * although it is less efficient. | 504 | * although it is less efficient. |
576 | */ | 505 | */ |
577 | if (FragListCount > 23) { | 506 | if (FragListCount > 23) { |
578 | DBG_TX_LEAVE(et131x_dbginfo); | ||
579 | return -EIO; | 507 | return -EIO; |
580 | } | 508 | } |
581 | 509 | ||
@@ -596,15 +524,7 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb) | |||
596 | * doesn't seem to like large fragments. | 524 | * doesn't seem to like large fragments. |
597 | */ | 525 | */ |
598 | if ((pPacket->len - pPacket->data_len) <= 1514) { | 526 | if ((pPacket->len - pPacket->data_len) <= 1514) { |
599 | DBG_TX(et131x_dbginfo, | ||
600 | "Got packet of length %d, " | ||
601 | "filling desc entry %d, " | ||
602 | "TCB: 0x%p\n", | ||
603 | (pPacket->len - pPacket->data_len), | ||
604 | etdev->TxRing.txDmaReadyToSend, pMpTcb); | ||
605 | |||
606 | CurDesc[FragmentNumber].DataBufferPtrHigh = 0; | 527 | CurDesc[FragmentNumber].DataBufferPtrHigh = 0; |
607 | |||
608 | CurDesc[FragmentNumber].word2.bits. | 528 | CurDesc[FragmentNumber].word2.bits. |
609 | length_in_bytes = | 529 | length_in_bytes = |
610 | pPacket->len - pPacket->data_len; | 530 | pPacket->len - pPacket->data_len; |
@@ -624,15 +544,7 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb) | |||
624 | pPacket->data_len, | 544 | pPacket->data_len, |
625 | PCI_DMA_TODEVICE); | 545 | PCI_DMA_TODEVICE); |
626 | } else { | 546 | } else { |
627 | DBG_TX(et131x_dbginfo, | ||
628 | "Got packet of length %d, " | ||
629 | "filling desc entry %d, " | ||
630 | "TCB: 0x%p\n", | ||
631 | (pPacket->len - pPacket->data_len), | ||
632 | etdev->TxRing.txDmaReadyToSend, pMpTcb); | ||
633 | |||
634 | CurDesc[FragmentNumber].DataBufferPtrHigh = 0; | 547 | CurDesc[FragmentNumber].DataBufferPtrHigh = 0; |
635 | |||
636 | CurDesc[FragmentNumber].word2.bits. | 548 | CurDesc[FragmentNumber].word2.bits. |
637 | length_in_bytes = | 549 | length_in_bytes = |
638 | ((pPacket->len - pPacket->data_len) / 2); | 550 | ((pPacket->len - pPacket->data_len) / 2); |
@@ -675,16 +587,7 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb) | |||
675 | PCI_DMA_TODEVICE); | 587 | PCI_DMA_TODEVICE); |
676 | } | 588 | } |
677 | } else { | 589 | } else { |
678 | DBG_TX(et131x_dbginfo, | ||
679 | "Got packet of length %d," | ||
680 | "filling desc entry %d\n" | ||
681 | "TCB: 0x%p\n", | ||
682 | pFragList[loopIndex].size, | ||
683 | etdev->TxRing.txDmaReadyToSend, | ||
684 | pMpTcb); | ||
685 | |||
686 | CurDesc[FragmentNumber].DataBufferPtrHigh = 0; | 590 | CurDesc[FragmentNumber].DataBufferPtrHigh = 0; |
687 | |||
688 | CurDesc[FragmentNumber].word2.bits.length_in_bytes = | 591 | CurDesc[FragmentNumber].word2.bits.length_in_bytes = |
689 | pFragList[loopIndex - 1].size; | 592 | pFragList[loopIndex - 1].size; |
690 | 593 | ||
@@ -703,10 +606,8 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb) | |||
703 | } | 606 | } |
704 | } | 607 | } |
705 | 608 | ||
706 | if (FragmentNumber == 0) { | 609 | if (FragmentNumber == 0) |
707 | DBG_WARNING(et131x_dbginfo, "No. frags is 0\n"); | ||
708 | return -EIO; | 610 | return -EIO; |
709 | } | ||
710 | 611 | ||
711 | if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) { | 612 | if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) { |
712 | if (++etdev->TxRing.TxPacketsSinceLastinterrupt == | 613 | if (++etdev->TxRing.TxPacketsSinceLastinterrupt == |
@@ -774,7 +675,7 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb) | |||
774 | 675 | ||
775 | etdev->TxRing.CurrSendTail = pMpTcb; | 676 | etdev->TxRing.CurrSendTail = pMpTcb; |
776 | 677 | ||
777 | DBG_ASSERT(pMpTcb->Next == NULL); | 678 | WARN_ON(pMpTcb->Next != NULL); |
778 | 679 | ||
779 | etdev->TxRing.nBusySend++; | 680 | etdev->TxRing.nBusySend++; |
780 | 681 | ||
@@ -791,432 +692,11 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb) | |||
791 | writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO, | 692 | writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO, |
792 | &etdev->regs->global.watchdog_timer); | 693 | &etdev->regs->global.watchdog_timer); |
793 | } | 694 | } |
794 | |||
795 | spin_unlock_irqrestore(&etdev->SendHWLock, flags); | 695 | spin_unlock_irqrestore(&etdev->SendHWLock, flags); |
796 | 696 | ||
797 | DBG_TX_LEAVE(et131x_dbginfo); | ||
798 | return 0; | 697 | return 0; |
799 | } | 698 | } |
800 | 699 | ||
801 | /* | ||
802 | * NOTE: For now, keep this older version of NICSendPacket around for | ||
803 | * reference, even though it's not used | ||
804 | */ | ||
805 | #if 0 | ||
806 | |||
807 | /** | ||
808 | * NICSendPacket - NIC specific send handler. | ||
809 | * @etdev: pointer to our adapter | ||
810 | * @pMpTcb: pointer to MP_TCB | ||
811 | * | ||
812 | * Returns 0 on success, errno on failure. | ||
813 | * | ||
814 | * This version of the send routine is designed for version A silicon. | ||
815 | * Assumption - Send spinlock has been acquired. | ||
816 | */ | ||
817 | static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb) | ||
818 | { | ||
819 | uint32_t loopIndex, fragIndex, loopEnd; | ||
820 | uint32_t splitfirstelem = 0; | ||
821 | uint32_t SegmentSize = 0; | ||
822 | TX_DESC_ENTRY_t CurDesc; | ||
823 | TX_DESC_ENTRY_t *CurDescPostCopy = NULL; | ||
824 | uint32_t SlotsAvailable; | ||
825 | DMA10W_t ServiceComplete; | ||
826 | unsigned int flags; | ||
827 | struct sk_buff *pPacket = pMpTcb->Packet; | ||
828 | uint32_t FragListCount = skb_shinfo(pPacket)->nr_frags + 1; | ||
829 | struct skb_frag_struct *pFragList = &skb_shinfo(pPacket)->frags[0]; | ||
830 | |||
831 | DBG_TX_ENTER(et131x_dbginfo); | ||
832 | |||
833 | ServiceComplete.value = | ||
834 | readl(&etdev->regs->txdma.NewServiceComplete.value); | ||
835 | |||
836 | /* | ||
837 | * Attempt to fix TWO hardware bugs: | ||
838 | * 1) NEVER write an odd number of descriptors. | ||
839 | * 2) If packet length is less than NIC_MIN_PACKET_SIZE, then pad the | ||
840 | * packet to NIC_MIN_PACKET_SIZE bytes by adding a new last | ||
841 | * descriptor IN HALF DUPLEX MODE ONLY | ||
842 | * NOTE that (2) interacts with (1). If the packet is less than | ||
843 | * NIC_MIN_PACKET_SIZE bytes then we will append a descriptor. | ||
844 | * Therefore if it is even now, it will eventually end up odd, and | ||
845 | * so will need adjusting. | ||
846 | * | ||
847 | * VLAN tags get involved since VLAN tags add another one or two | ||
848 | * segments. | ||
849 | */ | ||
850 | DBG_TX(et131x_dbginfo, | ||
851 | "pMpTcb->PacketLength: %d\n", pMpTcb->PacketLength); | ||
852 | |||
853 | if ((etdev->duplex_mode == 0) | ||
854 | && (pMpTcb->PacketLength < NIC_MIN_PACKET_SIZE)) { | ||
855 | DBG_TX(et131x_dbginfo, | ||
856 | "HALF DUPLEX mode AND len < MIN_PKT_SIZE\n"); | ||
857 | if ((FragListCount & 0x1) == 0) { | ||
858 | DBG_TX(et131x_dbginfo, | ||
859 | "Even number of descs, split 1st elem\n"); | ||
860 | splitfirstelem = 1; | ||
861 | /* SegmentSize = pFragList[0].size / 2; */ | ||
862 | SegmentSize = (pPacket->len - pPacket->data_len) / 2; | ||
863 | } | ||
864 | } else if (FragListCount & 0x1) { | ||
865 | DBG_TX(et131x_dbginfo, "Odd number of descs, split 1st elem\n"); | ||
866 | |||
867 | splitfirstelem = 1; | ||
868 | /* SegmentSize = pFragList[0].size / 2; */ | ||
869 | SegmentSize = (pPacket->len - pPacket->data_len) / 2; | ||
870 | } | ||
871 | |||
872 | spin_lock_irqsave(&etdev->SendHWLock, flags); | ||
873 | |||
874 | if (etdev->TxRing.txDmaReadyToSend.bits.serv_req_wrap == | ||
875 | ServiceComplete.bits.serv_cpl_wrap) { | ||
876 | /* The ring hasn't wrapped. Slots available should be | ||
877 | * (RING_SIZE) - the difference between the two pointers. | ||
878 | */ | ||
879 | SlotsAvailable = NUM_DESC_PER_RING_TX - | ||
880 | (etdev->TxRing.txDmaReadyToSend.bits.serv_req - | ||
881 | ServiceComplete.bits.serv_cpl); | ||
882 | } else { | ||
883 | /* The ring has wrapped. Slots available should be the | ||
884 | * difference between the two pointers. | ||
885 | */ | ||
886 | SlotsAvailable = ServiceComplete.bits.serv_cpl - | ||
887 | etdev->TxRing.txDmaReadyToSend.bits.serv_req; | ||
888 | } | ||
889 | |||
890 | if ((FragListCount + splitfirstelem) > SlotsAvailable) { | ||
891 | DBG_WARNING(et131x_dbginfo, | ||
892 | "Not Enough Space in Tx Desc Ring\n"); | ||
893 | spin_unlock_irqrestore(&etdev->SendHWLock, flags); | ||
894 | return -ENOMEM; | ||
895 | } | ||
896 | |||
897 | loopEnd = (FragListCount) + splitfirstelem; | ||
898 | fragIndex = 0; | ||
899 | |||
900 | DBG_TX(et131x_dbginfo, | ||
901 | "TCB : 0x%p\n" | ||
902 | "Packet (SKB) : 0x%p\t Packet->len: %d\t Packet->data_len: %d\n" | ||
903 | "FragListCount : %d\t splitfirstelem: %d\t loopEnd:%d\n", | ||
904 | pMpTcb, | ||
905 | pPacket, pPacket->len, pPacket->data_len, | ||
906 | FragListCount, splitfirstelem, loopEnd); | ||
907 | |||
908 | for (loopIndex = 0; loopIndex < loopEnd; loopIndex++) { | ||
909 | if (loopIndex > splitfirstelem) | ||
910 | fragIndex++; | ||
911 | |||
912 | DBG_TX(et131x_dbginfo, | ||
913 | "In loop, loopIndex: %d\t fragIndex: %d\n", loopIndex, | ||
914 | fragIndex); | ||
915 | |||
916 | /* If there is something in this element, let's get a | ||
917 | * descriptor from the ring and get the necessary data | ||
918 | */ | ||
919 | DBG_TX(et131x_dbginfo, | ||
920 | "Packet Length %d," | ||
921 | "filling desc entry %d\n", | ||
922 | pPacket->len, | ||
923 | etdev->TxRing.txDmaReadyToSend.bits.serv_req); | ||
924 | |||
925 | /* | ||
926 | * NOTE - Should we do a paranoia check here to make sure the fragment | ||
927 | * actually has a length? It's HIGHLY unlikely the fragment would | ||
928 | * contain no data... | ||
929 | */ | ||
930 | if (1) { | ||
931 | /* NOTE - Currently always getting 32-bit addrs, and | ||
932 | * dma_addr_t is only 32-bit, so leave "high" ptr | ||
933 | * value out for now | ||
934 | * CurDesc.DataBufferPtrHigh = 0; | ||
935 | */ | ||
936 | |||
937 | CurDesc.word2.value = 0; | ||
938 | CurDesc.word3.value = 0; | ||
939 | |||
940 | if (fragIndex == 0) { | ||
941 | if (splitfirstelem) { | ||
942 | DBG_TX(et131x_dbginfo, | ||
943 | "Split first element: YES\n"); | ||
944 | |||
945 | if (loopIndex == 0) { | ||
946 | DBG_TX(et131x_dbginfo, | ||
947 | "Got fragment of length %d, fragIndex: %d\n", | ||
948 | pPacket->len - | ||
949 | pPacket->data_len, | ||
950 | fragIndex); | ||
951 | DBG_TX(et131x_dbginfo, | ||
952 | "SegmentSize: %d\n", | ||
953 | SegmentSize); | ||
954 | |||
955 | CurDesc.word2.bits. | ||
956 | length_in_bytes = | ||
957 | SegmentSize; | ||
958 | CurDesc.DataBufferPtrLow = | ||
959 | pci_map_single(etdev-> | ||
960 | pdev, | ||
961 | pPacket-> | ||
962 | data, | ||
963 | SegmentSize, | ||
964 | PCI_DMA_TODEVICE); | ||
965 | DBG_TX(et131x_dbginfo, | ||
966 | "pci_map_single() returns: 0x%08x\n", | ||
967 | CurDesc. | ||
968 | DataBufferPtrLow); | ||
969 | } else { | ||
970 | DBG_TX(et131x_dbginfo, | ||
971 | "Got fragment of length %d, fragIndex: %d\n", | ||
972 | pPacket->len - | ||
973 | pPacket->data_len, | ||
974 | fragIndex); | ||
975 | DBG_TX(et131x_dbginfo, | ||
976 | "Leftover Size: %d\n", | ||
977 | (pPacket->len - | ||
978 | pPacket->data_len - | ||
979 | SegmentSize)); | ||
980 | |||
981 | CurDesc.word2.bits. | ||
982 | length_in_bytes = | ||
983 | ((pPacket->len - | ||
984 | pPacket->data_len) - | ||
985 | SegmentSize); | ||
986 | CurDesc.DataBufferPtrLow = | ||
987 | pci_map_single(etdev-> | ||
988 | pdev, | ||
989 | (pPacket-> | ||
990 | data + | ||
991 | SegmentSize), | ||
992 | (pPacket-> | ||
993 | len - | ||
994 | pPacket-> | ||
995 | data_len - | ||
996 | SegmentSize), | ||
997 | PCI_DMA_TODEVICE); | ||
998 | DBG_TX(et131x_dbginfo, | ||
999 | "pci_map_single() returns: 0x%08x\n", | ||
1000 | CurDesc. | ||
1001 | DataBufferPtrLow); | ||
1002 | } | ||
1003 | } else { | ||
1004 | DBG_TX(et131x_dbginfo, | ||
1005 | "Split first element: NO\n"); | ||
1006 | |||
1007 | CurDesc.word2.bits.length_in_bytes = | ||
1008 | pPacket->len - pPacket->data_len; | ||
1009 | |||
1010 | CurDesc.DataBufferPtrLow = | ||
1011 | pci_map_single(etdev->pdev, | ||
1012 | pPacket->data, | ||
1013 | (pPacket->len - | ||
1014 | pPacket->data_len), | ||
1015 | PCI_DMA_TODEVICE); | ||
1016 | DBG_TX(et131x_dbginfo, | ||
1017 | "pci_map_single() returns: 0x%08x\n", | ||
1018 | CurDesc.DataBufferPtrLow); | ||
1019 | } | ||
1020 | } else { | ||
1021 | |||
1022 | CurDesc.word2.bits.length_in_bytes = | ||
1023 | pFragList[fragIndex - 1].size; | ||
1024 | CurDesc.DataBufferPtrLow = | ||
1025 | pci_map_page(etdev->pdev, | ||
1026 | pFragList[fragIndex - 1].page, | ||
1027 | pFragList[fragIndex - | ||
1028 | 1].page_offset, | ||
1029 | pFragList[fragIndex - 1].size, | ||
1030 | PCI_DMA_TODEVICE); | ||
1031 | DBG_TX(et131x_dbginfo, | ||
1032 | "pci_map_page() returns: 0x%08x\n", | ||
1033 | CurDesc.DataBufferPtrLow); | ||
1034 | } | ||
1035 | |||
1036 | if (loopIndex == 0) { | ||
1037 | /* This is the first descriptor of the packet | ||
1038 | * | ||
1039 | * Set the "f" bit to indicate this is the | ||
1040 | * first descriptor in the packet. | ||
1041 | */ | ||
1042 | DBG_TX(et131x_dbginfo, | ||
1043 | "This is our FIRST descriptor\n"); | ||
1044 | CurDesc.word3.bits.f = 1; | ||
1045 | |||
1046 | pMpTcb->WrIndexStart = | ||
1047 | etdev->TxRing.txDmaReadyToSend; | ||
1048 | } | ||
1049 | |||
1050 | if ((loopIndex == (loopEnd - 1)) && | ||
1051 | (etdev->duplex_mode || | ||
1052 | (pMpTcb->PacketLength >= NIC_MIN_PACKET_SIZE))) { | ||
1053 | /* This is the Last descriptor of the packet */ | ||
1054 | DBG_TX(et131x_dbginfo, | ||
1055 | "THIS is our LAST descriptor\n"); | ||
1056 | |||
1057 | if (etdev->linkspeed == | ||
1058 | TRUEPHY_SPEED_1000MBPS) { | ||
1059 | if (++etdev->TxRing. | ||
1060 | TxPacketsSinceLastinterrupt >= | ||
1061 | PARM_TX_NUM_BUFS_DEF) { | ||
1062 | CurDesc.word3.value = 0x5; | ||
1063 | etdev->TxRing. | ||
1064 | TxPacketsSinceLastinterrupt | ||
1065 | = 0; | ||
1066 | } else { | ||
1067 | CurDesc.word3.value = 0x1; | ||
1068 | } | ||
1069 | } else { | ||
1070 | CurDesc.word3.value = 0x5; | ||
1071 | } | ||
1072 | |||
1073 | /* Following index will be used during freeing | ||
1074 | * of packet | ||
1075 | */ | ||
1076 | pMpTcb->WrIndex = | ||
1077 | etdev->TxRing.txDmaReadyToSend; | ||
1078 | pMpTcb->PacketStaleCount = 0; | ||
1079 | } | ||
1080 | |||
1081 | /* Copy the descriptor (filled above) into the | ||
1082 | * descriptor ring at the next free entry. Advance | ||
1083 | * the "next free entry" variable | ||
1084 | */ | ||
1085 | memcpy(etdev->TxRing.pTxDescRingVa + | ||
1086 | etdev->TxRing.txDmaReadyToSend.bits.serv_req, | ||
1087 | &CurDesc, sizeof(TX_DESC_ENTRY_t)); | ||
1088 | |||
1089 | CurDescPostCopy = | ||
1090 | etdev->TxRing.pTxDescRingVa + | ||
1091 | etdev->TxRing.txDmaReadyToSend.bits.serv_req; | ||
1092 | |||
1093 | DBG_TX(et131x_dbginfo, | ||
1094 | "CURRENT DESCRIPTOR\n" | ||
1095 | "\tAddress : 0x%p\n" | ||
1096 | "\tDataBufferPtrHigh : 0x%08x\n" | ||
1097 | "\tDataBufferPtrLow : 0x%08x\n" | ||
1098 | "\tword2 : 0x%08x\n" | ||
1099 | "\tword3 : 0x%08x\n", | ||
1100 | CurDescPostCopy, | ||
1101 | CurDescPostCopy->DataBufferPtrHigh, | ||
1102 | CurDescPostCopy->DataBufferPtrLow, | ||
1103 | CurDescPostCopy->word2.value, | ||
1104 | CurDescPostCopy->word3.value); | ||
1105 | |||
1106 | if (++etdev->TxRing.txDmaReadyToSend.bits.serv_req >= | ||
1107 | NUM_DESC_PER_RING_TX) { | ||
1108 | if (etdev->TxRing.txDmaReadyToSend.bits. | ||
1109 | serv_req_wrap) { | ||
1110 | etdev->TxRing.txDmaReadyToSend. | ||
1111 | value = 0; | ||
1112 | } else { | ||
1113 | etdev->TxRing.txDmaReadyToSend. | ||
1114 | value = 0x400; | ||
1115 | } | ||
1116 | } | ||
1117 | } | ||
1118 | } | ||
1119 | |||
1120 | if (etdev->duplex_mode == 0 && | ||
1121 | pMpTcb->PacketLength < NIC_MIN_PACKET_SIZE) { | ||
1122 | /* NOTE - Same 32/64-bit issue as above... */ | ||
1123 | CurDesc.DataBufferPtrHigh = 0x0; | ||
1124 | CurDesc.DataBufferPtrLow = etdev->TxRing.pTxDummyBlkPa; | ||
1125 | CurDesc.word2.value = 0; | ||
1126 | |||
1127 | if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) { | ||
1128 | if (++etdev->TxRing.TxPacketsSinceLastinterrupt >= | ||
1129 | PARM_TX_NUM_BUFS_DEF) { | ||
1130 | CurDesc.word3.value = 0x5; | ||
1131 | etdev->TxRing.TxPacketsSinceLastinterrupt = | ||
1132 | 0; | ||
1133 | } else { | ||
1134 | CurDesc.word3.value = 0x1; | ||
1135 | } | ||
1136 | } else { | ||
1137 | CurDesc.word3.value = 0x5; | ||
1138 | } | ||
1139 | |||
1140 | CurDesc.word2.bits.length_in_bytes = | ||
1141 | NIC_MIN_PACKET_SIZE - pMpTcb->PacketLength; | ||
1142 | |||
1143 | pMpTcb->WrIndex = etdev->TxRing.txDmaReadyToSend; | ||
1144 | |||
1145 | memcpy(etdev->TxRing.pTxDescRingVa + | ||
1146 | etdev->TxRing.txDmaReadyToSend.bits.serv_req, | ||
1147 | &CurDesc, sizeof(TX_DESC_ENTRY_t)); | ||
1148 | |||
1149 | CurDescPostCopy = | ||
1150 | etdev->TxRing.pTxDescRingVa + | ||
1151 | etdev->TxRing.txDmaReadyToSend.bits.serv_req; | ||
1152 | |||
1153 | DBG_TX(et131x_dbginfo, | ||
1154 | "CURRENT DESCRIPTOR\n" | ||
1155 | "\tAddress : 0x%p\n" | ||
1156 | "\tDataBufferPtrHigh : 0x%08x\n" | ||
1157 | "\tDataBufferPtrLow : 0x%08x\n" | ||
1158 | "\tword2 : 0x%08x\n" | ||
1159 | "\tword3 : 0x%08x\n", | ||
1160 | CurDescPostCopy, | ||
1161 | CurDescPostCopy->DataBufferPtrHigh, | ||
1162 | CurDescPostCopy->DataBufferPtrLow, | ||
1163 | CurDescPostCopy->word2.value, | ||
1164 | CurDescPostCopy->word3.value); | ||
1165 | |||
1166 | if (++etdev->TxRing.txDmaReadyToSend.bits.serv_req >= | ||
1167 | NUM_DESC_PER_RING_TX) { | ||
1168 | if (etdev->TxRing.txDmaReadyToSend.bits. | ||
1169 | serv_req_wrap) { | ||
1170 | etdev->TxRing.txDmaReadyToSend.value = 0; | ||
1171 | } else { | ||
1172 | etdev->TxRing.txDmaReadyToSend.value = 0x400; | ||
1173 | } | ||
1174 | } | ||
1175 | |||
1176 | DBG_TX(et131x_dbginfo, "Padding descriptor %d by %d bytes\n", | ||
1177 | /* etdev->TxRing.txDmaReadyToSend.value, */ | ||
1178 | etdev->TxRing.txDmaReadyToSend.bits.serv_req, | ||
1179 | NIC_MIN_PACKET_SIZE - pMpTcb->PacketLength); | ||
1180 | } | ||
1181 | |||
1182 | spin_lock(&etdev->TCBSendQLock); | ||
1183 | |||
1184 | if (etdev->TxRing.CurrSendTail) | ||
1185 | etdev->TxRing.CurrSendTail->Next = pMpTcb; | ||
1186 | else | ||
1187 | etdev->TxRing.CurrSendHead = pMpTcb; | ||
1188 | |||
1189 | etdev->TxRing.CurrSendTail = pMpTcb; | ||
1190 | |||
1191 | DBG_ASSERT(pMpTcb->Next == NULL); | ||
1192 | |||
1193 | etdev->TxRing.nBusySend++; | ||
1194 | |||
1195 | spin_unlock(&etdev->TCBSendQLock); | ||
1196 | |||
1197 | /* Write the new write pointer back to the device. */ | ||
1198 | writel(etdev->TxRing.txDmaReadyToSend.value, | ||
1199 | &etdev->regs->txdma.service_request.value); | ||
1200 | |||
1201 | #ifdef CONFIG_ET131X_DEBUG | ||
1202 | DumpDeviceBlock(DBG_TX_ON, etdev, 1); | ||
1203 | #endif | ||
1204 | |||
1205 | /* For Gig only, we use Tx Interrupt coalescing. Enable the software | ||
1206 | * timer to wake us up if this packet isn't followed by N more. | ||
1207 | */ | ||
1208 | if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) { | ||
1209 | writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO, | ||
1210 | &etdev->regs->global.watchdog_timer); | ||
1211 | } | ||
1212 | |||
1213 | spin_unlock_irqrestore(&etdev->SendHWLock, flags); | ||
1214 | |||
1215 | DBG_TX_LEAVE(et131x_dbginfo); | ||
1216 | return 0; | ||
1217 | } | ||
1218 | |||
1219 | #endif | ||
1220 | 700 | ||
1221 | /** | 701 | /** |
1222 | * et131x_free_send_packet - Recycle a MP_TCB, complete the packet if necessary | 702 | * et131x_free_send_packet - Recycle a MP_TCB, complete the packet if necessary |
@@ -1246,37 +726,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev, | |||
1246 | * corresponding to this packet and unmap the fragments | 726 | * corresponding to this packet and unmap the fragments |
1247 | * they point to | 727 | * they point to |
1248 | */ | 728 | */ |
1249 | DBG_TX(et131x_dbginfo, | ||
1250 | "Unmap descriptors Here\n" | ||
1251 | "TCB : 0x%p\n" | ||
1252 | "TCB Next : 0x%p\n" | ||
1253 | "TCB PacketLength : %d\n" | ||
1254 | "TCB WrIndexS.value : 0x%08x\n" | ||
1255 | "TCB WrIndex.value : 0x%08x\n", | ||
1256 | pMpTcb, | ||
1257 | pMpTcb->Next, | ||
1258 | pMpTcb->PacketLength, | ||
1259 | pMpTcb->WrIndexStart, | ||
1260 | pMpTcb->WrIndex); | ||
1261 | |||
1262 | do { | 729 | do { |
1263 | desc = | 730 | desc = |
1264 | (TX_DESC_ENTRY_t *) (etdev->TxRing.pTxDescRingVa + | 731 | (TX_DESC_ENTRY_t *) (etdev->TxRing.pTxDescRingVa + |
1265 | INDEX10(pMpTcb->WrIndexStart)); | 732 | INDEX10(pMpTcb->WrIndexStart)); |
1266 | 733 | ||
1267 | DBG_TX(et131x_dbginfo, | ||
1268 | "CURRENT DESCRIPTOR\n" | ||
1269 | "\tAddress : 0x%p\n" | ||
1270 | "\tDataBufferPtrHigh : 0x%08x\n" | ||
1271 | "\tDataBufferPtrLow : 0x%08x\n" | ||
1272 | "\tword2 : 0x%08x\n" | ||
1273 | "\tword3 : 0x%08x\n", | ||
1274 | desc, | ||
1275 | desc->DataBufferPtrHigh, | ||
1276 | desc->DataBufferPtrLow, | ||
1277 | desc->word2.value, | ||
1278 | desc->word3.value); | ||
1279 | |||
1280 | pci_unmap_single(etdev->pdev, | 734 | pci_unmap_single(etdev->pdev, |
1281 | desc->DataBufferPtrLow, | 735 | desc->DataBufferPtrLow, |
1282 | desc->word2.value, PCI_DMA_TODEVICE); | 736 | desc->word2.value, PCI_DMA_TODEVICE); |
@@ -1290,9 +744,6 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev, | |||
1290 | } while (desc != (etdev->TxRing.pTxDescRingVa + | 744 | } while (desc != (etdev->TxRing.pTxDescRingVa + |
1291 | INDEX10(pMpTcb->WrIndex))); | 745 | INDEX10(pMpTcb->WrIndex))); |
1292 | 746 | ||
1293 | DBG_TX(et131x_dbginfo, | ||
1294 | "Free Packet (SKB) : 0x%p\n", pMpTcb->Packet); | ||
1295 | |||
1296 | dev_kfree_skb_any(pMpTcb->Packet); | 747 | dev_kfree_skb_any(pMpTcb->Packet); |
1297 | } | 748 | } |
1298 | 749 | ||
@@ -1313,8 +764,7 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev, | |||
1313 | etdev->TxRing.TCBReadyQueueTail = pMpTcb; | 764 | etdev->TxRing.TCBReadyQueueTail = pMpTcb; |
1314 | 765 | ||
1315 | spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags); | 766 | spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags); |
1316 | 767 | WARN_ON(etdev->TxRing.nBusySend < 0); | |
1317 | DBG_ASSERT(etdev->TxRing.nBusySend >= 0); | ||
1318 | } | 768 | } |
1319 | 769 | ||
1320 | /** | 770 | /** |
@@ -1330,8 +780,6 @@ void et131x_free_busy_send_packets(struct et131x_adapter *etdev) | |||
1330 | unsigned long flags; | 780 | unsigned long flags; |
1331 | uint32_t FreeCounter = 0; | 781 | uint32_t FreeCounter = 0; |
1332 | 782 | ||
1333 | DBG_ENTER(et131x_dbginfo); | ||
1334 | |||
1335 | while (!list_empty(&etdev->TxRing.SendWaitQueue)) { | 783 | while (!list_empty(&etdev->TxRing.SendWaitQueue)) { |
1336 | spin_lock_irqsave(&etdev->SendWaitLock, flags); | 784 | spin_lock_irqsave(&etdev->SendWaitLock, flags); |
1337 | 785 | ||
@@ -1360,8 +808,6 @@ void et131x_free_busy_send_packets(struct et131x_adapter *etdev) | |||
1360 | 808 | ||
1361 | spin_unlock_irqrestore(&etdev->TCBSendQLock, flags); | 809 | spin_unlock_irqrestore(&etdev->TCBSendQLock, flags); |
1362 | 810 | ||
1363 | DBG_VERBOSE(et131x_dbginfo, "pMpTcb = 0x%p\n", pMpTcb); | ||
1364 | |||
1365 | FreeCounter++; | 811 | FreeCounter++; |
1366 | et131x_free_send_packet(etdev, pMpTcb); | 812 | et131x_free_send_packet(etdev, pMpTcb); |
1367 | 813 | ||
@@ -1370,17 +816,11 @@ void et131x_free_busy_send_packets(struct et131x_adapter *etdev) | |||
1370 | pMpTcb = etdev->TxRing.CurrSendHead; | 816 | pMpTcb = etdev->TxRing.CurrSendHead; |
1371 | } | 817 | } |
1372 | 818 | ||
1373 | if (FreeCounter == NUM_TCB) { | 819 | WARN_ON(FreeCounter == NUM_TCB); |
1374 | DBG_ERROR(et131x_dbginfo, | ||
1375 | "MpFreeBusySendPackets exited loop for a bad reason\n"); | ||
1376 | BUG(); | ||
1377 | } | ||
1378 | 820 | ||
1379 | spin_unlock_irqrestore(&etdev->TCBSendQLock, flags); | 821 | spin_unlock_irqrestore(&etdev->TCBSendQLock, flags); |
1380 | 822 | ||
1381 | etdev->TxRing.nBusySend = 0; | 823 | etdev->TxRing.nBusySend = 0; |
1382 | |||
1383 | DBG_LEAVE(et131x_dbginfo); | ||
1384 | } | 824 | } |
1385 | 825 | ||
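The exit check in et131x_free_busy_send_packets() is softened as well: where the old code printed a DBG_ERROR() and then called BUG() if the loop freed a full NUM_TCB entries, the new code emits a WARN_ON(), logging a backtrace while letting the system continue. A minimal sketch of that guard (the constant below is illustrative, not the driver's):

#include <linux/types.h>
#include <linux/bug.h>

#define EXAMPLE_NUM_TCB 64	/* stand-in for the driver's NUM_TCB */

/*
 * Sketch only: if the clean-up loop somehow visits every slot, warn and keep
 * running instead of halting the machine.
 */
static void check_cleanup_count(u32 freed)
{
	/* was: if (freed == NUM_TCB) { DBG_ERROR(...); BUG(); } */
	WARN_ON(freed == EXAMPLE_NUM_TCB);
}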
1386 | /** | 826 | /** |
@@ -1394,8 +834,6 @@ void et131x_free_busy_send_packets(struct et131x_adapter *etdev) | |||
1394 | */ | 834 | */ |
1395 | void et131x_handle_send_interrupt(struct et131x_adapter *etdev) | 835 | void et131x_handle_send_interrupt(struct et131x_adapter *etdev) |
1396 | { | 836 | { |
1397 | DBG_TX_ENTER(et131x_dbginfo); | ||
1398 | |||
1399 | /* Mark as completed any packets which have been sent by the device. */ | 837 | /* Mark as completed any packets which have been sent by the device. */ |
1400 | et131x_update_tcb_list(etdev); | 838 | et131x_update_tcb_list(etdev); |
1401 | 839 | ||
@@ -1403,8 +841,6 @@ void et131x_handle_send_interrupt(struct et131x_adapter *etdev) | |||
1403 | * dequeue and send those packets now, as long as we have free TCBs. | 841 | * dequeue and send those packets now, as long as we have free TCBs. |
1404 | */ | 842 | */ |
1405 | et131x_check_send_wait_list(etdev); | 843 | et131x_check_send_wait_list(etdev); |
1406 | |||
1407 | DBG_TX_LEAVE(et131x_dbginfo); | ||
1408 | } | 844 | } |
1409 | 845 | ||
1410 | /** | 846 | /** |
@@ -1487,15 +923,9 @@ static void et131x_check_send_wait_list(struct et131x_adapter *etdev) | |||
1487 | MP_TCB_RESOURCES_AVAILABLE(etdev)) { | 923 | MP_TCB_RESOURCES_AVAILABLE(etdev)) { |
1488 | struct list_head *entry; | 924 | struct list_head *entry; |
1489 | 925 | ||
1490 | DBG_VERBOSE(et131x_dbginfo, "Tx packets on the wait queue\n"); | ||
1491 | |||
1492 | entry = etdev->TxRing.SendWaitQueue.next; | 926 | entry = etdev->TxRing.SendWaitQueue.next; |
1493 | 927 | ||
1494 | etdev->TxRing.nWaitSend--; | 928 | etdev->TxRing.nWaitSend--; |
1495 | |||
1496 | DBG_WARNING(et131x_dbginfo, | ||
1497 | "MpHandleSendInterrupt - sent a queued pkt. Waiting %d\n", | ||
1498 | etdev->TxRing.nWaitSend); | ||
1499 | } | 929 | } |
1500 | 930 | ||
1501 | spin_unlock_irqrestore(&etdev->SendWaitLock, flags); | 931 | spin_unlock_irqrestore(&etdev->SendWaitLock, flags); |
diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h index 4e795f0b076e..1dfe06f1b1a7 100644 --- a/drivers/staging/et131x/et131x_adapter.h +++ b/drivers/staging/et131x/et131x_adapter.h | |||
@@ -176,13 +176,6 @@ typedef struct _ce_stats_t { | |||
176 | u32 code_violations; | 176 | u32 code_violations; |
177 | u32 other_errors; | 177 | u32 other_errors; |
178 | 178 | ||
179 | #ifdef CONFIG_ET131X_DEBUG | ||
180 | u32 UnhandledInterruptsPerSec; | ||
181 | u32 RxDmaInterruptsPerSec; | ||
182 | u32 TxDmaInterruptsPerSec; | ||
183 | u32 WatchDogInterruptsPerSec; | ||
184 | #endif /* CONFIG_ET131X_DEBUG */ | ||
185 | |||
186 | u32 SynchrounousIterations; | 179 | u32 SynchrounousIterations; |
187 | u32 InterruptStatus; | 180 | u32 InterruptStatus; |
188 | } CE_STATS_t, *PCE_STATS_t; | 181 | } CE_STATS_t, *PCE_STATS_t; |
diff --git a/drivers/staging/et131x/et131x_debug.c b/drivers/staging/et131x/et131x_debug.c deleted file mode 100644 index 945b4b542ada..000000000000 --- a/drivers/staging/et131x/et131x_debug.c +++ /dev/null | |||
@@ -1,208 +0,0 @@ | |||
1 | /* | ||
2 | * Agere Systems Inc. | ||
3 | * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs | ||
4 | * | ||
5 | * Copyright © 2005 Agere Systems Inc. | ||
6 | * All rights reserved. | ||
7 | * http://www.agere.com | ||
8 | * | ||
9 | *------------------------------------------------------------------------------ | ||
10 | * | ||
11 | * et131x_debug.c - Routines used for debugging. | ||
12 | * | ||
13 | *------------------------------------------------------------------------------ | ||
14 | * | ||
15 | * SOFTWARE LICENSE | ||
16 | * | ||
17 | * This software is provided subject to the following terms and conditions, | ||
18 | * which you should read carefully before using the software. Using this | ||
19 | * software indicates your acceptance of these terms and conditions. If you do | ||
20 | * not agree with these terms and conditions, do not use the software. | ||
21 | * | ||
22 | * Copyright © 2005 Agere Systems Inc. | ||
23 | * All rights reserved. | ||
24 | * | ||
25 | * Redistribution and use in source or binary forms, with or without | ||
26 | * modifications, are permitted provided that the following conditions are met: | ||
27 | * | ||
28 | * . Redistributions of source code must retain the above copyright notice, this | ||
29 | * list of conditions and the following Disclaimer as comments in the code as | ||
30 | * well as in the documentation and/or other materials provided with the | ||
31 | * distribution. | ||
32 | * | ||
33 | * . Redistributions in binary form must reproduce the above copyright notice, | ||
34 | * this list of conditions and the following Disclaimer in the documentation | ||
35 | * and/or other materials provided with the distribution. | ||
36 | * | ||
37 | * . Neither the name of Agere Systems Inc. nor the names of the contributors | ||
38 | * may be used to endorse or promote products derived from this software | ||
39 | * without specific prior written permission. | ||
40 | * | ||
41 | * Disclaimer | ||
42 | * | ||
43 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, | ||
44 | * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF | ||
45 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY | ||
46 | * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN | ||
47 | * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY | ||
48 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||
49 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
50 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
51 | * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT | ||
52 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT | ||
53 | * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH | ||
54 | * DAMAGE. | ||
55 | * | ||
56 | */ | ||
57 | |||
58 | #ifdef CONFIG_ET131X_DEBUG | ||
59 | |||
60 | #include "et131x_version.h" | ||
61 | #include "et131x_debug.h" | ||
62 | #include "et131x_defs.h" | ||
63 | |||
64 | #include <linux/pci.h> | ||
65 | #include <linux/init.h> | ||
66 | #include <linux/module.h> | ||
67 | #include <linux/types.h> | ||
68 | #include <linux/kernel.h> | ||
69 | |||
70 | #include <linux/sched.h> | ||
71 | #include <linux/ptrace.h> | ||
72 | #include <linux/slab.h> | ||
73 | #include <linux/ctype.h> | ||
74 | #include <linux/string.h> | ||
75 | #include <linux/timer.h> | ||
76 | #include <linux/interrupt.h> | ||
77 | #include <linux/in.h> | ||
78 | #include <linux/delay.h> | ||
79 | #include <linux/io.h> | ||
80 | #include <linux/bitops.h> | ||
81 | #include <asm/system.h> | ||
82 | |||
83 | #include <linux/netdevice.h> | ||
84 | #include <linux/etherdevice.h> | ||
85 | #include <linux/skbuff.h> | ||
86 | #include <linux/if_arp.h> | ||
87 | #include <linux/ioport.h> | ||
88 | #include <linux/random.h> | ||
89 | |||
90 | #include "et1310_phy.h" | ||
91 | #include "et1310_pm.h" | ||
92 | #include "et1310_jagcore.h" | ||
93 | |||
94 | #include "et131x_adapter.h" | ||
95 | #include "et131x_netdev.h" | ||
96 | #include "et131x_config.h" | ||
97 | #include "et131x_isr.h" | ||
98 | |||
99 | #include "et1310_address_map.h" | ||
100 | #include "et1310_tx.h" | ||
101 | #include "et1310_rx.h" | ||
102 | #include "et1310_mac.h" | ||
103 | |||
104 | /* Data for debugging facilities */ | ||
105 | extern dbg_info_t *et131x_dbginfo; | ||
106 | |||
107 | /** | ||
108 | * DumpTxQueueContents - Dump out the tx queue and the shadow pointers | ||
109 | * @etdev: pointer to our adapter structure | ||
110 | */ | ||
111 | void DumpTxQueueContents(int debug, struct et131x_adapter *etdev) | ||
112 | { | ||
113 | MMC_t __iomem *mmc = &etdev->regs->mmc; | ||
114 | u32 txq_addr; | ||
115 | |||
116 | if (DBG_FLAGS(et131x_dbginfo) & debug) { | ||
117 | for (txq_addr = 0x200; txq_addr < 0x3ff; txq_addr++) { | ||
118 | u32 sram_access = readl(&mmc->sram_access); | ||
119 | sram_access &= 0xFFFF; | ||
120 | sram_access |= (txq_addr << 16) | ET_SRAM_REQ_ACCESS; | ||
121 | writel(sram_access, &mmc->sram_access); | ||
122 | |||
123 | DBG_PRINT("Addr 0x%x, Access 0x%08x\t" | ||
124 | "Value 1 0x%08x, Value 2 0x%08x, " | ||
125 | "Value 3 0x%08x, Value 4 0x%08x, \n", | ||
126 | txq_addr, | ||
127 | readl(&mmc->sram_access), | ||
128 | readl(&mmc->sram_word1), | ||
129 | readl(&mmc->sram_word2), | ||
130 | readl(&mmc->sram_word3), | ||
131 | readl(&mmc->sram_word4)); | ||
132 | } | ||
133 | |||
134 | DBG_PRINT("Shadow Pointers 0x%08x\n", | ||
135 | readl(&etdev->regs->txmac.shadow_ptr.value)); | ||
136 | } | ||
137 | } | ||
138 | |||
139 | #define NUM_BLOCKS 8 | ||
140 | |||
141 | static const char *BlockNames[NUM_BLOCKS] = { | ||
142 | "Global", "Tx DMA", "Rx DMA", "Tx MAC", | ||
143 | "Rx MAC", "MAC", "MAC Stat", "MMC" | ||
144 | }; | ||
145 | |||
146 | |||
147 | /** | ||
148 | * DumpDeviceBlock | ||
149 | * @etdev: pointer to our adapter | ||
150 | * | ||
151 | * Dumps the first 64 regs of each block of the et-1310 (each block is | ||
152 | * mapped to a new page, each page is 4096 bytes). | ||
153 | */ | ||
154 | void DumpDeviceBlock(int debug, struct et131x_adapter *etdev, | ||
155 | u32 block) | ||
156 | { | ||
157 | u32 addr1, addr2; | ||
158 | u32 __iomem *regs = (u32 __iomem *) etdev->regs; | ||
159 | |||
160 | /* Output the debug counters to the debug terminal */ | ||
161 | if (DBG_FLAGS(et131x_dbginfo) & debug) { | ||
162 | DBG_PRINT("%s block\n", BlockNames[block]); | ||
163 | regs += block * 1024; | ||
164 | for (addr1 = 0; addr1 < 8; addr1++) { | ||
165 | for (addr2 = 0; addr2 < 8; addr2++) { | ||
166 | if (block == 0 && | ||
167 | (addr1 * 8 + addr2) == 6) | ||
168 | DBG_PRINT(" ISR , "); | ||
169 | else | ||
170 | DBG_PRINT("0x%08x, ", readl(regs++)); | ||
171 | } | ||
172 | DBG_PRINT("\n"); | ||
173 | } | ||
174 | DBG_PRINT("\n"); | ||
175 | } | ||
176 | } | ||
177 | |||
178 | /** | ||
179 | * DumpDeviceReg | ||
180 | * @etdev: pointer to our adapter | ||
181 | * | ||
182 | * Dumps the first 64 regs of each block of the et-1310 (each block is | ||
183 | * mapped to a new page, each page is 4096 bytes). | ||
184 | */ | ||
185 | void DumpDeviceReg(int debug, struct et131x_adapter *etdev) | ||
186 | { | ||
187 | u32 addr1, addr2; | ||
188 | u32 block; | ||
189 | u32 __iomem *regs = (u32 __iomem *)etdev->regs; | ||
190 | u32 __iomem *p; | ||
191 | |||
192 | /* Output the debug counters to the debug terminal */ | ||
193 | if (DBG_FLAGS(et131x_dbginfo) & debug) { | ||
194 | for (block = 0; block < NUM_BLOCKS; block++) { | ||
195 | DBG_PRINT("%s block\n", BlockNames[block]); | ||
196 | p = regs + block * 1024; | ||
197 | |||
198 | for (addr1 = 0; addr1 < 8; addr1++) { | ||
199 | for (addr2 = 0; addr2 < 8; addr2++) | ||
200 | DBG_PRINT("0x%08x, ", readl(p++)); | ||
201 | DBG_PRINT("\n"); | ||
202 | } | ||
203 | DBG_PRINT("\n"); | ||
204 | } | ||
205 | } | ||
206 | } | ||
207 | |||
208 | #endif /* CONFIG_ET131X_DEBUG */ | ||
diff --git a/drivers/staging/et131x/et131x_debug.h b/drivers/staging/et131x/et131x_debug.h deleted file mode 100644 index 7c70e230ccd2..000000000000 --- a/drivers/staging/et131x/et131x_debug.h +++ /dev/null | |||
@@ -1,255 +0,0 @@ | |||
1 | /* | ||
2 | * Agere Systems Inc. | ||
3 | * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs | ||
4 | * | ||
5 | * Copyright © 2005 Agere Systems Inc. | ||
6 | * All rights reserved. | ||
7 | * http://www.agere.com | ||
8 | * | ||
9 | *------------------------------------------------------------------------------ | ||
10 | * | ||
11 | * et131x_debug.h - Defines, structs, enums, prototypes, etc. used for | ||
12 | * outputting debug messages to the system logging facility | ||
13 | * (ksyslogd) | ||
14 | * | ||
15 | *------------------------------------------------------------------------------ | ||
16 | * | ||
17 | * SOFTWARE LICENSE | ||
18 | * | ||
19 | * This software is provided subject to the following terms and conditions, | ||
20 | * which you should read carefully before using the software. Using this | ||
21 | * software indicates your acceptance of these terms and conditions. If you do | ||
22 | * not agree with these terms and conditions, do not use the software. | ||
23 | * | ||
24 | * Copyright © 2005 Agere Systems Inc. | ||
25 | * All rights reserved. | ||
26 | * | ||
27 | * Redistribution and use in source or binary forms, with or without | ||
28 | * modifications, are permitted provided that the following conditions are met: | ||
29 | * | ||
30 | * . Redistributions of source code must retain the above copyright notice, this | ||
31 | * list of conditions and the following Disclaimer as comments in the code as | ||
32 | * well as in the documentation and/or other materials provided with the | ||
33 | * distribution. | ||
34 | * | ||
35 | * . Redistributions in binary form must reproduce the above copyright notice, | ||
36 | * this list of conditions and the following Disclaimer in the documentation | ||
37 | * and/or other materials provided with the distribution. | ||
38 | * | ||
39 | * . Neither the name of Agere Systems Inc. nor the names of the contributors | ||
40 | * may be used to endorse or promote products derived from this software | ||
41 | * without specific prior written permission. | ||
42 | * | ||
43 | * Disclaimer | ||
44 | * | ||
45 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, | ||
46 | * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF | ||
47 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY | ||
48 | * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN | ||
49 | * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY | ||
50 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||
51 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
52 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
53 | * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT | ||
54 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT | ||
55 | * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH | ||
56 | * DAMAGE. | ||
57 | * | ||
58 | */ | ||
59 | |||
60 | #ifndef __ET131X_DBG_H__ | ||
61 | #define __ET131X_DBG_H__ | ||
62 | |||
63 | /* Define Masks for debugging types/levels */ | ||
64 | #define DBG_ERROR_ON 0x00000001L | ||
65 | #define DBG_WARNING_ON 0x00000002L | ||
66 | #define DBG_NOTICE_ON 0x00000004L | ||
67 | #define DBG_TRACE_ON 0x00000008L | ||
68 | #define DBG_VERBOSE_ON 0x00000010L | ||
69 | #define DBG_PARAM_ON 0x00000020L | ||
70 | #define DBG_BREAK_ON 0x00000040L | ||
71 | #define DBG_RX_ON 0x00000100L | ||
72 | #define DBG_TX_ON 0x00000200L | ||
73 | |||
74 | #ifdef CONFIG_ET131X_DEBUG | ||
75 | |||
76 | /* | ||
77 | * Set the level of debugging if not done with a preprocessor define. See | ||
78 | * et131x_main.c, function et131x_init_module() for how the debug level | ||
79 | * translates into the types of messages displayed. | ||
80 | */ | ||
81 | #ifndef DBG_LVL | ||
82 | #define DBG_LVL 3 | ||
83 | #endif /* DBG_LVL */ | ||
84 | |||
85 | #define DBG_DEFAULTS (DBG_ERROR_ON | DBG_WARNING_ON | DBG_BREAK_ON) | ||
86 | |||
87 | #define DBG_FLAGS(A) ((A)->dbgFlags) | ||
88 | #define DBG_NAME(A) ((A)->dbgName) | ||
89 | #define DBG_LEVEL(A) ((A)->dbgLevel) | ||
90 | |||
91 | #ifndef DBG_PRINT | ||
92 | #define DBG_PRINT(S...) printk(KERN_DEBUG S) | ||
93 | #endif /* DBG_PRINT */ | ||
94 | |||
95 | #ifndef DBG_PRINTC | ||
96 | #define DBG_PRINTC(S...) printk(S) | ||
97 | #endif /* DBG_PRINTC */ | ||
98 | |||
99 | #ifndef DBG_TRAP | ||
100 | #define DBG_TRAP do {} while (0) /* BUG() */ | ||
101 | #endif /* DBG_TRAP */ | ||
102 | |||
103 | #define _ENTER_STR ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>" | ||
104 | #define _LEAVE_STR "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<" | ||
105 | |||
106 | #define _DBG_ENTER(A) printk(KERN_DEBUG "%s:%.*s:%s\n", DBG_NAME(A), \ | ||
107 | ++DBG_LEVEL(A), _ENTER_STR, __func__) | ||
108 | #define _DBG_LEAVE(A) printk(KERN_DEBUG "%s:%.*s:%s\n", DBG_NAME(A), \ | ||
109 | DBG_LEVEL(A)--, _LEAVE_STR, __func__) | ||
110 | |||
111 | #define DBG_ENTER(A) \ | ||
112 | do { \ | ||
113 | if (DBG_FLAGS(A) & DBG_TRACE_ON) \ | ||
114 | _DBG_ENTER(A); \ | ||
115 | } while (0) | ||
116 | |||
117 | #define DBG_LEAVE(A) \ | ||
118 | do { \ | ||
119 | if (DBG_FLAGS(A) & DBG_TRACE_ON) \ | ||
120 | _DBG_LEAVE(A); \ | ||
121 | } while (0) | ||
122 | |||
123 | #define DBG_PARAM(A, N, F, S...) \ | ||
124 | do { \ | ||
125 | if (DBG_FLAGS(A) & DBG_PARAM_ON) \ | ||
126 | DBG_PRINT(" %s -- "F" ", N, S); \ | ||
127 | } while (0) | ||
128 | |||
129 | #define DBG_ERROR(A, S...) \ | ||
130 | do { \ | ||
131 | if (DBG_FLAGS(A) & DBG_ERROR_ON) { \ | ||
132 | DBG_PRINT("%s:ERROR:%s ", DBG_NAME(A), __func__);\ | ||
133 | DBG_PRINTC(S); \ | ||
134 | DBG_TRAP; \ | ||
135 | } \ | ||
136 | } while (0) | ||
137 | |||
138 | #define DBG_WARNING(A, S...) \ | ||
139 | do { \ | ||
140 | if (DBG_FLAGS(A) & DBG_WARNING_ON) { \ | ||
141 | DBG_PRINT("%s:WARNING:%s ", DBG_NAME(A), __func__); \ | ||
142 | DBG_PRINTC(S); \ | ||
143 | } \ | ||
144 | } while (0) | ||
145 | |||
146 | #define DBG_NOTICE(A, S...) \ | ||
147 | do { \ | ||
148 | if (DBG_FLAGS(A) & DBG_NOTICE_ON) { \ | ||
149 | DBG_PRINT("%s:NOTICE:%s ", DBG_NAME(A), __func__); \ | ||
150 | DBG_PRINTC(S); \ | ||
151 | } \ | ||
152 | } while (0) | ||
153 | |||
154 | #define DBG_TRACE(A, S...) \ | ||
155 | do { \ | ||
156 | if (DBG_FLAGS(A) & DBG_TRACE_ON) { \ | ||
157 | DBG_PRINT("%s:TRACE:%s ", DBG_NAME(A), __func__); \ | ||
158 | DBG_PRINTC(S); \ | ||
159 | } \ | ||
160 | } while (0) | ||
161 | |||
162 | #define DBG_VERBOSE(A, S...) \ | ||
163 | do { \ | ||
164 | if (DBG_FLAGS(A) & DBG_VERBOSE_ON) { \ | ||
165 | DBG_PRINT("%s:VERBOSE:%s ", DBG_NAME(A), __func__); \ | ||
166 | DBG_PRINTC(S); \ | ||
167 | } \ | ||
168 | } while (0) | ||
169 | |||
170 | #define DBG_RX(A, S...) \ | ||
171 | do { \ | ||
172 | if (DBG_FLAGS(A) & DBG_RX_ON) \ | ||
173 | DBG_PRINT(S); \ | ||
174 | } while (0) | ||
175 | |||
176 | #define DBG_RX_ENTER(A) \ | ||
177 | do { \ | ||
178 | if (DBG_FLAGS(A) & DBG_RX_ON) \ | ||
179 | _DBG_ENTER(A); \ | ||
180 | } while (0) | ||
181 | |||
182 | #define DBG_RX_LEAVE(A) \ | ||
183 | do { \ | ||
184 | if (DBG_FLAGS(A) & DBG_RX_ON) \ | ||
185 | _DBG_LEAVE(A); \ | ||
186 | } while (0) | ||
187 | |||
188 | #define DBG_TX(A, S...) \ | ||
189 | do { \ | ||
190 | if (DBG_FLAGS(A) & DBG_TX_ON) \ | ||
191 | DBG_PRINT(S); \ | ||
192 | } while (0) | ||
193 | |||
194 | #define DBG_TX_ENTER(A) \ | ||
195 | do { \ | ||
196 | if (DBG_FLAGS(A) & DBG_TX_ON) \ | ||
197 | _DBG_ENTER(A); \ | ||
198 | } while (0) | ||
199 | |||
200 | #define DBG_TX_LEAVE(A) \ | ||
201 | do { \ | ||
202 | if (DBG_FLAGS(A) & DBG_TX_ON) \ | ||
203 | _DBG_LEAVE(A); \ | ||
204 | } while (0) | ||
205 | |||
206 | #define DBG_ASSERT(C) \ | ||
207 | do { \ | ||
208 | if (!(C)) { \ | ||
209 | DBG_PRINT("ASSERT(%s) -- %s#%d (%s) ", \ | ||
210 | #C, __FILE__, __LINE__, __func__); \ | ||
211 | DBG_TRAP; \ | ||
212 | } \ | ||
213 | } while (0) | ||
214 | |||
215 | #define STATIC | ||
216 | |||
217 | typedef struct { | ||
218 | char *dbgName; | ||
219 | int dbgLevel; | ||
220 | unsigned long dbgFlags; | ||
221 | } dbg_info_t; | ||
222 | |||
223 | #else /* CONFIG_ET131X_DEBUG */ | ||
224 | |||
225 | #define DBG_DEFN | ||
226 | #define DBG_TRAP | ||
227 | #define DBG_PRINT(S...) | ||
228 | #define DBG_ENTER(A) | ||
229 | #define DBG_LEAVE(A) | ||
230 | #define DBG_PARAM(A, N, F, S...) | ||
231 | #define DBG_ERROR(A, S...) | ||
232 | #define DBG_WARNING(A, S...) | ||
233 | #define DBG_NOTICE(A, S...) | ||
234 | #define DBG_TRACE(A, S...) | ||
235 | #define DBG_VERBOSE(A, S...) | ||
236 | #define DBG_RX(A, S...) | ||
237 | #define DBG_RX_ENTER(A) | ||
238 | #define DBG_RX_LEAVE(A) | ||
239 | #define DBG_TX(A, S...) | ||
240 | #define DBG_TX_ENTER(A) | ||
241 | #define DBG_TX_LEAVE(A) | ||
242 | #define DBG_ASSERT(C) | ||
243 | #define STATIC static | ||
244 | |||
245 | #endif /* CONFIG_ET131X_DEBUG */ | ||
246 | |||
247 | /* Forward declaration of the private adapter structure */ | ||
248 | struct et131x_adapter; | ||
249 | |||
250 | void DumpTxQueueContents(int dbgLvl, struct et131x_adapter *adapter); | ||
251 | void DumpDeviceBlock(int dbgLvl, struct et131x_adapter *adapter, | ||
252 | unsigned int Block); | ||
253 | void DumpDeviceReg(int dbgLvl, struct et131x_adapter *adapter); | ||
254 | |||
255 | #endif /* __ET131X_DBG_H__ */ | ||
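The header removed above is a driver-private logging layer: a family of DBG_* macros that exist only under CONFIG_ET131X_DEBUG, print through a per-driver dbg_info_t (name, nesting level, flag mask), and collapse to empty statements otherwise. As a point of reference only, a minimal sketch of that compile-out macro pattern follows; every name here is hypothetical and none of it is part of the patch.

    /* Illustrative sketch of a config-gated debug macro (hypothetical names). */
    #ifdef CONFIG_EXAMPLE_DEBUG
    #define example_dbg(fmt, ...) \
            printk(KERN_DEBUG "example: %s: " fmt, __func__, ##__VA_ARGS__)
    #else
    #define example_dbg(fmt, ...) do { } while (0)  /* compiles away entirely */
    #endif

The hunks that follow either drop the call sites of these wrappers or convert them to the kernel's standard dev_err()/dev_warn()/dev_info() helpers, which prefix the device name automatically and need no driver-private state.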
diff --git a/drivers/staging/et131x/et131x_initpci.c b/drivers/staging/et131x/et131x_initpci.c index 0662e7ff964e..2d8b62f066a8 100644 --- a/drivers/staging/et131x/et131x_initpci.c +++ b/drivers/staging/et131x/et131x_initpci.c | |||
@@ -58,7 +58,6 @@ | |||
58 | */ | 58 | */ |
59 | 59 | ||
60 | #include "et131x_version.h" | 60 | #include "et131x_version.h" |
61 | #include "et131x_debug.h" | ||
62 | #include "et131x_defs.h" | 61 | #include "et131x_defs.h" |
63 | 62 | ||
64 | #include <linux/pci.h> | 63 | #include <linux/pci.h> |
@@ -113,33 +112,6 @@ MODULE_AUTHOR(DRIVER_AUTHOR); | |||
113 | MODULE_DESCRIPTION(DRIVER_INFO); | 112 | MODULE_DESCRIPTION(DRIVER_INFO); |
114 | MODULE_LICENSE(DRIVER_LICENSE); | 113 | MODULE_LICENSE(DRIVER_LICENSE); |
115 | 114 | ||
116 | /* Module Parameters and related data for debugging facilities */ | ||
117 | #ifdef CONFIG_ET131X_DEBUG | ||
118 | static u32 et131x_debug_level = DBG_LVL; | ||
119 | static u32 et131x_debug_flags = DBG_DEFAULTS; | ||
120 | |||
121 | /* | ||
122 | et131x_debug_level : | ||
123 | Level of debugging desired (0-7) | ||
124 | 7 : DBG_RX_ON | DBG_TX_ON | ||
125 | 6 : DBG_PARAM_ON | ||
126 | 5 : DBG_VERBOSE_ON | ||
127 | 4 : DBG_TRACE_ON | ||
128 | 3 : DBG_NOTICE_ON | ||
129 | 2 : no debug info | ||
130 | 1 : no debug info | ||
131 | 0 : no debug info | ||
132 | */ | ||
133 | |||
134 | module_param(et131x_debug_level, uint, 0); | ||
135 | module_param(et131x_debug_flags, uint, 0); | ||
136 | |||
137 | MODULE_PARM_DESC(et131x_debug_level, "Level of debugging desired (0-7)"); | ||
138 | |||
139 | static dbg_info_t et131x_info = { DRIVER_NAME_EXT, 0, 0 }; | ||
140 | dbg_info_t *et131x_dbginfo = &et131x_info; | ||
141 | #endif /* CONFIG_ET131X_DEBUG */ | ||
142 | |||
143 | /* Defines for Parameter Default/Min/Max values */ | 115 | /* Defines for Parameter Default/Min/Max values */ |
144 | #define PARM_SPEED_DUPLEX_MIN 0 | 116 | #define PARM_SPEED_DUPLEX_MIN 0 |
145 | #define PARM_SPEED_DUPLEX_MAX 5 | 117 | #define PARM_SPEED_DUPLEX_MAX 5 |
@@ -196,71 +168,29 @@ static struct pci_driver et131x_driver = { | |||
196 | * | 168 | * |
197 | * Returns 0 on success, errno on failure (as defined in errno.h) | 169 | * Returns 0 on success, errno on failure (as defined in errno.h) |
198 | */ | 170 | */ |
199 | int et131x_init_module(void) | 171 | static int et131x_init_module(void) |
200 | { | 172 | { |
201 | int result; | ||
202 | |||
203 | #ifdef CONFIG_ET131X_DEBUG | ||
204 | /* Set the level of debug messages displayed using the module | ||
205 | * parameter | ||
206 | */ | ||
207 | et131x_dbginfo->dbgFlags = et131x_debug_flags; | ||
208 | |||
209 | switch (et131x_debug_level) { | ||
210 | case 7: | ||
211 | et131x_dbginfo->dbgFlags |= (DBG_RX_ON | DBG_TX_ON); | ||
212 | |||
213 | case 6: | ||
214 | et131x_dbginfo->dbgFlags |= DBG_PARAM_ON; | ||
215 | |||
216 | case 5: | ||
217 | et131x_dbginfo->dbgFlags |= DBG_VERBOSE_ON; | ||
218 | |||
219 | case 4: | ||
220 | et131x_dbginfo->dbgFlags |= DBG_TRACE_ON; | ||
221 | |||
222 | case 3: | ||
223 | et131x_dbginfo->dbgFlags |= DBG_NOTICE_ON; | ||
224 | |||
225 | case 2: | ||
226 | case 1: | ||
227 | case 0: | ||
228 | default: | ||
229 | break; | ||
230 | } | ||
231 | #endif /* CONFIG_ET131X_DEBUG */ | ||
232 | |||
233 | DBG_ENTER(et131x_dbginfo); | ||
234 | DBG_PRINT("%s\n", DRIVER_INFO); | ||
235 | |||
236 | if (et131x_speed_set < PARM_SPEED_DUPLEX_MIN || | 173 | if (et131x_speed_set < PARM_SPEED_DUPLEX_MIN || |
237 | et131x_speed_set > PARM_SPEED_DUPLEX_MAX) { | 174 | et131x_speed_set > PARM_SPEED_DUPLEX_MAX) { |
238 | printk(KERN_WARNING "et131x: invalid speed setting ignored.\n"); | 175 | printk(KERN_WARNING "et131x: invalid speed setting ignored.\n"); |
239 | et131x_speed_set = 0; | 176 | et131x_speed_set = 0; |
240 | } | 177 | } |
241 | 178 | return pci_register_driver(&et131x_driver); | |
242 | result = pci_register_driver(&et131x_driver); | ||
243 | |||
244 | DBG_LEAVE(et131x_dbginfo); | ||
245 | return result; | ||
246 | } | 179 | } |
247 | 180 | ||
248 | /** | 181 | /** |
249 | * et131x_cleanup_module - The entry point called on driver cleanup | 182 | * et131x_cleanup_module - The entry point called on driver cleanup |
250 | */ | 183 | */ |
251 | void et131x_cleanup_module(void) | 184 | static void et131x_cleanup_module(void) |
252 | { | 185 | { |
253 | DBG_ENTER(et131x_dbginfo); | ||
254 | |||
255 | pci_unregister_driver(&et131x_driver); | 186 | pci_unregister_driver(&et131x_driver); |
256 | |||
257 | DBG_LEAVE(et131x_dbginfo); | ||
258 | } | 187 | } |
259 | 188 | ||
260 | /* | 189 | /* |
261 | * These macros map the driver-specific init_module() and cleanup_module() | 190 | * These macros map the driver-specific init_module() and cleanup_module() |
262 | * routines so they can be called by the kernel. | 191 | * routines so they can be called by the kernel. |
263 | */ | 192 | */ |
193 | |||
264 | module_init(et131x_init_module); | 194 | module_init(et131x_init_module); |
265 | module_exit(et131x_cleanup_module); | 195 | module_exit(et131x_cleanup_module); |
266 | 196 | ||
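With the debug-level plumbing gone, the hunk above reduces module entry and exit to the standard PCI registration boilerplate and makes both functions static. For orientation, a hedged sketch of that minimal shape (hypothetical names; the __init/__exit annotations are the common idiom, not something this patch adds):

    #include <linux/module.h>
    #include <linux/pci.h>

    /* Sketch only; a real struct pci_driver also needs .id_table, .probe, .remove. */
    static struct pci_driver example_driver = {
            .name = "example",
    };

    static int __init example_init(void)
    {
            return pci_register_driver(&example_driver);
    }

    static void __exit example_exit(void)
    {
            pci_unregister_driver(&example_driver);
    }

    module_init(example_init);
    module_exit(example_exit);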
@@ -279,8 +209,6 @@ int et131x_find_adapter(struct et131x_adapter *adapter, struct pci_dev *pdev) | |||
279 | uint8_t read_size_reg; | 209 | uint8_t read_size_reg; |
280 | u8 rev; | 210 | u8 rev; |
281 | 211 | ||
282 | DBG_ENTER(et131x_dbginfo); | ||
283 | |||
284 | /* Allow disabling of Non-Maskable Interrupts in I/O space, to | 212 | /* Allow disabling of Non-Maskable Interrupts in I/O space, to |
285 | * support validation. | 213 | * support validation. |
286 | */ | 214 | */ |
@@ -311,9 +239,8 @@ int et131x_find_adapter(struct et131x_adapter *adapter, struct pci_dev *pdev) | |||
311 | result = pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, | 239 | result = pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, |
312 | &eepromStat); | 240 | &eepromStat); |
313 | if (result != PCIBIOS_SUCCESSFUL) { | 241 | if (result != PCIBIOS_SUCCESSFUL) { |
314 | DBG_ERROR(et131x_dbginfo, "Could not read PCI config space for " | 242 | dev_err(&pdev->dev, "Could not read PCI config space for " |
315 | "EEPROM Status\n"); | 243 | "EEPROM Status\n"); |
316 | DBG_LEAVE(et131x_dbginfo); | ||
317 | return -EIO; | 244 | return -EIO; |
318 | } | 245 | } |
319 | 246 | ||
@@ -323,10 +250,9 @@ int et131x_find_adapter(struct et131x_adapter *adapter, struct pci_dev *pdev) | |||
323 | if (eepromStat & 0x4C) { | 250 | if (eepromStat & 0x4C) { |
324 | result = pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); | 251 | result = pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); |
325 | if (result != PCIBIOS_SUCCESSFUL) { | 252 | if (result != PCIBIOS_SUCCESSFUL) { |
326 | DBG_ERROR(et131x_dbginfo, | 253 | dev_err(&pdev->dev, |
327 | "Could not read PCI config space for " | 254 | "Could not read PCI config space for " |
328 | "Revision ID\n"); | 255 | "Revision ID\n"); |
329 | DBG_LEAVE(et131x_dbginfo); | ||
330 | return -EIO; | 256 | return -EIO; |
331 | } else if (rev == 0x01) { | 257 | } else if (rev == 0x01) { |
332 | int32_t nLoop; | 258 | int32_t nLoop; |
@@ -341,8 +267,7 @@ int et131x_find_adapter(struct et131x_adapter *adapter, struct pci_dev *pdev) | |||
341 | } | 267 | } |
342 | } | 268 | } |
343 | 269 | ||
344 | DBG_ERROR(et131x_dbginfo, | 270 | dev_err(&pdev->dev, "Fatal EEPROM Status Error - 0x%04x\n", eepromStat); |
345 | "Fatal EEPROM Status Error - 0x%04x\n", eepromStat); | ||
346 | 271 | ||
347 | /* This error could mean that there was an error reading the | 272 | /* This error could mean that there was an error reading the |
348 | * eeprom or that the eeprom doesn't exist. We will treat | 273 | * eeprom or that the eeprom doesn't exist. We will treat |
@@ -351,14 +276,9 @@ int et131x_find_adapter(struct et131x_adapter *adapter, struct pci_dev *pdev) | |||
351 | * MAC Address | 276 | * MAC Address |
352 | */ | 277 | */ |
353 | adapter->has_eeprom = 0; | 278 | adapter->has_eeprom = 0; |
354 | |||
355 | DBG_LEAVE(et131x_dbginfo); | ||
356 | return -EIO; | 279 | return -EIO; |
357 | } else { | 280 | } else |
358 | DBG_TRACE(et131x_dbginfo, "EEPROM Status Code - 0x%04x\n", | ||
359 | eepromStat); | ||
360 | adapter->has_eeprom = 1; | 281 | adapter->has_eeprom = 1; |
361 | } | ||
362 | 282 | ||
363 | /* Read the EEPROM for information regarding LED behavior. Refer to | 283 | /* Read the EEPROM for information regarding LED behavior. Refer to |
364 | * ET1310_phy.c, et131x_xcvr_init(), for its use. | 284 | * ET1310_phy.c, et131x_xcvr_init(), for its use. |
@@ -375,9 +295,8 @@ int et131x_find_adapter(struct et131x_adapter *adapter, struct pci_dev *pdev) | |||
375 | */ | 295 | */ |
376 | result = pci_read_config_byte(pdev, ET1310_PCI_MAX_PYLD, &maxPayload); | 296 | result = pci_read_config_byte(pdev, ET1310_PCI_MAX_PYLD, &maxPayload); |
377 | if (result != PCIBIOS_SUCCESSFUL) { | 297 | if (result != PCIBIOS_SUCCESSFUL) { |
378 | DBG_ERROR(et131x_dbginfo, "Could not read PCI config space for " | 298 | dev_err(&pdev->dev, |
379 | "Max Payload Size\n"); | 299 | "Could not read PCI config space for Max Payload Size\n"); |
380 | DBG_LEAVE(et131x_dbginfo); | ||
381 | return -EIO; | 300 | return -EIO; |
382 | } | 301 | } |
383 | 302 | ||
@@ -391,20 +310,16 @@ int et131x_find_adapter(struct et131x_adapter *adapter, struct pci_dev *pdev) | |||
391 | result = pci_write_config_word(pdev, ET1310_PCI_ACK_NACK, | 310 | result = pci_write_config_word(pdev, ET1310_PCI_ACK_NACK, |
392 | AckNak[maxPayload]); | 311 | AckNak[maxPayload]); |
393 | if (result != PCIBIOS_SUCCESSFUL) { | 312 | if (result != PCIBIOS_SUCCESSFUL) { |
394 | DBG_ERROR(et131x_dbginfo, | 313 | dev_err(&pdev->dev, |
395 | "Could not write PCI config space " | 314 | "Could not write PCI config space for ACK/NAK\n"); |
396 | "for ACK/NAK\n"); | ||
397 | DBG_LEAVE(et131x_dbginfo); | ||
398 | return -EIO; | 315 | return -EIO; |
399 | } | 316 | } |
400 | 317 | ||
401 | result = pci_write_config_word(pdev, ET1310_PCI_REPLAY, | 318 | result = pci_write_config_word(pdev, ET1310_PCI_REPLAY, |
402 | Replay[maxPayload]); | 319 | Replay[maxPayload]); |
403 | if (result != PCIBIOS_SUCCESSFUL) { | 320 | if (result != PCIBIOS_SUCCESSFUL) { |
404 | DBG_ERROR(et131x_dbginfo, | 321 | dev_err(&pdev->dev, |
405 | "Could not write PCI config space " | 322 | "Could not write PCI config space for Replay Timer\n"); |
406 | "for Replay Timer\n"); | ||
407 | DBG_LEAVE(et131x_dbginfo); | ||
408 | return -EIO; | 323 | return -EIO; |
409 | } | 324 | } |
410 | } | 325 | } |
@@ -414,19 +329,16 @@ int et131x_find_adapter(struct et131x_adapter *adapter, struct pci_dev *pdev) | |||
414 | */ | 329 | */ |
415 | result = pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11); | 330 | result = pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11); |
416 | if (result != PCIBIOS_SUCCESSFUL) { | 331 | if (result != PCIBIOS_SUCCESSFUL) { |
417 | DBG_ERROR(et131x_dbginfo, | 332 | dev_err(&pdev->dev, |
418 | "Could not write PCI config space for " | 333 | "Could not write PCI config space for Latency Timers\n"); |
419 | "Latency Timers\n"); | ||
420 | DBG_LEAVE(et131x_dbginfo); | ||
421 | return -EIO; | 334 | return -EIO; |
422 | } | 335 | } |
423 | 336 | ||
424 | /* Change the max read size to 2k */ | 337 | /* Change the max read size to 2k */ |
425 | result = pci_read_config_byte(pdev, 0x51, &read_size_reg); | 338 | result = pci_read_config_byte(pdev, 0x51, &read_size_reg); |
426 | if (result != PCIBIOS_SUCCESSFUL) { | 339 | if (result != PCIBIOS_SUCCESSFUL) { |
427 | DBG_ERROR(et131x_dbginfo, | 340 | dev_err(&pdev->dev, |
428 | "Could not read PCI config space for Max read size\n"); | 341 | "Could not read PCI config space for Max read size\n"); |
429 | DBG_LEAVE(et131x_dbginfo); | ||
430 | return -EIO; | 342 | return -EIO; |
431 | } | 343 | } |
432 | 344 | ||
@@ -435,9 +347,8 @@ int et131x_find_adapter(struct et131x_adapter *adapter, struct pci_dev *pdev) | |||
435 | 347 | ||
436 | result = pci_write_config_byte(pdev, 0x51, read_size_reg); | 348 | result = pci_write_config_byte(pdev, 0x51, read_size_reg); |
437 | if (result != PCIBIOS_SUCCESSFUL) { | 349 | if (result != PCIBIOS_SUCCESSFUL) { |
438 | DBG_ERROR(et131x_dbginfo, | 350 | dev_err(&pdev->dev, |
439 | "Could not write PCI config space for Max read size\n"); | 351 | "Could not write PCI config space for Max read size\n"); |
440 | DBG_LEAVE(et131x_dbginfo); | ||
441 | return -EIO; | 352 | return -EIO; |
442 | } | 353 | } |
443 | 354 | ||
@@ -452,15 +363,11 @@ int et131x_find_adapter(struct et131x_adapter *adapter, struct pci_dev *pdev) | |||
452 | pdev, ET1310_PCI_MAC_ADDRESS + i, | 363 | pdev, ET1310_PCI_MAC_ADDRESS + i, |
453 | adapter->PermanentAddress + i); | 364 | adapter->PermanentAddress + i); |
454 | if (result != PCIBIOS_SUCCESSFUL) { | 365 | if (result != PCIBIOS_SUCCESSFUL) { |
455 | DBG_ERROR(et131x_dbginfo, | 366 | dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n"); |
456 | "Could not read PCI config space for MAC address\n"); | ||
457 | DBG_LEAVE(et131x_dbginfo); | ||
458 | return -EIO; | 367 | return -EIO; |
459 | } | 368 | } |
460 | } | 369 | } |
461 | } | 370 | } |
462 | |||
463 | DBG_LEAVE(et131x_dbginfo); | ||
464 | return 0; | 371 | return 0; |
465 | } | 372 | } |
466 | 373 | ||
@@ -481,9 +388,8 @@ void et131x_error_timer_handler(unsigned long data) | |||
481 | if ((pm_csr & ET_PM_PHY_SW_COMA) == 0) | 388 | if ((pm_csr & ET_PM_PHY_SW_COMA) == 0) |
482 | UpdateMacStatHostCounters(etdev); | 389 | UpdateMacStatHostCounters(etdev); |
483 | else | 390 | else |
484 | DBG_VERBOSE(et131x_dbginfo, | 391 | dev_err(&etdev->pdev->dev, |
485 | "No interrupts, in PHY coma, pm_csr = 0x%x\n", | 392 | "No interrupts, in PHY coma, pm_csr = 0x%x\n", pm_csr); |
486 | pm_csr); | ||
487 | 393 | ||
488 | if (!etdev->Bmsr.bits.link_status && | 394 | if (!etdev->Bmsr.bits.link_status && |
489 | etdev->RegistryPhyComa && | 395 | etdev->RegistryPhyComa && |
@@ -541,8 +447,6 @@ void ConfigGlobalRegs(struct et131x_adapter *etdev) | |||
541 | { | 447 | { |
542 | struct _GLOBAL_t __iomem *regs = &etdev->regs->global; | 448 | struct _GLOBAL_t __iomem *regs = &etdev->regs->global; |
543 | 449 | ||
544 | DBG_ENTER(et131x_dbginfo); | ||
545 | |||
546 | if (etdev->RegistryPhyLoopbk == false) { | 450 | if (etdev->RegistryPhyLoopbk == false) { |
547 | if (etdev->RegistryJumboPacket < 2048) { | 451 | if (etdev->RegistryJumboPacket < 2048) { |
548 | /* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word | 452 | /* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word |
@@ -596,8 +500,6 @@ void ConfigGlobalRegs(struct et131x_adapter *etdev) | |||
596 | * a packet is queued. | 500 | * a packet is queued. |
597 | */ | 501 | */ |
598 | writel(0, ®s->watchdog_timer); | 502 | writel(0, ®s->watchdog_timer); |
599 | |||
600 | DBG_LEAVE(et131x_dbginfo); | ||
601 | } | 503 | } |
602 | 504 | ||
603 | 505 | ||
@@ -611,8 +513,6 @@ int et131x_adapter_setup(struct et131x_adapter *etdev) | |||
611 | { | 513 | { |
612 | int status = 0; | 514 | int status = 0; |
613 | 515 | ||
614 | DBG_ENTER(et131x_dbginfo); | ||
615 | |||
616 | /* Configure the JAGCore */ | 516 | /* Configure the JAGCore */ |
617 | ConfigGlobalRegs(etdev); | 517 | ConfigGlobalRegs(etdev); |
618 | 518 | ||
@@ -634,7 +534,7 @@ int et131x_adapter_setup(struct et131x_adapter *etdev) | |||
634 | status = et131x_xcvr_find(etdev); | 534 | status = et131x_xcvr_find(etdev); |
635 | 535 | ||
636 | if (status != 0) | 536 | if (status != 0) |
637 | DBG_WARNING(et131x_dbginfo, "Could not find the xcvr\n"); | 537 | dev_warn(&etdev->pdev->dev, "Could not find the xcvr\n"); |
638 | 538 | ||
639 | /* Prepare the TRUEPHY library. */ | 539 | /* Prepare the TRUEPHY library. */ |
640 | ET1310_PhyInit(etdev); | 540 | ET1310_PhyInit(etdev); |
@@ -658,9 +558,7 @@ int et131x_adapter_setup(struct et131x_adapter *etdev) | |||
658 | ET1310_PhyPowerDown(etdev, 0); | 558 | ET1310_PhyPowerDown(etdev, 0); |
659 | 559 | ||
660 | et131x_setphy_normal(etdev); | 560 | et131x_setphy_normal(etdev); |
661 | 561 | ; return status; | |
662 | DBG_LEAVE(et131x_dbginfo); | ||
663 | return status; | ||
664 | } | 562 | } |
665 | 563 | ||
666 | /** | 564 | /** |
@@ -669,8 +567,6 @@ int et131x_adapter_setup(struct et131x_adapter *etdev) | |||
669 | */ | 567 | */ |
670 | void et131x_setup_hardware_properties(struct et131x_adapter *adapter) | 568 | void et131x_setup_hardware_properties(struct et131x_adapter *adapter) |
671 | { | 569 | { |
672 | DBG_ENTER(et131x_dbginfo); | ||
673 | |||
674 | /* If we have our default MAC from the registry and no MAC address from | 570 | /* If we have our default MAC from the registry and no MAC address from |
675 | * EEPROM then we need to generate the last octet and set it on the | 571 | * EEPROM then we need to generate the last octet and set it on the |
676 | * device | 572 | * device |
@@ -702,8 +598,6 @@ void et131x_setup_hardware_properties(struct et131x_adapter *adapter) | |||
702 | memcpy(adapter->CurrentAddress, | 598 | memcpy(adapter->CurrentAddress, |
703 | adapter->PermanentAddress, ETH_ALEN); | 599 | adapter->PermanentAddress, ETH_ALEN); |
704 | } | 600 | } |
705 | |||
706 | DBG_LEAVE(et131x_dbginfo); | ||
707 | } | 601 | } |
708 | 602 | ||
709 | /** | 603 | /** |
@@ -712,8 +606,6 @@ void et131x_setup_hardware_properties(struct et131x_adapter *adapter) | |||
712 | */ | 606 | */ |
713 | void et131x_soft_reset(struct et131x_adapter *adapter) | 607 | void et131x_soft_reset(struct et131x_adapter *adapter) |
714 | { | 608 | { |
715 | DBG_ENTER(et131x_dbginfo); | ||
716 | |||
717 | /* Disable MAC Core */ | 609 | /* Disable MAC Core */ |
718 | writel(0xc00f0000, &adapter->regs->mac.cfg1.value); | 610 | writel(0xc00f0000, &adapter->regs->mac.cfg1.value); |
719 | 611 | ||
@@ -721,8 +613,6 @@ void et131x_soft_reset(struct et131x_adapter *adapter) | |||
721 | writel(0x7F, &adapter->regs->global.sw_reset); | 613 | writel(0x7F, &adapter->regs->global.sw_reset); |
722 | writel(0x000f0000, &adapter->regs->mac.cfg1.value); | 614 | writel(0x000f0000, &adapter->regs->mac.cfg1.value); |
723 | writel(0x00000000, &adapter->regs->mac.cfg1.value); | 615 | writel(0x00000000, &adapter->regs->mac.cfg1.value); |
724 | |||
725 | DBG_LEAVE(et131x_dbginfo); | ||
726 | } | 616 | } |
727 | 617 | ||
728 | /** | 618 | /** |
@@ -738,8 +628,6 @@ void et131x_align_allocated_memory(struct et131x_adapter *adapter, | |||
738 | { | 628 | { |
739 | uint64_t new_addr; | 629 | uint64_t new_addr; |
740 | 630 | ||
741 | DBG_ENTER(et131x_dbginfo); | ||
742 | |||
743 | *offset = 0; | 631 | *offset = 0; |
744 | 632 | ||
745 | new_addr = *phys_addr & ~mask; | 633 | new_addr = *phys_addr & ~mask; |
@@ -752,8 +640,6 @@ void et131x_align_allocated_memory(struct et131x_adapter *adapter, | |||
752 | /* Return new physical address */ | 640 | /* Return new physical address */ |
753 | *phys_addr = new_addr; | 641 | *phys_addr = new_addr; |
754 | } | 642 | } |
755 | |||
756 | DBG_LEAVE(et131x_dbginfo); | ||
757 | } | 643 | } |
758 | 644 | ||
759 | /** | 645 | /** |
@@ -768,13 +654,11 @@ int et131x_adapter_memory_alloc(struct et131x_adapter *adapter) | |||
768 | { | 654 | { |
769 | int status = 0; | 655 | int status = 0; |
770 | 656 | ||
771 | DBG_ENTER(et131x_dbginfo); | ||
772 | |||
773 | do { | 657 | do { |
774 | /* Allocate memory for the Tx Ring */ | 658 | /* Allocate memory for the Tx Ring */ |
775 | status = et131x_tx_dma_memory_alloc(adapter); | 659 | status = et131x_tx_dma_memory_alloc(adapter); |
776 | if (status != 0) { | 660 | if (status != 0) { |
777 | DBG_ERROR(et131x_dbginfo, | 661 | dev_err(&adapter->pdev->dev, |
778 | "et131x_tx_dma_memory_alloc FAILED\n"); | 662 | "et131x_tx_dma_memory_alloc FAILED\n"); |
779 | break; | 663 | break; |
780 | } | 664 | } |
@@ -782,7 +666,7 @@ int et131x_adapter_memory_alloc(struct et131x_adapter *adapter) | |||
782 | /* Receive buffer memory allocation */ | 666 | /* Receive buffer memory allocation */ |
783 | status = et131x_rx_dma_memory_alloc(adapter); | 667 | status = et131x_rx_dma_memory_alloc(adapter); |
784 | if (status != 0) { | 668 | if (status != 0) { |
785 | DBG_ERROR(et131x_dbginfo, | 669 | dev_err(&adapter->pdev->dev, |
786 | "et131x_rx_dma_memory_alloc FAILED\n"); | 670 | "et131x_rx_dma_memory_alloc FAILED\n"); |
787 | et131x_tx_dma_memory_free(adapter); | 671 | et131x_tx_dma_memory_free(adapter); |
788 | break; | 672 | break; |
@@ -791,14 +675,13 @@ int et131x_adapter_memory_alloc(struct et131x_adapter *adapter) | |||
791 | /* Init receive data structures */ | 675 | /* Init receive data structures */ |
792 | status = et131x_init_recv(adapter); | 676 | status = et131x_init_recv(adapter); |
793 | if (status != 0) { | 677 | if (status != 0) { |
794 | DBG_ERROR(et131x_dbginfo, "et131x_init_recv FAILED\n"); | 678 | dev_err(&adapter->pdev->dev, |
679 | "et131x_init_recv FAILED\n"); | ||
795 | et131x_tx_dma_memory_free(adapter); | 680 | et131x_tx_dma_memory_free(adapter); |
796 | et131x_rx_dma_memory_free(adapter); | 681 | et131x_rx_dma_memory_free(adapter); |
797 | break; | 682 | break; |
798 | } | 683 | } |
799 | } while (0); | 684 | } while (0); |
800 | |||
801 | DBG_LEAVE(et131x_dbginfo); | ||
802 | return status; | 685 | return status; |
803 | } | 686 | } |
804 | 687 | ||
@@ -808,13 +691,9 @@ int et131x_adapter_memory_alloc(struct et131x_adapter *adapter) | |||
808 | */ | 691 | */ |
809 | void et131x_adapter_memory_free(struct et131x_adapter *adapter) | 692 | void et131x_adapter_memory_free(struct et131x_adapter *adapter) |
810 | { | 693 | { |
811 | DBG_ENTER(et131x_dbginfo); | ||
812 | |||
813 | /* Free DMA memory */ | 694 | /* Free DMA memory */ |
814 | et131x_tx_dma_memory_free(adapter); | 695 | et131x_tx_dma_memory_free(adapter); |
815 | et131x_rx_dma_memory_free(adapter); | 696 | et131x_rx_dma_memory_free(adapter); |
816 | |||
817 | DBG_LEAVE(et131x_dbginfo); | ||
818 | } | 697 | } |
819 | 698 | ||
820 | /** | 699 | /** |
@@ -830,8 +709,6 @@ void __devexit et131x_pci_remove(struct pci_dev *pdev) | |||
830 | struct net_device *netdev; | 709 | struct net_device *netdev; |
831 | struct et131x_adapter *adapter; | 710 | struct et131x_adapter *adapter; |
832 | 711 | ||
833 | DBG_ENTER(et131x_dbginfo); | ||
834 | |||
835 | /* Retrieve the net_device pointer from the pci_dev struct, as well | 712 | /* Retrieve the net_device pointer from the pci_dev struct, as well |
836 | * as the private adapter struct | 713 | * as the private adapter struct |
837 | */ | 714 | */ |
@@ -846,8 +723,6 @@ void __devexit et131x_pci_remove(struct pci_dev *pdev) | |||
846 | free_netdev(netdev); | 723 | free_netdev(netdev); |
847 | pci_release_regions(pdev); | 724 | pci_release_regions(pdev); |
848 | pci_disable_device(pdev); | 725 | pci_disable_device(pdev); |
849 | |||
850 | DBG_LEAVE(et131x_dbginfo); | ||
851 | } | 726 | } |
852 | 727 | ||
853 | /** | 728 | /** |
@@ -866,11 +741,9 @@ void et131x_config_parse(struct et131x_adapter *etdev) | |||
866 | static const u8 duplex[] = { 0, 1, 2, 1, 2, 2 }; | 741 | static const u8 duplex[] = { 0, 1, 2, 1, 2, 2 }; |
867 | static const u16 speed[] = { 0, 10, 10, 100, 100, 1000 }; | 742 | static const u16 speed[] = { 0, 10, 10, 100, 100, 1000 }; |
868 | 743 | ||
869 | DBG_ENTER(et131x_dbginfo); | ||
870 | |||
871 | if (et131x_speed_set) | 744 | if (et131x_speed_set) |
872 | DBG_VERBOSE(et131x_dbginfo, "Speed set manually to : %d \n", | 745 | dev_info(&etdev->pdev->dev, |
873 | et131x_speed_set); | 746 | "Speed set manually to : %d \n", et131x_speed_set); |
874 | 747 | ||
875 | etdev->SpeedDuplex = et131x_speed_set; | 748 | etdev->SpeedDuplex = et131x_speed_set; |
876 | etdev->RegistryJumboPacket = 1514; /* 1514-9216 */ | 749 | etdev->RegistryJumboPacket = 1514; /* 1514-9216 */ |
@@ -894,8 +767,6 @@ void et131x_config_parse(struct et131x_adapter *etdev) | |||
894 | 767 | ||
895 | etdev->AiForceSpeed = speed[etdev->SpeedDuplex]; | 768 | etdev->AiForceSpeed = speed[etdev->SpeedDuplex]; |
896 | etdev->AiForceDpx = duplex[etdev->SpeedDuplex]; /* Auto FDX */ | 769 | etdev->AiForceDpx = duplex[etdev->SpeedDuplex]; /* Auto FDX */ |
897 | |||
898 | DBG_LEAVE(et131x_dbginfo); | ||
899 | } | 770 | } |
900 | 771 | ||
901 | 772 | ||
@@ -920,18 +791,17 @@ int __devinit et131x_pci_setup(struct pci_dev *pdev, | |||
920 | struct net_device *netdev = NULL; | 791 | struct net_device *netdev = NULL; |
921 | struct et131x_adapter *adapter = NULL; | 792 | struct et131x_adapter *adapter = NULL; |
922 | 793 | ||
923 | DBG_ENTER(et131x_dbginfo); | ||
924 | |||
925 | /* Enable the device via the PCI subsystem */ | 794 | /* Enable the device via the PCI subsystem */ |
926 | result = pci_enable_device(pdev); | 795 | result = pci_enable_device(pdev); |
927 | if (result != 0) { | 796 | if (result != 0) { |
928 | DBG_ERROR(et131x_dbginfo, "pci_enable_device() failed\n"); | 797 | dev_err(&pdev->dev, |
798 | "pci_enable_device() failed\n"); | ||
929 | goto out; | 799 | goto out; |
930 | } | 800 | } |
931 | 801 | ||
932 | /* Perform some basic PCI checks */ | 802 | /* Perform some basic PCI checks */ |
933 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { | 803 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { |
934 | DBG_ERROR(et131x_dbginfo, | 804 | dev_err(&pdev->dev, |
935 | "Can't find PCI device's base address\n"); | 805 | "Can't find PCI device's base address\n"); |
936 | result = -ENODEV; | 806 | result = -ENODEV; |
937 | goto out; | 807 | goto out; |
@@ -939,12 +809,12 @@ int __devinit et131x_pci_setup(struct pci_dev *pdev, | |||
939 | 809 | ||
940 | result = pci_request_regions(pdev, DRIVER_NAME); | 810 | result = pci_request_regions(pdev, DRIVER_NAME); |
941 | if (result != 0) { | 811 | if (result != 0) { |
942 | DBG_ERROR(et131x_dbginfo, "Can't get PCI resources\n"); | 812 | dev_err(&pdev->dev, |
813 | "Can't get PCI resources\n"); | ||
943 | goto err_disable; | 814 | goto err_disable; |
944 | } | 815 | } |
945 | 816 | ||
946 | /* Enable PCI bus mastering */ | 817 | /* Enable PCI bus mastering */ |
947 | DBG_TRACE(et131x_dbginfo, "Setting PCI Bus Mastering...\n"); | ||
948 | pci_set_master(pdev); | 818 | pci_set_master(pdev); |
949 | 819 | ||
950 | /* Query PCI for Power Mgmt Capabilities | 820 | /* Query PCI for Power Mgmt Capabilities |
@@ -954,7 +824,7 @@ int __devinit et131x_pci_setup(struct pci_dev *pdev, | |||
954 | */ | 824 | */ |
955 | pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); | 825 | pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); |
956 | if (pm_cap == 0) { | 826 | if (pm_cap == 0) { |
957 | DBG_ERROR(et131x_dbginfo, | 827 | dev_err(&pdev->dev, |
958 | "Cannot find Power Management capabilities\n"); | 828 | "Cannot find Power Management capabilities\n"); |
959 | result = -EIO; | 829 | result = -EIO; |
960 | goto err_release_res; | 830 | goto err_release_res; |
@@ -962,40 +832,34 @@ int __devinit et131x_pci_setup(struct pci_dev *pdev, | |||
962 | 832 | ||
963 | /* Check the DMA addressing support of this device */ | 833 | /* Check the DMA addressing support of this device */ |
964 | if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) { | 834 | if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) { |
965 | DBG_TRACE(et131x_dbginfo, "64-bit DMA addressing supported\n"); | ||
966 | pci_using_dac = true; | 835 | pci_using_dac = true; |
967 | 836 | ||
968 | result = | 837 | result = |
969 | pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL); | 838 | pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL); |
970 | if (result != 0) { | 839 | if (result != 0) { |
971 | DBG_ERROR(et131x_dbginfo, | 840 | dev_err(&pdev->dev, |
972 | "Unable to obtain 64 bit DMA for consistent allocations\n"); | 841 | "Unable to obtain 64 bit DMA for consistent allocations\n"); |
973 | goto err_release_res; | 842 | goto err_release_res; |
974 | } | 843 | } |
975 | } else if (!pci_set_dma_mask(pdev, 0xffffffffULL)) { | 844 | } else if (!pci_set_dma_mask(pdev, 0xffffffffULL)) { |
976 | DBG_TRACE(et131x_dbginfo, | ||
977 | "64-bit DMA addressing NOT supported\n"); | ||
978 | DBG_TRACE(et131x_dbginfo, | ||
979 | "32-bit DMA addressing will be used\n"); | ||
980 | pci_using_dac = false; | 845 | pci_using_dac = false; |
981 | } else { | 846 | } else { |
982 | DBG_ERROR(et131x_dbginfo, "No usable DMA addressing method\n"); | 847 | dev_err(&pdev->dev, |
848 | "No usable DMA addressing method\n"); | ||
983 | result = -EIO; | 849 | result = -EIO; |
984 | goto err_release_res; | 850 | goto err_release_res; |
985 | } | 851 | } |
986 | 852 | ||
987 | /* Allocate netdev and private adapter structs */ | 853 | /* Allocate netdev and private adapter structs */ |
988 | DBG_TRACE(et131x_dbginfo, | ||
989 | "Allocate netdev and private adapter structs...\n"); | ||
990 | netdev = et131x_device_alloc(); | 854 | netdev = et131x_device_alloc(); |
991 | if (netdev == NULL) { | 855 | if (netdev == NULL) { |
992 | DBG_ERROR(et131x_dbginfo, "Couldn't alloc netdev struct\n"); | 856 | dev_err(&pdev->dev, |
857 | "Couldn't alloc netdev struct\n"); | ||
993 | result = -ENOMEM; | 858 | result = -ENOMEM; |
994 | goto err_release_res; | 859 | goto err_release_res; |
995 | } | 860 | } |
996 | 861 | ||
997 | /* Setup the fundamental net_device and private adapter structure elements */ | 862 | /* Setup the fundamental net_device and private adapter structure elements */ |
998 | DBG_TRACE(et131x_dbginfo, "Setting fundamental net_device info...\n"); | ||
999 | SET_NETDEV_DEV(netdev, &pdev->dev); | 863 | SET_NETDEV_DEV(netdev, &pdev->dev); |
1000 | /* | 864 | /* |
1001 | if (pci_using_dac) { | 865 | if (pci_using_dac) { |
@@ -1036,8 +900,6 @@ int __devinit et131x_pci_setup(struct pci_dev *pdev, | |||
1036 | netdev->base_addr = pdev->resource[0].start; | 900 | netdev->base_addr = pdev->resource[0].start; |
1037 | 901 | ||
1038 | /* Initialize spinlocks here */ | 902 | /* Initialize spinlocks here */ |
1039 | DBG_TRACE(et131x_dbginfo, "Initialize spinlocks...\n"); | ||
1040 | |||
1041 | spin_lock_init(&adapter->Lock); | 903 | spin_lock_init(&adapter->Lock); |
1042 | spin_lock_init(&adapter->TCBSendQLock); | 904 | spin_lock_init(&adapter->TCBSendQLock); |
1043 | spin_lock_init(&adapter->TCBReadyQLock); | 905 | spin_lock_init(&adapter->TCBReadyQLock); |
@@ -1061,13 +923,11 @@ int __devinit et131x_pci_setup(struct pci_dev *pdev, | |||
1061 | et131x_find_adapter(adapter, pdev); | 923 | et131x_find_adapter(adapter, pdev); |
1062 | 924 | ||
1063 | /* Map the bus-relative registers to system virtual memory */ | 925 | /* Map the bus-relative registers to system virtual memory */ |
1064 | DBG_TRACE(et131x_dbginfo, | ||
1065 | "Mapping bus-relative registers to virtual memory...\n"); | ||
1066 | 926 | ||
1067 | adapter->regs = ioremap_nocache(pci_resource_start(pdev, 0), | 927 | adapter->regs = ioremap_nocache(pci_resource_start(pdev, 0), |
1068 | pci_resource_len(pdev, 0)); | 928 | pci_resource_len(pdev, 0)); |
1069 | if (adapter->regs == NULL) { | 929 | if (adapter->regs == NULL) { |
1070 | DBG_ERROR(et131x_dbginfo, "Cannot map device registers\n"); | 930 | dev_err(&pdev->dev, "Cannot map device registers\n"); |
1071 | result = -ENOMEM; | 931 | result = -ENOMEM; |
1072 | goto err_free_dev; | 932 | goto err_free_dev; |
1073 | } | 933 | } |
@@ -1078,23 +938,19 @@ int __devinit et131x_pci_setup(struct pci_dev *pdev, | |||
1078 | writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr); | 938 | writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr); |
1079 | 939 | ||
1080 | /* Issue a global reset to the et1310 */ | 940 | /* Issue a global reset to the et1310 */ |
1081 | DBG_TRACE(et131x_dbginfo, "Issuing soft reset...\n"); | ||
1082 | et131x_soft_reset(adapter); | 941 | et131x_soft_reset(adapter); |
1083 | 942 | ||
1084 | /* Disable all interrupts (paranoid) */ | 943 | /* Disable all interrupts (paranoid) */ |
1085 | DBG_TRACE(et131x_dbginfo, "Disable device interrupts...\n"); | ||
1086 | et131x_disable_interrupts(adapter); | 944 | et131x_disable_interrupts(adapter); |
1087 | 945 | ||
1088 | /* Allocate DMA memory */ | 946 | /* Allocate DMA memory */ |
1089 | result = et131x_adapter_memory_alloc(adapter); | 947 | result = et131x_adapter_memory_alloc(adapter); |
1090 | if (result != 0) { | 948 | if (result != 0) { |
1091 | DBG_ERROR(et131x_dbginfo, | 949 | dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n"); |
1092 | "Could not alloc adapater memory (DMA)\n"); | ||
1093 | goto err_iounmap; | 950 | goto err_iounmap; |
1094 | } | 951 | } |
1095 | 952 | ||
1096 | /* Init send data structures */ | 953 | /* Init send data structures */ |
1097 | DBG_TRACE(et131x_dbginfo, "Init send data structures...\n"); | ||
1098 | et131x_init_send(adapter); | 954 | et131x_init_send(adapter); |
1099 | 955 | ||
1100 | /* Register the interrupt | 956 | /* Register the interrupt |
@@ -1109,13 +965,11 @@ int __devinit et131x_pci_setup(struct pci_dev *pdev, | |||
1109 | INIT_WORK(&adapter->task, et131x_isr_handler); | 965 | INIT_WORK(&adapter->task, et131x_isr_handler); |
1110 | 966 | ||
1111 | /* Determine MAC Address, and copy into the net_device struct */ | 967 | /* Determine MAC Address, and copy into the net_device struct */ |
1112 | DBG_TRACE(et131x_dbginfo, "Retrieve MAC address...\n"); | ||
1113 | et131x_setup_hardware_properties(adapter); | 968 | et131x_setup_hardware_properties(adapter); |
1114 | 969 | ||
1115 | memcpy(netdev->dev_addr, adapter->CurrentAddress, ETH_ALEN); | 970 | memcpy(netdev->dev_addr, adapter->CurrentAddress, ETH_ALEN); |
1116 | 971 | ||
1117 | /* Setup et1310 as per the documentation */ | 972 | /* Setup et1310 as per the documentation */ |
1118 | DBG_TRACE(et131x_dbginfo, "Setup the adapter...\n"); | ||
1119 | et131x_adapter_setup(adapter); | 973 | et131x_adapter_setup(adapter); |
1120 | 974 | ||
1121 | /* Create a timer to count errors received by the NIC */ | 975 | /* Create a timer to count errors received by the NIC */ |
@@ -1140,10 +994,9 @@ int __devinit et131x_pci_setup(struct pci_dev *pdev, | |||
1140 | */ | 994 | */ |
1141 | 995 | ||
1142 | /* Register the net_device struct with the Linux network layer */ | 996 | /* Register the net_device struct with the Linux network layer */ |
1143 | DBG_TRACE(et131x_dbginfo, "Registering net_device...\n"); | ||
1144 | result = register_netdev(netdev); | 997 | result = register_netdev(netdev); |
1145 | if (result != 0) { | 998 | if (result != 0) { |
1146 | DBG_ERROR(et131x_dbginfo, "register_netdev() failed\n"); | 999 | dev_err(&pdev->dev, "register_netdev() failed\n"); |
1147 | goto err_mem_free; | 1000 | goto err_mem_free; |
1148 | } | 1001 | } |
1149 | 1002 | ||
@@ -1156,7 +1009,6 @@ int __devinit et131x_pci_setup(struct pci_dev *pdev, | |||
1156 | pci_save_state(adapter->pdev); | 1009 | pci_save_state(adapter->pdev); |
1157 | 1010 | ||
1158 | out: | 1011 | out: |
1159 | DBG_LEAVE(et131x_dbginfo); | ||
1160 | return result; | 1012 | return result; |
1161 | 1013 | ||
1162 | err_mem_free: | 1014 | err_mem_free: |
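The conversion pattern through et131x_initpci.c is uniform: DBG_ERROR()/DBG_WARNING() calls, which depended on the global et131x_dbginfo context, become dev_err()/dev_warn() calls keyed off the struct device embedded in the PCI device, and the paired DBG_ENTER()/DBG_LEAVE() trace calls are simply dropped. A minimal sketch of the resulting error-reporting style in a probe-time helper (hypothetical function name, not code from this driver):

    #include <linux/pci.h>

    /* Sketch only: device-prefixed error reporting with no private debug state. */
    static int example_enable(struct pci_dev *pdev)
    {
            int err = pci_enable_device(pdev);

            if (err) {
                    dev_err(&pdev->dev, "pci_enable_device() failed\n");
                    return err;
            }
            return 0;
    }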
diff --git a/drivers/staging/et131x/et131x_isr.c b/drivers/staging/et131x/et131x_isr.c index 878fd205e415..2943178a7a36 100644 --- a/drivers/staging/et131x/et131x_isr.c +++ b/drivers/staging/et131x/et131x_isr.c | |||
@@ -57,7 +57,6 @@ | |||
57 | */ | 57 | */ |
58 | 58 | ||
59 | #include "et131x_version.h" | 59 | #include "et131x_version.h" |
60 | #include "et131x_debug.h" | ||
61 | #include "et131x_defs.h" | 60 | #include "et131x_defs.h" |
62 | 61 | ||
63 | #include <linux/init.h> | 62 | #include <linux/init.h> |
@@ -76,6 +75,7 @@ | |||
76 | #include <linux/delay.h> | 75 | #include <linux/delay.h> |
77 | #include <linux/io.h> | 76 | #include <linux/io.h> |
78 | #include <linux/bitops.h> | 77 | #include <linux/bitops.h> |
78 | #include <linux/pci.h> | ||
79 | #include <asm/system.h> | 79 | #include <asm/system.h> |
80 | 80 | ||
81 | #include <linux/netdevice.h> | 81 | #include <linux/netdevice.h> |
@@ -91,11 +91,6 @@ | |||
91 | 91 | ||
92 | #include "et131x_adapter.h" | 92 | #include "et131x_adapter.h" |
93 | 93 | ||
94 | /* Data for debugging facilities */ | ||
95 | #ifdef CONFIG_ET131X_DEBUG | ||
96 | extern dbg_info_t *et131x_dbginfo; | ||
97 | #endif /* CONFIG_ET131X_DEBUG */ | ||
98 | |||
99 | /** | 94 | /** |
100 | * et131x_enable_interrupts - enable interrupt | 95 | * et131x_enable_interrupts - enable interrupt |
101 | * @adapter: et131x device | 96 | * @adapter: et131x device |
@@ -151,9 +146,7 @@ irqreturn_t et131x_isr(int irq, void *dev_id) | |||
151 | struct et131x_adapter *adapter = NULL; | 146 | struct et131x_adapter *adapter = NULL; |
152 | u32 status; | 147 | u32 status; |
153 | 148 | ||
154 | if (netdev == NULL || !netif_device_present(netdev)) { | 149 | if (!netif_device_present(netdev)) { |
155 | DBG_WARNING(et131x_dbginfo, | ||
156 | "No net_device struct or device not present\n"); | ||
157 | handled = false; | 150 | handled = false; |
158 | goto out; | 151 | goto out; |
159 | } | 152 | } |
@@ -181,23 +174,12 @@ irqreturn_t et131x_isr(int irq, void *dev_id) | |||
181 | 174 | ||
182 | /* Make sure this is our interrupt */ | 175 | /* Make sure this is our interrupt */ |
183 | if (!status) { | 176 | if (!status) { |
184 | #ifdef CONFIG_ET131X_DEBUG | ||
185 | adapter->Stats.UnhandledInterruptsPerSec++; | ||
186 | #endif | ||
187 | handled = false; | 177 | handled = false; |
188 | DBG_VERBOSE(et131x_dbginfo, "NOT OUR INTERRUPT\n"); | ||
189 | et131x_enable_interrupts(adapter); | 178 | et131x_enable_interrupts(adapter); |
190 | goto out; | 179 | goto out; |
191 | } | 180 | } |
192 | 181 | ||
193 | /* This is our interrupt, so process accordingly */ | 182 | /* This is our interrupt, so process accordingly */ |
194 | #ifdef CONFIG_ET131X_DEBUG | ||
195 | if (status & ET_INTR_RXDMA_XFR_DONE) | ||
196 | adapter->Stats.RxDmaInterruptsPerSec++; | ||
197 | |||
198 | if (status & ET_INTR_TXDMA_ISR) | ||
199 | adapter->Stats.TxDmaInterruptsPerSec++; | ||
200 | #endif | ||
201 | 183 | ||
202 | if (status & ET_INTR_WATCHDOG) { | 184 | if (status & ET_INTR_WATCHDOG) { |
203 | PMP_TCB pMpTcb = adapter->TxRing.CurrSendHead; | 185 | PMP_TCB pMpTcb = adapter->TxRing.CurrSendHead; |
@@ -212,9 +194,6 @@ irqreturn_t et131x_isr(int irq, void *dev_id) | |||
212 | writel(0, &adapter->regs->global.watchdog_timer); | 194 | writel(0, &adapter->regs->global.watchdog_timer); |
213 | 195 | ||
214 | status &= ~ET_INTR_WATCHDOG; | 196 | status &= ~ET_INTR_WATCHDOG; |
215 | #ifdef CONFIG_ET131X_DEBUG | ||
216 | adapter->Stats.WatchDogInterruptsPerSec++; | ||
217 | #endif | ||
218 | } | 197 | } |
219 | 198 | ||
220 | if (status == 0) { | 199 | if (status == 0) { |
@@ -263,13 +242,11 @@ void et131x_isr_handler(struct work_struct *work) | |||
263 | */ | 242 | */ |
264 | /* Handle all the completed Transmit interrupts */ | 243 | /* Handle all the completed Transmit interrupts */ |
265 | if (status & ET_INTR_TXDMA_ISR) { | 244 | if (status & ET_INTR_TXDMA_ISR) { |
266 | DBG_TX(et131x_dbginfo, "TXDMA_ISR interrupt\n"); | ||
267 | et131x_handle_send_interrupt(etdev); | 245 | et131x_handle_send_interrupt(etdev); |
268 | } | 246 | } |
269 | 247 | ||
270 | /* Handle all the completed Receives interrupts */ | 248 | /* Handle all the completed Receives interrupts */ |
271 | if (status & ET_INTR_RXDMA_XFR_DONE) { | 249 | if (status & ET_INTR_RXDMA_XFR_DONE) { |
272 | DBG_RX(et131x_dbginfo, "RXDMA_XFR_DONE interrupt\n"); | ||
273 | et131x_handle_recv_interrupt(etdev); | 250 | et131x_handle_recv_interrupt(etdev); |
274 | } | 251 | } |
275 | 252 | ||
@@ -283,7 +260,7 @@ void et131x_isr_handler(struct work_struct *work) | |||
283 | /* Following read also clears the register (COR) */ | 260 | /* Following read also clears the register (COR) */ |
284 | TxDmaErr.value = readl(&iomem->txdma.TxDmaError.value); | 261 | TxDmaErr.value = readl(&iomem->txdma.TxDmaError.value); |
285 | 262 | ||
286 | DBG_WARNING(et131x_dbginfo, | 263 | dev_warn(&etdev->pdev->dev, |
287 | "TXDMA_ERR interrupt, error = %d\n", | 264 | "TXDMA_ERR interrupt, error = %d\n", |
288 | TxDmaErr.value); | 265 | TxDmaErr.value); |
289 | } | 266 | } |
@@ -304,9 +281,6 @@ void et131x_isr_handler(struct work_struct *work) | |||
304 | * ET1310 for re-use. This interrupt is one method of | 281 | * ET1310 for re-use. This interrupt is one method of |
305 | * returning resources. | 282 | * returning resources. |
306 | */ | 283 | */ |
307 | DBG_WARNING(et131x_dbginfo, | ||
308 | "RXDMA_FB_RING0_LOW or " | ||
309 | "RXDMA_FB_RING1_LOW interrupt\n"); | ||
310 | 284 | ||
311 | /* If the user has flow control on, then we will | 285 | /* If the user has flow control on, then we will |
312 | * send a pause packet, otherwise just exit | 286 | * send a pause packet, otherwise just exit |
@@ -332,8 +306,6 @@ void et131x_isr_handler(struct work_struct *work) | |||
332 | 306 | ||
333 | /* Handle Packet Status Ring Low Interrupt */ | 307 | /* Handle Packet Status Ring Low Interrupt */ |
334 | if (status & ET_INTR_RXDMA_STAT_LOW) { | 308 | if (status & ET_INTR_RXDMA_STAT_LOW) { |
335 | DBG_WARNING(et131x_dbginfo, | ||
336 | "RXDMA_PKT_STAT_RING_LOW interrupt\n"); | ||
337 | 309 | ||
338 | /* | 310 | /* |
339 | * Same idea as with the two Free Buffer Rings. | 311 | * Same idea as with the two Free Buffer Rings. |
@@ -370,7 +342,7 @@ void et131x_isr_handler(struct work_struct *work) | |||
370 | 342 | ||
371 | etdev->TxMacTest.value = | 343 | etdev->TxMacTest.value = |
372 | readl(&iomem->txmac.tx_test.value); | 344 | readl(&iomem->txmac.tx_test.value); |
373 | DBG_WARNING(et131x_dbginfo, | 345 | dev_warn(&etdev->pdev->dev, |
374 | "RxDMA_ERR interrupt, error %x\n", | 346 | "RxDMA_ERR interrupt, error %x\n", |
375 | etdev->TxMacTest.value); | 347 | etdev->TxMacTest.value); |
376 | } | 348 | } |
@@ -384,7 +356,7 @@ void et131x_isr_handler(struct work_struct *work) | |||
384 | * message when we are in DBG mode, otherwise we | 356 | * message when we are in DBG mode, otherwise we |
385 | * will ignore it. | 357 | * will ignore it. |
386 | */ | 358 | */ |
387 | DBG_ERROR(et131x_dbginfo, "WAKE_ON_LAN interrupt\n"); | 359 | dev_err(&etdev->pdev->dev, "WAKE_ON_LAN interrupt\n"); |
388 | } | 360 | } |
389 | 361 | ||
390 | /* Handle the PHY interrupt */ | 362 | /* Handle the PHY interrupt */ |
@@ -393,8 +365,6 @@ void et131x_isr_handler(struct work_struct *work) | |||
393 | MI_BMSR_t BmsrInts, BmsrData; | 365 | MI_BMSR_t BmsrInts, BmsrData; |
394 | MI_ISR_t myIsr; | 366 | MI_ISR_t myIsr; |
395 | 367 | ||
396 | DBG_VERBOSE(et131x_dbginfo, "PHY interrupt\n"); | ||
397 | |||
398 | /* If we are in coma mode when we get this interrupt, | 368 | /* If we are in coma mode when we get this interrupt, |
399 | * we need to disable it. | 369 | * we need to disable it. |
400 | */ | 370 | */ |
@@ -405,9 +375,6 @@ void et131x_isr_handler(struct work_struct *work) | |||
405 | * so, disable it because we will not be able | 375 | * so, disable it because we will not be able |
406 | * to read PHY values until we are out. | 376 | * to read PHY values until we are out. |
407 | */ | 377 | */ |
408 | DBG_VERBOSE(et131x_dbginfo, | ||
409 | "Device is in COMA mode, " | ||
410 | "need to wake up\n"); | ||
411 | DisablePhyComa(etdev); | 378 | DisablePhyComa(etdev); |
412 | } | 379 | } |
413 | 380 | ||
@@ -426,11 +393,6 @@ void et131x_isr_handler(struct work_struct *work) | |||
426 | etdev->Bmsr.value ^ BmsrData.value; | 393 | etdev->Bmsr.value ^ BmsrData.value; |
427 | etdev->Bmsr.value = BmsrData.value; | 394 | etdev->Bmsr.value = BmsrData.value; |
428 | 395 | ||
429 | DBG_VERBOSE(et131x_dbginfo, | ||
430 | "Bmsr.value = 0x%04x," | ||
431 | "Bmsr_ints.value = 0x%04x\n", | ||
432 | BmsrData.value, BmsrInts.value); | ||
433 | |||
434 | /* Do all the cable in / cable out stuff */ | 396 | /* Do all the cable in / cable out stuff */ |
435 | et131x_Mii_check(etdev, BmsrData, BmsrInts); | 397 | et131x_Mii_check(etdev, BmsrData, BmsrInts); |
436 | } | 398 | } |
@@ -451,7 +413,7 @@ void et131x_isr_handler(struct work_struct *work) | |||
451 | * a nutshell, the whole Tx path will have to be reset | 413 | * a nutshell, the whole Tx path will have to be reset |
452 | * and re-configured afterwards. | 414 | * and re-configured afterwards. |
453 | */ | 415 | */ |
454 | DBG_WARNING(et131x_dbginfo, | 416 | dev_warn(&etdev->pdev->dev, |
455 | "TXMAC interrupt, error 0x%08x\n", | 417 | "TXMAC interrupt, error 0x%08x\n", |
456 | etdev->TxRing.TxMacErr.value); | 418 | etdev->TxRing.TxMacErr.value); |
457 | 419 | ||
@@ -459,7 +421,6 @@ void et131x_isr_handler(struct work_struct *work) | |||
459 | * otherwise we just want the device to be reset and | 421 | * otherwise we just want the device to be reset and |
460 | * continue | 422 | * continue |
461 | */ | 423 | */ |
462 | /* DBG_TRAP(); */ | ||
463 | } | 424 | } |
464 | 425 | ||
465 | /* Handle RXMAC Interrupt */ | 426 | /* Handle RXMAC Interrupt */ |
@@ -473,11 +434,11 @@ void et131x_isr_handler(struct work_struct *work) | |||
473 | /* MP_SET_FLAG( etdev, | 434 | /* MP_SET_FLAG( etdev, |
474 | fMP_ADAPTER_HARDWARE_ERROR); */ | 435 | fMP_ADAPTER_HARDWARE_ERROR); */ |
475 | 436 | ||
476 | DBG_WARNING(et131x_dbginfo, | 437 | dev_warn(&etdev->pdev->dev, |
477 | "RXMAC interrupt, error 0x%08x. Requesting reset\n", | 438 | "RXMAC interrupt, error 0x%08x. Requesting reset\n", |
478 | readl(&iomem->rxmac.err_reg.value)); | 439 | readl(&iomem->rxmac.err_reg.value)); |
479 | 440 | ||
480 | DBG_WARNING(et131x_dbginfo, | 441 | dev_warn(&etdev->pdev->dev, |
481 | "Enable 0x%08x, Diag 0x%08x\n", | 442 | "Enable 0x%08x, Diag 0x%08x\n", |
482 | readl(&iomem->rxmac.ctrl.value), | 443 | readl(&iomem->rxmac.ctrl.value), |
483 | readl(&iomem->rxmac.rxq_diag.value)); | 444 | readl(&iomem->rxmac.rxq_diag.value)); |
@@ -487,7 +448,6 @@ void et131x_isr_handler(struct work_struct *work) | |||
487 | * otherwise we just want the device to be reset and | 448 | * otherwise we just want the device to be reset and |
488 | * continue | 449 | * continue |
489 | */ | 450 | */ |
490 | /* TRAP(); */ | ||
491 | } | 451 | } |
492 | 452 | ||
493 | /* Handle MAC_STAT Interrupt */ | 453 | /* Handle MAC_STAT Interrupt */ |
@@ -498,7 +458,6 @@ void et131x_isr_handler(struct work_struct *work) | |||
498 | * to maintain the top, software managed bits of the | 458 | * to maintain the top, software managed bits of the |
499 | * counter(s). | 459 | * counter(s). |
500 | */ | 460 | */ |
501 | DBG_VERBOSE(et131x_dbginfo, "MAC_STAT interrupt\n"); | ||
502 | HandleMacStatInterrupt(etdev); | 461 | HandleMacStatInterrupt(etdev); |
503 | } | 462 | } |
504 | 463 | ||
@@ -513,7 +472,6 @@ void et131x_isr_handler(struct work_struct *work) | |||
513 | * addressed module is in a power-down state and | 472 | * addressed module is in a power-down state and |
514 | * can't respond. | 473 | * can't respond. |
515 | */ | 474 | */ |
516 | DBG_VERBOSE(et131x_dbginfo, "SLV_TIMEOUT interrupt\n"); | ||
517 | } | 475 | } |
518 | } | 476 | } |
519 | et131x_enable_interrupts(etdev); | 477 | et131x_enable_interrupts(etdev); |
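Stripped of its per-second statistics and verbose tracing, the interrupt path above keeps the usual shared-IRQ discipline: check that the device is present, read the status register, return IRQ_NONE when no bits are set so the line can be handed to the other sharers, and defer the real work to a scheduled handler. A condensed sketch of that shape; example_read_status() and example_schedule_work() are hypothetical stand-ins for the driver's register read and work item:

    #include <linux/interrupt.h>
    #include <linux/netdevice.h>

    static u32 example_read_status(struct net_device *netdev);     /* hypothetical */
    static void example_schedule_work(struct net_device *netdev);  /* hypothetical */

    static irqreturn_t example_isr(int irq, void *dev_id)
    {
            struct net_device *netdev = dev_id;
            u32 status;

            if (!netif_device_present(netdev))
                    return IRQ_NONE;

            status = example_read_status(netdev);
            if (!status)                    /* shared line, not our interrupt */
                    return IRQ_NONE;

            example_schedule_work(netdev);  /* heavy lifting runs outside hard-IRQ context */
            return IRQ_HANDLED;
    }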
diff --git a/drivers/staging/et131x/et131x_netdev.c b/drivers/staging/et131x/et131x_netdev.c index 2a4b9ac9539e..8c7612f63f91 100644 --- a/drivers/staging/et131x/et131x_netdev.c +++ b/drivers/staging/et131x/et131x_netdev.c | |||
@@ -56,7 +56,6 @@ | |||
56 | */ | 56 | */ |
57 | 57 | ||
58 | #include "et131x_version.h" | 58 | #include "et131x_version.h" |
59 | #include "et131x_debug.h" | ||
60 | #include "et131x_defs.h" | 59 | #include "et131x_defs.h" |
61 | 60 | ||
62 | #include <linux/init.h> | 61 | #include <linux/init.h> |
@@ -75,6 +74,7 @@ | |||
75 | #include <linux/delay.h> | 74 | #include <linux/delay.h> |
76 | #include <linux/io.h> | 75 | #include <linux/io.h> |
77 | #include <linux/bitops.h> | 76 | #include <linux/bitops.h> |
77 | #include <linux/pci.h> | ||
78 | #include <asm/system.h> | 78 | #include <asm/system.h> |
79 | 79 | ||
80 | #include <linux/mii.h> | 80 | #include <linux/mii.h> |
@@ -94,11 +94,6 @@ | |||
94 | #include "et131x_isr.h" | 94 | #include "et131x_isr.h" |
95 | #include "et131x_initpci.h" | 95 | #include "et131x_initpci.h" |
96 | 96 | ||
97 | /* Data for debugging facilities */ | ||
98 | #ifdef CONFIG_ET131X_DEBUG | ||
99 | extern dbg_info_t *et131x_dbginfo; | ||
100 | #endif /* CONFIG_ET131X_DEBUG */ | ||
101 | |||
102 | struct net_device_stats *et131x_stats(struct net_device *netdev); | 97 | struct net_device_stats *et131x_stats(struct net_device *netdev); |
103 | int et131x_open(struct net_device *netdev); | 98 | int et131x_open(struct net_device *netdev); |
104 | int et131x_close(struct net_device *netdev); | 99 | int et131x_close(struct net_device *netdev); |
@@ -138,15 +133,11 @@ struct net_device *et131x_device_alloc(void) | |||
138 | { | 133 | { |
139 | struct net_device *netdev; | 134 | struct net_device *netdev; |
140 | 135 | ||
141 | DBG_ENTER(et131x_dbginfo); | ||
142 | |||
143 | /* Alloc net_device and adapter structs */ | 136 | /* Alloc net_device and adapter structs */ |
144 | netdev = alloc_etherdev(sizeof(struct et131x_adapter)); | 137 | netdev = alloc_etherdev(sizeof(struct et131x_adapter)); |
145 | 138 | ||
146 | if (netdev == NULL) { | 139 | if (netdev == NULL) { |
147 | DBG_ERROR(et131x_dbginfo, | 140 | printk(KERN_ERR "et131x: Alloc of net_device struct failed\n"); |
148 | "Alloc of net_device struct failed\n"); | ||
149 | DBG_LEAVE(et131x_dbginfo); | ||
150 | return NULL; | 141 | return NULL; |
151 | } | 142 | } |
152 | 143 | ||
@@ -163,8 +154,6 @@ struct net_device *et131x_device_alloc(void) | |||
163 | /* Poll? */ | 154 | /* Poll? */ |
164 | /* netdev->poll = &et131x_poll; */ | 155 | /* netdev->poll = &et131x_poll; */ |
165 | /* netdev->poll_controller = &et131x_poll_controller; */ | 156 | /* netdev->poll_controller = &et131x_poll_controller; */ |
166 | |||
167 | DBG_LEAVE(et131x_dbginfo); | ||
168 | return netdev; | 157 | return netdev; |
169 | } | 158 | } |
170 | 159 | ||
@@ -180,8 +169,6 @@ struct net_device_stats *et131x_stats(struct net_device *netdev) | |||
180 | struct net_device_stats *stats = &adapter->net_stats; | 169 | struct net_device_stats *stats = &adapter->net_stats; |
181 | CE_STATS_t *devstat = &adapter->Stats; | 170 | CE_STATS_t *devstat = &adapter->Stats; |
182 | 171 | ||
183 | DBG_ENTER(et131x_dbginfo); | ||
184 | |||
185 | stats->rx_packets = devstat->ipackets; | 172 | stats->rx_packets = devstat->ipackets; |
186 | stats->tx_packets = devstat->opackets; | 173 | stats->tx_packets = devstat->opackets; |
187 | stats->rx_errors = devstat->length_err + devstat->alignment_err + | 174 | stats->rx_errors = devstat->length_err + devstat->alignment_err + |
@@ -213,8 +200,6 @@ struct net_device_stats *et131x_stats(struct net_device *netdev) | |||
213 | /* stats->tx_fifo_errors = devstat->; */ | 200 | /* stats->tx_fifo_errors = devstat->; */ |
214 | /* stats->tx_heartbeat_errors = devstat->; */ | 201 | /* stats->tx_heartbeat_errors = devstat->; */ |
215 | /* stats->tx_window_errors = devstat->; */ | 202 | /* stats->tx_window_errors = devstat->; */ |
216 | |||
217 | DBG_LEAVE(et131x_dbginfo); | ||
218 | return stats; | 203 | return stats; |
219 | } | 204 | } |
220 | 205 | ||
@@ -229,20 +214,15 @@ int et131x_open(struct net_device *netdev) | |||
229 | int result = 0; | 214 | int result = 0; |
230 | struct et131x_adapter *adapter = netdev_priv(netdev); | 215 | struct et131x_adapter *adapter = netdev_priv(netdev); |
231 | 216 | ||
232 | DBG_ENTER(et131x_dbginfo); | ||
233 | |||
234 | /* Start the timer to track NIC errors */ | 217 | /* Start the timer to track NIC errors */ |
235 | add_timer(&adapter->ErrorTimer); | 218 | add_timer(&adapter->ErrorTimer); |
236 | 219 | ||
237 | /* Register our ISR */ | 220 | /* Register our IRQ */ |
238 | DBG_TRACE(et131x_dbginfo, "Registering ISR...\n"); | 221 | result = request_irq(netdev->irq, et131x_isr, IRQF_SHARED, |
239 | 222 | netdev->name, netdev); | |
240 | result = | ||
241 | request_irq(netdev->irq, et131x_isr, IRQF_SHARED, netdev->name, | ||
242 | netdev); | ||
243 | if (result) { | 223 | if (result) { |
244 | DBG_ERROR(et131x_dbginfo, "Could not register ISR\n"); | 224 | dev_err(&adapter->pdev->dev, "Could not register IRQ %d\n", |
245 | DBG_LEAVE(et131x_dbginfo); | 225 | netdev->irq); |
246 | return result; | 226 | return result; |
247 | } | 227 | } |
248 | 228 | ||
@@ -257,8 +237,6 @@ int et131x_open(struct net_device *netdev) | |||
257 | 237 | ||
258 | /* We're ready to move some data, so start the queue */ | 238 | /* We're ready to move some data, so start the queue */ |
259 | netif_start_queue(netdev); | 239 | netif_start_queue(netdev); |
260 | |||
261 | DBG_LEAVE(et131x_dbginfo); | ||
262 | return result; | 240 | return result; |
263 | } | 241 | } |
264 | 242 | ||
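The open path above now calls request_irq() directly with IRQF_SHARED rather than routing the step through trace macros. As a hedged sketch of that request/start pairing in an ndo_open-style function (example_isr is a hypothetical handler, as in the earlier sketch):

    #include <linux/interrupt.h>
    #include <linux/netdevice.h>

    static irqreturn_t example_isr(int irq, void *dev_id);  /* hypothetical */

    static int example_open(struct net_device *netdev)
    {
            int err = request_irq(netdev->irq, example_isr, IRQF_SHARED,
                                  netdev->name, netdev);
            if (err)
                    return err;

            netif_start_queue(netdev);      /* ready to move data */
            return 0;
    }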
@@ -272,8 +250,6 @@ int et131x_close(struct net_device *netdev) | |||
272 | { | 250 | { |
273 | struct et131x_adapter *adapter = netdev_priv(netdev); | 251 | struct et131x_adapter *adapter = netdev_priv(netdev); |
274 | 252 | ||
275 | DBG_ENTER(et131x_dbginfo); | ||
276 | |||
277 | /* First thing is to stop the queue */ | 253 | /* First thing is to stop the queue */ |
278 | netif_stop_queue(netdev); | 254 | netif_stop_queue(netdev); |
279 | 255 | ||
@@ -286,14 +262,10 @@ int et131x_close(struct net_device *netdev) | |||
286 | 262 | ||
287 | /* Deregistering ISR */ | 263 | /* Deregistering ISR */ |
288 | adapter->Flags &= ~fMP_ADAPTER_INTERRUPT_IN_USE; | 264 | adapter->Flags &= ~fMP_ADAPTER_INTERRUPT_IN_USE; |
289 | |||
290 | DBG_TRACE(et131x_dbginfo, "Deregistering ISR...\n"); | ||
291 | free_irq(netdev->irq, netdev); | 265 | free_irq(netdev->irq, netdev); |
292 | 266 | ||
293 | /* Stop the error timer */ | 267 | /* Stop the error timer */ |
294 | del_timer_sync(&adapter->ErrorTimer); | 268 | del_timer_sync(&adapter->ErrorTimer); |
295 | |||
296 | DBG_LEAVE(et131x_dbginfo); | ||
297 | return 0; | 269 | return 0; |
298 | } | 270 | } |
299 | 271 | ||
@@ -311,39 +283,30 @@ int et131x_ioctl_mii(struct net_device *netdev, struct ifreq *reqbuf, int cmd) | |||
311 | struct et131x_adapter *etdev = netdev_priv(netdev); | 283 | struct et131x_adapter *etdev = netdev_priv(netdev); |
312 | struct mii_ioctl_data *data = if_mii(reqbuf); | 284 | struct mii_ioctl_data *data = if_mii(reqbuf); |
313 | 285 | ||
314 | DBG_ENTER(et131x_dbginfo); | ||
315 | |||
316 | switch (cmd) { | 286 | switch (cmd) { |
317 | case SIOCGMIIPHY: | 287 | case SIOCGMIIPHY: |
318 | DBG_VERBOSE(et131x_dbginfo, "SIOCGMIIPHY\n"); | ||
319 | data->phy_id = etdev->Stats.xcvr_addr; | 288 | data->phy_id = etdev->Stats.xcvr_addr; |
320 | break; | 289 | break; |
321 | 290 | ||
322 | case SIOCGMIIREG: | 291 | case SIOCGMIIREG: |
323 | DBG_VERBOSE(et131x_dbginfo, "SIOCGMIIREG\n"); | 292 | if (!capable(CAP_NET_ADMIN)) |
324 | if (!capable(CAP_NET_ADMIN)) { | ||
325 | status = -EPERM; | 293 | status = -EPERM; |
326 | } else { | 294 | else |
327 | status = MiRead(etdev, | 295 | status = MiRead(etdev, |
328 | data->reg_num, &data->val_out); | 296 | data->reg_num, &data->val_out); |
329 | } | ||
330 | break; | 297 | break; |
331 | 298 | ||
332 | case SIOCSMIIREG: | 299 | case SIOCSMIIREG: |
333 | DBG_VERBOSE(et131x_dbginfo, "SIOCSMIIREG\n"); | 300 | if (!capable(CAP_NET_ADMIN)) |
334 | if (!capable(CAP_NET_ADMIN)) { | ||
335 | status = -EPERM; | 301 | status = -EPERM; |
336 | } else { | 302 | else |
337 | status = MiWrite(etdev, data->reg_num, | 303 | status = MiWrite(etdev, data->reg_num, |
338 | data->val_in); | 304 | data->val_in); |
339 | } | ||
340 | break; | 305 | break; |
341 | 306 | ||
342 | default: | 307 | default: |
343 | status = -EOPNOTSUPP; | 308 | status = -EOPNOTSUPP; |
344 | } | 309 | } |
345 | |||
346 | DBG_LEAVE(et131x_dbginfo); | ||
347 | return status; | 310 | return status; |
348 | } | 311 | } |
349 | 312 | ||
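With the DBG_VERBOSE lines gone, the MII ioctl reduces to a capability check plus a PHY register read or write. A minimal sketch of that shape, assuming hypothetical my_phy_read()/my_phy_write() helpers in place of the driver's MiRead()/MiWrite():

	#include <linux/capability.h>
	#include <linux/errno.h>
	#include <linux/if.h>
	#include <linux/mii.h>
	#include <linux/netdevice.h>
	#include <linux/sockios.h>

	/* Hypothetical PHY accessors standing in for the driver's MiRead()/MiWrite(). */
	int my_phy_read(struct net_device *netdev, u16 reg, u16 *value);
	int my_phy_write(struct net_device *netdev, u16 reg, u16 value);

	static int my_mii_ioctl(struct net_device *netdev, struct ifreq *reqbuf, int cmd)
	{
		struct mii_ioctl_data *data = if_mii(reqbuf);

		switch (cmd) {
		case SIOCGMIIPHY:
			data->phy_id = 0;	/* the driver reports its stored PHY address */
			return 0;
		case SIOCGMIIREG:
			if (!capable(CAP_NET_ADMIN))
				return -EPERM;	/* register reads are privileged here */
			return my_phy_read(netdev, data->reg_num, &data->val_out);
		case SIOCSMIIREG:
			if (!capable(CAP_NET_ADMIN))
				return -EPERM;
			return my_phy_write(netdev, data->reg_num, data->val_in);
		default:
			return -EOPNOTSUPP;
		}
	}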
@@ -359,8 +322,6 @@ int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf, int cmd) | |||
359 | { | 322 | { |
360 | int status = 0; | 323 | int status = 0; |
361 | 324 | ||
362 | DBG_ENTER(et131x_dbginfo); | ||
363 | |||
364 | switch (cmd) { | 325 | switch (cmd) { |
365 | case SIOCGMIIPHY: | 326 | case SIOCGMIIPHY: |
366 | case SIOCGMIIREG: | 327 | case SIOCGMIIREG: |
@@ -369,12 +330,8 @@ int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf, int cmd) | |||
369 | break; | 330 | break; |
370 | 331 | ||
371 | default: | 332 | default: |
372 | DBG_WARNING(et131x_dbginfo, "Unhandled IOCTL Code: 0x%04x\n", | ||
373 | cmd); | ||
374 | status = -EOPNOTSUPP; | 333 | status = -EOPNOTSUPP; |
375 | } | 334 | } |
376 | |||
377 | DBG_LEAVE(et131x_dbginfo); | ||
378 | return status; | 335 | return status; |
379 | } | 336 | } |
380 | 337 | ||
@@ -391,8 +348,6 @@ int et131x_set_packet_filter(struct et131x_adapter *adapter) | |||
391 | RXMAC_CTRL_t ctrl; | 348 | RXMAC_CTRL_t ctrl; |
392 | RXMAC_PF_CTRL_t pf_ctrl; | 349 | RXMAC_PF_CTRL_t pf_ctrl; |
393 | 350 | ||
394 | DBG_ENTER(et131x_dbginfo); | ||
395 | |||
396 | ctrl.value = readl(&adapter->regs->rxmac.ctrl.value); | 351 | ctrl.value = readl(&adapter->regs->rxmac.ctrl.value); |
397 | pf_ctrl.value = readl(&adapter->regs->rxmac.pf_ctrl.value); | 352 | pf_ctrl.value = readl(&adapter->regs->rxmac.pf_ctrl.value); |
398 | 353 | ||
@@ -415,12 +370,8 @@ int et131x_set_packet_filter(struct et131x_adapter *adapter) | |||
415 | * multicast entries or (3) we receive none. | 370 | * multicast entries or (3) we receive none. |
416 | */ | 371 | */ |
417 | if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST) { | 372 | if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST) { |
418 | DBG_VERBOSE(et131x_dbginfo, | ||
419 | "Multicast filtering OFF (Rx ALL MULTICAST)\n"); | ||
420 | pf_ctrl.bits.filter_multi_en = 0; | 373 | pf_ctrl.bits.filter_multi_en = 0; |
421 | } else { | 374 | } else { |
422 | DBG_VERBOSE(et131x_dbginfo, | ||
423 | "Multicast filtering ON\n"); | ||
424 | SetupDeviceForMulticast(adapter); | 375 | SetupDeviceForMulticast(adapter); |
425 | pf_ctrl.bits.filter_multi_en = 1; | 376 | pf_ctrl.bits.filter_multi_en = 1; |
426 | ctrl.bits.pkt_filter_disable = 0; | 377 | ctrl.bits.pkt_filter_disable = 0; |
@@ -428,7 +379,6 @@ int et131x_set_packet_filter(struct et131x_adapter *adapter) | |||
428 | 379 | ||
429 | /* Set us up with Unicast packet filtering */ | 380 | /* Set us up with Unicast packet filtering */ |
430 | if (filter & ET131X_PACKET_TYPE_DIRECTED) { | 381 | if (filter & ET131X_PACKET_TYPE_DIRECTED) { |
431 | DBG_VERBOSE(et131x_dbginfo, "Unicast Filtering ON\n"); | ||
432 | SetupDeviceForUnicast(adapter); | 382 | SetupDeviceForUnicast(adapter); |
433 | pf_ctrl.bits.filter_uni_en = 1; | 383 | pf_ctrl.bits.filter_uni_en = 1; |
434 | ctrl.bits.pkt_filter_disable = 0; | 384 | ctrl.bits.pkt_filter_disable = 0; |
@@ -436,12 +386,9 @@ int et131x_set_packet_filter(struct et131x_adapter *adapter) | |||
436 | 386 | ||
437 | /* Set us up with Broadcast packet filtering */ | 387 | /* Set us up with Broadcast packet filtering */ |
438 | if (filter & ET131X_PACKET_TYPE_BROADCAST) { | 388 | if (filter & ET131X_PACKET_TYPE_BROADCAST) { |
439 | DBG_VERBOSE(et131x_dbginfo, "Broadcast Filtering ON\n"); | ||
440 | pf_ctrl.bits.filter_broad_en = 1; | 389 | pf_ctrl.bits.filter_broad_en = 1; |
441 | ctrl.bits.pkt_filter_disable = 0; | 390 | ctrl.bits.pkt_filter_disable = 0; |
442 | } else { | 391 | } else { |
443 | DBG_VERBOSE(et131x_dbginfo, | ||
444 | "Broadcast Filtering OFF\n"); | ||
445 | pf_ctrl.bits.filter_broad_en = 0; | 392 | pf_ctrl.bits.filter_broad_en = 0; |
446 | } | 393 | } |
447 | 394 | ||
@@ -453,8 +400,6 @@ int et131x_set_packet_filter(struct et131x_adapter *adapter) | |||
453 | &adapter->regs->rxmac.pf_ctrl.value); | 400 | &adapter->regs->rxmac.pf_ctrl.value); |
454 | writel(ctrl.value, &adapter->regs->rxmac.ctrl.value); | 401 | writel(ctrl.value, &adapter->regs->rxmac.ctrl.value); |
455 | } | 402 | } |
456 | |||
457 | DBG_LEAVE(et131x_dbginfo); | ||
458 | return status; | 403 | return status; |
459 | } | 404 | } |
460 | 405 | ||
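et131x_set_packet_filter() is a read-modify-write of the two RXMAC control registers: read ctrl and pf_ctrl with readl(), flip the multicast/unicast/broadcast enable bits according to the filter mask, and write the result back with writel(). A sketch of that pattern on a hypothetical memory-mapped register; the bit positions below are invented for illustration, the real hardware layout uses the driver's bitfield structs:

	#include <linux/bitops.h>
	#include <linux/io.h>
	#include <linux/types.h>

	/* Invented bit positions for illustration only. */
	#define PF_FILTER_MULTI_EN	BIT(0)
	#define PF_FILTER_UNI_EN	BIT(1)
	#define PF_FILTER_BROAD_EN	BIT(2)

	static void apply_packet_filter(void __iomem *pf_ctrl_reg,
					bool all_multicast, bool unicast, bool broadcast)
	{
		u32 pf_ctrl = readl(pf_ctrl_reg);	/* current hardware state */

		/* "Accept all multicast" means turning the multicast filter off. */
		if (all_multicast)
			pf_ctrl &= ~PF_FILTER_MULTI_EN;
		else
			pf_ctrl |= PF_FILTER_MULTI_EN;

		if (unicast)
			pf_ctrl |= PF_FILTER_UNI_EN;

		if (broadcast)
			pf_ctrl |= PF_FILTER_BROAD_EN;
		else
			pf_ctrl &= ~PF_FILTER_BROAD_EN;

		writel(pf_ctrl, pf_ctrl_reg);	/* commit the new filter settings */
	}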
@@ -470,8 +415,6 @@ void et131x_multicast(struct net_device *netdev) | |||
470 | unsigned long flags; | 415 | unsigned long flags; |
471 | struct dev_mc_list *mclist = netdev->mc_list; | 416 | struct dev_mc_list *mclist = netdev->mc_list; |
472 | 417 | ||
473 | DBG_ENTER(et131x_dbginfo); | ||
474 | |||
475 | spin_lock_irqsave(&adapter->Lock, flags); | 418 | spin_lock_irqsave(&adapter->Lock, flags); |
476 | 419 | ||
477 | /* Before we modify the platform-independent filter flags, store them | 420 | /* Before we modify the platform-independent filter flags, store them |
@@ -490,35 +433,25 @@ void et131x_multicast(struct net_device *netdev) | |||
490 | /* Check the net_device flags and set the device independent flags | 433 | /* Check the net_device flags and set the device independent flags |
491 | * accordingly | 434 | * accordingly |
492 | */ | 435 | */ |
493 | DBG_VERBOSE(et131x_dbginfo, | ||
494 | "MULTICAST ADDR COUNT: %d\n", netdev->mc_count); | ||
495 | 436 | ||
496 | if (netdev->flags & IFF_PROMISC) { | 437 | if (netdev->flags & IFF_PROMISC) { |
497 | DBG_VERBOSE(et131x_dbginfo, "Request: PROMISCUOUS MODE ON\n"); | ||
498 | adapter->PacketFilter |= ET131X_PACKET_TYPE_PROMISCUOUS; | 438 | adapter->PacketFilter |= ET131X_PACKET_TYPE_PROMISCUOUS; |
499 | } else { | 439 | } else { |
500 | DBG_VERBOSE(et131x_dbginfo, "Request: PROMISCUOUS MODE OFF\n"); | ||
501 | adapter->PacketFilter &= ~ET131X_PACKET_TYPE_PROMISCUOUS; | 440 | adapter->PacketFilter &= ~ET131X_PACKET_TYPE_PROMISCUOUS; |
502 | } | 441 | } |
503 | 442 | ||
504 | if (netdev->flags & IFF_ALLMULTI) { | 443 | if (netdev->flags & IFF_ALLMULTI) { |
505 | DBG_VERBOSE(et131x_dbginfo, "Request: ACCEPT ALL MULTICAST\n"); | ||
506 | adapter->PacketFilter |= ET131X_PACKET_TYPE_ALL_MULTICAST; | 444 | adapter->PacketFilter |= ET131X_PACKET_TYPE_ALL_MULTICAST; |
507 | } | 445 | } |
508 | 446 | ||
509 | if (netdev->mc_count > NIC_MAX_MCAST_LIST) { | 447 | if (netdev->mc_count > NIC_MAX_MCAST_LIST) { |
510 | DBG_WARNING(et131x_dbginfo, | ||
511 | "ACCEPT ALL MULTICAST for now, as there's more Multicast addresses than the HW supports\n"); | ||
512 | adapter->PacketFilter |= ET131X_PACKET_TYPE_ALL_MULTICAST; | 448 | adapter->PacketFilter |= ET131X_PACKET_TYPE_ALL_MULTICAST; |
513 | } | 449 | } |
514 | 450 | ||
515 | if (netdev->mc_count < 1) { | 451 | if (netdev->mc_count < 1) { |
516 | DBG_VERBOSE(et131x_dbginfo, "Request: REJECT ALL MULTICAST\n"); | ||
517 | adapter->PacketFilter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST; | 452 | adapter->PacketFilter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST; |
518 | adapter->PacketFilter &= ~ET131X_PACKET_TYPE_MULTICAST; | 453 | adapter->PacketFilter &= ~ET131X_PACKET_TYPE_MULTICAST; |
519 | } else { | 454 | } else { |
520 | DBG_VERBOSE(et131x_dbginfo, | ||
521 | "Request: SET MULTICAST FILTER(S)\n"); | ||
522 | adapter->PacketFilter |= ET131X_PACKET_TYPE_MULTICAST; | 455 | adapter->PacketFilter |= ET131X_PACKET_TYPE_MULTICAST; |
523 | } | 456 | } |
524 | 457 | ||
@@ -526,14 +459,8 @@ void et131x_multicast(struct net_device *netdev) | |||
526 | adapter->MCAddressCount = netdev->mc_count; | 459 | adapter->MCAddressCount = netdev->mc_count; |
527 | 460 | ||
528 | if (netdev->mc_count) { | 461 | if (netdev->mc_count) { |
529 | if (mclist->dmi_addrlen != ETH_ALEN) | 462 | count = netdev->mc_count - 1; |
530 | DBG_WARNING(et131x_dbginfo, | 463 | memcpy(adapter->MCList[count], mclist->dmi_addr, ETH_ALEN); |
531 | "Multicast addrs are not ETH_ALEN in size\n"); | ||
532 | else { | ||
533 | count = netdev->mc_count - 1; | ||
534 | memcpy(adapter->MCList[count], mclist->dmi_addr, | ||
535 | ETH_ALEN); | ||
536 | } | ||
537 | } | 464 | } |
538 | 465 | ||
539 | /* Are the new flags different from the previous ones? If not, then no | 466 | /* Are the new flags different from the previous ones? If not, then no |
@@ -544,17 +471,9 @@ void et131x_multicast(struct net_device *netdev) | |||
544 | */ | 471 | */ |
545 | if (PacketFilter != adapter->PacketFilter) { | 472 | if (PacketFilter != adapter->PacketFilter) { |
546 | /* Call the device's filter function */ | 473 | /* Call the device's filter function */ |
547 | DBG_VERBOSE(et131x_dbginfo, "UPDATE REQUIRED, FLAGS changed\n"); | ||
548 | |||
549 | et131x_set_packet_filter(adapter); | 474 | et131x_set_packet_filter(adapter); |
550 | } else { | ||
551 | DBG_VERBOSE(et131x_dbginfo, | ||
552 | "NO UPDATE REQUIRED, FLAGS didn't change\n"); | ||
553 | } | 475 | } |
554 | |||
555 | spin_unlock_irqrestore(&adapter->Lock, flags); | 476 | spin_unlock_irqrestore(&adapter->Lock, flags); |
556 | |||
557 | DBG_LEAVE(et131x_dbginfo); | ||
558 | } | 477 | } |
559 | 478 | ||
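The multicast handler translates net_device flags into the driver's platform-independent filter word: IFF_PROMISC sets the promiscuous bit, IFF_ALLMULTI (or more addresses than the hardware list holds) forces all-multicast, and a non-empty list enables per-address filtering. A hedged sketch of that mapping, with invented flag constants mirroring the ET131X_PACKET_TYPE_* bits and an assumed hardware table size:

	#include <linux/if.h>
	#include <linux/netdevice.h>
	#include <linux/types.h>

	/* Invented filter bits mirroring the ET131X_PACKET_TYPE_* flags. */
	#define FILTER_PROMISCUOUS	0x01
	#define FILTER_ALL_MULTICAST	0x02
	#define FILTER_MULTICAST	0x04

	#define MAX_HW_MCAST_LIST	32	/* assumed size of the hardware address table */

	static u32 build_packet_filter(struct net_device *netdev, u32 filter)
	{
		if (netdev->flags & IFF_PROMISC)
			filter |= FILTER_PROMISCUOUS;
		else
			filter &= ~FILTER_PROMISCUOUS;

		if (netdev->flags & IFF_ALLMULTI)
			filter |= FILTER_ALL_MULTICAST;

		/* Too many addresses for the hardware table: accept all multicast. */
		if (netdev->mc_count > MAX_HW_MCAST_LIST)
			filter |= FILTER_ALL_MULTICAST;

		if (netdev->mc_count < 1)
			filter &= ~(FILTER_ALL_MULTICAST | FILTER_MULTICAST);
		else
			filter |= FILTER_MULTICAST;

		return filter;
	}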
560 | /** | 479 | /** |
@@ -568,8 +487,6 @@ int et131x_tx(struct sk_buff *skb, struct net_device *netdev) | |||
568 | { | 487 | { |
569 | int status = 0; | 488 | int status = 0; |
570 | 489 | ||
571 | DBG_TX_ENTER(et131x_dbginfo); | ||
572 | |||
573 | /* Save the timestamp for the TX timeout watchdog */ | 490 | /* Save the timestamp for the TX timeout watchdog */ |
574 | netdev->trans_start = jiffies; | 491 | netdev->trans_start = jiffies; |
575 | 492 | ||
@@ -579,22 +496,15 @@ int et131x_tx(struct sk_buff *skb, struct net_device *netdev) | |||
579 | /* Check status and manage the netif queue if necessary */ | 496 | /* Check status and manage the netif queue if necessary */ |
580 | if (status != 0) { | 497 | if (status != 0) { |
581 | if (status == -ENOMEM) { | 498 | if (status == -ENOMEM) { |
582 | DBG_VERBOSE(et131x_dbginfo, | ||
583 | "OUT OF TCBs; STOP NETIF QUEUE\n"); | ||
584 | |||
585 | /* Put the queue to sleep until resources are | 499 | /* Put the queue to sleep until resources are |
586 | * available | 500 | * available |
587 | */ | 501 | */ |
588 | netif_stop_queue(netdev); | 502 | netif_stop_queue(netdev); |
589 | status = NETDEV_TX_BUSY; | 503 | status = NETDEV_TX_BUSY; |
590 | } else { | 504 | } else { |
591 | DBG_WARNING(et131x_dbginfo, | ||
592 | "Misc error; drop packet\n"); | ||
593 | status = NETDEV_TX_OK; | 505 | status = NETDEV_TX_OK; |
594 | } | 506 | } |
595 | } | 507 | } |
596 | |||
597 | DBG_TX_LEAVE(et131x_dbginfo); | ||
598 | return status; | 508 | return status; |
599 | } | 509 | } |
600 | 510 | ||
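The transmit path keeps only the behaviour the stack cares about: on -ENOMEM (no free TCBs) it stops the queue and returns NETDEV_TX_BUSY so the frame is retried; any other failure is reported as NETDEV_TX_OK and the frame is dropped. A minimal sketch of that error policy, with a hypothetical send_packet() prototype standing in for et131x_send_packets():

	#include <linux/errno.h>
	#include <linux/jiffies.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* Stand-in for et131x_send_packets(): hands the frame to the TX DMA ring. */
	int send_packet(struct sk_buff *skb, struct net_device *netdev);

	static int my_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
	{
		int status;

		netdev->trans_start = jiffies;	/* feed the TX-timeout watchdog */

		status = send_packet(skb, netdev);
		if (status == -ENOMEM) {
			/* Out of TCBs: pause the queue and ask the stack to retry. */
			netif_stop_queue(netdev);
			return NETDEV_TX_BUSY;
		}

		/* Success, or a miscellaneous error where the frame is dropped. */
		return NETDEV_TX_OK;
	}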
@@ -612,25 +522,19 @@ void et131x_tx_timeout(struct net_device *netdev) | |||
612 | PMP_TCB pMpTcb; | 522 | PMP_TCB pMpTcb; |
613 | unsigned long flags; | 523 | unsigned long flags; |
614 | 524 | ||
615 | DBG_WARNING(et131x_dbginfo, "TX TIMEOUT\n"); | ||
616 | |||
617 | /* Just skip this part if the adapter is doing link detection */ | 525 | /* Just skip this part if the adapter is doing link detection */ |
618 | if (etdev->Flags & fMP_ADAPTER_LINK_DETECTION) { | 526 | if (etdev->Flags & fMP_ADAPTER_LINK_DETECTION) |
619 | DBG_ERROR(et131x_dbginfo, "Still doing link detection\n"); | ||
620 | return; | 527 | return; |
621 | } | ||
622 | 528 | ||
623 | /* Any nonrecoverable hardware error? | 529 | /* Any nonrecoverable hardware error? |
624 | * Checks adapter->flags for any failure in phy reading | 530 | * Checks adapter->flags for any failure in phy reading |
625 | */ | 531 | */ |
626 | if (etdev->Flags & fMP_ADAPTER_NON_RECOVER_ERROR) { | 532 | if (etdev->Flags & fMP_ADAPTER_NON_RECOVER_ERROR) |
627 | DBG_WARNING(et131x_dbginfo, "Non recoverable error - remove\n"); | ||
628 | return; | 533 | return; |
629 | } | ||
630 | 534 | ||
631 | /* Hardware failure? */ | 535 | /* Hardware failure? */ |
632 | if (etdev->Flags & fMP_ADAPTER_HARDWARE_ERROR) { | 536 | if (etdev->Flags & fMP_ADAPTER_HARDWARE_ERROR) { |
633 | DBG_WARNING(et131x_dbginfo, "hardware error - reset\n"); | 537 | dev_err(&etdev->pdev->dev, "hardware error - reset\n"); |
634 | return; | 538 | return; |
635 | } | 539 | } |
636 | 540 | ||
@@ -643,13 +547,6 @@ void et131x_tx_timeout(struct net_device *netdev) | |||
643 | pMpTcb->Count++; | 547 | pMpTcb->Count++; |
644 | 548 | ||
645 | if (pMpTcb->Count > NIC_SEND_HANG_THRESHOLD) { | 549 | if (pMpTcb->Count > NIC_SEND_HANG_THRESHOLD) { |
646 | #ifdef CONFIG_ET131X_DEBUG | ||
647 | TX_STATUS_BLOCK_t txDmaComplete = | ||
648 | *(etdev->TxRing.pTxStatusVa); | ||
649 | PTX_DESC_ENTRY_t pDesc = | ||
650 | etdev->TxRing.pTxDescRingVa + | ||
651 | INDEX10(pMpTcb->WrIndex); | ||
652 | #endif | ||
653 | TX_DESC_ENTRY_t StuckDescriptors[10]; | 550 | TX_DESC_ENTRY_t StuckDescriptors[10]; |
654 | 551 | ||
655 | if (INDEX10(pMpTcb->WrIndex) > 7) { | 552 | if (INDEX10(pMpTcb->WrIndex) > 7) { |
@@ -662,26 +559,11 @@ void et131x_tx_timeout(struct net_device *netdev) | |||
662 | spin_unlock_irqrestore(&etdev->TCBSendQLock, | 559 | spin_unlock_irqrestore(&etdev->TCBSendQLock, |
663 | flags); | 560 | flags); |
664 | 561 | ||
665 | DBG_WARNING(et131x_dbginfo, | 562 | dev_warn(&etdev->pdev->dev, |
666 | "Send stuck - reset. pMpTcb->WrIndex %x, Flags 0x%08x\n", | 563 | "Send stuck - reset. pMpTcb->WrIndex %x, Flags 0x%08x\n", |
667 | pMpTcb->WrIndex, | 564 | pMpTcb->WrIndex, |
668 | pMpTcb->Flags); | 565 | pMpTcb->Flags); |
669 | 566 | ||
670 | DBG_WARNING(et131x_dbginfo, | ||
671 | "pDesc 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", | ||
672 | pDesc->DataBufferPtrHigh, | ||
673 | pDesc->DataBufferPtrLow, pDesc->word2.value, | ||
674 | pDesc->word3.value); | ||
675 | |||
676 | DBG_WARNING(et131x_dbginfo, | ||
677 | "WbStatus 0x%08x\n", txDmaComplete.value); | ||
678 | |||
679 | #ifdef CONFIG_ET131X_DEBUG | ||
680 | DumpDeviceBlock(DBG_WARNING_ON, etdev, 0); | ||
681 | DumpDeviceBlock(DBG_WARNING_ON, etdev, 1); | ||
682 | DumpDeviceBlock(DBG_WARNING_ON, etdev, 3); | ||
683 | DumpDeviceBlock(DBG_WARNING_ON, etdev, 5); | ||
684 | #endif | ||
685 | et131x_close(netdev); | 567 | et131x_close(netdev); |
686 | et131x_open(netdev); | 568 | et131x_open(netdev); |
687 | 569 | ||
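With the descriptor dumps removed, the timeout handler's recovery policy is easier to see: skip recovery while link detection is running or after a non-recoverable error, report a hardware error with dev_err(), and when the oldest TCB has been retried past a hang threshold, log one dev_warn() and bounce the interface through its own close/open pair. A compressed sketch of that decision ladder; the flag bits, threshold value and my_ndo_* names are placeholders:

	#include <linux/netdevice.h>
	#include <linux/pci.h>
	#include <linux/types.h>

	#define SEND_HANG_THRESHOLD	5	/* placeholder for NIC_SEND_HANG_THRESHOLD */

	#define FLAG_LINK_DETECTION	0x1	/* placeholder state bits */
	#define FLAG_NON_RECOVERABLE	0x2
	#define FLAG_HARDWARE_ERROR	0x4

	struct my_priv {
		struct pci_dev *pdev;
		u32 flags;
		u32 stuck_count;	/* retries seen on the oldest pending TCB */
	};

	int my_ndo_open(struct net_device *netdev);	/* the driver's own open/stop */
	int my_ndo_stop(struct net_device *netdev);

	static void my_tx_timeout(struct net_device *netdev)
	{
		struct my_priv *priv = netdev_priv(netdev);

		/* No recovery while link detection runs or after a fatal error. */
		if (priv->flags & (FLAG_LINK_DETECTION | FLAG_NON_RECOVERABLE))
			return;

		if (priv->flags & FLAG_HARDWARE_ERROR) {
			dev_err(&priv->pdev->dev, "hardware error - reset\n");
			return;
		}

		if (++priv->stuck_count > SEND_HANG_THRESHOLD) {
			dev_warn(&priv->pdev->dev, "send stuck - resetting interface\n");
			my_ndo_stop(netdev);	/* mirrors the close()/open() bounce above */
			my_ndo_open(netdev);
		}
	}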
@@ -704,13 +586,9 @@ int et131x_change_mtu(struct net_device *netdev, int new_mtu) | |||
704 | int result = 0; | 586 | int result = 0; |
705 | struct et131x_adapter *adapter = netdev_priv(netdev); | 587 | struct et131x_adapter *adapter = netdev_priv(netdev); |
706 | 588 | ||
707 | DBG_ENTER(et131x_dbginfo); | ||
708 | |||
709 | /* Make sure the requested MTU is valid */ | 589 | /* Make sure the requested MTU is valid */ |
710 | if (new_mtu == 0 || new_mtu > 9216) { | 590 | if (new_mtu < 64 || new_mtu > 9216) |
711 | DBG_LEAVE(et131x_dbginfo); | ||
712 | return -EINVAL; | 591 | return -EINVAL; |
713 | } | ||
714 | 592 | ||
715 | /* Stop the netif queue */ | 593 | /* Stop the netif queue */ |
716 | netif_stop_queue(netdev); | 594 | netif_stop_queue(netdev); |
@@ -737,7 +615,7 @@ int et131x_change_mtu(struct net_device *netdev, int new_mtu) | |||
737 | /* Alloc and init Rx DMA memory */ | 615 | /* Alloc and init Rx DMA memory */ |
738 | result = et131x_adapter_memory_alloc(adapter); | 616 | result = et131x_adapter_memory_alloc(adapter); |
739 | if (result != 0) { | 617 | if (result != 0) { |
740 | DBG_WARNING(et131x_dbginfo, | 618 | dev_warn(&adapter->pdev->dev, |
741 | "Change MTU failed; couldn't re-alloc DMA memory\n"); | 619 | "Change MTU failed; couldn't re-alloc DMA memory\n"); |
742 | return result; | 620 | return result; |
743 | } | 621 | } |
@@ -760,8 +638,6 @@ int et131x_change_mtu(struct net_device *netdev, int new_mtu) | |||
760 | 638 | ||
761 | /* Restart the netif queue */ | 639 | /* Restart the netif queue */ |
762 | netif_wake_queue(netdev); | 640 | netif_wake_queue(netdev); |
763 | |||
764 | DBG_LEAVE(et131x_dbginfo); | ||
765 | return result; | 641 | return result; |
766 | } | 642 | } |
767 | 643 | ||
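Changing the MTU on this hardware means tearing down and rebuilding the receive DMA memory, so the function brackets the reallocation with a queue stop/wake and validates the requested size first (64 to 9216 bytes, the chip's jumbo limit). A hedged outline of that sequence, with placeholder hw_* helpers standing in for the driver's disable, free, re-alloc and re-init steps:

	#include <linux/errno.h>
	#include <linux/netdevice.h>

	/* Placeholders for the driver's teardown/bring-up helpers. */
	void hw_rx_dma_disable(struct net_device *netdev);
	int hw_rx_dma_realloc(struct net_device *netdev);
	void hw_rx_dma_enable(struct net_device *netdev);

	static int my_change_mtu(struct net_device *netdev, int new_mtu)
	{
		int result;

		/* The chip handles frames from 64 bytes up to 9216-byte jumbos. */
		if (new_mtu < 64 || new_mtu > 9216)
			return -EINVAL;

		netif_stop_queue(netdev);	/* quiesce TX while the rings are rebuilt */
		hw_rx_dma_disable(netdev);

		netdev->mtu = new_mtu;

		result = hw_rx_dma_realloc(netdev);	/* ring/buffer sizes track the MTU */
		if (result)
			return result;	/* the driver warns via dev_warn() here */

		hw_rx_dma_enable(netdev);
		netif_wake_queue(netdev);
		return 0;
	}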
@@ -780,20 +656,14 @@ int et131x_set_mac_addr(struct net_device *netdev, void *new_mac) | |||
780 | struct et131x_adapter *adapter = netdev_priv(netdev); | 656 | struct et131x_adapter *adapter = netdev_priv(netdev); |
781 | struct sockaddr *address = new_mac; | 657 | struct sockaddr *address = new_mac; |
782 | 658 | ||
783 | DBG_ENTER(et131x_dbginfo); | ||
784 | /* begin blux */ | 659 | /* begin blux */ |
785 | /* DBG_VERBOSE( et131x_dbginfo, "Function not implemented!!\n" ); */ | ||
786 | 660 | ||
787 | if (adapter == NULL) { | 661 | if (adapter == NULL) |
788 | DBG_LEAVE(et131x_dbginfo); | ||
789 | return -ENODEV; | 662 | return -ENODEV; |
790 | } | ||
791 | 663 | ||
792 | /* Make sure the requested MAC is valid */ | 664 | /* Make sure the requested MAC is valid */ |
793 | if (!is_valid_ether_addr(address->sa_data)) { | 665 | if (!is_valid_ether_addr(address->sa_data)) |
794 | DBG_LEAVE(et131x_dbginfo); | ||
795 | return -EINVAL; | 666 | return -EINVAL; |
796 | } | ||
797 | 667 | ||
798 | /* Stop the netif queue */ | 668 | /* Stop the netif queue */ |
799 | netif_stop_queue(netdev); | 669 | netif_stop_queue(netdev); |
@@ -832,7 +702,7 @@ int et131x_set_mac_addr(struct net_device *netdev, void *new_mac) | |||
832 | /* Alloc and init Rx DMA memory */ | 702 | /* Alloc and init Rx DMA memory */ |
833 | result = et131x_adapter_memory_alloc(adapter); | 703 | result = et131x_adapter_memory_alloc(adapter); |
834 | if (result != 0) { | 704 | if (result != 0) { |
835 | DBG_WARNING(et131x_dbginfo, | 705 | dev_err(&adapter->pdev->dev, |
836 | "Change MAC failed; couldn't re-alloc DMA memory\n"); | 706 | "Change MAC failed; couldn't re-alloc DMA memory\n"); |
837 | return result; | 707 | return result; |
838 | } | 708 | } |
@@ -856,7 +726,5 @@ int et131x_set_mac_addr(struct net_device *netdev, void *new_mac) | |||
856 | 726 | ||
857 | /* Restart the netif queue */ | 727 | /* Restart the netif queue */ |
858 | netif_wake_queue(netdev); | 728 | netif_wake_queue(netdev); |
859 | |||
860 | DBG_LEAVE(et131x_dbginfo); | ||
861 | return result; | 729 | return result; |
862 | } | 730 | } |
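The MAC-change path follows the same quiesce/reprogram/restart shape as the MTU change, after first rejecting invalid addresses with is_valid_ether_addr(). A minimal sketch of the validation and the copy into dev_addr; the hardware reprogramming and DMA rebuild that the driver performs in between are elided:

	#include <linux/errno.h>
	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>
	#include <linux/socket.h>
	#include <linux/string.h>

	static int my_set_mac_addr(struct net_device *netdev, void *new_mac)
	{
		struct sockaddr *address = new_mac;

		/* is_valid_ether_addr() rejects multicast and all-zero addresses. */
		if (!is_valid_ether_addr(address->sa_data))
			return -EINVAL;

		netif_stop_queue(netdev);

		/* Adopt the new station address; the real driver then rebuilds its
		 * DMA memory and reprograms the MAC before restarting the queue. */
		memcpy(netdev->dev_addr, address->sa_data, netdev->addr_len);

		netif_wake_queue(netdev);
		return 0;
	}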