Diffstat (limited to 'drivers/net/qla3xxx.c')
 drivers/net/qla3xxx.c (mode changed -rwxr-xr-x -> -rw-r--r--) | 128
 1 file changed, 64 insertions(+), 64 deletions(-)
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
old mode 100755
new mode 100644
index 48069ece4e51..30adf726743c
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -81,7 +81,7 @@ typedef enum {
} PHY_DEVICE_et;

typedef struct {
	PHY_DEVICE_et phyDevice;
	u32 phyIdOUI;
	u16 phyIdModel;
	char *name;
@@ -330,7 +330,7 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
					 PCI_DMA_FROMDEVICE);
		err = pci_dma_mapping_error(map);
		if(err) {
			printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
			       qdev->ndev->name, err);
			dev_kfree_skb(lrg_buf_cb->skb);
			lrg_buf_cb->skb = NULL;
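
The hunks in this file only touch whitespace, but for context the map-and-check pattern around this spot looks roughly like the sketch below. It uses the pre-2.6.27 DMA API visible in the hunk (pci_dma_mapping_error() takes only the handle); the helper name, parameter types, and the lrg_buffer_len field are illustrative assumptions, not code copied from the driver.

/* Sketch only: map a receive buffer and bail out cleanly on failure. */
static int ql_map_lrg_buf_sketch(struct ql3_adapter *qdev,
				 struct ql_rcv_buf_cb *lrg_buf_cb)
{
	dma_addr_t map;
	int err;

	map = pci_map_single(qdev->pdev, lrg_buf_cb->skb->data,
			     qdev->lrg_buffer_len, PCI_DMA_FROMDEVICE);
	err = pci_dma_mapping_error(map);	/* old single-argument form */
	if (err) {
		printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
		       qdev->ndev->name, err);
		dev_kfree_skb(lrg_buf_cb->skb);	/* drop the buffer on failure */
		lrg_buf_cb->skb = NULL;
		return err;
	}
	return 0;
}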
@@ -884,14 +884,14 @@ static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
			    PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
@@ -945,7 +945,7 @@ static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
	ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
	/* Write new PHYAD w/bit 5 set */
	ql_mii_write_reg_ex(qdev, 0x11, 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
	/*
	 * Disable diagnostic mode bit 2 = 0
	 * Power up device bit 11 = 0
	 * Link up (on) and activity (blink)
@@ -955,18 +955,18 @@ static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
	ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
}

static PHY_DEVICE_et getPhyType (struct ql3_adapter *qdev,
				 u16 phyIdReg0, u16 phyIdReg1)
{
	PHY_DEVICE_et result = PHY_TYPE_UNKNOWN;
	u32 oui;
	u16 model;
	int i;

	if (phyIdReg0 == 0xffff) {
		return result;
	}

	if (phyIdReg1 == 0xffff) {
		return result;
	}
@@ -984,7 +984,7 @@ static PHY_DEVICE_et getPhyType (struct ql3_adapter *qdev,

			printk(KERN_INFO "%s: Phy: %s\n",
			       qdev->ndev->name, PHY_DEVICES[i].name);

			break;
		}
	}
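
For reference, the OUI and model compared against PHY_DEVICES[] above are pulled out of the two clause-22 PHY identifier registers along these lines. The shifts follow the standard ID register layout and the loop bound name is assumed; treat this as a sketch rather than a verbatim copy of getPhyType().

	/* OUI bits gathered from both ID registers (clause-22 layout) */
	oui = (phyIdReg0 << 6) | ((phyIdReg1 >> 10) & 0x3f);
	/* 6-bit model number sits in bits 9:4 of ID register 1 */
	model = (phyIdReg1 >> 4) & 0x3f;

	for (i = 0; i < NUM_PHY_DEVICES; i++) {	/* bound name assumed */
		if ((oui == PHY_DEVICES[i].phyIdOUI) &&
		    (model == PHY_DEVICES[i].phyIdModel)) {
			result = PHY_DEVICES[i].phyDevice;
			break;
		}
	}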
@@ -1033,7 +1033,7 @@ static int ql_is_full_dup(struct ql3_adapter *qdev)
	{
		if (ql_mii_read_reg(qdev, 0x1A, &reg))
			return 0;

		return ((reg & 0x0080) && (reg & 0x1000)) != 0;
	}
	case PHY_VITESSE_VSC8211:
@@ -1082,19 +1082,19 @@ static int PHY_Setup(struct ql3_adapter *qdev)
	/* Check if we have a Agere PHY */
	if ((reg1 == 0xffff) || (reg2 == 0xffff)) {

		/* Determine which MII address we should be using
		   determined by the index of the card */
		if (qdev->mac_index == 0) {
			miiAddr = MII_AGERE_ADDR_1;
		} else {
			miiAddr = MII_AGERE_ADDR_2;
		}

		err =ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
		if(err != 0) {
			printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG after Agere detected\n",
			       qdev->ndev->name);
			return err;
		}

		err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
@@ -1103,9 +1103,9 @@ static int PHY_Setup(struct ql3_adapter *qdev)
			       qdev->ndev->name);
			return err;
		}

		/* We need to remember to initialize the Agere PHY */
		agereAddrChangeNeeded = true;
	}

	/* Determine the particular PHY we have on board to apply
@@ -1114,7 +1114,7 @@ static int PHY_Setup(struct ql3_adapter *qdev)

	if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
		/* need this here so address gets changed */
		phyAgereSpecificInit(qdev, miiAddr);
	} else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
		printk(KERN_ERR "%s: PHY is unknown\n", qdev->ndev->name);
		return -EIO;
@@ -1427,7 +1427,7 @@ static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)

static void ql_phy_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

@@ -1438,7 +1438,7 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)

	if(qdev->phyType == PHY_AGERE_ET1011C) {
		/* turn off external loopback */
		ql_mii_write_reg(qdev, 0x13, 0x0000);
	}

	if(qdev->mac_index == 0)
@@ -1452,23 +1452,23 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
		portConfiguration = PORT_CONFIG_DEFAULT;

	/* Set the 1000 advertisements */
	ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_GIG_ALL_PARAMS;

	if(portConfiguration &
	   PORT_CONFIG_FULL_DUPLEX_ENABLED &
	   PORT_CONFIG_1000MB_SPEED) {
		reg |= PHY_GIG_ADV_1000F;
	}

	if(portConfiguration &
	   PORT_CONFIG_HALF_DUPLEX_ENABLED &
	   PORT_CONFIG_1000MB_SPEED) {
		reg |= PHY_GIG_ADV_1000H;
	}

	ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
			    PHYAddr[qdev->mac_index]);

	/* Set the 10/100 & pause negotiation advertisements */
@@ -1482,7 +1482,7 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
	if(portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
		if(portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100F;

		if(portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10F;
	}
@@ -1490,22 +1490,22 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
	if(portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
		if(portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100H;

		if(portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10H;
	}

	if(portConfiguration &
	   PORT_CONFIG_1000MB_SPEED) {
		reg |= 1;
	}

	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, CONTROL_REG,
			    reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
			    PHYAddr[qdev->mac_index]);
}
@@ -1660,7 +1660,7 @@ static void ql_link_state_machine(struct ql3_adapter *qdev)
1660 "%s: Reset in progress, skip processing link " 1660 "%s: Reset in progress, skip processing link "
1661 "state.\n", qdev->ndev->name); 1661 "state.\n", qdev->ndev->name);
1662 1662
1663 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1663 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1664 return; 1664 return;
1665 } 1665 }
1666 1666
@@ -1752,7 +1752,7 @@ static int ql_mii_setup(struct ql3_adapter *qdev)
		return -1;

	if (qdev->device_id == QL3032_DEVICE_ID)
		ql_write_page0_reg(qdev,
			&port_regs->macMIIMgmtControlReg, 0x0f00000);

	/* Divide 125MHz clock by 28 to meet PHY timing requirements */
@@ -1936,7 +1936,7 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)

			err = pci_dma_mapping_error(map);
			if(err) {
				printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
				       qdev->ndev->name, err);
				dev_kfree_skb(lrg_buf_cb->skb);
				lrg_buf_cb->skb = NULL;
@@ -2044,7 +2044,7 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
	if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		printk(KERN_WARNING "Frame short but, frame was padded and sent.\n");
	}

	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];

	/* Check the transmit response flags for any errors */
@@ -2108,13 +2108,13 @@ static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)

/*
 * The difference between 3022 and 3032 for inbound completions:
 * 3022 uses two buffers per completion.  The first buffer contains
 * (some) header info, the second the remainder of the headers plus
 * the data.  For this chip we reserve some space at the top of the
 * receive buffer so that the header info in buffer one can be
 * prepended to the buffer two.  Buffer two is the sent up while
 * buffer one is returned to the hardware to be reused.
 * 3032 receives all of it's data and headers in one buffer for a
 * simpler process.  3032 also supports checksum verification as
 * can be seen in ql_process_macip_rx_intr().
 */
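
Rough sketch of the 3022 two-buffer handoff described above: the header bytes from buffer one are copied into headroom reserved at the front of buffer two, and only buffer two goes up the stack. The helper name is hypothetical and the VLAN_ID_LEN offset is assumed; skb1/skb2/size mirror the hunk that follows.

static void ql_prepend_rx_hdr_sketch(struct sk_buff *skb1,
				     struct sk_buff *skb2, u16 size)
{
	/* buffer two was allocated with enough headroom for the headers */
	memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size);

	/* buffer two (headers + data) is what gets handed to the stack;
	 * buffer one is recycled back to the hardware free list */
	netif_receive_skb(skb2);
}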
@@ -2205,13 +2205,13 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
		       skb_push(skb2, size), size);
	} else {
		u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
		if (checksum &
		    (IB_IP_IOCB_RSP_3032_ICE |
		     IB_IP_IOCB_RSP_3032_CE)) {
			printk(KERN_ERR
			       "%s: Bad checksum for this %s packet, checksum = %x.\n",
			       __func__,
			       ((checksum &
				 IB_IP_IOCB_RSP_3032_TCP) ? "TCP" :
				"UDP"),checksum);
		} else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
@@ -2394,12 +2394,12 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
}

/*
 * Get the total number of segments needed for the
 * given number of fragments.  This is necessary because
 * outbound address lists (OAL) will be used when more than
 * two frags are given.  Each address list has 5 addr/len
 * pairs.  The 5th pair in each AOL is used to point to
 * the next AOL if more frags are coming.
 * That is why the frags:segment count ratio is not linear.
 */
static int ql_get_seg_count(struct ql3_adapter *qdev,
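
The nonlinear frags-to-segments ratio mentioned above works out roughly as follows. The thresholds are reconstructed from the comment (three addr/len pairs in the IOCB, five per OAL, the last pair burned as a chain pointer when another OAL follows); this is a sketch, not a verbatim copy of ql_get_seg_count().

static int seg_count_sketch(unsigned short frags)
{
	if (frags <= 2)		/* skb->data plus 2 frags fit in the IOCB ALPs */
		return frags + 1;
	if (frags <= 6)		/* one OAL: +1 segment for the chain pointer */
		return frags + 2;
	if (frags <= 10)	/* two OALs: +2 chain pointers */
		return frags + 3;
	if (frags <= 14)	/* three OALs */
		return frags + 4;
	if (frags <= 18)	/* four OALs */
		return frags + 5;
	return -1;		/* too many fragments to map */
}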
@@ -2476,12 +2476,12 @@ static int ql_send_map(struct ql3_adapter *qdev,

	err = pci_dma_mapping_error(map);
	if(err) {
		printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
		       qdev->ndev->name, err);

		return NETDEV_TX_BUSY;
	}

	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
	oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
	oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
@@ -2511,7 +2511,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
			err = pci_dma_mapping_error(map);
			if(err) {

				printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n",
				       qdev->ndev->name, err);
				goto map_error;
			}
@@ -2537,7 +2537,7 @@ static int ql_send_map(struct ql3_adapter *qdev,

		err = pci_dma_mapping_error(map);
		if(err) {
			printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n",
			       qdev->ndev->name, err);
			goto map_error;
		}
@@ -2558,10 +2558,10 @@ static int ql_send_map(struct ql3_adapter *qdev,

map_error:
	/* A PCI mapping failed and now we will need to back out
	 * We need to traverse through the oal's and associated pages which
	 * have been mapped and now we must unmap them to clean up properly
	 */

	seg = 1;
	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
	oal = tx_cb->oal;
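
The back-out walk the comment describes amounts to undoing the skb->data mapping and then each page fragment that was mapped before the failure, roughly as below. The helper name and the plain mapaddr/maplen members are illustrative stand-ins for the driver's unmap bookkeeping, not its actual accessors.

static void ql_unmap_send_sketch(struct ql3_adapter *qdev,
				 struct ql_tx_buf_cb *tx_cb, int mapped)
{
	int i;

	/* segment 0 is always the linear skb->data buffer */
	pci_unmap_single(qdev->pdev, tx_cb->map[0].mapaddr,
			 tx_cb->map[0].maplen, PCI_DMA_TODEVICE);

	/* the rest are page fragments that were fed into the OAL chain */
	for (i = 1; i < mapped; i++)
		pci_unmap_page(qdev->pdev, tx_cb->map[i].mapaddr,
			       tx_cb->map[i].maplen, PCI_DMA_TODEVICE);
}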
@@ -2599,11 +2599,11 @@ map_error:
 * The difference between 3022 and 3032 sends:
 * 3022 only supports a simple single segment transmission.
 * 3032 supports checksumming and scatter/gather lists (fragments).
 * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
 * in the IOCB plus a chain of outbound address lists (OAL) that
 * each contain 5 ALPs.  The last ALP of the IOCB (3rd) or OAL (5th)
 * will used to point to an OAL when more ALP entries are required.
 * The IOCB is always the top of the chain followed by one or more
 * OALs (when necessary).
 */
static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
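
For reference, the chaining described above hangs off an entry layout along these lines. The dma_lo/dma_hi fields and struct oal_entry are referenced earlier in this diff; the five-entry count comes straight from the comment, while the OAL_ENTRIES name and the flag-bit note are assumptions.

struct oal_entry {
	__le32 dma_lo;	/* low 32 bits of the buffer address */
	__le32 dma_hi;	/* high 32 bits of the buffer address */
	__le32 len;	/* byte count; a flag bit marks the entry that
			 * points at the next OAL instead of at data */
};

#define OAL_ENTRIES 5	/* addr/len pairs per outbound address list */

struct oal {
	struct oal_entry oal_entry[OAL_ENTRIES];
};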
@@ -2617,14 +2617,14 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
	if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
		return NETDEV_TX_BUSY;
	}

	tx_cb = &qdev->tx_buf[qdev->req_producer_index] ;
	if((tx_cb->seg_count = ql_get_seg_count(qdev,
						(skb_shinfo(skb)->nr_frags))) == -1) {
		printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__);
		return NETDEV_TX_OK;
	}

	mac_iocb_ptr = tx_cb->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
	mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
@@ -2636,12 +2636,12 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
	if (qdev->device_id == QL3032_DEVICE_ID &&
	    skb->ip_summed == CHECKSUM_PARTIAL)
		ql_hw_csum_setup(skb, mac_iocb_ptr);

	if(ql_send_map(qdev,mac_iocb_ptr,tx_cb,skb) != NETDEV_TX_OK) {
		printk(KERN_ERR PFX"%s: Could not map the segments!\n",__func__);
		return NETDEV_TX_BUSY;
	}

	wmb();
	qdev->req_producer_index++;
	if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
@@ -2739,7 +2739,7 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2739 "%s: qdev->lrg_buf alloc failed.\n", qdev->ndev->name); 2739 "%s: qdev->lrg_buf alloc failed.\n", qdev->ndev->name);
2740 return -ENOMEM; 2740 return -ENOMEM;
2741 } 2741 }
2742 2742
2743 qdev->lrg_buf_q_alloc_virt_addr = 2743 qdev->lrg_buf_q_alloc_virt_addr =
2744 pci_alloc_consistent(qdev->pdev, 2744 pci_alloc_consistent(qdev->pdev,
2745 qdev->lrg_buf_q_alloc_size, 2745 qdev->lrg_buf_q_alloc_size,