commit     ada47b5fe13d89735805b566185f4885f5a3f750
tree       644b88f8a71896307d71438e9b3af49126ffb22b
parent     43e98717ad40a4ae64545b5ba047c7b86aa44f4f
parent     3280f21d43ee541f97f8cda5792150d2dbec20d5
author     Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400

    Merge branch 'wip-2.6.34' into old-private-master  (archived-private-master)
Diffstat (limited to 'drivers/net/via-velocity.c')

 -rw-r--r--  drivers/net/via-velocity.c | 432 ++++++++++++++++++++-----------
 1 file changed, 290 insertions(+), 142 deletions(-)
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index e04e5bee005c..bc278d4ee89d 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -9,7 +9,6 @@
  *
  * TODO
  *	rx_copybreak/alignment
- *	Scatter gather
  *	More testing
  *
  * The changes are (c) Copyright 2004, Red Hat Inc. <alan@lxorguk.ukuu.org.uk>
@@ -275,7 +274,7 @@ VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");
 
 #define DMA_LENGTH_MIN	0
 #define DMA_LENGTH_MAX	7
-#define DMA_LENGTH_DEF	0
+#define DMA_LENGTH_DEF	6
 
 /* DMA_length[] is used for controlling the DMA length
 	0: 8 DWORDs
@@ -298,14 +297,6 @@ VELOCITY_PARAM(DMA_length, "DMA length");
 */
 VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");
 
-#define TX_CSUM_DEF     1
-/* txcsum_offload[] is used for setting the checksum offload ability of NIC.
-   (We only support RX checksum offload now)
-   0: disable csum_offload[checksum offload
-   1: enable checksum offload. (Default)
-*/
-VELOCITY_PARAM(txcsum_offload, "Enable transmit packet checksum offload");
-
 #define FLOW_CNTL_DEF   1
 #define FLOW_CNTL_MIN   1
 #define FLOW_CNTL_MAX   5
@@ -354,21 +345,10 @@ VELOCITY_PARAM(ValPktLen, "Receiving or Drop invalid 802.3 frame");
 */
 VELOCITY_PARAM(wol_opts, "Wake On Lan options");
 
-#define INT_WORKS_DEF   20
-#define INT_WORKS_MIN   10
-#define INT_WORKS_MAX   64
-
-VELOCITY_PARAM(int_works, "Number of packets per interrupt services");
-
 static int rx_copybreak = 200;
 module_param(rx_copybreak, int, 0644);
 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
 
-#ifdef CONFIG_PM
-static DEFINE_SPINLOCK(velocity_dev_list_lock);
-static LIST_HEAD(velocity_dev_list);
-#endif
-
 /*
  *	Internal board variants. At the moment we have only one
  */
@@ -381,7 +361,7 @@ static struct velocity_info_tbl chip_info_table[] = {
  *	Describe the PCI device identifiers that we support in this
  *	device driver. Used for hotplug autoloading.
  */
-static const struct pci_device_id velocity_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(velocity_id_table) = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
 	{ }
 };
@@ -417,14 +397,6 @@ static void __devexit velocity_remove1(struct pci_dev *pdev)
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct velocity_info *vptr = netdev_priv(dev);
 
-#ifdef CONFIG_PM
-	unsigned long flags;
-
-	spin_lock_irqsave(&velocity_dev_list_lock, flags);
-	if (!list_empty(&velocity_dev_list))
-		list_del(&vptr->list);
-	spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
-#endif
 	unregister_netdev(dev);
 	iounmap(vptr->mac_regs);
 	pci_release_regions(pdev);
@@ -510,13 +482,11 @@ static void __devinit velocity_get_options(struct velocity_opt *opts, int index,
 	velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname);
 	velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname);
 
-	velocity_set_bool_opt(&opts->flags, txcsum_offload[index], TX_CSUM_DEF, VELOCITY_FLAGS_TX_CSUM, "txcsum_offload", devname);
 	velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
 	velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
 	velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
 	velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
 	velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
-	velocity_set_int_opt((int *) &opts->int_works, int_works[index], INT_WORKS_MIN, INT_WORKS_MAX, INT_WORKS_DEF, "Interrupt service works", devname);
 	opts->numrx = (opts->numrx & ~3);
 }
 
@@ -842,7 +812,7 @@ static void set_mii_flow_control(struct velocity_info *vptr)
 
 	case FLOW_CNTL_TX_RX:
 		MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
-		MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
+		MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
 		break;
 
 	case FLOW_CNTL_DISABLE:
@@ -925,8 +895,8 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
 
 	/*
 	  Check if new status is consisent with current status
-	  if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE)
-	  || (mii_status==curr_status)) {
+	  if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) ||
+	  (mii_status==curr_status)) {
 	  vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
 	  vptr->mii_status=check_connection_type(vptr->mac_regs);
 	  VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n");
@@ -1162,8 +1132,8 @@ static void velocity_set_multi(struct net_device *dev)
 		writel(0xffffffff, &regs->MARCAM[0]);
 		writel(0xffffffff, &regs->MARCAM[4]);
 		rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
-	} else if ((dev->mc_count > vptr->multicast_limit)
-		 || (dev->flags & IFF_ALLMULTI)) {
+	} else if ((netdev_mc_count(dev) > vptr->multicast_limit) ||
+		  (dev->flags & IFF_ALLMULTI)) {
 		writel(0xffffffff, &regs->MARCAM[0]);
 		writel(0xffffffff, &regs->MARCAM[4]);
 		rx_mode = (RCR_AM | RCR_AB);
@@ -1171,9 +1141,11 @@ static void velocity_set_multi(struct net_device *dev)
 		int offset = MCAM_SIZE - vptr->multicast_limit;
 		mac_get_cam_mask(regs, vptr->mCAMmask);
 
-		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; i++, mclist = mclist->next) {
+		i = 0;
+		netdev_for_each_mc_addr(mclist, dev) {
 			mac_set_cam(regs, i + offset, mclist->dmi_addr);
 			vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
+			i++;
 		}
 
 		mac_set_cam_mask(regs, vptr->mCAMmask);
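
NOTE: on the CAM mask arithmetic above - each multicast CAM slot is tracked by one
bit in vptr->mCAMmask, so slot n lives in byte n / 8 at bit n & 7. A minimal
userspace sketch of the same indexing (names and the 64-slot size are illustrative,
not driver code):

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>

	#define MCAM_SIZE 64			/* assumed CAM slot count */

	int main(void)
	{
		uint8_t mask[MCAM_SIZE / 8];
		int offset = MCAM_SIZE - 4;	/* e.g. multicast_limit = 4 */
		int i;

		memset(mask, 0, sizeof(mask));
		for (i = 0; i < 4; i++)		/* one bit per programmed entry */
			mask[(offset + i) / 8] |= 1 << ((offset + i) & 7);

		/* entries 60..63 all land in the last byte, bits 4..7 */
		assert(mask[7] == 0xf0);
		return 0;
	}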
@@ -1259,6 +1231,66 @@ static void mii_init(struct velocity_info *vptr, u32 mii_status)
 	}
 }
 
+/**
+ *	setup_queue_timers	-	Setup interrupt timers
+ *
+ *	Setup interrupt frequency during suppression (timeout if the frame
+ *	count isn't filled).
+ */
+static void setup_queue_timers(struct velocity_info *vptr)
+{
+	/* Only for newer revisions */
+	if (vptr->rev_id >= REV_ID_VT3216_A0) {
+		u8 txqueue_timer = 0;
+		u8 rxqueue_timer = 0;
+
+		if (vptr->mii_status & (VELOCITY_SPEED_1000 |
+				VELOCITY_SPEED_100)) {
+			txqueue_timer = vptr->options.txqueue_timer;
+			rxqueue_timer = vptr->options.rxqueue_timer;
+		}
+
+		writeb(txqueue_timer, &vptr->mac_regs->TQETMR);
+		writeb(rxqueue_timer, &vptr->mac_regs->RQETMR);
+	}
+}
+/**
+ *	setup_adaptive_interrupts  -  Setup interrupt suppression
+ *
+ *	@vptr velocity adapter
+ *
+ *	The velocity is able to suppress interrupt during high interrupt load.
+ *	This function turns on that feature.
+ */
+static void setup_adaptive_interrupts(struct velocity_info *vptr)
+{
+	struct mac_regs __iomem *regs = vptr->mac_regs;
+	u16 tx_intsup = vptr->options.tx_intsup;
+	u16 rx_intsup = vptr->options.rx_intsup;
+
+	/* Setup default interrupt mask (will be changed below) */
+	vptr->int_mask = INT_MASK_DEF;
+
+	/* Set Tx Interrupt Suppression Threshold */
+	writeb(CAMCR_PS0, &regs->CAMCR);
+	if (tx_intsup != 0) {
+		vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I |
+				ISR_PTX2I | ISR_PTX3I);
+		writew(tx_intsup, &regs->ISRCTL);
+	} else
+		writew(ISRCTL_TSUPDIS, &regs->ISRCTL);
+
+	/* Set Rx Interrupt Suppression Threshold */
+	writeb(CAMCR_PS1, &regs->CAMCR);
+	if (rx_intsup != 0) {
+		vptr->int_mask &= ~ISR_PRXI;
+		writew(rx_intsup, &regs->ISRCTL);
+	} else
+		writew(ISRCTL_RSUPDIS, &regs->ISRCTL);
+
+	/* Select page to interrupt hold timer */
+	writeb(0, &regs->CAMCR);
+}
 
 /**
  *	velocity_init_registers	-	initialise MAC registers
@@ -1345,7 +1377,7 @@ static void velocity_init_registers(struct velocity_info *vptr,
 	 */
 	enable_mii_autopoll(regs);
 
-	vptr->int_mask = INT_MASK_DEF;
+	setup_adaptive_interrupts(vptr);
 
 	writel(vptr->rx.pool_dma, &regs->RDBaseLo);
 	writew(vptr->options.numrx - 1, &regs->RDCSize);
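
NOTE: for readers unfamiliar with the feature wired up here - with suppression
active the NIC holds back completion interrupts until either a frame-count
threshold (tx_intsup/rx_intsup) is reached or the hold timer programmed by
setup_queue_timers() expires. A rough userspace model of that behaviour
(thresholds and timings are invented for illustration):

	#include <stdio.h>

	/* Raise an IRQ when 'thresh' frames accumulate or 'timer_us'
	 * elapses with frames pending - whichever comes first. */
	static int irqs_for_burst(int frames, int thresh,
				  int per_frame_us, int timer_us)
	{
		int irqs = 0, pending = 0, elapsed = 0;

		while (frames--) {
			pending++;
			elapsed += per_frame_us;
			if (pending >= thresh || elapsed >= timer_us) {
				irqs++;
				pending = 0;
				elapsed = 0;
			}
		}
		if (pending)
			irqs++;		/* timer flushes the burst's tail */
		return irqs;
	}

	int main(void)
	{
		/* 1000 frames, 8-frame threshold: 125 IRQs instead of 1000 */
		printf("%d\n", irqs_for_burst(1000, 8, 1, 59));
		return 0;
	}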
@@ -1483,7 +1515,8 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
 	 *	Do the gymnastics to get the buffer head for data at
 	 *	64byte alignment.
 	 */
-	skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63);
+	skb_reserve(rd_info->skb,
+		    64 - ((unsigned long) rd_info->skb->data & 63));
 	rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
 					  vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
 
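
NOTE: the alignment fix above deserves a second look. The old expression,
data & 63, only yields a 64-byte-aligned head when the address is already at
offset 0 or 32 (the offset added to itself must cancel mod 64); the new
64 - (data & 63) aligns every address, at the cost of skipping a full 64 bytes
when the buffer is already aligned. A self-contained check of both formulas:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uintptr_t addr;

		for (addr = 0x1000; addr < 0x1040; addr++) {
			uintptr_t new_reserve = 64 - (addr & 63);	/* the fix */
			uintptr_t old_reserve = addr & 63;		/* replaced */

			assert(((addr + new_reserve) & 63) == 0);	/* always aligned */
			if (((addr + old_reserve) & 63) != 0)
				printf("old formula misaligns offset %lu\n",
				       (unsigned long)(addr & 63));
		}
		return 0;
	}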
@@ -1602,12 +1635,10 @@ out:
  */
 static int velocity_init_td_ring(struct velocity_info *vptr)
 {
-	dma_addr_t curr;
 	int j;
 
 	/* Init the TD ring entries */
 	for (j = 0; j < vptr->tx.numq; j++) {
-		curr = vptr->tx.pool_dma[j];
 
 		vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
 			sizeof(struct velocity_td_info),
@@ -1673,21 +1704,27 @@ err_free_dma_rings_0:
  *	Release an transmit buffer. If the buffer was preallocated then
  *	recycle it, if not then unmap the buffer.
  */
-static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *tdinfo)
+static void velocity_free_tx_buf(struct velocity_info *vptr,
+		struct velocity_td_info *tdinfo, struct tx_desc *td)
 {
 	struct sk_buff *skb = tdinfo->skb;
-	int i;
-	int pktlen;
 
 	/*
 	 *	Don't unmap the pre-allocated tx_bufs
 	 */
 	if (tdinfo->skb_dma) {
+		int i;
 
-		pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);
 		for (i = 0; i < tdinfo->nskb_dma; i++) {
-			pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], pktlen, PCI_DMA_TODEVICE);
-			tdinfo->skb_dma[i] = 0;
+			size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
+
+			/* For scatter-gather */
+			if (skb_shinfo(skb)->nr_frags > 0)
+				pktlen = max_t(size_t, pktlen,
+						td->td_buf[i].size & ~TD_QUEUE);
+
+			pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i],
+					le16_to_cpu(pktlen), PCI_DMA_TODEVICE);
 		}
 	}
 	dev_kfree_skb_irq(skb);
@@ -1801,6 +1838,8 @@ static void velocity_error(struct velocity_info *vptr, int status)
 			BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
 		else
 			BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
+
+		setup_queue_timers(vptr);
 	}
 	/*
 	 *	Get link status from PHYSR0
@@ -1840,13 +1879,12 @@ static void velocity_error(struct velocity_info *vptr, int status)
 /**
  *	tx_srv		-	transmit interrupt service
  *	@vptr; Velocity
- *	@status:
  *
  *	Scan the queues looking for transmitted packets that
  *	we can complete and clean up. Update any statistics as
  *	necessary/
  */
-static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
+static int velocity_tx_srv(struct velocity_info *vptr)
 {
 	struct tx_desc *td;
 	int qnum;
@@ -1887,7 +1925,7 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
 				stats->tx_packets++;
 				stats->tx_bytes += tdinfo->skb->len;
 			}
-			velocity_free_tx_buf(vptr, tdinfo);
+			velocity_free_tx_buf(vptr, tdinfo, td);
 			vptr->tx.used[qnum]--;
 		}
 		vptr->tx.tail[qnum] = idx;
@@ -1899,8 +1937,8 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
 	 *	Look to see if we should kick the transmit network
 	 *	layer for more work.
 	 */
-	if (netif_queue_stopped(vptr->dev) && (full == 0)
-	    && (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
+	if (netif_queue_stopped(vptr->dev) && (full == 0) &&
+	    (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
 		netif_wake_queue(vptr->dev);
 	}
 	return works;
@@ -1949,10 +1987,9 @@ static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
 	if (pkt_size < rx_copybreak) {
 		struct sk_buff *new_skb;
 
-		new_skb = netdev_alloc_skb(vptr->dev, pkt_size + 2);
+		new_skb = netdev_alloc_skb_ip_align(vptr->dev, pkt_size);
 		if (new_skb) {
 			new_skb->ip_summed = rx_skb[0]->ip_summed;
-			skb_reserve(new_skb, 2);
 			skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
 			*rx_skb = new_skb;
 			ret = 0;
@@ -2054,19 +2091,18 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 /**
  *	velocity_rx_srv		-	service RX interrupt
  *	@vptr: velocity
- *	@status: adapter status (unused)
  *
  *	Walk the receive ring of the velocity adapter and remove
  *	any received packets from the receive queue. Hand the ring
  *	slots back to the adapter for reuse.
  */
-static int velocity_rx_srv(struct velocity_info *vptr, int status)
+static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
 {
 	struct net_device_stats *stats = &vptr->dev->stats;
 	int rd_curr = vptr->rx.curr;
 	int works = 0;
 
-	do {
+	while (works < budget_left) {
 		struct rx_desc *rd = vptr->rx.ring + rd_curr;
 
 		if (!vptr->rx.info[rd_curr].skb)
@@ -2097,7 +2133,8 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
 		rd_curr++;
 		if (rd_curr >= vptr->options.numrx)
 			rd_curr = 0;
-	} while (++works <= 15);
+		works++;
+	}
 
 	vptr->rx.curr = rd_curr;
 
@@ -2108,6 +2145,32 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
 	return works;
 }
 
+static int velocity_poll(struct napi_struct *napi, int budget)
+{
+	struct velocity_info *vptr = container_of(napi,
+			struct velocity_info, napi);
+	unsigned int rx_done;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vptr->lock, flags);
+	/*
+	 * Do rx and tx twice for performance (taken from the VIA
+	 * out-of-tree driver).
+	 */
+	rx_done = velocity_rx_srv(vptr, budget / 2);
+	velocity_tx_srv(vptr);
+	rx_done += velocity_rx_srv(vptr, budget - rx_done);
+	velocity_tx_srv(vptr);
+
+	/* If budget not fully consumed, exit the polling mode */
+	if (rx_done < budget) {
+		napi_complete(napi);
+		mac_enable_int(vptr->mac_regs);
+	}
+	spin_unlock_irqrestore(&vptr->lock, flags);
+
+	return rx_done;
+}
 
 /**
  *	velocity_intr		-	interrupt callback
@@ -2124,8 +2187,6 @@ static irqreturn_t velocity_intr(int irq, void *dev_instance)
 	struct net_device *dev = dev_instance;
 	struct velocity_info *vptr = netdev_priv(dev);
 	u32 isr_status;
-	int max_count = 0;
-
 
 	spin_lock(&vptr->lock);
 	isr_status = mac_read_isr(vptr->mac_regs);
@@ -2136,32 +2197,20 @@ static irqreturn_t velocity_intr(int irq, void *dev_instance)
 		return IRQ_NONE;
 	}
 
-	mac_disable_int(vptr->mac_regs);
+	/* Ack the interrupt */
+	mac_write_isr(vptr->mac_regs, isr_status);
 
-	/*
-	 *	Keep processing the ISR until we have completed
-	 *	processing and the isr_status becomes zero
-	 */
-
-	while (isr_status != 0) {
-		mac_write_isr(vptr->mac_regs, isr_status);
-		if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
-			velocity_error(vptr, isr_status);
-		if (isr_status & (ISR_PRXI | ISR_PPRXI))
-			max_count += velocity_rx_srv(vptr, isr_status);
-		if (isr_status & (ISR_PTXI | ISR_PPTXI))
-			max_count += velocity_tx_srv(vptr, isr_status);
-		isr_status = mac_read_isr(vptr->mac_regs);
-		if (max_count > vptr->options.int_works) {
-			printk(KERN_WARNING "%s: excessive work at interrupt.\n",
-				dev->name);
-			max_count = 0;
-		}
+	if (likely(napi_schedule_prep(&vptr->napi))) {
+		mac_disable_int(vptr->mac_regs);
+		__napi_schedule(&vptr->napi);
 	}
+
+	if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
+		velocity_error(vptr, isr_status);
+
 	spin_unlock(&vptr->lock);
-	mac_enable_int(vptr->mac_regs);
-	return IRQ_HANDLED;
 
+	return IRQ_HANDLED;
 }
 
 /**
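
NOTE: the two hunks above are the heart of the NAPI conversion - the hard
interrupt now only acks the ISR and schedules velocity_poll(), which does all
Rx/Tx servicing with interrupts masked. The NAPI contract is that poll() never
reports more than 'budget' packets and only re-enables interrupts
(napi_complete + mac_enable_int) when it finishes under budget. A small
userspace model of the two-pass budget accounting used here (ring and packet
counts are invented):

	#include <assert.h>
	#include <stdio.h>

	static int pending;		/* frames waiting in a model Rx ring */

	static int rx_srv(int budget_left)	/* mirrors velocity_rx_srv() */
	{
		int works = 0;

		while (works < budget_left && pending > 0) {
			pending--;
			works++;
		}
		return works;
	}

	int main(void)
	{
		int load, budget = 64;

		for (load = 0; load <= 100; load += 20) {
			int rx_done;

			pending = load;
			rx_done  = rx_srv(budget / 2);	     /* first pass */
			/* (tx servicing would run here) */
			rx_done += rx_srv(budget - rx_done); /* second pass */

			assert(rx_done <= budget);	     /* contract holds */
			printf("load=%3d rx_done=%2d -> %s\n", load, rx_done,
			       rx_done < budget ? "complete, re-enable IRQs"
						: "stay in polling");
		}
		return 0;
	}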
@@ -2186,11 +2235,9 @@ static int velocity_open(struct net_device *dev)
 	/* Ensure chip is running */
 	pci_set_power_state(vptr->pdev, PCI_D0);
 
-	velocity_give_many_rx_descs(vptr);
-
 	velocity_init_registers(vptr, VELOCITY_INIT_COLD);
 
-	ret = request_irq(vptr->pdev->irq, &velocity_intr, IRQF_SHARED,
+	ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED,
 			  dev->name, dev);
 	if (ret < 0) {
 		/* Power down the chip */
@@ -2199,8 +2246,11 @@ static int velocity_open(struct net_device *dev)
 		goto out;
 	}
 
+	velocity_give_many_rx_descs(vptr);
+
 	mac_enable_int(vptr->mac_regs);
 	netif_start_queue(dev);
+	napi_enable(&vptr->napi);
 	vptr->flags |= VELOCITY_FLAGS_OPENED;
 out:
 	return ret;
@@ -2287,10 +2337,10 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
 
 		dev->mtu = new_mtu;
 
-		velocity_give_many_rx_descs(vptr);
-
 		velocity_init_registers(vptr, VELOCITY_INIT_COLD);
 
+		velocity_give_many_rx_descs(vptr);
+
 		mac_enable_int(vptr->mac_regs);
 		netif_start_queue(dev);
 
@@ -2436,6 +2486,7 @@ static int velocity_close(struct net_device *dev)
 {
 	struct velocity_info *vptr = netdev_priv(dev);
 
+	napi_disable(&vptr->napi);
 	netif_stop_queue(dev);
 	velocity_shutdown(vptr);
 
@@ -2470,14 +2521,22 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
 	struct velocity_td_info *tdinfo;
 	unsigned long flags;
 	int pktlen;
-	__le16 len;
-	int index;
+	int index, prev;
+	int i = 0;
 
 	if (skb_padto(skb, ETH_ZLEN))
 		goto out;
-	pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);
 
-	len = cpu_to_le16(pktlen);
+	/* The hardware can handle at most 7 memory segments, so merge
+	 * the skb if there are more */
+	if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
+		kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	pktlen = skb_shinfo(skb)->nr_frags == 0 ?
+			max_t(unsigned int, skb->len, ETH_ZLEN) :
+			skb_headlen(skb);
 
 	spin_lock_irqsave(&vptr->lock, flags);
 
@@ -2494,11 +2553,24 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
 	 */
 	tdinfo->skb = skb;
 	tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
-	td_ptr->tdesc0.len = len;
+	td_ptr->tdesc0.len = cpu_to_le16(pktlen);
 	td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
 	td_ptr->td_buf[0].pa_high = 0;
-	td_ptr->td_buf[0].size = len;
-	tdinfo->nskb_dma = 1;
+	td_ptr->td_buf[0].size = cpu_to_le16(pktlen);
+
+	/* Handle fragments */
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		tdinfo->skb_dma[i + 1] = pci_map_page(vptr->pdev, frag->page,
+				frag->page_offset, frag->size,
+				PCI_DMA_TODEVICE);
+
+		td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
+		td_ptr->td_buf[i + 1].pa_high = 0;
+		td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size);
+	}
+	tdinfo->nskb_dma = i + 1;
 
 	td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
 
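
NOTE: the "> 6" in the linearize test above comes from the descriptor layout -
one tx_desc carries seven td_buf slots, the first of which holds the linear
head, leaving six for page fragments. A one-liner model of that budget (the
slot count is taken from the comment in the hunk, not from the header file):

	#include <assert.h>

	#define MAX_TD_BUFS 7		/* td_buf[0..6] in one tx descriptor */

	static int must_linearize(int nr_frags)
	{
		return 1 + nr_frags > MAX_TD_BUFS;	/* head + frags */
	}

	int main(void)
	{
		assert(!must_linearize(6));	/* 7 segments: fits as-is */
		assert(must_linearize(7));	/* 8 segments: __skb_linearize() first */
		return 0;
	}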
@@ -2510,8 +2582,8 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
 	/*
 	 *	Handle hardware checksum
 	 */
-	if ((vptr->flags & VELOCITY_FLAGS_TX_CSUM)
-	    && (skb->ip_summed == CHECKSUM_PARTIAL)) {
+	if ((dev->features & NETIF_F_IP_CSUM) &&
+	    (skb->ip_summed == CHECKSUM_PARTIAL)) {
 		const struct iphdr *ip = ip_hdr(skb);
 		if (ip->protocol == IPPROTO_TCP)
 			td_ptr->tdesc1.TCR |= TCR0_TCPCK;
@@ -2519,23 +2591,21 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
 			td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
 		td_ptr->tdesc1.TCR |= TCR0_IPCK;
 	}
-	{
 
-		int prev = index - 1;
+	prev = index - 1;
+	if (prev < 0)
+		prev = vptr->options.numtx - 1;
+	td_ptr->tdesc0.len |= OWNED_BY_NIC;
+	vptr->tx.used[qnum]++;
+	vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
 
-		if (prev < 0)
-			prev = vptr->options.numtx - 1;
-		td_ptr->tdesc0.len |= OWNED_BY_NIC;
-		vptr->tx.used[qnum]++;
-		vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
+	if (AVAIL_TD(vptr, qnum) < 1)
+		netif_stop_queue(dev);
 
-		if (AVAIL_TD(vptr, qnum) < 1)
-			netif_stop_queue(dev);
+	td_ptr = &(vptr->tx.rings[qnum][prev]);
+	td_ptr->td_buf[0].size |= TD_QUEUE;
+	mac_tx_queue_wake(vptr->mac_regs, qnum);
 
-		td_ptr = &(vptr->tx.rings[qnum][prev]);
-		td_ptr->td_buf[0].size |= TD_QUEUE;
-		mac_tx_queue_wake(vptr->mac_regs, qnum);
-	}
 	dev->trans_start = jiffies;
 	spin_unlock_irqrestore(&vptr->lock, flags);
 out:
@@ -2578,7 +2648,6 @@ static void __devinit velocity_init_info(struct pci_dev *pdev,
 	vptr->tx.numq = info->txqueue;
 	vptr->multicast_limit = MCAM_SIZE;
 	spin_lock_init(&vptr->lock);
-	INIT_LIST_HEAD(&vptr->list);
 }
 
 /**
@@ -2631,10 +2700,8 @@ static void __devinit velocity_print_info(struct velocity_info *vptr)
 	struct net_device *dev = vptr->dev;
 
 	printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
-	printk(KERN_INFO "%s: Ethernet Address: %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
-		dev->name,
-		dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
-		dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+	printk(KERN_INFO "%s: Ethernet Address: %pM\n",
+		dev->name, dev->dev_addr);
 }
 
 static u32 velocity_get_link(struct net_device *dev)
@@ -2755,12 +2822,10 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
 	dev->irq = pdev->irq;
 	dev->netdev_ops = &velocity_netdev_ops;
 	dev->ethtool_ops = &velocity_ethtool_ops;
+	netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
 
 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
-		NETIF_F_HW_VLAN_RX;
-
-	if (vptr->flags & VELOCITY_FLAGS_TX_CSUM)
-		dev->features |= NETIF_F_IP_CSUM;
+		NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM | NETIF_F_SG;
 
 	ret = register_netdev(dev);
 	if (ret < 0)
@@ -2777,15 +2842,6 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
 	/* and leave the chip powered down */
 
 	pci_set_power_state(pdev, PCI_D3hot);
-#ifdef CONFIG_PM
-	{
-		unsigned long flags;
-
-		spin_lock_irqsave(&velocity_dev_list_lock, flags);
-		list_add(&vptr->list, &velocity_dev_list);
-		spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
-	}
-#endif
 	velocity_nics++;
 out:
 	return ret;
@@ -3040,7 +3096,7 @@ static int velocity_resume(struct pci_dev *pdev)
 	velocity_init_registers(vptr, VELOCITY_INIT_WOL);
 	mac_disable_int(vptr->mac_regs);
 
-	velocity_tx_srv(vptr, 0);
+	velocity_tx_srv(vptr);
 
 	for (i = 0; i < vptr->tx.numq; i++) {
 		if (vptr->tx.used[i])
@@ -3222,15 +3278,117 @@ static void velocity_set_msglevel(struct net_device *dev, u32 value)
 	 msglevel = value;
 }
 
+static int get_pending_timer_val(int val)
+{
+	int mult_bits = val >> 6;
+	int mult = 1;
+
+	switch (mult_bits)
+	{
+	case 1:
+		mult = 4; break;
+	case 2:
+		mult = 16; break;
+	case 3:
+		mult = 64; break;
+	case 0:
+	default:
+		break;
+	}
+
+	return (val & 0x3f) * mult;
+}
+
+static void set_pending_timer_val(int *val, u32 us)
+{
+	u8 mult = 0;
+	u8 shift = 0;
+
+	if (us >= 0x3f) {
+		mult = 1; /* mult with 4 */
+		shift = 2;
+	}
+	if (us >= 0x3f * 4) {
+		mult = 2; /* mult with 16 */
+		shift = 4;
+	}
+	if (us >= 0x3f * 16) {
+		mult = 3; /* mult with 64 */
+		shift = 6;
+	}
+
+	*val = (mult << 6) | ((us >> shift) & 0x3f);
+}
+
+
+static int velocity_get_coalesce(struct net_device *dev,
+		struct ethtool_coalesce *ecmd)
+{
+	struct velocity_info *vptr = netdev_priv(dev);
+
+	ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup;
+	ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup;
+
+	ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer);
+	ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer);
+
+	return 0;
+}
+
+static int velocity_set_coalesce(struct net_device *dev,
+		struct ethtool_coalesce *ecmd)
+{
+	struct velocity_info *vptr = netdev_priv(dev);
+	int max_us = 0x3f * 64;
+	unsigned long flags;
+
+	/* 6 bits of  */
+	if (ecmd->tx_coalesce_usecs > max_us)
+		return -EINVAL;
+	if (ecmd->rx_coalesce_usecs > max_us)
+		return -EINVAL;
+
+	if (ecmd->tx_max_coalesced_frames > 0xff)
+		return -EINVAL;
+	if (ecmd->rx_max_coalesced_frames > 0xff)
+		return -EINVAL;
+
+	vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames;
+	vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames;
+
+	set_pending_timer_val(&vptr->options.rxqueue_timer,
+			ecmd->rx_coalesce_usecs);
+	set_pending_timer_val(&vptr->options.txqueue_timer,
+			ecmd->tx_coalesce_usecs);
+
+	/* Setup the interrupt suppression and queue timers */
+	spin_lock_irqsave(&vptr->lock, flags);
+	mac_disable_int(vptr->mac_regs);
+	setup_adaptive_interrupts(vptr);
+	setup_queue_timers(vptr);
+
+	mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
+	mac_clear_isr(vptr->mac_regs);
+	mac_enable_int(vptr->mac_regs);
+	spin_unlock_irqrestore(&vptr->lock, flags);
+
+	return 0;
+}
+
 static const struct ethtool_ops velocity_ethtool_ops = {
 	.get_settings = velocity_get_settings,
 	.set_settings = velocity_set_settings,
 	.get_drvinfo = velocity_get_drvinfo,
+	.set_tx_csum = ethtool_op_set_tx_csum,
+	.get_tx_csum = ethtool_op_get_tx_csum,
 	.get_wol = velocity_ethtool_get_wol,
 	.set_wol = velocity_ethtool_set_wol,
 	.get_msglevel = velocity_get_msglevel,
 	.set_msglevel = velocity_set_msglevel,
+	.set_sg = ethtool_op_set_sg,
 	.get_link = velocity_get_link,
 	.get_coalesce = velocity_get_coalesce,
 	.set_coalesce = velocity_set_coalesce,
 	.begin = velocity_ethtool_up,
 	.complete = velocity_ethtool_down
 };
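
NOTE: the coalescing knobs added above (reachable via the standard ethtool -c/-C
interface as rx-usecs/tx-usecs and rx-frames/tx-frames once get/set_coalesce are
registered) end up in an 8-bit register field: bits 5:0 are a mantissa and bits
7:6 select a x1/x4/x16/x64 multiplier, so set_pending_timer_val() necessarily
rounds down. A standalone round-trip of the two helpers (logic copied from the
patch, userspace scaffolding added):

	#include <stdio.h>

	typedef unsigned int u32;
	typedef unsigned char u8;

	static int get_pending_timer_val(int val)	/* decode, as in the patch */
	{
		int mult = 1;

		switch (val >> 6) {
		case 1: mult = 4; break;
		case 2: mult = 16; break;
		case 3: mult = 64; break;
		default: break;
		}
		return (val & 0x3f) * mult;
	}

	static void set_pending_timer_val(int *val, u32 us)	/* encode */
	{
		u8 mult = 0, shift = 0;

		if (us >= 0x3f)      { mult = 1; shift = 2; }
		if (us >= 0x3f * 4)  { mult = 2; shift = 4; }
		if (us >= 0x3f * 16) { mult = 3; shift = 6; }
		*val = (mult << 6) | ((us >> shift) & 0x3f);
	}

	int main(void)
	{
		u32 samples[] = { 10, 63, 100, 1000, 4032 };
		unsigned int i;

		for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
			int reg;

			set_pending_timer_val(&reg, samples[i]);
			/* e.g. 1000 us encodes to 0xbe and decodes to 992 us */
			printf("%4u us -> reg 0x%02x -> %4d us\n",
			       samples[i], reg, get_pending_timer_val(reg));
		}
		return 0;
	}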
@@ -3241,20 +3399,10 @@ static int velocity_netdev_event(struct notifier_block *nb, unsigned long notifi
 {
 	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
 	struct net_device *dev = ifa->ifa_dev->dev;
-	struct velocity_info *vptr;
-	unsigned long flags;
 
-	if (dev_net(dev) != &init_net)
-		return NOTIFY_DONE;
-
-	spin_lock_irqsave(&velocity_dev_list_lock, flags);
-	list_for_each_entry(vptr, &velocity_dev_list, list) {
-		if (vptr->dev == dev) {
-			velocity_get_ip(vptr);
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
+	if (dev_net(dev) == &init_net &&
+	    dev->netdev_ops == &velocity_netdev_ops)
+		velocity_get_ip(netdev_priv(dev));
 
 	return NOTIFY_DONE;
 }