Diffstat (limited to 'drivers/net/via-velocity.c')

-rw-r--r--	drivers/net/via-velocity.c	| 391
1 file changed, 270 insertions(+), 121 deletions(-)

diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index e04e5bee005c..d4eac2a14427 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -9,7 +9,6 @@
  *
  * TODO
  *	rx_copybreak/alignment
- *	Scatter gather
  *	More testing
  *
  * The changes are (c) Copyright 2004, Red Hat Inc. <alan@lxorguk.ukuu.org.uk>
@@ -275,7 +274,7 @@ VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");
 
 #define DMA_LENGTH_MIN	0
 #define DMA_LENGTH_MAX	7
-#define DMA_LENGTH_DEF	0
+#define DMA_LENGTH_DEF	6
 
 /* DMA_length[] is used for controlling the DMA length
 	0: 8 DWORDs
@@ -298,14 +297,6 @@ VELOCITY_PARAM(DMA_length, "DMA length");
 */
 VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");
 
-#define TX_CSUM_DEF	1
-/* txcsum_offload[] is used for setting the checksum offload ability of NIC.
-   (We only support RX checksum offload now)
-   0: disable csum_offload[checksum offload
-   1: enable checksum offload. (Default)
-*/
-VELOCITY_PARAM(txcsum_offload, "Enable transmit packet checksum offload");
-
 #define FLOW_CNTL_DEF	1
 #define FLOW_CNTL_MIN	1
 #define FLOW_CNTL_MAX	5
@@ -354,21 +345,10 @@ VELOCITY_PARAM(ValPktLen, "Receiving or Drop invalid 802.3 frame");
 */
 VELOCITY_PARAM(wol_opts, "Wake On Lan options");
 
-#define INT_WORKS_DEF	20
-#define INT_WORKS_MIN	10
-#define INT_WORKS_MAX	64
-
-VELOCITY_PARAM(int_works, "Number of packets per interrupt services");
-
 static int rx_copybreak = 200;
 module_param(rx_copybreak, int, 0644);
 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
 
-#ifdef CONFIG_PM
-static DEFINE_SPINLOCK(velocity_dev_list_lock);
-static LIST_HEAD(velocity_dev_list);
-#endif
-
 /*
  *	Internal board variants. At the moment we have only one
  */
@@ -417,14 +397,6 @@ static void __devexit velocity_remove1(struct pci_dev *pdev)
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct velocity_info *vptr = netdev_priv(dev);
 
-#ifdef CONFIG_PM
-	unsigned long flags;
-
-	spin_lock_irqsave(&velocity_dev_list_lock, flags);
-	if (!list_empty(&velocity_dev_list))
-		list_del(&vptr->list);
-	spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
-#endif
 	unregister_netdev(dev);
 	iounmap(vptr->mac_regs);
 	pci_release_regions(pdev);
@@ -510,13 +482,11 @@ static void __devinit velocity_get_options(struct velocity_opt *opts, int index,
 	velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname);
 	velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname);
 
-	velocity_set_bool_opt(&opts->flags, txcsum_offload[index], TX_CSUM_DEF, VELOCITY_FLAGS_TX_CSUM, "txcsum_offload", devname);
 	velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
 	velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
 	velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
 	velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
 	velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
-	velocity_set_int_opt((int *) &opts->int_works, int_works[index], INT_WORKS_MIN, INT_WORKS_MAX, INT_WORKS_DEF, "Interrupt service works", devname);
 	opts->numrx = (opts->numrx & ~3);
 }
 
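Note: the surviving "opts->numrx = (opts->numrx & ~3);" line rounds the requested receive-ring size down to a multiple of four, presumably because the driver returns RX descriptors to the NIC in blocks of four. A quick worked example of the masking:

	int numrx = 61;		/* RxDescriptors=61 from the module option */
	numrx &= ~3;		/* 0b111101 -> 0b111100, i.e. 60 entries  */
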
@@ -1259,6 +1229,66 @@ static void mii_init(struct velocity_info *vptr, u32 mii_status)
 	}
 }
 
+/**
+ * setup_queue_timers	-	Setup interrupt timers
+ *
+ * Setup interrupt frequency during suppression (timeout if the frame
+ * count isn't filled).
+ */
+static void setup_queue_timers(struct velocity_info *vptr)
+{
+	/* Only for newer revisions */
+	if (vptr->rev_id >= REV_ID_VT3216_A0) {
+		u8 txqueue_timer = 0;
+		u8 rxqueue_timer = 0;
+
+		if (vptr->mii_status & (VELOCITY_SPEED_1000 |
+				VELOCITY_SPEED_100)) {
+			txqueue_timer = vptr->options.txqueue_timer;
+			rxqueue_timer = vptr->options.rxqueue_timer;
+		}
+
+		writeb(txqueue_timer, &vptr->mac_regs->TQETMR);
+		writeb(rxqueue_timer, &vptr->mac_regs->RQETMR);
+	}
+}
+/**
+ * setup_adaptive_interrupts  -  Setup interrupt suppression
+ *
+ * @vptr velocity adapter
+ *
+ * The velocity is able to suppress interrupt during high interrupt load.
+ * This function turns on that feature.
+ */
+static void setup_adaptive_interrupts(struct velocity_info *vptr)
+{
+	struct mac_regs __iomem *regs = vptr->mac_regs;
+	u16 tx_intsup = vptr->options.tx_intsup;
+	u16 rx_intsup = vptr->options.rx_intsup;
+
+	/* Setup default interrupt mask (will be changed below) */
+	vptr->int_mask = INT_MASK_DEF;
+
+	/* Set Tx Interrupt Suppression Threshold */
+	writeb(CAMCR_PS0, &regs->CAMCR);
+	if (tx_intsup != 0) {
+		vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I |
+				ISR_PTX2I | ISR_PTX3I);
+		writew(tx_intsup, &regs->ISRCTL);
+	} else
+		writew(ISRCTL_TSUPDIS, &regs->ISRCTL);
+
+	/* Set Rx Interrupt Suppression Threshold */
+	writeb(CAMCR_PS1, &regs->CAMCR);
+	if (rx_intsup != 0) {
+		vptr->int_mask &= ~ISR_PRXI;
+		writew(rx_intsup, &regs->ISRCTL);
+	} else
+		writew(ISRCTL_RSUPDIS, &regs->ISRCTL);
+
+	/* Select page to interrupt hold timer */
+	writeb(0, &regs->CAMCR);
+}
 
 /**
  *	velocity_init_registers	-	initialise MAC registers
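Note on the new setup_adaptive_interrupts(): ISRCTL is a paged register, and the CAMCR writes select which page it exposes. CAMCR_PS0 maps the Tx suppression threshold, CAMCR_PS1 the Rx threshold, and page 0 (restored at the end) the interrupt hold timer. A minimal read-back helper, purely illustrative and not part of this patch, would follow the same protocol:

	static u16 velocity_read_intsup(struct mac_regs __iomem *regs, u8 page)
	{
		u16 threshold;

		writeb(page, &regs->CAMCR);	/* CAMCR_PS0 (tx) or CAMCR_PS1 (rx) */
		threshold = readw(&regs->ISRCTL);
		writeb(0, &regs->CAMCR);	/* restore the hold-timer page */
		return threshold;
	}
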
@@ -1345,7 +1375,7 @@ static void velocity_init_registers(struct velocity_info *vptr,
 	 */
 	enable_mii_autopoll(regs);
 
-	vptr->int_mask = INT_MASK_DEF;
+	setup_adaptive_interrupts(vptr);
 
 	writel(vptr->rx.pool_dma, &regs->RDBaseLo);
 	writew(vptr->options.numrx - 1, &regs->RDCSize);
@@ -1483,7 +1513,8 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
 	 *	Do the gymnastics to get the buffer head for data at
 	 *	64byte alignment.
 	 */
-	skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63);
+	skb_reserve(rd_info->skb,
+			64 - ((unsigned long) rd_info->skb->data & 63));
 	rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
 					vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
 
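Note: this is a genuine alignment fix, not just a reshuffle. With a data pointer whose low bits are, say, 8 (addr & 63 == 8):

	skb_reserve(skb, addr & 63);		/* old: reserves 8, data at ...0x10, still unaligned   */
	skb_reserve(skb, 64 - (addr & 63));	/* new: reserves 56, data at ...0x40, 64-byte aligned */

When the pointer is already aligned, the new expression reserves a full 64 bytes instead of 0; that costs a little headroom, but the data ends up 64-byte aligned either way.
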
@@ -1602,12 +1633,10 @@ out:
  */
 static int velocity_init_td_ring(struct velocity_info *vptr)
 {
-	dma_addr_t curr;
 	int j;
 
 	/* Init the TD ring entries */
 	for (j = 0; j < vptr->tx.numq; j++) {
-		curr = vptr->tx.pool_dma[j];
 
 		vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
 					sizeof(struct velocity_td_info),
@@ -1673,21 +1702,27 @@ err_free_dma_rings_0:
  *	Release an transmit buffer. If the buffer was preallocated then
  *	recycle it, if not then unmap the buffer.
  */
-static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *tdinfo)
+static void velocity_free_tx_buf(struct velocity_info *vptr,
+		struct velocity_td_info *tdinfo, struct tx_desc *td)
 {
 	struct sk_buff *skb = tdinfo->skb;
-	int i;
-	int pktlen;
 
 	/*
 	 *	Don't unmap the pre-allocated tx_bufs
 	 */
 	if (tdinfo->skb_dma) {
+		int i;
 
-		pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);
 		for (i = 0; i < tdinfo->nskb_dma; i++) {
-			pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], pktlen, PCI_DMA_TODEVICE);
-			tdinfo->skb_dma[i] = 0;
+			size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
+
+			/* For scatter-gather */
+			if (skb_shinfo(skb)->nr_frags > 0)
+				pktlen = max_t(size_t, pktlen,
+						td->td_buf[i].size & ~TD_QUEUE);
+
+			pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i],
+					le16_to_cpu(pktlen), PCI_DMA_TODEVICE);
 		}
 	}
 	dev_kfree_skb_irq(skb);
@@ -1801,6 +1836,8 @@ static void velocity_error(struct velocity_info *vptr, int status)
 			BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
 		else
 			BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
+
+		setup_queue_timers(vptr);
 	}
 	/*
 	 *	Get link status from PHYSR0
@@ -1887,7 +1924,7 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
 				stats->tx_packets++;
 				stats->tx_bytes += tdinfo->skb->len;
 			}
-			velocity_free_tx_buf(vptr, tdinfo);
+			velocity_free_tx_buf(vptr, tdinfo, td);
 			vptr->tx.used[qnum]--;
 		}
 		vptr->tx.tail[qnum] = idx;
@@ -1949,10 +1986,9 @@ static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
 	if (pkt_size < rx_copybreak) {
 		struct sk_buff *new_skb;
 
-		new_skb = netdev_alloc_skb(vptr->dev, pkt_size + 2);
+		new_skb = netdev_alloc_skb_ip_align(vptr->dev, pkt_size);
 		if (new_skb) {
 			new_skb->ip_summed = rx_skb[0]->ip_summed;
-			skb_reserve(new_skb, 2);
 			skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
 			*rx_skb = new_skb;
 			ret = 0;
@@ -2060,13 +2096,14 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
  *	any received packets from the receive queue. Hand the ring
  *	slots back to the adapter for reuse.
  */
-static int velocity_rx_srv(struct velocity_info *vptr, int status)
+static int velocity_rx_srv(struct velocity_info *vptr, int status,
+		int budget_left)
 {
 	struct net_device_stats *stats = &vptr->dev->stats;
 	int rd_curr = vptr->rx.curr;
 	int works = 0;
 
-	do {
+	while (works < budget_left) {
 		struct rx_desc *rd = vptr->rx.ring + rd_curr;
 
 		if (!vptr->rx.info[rd_curr].skb)
@@ -2097,7 +2134,8 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
 		rd_curr++;
 		if (rd_curr >= vptr->options.numrx)
 			rd_curr = 0;
-	} while (++works <= 15);
+		works++;
+	}
 
 	vptr->rx.curr = rd_curr;
 
@@ -2108,6 +2146,40 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
 	return works;
 }
 
+static int velocity_poll(struct napi_struct *napi, int budget)
+{
+	struct velocity_info *vptr = container_of(napi,
+			struct velocity_info, napi);
+	unsigned int rx_done;
+	u32 isr_status;
+
+	spin_lock(&vptr->lock);
+	isr_status = mac_read_isr(vptr->mac_regs);
+
+	/* Ack the interrupt */
+	mac_write_isr(vptr->mac_regs, isr_status);
+	if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
+		velocity_error(vptr, isr_status);
+
+	/*
+	 * Do rx and tx twice for performance (taken from the VIA
+	 * out-of-tree driver).
+	 */
+	rx_done = velocity_rx_srv(vptr, isr_status, budget / 2);
+	velocity_tx_srv(vptr, isr_status);
+	rx_done += velocity_rx_srv(vptr, isr_status, budget - rx_done);
+	velocity_tx_srv(vptr, isr_status);
+
+	spin_unlock(&vptr->lock);
+
+	/* If budget not fully consumed, exit the polling mode */
+	if (rx_done < budget) {
+		napi_complete(napi);
+		mac_enable_int(vptr->mac_regs);
+	}
+
+	return rx_done;
+}
 
 /**
  *	velocity_intr	-	interrupt callback
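Note: velocity_poll() honours the NAPI contract that at most budget receive descriptors are processed per call. The two rx passes cannot overrun it:

	first pass:	rx_done <= budget / 2
	second pass:	rx_done <= rx_done + (budget - rx_done) = budget

so the final rx_done < budget test is a safe signal that the device went idle, at which point napi_complete() is called and the device interrupt is unmasked again.
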
@@ -2124,8 +2196,6 @@ static irqreturn_t velocity_intr(int irq, void *dev_instance)
 	struct net_device *dev = dev_instance;
 	struct velocity_info *vptr = netdev_priv(dev);
 	u32 isr_status;
-	int max_count = 0;
-
 
 	spin_lock(&vptr->lock);
 	isr_status = mac_read_isr(vptr->mac_regs);
@@ -2136,32 +2206,13 @@ static irqreturn_t velocity_intr(int irq, void *dev_instance)
 		return IRQ_NONE;
 	}
 
-	mac_disable_int(vptr->mac_regs);
-
-	/*
-	 *	Keep processing the ISR until we have completed
-	 *	processing and the isr_status becomes zero
-	 */
-
-	while (isr_status != 0) {
-		mac_write_isr(vptr->mac_regs, isr_status);
-		if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
-			velocity_error(vptr, isr_status);
-		if (isr_status & (ISR_PRXI | ISR_PPRXI))
-			max_count += velocity_rx_srv(vptr, isr_status);
-		if (isr_status & (ISR_PTXI | ISR_PPTXI))
-			max_count += velocity_tx_srv(vptr, isr_status);
-		isr_status = mac_read_isr(vptr->mac_regs);
-		if (max_count > vptr->options.int_works) {
-			printk(KERN_WARNING "%s: excessive work at interrupt.\n",
-				dev->name);
-			max_count = 0;
-		}
+	if (likely(napi_schedule_prep(&vptr->napi))) {
+		mac_disable_int(vptr->mac_regs);
+		__napi_schedule(&vptr->napi);
 	}
 	spin_unlock(&vptr->lock);
-	mac_enable_int(vptr->mac_regs);
-	return IRQ_HANDLED;
 
+	return IRQ_HANDLED;
 }
 
 /**
@@ -2190,7 +2241,7 @@ static int velocity_open(struct net_device *dev)
 
 	velocity_init_registers(vptr, VELOCITY_INIT_COLD);
 
-	ret = request_irq(vptr->pdev->irq, &velocity_intr, IRQF_SHARED,
+	ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED,
 			  dev->name, dev);
 	if (ret < 0) {
 		/* Power down the chip */
@@ -2201,6 +2252,7 @@
 
 	mac_enable_int(vptr->mac_regs);
 	netif_start_queue(dev);
+	napi_enable(&vptr->napi);
 	vptr->flags |= VELOCITY_FLAGS_OPENED;
 out:
 	return ret;
@@ -2436,6 +2488,7 @@ static int velocity_close(struct net_device *dev)
 {
 	struct velocity_info *vptr = netdev_priv(dev);
 
+	napi_disable(&vptr->napi);
 	netif_stop_queue(dev);
 	velocity_shutdown(vptr);
 
@@ -2470,14 +2523,22 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
 	struct velocity_td_info *tdinfo;
 	unsigned long flags;
 	int pktlen;
-	__le16 len;
-	int index;
+	int index, prev;
+	int i = 0;
 
 	if (skb_padto(skb, ETH_ZLEN))
 		goto out;
-	pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);
 
-	len = cpu_to_le16(pktlen);
+	/* The hardware can handle at most 7 memory segments, so merge
+	 * the skb if there are more */
+	if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
+		kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	pktlen = skb_shinfo(skb)->nr_frags == 0 ?
+			max_t(unsigned int, skb->len, ETH_ZLEN) :
+			skb_headlen(skb);
 
 	spin_lock_irqsave(&vptr->lock, flags);
 
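Note: the limit of 7 segments matches the descriptor layout, which carries seven td_buf entries: one for the linear head plus up to six page fragments, so anything more fragmented is flattened with __skb_linearize(). The new pktlen selection also means the ETH_ZLEN clamp only matters in the unfragmented case; for example, a 3000-byte skb carrying 1400 linear bytes and one 1600-byte fragment gives:

	skb->len         == 3000
	skb_headlen(skb) == 1400	/* -> pktlen: only the head is mapped here */

skb_padto() above already guarantees the minimum frame length in both cases.
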
@@ -2494,11 +2555,24 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
 	 */
 	tdinfo->skb = skb;
 	tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
-	td_ptr->tdesc0.len = len;
+	td_ptr->tdesc0.len = cpu_to_le16(pktlen);
 	td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
 	td_ptr->td_buf[0].pa_high = 0;
-	td_ptr->td_buf[0].size = len;
-	tdinfo->nskb_dma = 1;
+	td_ptr->td_buf[0].size = cpu_to_le16(pktlen);
+
+	/* Handle fragments */
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		tdinfo->skb_dma[i + 1] = pci_map_page(vptr->pdev, frag->page,
+				frag->page_offset, frag->size,
+				PCI_DMA_TODEVICE);
+
+		td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
+		td_ptr->td_buf[i + 1].pa_high = 0;
+		td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size);
+	}
+	tdinfo->nskb_dma = i + 1;
 
 	td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
 
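Note: continuing the example above, the filled descriptor ends up as

	td_buf[0]: pa = head dma,  size = 1400
	td_buf[1]: pa = frag0 dma, size = 1600
	nskb_dma = 2

and the unchanged "tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16" line now encodes the real buffer count; the * 16 presumably shifts the count into a bitfield above the TCPLS bits, an assumption based on the old single-buffer path, which used the same formula with nskb_dma fixed at 1. pci_map_page() is used for the fragments because page fragments, unlike skb->data, need not sit in the kernel's linear mapping on highmem configurations.
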
@@ -2510,7 +2584,7 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
 	/*
 	 *	Handle hardware checksum
 	 */
-	if ((vptr->flags & VELOCITY_FLAGS_TX_CSUM)
+	if ( (dev->features & NETIF_F_IP_CSUM)
 	    && (skb->ip_summed == CHECKSUM_PARTIAL)) {
 		const struct iphdr *ip = ip_hdr(skb);
 		if (ip->protocol == IPPROTO_TCP)
@@ -2519,23 +2593,21 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
 			td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
 		td_ptr->tdesc1.TCR |= TCR0_IPCK;
 	}
-	{
 
-		int prev = index - 1;
+	prev = index - 1;
+	if (prev < 0)
+		prev = vptr->options.numtx - 1;
+	td_ptr->tdesc0.len |= OWNED_BY_NIC;
+	vptr->tx.used[qnum]++;
+	vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
 
-		if (prev < 0)
-			prev = vptr->options.numtx - 1;
-		td_ptr->tdesc0.len |= OWNED_BY_NIC;
-		vptr->tx.used[qnum]++;
-		vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
+	if (AVAIL_TD(vptr, qnum) < 1)
+		netif_stop_queue(dev);
 
-		if (AVAIL_TD(vptr, qnum) < 1)
-			netif_stop_queue(dev);
+	td_ptr = &(vptr->tx.rings[qnum][prev]);
+	td_ptr->td_buf[0].size |= TD_QUEUE;
+	mac_tx_queue_wake(vptr->mac_regs, qnum);
 
-		td_ptr = &(vptr->tx.rings[qnum][prev]);
-		td_ptr->td_buf[0].size |= TD_QUEUE;
-		mac_tx_queue_wake(vptr->mac_regs, qnum);
-	}
 	dev->trans_start = jiffies;
 	spin_unlock_irqrestore(&vptr->lock, flags);
 out:
@@ -2578,7 +2650,6 @@ static void __devinit velocity_init_info(struct pci_dev *pdev,
 	vptr->tx.numq = info->txqueue;
 	vptr->multicast_limit = MCAM_SIZE;
 	spin_lock_init(&vptr->lock);
-	INIT_LIST_HEAD(&vptr->list);
 }
 
 /**
@@ -2755,12 +2826,10 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
 	dev->irq = pdev->irq;
 	dev->netdev_ops = &velocity_netdev_ops;
 	dev->ethtool_ops = &velocity_ethtool_ops;
+	netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
 
 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
-		NETIF_F_HW_VLAN_RX;
-
-	if (vptr->flags & VELOCITY_FLAGS_TX_CSUM)
-		dev->features |= NETIF_F_IP_CSUM;
+		NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM;
 
 	ret = register_netdev(dev);
 	if (ret < 0)
@@ -2777,15 +2846,6 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
 	/* and leave the chip powered down */
 
 	pci_set_power_state(pdev, PCI_D3hot);
-#ifdef CONFIG_PM
-	{
-		unsigned long flags;
-
-		spin_lock_irqsave(&velocity_dev_list_lock, flags);
-		list_add(&vptr->list, &velocity_dev_list);
-		spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
-	}
-#endif
 	velocity_nics++;
 out:
 	return ret;
@@ -3222,15 +3282,114 @@ static void velocity_set_msglevel(struct net_device *dev, u32 value)
 	 msglevel = value;
 }
 
+static int get_pending_timer_val(int val)
+{
+	int mult_bits = val >> 6;
+	int mult = 1;
+
+	switch (mult_bits)
+	{
+	case 1:
+		mult = 4; break;
+	case 2:
+		mult = 16; break;
+	case 3:
+		mult = 64; break;
+	case 0:
+	default:
+		break;
+	}
+
+	return (val & 0x3f) * mult;
+}
+
+static void set_pending_timer_val(int *val, u32 us)
+{
+	u8 mult = 0;
+	u8 shift = 0;
+
+	if (us >= 0x3f) {
+		mult = 1; /* mult with 4 */
+		shift = 2;
+	}
+	if (us >= 0x3f * 4) {
+		mult = 2; /* mult with 16 */
+		shift = 4;
+	}
+	if (us >= 0x3f * 16) {
+		mult = 3; /* mult with 64 */
+		shift = 6;
+	}
+
+	*val = (mult << 6) | ((us >> shift) & 0x3f);
+}
+
+
+static int velocity_get_coalesce(struct net_device *dev,
+		struct ethtool_coalesce *ecmd)
+{
+	struct velocity_info *vptr = netdev_priv(dev);
+
+	ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup;
+	ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup;
+
+	ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer);
+	ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer);
+
+	return 0;
+}
+
+static int velocity_set_coalesce(struct net_device *dev,
+		struct ethtool_coalesce *ecmd)
+{
+	struct velocity_info *vptr = netdev_priv(dev);
+	int max_us = 0x3f * 64;
+
+	/* 6 bits of  */
+	if (ecmd->tx_coalesce_usecs > max_us)
+		return -EINVAL;
+	if (ecmd->rx_coalesce_usecs > max_us)
+		return -EINVAL;
+
+	if (ecmd->tx_max_coalesced_frames > 0xff)
+		return -EINVAL;
+	if (ecmd->rx_max_coalesced_frames > 0xff)
+		return -EINVAL;
+
+	vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames;
+	vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames;
+
+	set_pending_timer_val(&vptr->options.rxqueue_timer,
+			ecmd->rx_coalesce_usecs);
+	set_pending_timer_val(&vptr->options.txqueue_timer,
+			ecmd->tx_coalesce_usecs);
+
+	/* Setup the interrupt suppression and queue timers */
+	mac_disable_int(vptr->mac_regs);
+	setup_adaptive_interrupts(vptr);
+	setup_queue_timers(vptr);
+
+	mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
+	mac_clear_isr(vptr->mac_regs);
+	mac_enable_int(vptr->mac_regs);
+
+	return 0;
+}
+
 static const struct ethtool_ops velocity_ethtool_ops = {
 	.get_settings	=	velocity_get_settings,
 	.set_settings	=	velocity_set_settings,
 	.get_drvinfo	=	velocity_get_drvinfo,
+	.set_tx_csum	=	ethtool_op_set_tx_csum,
+	.get_tx_csum	=	ethtool_op_get_tx_csum,
 	.get_wol	=	velocity_ethtool_get_wol,
 	.set_wol	=	velocity_ethtool_set_wol,
 	.get_msglevel	=	velocity_get_msglevel,
 	.set_msglevel	=	velocity_set_msglevel,
+	.set_sg		=	ethtool_op_set_sg,
 	.get_link	=	velocity_get_link,
+	.get_coalesce	=	velocity_get_coalesce,
+	.set_coalesce	=	velocity_set_coalesce,
 	.begin		=	velocity_ethtool_up,
 	.complete	=	velocity_ethtool_down
 };
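Note on the timer encoding: the pending-timer fields pack a 6-bit tick count with a 2-bit multiplier (x1, x4, x16 or x64), which is why velocity_set_coalesce() rejects anything above 0x3f * 64 = 4032 us. A round-trip for rx_coalesce_usecs = 100 works out as:

	set_pending_timer_val: 100 >= 0x3f but < 0x3f * 4, so mult = 1, shift = 2
	                       val = (1 << 6) | (100 >> 2) = 0x40 | 25 = 0x59
	get_pending_timer_val: mult_bits = 0x59 >> 6 = 1 -> mult = 4
	                       (0x59 & 0x3f) * 4 = 100 us

Values are quantized to the multiplier step (4 us in this range). From userspace the new hooks are driven through the usual ethtool interface, e.g. "ethtool -C eth0 rx-usecs 100 rx-frames 16".
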
@@ -3241,20 +3400,10 @@ static int velocity_netdev_event(struct notifier_block *nb, unsigned long notifi
 {
 	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
 	struct net_device *dev = ifa->ifa_dev->dev;
-	struct velocity_info *vptr;
-	unsigned long flags;
 
-	if (dev_net(dev) != &init_net)
-		return NOTIFY_DONE;
-
-	spin_lock_irqsave(&velocity_dev_list_lock, flags);
-	list_for_each_entry(vptr, &velocity_dev_list, list) {
-		if (vptr->dev == dev) {
-			velocity_get_ip(vptr);
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
+	if (dev_net(dev) == &init_net &&
+	    dev->netdev_ops == &velocity_netdev_ops)
+		velocity_get_ip(netdev_priv(dev));
 
 	return NOTIFY_DONE;
 }