author	Atsushi Nemoto <anemo@mba.ocn.ne.jp>	2009-11-01 23:34:47 -0500
committer	David S. Miller <davem@davemloft.net>	2009-11-03 02:43:58 -0500
commit	a02b7b7a138c7b1bc08e0e749ecbb613eadb6d41 (patch)
tree	d70c0df159407ff6fdaa065b453d1aeda8f9abf6 /drivers/net/tc35815.c
parent	c6a2dbbadee65345a226aa15a9cbe5b70ae912e7 (diff)
tc35815: Kill unused code
- TC35815_DMA_SYNC_ONDEMAND is always enabled.
- WORKAROUND_LOSTCAR is always enabled.
- WORKAROUND_100HALF_PROMISC is always enabled.
- GATHER_TXINT is always enabled.
- TC35815_USE_PACKEDBUFFER is always disabled.
- NO_CHECK_CARRIER is always disabled.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/tc35815.c')
-rw-r--r--	drivers/net/tc35815.c	224
1 files changed, 3 insertions, 221 deletions
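The whole patch is a mechanical dead-#ifdef collapse: each macro listed in the commit message is either unconditionally defined or never defined, so only one branch of every conditional that tests it could ever be compiled, and removing the macro together with the dead branch changes no generated code. A purely illustrative sketch of the pattern follows (hypothetical names, not code from the driver):

	#include <stdio.h>

	/* Hypothetical stand-in for a macro such as WORKAROUND_LOSTCAR:
	 * it is always defined, so the #else branch below is dead code. */
	#define ALWAYS_ENABLED

	static void tx_setup(void)
	{
	#ifdef ALWAYS_ENABLED
		printf("workaround path (the only branch ever compiled)\n");
	#else
		printf("dead branch, never reached by any build\n");
	#endif
	}

	int main(void)
	{
		tx_setup();	/* after the cleanup this body is just the first printf */
		return 0;
	}

After such a collapse the preprocessor output is identical to before, which is why the diff below can delete 221 lines while adding only 3.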
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 803eb64ffcc2..6572e8a54520 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -50,13 +50,6 @@ static const char *version = "tc35815.c:v" DRV_VERSION "\n";
 #include <asm/io.h>
 #include <asm/byteorder.h>
 
-/* First, a few definitions that the brave might change. */
-
-#define GATHER_TXINT	/* On-Demand Tx Interrupt */
-#define WORKAROUND_LOSTCAR
-#define WORKAROUND_100HALF_PROMISC
-/* #define TC35815_USE_PACKEDBUFFER */
-
 enum tc35815_chiptype {
 	TC35815CF = 0,
 	TC35815_NWU,
@@ -326,17 +319,10 @@ struct BDesc {
 
 
 /* Some useful constants. */
-#undef NO_CHECK_CARRIER	/* Does not check No-Carrier with TP */
 
-#ifdef NO_CHECK_CARRIER
-#define TX_CTL_CMD	(Tx_EnComp | Tx_EnTxPar | Tx_EnLateColl | \
-	Tx_EnExColl | Tx_EnExDefer | Tx_EnUnder | \
-	Tx_En)	/* maybe 0x7b01 */
-#else
-#define TX_CTL_CMD	(Tx_EnComp | Tx_EnTxPar | Tx_EnLateColl | \
+#define TX_CTL_CMD	(Tx_EnTxPar | Tx_EnLateColl | \
 	Tx_EnExColl | Tx_EnLCarr | Tx_EnExDefer | Tx_EnUnder | \
 	Tx_En)	/* maybe 0x7b01 */
-#endif
 /* Do not use Rx_StripCRC -- it causes trouble on BLEx/FDAEx condition */
 #define RX_CTL_CMD	(Rx_EnGood | Rx_EnRxPar | Rx_EnLongErr | Rx_EnOver \
 	| Rx_EnCRCErr | Rx_EnAlign | Rx_RxEn) /* maybe 0x6f01 */
@@ -357,13 +343,6 @@ struct BDesc {
 #define TX_THRESHOLD_KEEP_LIMIT 10
 
 /* 16 + RX_BUF_NUM * 8 + RX_FD_NUM * 16 + TX_FD_NUM * 32 <= PAGE_SIZE*FD_PAGE_NUM */
-#ifdef TC35815_USE_PACKEDBUFFER
-#define FD_PAGE_NUM 2
-#define RX_BUF_NUM	8	/* >= 2 */
-#define RX_FD_NUM	250	/* >= 32 */
-#define TX_FD_NUM	128
-#define RX_BUF_SIZE	PAGE_SIZE
-#else /* TC35815_USE_PACKEDBUFFER */
 #define FD_PAGE_NUM 4
 #define RX_BUF_NUM	128	/* < 256 */
 #define RX_FD_NUM	256	/* >= 32 */
@@ -377,7 +356,6 @@ struct BDesc {
 #define RX_BUF_SIZE	\
 	L1_CACHE_ALIGN(ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN + NET_IP_ALIGN)
 #endif
-#endif /* TC35815_USE_PACKEDBUFFER */
 #define RX_FD_RESERVE	(2 / 2)	/* max 2 BD per RxFD */
 #define NAPI_WEIGHT	16
 
@@ -435,11 +413,7 @@ struct tc35815_local { | |||
435 | /* | 413 | /* |
436 | * Transmitting: Batch Mode. | 414 | * Transmitting: Batch Mode. |
437 | * 1 BD in 1 TxFD. | 415 | * 1 BD in 1 TxFD. |
438 | * Receiving: Packing Mode. (TC35815_USE_PACKEDBUFFER) | 416 | * Receiving: Non-Packing Mode. |
439 | * 1 circular FD for Free Buffer List. | ||
440 | * RX_BUF_NUM BD in Free Buffer FD. | ||
441 | * One Free Buffer BD has PAGE_SIZE data buffer. | ||
442 | * Or Non-Packing Mode. | ||
443 | * 1 circular FD for Free Buffer List. | 417 | * 1 circular FD for Free Buffer List. |
444 | * RX_BUF_NUM BD in Free Buffer FD. | 418 | * RX_BUF_NUM BD in Free Buffer FD. |
445 | * One Free Buffer BD has ETH_FRAME_LEN data buffer. | 419 | * One Free Buffer BD has ETH_FRAME_LEN data buffer. |
@@ -453,21 +427,11 @@ struct tc35815_local { | |||
453 | struct RxFD *rfd_limit; | 427 | struct RxFD *rfd_limit; |
454 | struct RxFD *rfd_cur; | 428 | struct RxFD *rfd_cur; |
455 | struct FrFD *fbl_ptr; | 429 | struct FrFD *fbl_ptr; |
456 | #ifdef TC35815_USE_PACKEDBUFFER | ||
457 | unsigned char fbl_curid; | ||
458 | void *data_buf[RX_BUF_NUM]; /* packing */ | ||
459 | dma_addr_t data_buf_dma[RX_BUF_NUM]; | ||
460 | struct { | ||
461 | struct sk_buff *skb; | ||
462 | dma_addr_t skb_dma; | ||
463 | } tx_skbs[TX_FD_NUM]; | ||
464 | #else | ||
465 | unsigned int fbl_count; | 430 | unsigned int fbl_count; |
466 | struct { | 431 | struct { |
467 | struct sk_buff *skb; | 432 | struct sk_buff *skb; |
468 | dma_addr_t skb_dma; | 433 | dma_addr_t skb_dma; |
469 | } tx_skbs[TX_FD_NUM], rx_skbs[RX_BUF_NUM]; | 434 | } tx_skbs[TX_FD_NUM], rx_skbs[RX_BUF_NUM]; |
470 | #endif | ||
471 | u32 msg_enable; | 435 | u32 msg_enable; |
472 | enum tc35815_chiptype chiptype; | 436 | enum tc35815_chiptype chiptype; |
473 | }; | 437 | }; |
@@ -482,51 +446,6 @@ static inline void *fd_bus_to_virt(struct tc35815_local *lp, dma_addr_t bus) | |||
482 | return (void *)((u8 *)lp->fd_buf + (bus - lp->fd_buf_dma)); | 446 | return (void *)((u8 *)lp->fd_buf + (bus - lp->fd_buf_dma)); |
483 | } | 447 | } |
484 | #endif | 448 | #endif |
485 | #ifdef TC35815_USE_PACKEDBUFFER | ||
486 | static inline void *rxbuf_bus_to_virt(struct tc35815_local *lp, dma_addr_t bus) | ||
487 | { | ||
488 | int i; | ||
489 | for (i = 0; i < RX_BUF_NUM; i++) { | ||
490 | if (bus >= lp->data_buf_dma[i] && | ||
491 | bus < lp->data_buf_dma[i] + PAGE_SIZE) | ||
492 | return (void *)((u8 *)lp->data_buf[i] + | ||
493 | (bus - lp->data_buf_dma[i])); | ||
494 | } | ||
495 | return NULL; | ||
496 | } | ||
497 | |||
498 | #define TC35815_DMA_SYNC_ONDEMAND | ||
499 | static void *alloc_rxbuf_page(struct pci_dev *hwdev, dma_addr_t *dma_handle) | ||
500 | { | ||
501 | #ifdef TC35815_DMA_SYNC_ONDEMAND | ||
502 | void *buf; | ||
503 | /* pci_map + pci_dma_sync will be more effective than | ||
504 | * pci_alloc_consistent on some archs. */ | ||
505 | buf = (void *)__get_free_page(GFP_ATOMIC); | ||
506 | if (!buf) | ||
507 | return NULL; | ||
508 | *dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE, | ||
509 | PCI_DMA_FROMDEVICE); | ||
510 | if (pci_dma_mapping_error(hwdev, *dma_handle)) { | ||
511 | free_page((unsigned long)buf); | ||
512 | return NULL; | ||
513 | } | ||
514 | return buf; | ||
515 | #else | ||
516 | return pci_alloc_consistent(hwdev, PAGE_SIZE, dma_handle); | ||
517 | #endif | ||
518 | } | ||
519 | |||
520 | static void free_rxbuf_page(struct pci_dev *hwdev, void *buf, dma_addr_t dma_handle) | ||
521 | { | ||
522 | #ifdef TC35815_DMA_SYNC_ONDEMAND | ||
523 | pci_unmap_single(hwdev, dma_handle, PAGE_SIZE, PCI_DMA_FROMDEVICE); | ||
524 | free_page((unsigned long)buf); | ||
525 | #else | ||
526 | pci_free_consistent(hwdev, PAGE_SIZE, buf, dma_handle); | ||
527 | #endif | ||
528 | } | ||
529 | #else /* TC35815_USE_PACKEDBUFFER */ | ||
530 | static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev, | 449 | static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev, |
531 | struct pci_dev *hwdev, | 450 | struct pci_dev *hwdev, |
532 | dma_addr_t *dma_handle) | 451 | dma_addr_t *dma_handle) |
@@ -551,7 +470,6 @@ static void free_rxbuf_skb(struct pci_dev *hwdev, struct sk_buff *skb, dma_addr_
 			 PCI_DMA_FROMDEVICE);
 	dev_kfree_skb_any(skb);
 }
-#endif /* TC35815_USE_PACKEDBUFFER */
 
 /* Index to functions, as function prototypes. */
 
@@ -646,8 +564,6 @@ static void tc_handle_link_change(struct net_device *dev)
 			 * TX4939 PCFG.SPEEDn bit will be changed on
 			 * NETDEV_CHANGE event.
 			 */
-
-#if !defined(NO_CHECK_CARRIER) && defined(WORKAROUND_LOSTCAR)
 			/*
 			 * WORKAROUND: enable LostCrS only if half duplex
 			 * operation.
@@ -657,7 +573,6 @@ static void tc_handle_link_change(struct net_device *dev)
 			    lp->chiptype != TC35815_TX4939)
 				tc_writel(tc_readl(&tr->Tx_Ctl) | Tx_EnLCarr,
 					  &tr->Tx_Ctl);
-#endif
 
 		lp->speed = phydev->speed;
 		lp->duplex = phydev->duplex;
@@ -666,11 +581,9 @@ static void tc_handle_link_change(struct net_device *dev)
 
 	if (phydev->link != lp->link) {
 		if (phydev->link) {
-#ifdef WORKAROUND_100HALF_PROMISC
 			/* delayed promiscuous enabling */
 			if (dev->flags & IFF_PROMISC)
 				tc35815_set_multicast_list(dev);
-#endif
 		} else {
 			lp->speed = 0;
 			lp->duplex = -1;
@@ -997,25 +910,6 @@ tc35815_init_queues(struct net_device *dev)
 		if (!lp->fd_buf)
 			return -ENOMEM;
 		for (i = 0; i < RX_BUF_NUM; i++) {
-#ifdef TC35815_USE_PACKEDBUFFER
-			lp->data_buf[i] =
-				alloc_rxbuf_page(lp->pci_dev,
-						 &lp->data_buf_dma[i]);
-			if (!lp->data_buf[i]) {
-				while (--i >= 0) {
-					free_rxbuf_page(lp->pci_dev,
-							lp->data_buf[i],
-							lp->data_buf_dma[i]);
-					lp->data_buf[i] = NULL;
-				}
-				pci_free_consistent(lp->pci_dev,
-						    PAGE_SIZE * FD_PAGE_NUM,
-						    lp->fd_buf,
-						    lp->fd_buf_dma);
-				lp->fd_buf = NULL;
-				return -ENOMEM;
-			}
-#else
 			lp->rx_skbs[i].skb =
 				alloc_rxbuf_skb(dev, lp->pci_dev,
 						&lp->rx_skbs[i].skb_dma);
@@ -1033,15 +927,9 @@ tc35815_init_queues(struct net_device *dev)
 				lp->fd_buf = NULL;
 				return -ENOMEM;
 			}
-#endif
 		}
 		printk(KERN_DEBUG "%s: FD buf %p DataBuf",
 		       dev->name, lp->fd_buf);
-#ifdef TC35815_USE_PACKEDBUFFER
-		printk(" DataBuf");
-		for (i = 0; i < RX_BUF_NUM; i++)
-			printk(" %p", lp->data_buf[i]);
-#endif
 		printk("\n");
 	} else {
 		for (i = 0; i < FD_PAGE_NUM; i++)
@@ -1074,7 +962,6 @@ tc35815_init_queues(struct net_device *dev)
 	lp->fbl_ptr = (struct FrFD *)fd_addr;
 	lp->fbl_ptr->fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, lp->fbl_ptr));
 	lp->fbl_ptr->fd.FDCtl = cpu_to_le32(RX_BUF_NUM | FD_CownsFD);
-#ifndef TC35815_USE_PACKEDBUFFER
 	/*
 	 * move all allocated skbs to head of rx_skbs[] array.
 	 * fbl_count mighe not be RX_BUF_NUM if alloc_rxbuf_skb() in
@@ -1092,11 +979,7 @@ tc35815_init_queues(struct net_device *dev)
 			lp->fbl_count++;
 		}
 	}
-#endif
 	for (i = 0; i < RX_BUF_NUM; i++) {
-#ifdef TC35815_USE_PACKEDBUFFER
-		lp->fbl_ptr->bd[i].BuffData = cpu_to_le32(lp->data_buf_dma[i]);
-#else
 		if (i >= lp->fbl_count) {
 			lp->fbl_ptr->bd[i].BuffData = 0;
 			lp->fbl_ptr->bd[i].BDCtl = 0;
@@ -1104,15 +987,11 @@ tc35815_init_queues(struct net_device *dev)
 		}
 		lp->fbl_ptr->bd[i].BuffData =
 			cpu_to_le32(lp->rx_skbs[i].skb_dma);
-#endif
 		/* BDID is index of FrFD.bd[] */
 		lp->fbl_ptr->bd[i].BDCtl =
 			cpu_to_le32(BD_CownsBD | (i << BD_RxBDID_SHIFT) |
 				    RX_BUF_SIZE);
 	}
-#ifdef TC35815_USE_PACKEDBUFFER
-	lp->fbl_curid = 0;
-#endif
 
 	printk(KERN_DEBUG "%s: TxFD %p RxFD %p FrFD %p\n",
 	       dev->name, lp->tfd_base, lp->rfd_base, lp->fbl_ptr);
@@ -1186,19 +1065,11 @@ tc35815_free_queues(struct net_device *dev)
 	lp->fbl_ptr = NULL;
 
 	for (i = 0; i < RX_BUF_NUM; i++) {
-#ifdef TC35815_USE_PACKEDBUFFER
-		if (lp->data_buf[i]) {
-			free_rxbuf_page(lp->pci_dev,
-					lp->data_buf[i], lp->data_buf_dma[i]);
-			lp->data_buf[i] = NULL;
-		}
-#else
 		if (lp->rx_skbs[i].skb) {
 			free_rxbuf_skb(lp->pci_dev, lp->rx_skbs[i].skb,
 				       lp->rx_skbs[i].skb_dma);
 			lp->rx_skbs[i].skb = NULL;
 		}
-#endif
 	}
 	if (lp->fd_buf) {
 		pci_free_consistent(lp->pci_dev, PAGE_SIZE * FD_PAGE_NUM,
@@ -1244,7 +1115,7 @@ dump_rxfd(struct RxFD *fd)
 	return bd_count;
 }
 
-#if defined(DEBUG) || defined(TC35815_USE_PACKEDBUFFER)
+#ifdef DEBUG
 static void
 dump_frfd(struct FrFD *fd)
 {
@@ -1261,9 +1132,7 @@ dump_frfd(struct FrFD *fd)
 		       le32_to_cpu(fd->bd[i].BDCtl));
 	printk("\n");
 }
-#endif
 
-#ifdef DEBUG
 static void
 panic_queues(struct net_device *dev)
 {
@@ -1466,9 +1335,7 @@ static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
 			(struct tc35815_regs __iomem *)dev->base_addr;
 		/* Start DMA Transmitter. */
 		txfd->fd.FDNext |= cpu_to_le32(FD_Next_EOL);
-#ifdef GATHER_TXINT
 		txfd->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx);
-#endif
 		if (netif_msg_tx_queued(lp)) {
 			printk("%s: starting TxFD.\n", dev->name);
 			dump_txfd(txfd);
@@ -1640,50 +1507,9 @@ tc35815_rx(struct net_device *dev, int limit)
 		struct sk_buff *skb;
 		unsigned char *data;
 		int cur_bd;
-#ifdef TC35815_USE_PACKEDBUFFER
-		int offset;
-#endif
 
 		if (--limit < 0)
 			break;
-#ifdef TC35815_USE_PACKEDBUFFER
-		BUG_ON(bd_count > 2);
-		skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
-		if (skb == NULL) {
-			printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n",
-			       dev->name);
-			dev->stats.rx_dropped++;
-			break;
-		}
-		skb_reserve(skb, NET_IP_ALIGN);
-
-		data = skb_put(skb, pkt_len);
-
-		/* copy from receive buffer */
-		cur_bd = 0;
-		offset = 0;
-		while (offset < pkt_len && cur_bd < bd_count) {
-			int len = le32_to_cpu(lp->rfd_cur->bd[cur_bd].BDCtl) &
-				BD_BuffLength_MASK;
-			dma_addr_t dma = le32_to_cpu(lp->rfd_cur->bd[cur_bd].BuffData);
-			void *rxbuf = rxbuf_bus_to_virt(lp, dma);
-			if (offset + len > pkt_len)
-				len = pkt_len - offset;
-#ifdef TC35815_DMA_SYNC_ONDEMAND
-			pci_dma_sync_single_for_cpu(lp->pci_dev,
-						    dma, len,
-						    PCI_DMA_FROMDEVICE);
-#endif
-			memcpy(data + offset, rxbuf, len);
-#ifdef TC35815_DMA_SYNC_ONDEMAND
-			pci_dma_sync_single_for_device(lp->pci_dev,
-						       dma, len,
-						       PCI_DMA_FROMDEVICE);
-#endif
-			offset += len;
-			cur_bd++;
-		}
-#else /* TC35815_USE_PACKEDBUFFER */
 		BUG_ON(bd_count > 1);
 		cur_bd = (le32_to_cpu(lp->rfd_cur->bd[0].BDCtl)
 			  & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT;
@@ -1711,7 +1537,6 @@ tc35815_rx(struct net_device *dev, int limit)
 			memmove(skb->data, skb->data - NET_IP_ALIGN,
 				pkt_len);
 		data = skb_put(skb, pkt_len);
-#endif /* TC35815_USE_PACKEDBUFFER */
 		if (netif_msg_pktdata(lp))
 			print_eth(data);
 		skb->protocol = eth_type_trans(skb, dev);
@@ -1753,19 +1578,11 @@ tc35815_rx(struct net_device *dev, int limit)
 			BUG_ON(id >= RX_BUF_NUM);
 #endif
 			/* free old buffers */
-#ifdef TC35815_USE_PACKEDBUFFER
-			while (lp->fbl_curid != id)
-#else
 			lp->fbl_count--;
 			while (lp->fbl_count < RX_BUF_NUM)
-#endif
 			{
-#ifdef TC35815_USE_PACKEDBUFFER
-				unsigned char curid = lp->fbl_curid;
-#else
 				unsigned char curid =
 					(id + 1 + lp->fbl_count) % RX_BUF_NUM;
-#endif
 				struct BDesc *bd = &lp->fbl_ptr->bd[curid];
 #ifdef DEBUG
 				bdctl = le32_to_cpu(bd->BDCtl);
@@ -1776,7 +1593,6 @@ tc35815_rx(struct net_device *dev, int limit)
 				}
 #endif
 				/* pass BD to controller */
-#ifndef TC35815_USE_PACKEDBUFFER
 				if (!lp->rx_skbs[curid].skb) {
 					lp->rx_skbs[curid].skb =
 						alloc_rxbuf_skb(dev,
@@ -1786,21 +1602,11 @@ tc35815_rx(struct net_device *dev, int limit)
 						break; /* try on next reception */
 					bd->BuffData = cpu_to_le32(lp->rx_skbs[curid].skb_dma);
 				}
-#endif /* TC35815_USE_PACKEDBUFFER */
 				/* Note: BDLength was modified by chip. */
 				bd->BDCtl = cpu_to_le32(BD_CownsBD |
 							(curid << BD_RxBDID_SHIFT) |
 							RX_BUF_SIZE);
-#ifdef TC35815_USE_PACKEDBUFFER
-				lp->fbl_curid = (curid + 1) % RX_BUF_NUM;
-				if (netif_msg_rx_status(lp)) {
-					printk("%s: Entering new FBD %d\n",
-					       dev->name, lp->fbl_curid);
-					dump_frfd(lp->fbl_ptr);
-				}
-#else
 				lp->fbl_count++;
-#endif
 			}
 		}
 
@@ -1872,11 +1678,7 @@ static int tc35815_poll(struct napi_struct *napi, int budget)
 	return received;
 }
 
-#ifdef NO_CHECK_CARRIER
-#define TX_STA_ERR	(Tx_ExColl|Tx_Under|Tx_Defer|Tx_LateColl|Tx_TxPar|Tx_SQErr)
-#else
 #define TX_STA_ERR	(Tx_ExColl|Tx_Under|Tx_Defer|Tx_NCarr|Tx_LateColl|Tx_TxPar|Tx_SQErr)
-#endif
 
 static void
 tc35815_check_tx_stat(struct net_device *dev, int status)
@@ -1890,16 +1692,12 @@ tc35815_check_tx_stat(struct net_device *dev, int status)
 	if (status & Tx_TxColl_MASK)
 		dev->stats.collisions += status & Tx_TxColl_MASK;
 
-#ifndef NO_CHECK_CARRIER
 	/* TX4939 does not have NCarr */
 	if (lp->chiptype == TC35815_TX4939)
 		status &= ~Tx_NCarr;
-#ifdef WORKAROUND_LOSTCAR
 	/* WORKAROUND: ignore LostCrS in full duplex operation */
 	if (!lp->link || lp->duplex == DUPLEX_FULL)
 		status &= ~Tx_NCarr;
-#endif
-#endif
 
 	if (!(status & TX_STA_ERR)) {
 		/* no error. */
@@ -1929,12 +1727,10 @@ tc35815_check_tx_stat(struct net_device *dev, int status)
 		dev->stats.tx_fifo_errors++;
 		msg = "Excessive Deferral.";
 	}
-#ifndef NO_CHECK_CARRIER
 	if (status & Tx_NCarr) {
 		dev->stats.tx_carrier_errors++;
 		msg = "Lost Carrier Sense.";
 	}
-#endif
 	if (status & Tx_LateColl) {
 		dev->stats.tx_aborted_errors++;
 		msg = "Late Collision.";
@@ -2025,9 +1821,7 @@ tc35815_txdone(struct net_device *dev)
 
 			/* start DMA Transmitter again */
 			txhead->fd.FDNext |= cpu_to_le32(FD_Next_EOL);
-#ifdef GATHER_TXINT
 			txhead->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx);
-#endif
 			if (netif_msg_tx_queued(lp)) {
 				printk("%s: start TxFD on queue.\n",
 				       dev->name);
@@ -2138,14 +1932,12 @@ tc35815_set_multicast_list(struct net_device *dev)
 		(struct tc35815_regs __iomem *)dev->base_addr;
 
 	if (dev->flags & IFF_PROMISC) {
-#ifdef WORKAROUND_100HALF_PROMISC
 		/* With some (all?) 100MHalf HUB, controller will hang
 		 * if we enabled promiscuous mode before linkup... */
 		struct tc35815_local *lp = netdev_priv(dev);
 
 		if (!lp->link)
 			return;
-#endif
 		/* Enable promiscuous mode */
 		tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl);
 	} else if ((dev->flags & IFF_ALLMULTI) ||
@@ -2332,9 +2124,6 @@ static void tc35815_chip_init(struct net_device *dev)
 		tc_writel(DMA_BURST_SIZE | DMA_RxAlign_2, &tr->DMA_Ctl);
 	else
 		tc_writel(DMA_BURST_SIZE, &tr->DMA_Ctl);
-#ifdef TC35815_USE_PACKEDBUFFER
-	tc_writel(RxFrag_EnPack | ETH_ZLEN, &tr->RxFragSize);	/* Packing */
-#endif
 	tc_writel(0, &tr->TxPollCtr);	/* Batch mode */
 	tc_writel(TX_THRESHOLD, &tr->TxThrsh);
 	tc_writel(INT_EN_CMD, &tr->Int_En);
@@ -2352,19 +2141,12 @@ static void tc35815_chip_init(struct net_device *dev)
 	tc_writel(RX_CTL_CMD, &tr->Rx_Ctl);	/* start MAC receiver */
 
 	/* start MAC transmitter */
-#ifndef NO_CHECK_CARRIER
 	/* TX4939 does not have EnLCarr */
 	if (lp->chiptype == TC35815_TX4939)
 		txctl &= ~Tx_EnLCarr;
-#ifdef WORKAROUND_LOSTCAR
 	/* WORKAROUND: ignore LostCrS in full duplex operation */
 	if (!lp->phy_dev || !lp->link || lp->duplex == DUPLEX_FULL)
 		txctl &= ~Tx_EnLCarr;
-#endif
-#endif /* !NO_CHECK_CARRIER */
-#ifdef GATHER_TXINT
-	txctl &= ~Tx_EnComp;	/* disable global tx completion int. */
-#endif
 	tc_writel(txctl, &tr->Tx_Ctl);
 }
 