author      Malli Chilakala <mallikarjuna.chilakala@intel.com>    2005-04-28 22:19:46 -0400
committer   Jeff Garzik <jgarzik@pobox.com>                       2005-05-12 20:59:28 -0400
commit      962082b6df11fe8cabafd2971b07c914cd52ee2a (patch)
tree        b6c1cf72db43fbb5064cc91d3d07a4ffb68a6b0c /drivers/net/e100.c
parent      6bdacb1ad58bb6b772a4fc18f21684437bd5f439 (diff)
[PATCH] e100: Performance optimizations to e100 Tx Path
Performance optimizations to the e100 Tx path: suppress the per-frame Tx completion interrupt in favor of the controller's delayed CU-idle interrupt on 82558 and newer parts, while still forcing an interrupt on roughly every 16th frame; raise E100_WAIT_SCB_TIMEOUT so command acceptance is polled for up to ~100ms; allow the RFD ring to be sized as small as 16; and request a reset (schedule tx_timeout_task) when a command cannot be issued because the CB ring is exhausted (-ENOSPC).
Signed-off-by: Mallikarjuna R Chilakala <mallikarjuna.chilakala@intel.com>
Signed-off-by: Ganesh Venkatesan <ganesh.venkatesan@intel.com>
Signed-off-by: John Ronciak <john.ronciak@intel.com>
Diffstat (limited to 'drivers/net/e100.c')
-rw-r--r--    drivers/net/e100.c    16
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index a43b82512b60..fc2a401d561d 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -777,7 +777,7 @@ static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
 	return 0;
 }
 
-#define E100_WAIT_SCB_TIMEOUT 40
+#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
 static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
 {
 	unsigned long flags;
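
The new timeout is easiest to sanity-check with a little arithmetic. The body of e100_exec_cmd's wait loop is outside this hunk, so the per-iteration delay below is an assumption, but if each polling pass delays on the order of 5 microseconds, 20000 iterations come to roughly 100 ms, which is exactly what the new comment warns about. A minimal, standalone sketch of that poll-until-idle-or-timeout pattern, with read_scb_cmd_lo() and delay_us() as hypothetical stand-ins for the driver's register read and udelay():

#include <stdbool.h>

#define WAIT_SCB_TIMEOUT  20000	/* iterations, as in the patch */
#define POLL_DELAY_US     5	/* assumed per-iteration delay */

/* Hypothetical stand-ins for reading scb.cmd_lo and for udelay(). */
extern unsigned char read_scb_cmd_lo(void);
extern void delay_us(unsigned int us);

/* Poll until the previous command is accepted (register reads back 0)
 * or the timeout expires.  Worst case: 20000 * 5 us = 100 ms. */
bool wait_for_scb_idle(void)
{
	unsigned int i;

	for (i = 0; i < WAIT_SCB_TIMEOUT; i++) {
		if (!read_scb_cmd_lo())
			return true;
		delay_us(POLL_DELAY_US);
	}
	return false;	/* timed out */
}
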
@@ -847,6 +847,10 @@ static inline int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
 			 * because the controller is too busy, so
 			 * let's just queue the command and try again
 			 * when another command is scheduled. */
+			if(err == -ENOSPC) {
+				//request a reset
+				schedule_work(&nic->tx_timeout_task);
+			}
 			break;
 		} else {
 			nic->cuc_cmd = cuc_resume;
@@ -891,7 +895,7 @@ static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
 
 static void e100_get_defaults(struct nic *nic)
 {
-	struct param_range rfds = { .min = 64, .max = 256, .count = 64 };
+	struct param_range rfds = { .min = 16, .max = 256, .count = 64 };
 	struct param_range cbs = { .min = 64, .max = 256, .count = 64 };
 
 	pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id);
@@ -906,8 +910,9 @@ static void e100_get_defaults(struct nic *nic)
 	/* Quadwords to DMA into FIFO before starting frame transmit */
 	nic->tx_threshold = 0xE0;
 
-	nic->tx_command = cpu_to_le16(cb_tx | cb_i | cb_tx_sf |
-		((nic->mac >= mac_82558_D101_A4) ? cb_cid : 0));
+	/* no interrupt for every tx completion, delay = 256us if not 557*/
+	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
+		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
 
 	/* Template for a freshly allocated RFD */
 	nic->blank_rfd.command = cpu_to_le16(cb_el);
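
A hedged reading of the hunk above: previously every transmit CB carried cb_i, so the adapter interrupted on each completed frame; with this change only pre-82558 parts, which lack the delayed interrupt mentioned in the comment ("delay = 256us"), keep the per-frame cb_i, and 82558 D101 A4 and later use the CU-idle cb_cid interrupt instead. A minimal sketch of just that selection; the flag values are placeholders for illustration, not the driver's real bit definitions:

#include <stdint.h>

/* Placeholder flag values, for illustration only. */
#define CB_TX     0x0004	/* transmit command */
#define CB_TX_SF  0x0008	/* flexible transmit mode */
#define CB_I      0x2000	/* interrupt when this CB completes */
#define CB_CID    0x1f00	/* CU-idle (delayed) interrupt */

/* Newer MACs coalesce completions via the delayed interrupt;
 * older ones still interrupt on every completion. */
uint16_t tx_command_for(int mac_at_least_82558_D101_A4)
{
	return CB_TX | CB_TX_SF |
	       (mac_at_least_82558_D101_A4 ? CB_CID : CB_I);
}
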
@@ -1289,12 +1294,15 @@ static inline void e100_xmit_prepare(struct nic *nic, struct cb *cb,
 	struct sk_buff *skb)
 {
 	cb->command = nic->tx_command;
+	/* interrupt every 16 packets regardless of delay */
+	if((nic->cbs_avail & ~15) == nic->cbs_avail) cb->command |= cb_i;
 	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
 	cb->u.tcb.tcb_byte_count = 0;
 	cb->u.tcb.threshold = nic->tx_threshold;
 	cb->u.tcb.tbd_count = 1;
 	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
 		skb->data, skb->len, PCI_DMA_TODEVICE));
+	// check for mapping failure?
 	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
 }
 
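
The "interrupt every 16 packets" test added above relies on a small bit trick: (nic->cbs_avail & ~15) == nic->cbs_avail holds exactly when the low four bits of cbs_avail are zero, i.e. when cbs_avail is a multiple of 16. A self-contained user-space demonstration of that equivalence (not driver code):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int cbs_avail;

	for (cbs_avail = 0; cbs_avail < 64; cbs_avail++) {
		/* same test as in e100_xmit_prepare */
		int want_irq = ((cbs_avail & ~15u) == cbs_avail);

		/* equivalent to "cbs_avail is a multiple of 16" */
		assert(want_irq == (cbs_avail % 16 == 0));

		if (want_irq)
			printf("cbs_avail=%2u -> set cb_i (request Tx interrupt)\n",
			       cbs_avail);
	}
	return 0;
}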