-rw-r--r--    drivers/net/ibmveth.c    100
-rw-r--r--    drivers/net/ibmveth.h      5
2 files changed, 17 insertions(+), 88 deletions(-)
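In short: the fragment handling in ibmveth_start_xmit was dead weight, and this change collapses the descriptor array down to a single descriptor covering the linear skb. As a rough sketch of the data structure involved (a compilable model, not the driver's real definition; the field names follow the diff, but the bit widths and ordering are assumptions for illustration):

  #include <stdint.h>

  /* Illustrative stand-in for the driver's union ibmveth_buf_desc: one
   * 64-bit word that firmware receives whole via .desc, and that the
   * driver fills field by field via .fields. */
  union buf_desc {
          uint64_t desc;
          struct {
                  uint32_t valid     : 1;  /* descriptor carries a buffer */
                  uint32_t no_csum   : 1;  /* checksum offload flags */
                  uint32_t csum_good : 1;
                  uint32_t length    : 29; /* bytes mapped at address */
                  uint32_t address;        /* DMA address of the buffer */
          } fields;
  };

  /* "desc.desc = 0" in the new code zeroes every field in one store,
   * replacing the old memset() over a whole descriptor array. */
  static void desc_init(union buf_desc *d, uint32_t addr, uint32_t len)
  {
          d->desc = 0;
          d->fields.length = len;
          d->fields.address = addr;
          d->fields.valid = 1;
  }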
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 40cb00e37df6..f6be7b29c845 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -28,7 +28,6 @@
 /**************************************************************************/
 /*
   TODO:
-  - remove frag processing code - no longer needed
   - add support for sysfs
   - possibly remove procfs support
 */
@@ -128,9 +127,6 @@ struct ibmveth_stat ibmveth_stats[] = {
         { "replenish_add_buff_success", IBMVETH_STAT_OFF(replenish_add_buff_success) },
         { "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
         { "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
-        { "tx_multidesc_send", IBMVETH_STAT_OFF(tx_multidesc_send) },
-        { "tx_linearized", IBMVETH_STAT_OFF(tx_linearized) },
-        { "tx_linearize_failed", IBMVETH_STAT_OFF(tx_linearize_failed) },
         { "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
         { "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
 };
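The three deleted counters only ever measured the multi-descriptor path. For context, each entry in this table pairs a stat name with its byte offset inside struct ibmveth_adapter; IBMVETH_STAT_OFF presumably expands to an offsetof(). A self-contained sketch of that name-to-offset pattern, using a cut-down stand-in for the adapter struct:

  #include <stddef.h>
  #include <stdio.h>

  /* Hypothetical, trimmed adapter with only the surviving tx stats. */
  struct adapter {
          unsigned long tx_map_failed;
          unsigned long tx_send_failed;
  };

  #define STAT_OFF(s) { #s, offsetof(struct adapter, s) }

  static const struct stat_desc {
          const char *name;
          size_t off;
  } stats[] = {
          STAT_OFF(tx_map_failed),
          STAT_OFF(tx_send_failed),
  };

  int main(void)
  {
          struct adapter a = { 3, 1 };
          size_t i;

          /* Walk the table, reading each counter through its offset. */
          for (i = 0; i < sizeof(stats) / sizeof(stats[0]); i++)
                  printf("%s: %lu\n", stats[i].name,
                         *(unsigned long *)((char *)&a + stats[i].off));
          return 0;
  }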
@@ -843,9 +839,8 @@ static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
         struct ibmveth_adapter *adapter = netdev->priv;
-        union ibmveth_buf_desc desc[IbmVethMaxSendFrags];
+        union ibmveth_buf_desc desc;
         unsigned long lpar_rc;
-        int nfrags = 0, curfrag;
         unsigned long correlator;
         unsigned long flags;
         unsigned int retry_count;
@@ -855,25 +850,11 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
         unsigned int tx_send_failed = 0;
         unsigned int tx_map_failed = 0;
 
-
-        if ((skb_shinfo(skb)->nr_frags + 1) > IbmVethMaxSendFrags) {
-                tx_dropped++;
-                goto out;
-        }
-
-        memset(&desc, 0, sizeof(desc));
-
-        /* nfrags = number of frags after the initial fragment */
-        nfrags = skb_shinfo(skb)->nr_frags;
-
-        if(nfrags)
-                adapter->tx_multidesc_send++;
-
-        /* map the initial fragment */
-        desc[0].fields.length = nfrags ? skb->len - skb->data_len : skb->len;
-        desc[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
-                                        desc[0].fields.length, DMA_TO_DEVICE);
-        desc[0].fields.valid = 1;
+        desc.desc = 0;
+        desc.fields.length = skb->len;
+        desc.fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
+                                        desc.fields.length, DMA_TO_DEVICE);
+        desc.fields.valid = 1;
 
         if (skb->ip_summed == CHECKSUM_PARTIAL &&
             ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) {
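Note the simplification in the mapped length: the old code mapped only the linear head (skb->len - skb->data_len) when fragments were present, while the new code maps skb->len outright, which is equivalent because a linear skb has data_len == 0. A toy model of that arithmetic (the struct and names here are hypothetical, mirroring the kernel's fields):

  #include <assert.h>

  /* len is the total frame length, data_len the portion held in page
   * fragments; a linear skb keeps everything in the head. */
  struct skb_model {
          unsigned int len;
          unsigned int data_len;
  };

  int main(void)
  {
          struct skb_model skb = { .len = 1514, .data_len = 0 };

          /* What the old code mapped when frags existed: just the head. */
          unsigned int head_len = skb.len - skb.data_len;

          /* With frag processing gone the skb is linear, so mapping
           * skb.len, as the new code does, covers the whole frame. */
          assert(head_len == skb.len);
          return 0;
  }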
@@ -885,75 +866,34 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
         if (skb->ip_summed == CHECKSUM_PARTIAL) {
                 unsigned char *buf = skb_transport_header(skb) + skb->csum_offset;
 
-                desc[0].fields.no_csum = 1;
-                desc[0].fields.csum_good = 1;
+                desc.fields.no_csum = 1;
+                desc.fields.csum_good = 1;
 
                 /* Need to zero out the checksum */
                 buf[0] = 0;
                 buf[1] = 0;
         }
 
-        if(dma_mapping_error(desc[0].fields.address)) {
-                ibmveth_error_printk("tx: unable to map initial fragment\n");
+        if (dma_mapping_error(desc.fields.address)) {
+                ibmveth_error_printk("tx: unable to map xmit buffer\n");
                 tx_map_failed++;
                 tx_dropped++;
                 goto out;
         }
 
-        curfrag = nfrags;
-
-        /* map fragments past the initial portion if there are any */
-        while(curfrag--) {
-                skb_frag_t *frag = &skb_shinfo(skb)->frags[curfrag];
-                desc[curfrag+1].fields.address
-                        = dma_map_single(&adapter->vdev->dev,
-                                page_address(frag->page) + frag->page_offset,
-                                frag->size, DMA_TO_DEVICE);
-                desc[curfrag+1].fields.length = frag->size;
-                desc[curfrag+1].fields.valid = 1;
-                if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                        desc[curfrag+1].fields.no_csum = 1;
-                        desc[curfrag+1].fields.csum_good = 1;
-                }
-
-                if(dma_mapping_error(desc[curfrag+1].fields.address)) {
-                        ibmveth_error_printk("tx: unable to map fragment %d\n", curfrag);
-                        tx_map_failed++;
-                        tx_dropped++;
-                        /* Free all the mappings we just created */
-                        while(curfrag < nfrags) {
-                                dma_unmap_single(&adapter->vdev->dev,
-                                                desc[curfrag+1].fields.address,
-                                                desc[curfrag+1].fields.length,
-                                                DMA_TO_DEVICE);
-                                curfrag++;
-                        }
-                        goto out;
-                }
-        }
-
         /* send the frame. Arbitrarily set retrycount to 1024 */
         correlator = 0;
         retry_count = 1024;
         do {
                 lpar_rc = h_send_logical_lan(adapter->vdev->unit_address,
-                                             desc[0].desc,
-                                             desc[1].desc,
-                                             desc[2].desc,
-                                             desc[3].desc,
-                                             desc[4].desc,
-                                             desc[5].desc,
-                                             correlator,
-                                             &correlator);
+                                             desc.desc, 0, 0, 0, 0, 0,
+                                             correlator, &correlator);
         } while ((lpar_rc == H_BUSY) && (retry_count--));
 
         if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) {
-                int i;
                 ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc);
-                for(i = 0; i < 6; i++) {
-                        ibmveth_error_printk("tx: desc[%i] valid=%d, len=%d, address=0x%d\n", i,
-                                        desc[i].fields.valid, desc[i].fields.length, desc[i].fields.address);
-                }
+                ibmveth_error_printk("tx: valid=%d, len=%d, address=0x%08x\n",
+                                desc.fields.valid, desc.fields.length, desc.fields.address);
                 tx_send_failed++;
                 tx_dropped++;
         } else {
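h_send_logical_lan still takes six descriptor slots, so the single-buffer send passes literal zeroes for the five unused ones; a zeroed descriptor has valid == 0 and is ignored. The bounded busy-retry keeps its old shape. A minimal stand-alone sketch of that retry pattern, with stand-in return codes and a fake send in place of the real hypercall:

  /* Stand-ins for the hcall return codes; not the kernel's values. */
  enum { H_SUCCESS = 0, H_BUSY = 1, H_DROPPED = 2 };

  /* Hypothetical send: the real code passes desc.desc plus five zero
   * descriptor slots to h_send_logical_lan. */
  static int send_one_desc(unsigned long desc)
  {
          (void)desc;
          return H_SUCCESS;
  }

  /* Same bounded busy-retry shape as the hunk above: try up to 1024
   * extra times while the partner reports busy, then give up. */
  static int send_with_retry(unsigned long desc)
  {
          unsigned int retry_count = 1024; /* "arbitrarily set" */
          int rc;

          do {
                  rc = send_one_desc(desc);
          } while (rc == H_BUSY && retry_count--);

          return rc;
  }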
@@ -962,11 +902,8 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
                 netdev->trans_start = jiffies;
         }
 
-        do {
-                dma_unmap_single(&adapter->vdev->dev,
-                                desc[nfrags].fields.address,
-                                desc[nfrags].fields.length, DMA_TO_DEVICE);
-        } while(--nfrags >= 0);
+        dma_unmap_single(&adapter->vdev->dev, desc.fields.address,
+                         desc.fields.length, DMA_TO_DEVICE);
 
 out:    spin_lock_irqsave(&adapter->stats_lock, flags);
         adapter->stats.tx_dropped += tx_dropped;
@@ -1366,10 +1303,7 @@ static int ibmveth_seq_show(struct seq_file *seq, void *v)
                    firmware_mac[3], firmware_mac[4], firmware_mac[5]);
 
         seq_printf(seq, "\nAdapter Statistics:\n");
-        seq_printf(seq, "  TX:  skbuffs linearized:     %ld\n", adapter->tx_linearized);
-        seq_printf(seq, "       multi-descriptor sends: %ld\n", adapter->tx_multidesc_send);
-        seq_printf(seq, "       skb_linearize failures: %ld\n", adapter->tx_linearize_failed);
-        seq_printf(seq, "       vio_map_single failres: %ld\n", adapter->tx_map_failed);
+        seq_printf(seq, "  TX:  vio_map_single failres: %ld\n", adapter->tx_map_failed);
         seq_printf(seq, "       send failures:          %ld\n", adapter->tx_send_failed);
         seq_printf(seq, "  RX:  replenish task cycles:  %ld\n", adapter->replenish_task_cycles);
         seq_printf(seq, "       alloc_skb_failures:     %ld\n", adapter->replenish_no_mem);
diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h
index 43b068d9a558..30f9fc67b0c8 100644
--- a/drivers/net/ibmveth.h
+++ b/drivers/net/ibmveth.h
@@ -25,8 +25,6 @@
 #ifndef _IBMVETH_H
 #define _IBMVETH_H
 
-#define IbmVethMaxSendFrags 6
-
 /* constants for H_MULTICAST_CTRL */
 #define IbmVethMcastReceptionModifyBit     0x80000UL
 #define IbmVethMcastReceptionEnableBit     0x20000UL
@@ -147,9 +145,6 @@ struct ibmveth_adapter {
     u64 replenish_add_buff_success;
     u64 rx_invalid_buffer;
     u64 rx_no_buffer;
-    u64 tx_multidesc_send;
-    u64 tx_linearized;
-    u64 tx_linearize_failed;
     u64 tx_map_failed;
     u64 tx_send_failed;
     spinlock_t stats_lock;