author    Eugene Crosser <Eugene.Crosser@ru.ibm.com>   2016-06-16 10:18:52 -0400
committer David S. Miller <davem@davemloft.net>        2016-06-17 01:16:11 -0400
commit    2863c61334aa9fd82000da500075b7c959361919 (patch)
tree      872a8ead65c8aed5a00590dcf9cb827556f1587f /drivers/s390/net
parent    1b05cf6285c108c8c2acfec47dd02ed50d973823 (diff)
qeth: refactor calculation of SBALE count
Rewrite the functions that calculate the required number of buffer
elements needed to represent SKB data, to make them hopefully more
comprehensible. Plus a few cleanups.
Signed-off-by: Eugene Crosser <Eugene.Crosser@ru.ibm.com>
Signed-off-by: Ursula Braun <ubraun@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/s390/net')
-rw-r--r--   drivers/s390/net/qeth_core.h      | 13
-rw-r--r--   drivers/s390/net/qeth_core_main.c | 55
-rw-r--r--   drivers/s390/net/qeth_l3_main.c   | 58
3 files changed, 85 insertions(+), 41 deletions(-)
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index ec2e014e885c..eb8f434a2681 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -844,6 +844,19 @@ struct qeth_trap_id {
844 | /*some helper functions*/ | 844 | /*some helper functions*/ |
845 | #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "") | 845 | #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "") |
846 | 846 | ||
847 | /** | ||
848 | * qeth_get_elements_for_range() - find number of SBALEs to cover range. | ||
849 | * @start: Start of the address range. | ||
850 | * @end: Address after the end of the range. | ||
851 | * | ||
852 | * Returns the number of pages, and thus QDIO buffer elements, needed to cover | ||
853 | * the specified address range. | ||
854 | */ | ||
855 | static inline int qeth_get_elements_for_range(addr_t start, addr_t end) | ||
856 | { | ||
857 | return PFN_UP(end - 1) - PFN_DOWN(start); | ||
858 | } | ||
859 | |||
847 | static inline int qeth_get_micros(void) | 860 | static inline int qeth_get_micros(void) |
848 | { | 861 | { |
849 | return (int) (get_tod_clock() >> 12); | 862 | return (int) (get_tod_clock() >> 12); |
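Editor's note: the new qeth_get_elements_for_range() helper centralizes the PFN_UP(end - 1) - PFN_DOWN(start) arithmetic that the later hunks remove from the individual call sites. The standalone sketch below (user-space, not kernel code; the PFN_* macros and all addresses are re-created here purely for illustration, assuming 4 KiB pages as on s390) shows what the formula yields for a few ranges:

/*
 * Sketch of the arithmetic behind qeth_get_elements_for_range().
 * PFN_UP/PFN_DOWN are re-implemented locally; addresses are invented.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

/* number of pages (and thus SBALEs) covering [start, end) */
static unsigned long elements_for_range(unsigned long start, unsigned long end)
{
	/* end is one past the last byte, hence PFN_UP(end - 1) */
	return PFN_UP(end - 1) - PFN_DOWN(start);
}

int main(void)
{
	/* 100 bytes inside a single page -> 1 element */
	printf("%lu\n", elements_for_range(0x1000, 0x1000 + 100));
	/* 100 bytes straddling a page boundary -> 2 elements */
	printf("%lu\n", elements_for_range(0x1fc0, 0x1fc0 + 100));
	/* one full, page-aligned page -> still 1 element */
	printf("%lu\n", elements_for_range(0x3000, 0x4000));
	return 0;
}

Subtracting the start frame number from the rounded-up end frame number counts every page the range touches, which is exactly the number of QDIO buffer elements needed for it.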
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index b7b74776e2ff..a91a31d2fc9a 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -3810,41 +3810,54 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
3810 | } | 3810 | } |
3811 | EXPORT_SYMBOL_GPL(qeth_get_priority_queue); | 3811 | EXPORT_SYMBOL_GPL(qeth_get_priority_queue); |
3812 | 3812 | ||
3813 | /** | ||
3814 | * qeth_get_elements_for_frags() - find number of SBALEs for skb frags. | ||
3815 | * @skb: SKB address | ||
3816 | * | ||
3817 | * Returns the number of pages, and thus QDIO buffer elements, needed to cover | ||
3818 | * fragmented part of the SKB. Returns zero for linear SKB. | ||
3819 | */ | ||
3813 | int qeth_get_elements_for_frags(struct sk_buff *skb) | 3820 | int qeth_get_elements_for_frags(struct sk_buff *skb) |
3814 | { | 3821 | { |
3815 | int cnt, length, e, elements = 0; | 3822 | int cnt, elements = 0; |
3816 | struct skb_frag_struct *frag; | ||
3817 | char *data; | ||
3818 | 3823 | ||
3819 | for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { | 3824 | for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { |
3820 | frag = &skb_shinfo(skb)->frags[cnt]; | 3825 | struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[cnt]; |
3821 | data = (char *)page_to_phys(skb_frag_page(frag)) + | 3826 | |
3822 | frag->page_offset; | 3827 | elements += qeth_get_elements_for_range( |
3823 | length = frag->size; | 3828 | (addr_t)skb_frag_address(frag), |
3824 | e = PFN_UP((unsigned long)data + length - 1) - | 3829 | (addr_t)skb_frag_address(frag) + skb_frag_size(frag)); |
3825 | PFN_DOWN((unsigned long)data); | ||
3826 | elements += e; | ||
3827 | } | 3830 | } |
3828 | return elements; | 3831 | return elements; |
3829 | } | 3832 | } |
3830 | EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags); | 3833 | EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags); |
3831 | 3834 | ||
3835 | /** | ||
3836 | * qeth_get_elements_no() - find number of SBALEs for skb data, inc. frags. | ||
3837 | * @card: qeth card structure, to check max. elems. | ||
3838 | * @skb: SKB address | ||
3839 | * @extra_elems: extra elems needed, to check against max. | ||
3840 | * | ||
3841 | * Returns the number of pages, and thus QDIO buffer elements, needed to cover | ||
3842 | * skb data, including linear part and fragments. Checks if the result plus | ||
3843 | * extra_elems fits under the limit for the card. Returns 0 if it does not. | ||
3844 | * Note: extra_elems is not included in the returned result. | ||
3845 | */ | ||
3832 | int qeth_get_elements_no(struct qeth_card *card, | 3846 | int qeth_get_elements_no(struct qeth_card *card, |
3833 | struct sk_buff *skb, int elems) | 3847 | struct sk_buff *skb, int extra_elems) |
3834 | { | 3848 | { |
3835 | int dlen = skb->len - skb->data_len; | 3849 | int elements = qeth_get_elements_for_range( |
3836 | int elements_needed = PFN_UP((unsigned long)skb->data + dlen - 1) - | 3850 | (addr_t)skb->data, |
3837 | PFN_DOWN((unsigned long)skb->data); | 3851 | (addr_t)skb->data + skb_headlen(skb)) + |
3838 | 3852 | qeth_get_elements_for_frags(skb); | |
3839 | elements_needed += qeth_get_elements_for_frags(skb); | ||
3840 | 3853 | ||
3841 | if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { | 3854 | if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { |
3842 | QETH_DBF_MESSAGE(2, "Invalid size of IP packet " | 3855 | QETH_DBF_MESSAGE(2, "Invalid size of IP packet " |
3843 | "(Number=%d / Length=%d). Discarded.\n", | 3856 | "(Number=%d / Length=%d). Discarded.\n", |
3844 | (elements_needed+elems), skb->len); | 3857 | elements + extra_elems, skb->len); |
3845 | return 0; | 3858 | return 0; |
3846 | } | 3859 | } |
3847 | return elements_needed; | 3860 | return elements; |
3848 | } | 3861 | } |
3849 | EXPORT_SYMBOL_GPL(qeth_get_elements_no); | 3862 | EXPORT_SYMBOL_GPL(qeth_get_elements_no); |
3850 | 3863 | ||
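Editor's note: with the helper in place, qeth_get_elements_no() reduces to one range count for the linear part, [skb->data, skb->data + skb_headlen(skb)), plus qeth_get_elements_for_frags() for the page fragments, followed by the limit check against QETH_MAX_BUFFER_ELEMENTS(card). The toy model below mirrors that composition and the convention of returning 0 when the packet would not fit; it is user-space code with an invented skb layout, a made-up limit of 16, and toy_frag standing in for (but not being) the kernel's skb_frag_struct:

/*
 * Toy model of the refactored qeth_get_elements_no(): one range count
 * for the linear part plus one per page fragment, then a limit check.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

struct toy_frag {		/* stand-in for skb_frag_struct */
	unsigned long addr;	/* what skb_frag_address() would return */
	unsigned long size;	/* what skb_frag_size() would return */
};

static unsigned long elements_for_range(unsigned long start, unsigned long end)
{
	return PFN_UP(end - 1) - PFN_DOWN(start);
}

int main(void)
{
	unsigned long head = 0x10f80, headlen = 1400;	/* linear part crosses a page */
	struct toy_frag frags[] = {
		{ 0x20000, 4096 },	/* aligned full page  -> 1 element  */
		{ 0x30800, 3000 },	/* crosses a boundary -> 2 elements */
	};
	unsigned long i, elements, extra_elems = 1, max_elements = 16;

	elements = elements_for_range(head, head + headlen);	/* -> 2 */
	for (i = 0; i < sizeof(frags) / sizeof(frags[0]); i++)
		elements += elements_for_range(frags[i].addr,
					       frags[i].addr + frags[i].size);

	/* mirrors qeth_get_elements_no(): report 0 if it would not fit */
	if (elements + extra_elems > max_elements)
		elements = 0;

	printf("SBALEs for skb data: %lu\n", elements);	/* prints 5 */
	return 0;
}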
@@ -3859,7 +3872,7 @@ int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len)
3859 | rest = len - inpage; | 3872 | rest = len - inpage; |
3860 | if (rest > hroom) | 3873 | if (rest > hroom) |
3861 | return 1; | 3874 | return 1; |
3862 | memmove(skb->data - rest, skb->data, skb->len - skb->data_len); | 3875 | memmove(skb->data - rest, skb->data, skb_headlen(skb)); |
3863 | skb->data -= rest; | 3876 | skb->data -= rest; |
3864 | skb->tail -= rest; | 3877 | skb->tail -= rest; |
3865 | *hdr = (struct qeth_hdr *)skb->data; | 3878 | *hdr = (struct qeth_hdr *)skb->data; |
@@ -3873,7 +3886,7 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
3873 | struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill, | 3886 | struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill, |
3874 | int offset) | 3887 | int offset) |
3875 | { | 3888 | { |
3876 | int length = skb->len - skb->data_len; | 3889 | int length = skb_headlen(skb); |
3877 | int length_here; | 3890 | int length_here; |
3878 | int element; | 3891 | int element; |
3879 | char *data; | 3892 | char *data; |
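Editor's note: the two hunks above only swap the open-coded skb->len - skb->data_len for skb_headlen(skb). That substitution is purely cosmetic, since skb_headlen() is defined in include/linux/skbuff.h as exactly that expression (reproduced here for reference):

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}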
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index ac544330daeb..7c9968adb66d 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2793,15 +2793,34 @@ static void qeth_tso_fill_header(struct qeth_card *card,
2793 | } | 2793 | } |
2794 | } | 2794 | } |
2795 | 2795 | ||
2796 | static inline int qeth_l3_tso_elements(struct sk_buff *skb) | 2796 | /** |
2797 | * qeth_get_elements_no_tso() - find number of SBALEs for skb data, inc. frags. | ||
2798 | * @card: qeth card structure, to check max. elems. | ||
2799 | * @skb: SKB address | ||
2800 | * @extra_elems: extra elems needed, to check against max. | ||
2801 | * | ||
2802 | * Returns the number of pages, and thus QDIO buffer elements, needed to cover | ||
2803 | * skb data, including linear part and fragments, but excluding TCP header. | ||
2804 | * (Exclusion of TCP header distinguishes it from qeth_get_elements_no().) | ||
2805 | * Checks if the result plus extra_elems fits under the limit for the card. | ||
2806 | * Returns 0 if it does not. | ||
2807 | * Note: extra_elems is not included in the returned result. | ||
2808 | */ | ||
2809 | static int qeth_get_elements_no_tso(struct qeth_card *card, | ||
2810 | struct sk_buff *skb, int extra_elems) | ||
2797 | { | 2811 | { |
2798 | unsigned long tcpd = (unsigned long)tcp_hdr(skb) + | 2812 | addr_t tcpdptr = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb); |
2799 | tcp_hdr(skb)->doff * 4; | 2813 | int elements = qeth_get_elements_for_range( |
2800 | int tcpd_len = skb_headlen(skb) - (tcpd - (unsigned long)skb->data); | 2814 | tcpdptr, |
2801 | int elements = PFN_UP(tcpd + tcpd_len - 1) - PFN_DOWN(tcpd); | 2815 | (addr_t)skb->data + skb_headlen(skb)) + |
2802 | 2816 | qeth_get_elements_for_frags(skb); | |
2803 | elements += qeth_get_elements_for_frags(skb); | ||
2804 | 2817 | ||
2818 | if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { | ||
2819 | QETH_DBF_MESSAGE(2, | ||
2820 | "Invalid size of TSO IP packet (Number=%d / Length=%d). Discarded.\n", | ||
2821 | elements + extra_elems, skb->len); | ||
2822 | return 0; | ||
2823 | } | ||
2805 | return elements; | 2824 | return elements; |
2806 | } | 2825 | } |
2807 | 2826 | ||
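Editor's note: qeth_get_elements_no_tso() differs from qeth_get_elements_no() only in where the linear-part range starts: at tcpdptr, the first byte past the TCP header, rather than at skb->data, so the protocol headers are excluded from the count. The sketch below (user-space; every address and header length is invented for illustration) shows how that changes the result when the headers happen to sit at the end of a page:

/*
 * Invented-numbers sketch of the TSO range start: only data after the
 * TCP header contributes to the element count.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

static unsigned long elements_for_range(unsigned long start, unsigned long end)
{
	return PFN_UP(end - 1) - PFN_DOWN(start);
}

int main(void)
{
	unsigned long data    = 0x52fb0;	/* skb->data (invented)                */
	unsigned long hdrs    = 80;		/* bytes up to and incl. the TCP header */
	unsigned long payload = 1200;		/* TCP payload in the linear part       */
	unsigned long tcpdptr = data + hdrs;	/* == tcp_hdr(skb) + tcp_hdrlen(skb)    */
	unsigned long headlen = hdrs + payload;	/* == skb_headlen(skb)                  */

	printf("full linear part  : %lu elements\n",
	       elements_for_range(data, data + headlen));	/* prints 2 */
	printf("payload only (TSO): %lu elements\n",
	       elements_for_range(tcpdptr, data + headlen));	/* prints 1 */
	return 0;
}

In this made-up layout the headers end exactly on a page boundary, so counting from skb->data needs two elements while counting from the TCP payload needs only one; the fragment handling is the same as in qeth_get_elements_no().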
@@ -2810,8 +2829,8 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2810 | int rc; | 2829 | int rc; |
2811 | u16 *tag; | 2830 | u16 *tag; |
2812 | struct qeth_hdr *hdr = NULL; | 2831 | struct qeth_hdr *hdr = NULL; |
2813 | int elements_needed = 0; | 2832 | int hdr_elements = 0; |
2814 | int elems; | 2833 | int elements; |
2815 | struct qeth_card *card = dev->ml_priv; | 2834 | struct qeth_card *card = dev->ml_priv; |
2816 | struct sk_buff *new_skb = NULL; | 2835 | struct sk_buff *new_skb = NULL; |
2817 | int ipv = qeth_get_ip_version(skb); | 2836 | int ipv = qeth_get_ip_version(skb); |
@@ -2859,7 +2878,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2859 | hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); | 2878 | hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); |
2860 | if (!hdr) | 2879 | if (!hdr) |
2861 | goto tx_drop; | 2880 | goto tx_drop; |
2862 | elements_needed++; | 2881 | hdr_elements++; |
2863 | } else { | 2882 | } else { |
2864 | /* create a clone with writeable headroom */ | 2883 | /* create a clone with writeable headroom */ |
2865 | new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso) | 2884 | new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso) |
@@ -2895,7 +2914,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2895 | * chaining we can not send long frag lists | 2914 | * chaining we can not send long frag lists |
2896 | */ | 2915 | */ |
2897 | if (large_send) { | 2916 | if (large_send) { |
2898 | if (qeth_l3_tso_elements(new_skb) + 1 > 16) { | 2917 | if (!qeth_get_elements_no_tso(card, new_skb, 1)) { |
2899 | if (skb_linearize(new_skb)) | 2918 | if (skb_linearize(new_skb)) |
2900 | goto tx_drop; | 2919 | goto tx_drop; |
2901 | if (card->options.performance_stats) | 2920 | if (card->options.performance_stats) |
@@ -2909,7 +2928,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2909 | memset(hdr, 0, sizeof(struct qeth_hdr_tso)); | 2928 | memset(hdr, 0, sizeof(struct qeth_hdr_tso)); |
2910 | qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type); | 2929 | qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type); |
2911 | qeth_tso_fill_header(card, hdr, new_skb); | 2930 | qeth_tso_fill_header(card, hdr, new_skb); |
2912 | elements_needed++; | 2931 | hdr_elements++; |
2913 | } else { | 2932 | } else { |
2914 | if (data_offset < 0) { | 2933 | if (data_offset < 0) { |
2915 | hdr = (struct qeth_hdr *)skb_push(new_skb, | 2934 | hdr = (struct qeth_hdr *)skb_push(new_skb, |
@@ -2930,31 +2949,29 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2930 | qeth_l3_hdr_csum(card, hdr, new_skb); | 2949 | qeth_l3_hdr_csum(card, hdr, new_skb); |
2931 | } | 2950 | } |
2932 | 2951 | ||
2933 | elems = qeth_get_elements_no(card, new_skb, elements_needed); | 2952 | elements = qeth_get_elements_no(card, new_skb, hdr_elements); |
2934 | if (!elems) { | 2953 | if (!elements) { |
2935 | if (data_offset >= 0) | 2954 | if (data_offset >= 0) |
2936 | kmem_cache_free(qeth_core_header_cache, hdr); | 2955 | kmem_cache_free(qeth_core_header_cache, hdr); |
2937 | goto tx_drop; | 2956 | goto tx_drop; |
2938 | } | 2957 | } |
2939 | elements_needed += elems; | 2958 | elements += hdr_elements; |
2940 | nr_frags = skb_shinfo(new_skb)->nr_frags; | ||
2941 | 2959 | ||
2942 | if (card->info.type != QETH_CARD_TYPE_IQD) { | 2960 | if (card->info.type != QETH_CARD_TYPE_IQD) { |
2943 | int len; | 2961 | int len; |
2944 | if (large_send) | 2962 | if (large_send) |
2945 | len = ((unsigned long)tcp_hdr(new_skb) + | 2963 | len = ((unsigned long)tcp_hdr(new_skb) + |
2946 | tcp_hdr(new_skb)->doff * 4) - | 2964 | tcp_hdrlen(new_skb)) - |
2947 | (unsigned long)new_skb->data; | 2965 | (unsigned long)new_skb->data; |
2948 | else | 2966 | else |
2949 | len = sizeof(struct qeth_hdr_layer3); | 2967 | len = sizeof(struct qeth_hdr_layer3); |
2950 | 2968 | ||
2951 | if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len)) | 2969 | if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len)) |
2952 | goto tx_drop; | 2970 | goto tx_drop; |
2953 | rc = qeth_do_send_packet(card, queue, new_skb, hdr, | 2971 | rc = qeth_do_send_packet(card, queue, new_skb, hdr, elements); |
2954 | elements_needed); | ||
2955 | } else | 2972 | } else |
2956 | rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr, | 2973 | rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr, |
2957 | elements_needed, data_offset, 0); | 2974 | elements, data_offset, 0); |
2958 | 2975 | ||
2959 | if (!rc) { | 2976 | if (!rc) { |
2960 | card->stats.tx_packets++; | 2977 | card->stats.tx_packets++; |
@@ -2962,6 +2979,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2962 | if (new_skb != skb) | 2979 | if (new_skb != skb) |
2963 | dev_kfree_skb_any(skb); | 2980 | dev_kfree_skb_any(skb); |
2964 | if (card->options.performance_stats) { | 2981 | if (card->options.performance_stats) { |
2982 | nr_frags = skb_shinfo(new_skb)->nr_frags; | ||
2965 | if (large_send) { | 2983 | if (large_send) { |
2966 | card->perf_stats.large_send_bytes += tx_bytes; | 2984 | card->perf_stats.large_send_bytes += tx_bytes; |
2967 | card->perf_stats.large_send_cnt++; | 2985 | card->perf_stats.large_send_cnt++; |