about summary refs log tree commit diff stats
path: root/drivers/s390/net/qeth_tso.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/s390/net/qeth_tso.c')
-rw-r--r--drivers/s390/net/qeth_tso.c256
1 file changed, 0 insertions(+), 256 deletions(-)
diff --git a/drivers/s390/net/qeth_tso.c b/drivers/s390/net/qeth_tso.c
deleted file mode 100644
index 4e58f19cb71c..000000000000
--- a/drivers/s390/net/qeth_tso.c
+++ /dev/null
@@ -1,256 +0,0 @@
1/*
2 * linux/drivers/s390/net/qeth_tso.c ($Revision: 1.7 $)
3 *
4 * Header file for qeth TCP Segmentation Offload support.
5 *
6 * Copyright 2004 IBM Corporation
7 *
8 * Author(s): Frank Pavlic <pavlic@de.ibm.com>
9 *
10 * $Revision: 1.7 $ $Date: 2005/04/01 21:40:41 $
11 *
12 */
13
14#include <linux/skbuff.h>
15#include <linux/tcp.h>
16#include <linux/ip.h>
17#include <linux/ipv6.h>
18#include <net/ip6_checksum.h>
19#include "qeth.h"
20#include "qeth_mpc.h"
21#include "qeth_tso.h"
22
23/**
24 * skb already partially prepared
25 * classic qdio header in skb->data
26 * */
27static inline struct qeth_hdr_tso *
28qeth_tso_prepare_skb(struct qeth_card *card, struct sk_buff **skb)
29{
	/*
	 * Make room for and push the TSO extension header in front of the
	 * already-present classic qdio header (see comment above).
	 * qeth_realloc_headroom() may replace *skb with a reallocated copy,
	 * which is why the skb is passed by double pointer.
	 * Returns the pushed header, or NULL if headroom reallocation failed.
	 */
30	int rc = 0;
31
32	QETH_DBF_TEXT(trace, 5, "tsoprsk");
33	rc = qeth_realloc_headroom(card, skb,sizeof(struct qeth_hdr_ext_tso));
34	if (rc)
35	return NULL;
36
37	return qeth_push_skb(card, skb, sizeof(struct qeth_hdr_ext_tso));
38}
39
40/**
41 * fill header for a TSO packet
42 */
43static inline void
44qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb)
45{
	/*
	 * Fill the TSO extension part of the qdio header at skb->data.
	 * Assumes skb->data already points at a qeth_hdr_tso whose classic
	 * l3 part has been filled by qeth_fill_header() (see
	 * qeth_tso_prepare_packet).
	 */
46	struct qeth_hdr_tso *hdr;
47	struct tcphdr *tcph;
48	struct iphdr *iph;
49
50	QETH_DBF_TEXT(trace, 5, "tsofhdr");
51
52	hdr = (struct qeth_hdr_tso *) skb->data;
53	iph = skb->nh.iph;
54	tcph = skb->h.th;
55	/*fix header to TSO values ...*/
56	hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
57	/*set values which are fix for the first approach ...*/
58	hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
59	hdr->ext.imb_hdr_no = 1;
60	hdr->ext.hdr_type = 1;
61	hdr->ext.hdr_version = 1;
	/* NOTE(review): 28 is presumably sizeof(struct qeth_hdr_ext_tso) as
	 * required by the OSA hardware interface -- confirm against
	 * qeth_mpc.h before changing. */
62	hdr->ext.hdr_len = 28;
63	/*insert non-fix values */
	/* tso_size is the pre-2.6.18 name for what later became gso_size */
64	hdr->ext.mss = skb_shinfo(skb)->tso_size;
	/* datagram header = IP header + TCP header, both in 4-byte words */
65	hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
	/* TCP payload = total len minus IP/TCP headers minus our qdio hdr */
66	hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
67	sizeof(struct qeth_hdr_tso));
68}
69
70/**
71 * change some header values as requested by hardware
72 */
73static inline void
74qeth_tso_set_tcpip_header(struct qeth_card *card, struct sk_buff *skb)
75{
	/*
	 * Rewrite the TCP/IP headers the way the OSA hardware expects for
	 * TSO: zero the length/checksum fields and seed tcph->check with
	 * the complemented pseudo-header checksum so the adapter can
	 * finish the per-segment checksums itself.
	 */
76	struct iphdr *iph;
77	struct ipv6hdr *ip6h;
78	struct tcphdr *tcph;
79
	/* iph and ip6h alias the same network header; only the one matching
	 * skb->protocol is actually dereferenced below. */
80	iph = skb->nh.iph;
81	ip6h = skb->nh.ipv6h;
82	tcph = skb->h.th;
83
84	tcph->check = 0;
	/* NOTE(review): skb->protocol is network byte order but ETH_P_IPV6
	 * is a host-order constant; this comparison only works because s390
	 * is big-endian -- confirm if this code is ever made portable. */
85	if (skb->protocol == ETH_P_IPV6) {
86	ip6h->payload_len = 0;
87	tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
88	0, IPPROTO_TCP, 0);
89	return;
90	}
91	/*OSA want us to set these values ...*/
92	tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
93	0, IPPROTO_TCP, 0);
94	iph->tot_len = 0;
95	iph->check = 0;
96}
97
98static inline struct qeth_hdr_tso *
99qeth_tso_prepare_packet(struct qeth_card *card, struct sk_buff *skb,
100 int ipv, int cast_type)
101{
	/*
	 * Full TSO preparation for one skb: reserve/push the TSO qdio
	 * header, fill its classic and extension parts, and adjust the
	 * TCP/IP headers for hardware segmentation.
	 * Returns the header on success, NULL on allocation failure.
	 */
102	struct qeth_hdr_tso *hdr;
103	int rc = 0;
104
105	QETH_DBF_TEXT(trace, 5, "tsoprep");
106
107	/*get headroom for tso qdio header */
108	hdr = (struct qeth_hdr_tso *) qeth_tso_prepare_skb(card, &skb);
109	if (hdr == NULL) {
	/* NOTE(review): rc is still 0 here -- the logged value carries no
	 * information about why qeth_tso_prepare_skb failed. */
110	QETH_DBF_TEXT_(trace, 4, "2err%d", rc);
111	return NULL;
112	}
113	memset(hdr, 0, sizeof(struct qeth_hdr_tso));
114	/*fill first 32 bytes of qdio header as used
115	*FIXME: TSO has two struct members
116	* with different names but same size
117	* */
118	qeth_fill_header(card, &hdr->hdr, skb, ipv, cast_type);
119	qeth_tso_fill_header(card, skb);
120	qeth_tso_set_tcpip_header(card, skb);
121	return hdr;
122}
123
124static inline int
125qeth_tso_get_queue_buffer(struct qeth_qdio_out_q *queue)
126{
	/*
	 * Switch @queue to non-packing mode and make sure the current
	 * output buffer is empty and unused.  If the current buffer was
	 * left partially filled by packing mode, prime it for transmission
	 * and advance to the next buffer.
	 * Returns the number of buffers that now need flushing (0 or 1).
	 */
127	struct qeth_qdio_out_buffer *buffer;
128	int flush_cnt = 0;
129
130	QETH_DBF_TEXT(trace, 5, "tsobuf");
131
132	/* force to non-packing*/
133	if (queue->do_pack)
134	queue->do_pack = 0;
135	buffer = &queue->bufs[queue->next_buf_to_fill];
136	/* get a new buffer if current is already in use*/
	/* "in use" here means: state still EMPTY but some elements already
	 * filled, i.e. a partially packed buffer awaiting more data. */
137	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
138	(buffer->next_element_to_fill > 0)) {
139	atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
140	queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
141	QDIO_MAX_BUFFERS_PER_Q;
142	flush_cnt++;
143	}
144	return flush_cnt;
145}
146
147
148static inline int
149qeth_tso_fill_buffer(struct qeth_qdio_out_buffer *buf,
150 struct sk_buff *skb)
151{
	/*
	 * Scatter one prepared TSO skb into the SBAL elements of @buf:
	 * element 0 carries only the qdio TSO header plus the IP/TCP
	 * headers; the payload follows either via the skb's page frags or,
	 * for linear skbs, split along page boundaries.
	 * The skb is reference-counted and queued on buf->skb_list for
	 * release at TX-completion time.  Always returns 1 (one buffer
	 * primed), matching the flush_cnt convention of the caller.
	 */
152	int length, length_here, element;
153	int hdr_len;
154	struct qdio_buffer *buffer;
155	struct qeth_hdr_tso *hdr;
156	char *data;
157
158	QETH_DBF_TEXT(trace, 3, "tsfilbuf");
159
160	/*increment user count and queue skb ...*/
161	atomic_inc(&skb->users);
162	skb_queue_tail(&buf->skb_list, skb);
163
164	/*initialize all variables...*/
165	buffer = buf->buffer;
166	hdr = (struct qeth_hdr_tso *)skb->data;
	/* everything up to and including the TCP header goes in element 0 */
167	hdr_len = sizeof(struct qeth_hdr_tso) + hdr->ext.dg_hdr_len;
168	data = skb->data + hdr_len;
169	length = skb->len - hdr_len;
170	element = buf->next_element_to_fill;
171	/*fill first buffer entry only with header information */
172	buffer->element[element].addr = skb->data;
173	buffer->element[element].length = hdr_len;
174	buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
175	buf->next_element_to_fill++;
176	/*check if we have frags ...*/
177	if (skb_shinfo(skb)->nr_frags > 0) {
	/* NOTE(review): skb->len and skb->data are rewritten in place so
	 * that __qeth_fill_buffer_frag only sees the payload; the skb is
	 * not restored afterwards -- presumably safe because the skb is
	 * only released from here on, but confirm against the completion
	 * path. */
178	skb->len = length;
179	skb->data = data;
180	__qeth_fill_buffer_frag(skb, buffer,1,
181	(int *)&buf->next_element_to_fill);
182	goto out;
183	}
184
185	/*... if not, use this */
186	element++;
187	while (length > 0) {
188	/* length_here is the remaining amount of data in this page */
189	length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
190	if (length < length_here)
191	length_here = length;
192	buffer->element[element].addr = data;
193	buffer->element[element].length = length_here;
194	length -= length_here;
195	if (!length)
196	buffer->element[element].flags =
197	SBAL_FLAGS_LAST_FRAG;
198	else
199	buffer->element[element].flags =
200	SBAL_FLAGS_MIDDLE_FRAG;
201	data += length_here;
202	element++;
203	}
204	buf->next_element_to_fill = element;
205out:
206	/*prime buffer now ...*/
207	atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
208	return 1;
209}
210
211int
212qeth_tso_send_packet(struct qeth_card *card, struct sk_buff *skb,
213 struct qeth_qdio_out_q *queue, int ipv, int cast_type)
214{
	/*
	 * Transmit one TSO skb on @queue: build the TSO qdio header, take
	 * the queue lock, force non-packing mode, fill exactly one SBAL
	 * and flush it.
	 * Returns 0 on success, -ENOMEM if header preparation fails,
	 * -EINVAL if the skb does not fit into a single SBAL.
	 */
215	int flush_cnt = 0;
216	struct qeth_hdr_tso *hdr;
217	struct qeth_qdio_out_buffer *buffer;
218	int start_index;
219
220	QETH_DBF_TEXT(trace, 3, "tsosend");
221
222	if (!(hdr = qeth_tso_prepare_packet(card, skb, ipv, cast_type)))
223	return -ENOMEM;
224	/*check if skb fits in one SBAL ...*/
225	if (!(qeth_get_elements_no(card, (void*)hdr, skb)))
226	return -EINVAL;
227	/*lock queue, force switching to non-packing and send it ...*/
	/* busy-wait until the queue state flips UNLOCKED -> LOCKED */
228	while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
229	QETH_OUT_Q_LOCKED,
230	&queue->state));
231	start_index = queue->next_buf_to_fill;
232	buffer = &queue->bufs[queue->next_buf_to_fill];
233	/*check if card is too busy ...*/
234	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
235	card->stats.tx_dropped++;
236	goto out;
237	}
238	/*let's force to non-packing and get a new SBAL*/
239	flush_cnt += qeth_tso_get_queue_buffer(queue);
240	buffer = &queue->bufs[queue->next_buf_to_fill];
241	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
242	card->stats.tx_dropped++;
243	goto out;
244	}
245	flush_cnt += qeth_tso_fill_buffer(buffer, skb);
246	queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
247	QDIO_MAX_BUFFERS_PER_Q;
248out:
249	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
250	if (flush_cnt)
251	qeth_flush_buffers(queue, 0, start_index, flush_cnt);
252	/*do some statistics */
	/* NOTE(review): the tx_dropped paths above also reach this point,
	 * so they still bump tx_packets/tx_bytes, return 0, and never free
	 * the skb -- confirm the caller's error/refcount expectations. */
253	card->stats.tx_packets++;
254	card->stats.tx_bytes += skb->len;
255	return 0;
256}