Diffstat (limited to 'drivers/s390/net/qeth_eddp.c')
-rw-r--r-- | drivers/s390/net/qeth_eddp.c | 643
1 file changed, 643 insertions(+), 0 deletions(-)
diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c
new file mode 100644
index 000000000000..7ee1c06ed68a
--- /dev/null
+++ b/drivers/s390/net/qeth_eddp.c
@@ -0,0 +1,643 @@
/*
 *
 * linux/drivers/s390/net/qeth_eddp.c ($Revision: 1.11 $)
 *
 * Enhanced Device Driver Packing (EDDP) support for the qeth driver.
 *
 * Copyright 2004 IBM Corporation
 *
 * Author(s): Thomas Spatzier <tspat@de.ibm.com>
 *
 * $Revision: 1.11 $	$Date: 2005/03/24 09:04:18 $
 *
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <net/tcp.h>
#include <linux/skbuff.h>

#include <net/ip.h>

#include "qeth.h"
#include "qeth_mpc.h"
#include "qeth_eddp.h"

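/*
 * Check how many empty outbound buffers are needed to hold all elements
 * of the given context, starting at the queue's next_buf_to_fill.
 * Returns the number of buffers required, or -EBUSY if a buffer in the
 * range is not empty.
 */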
int
qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
				    struct qeth_eddp_context *ctx)
{
	int index = queue->next_buf_to_fill;
	int elements_needed = ctx->num_elements;
	int elements_in_buffer;
	int skbs_in_buffer;
	int buffers_needed = 0;

	QETH_DBF_TEXT(trace, 5, "eddpcbfc");
	while (elements_needed > 0) {
		buffers_needed++;
		if (atomic_read(&queue->bufs[index].state) !=
				QETH_QDIO_BUF_EMPTY)
			return -EBUSY;

		elements_in_buffer = QETH_MAX_BUFFER_ELEMENTS(queue->card) -
				     queue->bufs[index].next_element_to_fill;
		skbs_in_buffer = elements_in_buffer / ctx->elements_per_skb;
		elements_needed -= skbs_in_buffer * ctx->elements_per_skb;
		index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
	}
	return buffers_needed;
}

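/*
 * Context lifetime is managed by a reference count: a context is
 * created with refcnt 1, each buffer referencing it takes an additional
 * reference, and the context (including its pages and element array)
 * is freed when the last reference is dropped.
 */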
static inline void
qeth_eddp_free_context(struct qeth_eddp_context *ctx)
{
	int i;

	QETH_DBF_TEXT(trace, 5, "eddpfctx");
	for (i = 0; i < ctx->num_pages; ++i)
		free_page((unsigned long)ctx->pages[i]);
	kfree(ctx->pages);
	if (ctx->elements != NULL)
		kfree(ctx->elements);
	kfree(ctx);
}


static inline void
qeth_eddp_get_context(struct qeth_eddp_context *ctx)
{
	atomic_inc(&ctx->refcnt);
}

void
qeth_eddp_put_context(struct qeth_eddp_context *ctx)
{
	if (atomic_dec_return(&ctx->refcnt) == 0)
		qeth_eddp_free_context(ctx);
}

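/*
 * Outbound buffers keep a list (ctx_list) of references to the contexts
 * whose data they carry; qeth_eddp_buf_release_contexts drops all
 * references held by a buffer and frees the reference structures.
 */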
void
qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
{
	struct qeth_eddp_context_reference *ref;

	QETH_DBF_TEXT(trace, 6, "eddprctx");
	while (!list_empty(&buf->ctx_list)) {
		ref = list_entry(buf->ctx_list.next,
				 struct qeth_eddp_context_reference, list);
		qeth_eddp_put_context(ref->ctx);
		list_del(&ref->list);
		kfree(ref);
	}
}

static inline int
qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
			  struct qeth_eddp_context *ctx)
{
	struct qeth_eddp_context_reference *ref;

	QETH_DBF_TEXT(trace, 6, "eddprfcx");
	ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC);
	if (ref == NULL)
		return -ENOMEM;
	qeth_eddp_get_context(ctx);
	ref->ctx = ctx;
	list_add_tail(&ref->list, &buf->ctx_list);
	return 0;
}

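/*
 * Fill the context's elements into the outbound queue, starting at the
 * buffer with the given index and moving on to following buffers as
 * needed; the elements of one skb are never split across buffers.
 * Returns the number of buffers primed for flushing, or -EBUSY if
 * nothing could be filled because the first buffer is not empty.
 */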
int
qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
		      struct qeth_eddp_context *ctx,
		      int index)
{
	struct qeth_qdio_out_buffer *buf = NULL;
	struct qdio_buffer *buffer;
	int elements = ctx->num_elements;
	int element = 0;
	int flush_cnt = 0;
	int must_refcnt = 1;
	int i;

	QETH_DBF_TEXT(trace, 5, "eddpfibu");
	while (elements > 0) {
		buf = &queue->bufs[index];
		if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY) {
			/* normally this should not happen since we checked
			 * for available elements in
			 * qeth_eddp_check_buffers_for_context
			 */
			if (element == 0)
				return -EBUSY;
			else {
				PRINT_WARN("could only partially fill eddp "
					   "buffer!\n");
				goto out;
			}
		}
		/* check if the whole next skb fits into current buffer */
		if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
					buf->next_element_to_fill)
				< ctx->elements_per_skb) {
			/* no -> go to next buffer */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
			flush_cnt++;
			/* new buffer, so we have to add ctx to the buffer's
			 * ctx_list and increment ctx's refcnt */
			must_refcnt = 1;
			continue;
		}
		if (must_refcnt) {
			must_refcnt = 0;
			if (qeth_eddp_buf_ref_context(buf, ctx)) {
				PRINT_WARN("no memory to create eddp context "
					   "reference\n");
				goto out_check;
			}
		}
		buffer = buf->buffer;
		/* fill one skb into buffer */
		for (i = 0; i < ctx->elements_per_skb; ++i) {
			buffer->element[buf->next_element_to_fill].addr =
				ctx->elements[element].addr;
			buffer->element[buf->next_element_to_fill].length =
				ctx->elements[element].length;
			buffer->element[buf->next_element_to_fill].flags =
				ctx->elements[element].flags;
			buf->next_element_to_fill++;
			element++;
			elements--;
		}
	}
out_check:
	if (!queue->do_pack) {
		QETH_DBF_TEXT(trace, 6, "fillbfnp");
		/* set state to PRIMED -> will be flushed */
		if (buf->next_element_to_fill > 0) {
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt++;
		}
	} else {
#ifdef CONFIG_QETH_PERF_STATS
		queue->card->perf_stats.skbs_sent_pack++;
#endif
		QETH_DBF_TEXT(trace, 6, "fillbfpa");
		if (buf->next_element_to_fill >=
				QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
			/*
			 * packed buffer is full -> set state PRIMED
			 * -> will be flushed
			 */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt++;
		}
	}
out:
	return flush_cnt;
}

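/* return the number of payload bytes in the skb's linear data area */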
static inline int
qeth_get_skb_data_len(struct sk_buff *skb)
{
	int len = skb->len;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i)
		len -= skb_shinfo(skb)->frags[i].size;
	return len;
}

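/*
 * Copy the headers for one segment (qeth header, optionally MAC and
 * VLAN headers, network header, transport header) into the context's
 * pages and describe them in the context's next element. Headers are
 * never split across a page boundary.
 */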
static inline void
qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
			      struct qeth_eddp_data *eddp)
{
	u8 *page;
	int page_remainder;
	int page_offset;
	int hdr_len;
	struct qeth_eddp_element *element;

	QETH_DBF_TEXT(trace, 5, "eddpcrsh");
	page = ctx->pages[ctx->offset >> PAGE_SHIFT];
	page_offset = ctx->offset % PAGE_SIZE;
	element = &ctx->elements[ctx->num_elements];
	hdr_len = eddp->nhl + eddp->thl;
	/* FIXME: layer2 and VLAN !!! */
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
		hdr_len += ETH_HLEN;
	if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
		hdr_len += VLAN_HLEN;
	/* does the complete header fit in the current page? */
	page_remainder = PAGE_SIZE - page_offset;
	if (page_remainder < (sizeof(struct qeth_hdr) + hdr_len)) {
		/* no -> go to start of next page */
		ctx->offset += page_remainder;
		page = ctx->pages[ctx->offset >> PAGE_SHIFT];
		page_offset = 0;
	}
	memcpy(page + page_offset, &eddp->qh, sizeof(struct qeth_hdr));
	element->addr = page + page_offset;
	element->length = sizeof(struct qeth_hdr);
	ctx->offset += sizeof(struct qeth_hdr);
	page_offset += sizeof(struct qeth_hdr);
	/* add MAC header (layer2 only) */
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		memcpy(page + page_offset, &eddp->mac, ETH_HLEN);
		element->length += ETH_HLEN;
		ctx->offset += ETH_HLEN;
		page_offset += ETH_HLEN;
	}
	/* add VLAN tag */
	if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
		memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN);
		element->length += VLAN_HLEN;
		ctx->offset += VLAN_HLEN;
		page_offset += VLAN_HLEN;
	}
	/* add network header */
	memcpy(page + page_offset, (u8 *)&eddp->nh, eddp->nhl);
	element->length += eddp->nhl;
	eddp->nh_in_ctx = page + page_offset;
	ctx->offset += eddp->nhl;
	page_offset += eddp->nhl;
	/* add transport header */
	memcpy(page + page_offset, (u8 *)&eddp->th, eddp->thl);
	element->length += eddp->thl;
	eddp->th_in_ctx = page + page_offset;
	ctx->offset += eddp->thl;
}

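/*
 * Copy len bytes of TCP payload from the original skb (linear data
 * first, then the page fragments) to dst, accumulating the copied data
 * into the running checksum *hcsum.
 */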
static inline void
qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
			u32 *hcsum)
{
	struct skb_frag_struct *frag;
	int left_in_frag;
	int copy_len;
	u8 *src;

	QETH_DBF_TEXT(trace, 5, "eddpcdtc");
	if (skb_shinfo(eddp->skb)->nr_frags == 0) {
		memcpy(dst, eddp->skb->data + eddp->skb_offset, len);
		*hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len,
				      *hcsum);
		eddp->skb_offset += len;
	} else {
		while (len > 0) {
			if (eddp->frag < 0) {
				/* we're in skb->data */
				left_in_frag = qeth_get_skb_data_len(eddp->skb)
						- eddp->skb_offset;
				src = eddp->skb->data + eddp->skb_offset;
			} else {
				frag = &skb_shinfo(eddp->skb)->
					frags[eddp->frag];
				left_in_frag = frag->size - eddp->frag_offset;
				src = (u8 *)(
					(page_to_pfn(frag->page) << PAGE_SHIFT) +
					frag->page_offset + eddp->frag_offset);
			}
			if (left_in_frag <= 0) {
				eddp->frag++;
				eddp->frag_offset = 0;
				continue;
			}
			copy_len = min(left_in_frag, len);
			memcpy(dst, src, copy_len);
			*hcsum = csum_partial(src, copy_len, *hcsum);
			dst += copy_len;
			eddp->frag_offset += copy_len;
			eddp->skb_offset += copy_len;
			len -= copy_len;
		}
	}
}

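/*
 * Copy one segment's worth of TCP payload into the context's pages,
 * spreading it over as many elements as needed and marking the elements
 * with first/middle/last fragmentation flags when the data spans more
 * than one element. Finally fold the accumulated checksum into the TCP
 * header stored in the context.
 */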
static inline void
qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
				  struct qeth_eddp_data *eddp, int data_len,
				  u32 hcsum)
{
	u8 *page;
	int page_remainder;
	int page_offset;
	struct qeth_eddp_element *element;
	int first_lap = 1;

	QETH_DBF_TEXT(trace, 5, "eddpcsdt");
	page = ctx->pages[ctx->offset >> PAGE_SHIFT];
	page_offset = ctx->offset % PAGE_SIZE;
	element = &ctx->elements[ctx->num_elements];
	while (data_len) {
		page_remainder = PAGE_SIZE - page_offset;
		if (page_remainder < data_len) {
			qeth_eddp_copy_data_tcp(page + page_offset, eddp,
						page_remainder, &hcsum);
			element->length += page_remainder;
			if (first_lap)
				element->flags = SBAL_FLAGS_FIRST_FRAG;
			else
				element->flags = SBAL_FLAGS_MIDDLE_FRAG;
			ctx->num_elements++;
			element++;
			data_len -= page_remainder;
			ctx->offset += page_remainder;
			page = ctx->pages[ctx->offset >> PAGE_SHIFT];
			page_offset = 0;
			element->addr = page + page_offset;
		} else {
			qeth_eddp_copy_data_tcp(page + page_offset, eddp,
						data_len, &hcsum);
			element->length += data_len;
			if (!first_lap)
				element->flags = SBAL_FLAGS_LAST_FRAG;
			ctx->num_elements++;
			ctx->offset += data_len;
			data_len = 0;
		}
		first_lap = 0;
	}
	((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
}

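/*
 * Compute the IPv4 TCP pseudo header checksum and add in the checksum
 * of the TCP header itself; the payload is accumulated later while it
 * is being copied.
 */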
static inline u32
qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
{
	u32 phcsum; /* pseudo header checksum */

	QETH_DBF_TEXT(trace, 5, "eddpckt4");
	eddp->th.tcp.h.check = 0;
	/* compute pseudo header checksum */
	phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr,
				    eddp->thl + data_len, IPPROTO_TCP, 0);
	/* compute checksum of tcp header */
	return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
}

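/*
 * Compute the IPv6 pseudo header checksum over source address,
 * destination address and protocol. Note that, unlike the IPv4 variant
 * above, this does not fold in the payload length or the TCP header
 * itself (data_len is unused here).
 */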
static inline u32
qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
{
	u32 proto;
	u32 phcsum; /* pseudo header checksum */

	QETH_DBF_TEXT(trace, 5, "eddpckt6");
	eddp->th.tcp.h.check = 0;
	/* compute pseudo header checksum */
	phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.saddr,
			      sizeof(struct in6_addr), 0);
	phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.daddr,
			      sizeof(struct in6_addr), phcsum);
	proto = htonl(IPPROTO_TCP);
	phcsum = csum_partial((u8 *)&proto, sizeof(u32), phcsum);
	return phcsum;
}

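/*
 * Allocate a qeth_eddp_data descriptor and store copies of the qeth,
 * network and transport headers in it as templates for the segments.
 */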
static inline struct qeth_eddp_data *
qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl)
{
	struct qeth_eddp_data *eddp;

	QETH_DBF_TEXT(trace, 5, "eddpcrda");
	eddp = kmalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC);
	if (eddp) {
		memset(eddp, 0, sizeof(struct qeth_eddp_data));
		eddp->nhl = nhl;
		eddp->thl = thl;
		memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr));
		memcpy(&eddp->nh, nh, nhl);
		memcpy(&eddp->th, th, thl);
		eddp->frag = -1; /* initially we're in skb->data */
	}
	return eddp;
}

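/*
 * Main segmentation loop: cut the skb's payload into tso_size'd
 * segments, adjust the header templates for each segment (lengths, IP
 * checksum, FIN/PSH on the last segment, IP id and TCP sequence number
 * between segments) and emit each segment's headers and data into the
 * context.
 */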
static inline void
__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
			     struct qeth_eddp_data *eddp)
{
	struct tcphdr *tcph;
	int data_len;
	u32 hcsum;

	QETH_DBF_TEXT(trace, 5, "eddpftcp");
	eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
	tcph = eddp->skb->h.th;
	while (eddp->skb_offset < eddp->skb->len) {
		data_len = min((int)skb_shinfo(eddp->skb)->tso_size,
			       (int)(eddp->skb->len - eddp->skb_offset));
		/* prepare qdio hdr */
		if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
			eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN +
						     eddp->nhl + eddp->thl -
						     sizeof(struct qeth_hdr);
#ifdef CONFIG_QETH_VLAN
			if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
				eddp->qh.hdr.l2.pkt_length += VLAN_HLEN;
#endif /* CONFIG_QETH_VLAN */
		} else
			eddp->qh.hdr.l3.length = data_len + eddp->nhl +
						 eddp->thl;
		/* prepare ip hdr */
		if (eddp->skb->protocol == ETH_P_IP) {
			eddp->nh.ip4.h.tot_len = data_len + eddp->nhl +
						 eddp->thl;
			eddp->nh.ip4.h.check = 0;
			eddp->nh.ip4.h.check =
				ip_fast_csum((u8 *)&eddp->nh.ip4.h,
					     eddp->nh.ip4.h.ihl);
		} else
			eddp->nh.ip6.h.payload_len = data_len + eddp->thl;
		/* prepare tcp hdr */
		if (data_len == (eddp->skb->len - eddp->skb_offset)) {
			/* last segment -> set FIN and PSH flags */
			eddp->th.tcp.h.fin = tcph->fin;
			eddp->th.tcp.h.psh = tcph->psh;
		}
		if (eddp->skb->protocol == ETH_P_IP)
			hcsum = qeth_eddp_check_tcp4_hdr(eddp, data_len);
		else
			hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
		/* fill the next segment into the context */
		qeth_eddp_create_segment_hdrs(ctx, eddp);
		qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum);
		if (eddp->skb_offset >= eddp->skb->len)
			break;
		/* prepare headers for next round */
		if (eddp->skb->protocol == ETH_P_IP)
			eddp->nh.ip4.h.id++;
		eddp->th.tcp.h.seq += data_len;
	}
}

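/*
 * Set up the header templates (IPv4 or IPv6, plus MAC and VLAN headers
 * in layer2 mode) from the original skb and run the segmentation loop.
 */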
static inline int
qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
			   struct sk_buff *skb, struct qeth_hdr *qhdr)
{
	struct qeth_eddp_data *eddp = NULL;

	QETH_DBF_TEXT(trace, 5, "eddpficx");
	/* create our segmentation headers and copy original headers */
	if (skb->protocol == ETH_P_IP)
		eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.iph,
						  skb->nh.iph->ihl*4,
						  (u8 *)skb->h.th,
						  skb->h.th->doff*4);
	else
		eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.ipv6h,
						  sizeof(struct ipv6hdr),
						  (u8 *)skb->h.th,
						  skb->h.th->doff*4);

	if (eddp == NULL) {
		QETH_DBF_TEXT(trace, 2, "eddpfcnm");
		return -ENOMEM;
	}
	if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
#ifdef CONFIG_QETH_VLAN
		if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
			eddp->vlan[0] = __constant_htons(skb->protocol);
			eddp->vlan[1] = htons(vlan_tx_tag_get(skb));
		}
#endif /* CONFIG_QETH_VLAN */
	}
	/* these flags will only be set on the last segment */
	eddp->th.tcp.h.fin = 0;
	eddp->th.tcp.h.psh = 0;
	eddp->skb = skb;
	/* begin segmentation and fill context */
	__qeth_eddp_fill_context_tcp(ctx, eddp);
	kfree(eddp);
	return 0;
}

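/*
 * Work out how many pages and buffer elements the context needs:
 * either several segments fit into one page (one element per skb), or
 * one segment spans several pages (several elements per skb).
 */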
static inline void
qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
			 int hdr_len)
{
	int skbs_per_page;

	QETH_DBF_TEXT(trace, 5, "eddpcanp");
	/* can we put multiple skbs in one page? */
	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len);
	if (skbs_per_page > 1) {
		ctx->num_pages = (skb_shinfo(skb)->tso_segs + 1) /
				 skbs_per_page + 1;
		ctx->elements_per_skb = 1;
	} else {
		/* no -> how many elements per skb? */
		ctx->elements_per_skb = (skb_shinfo(skb)->tso_size + hdr_len +
					 PAGE_SIZE) >> PAGE_SHIFT;
		ctx->num_pages = ctx->elements_per_skb *
				 (skb_shinfo(skb)->tso_segs + 1);
	}
	ctx->num_elements = ctx->elements_per_skb *
			    (skb_shinfo(skb)->tso_segs + 1);
}

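/*
 * Allocate and initialize an EDDP context for the given skb: compute
 * the page/element budget, then allocate the pages and the element
 * array. All allocations use GFP_ATOMIC.
 */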
static inline struct qeth_eddp_context *
qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
				 int hdr_len)
{
	struct qeth_eddp_context *ctx = NULL;
	u8 *addr;
	int i;

	QETH_DBF_TEXT(trace, 5, "creddpcg");
	/* create the context and allocate pages */
	ctx = kmalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC);
	if (ctx == NULL) {
		QETH_DBF_TEXT(trace, 2, "ceddpcn1");
		return NULL;
	}
	memset(ctx, 0, sizeof(struct qeth_eddp_context));
	ctx->type = QETH_LARGE_SEND_EDDP;
	qeth_eddp_calc_num_pages(ctx, skb, hdr_len);
	if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)) {
		QETH_DBF_TEXT(trace, 2, "ceddpcis");
		kfree(ctx);
		return NULL;
	}
	ctx->pages = kmalloc(ctx->num_pages * sizeof(u8 *), GFP_ATOMIC);
	if (ctx->pages == NULL) {
		QETH_DBF_TEXT(trace, 2, "ceddpcn2");
		kfree(ctx);
		return NULL;
	}
	memset(ctx->pages, 0, ctx->num_pages * sizeof(u8 *));
	for (i = 0; i < ctx->num_pages; ++i) {
		addr = (u8 *)__get_free_page(GFP_ATOMIC);
		if (addr == NULL) {
			QETH_DBF_TEXT(trace, 2, "ceddpcn3");
			ctx->num_pages = i;
			qeth_eddp_free_context(ctx);
			return NULL;
		}
		memset(addr, 0, PAGE_SIZE);
		ctx->pages[i] = addr;
	}
	ctx->elements = kmalloc(ctx->num_elements *
				sizeof(struct qeth_eddp_element), GFP_ATOMIC);
	if (ctx->elements == NULL) {
		QETH_DBF_TEXT(trace, 2, "ceddpcn4");
		qeth_eddp_free_context(ctx);
		return NULL;
	}
	memset(ctx->elements, 0,
	       ctx->num_elements * sizeof(struct qeth_eddp_element));
	/* reset num_elements; it is incremented again while the context is
	 * filled and then reflects the number of actually used elements */
	ctx->num_elements = 0;
	return ctx;
}

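/*
 * Create an EDDP context for a TCP segmentation offload skb (IPv4 or
 * IPv6) and fill it with the segmented packets.
 */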
static inline struct qeth_eddp_context *
qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
			     struct qeth_hdr *qhdr)
{
	struct qeth_eddp_context *ctx = NULL;

	QETH_DBF_TEXT(trace, 5, "creddpct");
	if (skb->protocol == ETH_P_IP)
		ctx = qeth_eddp_create_context_generic(card, skb,
			sizeof(struct qeth_hdr) + skb->nh.iph->ihl*4 +
			skb->h.th->doff*4);
	else if (skb->protocol == ETH_P_IPV6)
		ctx = qeth_eddp_create_context_generic(card, skb,
			sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
			skb->h.th->doff*4);
	else
		QETH_DBF_TEXT(trace, 2, "cetcpinv");

	if (ctx == NULL) {
		QETH_DBF_TEXT(trace, 2, "creddpnl");
		return NULL;
	}
	if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)) {
		QETH_DBF_TEXT(trace, 2, "ceddptfe");
		qeth_eddp_free_context(ctx);
		return NULL;
	}
	atomic_set(&ctx->refcnt, 1);
	return ctx;
}

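/*
 * Entry point for EDDP context creation; currently only TCP is
 * supported as a transport protocol.
 */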
struct qeth_eddp_context *
qeth_eddp_create_context(struct qeth_card *card, struct sk_buff *skb,
			 struct qeth_hdr *qhdr)
{
	QETH_DBF_TEXT(trace, 5, "creddpc");
	switch (skb->sk->sk_protocol) {
	case IPPROTO_TCP:
		return qeth_eddp_create_context_tcp(card, skb, qhdr);
	default:
		QETH_DBF_TEXT(trace, 2, "eddpinvp");
	}
	return NULL;
}