author     Frank Blaschka <frank.blaschka@de.ibm.com>   2007-07-12 06:51:34 -0400
committer  Jeff Garzik <jeff@garzik.org>                2007-07-16 18:28:04 -0400
commit     aa617aa9568e5fc80103194f5a6da2977c305f10 (patch)
tree       f9ca79404a596e65d50539073b71abdffe86edc3 /drivers/s390/net
parent     44c821525778c5d2e81da293195d5d589e8ad845 (diff)
s390: scatter-gather for inbound traffic in qeth driver
For large incoming packets (> PAGE_SIZE/2) qeth creates a fragmented skb
by adding pointers to qdio pages to the fragment list of the skb. This
avoids allocating big chunks of consecutive memory. It also saves copying
data from the qdio buffer into the skb.

Signed-off-by: Frank Blaschka <frank.blaschka@de.ibm.com>
Signed-off-by: Ursula Braun <braunu@de.ibm.com>
Signed-off-by: Frank Pavlic <fpavlic@de.ibm.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
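[Editorial note, not part of the commit] The receive-side technique is easy to
see in miniature: copy a small head of the packet into the skb's linear area
(some protocol code expects data at skb->data), then attach the remainder of
the qdio page as a page fragment while holding an extra page reference. The
sketch below condenses what qeth_create_skb_frag() in the qeth_main.c hunk
does; the function name rx_build_sg_skb, the bare 64-byte pull-in, and the
simplified headroom handling are illustrative, not the driver's code.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/skbuff.h>

static struct sk_buff *rx_build_sg_skb(struct page *page, int offset,
                                       int data_len)
{
    int head = min(data_len, 64);
    struct sk_buff *skb = dev_alloc_skb(64);    /* small linear part */

    if (!skb)
        return NULL;
    /* pull the first bytes into the linear area ... */
    memcpy(skb_put(skb, head), page_address(page) + offset, head);
    if (data_len > head) {
        /* ... and hang the rest off the skb as a page fragment; the
         * get_page() reference keeps the qdio page alive until the
         * stack has consumed and freed the skb. */
        get_page(page);
        skb_fill_page_desc(skb, 0, page, offset + head, data_len - head);
        skb->data_len += data_len - head;
        skb->len      += data_len - head;
        skb->truesize += data_len - head;
    }
    return skb;
}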
Diffstat (limited to 'drivers/s390/net')
-rw-r--r--  drivers/s390/net/qeth.h        9
-rw-r--r--  drivers/s390/net/qeth_main.c   182
-rw-r--r--  drivers/s390/net/qeth_proc.c   6
3 files changed, 165 insertions(+), 32 deletions(-)
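[Editorial note, not part of the commit] For orientation before the per-file
diffs: with the 4 KB pages used on s390, the copy break QETH_RX_SG_CB =
PAGE_SIZE >> 1 added below works out to 2048 bytes. The scatter-gather
receive path is taken only when all three conditions from the
qeth_get_next_skb() hunk hold; a condensed sketch (the helper name
qeth_rx_use_sg is hypothetical):

static int qeth_rx_use_sg(struct qeth_card *card, int skb_len)
{
    return (skb_len >= card->options.rx_sg_cb) &&      /* large packet */
           (card->info.type != QETH_CARD_TYPE_OSN) &&  /* not an OSN dev */
           !atomic_read(&card->force_alloc_skb);       /* no mem fallback */
}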
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h
index b34eb82edd98..ec18bae05df0 100644
--- a/drivers/s390/net/qeth.h
+++ b/drivers/s390/net/qeth.h
@@ -211,6 +211,10 @@ struct qeth_perf_stats {
 	/* initial values when measuring starts */
 	unsigned long initial_rx_packets;
 	unsigned long initial_tx_packets;
+	/* inbound scatter gather data */
+	unsigned int sg_skbs_rx;
+	unsigned int sg_frags_rx;
+	unsigned int sg_alloc_page_rx;
 };
 
 /* Routing stuff */
@@ -341,6 +345,9 @@ qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, enum qeth_ipa_funcs func)
 
 #define QETH_IP_HEADER_SIZE 40
 
+/* large receive scatter gather copy break */
+#define QETH_RX_SG_CB (PAGE_SIZE >> 1)
+
 struct qeth_hdr_layer3 {
 	__u8 id;
 	__u8 flags;
@@ -771,6 +778,7 @@ struct qeth_card_options {
 	int layer2;
 	enum qeth_large_send_types large_send;
 	int performance_stats;
+	int rx_sg_cb;
 };
 
 /*
@@ -828,6 +836,7 @@ struct qeth_card {
 	int (*orig_hard_header)(struct sk_buff *,struct net_device *,
 				unsigned short,void *,void *,unsigned);
 	struct qeth_osn_info osn_info;
+	atomic_t force_alloc_skb;
 };
 
 struct qeth_card_list_struct {
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 86b0c44165c1..8dca93598ea3 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -1054,6 +1054,7 @@ qeth_set_intial_options(struct qeth_card *card)
 	else
 		card->options.layer2 = 0;
 	card->options.performance_stats = 0;
+	card->options.rx_sg_cb = QETH_RX_SG_CB;
 }
 
 /**
@@ -2258,6 +2259,89 @@ qeth_get_skb(unsigned int length, struct qeth_hdr *hdr)
 	return skb;
 }
 
+static inline int
+qeth_create_skb_frag(struct qdio_buffer_element *element,
+		     struct sk_buff **pskb,
+		     int offset, int *pfrag, int data_len)
+{
+	struct page *page = virt_to_page(element->addr);
+	if (*pfrag == 0) {
+		/* the upper protocol layers assume that there is data in the
+		 * skb itself. Copy a small amount (64 bytes) to make them
+		 * happy. */
+		*pskb = dev_alloc_skb(64 + QETH_FAKE_LL_LEN_ETH);
+		if (!(*pskb))
+			return -ENOMEM;
+		skb_reserve(*pskb, QETH_FAKE_LL_LEN_ETH);
+		if (data_len <= 64) {
+			memcpy(skb_put(*pskb, data_len), element->addr + offset,
+				data_len);
+		} else {
+			get_page(page);
+			memcpy(skb_put(*pskb, 64), element->addr + offset, 64);
+			skb_fill_page_desc(*pskb, *pfrag, page, offset + 64,
+				data_len - 64);
+			(*pskb)->data_len += data_len - 64;
+			(*pskb)->len += data_len - 64;
+			(*pskb)->truesize += data_len - 64;
+		}
+	} else {
+		get_page(page);
+		skb_fill_page_desc(*pskb, *pfrag, page, offset, data_len);
+		(*pskb)->data_len += data_len;
+		(*pskb)->len += data_len;
+		(*pskb)->truesize += data_len;
+	}
+	(*pfrag)++;
+	return 0;
+}
+
+static inline struct qeth_buffer_pool_entry *
+qeth_find_free_buffer_pool_entry(struct qeth_card *card)
+{
+	struct list_head *plh;
+	struct qeth_buffer_pool_entry *entry;
+	int i, free;
+	struct page *page;
+
+	if (list_empty(&card->qdio.in_buf_pool.entry_list))
+		return NULL;
+
+	list_for_each(plh, &card->qdio.in_buf_pool.entry_list) {
+		entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
+		free = 1;
+		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
+			if (page_count(virt_to_page(entry->elements[i])) > 1) {
+				free = 0;
+				break;
+			}
+		}
+		if (free) {
+			list_del_init(&entry->list);
+			return entry;
+		}
+	}
+
+	/* no free buffer in pool so take first one and swap pages */
+	entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
+			   struct qeth_buffer_pool_entry, list);
+	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
+		if (page_count(virt_to_page(entry->elements[i])) > 1) {
+			page = alloc_page(GFP_ATOMIC|GFP_DMA);
+			if (!page) {
+				return NULL;
+			} else {
+				free_page((unsigned long)entry->elements[i]);
+				entry->elements[i] = page_address(page);
+				if (card->options.performance_stats)
+					card->perf_stats.sg_alloc_page_rx++;
+			}
+		}
+	}
+	list_del_init(&entry->list);
+	return entry;
+}
+
 static struct sk_buff *
 qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer,
 		  struct qdio_buffer_element **__element, int *__offset,
@@ -2269,6 +2353,8 @@ qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer,
 	int skb_len;
 	void *data_ptr;
 	int data_len;
+	int use_rx_sg = 0;
+	int frag = 0;
 
 	QETH_DBF_TEXT(trace,6,"nextskb");
 	/* qeth_hdr must not cross element boundaries */
@@ -2293,23 +2379,43 @@ qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer,
 
 	if (!skb_len)
 		return NULL;
-	if (card->options.fake_ll){
-		if(card->dev->type == ARPHRD_IEEE802_TR){
-			if (!(skb = qeth_get_skb(skb_len+QETH_FAKE_LL_LEN_TR, *hdr)))
-				goto no_mem;
-			skb_reserve(skb,QETH_FAKE_LL_LEN_TR);
+	if ((skb_len >= card->options.rx_sg_cb) &&
+	    (!(card->info.type == QETH_CARD_TYPE_OSN)) &&
+	    (!atomic_read(&card->force_alloc_skb))) {
+		use_rx_sg = 1;
+	} else {
+		if (card->options.fake_ll) {
+			if (card->dev->type == ARPHRD_IEEE802_TR) {
+				if (!(skb = qeth_get_skb(skb_len +
+						QETH_FAKE_LL_LEN_TR, *hdr)))
+					goto no_mem;
+				skb_reserve(skb, QETH_FAKE_LL_LEN_TR);
+			} else {
+				if (!(skb = qeth_get_skb(skb_len +
+						QETH_FAKE_LL_LEN_ETH, *hdr)))
+					goto no_mem;
+				skb_reserve(skb, QETH_FAKE_LL_LEN_ETH);
+			}
 		} else {
-			if (!(skb = qeth_get_skb(skb_len+QETH_FAKE_LL_LEN_ETH, *hdr)))
+			skb = qeth_get_skb(skb_len, *hdr);
+			if (!skb)
 				goto no_mem;
-			skb_reserve(skb,QETH_FAKE_LL_LEN_ETH);
 		}
-	} else if (!(skb = qeth_get_skb(skb_len, *hdr)))
-		goto no_mem;
+	}
+
 	data_ptr = element->addr + offset;
 	while (skb_len) {
 		data_len = min(skb_len, (int)(element->length - offset));
-		if (data_len)
-			memcpy(skb_put(skb, data_len), data_ptr, data_len);
+		if (data_len) {
+			if (use_rx_sg) {
+				if (qeth_create_skb_frag(element, &skb, offset,
+				    &frag, data_len))
+					goto no_mem;
+			} else {
+				memcpy(skb_put(skb, data_len), data_ptr,
+				       data_len);
+			}
+		}
 		skb_len -= data_len;
 		if (skb_len){
 			if (qeth_is_last_sbale(element)){
@@ -2331,6 +2437,10 @@ qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer,
 	}
 	*__element = element;
 	*__offset = offset;
+	if (use_rx_sg && card->options.performance_stats) {
+		card->perf_stats.sg_skbs_rx++;
+		card->perf_stats.sg_frags_rx += skb_shinfo(skb)->nr_frags;
+	}
 	return skb;
 no_mem:
 	if (net_ratelimit()){
@@ -2608,28 +2718,15 @@ qeth_process_inbound_buffer(struct qeth_card *card,
 	}
 }
 
-static struct qeth_buffer_pool_entry *
-qeth_get_buffer_pool_entry(struct qeth_card *card)
-{
-	struct qeth_buffer_pool_entry *entry;
-
-	QETH_DBF_TEXT(trace, 6, "gtbfplen");
-	if (!list_empty(&card->qdio.in_buf_pool.entry_list)) {
-		entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
-				   struct qeth_buffer_pool_entry, list);
-		list_del_init(&entry->list);
-		return entry;
-	}
-	return NULL;
-}
-
-static void
+static int
 qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf)
 {
 	struct qeth_buffer_pool_entry *pool_entry;
 	int i;
 
-	pool_entry = qeth_get_buffer_pool_entry(card);
+	pool_entry = qeth_find_free_buffer_pool_entry(card);
+	if (!pool_entry)
+		return 1;
 	/*
 	 * since the buffer is accessed only from the input_tasklet
 	 * there shouldn't be a need to synchronize; also, since we use
@@ -2648,6 +2745,7 @@ qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf)
 		buf->buffer->element[i].flags = 0;
 	}
 	buf->state = QETH_QDIO_BUF_EMPTY;
+	return 0;
 }
 
 static void
@@ -2682,6 +2780,7 @@ qeth_queue_input_buffer(struct qeth_card *card, int index)
 	int count;
 	int i;
 	int rc;
+	int newcount = 0;
 
 	QETH_DBF_TEXT(trace,6,"queinbuf");
 	count = (index < queue->next_buf_to_init)?
@@ -2692,9 +2791,27 @@ qeth_queue_input_buffer(struct qeth_card *card, int index)
 	/* only requeue at a certain threshold to avoid SIGAs */
 	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)){
 		for (i = queue->next_buf_to_init;
-		     i < queue->next_buf_to_init + count; ++i)
-			qeth_init_input_buffer(card,
-				&queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]);
+		     i < queue->next_buf_to_init + count; ++i) {
+			if (qeth_init_input_buffer(card,
+				&queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) {
+				break;
+			} else {
+				newcount++;
+			}
+		}
+
+		if (newcount < count) {
+			/* we are in memory shortage so we switch back to
+			   traditional skb allocation and drop packages */
+			if (atomic_cmpxchg(&card->force_alloc_skb, 0, 1))
+				printk(KERN_WARNING
+					"qeth: switch to alloc skb\n");
+			count = newcount;
+		} else {
+			if (atomic_cmpxchg(&card->force_alloc_skb, 1, 0))
+				printk(KERN_WARNING "qeth: switch to sg\n");
+		}
+
 		/*
 		 * according to old code it should be avoided to requeue all
 		 * 128 buffers in order to benefit from PCI avoidance.
@@ -6494,6 +6611,7 @@ qeth_hardsetup_card(struct qeth_card *card)
 
 	QETH_DBF_TEXT(setup, 2, "hrdsetup");
 
+	atomic_set(&card->force_alloc_skb, 0);
 retry:
 	if (retries < 3){
 		PRINT_WARN("Retrying to do IDX activates.\n");
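[Editorial note, not part of the commit] On the fallback in
qeth_queue_input_buffer() above: force_alloc_skb is flipped with
atomic_cmpxchg(), which returns the previous value, so a caller can tell
whether the flag actually changed state. A hedged sketch of that
transition-only pattern (names illustrative; unlike the hunk above, it
compares the return value explicitly so each warning fires once per
transition):

#include <linux/kernel.h>
#include <asm/atomic.h>         /* <linux/atomic.h> on later kernels */

static void rx_set_fallback(atomic_t *force_alloc_skb, int shortage)
{
    if (shortage) {
        /* 0 -> 1: fall back to plain skb allocation exactly once */
        if (atomic_cmpxchg(force_alloc_skb, 0, 1) == 0)
            printk(KERN_WARNING "qeth: switch to alloc skb\n");
    } else {
        /* 1 -> 0: return to scatter-gather exactly once */
        if (atomic_cmpxchg(force_alloc_skb, 1, 0) == 1)
            printk(KERN_WARNING "qeth: switch to sg\n");
    }
}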
diff --git a/drivers/s390/net/qeth_proc.c b/drivers/s390/net/qeth_proc.c
index 89d56c8ecdd2..f1ff165a5e05 100644
--- a/drivers/s390/net/qeth_proc.c
+++ b/drivers/s390/net/qeth_proc.c
@@ -212,6 +212,12 @@ qeth_perf_procfile_seq_show(struct seq_file *s, void *it)
212 " Skb fragments sent in SG mode : %u\n\n", 212 " Skb fragments sent in SG mode : %u\n\n",
213 card->perf_stats.sg_skbs_sent, 213 card->perf_stats.sg_skbs_sent,
214 card->perf_stats.sg_frags_sent); 214 card->perf_stats.sg_frags_sent);
215 seq_printf(s, " Skbs received in SG mode : %u\n"
216 " Skb fragments received in SG mode : %u\n"
217 " Page allocations for rx SG mode : %u\n\n",
218 card->perf_stats.sg_skbs_rx,
219 card->perf_stats.sg_frags_rx,
220 card->perf_stats.sg_alloc_page_rx);
215 seq_printf(s, " large_send tx (in Kbytes) : %u\n" 221 seq_printf(s, " large_send tx (in Kbytes) : %u\n"
216 " large_send count : %u\n\n", 222 " large_send count : %u\n\n",
217 card->perf_stats.large_send_bytes >> 10, 223 card->perf_stats.large_send_bytes >> 10,
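[Editorial note, not part of the commit] A closing remark on buffer recycling:
because qeth_create_skb_frag() takes a get_page() reference on every qdio page
it attaches, qeth_find_free_buffer_pool_entry() may hand a pool entry back to
the hardware only when no skb fragment still points into it. page_count() == 1
is that test; still-referenced pages are swapped for freshly allocated GFP_DMA
pages, counted in sg_alloc_page_rx. A minimal sketch of the reuse test,
assuming a hypothetical entry layout of nr_pages page-sized elements:

#include <linux/mm.h>

/* A pool entry is reusable only if none of its pages is still held by
 * an skb fragment; page_count() > 1 means such a reference remains. */
static int pool_entry_is_free(void **elements, int nr_pages)
{
    int i;

    for (i = 0; i < nr_pages; i++)
        if (page_count(virt_to_page(elements[i])) > 1)
            return 0;       /* still referenced by an skb */
    return 1;
}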