author     Rasesh Mody <rmody@brocade.com>        2012-12-11 07:24:53 -0500
committer  David S. Miller <davem@davemloft.net>  2012-12-11 18:25:49 -0500
commit     30f9fc947938d483c48011530973903797e8739f (patch)
tree       5164f7ad8d4885f52abc56a4d9f87239bd3de628 /drivers/net/ethernet/brocade
parent     d3f92aec9549697666f98d195cc9d59add472cfb (diff)
bna: Rx Page Based Allocation
Change Details:
Enhanced support for GRO. A page-based allocation method is now used for Rx
buffers in the GRO path. Skb allocation has been removed from the Rx path so
that the warm-cache skbs provided by napi_get_frags() are always used.
Signed-off-by: Rasesh Mody <rmody@brocade.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
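
For reference, the receive-path pattern this change adopts looks roughly like
the sketch below. It is only an illustration (the function name and parameters
are hypothetical, not part of the patch); the actual driver logic is in
bnad_cq_prepare_skb() and bnad_cq_process() in the diff that follows. The skb
comes from napi_get_frags(), the DMA-mapped page fragment is attached to it,
and the packet is handed to GRO via napi_gro_frags(), so the hot path needs
neither a per-packet skb allocation nor eth_type_trans().

/*
 * Illustrative sketch only (hypothetical helper, not part of the patch):
 * the page-based receive pattern used by bnad_cq_prepare_skb() below.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

static void rx_page_frag_receive(struct napi_struct *napi, struct device *dev,
				 struct page *page, u32 page_offset,
				 dma_addr_t dma_addr, u32 len)
{
	struct sk_buff *skb;

	skb = napi_get_frags(napi);	/* warm-cache skb owned by NAPI */
	if (unlikely(!skb))
		return;

	dma_unmap_page(dev, dma_addr, len, DMA_FROM_DEVICE);

	/* attach the received page fragment; no copy, no skb allocation */
	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			   page, page_offset, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += len;

	/* hand the frag-only skb to GRO; eth_type_trans() is not needed */
	napi_gro_frags(napi);
}

Because the skb is owned by the NAPI context and recycled, the driver avoids a
cold-cache skb allocation per packet, which is what "warm-cache skbs" above
refers to.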
Diffstat (limited to 'drivers/net/ethernet/brocade')
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c  318
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.h   19
2 files changed, 273 insertions, 64 deletions
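
On the allocation side, bnad_rxq_refill_page() (added below) carves each
compound page of size PAGE_SIZE << alloc_order into map_size-sized chunks and
reuses the same page, taking one extra page reference per chunk and remembering
the last position in reuse_pi, until the page is exhausted. A minimal sketch of
that scheme, with hypothetical names (rx_page_cursor, rx_next_chunk) standing
in for the unmap-queue bookkeeping:

/*
 * Illustrative sketch only (struct and function names are hypothetical):
 * one compound page is split into map_size chunks and reused until full.
 */
#include <linux/gfp.h>
#include <linux/mm.h>

struct rx_page_cursor {
	struct page *page;	/* current compound page, NULL if none */
	u32 offset;		/* offset of the chunk handed out last */
	u32 alloc_size;		/* PAGE_SIZE << alloc_order */
	u32 map_size;		/* per-buffer chunk size */
};

/* Return the page and offset backing the next Rx buffer, or NULL on failure. */
static struct page *rx_next_chunk(struct rx_page_cursor *c,
				  unsigned int alloc_order, u32 *offset)
{
	if (!c->page || c->offset + c->map_size >= c->alloc_size) {
		/* previous page exhausted: allocate a fresh compound page */
		c->page = alloc_pages(GFP_ATOMIC | __GFP_COMP, alloc_order);
		c->offset = 0;
	} else {
		/* carve the next chunk out of the current page */
		c->offset += c->map_size;
		get_page(c->page);	/* each chunk keeps its own reference */
	}

	*offset = c->offset;
	return c->page;
}

Allocating with __GFP_COMP makes the higher-order page a compound page, so the
per-chunk put_page() in bnad_rxq_cleanup_page() correctly releases the whole
allocation once every chunk has been freed.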
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 35a301330e5d..7cce42dc2f20 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -266,53 +266,181 @@ bnad_msix_tx(int irq, void *data)
266 | return IRQ_HANDLED; | 266 | return IRQ_HANDLED; |
267 | } | 267 | } |
268 | 268 | ||
269 | static inline void | ||
270 | bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb) | ||
271 | { | ||
272 | struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; | ||
273 | |||
274 | unmap_q->reuse_pi = -1; | ||
275 | unmap_q->alloc_order = -1; | ||
276 | unmap_q->map_size = 0; | ||
277 | unmap_q->type = BNAD_RXBUF_NONE; | ||
278 | } | ||
279 | |||
280 | /* Default is page-based allocation. Multi-buffer support - TBD */ | ||
281 | static int | ||
282 | bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb) | ||
283 | { | ||
284 | struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; | ||
285 | int mtu, order; | ||
286 | |||
287 | bnad_rxq_alloc_uninit(bnad, rcb); | ||
288 | |||
289 | mtu = bna_enet_mtu_get(&bnad->bna.enet); | ||
290 | order = get_order(mtu); | ||
291 | |||
292 | if (bna_is_small_rxq(rcb->id)) { | ||
293 | unmap_q->alloc_order = 0; | ||
294 | unmap_q->map_size = rcb->rxq->buffer_size; | ||
295 | } else { | ||
296 | unmap_q->alloc_order = order; | ||
297 | unmap_q->map_size = | ||
298 | (rcb->rxq->buffer_size > 2048) ? | ||
299 | PAGE_SIZE << order : 2048; | ||
300 | } | ||
301 | |||
302 | BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size)); | ||
303 | |||
304 | unmap_q->type = BNAD_RXBUF_PAGE; | ||
305 | |||
306 | return 0; | ||
307 | } | ||
308 | |||
309 | static inline void | ||
310 | bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap) | ||
311 | { | ||
312 | if (!unmap->page) | ||
313 | return; | ||
314 | |||
315 | dma_unmap_page(&bnad->pcidev->dev, | ||
316 | dma_unmap_addr(&unmap->vector, dma_addr), | ||
317 | unmap->vector.len, DMA_FROM_DEVICE); | ||
318 | put_page(unmap->page); | ||
319 | unmap->page = NULL; | ||
320 | dma_unmap_addr_set(&unmap->vector, dma_addr, 0); | ||
321 | unmap->vector.len = 0; | ||
322 | } | ||
323 | |||
324 | static inline void | ||
325 | bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap) | ||
326 | { | ||
327 | if (!unmap->skb) | ||
328 | return; | ||
329 | |||
330 | dma_unmap_single(&bnad->pcidev->dev, | ||
331 | dma_unmap_addr(&unmap->vector, dma_addr), | ||
332 | unmap->vector.len, DMA_FROM_DEVICE); | ||
333 | dev_kfree_skb_any(unmap->skb); | ||
334 | unmap->skb = NULL; | ||
335 | dma_unmap_addr_set(&unmap->vector, dma_addr, 0); | ||
336 | unmap->vector.len = 0; | ||
337 | } | ||
338 | |||
269 | static void | 339 | static void |
270 | bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb) | 340 | bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb) |
271 | { | 341 | { |
272 | struct bnad_rx_unmap *unmap_q = rcb->unmap_q; | 342 | struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; |
273 | struct sk_buff *skb; | ||
274 | int i; | 343 | int i; |
275 | 344 | ||
276 | for (i = 0; i < rcb->q_depth; i++) { | 345 | for (i = 0; i < rcb->q_depth; i++) { |
277 | struct bnad_rx_unmap *unmap = &unmap_q[i]; | 346 | struct bnad_rx_unmap *unmap = &unmap_q->unmap[i]; |
278 | 347 | ||
279 | skb = unmap->skb; | 348 | if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) |
280 | if (!skb) | 349 | bnad_rxq_cleanup_page(bnad, unmap); |
281 | continue; | 350 | else |
351 | bnad_rxq_cleanup_skb(bnad, unmap); | ||
352 | } | ||
353 | bnad_rxq_alloc_uninit(bnad, rcb); | ||
354 | } | ||
282 | 355 | ||
283 | unmap->skb = NULL; | 356 | static u32 |
284 | dma_unmap_single(&bnad->pcidev->dev, | 357 | bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc) |
285 | dma_unmap_addr(&unmap->vector, dma_addr), | 358 | { |
286 | unmap->vector.len, DMA_FROM_DEVICE); | 359 | u32 alloced, prod, q_depth; |
287 | dma_unmap_addr_set(&unmap->vector, dma_addr, 0); | 360 | struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; |
288 | unmap->vector.len = 0; | 361 | struct bnad_rx_unmap *unmap, *prev; |
289 | dev_kfree_skb_any(skb); | 362 | struct bna_rxq_entry *rxent; |
363 | struct page *page; | ||
364 | u32 page_offset, alloc_size; | ||
365 | dma_addr_t dma_addr; | ||
366 | |||
367 | prod = rcb->producer_index; | ||
368 | q_depth = rcb->q_depth; | ||
369 | |||
370 | alloc_size = PAGE_SIZE << unmap_q->alloc_order; | ||
371 | alloced = 0; | ||
372 | |||
373 | while (nalloc--) { | ||
374 | unmap = &unmap_q->unmap[prod]; | ||
375 | |||
376 | if (unmap_q->reuse_pi < 0) { | ||
377 | page = alloc_pages(GFP_ATOMIC | __GFP_COMP, | ||
378 | unmap_q->alloc_order); | ||
379 | page_offset = 0; | ||
380 | } else { | ||
381 | prev = &unmap_q->unmap[unmap_q->reuse_pi]; | ||
382 | page = prev->page; | ||
383 | page_offset = prev->page_offset + unmap_q->map_size; | ||
384 | get_page(page); | ||
385 | } | ||
386 | |||
387 | if (unlikely(!page)) { | ||
388 | BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed); | ||
389 | rcb->rxq->rxbuf_alloc_failed++; | ||
390 | goto finishing; | ||
391 | } | ||
392 | |||
393 | dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset, | ||
394 | unmap_q->map_size, DMA_FROM_DEVICE); | ||
395 | |||
396 | unmap->page = page; | ||
397 | unmap->page_offset = page_offset; | ||
398 | dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr); | ||
399 | unmap->vector.len = unmap_q->map_size; | ||
400 | page_offset += unmap_q->map_size; | ||
401 | |||
402 | if (page_offset < alloc_size) | ||
403 | unmap_q->reuse_pi = prod; | ||
404 | else | ||
405 | unmap_q->reuse_pi = -1; | ||
406 | |||
407 | rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod]; | ||
408 | BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr); | ||
409 | BNA_QE_INDX_INC(prod, q_depth); | ||
410 | alloced++; | ||
290 | } | 411 | } |
412 | |||
413 | finishing: | ||
414 | if (likely(alloced)) { | ||
415 | rcb->producer_index = prod; | ||
416 | smp_mb(); | ||
417 | if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags))) | ||
418 | bna_rxq_prod_indx_doorbell(rcb); | ||
419 | } | ||
420 | |||
421 | return alloced; | ||
291 | } | 422 | } |
292 | 423 | ||
293 | /* Allocate and post BNAD_RXQ_REFILL_THRESHOLD_SHIFT buffers at a time */ | 424 | static u32 |
294 | static void | 425 | bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc) |
295 | bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb) | ||
296 | { | 426 | { |
297 | u32 to_alloc, alloced, prod, q_depth, buff_sz; | 427 | u32 alloced, prod, q_depth, buff_sz; |
298 | struct bnad_rx_unmap *unmap_q = rcb->unmap_q; | 428 | struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; |
299 | struct bnad_rx_unmap *unmap; | 429 | struct bnad_rx_unmap *unmap; |
300 | struct bna_rxq_entry *rxent; | 430 | struct bna_rxq_entry *rxent; |
301 | struct sk_buff *skb; | 431 | struct sk_buff *skb; |
302 | dma_addr_t dma_addr; | 432 | dma_addr_t dma_addr; |
303 | 433 | ||
304 | buff_sz = rcb->rxq->buffer_size; | 434 | buff_sz = rcb->rxq->buffer_size; |
305 | alloced = 0; | ||
306 | to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth); | ||
307 | if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)) | ||
308 | return; | ||
309 | |||
310 | prod = rcb->producer_index; | 435 | prod = rcb->producer_index; |
311 | q_depth = rcb->q_depth; | 436 | q_depth = rcb->q_depth; |
312 | 437 | ||
313 | while (to_alloc--) { | 438 | alloced = 0; |
314 | skb = netdev_alloc_skb_ip_align(bnad->netdev, | 439 | while (nalloc--) { |
315 | buff_sz); | 440 | unmap = &unmap_q->unmap[prod]; |
441 | |||
442 | skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz); | ||
443 | |||
316 | if (unlikely(!skb)) { | 444 | if (unlikely(!skb)) { |
317 | BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed); | 445 | BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed); |
318 | rcb->rxq->rxbuf_alloc_failed++; | 446 | rcb->rxq->rxbuf_alloc_failed++; |
@@ -320,13 +448,13 @@ bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
320 | } | 448 | } |
321 | dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, | 449 | dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, |
322 | buff_sz, DMA_FROM_DEVICE); | 450 | buff_sz, DMA_FROM_DEVICE); |
323 | rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod]; | ||
324 | 451 | ||
325 | BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr); | ||
326 | unmap = &unmap_q[prod]; | ||
327 | unmap->skb = skb; | 452 | unmap->skb = skb; |
328 | dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr); | 453 | dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr); |
329 | unmap->vector.len = buff_sz; | 454 | unmap->vector.len = buff_sz; |
455 | |||
456 | rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod]; | ||
457 | BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr); | ||
330 | BNA_QE_INDX_INC(prod, q_depth); | 458 | BNA_QE_INDX_INC(prod, q_depth); |
331 | alloced++; | 459 | alloced++; |
332 | } | 460 | } |
@@ -338,6 +466,24 @@ finishing:
338 | if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags))) | 466 | if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags))) |
339 | bna_rxq_prod_indx_doorbell(rcb); | 467 | bna_rxq_prod_indx_doorbell(rcb); |
340 | } | 468 | } |
469 | |||
470 | return alloced; | ||
471 | } | ||
472 | |||
473 | static inline void | ||
474 | bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb) | ||
475 | { | ||
476 | struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; | ||
477 | u32 to_alloc; | ||
478 | |||
479 | to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth); | ||
480 | if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)) | ||
481 | return; | ||
482 | |||
483 | if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) | ||
484 | bnad_rxq_refill_page(bnad, rcb, to_alloc); | ||
485 | else | ||
486 | bnad_rxq_refill_skb(bnad, rcb, to_alloc); | ||
341 | } | 487 | } |
342 | 488 | ||
343 | #define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \ | 489 | #define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \ |
@@ -354,17 +500,62 @@ finishing:
354 | #define flags_udp6 (BNA_CQ_EF_IPV6 | \ | 500 | #define flags_udp6 (BNA_CQ_EF_IPV6 | \ |
355 | BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK) | 501 | BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK) |
356 | 502 | ||
503 | static inline struct sk_buff * | ||
504 | bnad_cq_prepare_skb(struct bnad_rx_ctrl *rx_ctrl, | ||
505 | struct bnad_rx_unmap_q *unmap_q, | ||
506 | struct bnad_rx_unmap *unmap, | ||
507 | u32 length, u32 flags) | ||
508 | { | ||
509 | struct bnad *bnad = rx_ctrl->bnad; | ||
510 | struct sk_buff *skb; | ||
511 | |||
512 | if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) { | ||
513 | skb = napi_get_frags(&rx_ctrl->napi); | ||
514 | if (unlikely(!skb)) | ||
515 | return NULL; | ||
516 | |||
517 | dma_unmap_page(&bnad->pcidev->dev, | ||
518 | dma_unmap_addr(&unmap->vector, dma_addr), | ||
519 | unmap->vector.len, DMA_FROM_DEVICE); | ||
520 | skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, | ||
521 | unmap->page, unmap->page_offset, length); | ||
522 | skb->len += length; | ||
523 | skb->data_len += length; | ||
524 | skb->truesize += length; | ||
525 | |||
526 | unmap->page = NULL; | ||
527 | unmap->vector.len = 0; | ||
528 | |||
529 | return skb; | ||
530 | } | ||
531 | |||
532 | skb = unmap->skb; | ||
533 | BUG_ON(!skb); | ||
534 | |||
535 | dma_unmap_single(&bnad->pcidev->dev, | ||
536 | dma_unmap_addr(&unmap->vector, dma_addr), | ||
537 | unmap->vector.len, DMA_FROM_DEVICE); | ||
538 | |||
539 | skb_put(skb, length); | ||
540 | |||
541 | skb->protocol = eth_type_trans(skb, bnad->netdev); | ||
542 | |||
543 | unmap->skb = NULL; | ||
544 | unmap->vector.len = 0; | ||
545 | return skb; | ||
546 | } | ||
547 | |||
357 | static u32 | 548 | static u32 |
358 | bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) | 549 | bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) |
359 | { | 550 | { |
360 | struct bna_cq_entry *cq, *cmpl, *next_cmpl; | 551 | struct bna_cq_entry *cq, *cmpl; |
361 | struct bna_rcb *rcb = NULL; | 552 | struct bna_rcb *rcb = NULL; |
362 | struct bnad_rx_unmap *unmap_q, *unmap; | 553 | struct bnad_rx_unmap_q *unmap_q; |
363 | unsigned int packets = 0; | 554 | struct bnad_rx_unmap *unmap; |
364 | struct sk_buff *skb; | 555 | struct sk_buff *skb; |
365 | u32 flags, masked_flags; | ||
366 | struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate; | 556 | struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate; |
367 | struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl); | 557 | struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl; |
558 | u32 packets = 0, length = 0, flags, masked_flags; | ||
368 | 559 | ||
369 | prefetch(bnad->netdev); | 560 | prefetch(bnad->netdev); |
370 | 561 | ||
@@ -373,6 +564,8 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
373 | 564 | ||
374 | while (cmpl->valid && (packets < budget)) { | 565 | while (cmpl->valid && (packets < budget)) { |
375 | packets++; | 566 | packets++; |
567 | flags = ntohl(cmpl->flags); | ||
568 | length = ntohs(cmpl->length); | ||
376 | BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length)); | 569 | BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length)); |
377 | 570 | ||
378 | if (bna_is_small_rxq(cmpl->rxq_id)) | 571 | if (bna_is_small_rxq(cmpl->rxq_id)) |
@@ -381,32 +574,25 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
381 | rcb = ccb->rcb[0]; | 574 | rcb = ccb->rcb[0]; |
382 | 575 | ||
383 | unmap_q = rcb->unmap_q; | 576 | unmap_q = rcb->unmap_q; |
384 | unmap = &unmap_q[rcb->consumer_index]; | 577 | unmap = &unmap_q->unmap[rcb->consumer_index]; |
385 | 578 | ||
386 | skb = unmap->skb; | 579 | if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR | |
387 | BUG_ON(!(skb)); | 580 | BNA_CQ_EF_FCS_ERROR | |
388 | unmap->skb = NULL; | 581 | BNA_CQ_EF_TOO_LONG))) { |
389 | dma_unmap_single(&bnad->pcidev->dev, | 582 | if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) |
390 | dma_unmap_addr(&unmap->vector, dma_addr), | 583 | bnad_rxq_cleanup_page(bnad, unmap); |
391 | unmap->vector.len, DMA_FROM_DEVICE); | 584 | else |
392 | unmap->vector.len = 0; | 585 | bnad_rxq_cleanup_skb(bnad, unmap); |
393 | BNA_QE_INDX_INC(rcb->consumer_index, rcb->q_depth); | ||
394 | BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth); | ||
395 | next_cmpl = &cq[ccb->producer_index]; | ||
396 | 586 | ||
397 | prefetch(next_cmpl); | ||
398 | |||
399 | flags = ntohl(cmpl->flags); | ||
400 | if (unlikely | ||
401 | (flags & | ||
402 | (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR | | ||
403 | BNA_CQ_EF_TOO_LONG))) { | ||
404 | dev_kfree_skb_any(skb); | ||
405 | rcb->rxq->rx_packets_with_error++; | 587 | rcb->rxq->rx_packets_with_error++; |
406 | goto next; | 588 | goto next; |
407 | } | 589 | } |
408 | 590 | ||
409 | skb_put(skb, ntohs(cmpl->length)); | 591 | skb = bnad_cq_prepare_skb(ccb->ctrl, unmap_q, unmap, |
592 | length, flags); | ||
593 | |||
594 | if (unlikely(!skb)) | ||
595 | break; | ||
410 | 596 | ||
411 | masked_flags = flags & flags_cksum_prot_mask; | 597 | masked_flags = flags & flags_cksum_prot_mask; |
412 | 598 | ||
@@ -421,22 +607,24 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
421 | skb_checksum_none_assert(skb); | 607 | skb_checksum_none_assert(skb); |
422 | 608 | ||
423 | rcb->rxq->rx_packets++; | 609 | rcb->rxq->rx_packets++; |
424 | rcb->rxq->rx_bytes += skb->len; | 610 | rcb->rxq->rx_bytes += length; |
425 | skb->protocol = eth_type_trans(skb, bnad->netdev); | ||
426 | 611 | ||
427 | if (flags & BNA_CQ_EF_VLAN) | 612 | if (flags & BNA_CQ_EF_VLAN) |
428 | __vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag)); | 613 | __vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag)); |
429 | 614 | ||
430 | if (skb->ip_summed == CHECKSUM_UNNECESSARY) | 615 | if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) |
431 | napi_gro_receive(&rx_ctrl->napi, skb); | 616 | napi_gro_frags(&rx_ctrl->napi); |
432 | else | 617 | else |
433 | netif_receive_skb(skb); | 618 | netif_receive_skb(skb); |
434 | 619 | ||
435 | next: | 620 | next: |
436 | cmpl->valid = 0; | 621 | cmpl->valid = 0; |
437 | cmpl = next_cmpl; | 622 | BNA_QE_INDX_INC(rcb->consumer_index, rcb->q_depth); |
623 | BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth); | ||
624 | cmpl = &cq[ccb->producer_index]; | ||
438 | } | 625 | } |
439 | 626 | ||
627 | napi_gro_flush(&rx_ctrl->napi, false); | ||
440 | if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) | 628 | if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) |
441 | bna_ib_ack_disable_irq(ccb->i_dbell, packets); | 629 | bna_ib_ack_disable_irq(ccb->i_dbell, packets); |
442 | 630 | ||
@@ -956,8 +1144,7 @@ bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
956 | struct bna_ccb *ccb; | 1144 | struct bna_ccb *ccb; |
957 | struct bna_rcb *rcb; | 1145 | struct bna_rcb *rcb; |
958 | struct bnad_rx_ctrl *rx_ctrl; | 1146 | struct bnad_rx_ctrl *rx_ctrl; |
959 | int i; | 1147 | int i, j; |
960 | int j; | ||
961 | 1148 | ||
962 | for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { | 1149 | for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { |
963 | rx_ctrl = &rx_info->rx_ctrl[i]; | 1150 | rx_ctrl = &rx_info->rx_ctrl[i]; |
@@ -972,6 +1159,7 @@ bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
972 | if (!rcb) | 1159 | if (!rcb) |
973 | continue; | 1160 | continue; |
974 | 1161 | ||
1162 | bnad_rxq_alloc_init(bnad, rcb); | ||
975 | set_bit(BNAD_RXQ_STARTED, &rcb->flags); | 1163 | set_bit(BNAD_RXQ_STARTED, &rcb->flags); |
976 | set_bit(BNAD_RXQ_POST_OK, &rcb->flags); | 1164 | set_bit(BNAD_RXQ_POST_OK, &rcb->flags); |
977 | bnad_rxq_post(bnad, rcb); | 1165 | bnad_rxq_post(bnad, rcb); |
@@ -1861,9 +2049,11 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
1861 | 2049 | ||
1862 | /* Fill Unmap Q memory requirements */ | 2050 | /* Fill Unmap Q memory requirements */ |
1863 | BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPQ], | 2051 | BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPQ], |
1864 | rx_config->num_paths + ((rx_config->rxp_type == BNA_RXP_SINGLE) | 2052 | rx_config->num_paths + |
1865 | ? 0 : rx_config->num_paths), (bnad->rxq_depth * | 2053 | ((rx_config->rxp_type == BNA_RXP_SINGLE) ? |
1866 | sizeof(struct bnad_rx_unmap))); | 2054 | 0 : rx_config->num_paths), |
2055 | ((bnad->rxq_depth * sizeof(struct bnad_rx_unmap)) + | ||
2056 | sizeof(struct bnad_rx_unmap_q))); | ||
1867 | 2057 | ||
1868 | /* Allocate resource */ | 2058 | /* Allocate resource */ |
1869 | err = bnad_rx_res_alloc(bnad, res_info, rx_id); | 2059 | err = bnad_rx_res_alloc(bnad, res_info, rx_id); |
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 670a0790a183..2e758e18b533 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -233,10 +233,29 @@ struct bnad_rx_vector {
233 | }; | 233 | }; |
234 | 234 | ||
235 | struct bnad_rx_unmap { | 235 | struct bnad_rx_unmap { |
236 | struct page *page; | ||
237 | u32 page_offset; | ||
236 | struct sk_buff *skb; | 238 | struct sk_buff *skb; |
237 | struct bnad_rx_vector vector; | 239 | struct bnad_rx_vector vector; |
238 | }; | 240 | }; |
239 | 241 | ||
242 | enum bnad_rxbuf_type { | ||
243 | BNAD_RXBUF_NONE = 0, | ||
244 | BNAD_RXBUF_SKB = 1, | ||
245 | BNAD_RXBUF_PAGE = 2, | ||
246 | BNAD_RXBUF_MULTI = 3 | ||
247 | }; | ||
248 | |||
249 | #define BNAD_RXBUF_IS_PAGE(_type) ((_type) == BNAD_RXBUF_PAGE) | ||
250 | |||
251 | struct bnad_rx_unmap_q { | ||
252 | int reuse_pi; | ||
253 | int alloc_order; | ||
254 | u32 map_size; | ||
255 | enum bnad_rxbuf_type type; | ||
256 | struct bnad_rx_unmap unmap[0]; | ||
257 | }; | ||
258 | |||
240 | /* Bit mask values for bnad->cfg_flags */ | 259 | /* Bit mask values for bnad->cfg_flags */ |
241 | #define BNAD_CF_DIM_ENABLED 0x01 /* DIM */ | 260 | #define BNAD_CF_DIM_ENABLED 0x01 /* DIM */ |
242 | #define BNAD_CF_PROMISC 0x02 | 261 | #define BNAD_CF_PROMISC 0x02 |