author		Ron Mercer <ron.mercer@qlogic.com>	2009-06-10 11:49:34 -0400
committer	David S. Miller <davem@davemloft.net>	2009-06-11 05:37:05 -0400
commit		b8facca01ba381c3f8ff2391fbe3860ebc6a6bdc (patch)
tree		11e7b481ab9436d5cbaf616101b46aa67cb4bc36
parent		88c55e3cbd1bd4e8f52dcda67456763710a025a5 (diff)
qlge: Allow RX buf rings to be > 4096 bytes.
RX buffer rings can be composed of non-contiguous, fixed-size
chunks of memory. The ring is handed to the hardware as a pointer
to a location that stores the address of the queue. If the queue
is larger than 4096 bytes, the hardware instead gets a list of
such pointers, one per 4096-byte page.
This patch adds the logic needed to generate that list when the
queue size exceeds 4096 bytes.
Signed-off-by: Ron Mercer <ron.mercer@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
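The scheme the message describes can be modeled outside the driver. Below is a minimal user-space sketch, assuming 8-byte queue entries and 4096-byte doorbell pages; the helper names pages_per_bq and build_indirect_list are illustrative, not driver API.

#include <stdint.h>
#include <stdio.h>

#define DB_PAGE_SIZE 4096

/* ceil(queue bytes / page size), mirroring MAX_DB_PAGES_PER_BQ(). */
static size_t pages_per_bq(size_t entries)
{
	size_t bytes = entries * sizeof(uint64_t);

	return bytes / DB_PAGE_SIZE + (bytes % DB_PAGE_SIZE ? 1 : 0);
}

/* Record the bus address of each 4096-byte chunk of the queue, the
 * same walk ql_start_rx_ring() performs with base_indirect_ptr. */
static void build_indirect_list(uint64_t queue_base_dma, size_t entries,
				uint64_t *indirect)
{
	size_t i;

	for (i = 0; i < pages_per_bq(entries); i++)
		indirect[i] = queue_base_dma + i * DB_PAGE_SIZE;
}

int main(void)
{
	uint64_t indirect[8];
	size_t i, entries = 1024;	/* 8192 bytes -> two pages */

	build_indirect_list(0x10000000ULL, entries, indirect);
	for (i = 0; i < pages_per_bq(entries); i++)
		printf("chunk %zu at 0x%llx\n", i,
		       (unsigned long long)indirect[i]);
	return 0;
}

When the queue fits in one page the list degenerates to the single pointer the driver wrote before this patch; the do/while loops in the diff below always write at least one entry for the same reason.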
-rw-r--r--	drivers/net/qlge/qlge.h      | 13
-rw-r--r--	drivers/net/qlge/qlge_main.c | 28
2 files changed, 33 insertions, 8 deletions
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index b1ddfd1b8d53..156e02e8905d 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -41,7 +41,18 @@
 
 #define NUM_SMALL_BUFFERS 512
 #define NUM_LARGE_BUFFERS 512
+#define DB_PAGE_SIZE 4096
+
+/* Calculate the number of (4k) pages required to
+ * contain a buffer queue of the given length.
+ */
+#define MAX_DB_PAGES_PER_BQ(x) \
+		(((x * sizeof(u64)) / DB_PAGE_SIZE) + \
+		(((x * sizeof(u64)) % DB_PAGE_SIZE) ? 1 : 0))
 
+#define RX_RING_SHADOW_SPACE (sizeof(u64) + \
+		MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
+		MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
 #define SMALL_BUFFER_SIZE 256
 #define LARGE_BUFFER_SIZE PAGE_SIZE
 #define MAX_SPLIT_SIZE 1023
@@ -65,8 +76,6 @@
 #define TX_DESC_PER_OAL 0
 #endif
 
-#define DB_PAGE_SIZE 4096
-
 /* MPI test register definitions. This register
  * is used for determining alternate NIC function's
  * PCI->func number.
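For the default queue sizes above the new macro is easy to check by hand: 512 entries * sizeof(u64) = 4096 bytes, exactly one page, so RX_RING_SHADOW_SPACE works out to 3 * sizeof(u64) = 24 bytes per ring. A stand-alone sketch of the same arithmetic, with uint64_t standing in for the kernel's u64:

#include <stdint.h>
#include <stdio.h>

#define DB_PAGE_SIZE 4096
#define MAX_DB_PAGES_PER_BQ(x) \
	(((x * sizeof(uint64_t)) / DB_PAGE_SIZE) + \
	(((x * sizeof(uint64_t)) % DB_PAGE_SIZE) ? 1 : 0))

int main(void)
{
	/* 512 * 8 = 4096 bytes: exactly one page, no remainder. */
	printf("512 entries  -> %zu page(s)\n", MAX_DB_PAGES_PER_BQ(512));
	/* 700 * 8 = 5600 bytes: the remainder term rounds up to two. */
	printf("700 entries  -> %zu page(s)\n", MAX_DB_PAGES_PER_BQ(700));
	/* 1024 * 8 = 8192 bytes: two full pages. */
	printf("1024 entries -> %zu page(s)\n", MAX_DB_PAGES_PER_BQ(1024));
	return 0;
}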
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 17d512c6bc36..b9a5f59d6c9b 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -2552,14 +2552,16 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 {
 	struct cqicb *cqicb = &rx_ring->cqicb;
 	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
-		(rx_ring->cq_id * sizeof(u64) * 4);
+		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
 	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
-		(rx_ring->cq_id * sizeof(u64) * 4);
+		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
 	void __iomem *doorbell_area =
 		qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
 	int err = 0;
 	u16 bq_len;
 	u64 tmp;
+	__le64 *base_indirect_ptr;
+	int page_entries;
 
 	/* Set up the shadow registers for this ring. */
 	rx_ring->prod_idx_sh_reg = shadow_reg;
@@ -2568,8 +2570,8 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 	shadow_reg_dma += sizeof(u64);
 	rx_ring->lbq_base_indirect = shadow_reg;
 	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
-	shadow_reg += sizeof(u64);
-	shadow_reg_dma += sizeof(u64);
+	shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
+	shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
 	rx_ring->sbq_base_indirect = shadow_reg;
 	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
 
@@ -2606,7 +2608,14 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 	if (rx_ring->lbq_len) {
 		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
 		tmp = (u64)rx_ring->lbq_base_dma;;
-		*((__le64 *) rx_ring->lbq_base_indirect) = cpu_to_le64(tmp);
+		base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
+		page_entries = 0;
+		do {
+			*base_indirect_ptr = cpu_to_le64(tmp);
+			tmp += DB_PAGE_SIZE;
+			base_indirect_ptr++;
+			page_entries++;
+		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
 		cqicb->lbq_addr =
 			cpu_to_le64(rx_ring->lbq_base_indirect_dma);
 		bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
@@ -2623,7 +2632,14 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 	if (rx_ring->sbq_len) {
 		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
 		tmp = (u64)rx_ring->sbq_base_dma;;
-		*((__le64 *) rx_ring->sbq_base_indirect) = cpu_to_le64(tmp);
+		base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
+		page_entries = 0;
+		do {
+			*base_indirect_ptr = cpu_to_le64(tmp);
+			tmp += DB_PAGE_SIZE;
+			base_indirect_ptr++;
+			page_entries++;
+		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
 		cqicb->sbq_addr =
 			cpu_to_le64(rx_ring->sbq_base_indirect_dma);
 		cqicb->sbq_buf_size =
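The first hunk above also changes how each ring's slice of the shadow area is carved up: one u64 for the producer index, then one u64 per lbq page, then one u64 per sbq page. The sketch below models that pointer arithmetic in user space; struct ring_shadow_view and carve_shadow are illustrative names, not driver structures.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define DB_PAGE_SIZE 4096
#define NUM_SMALL_BUFFERS 512
#define NUM_LARGE_BUFFERS 512

#define MAX_DB_PAGES_PER_BQ(x) \
	(((x * sizeof(uint64_t)) / DB_PAGE_SIZE) + \
	(((x * sizeof(uint64_t)) % DB_PAGE_SIZE) ? 1 : 0))

#define RX_RING_SHADOW_SPACE (sizeof(uint64_t) + \
	MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(uint64_t) + \
	MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(uint64_t))

struct ring_shadow_view {
	uint64_t *prod_idx;	/* completion producer index */
	uint64_t *lbq_indirect;	/* large buffer queue page list */
	uint64_t *sbq_indirect;	/* small buffer queue page list */
};

/* Carve one ring's slice out of the shared shadow area, stepping past
 * the lbq page list the same way ql_start_rx_ring() now does. */
static struct ring_shadow_view carve_shadow(void *area, unsigned int cq_id,
					    size_t lbq_len)
{
	uint8_t *p = (uint8_t *)area + cq_id * RX_RING_SHADOW_SPACE;
	struct ring_shadow_view v;

	v.prod_idx = (uint64_t *)p;
	p += sizeof(uint64_t);
	v.lbq_indirect = (uint64_t *)p;
	p += sizeof(uint64_t) * MAX_DB_PAGES_PER_BQ(lbq_len);
	v.sbq_indirect = (uint64_t *)p;
	return v;
}

int main(void)
{
	static uint64_t area[64];
	struct ring_shadow_view v = carve_shadow(area, 1, NUM_LARGE_BUFFERS);

	/* With the 512-entry defaults, ring 1 starts 24 bytes in. */
	printf("ring 1 offsets: prod=%td lbq=%td sbq=%td\n",
	       (uint8_t *)v.prod_idx - (uint8_t *)area,
	       (uint8_t *)v.lbq_indirect - (uint8_t *)area,
	       (uint8_t *)v.sbq_indirect - (uint8_t *)area);
	return 0;
}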