author     Vasanthy Kolluri <vkolluri@cisco.com>   2010-06-24 06:51:51 -0400
committer  David S. Miller <davem@davemloft.net>   2010-06-25 23:50:24 -0400
commit     b5bab85c15ed3d1ae7f917a7c077086ac6c04572
tree       e7a259fc15178a546cd9e4d0757ec040b4209bc6 /drivers/net/enic
parent     70feadf36df94dc0dc2f32fec4c131ecd75344f2
enic: Use receive queue buffer blocks of 32/64 entries
Change the receive queue buffer allocations to use blocks of 32 entries when
the ring size is less than 64; otherwise, use 64 entries per block.
Signed-off-by: Scott Feldman <scofeldm@cisco.com>
Signed-off-by: Vasanthy Kolluri <vkolluri@cisco.com>
Signed-off-by: Roopa Prabhu <roprabhu@cisco.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
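
The block-size selection introduced by this patch is pure preprocessor
arithmetic, so it can be exercised outside the driver. The sketch below copies
the new RQ macros from vnic_rq.h into a user-space program; the
struct vnic_rq_buf stand-in is hypothetical (only its size matters for
VNIC_RQ_BUF_BLK_SZ), so the printed byte counts are illustrative, not the
driver's real numbers.

#include <stdio.h>

/* Hypothetical stand-in for struct vnic_rq_buf; the real layout is in
 * vnic_rq.h. Roughly 56 bytes on a 64-bit build. */
struct vnic_rq_buf { void *pad[7]; };

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Macros as introduced by this patch (vnic_rq.h). */
#define VNIC_RQ_BUF_MIN_BLK_ENTRIES 32
#define VNIC_RQ_BUF_DFLT_BLK_ENTRIES 64
#define VNIC_RQ_BUF_BLK_ENTRIES(entries) \
	((unsigned int)((entries < VNIC_RQ_BUF_DFLT_BLK_ENTRIES) ? \
	VNIC_RQ_BUF_MIN_BLK_ENTRIES : VNIC_RQ_BUF_DFLT_BLK_ENTRIES))
#define VNIC_RQ_BUF_BLK_SZ(entries) \
	(VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf))
#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
	DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries))

int main(void)
{
	unsigned int sizes[] = { 32, 64, 512, 4096 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("ring %4u: %2u entries/block, %zu bytes/block, %u block(s)\n",
		       sizes[i],
		       VNIC_RQ_BUF_BLK_ENTRIES(sizes[i]),
		       VNIC_RQ_BUF_BLK_SZ(sizes[i]),
		       VNIC_RQ_BUF_BLKS_NEEDED(sizes[i]));
	return 0;
}

A 32-entry ring now selects 32-entry blocks (one block, half the previous
allocation); rings of 64 entries or more keep the old 64-entry blocks, and the
4096-entry maximum still requires VNIC_RQ_BUF_BLKS_MAX = 64 blocks.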
Diffstat (limited to 'drivers/net/enic')
-rw-r--r--  drivers/net/enic/vnic_rq.c | 20
-rw-r--r--  drivers/net/enic/vnic_rq.h | 14
-rw-r--r--  drivers/net/enic/vnic_wq.c | 15
-rw-r--r--  drivers/net/enic/vnic_wq.h | 14
4 files changed, 37 insertions(+), 26 deletions(-)
diff --git a/drivers/net/enic/vnic_rq.c b/drivers/net/enic/vnic_rq.c
index 6d84ca840052..45cfc79f9f98 100644
--- a/drivers/net/enic/vnic_rq.c
+++ b/drivers/net/enic/vnic_rq.c
@@ -37,7 +37,7 @@ static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
 	vdev = rq->vdev;
 
 	for (i = 0; i < blks; i++) {
-		rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);
+		rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC);
 		if (!rq->bufs[i]) {
 			pr_err("Failed to alloc rq_bufs\n");
 			return -ENOMEM;
@@ -46,14 +46,14 @@ static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
 
 	for (i = 0; i < blks; i++) {
 		buf = rq->bufs[i];
-		for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES; j++) {
-			buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES + j;
+		for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES(count); j++) {
+			buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES(count) + j;
 			buf->desc = (u8 *)rq->ring.descs +
 				rq->ring.desc_size * buf->index;
 			if (buf->index + 1 == count) {
 				buf->next = rq->bufs[0];
 				break;
-			} else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES) {
+			} else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES(count)) {
 				buf->next = rq->bufs[i + 1];
 			} else {
 				buf->next = buf + 1;
@@ -119,10 +119,11 @@ void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
 	unsigned int error_interrupt_offset)
 {
 	u64 paddr;
+	unsigned int count = rq->ring.desc_count;
 
 	paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
 	writeq(paddr, &rq->ctrl->ring_base);
-	iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size);
+	iowrite32(count, &rq->ctrl->ring_size);
 	iowrite32(cq_index, &rq->ctrl->cq_index);
 	iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
 	iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
@@ -132,8 +133,8 @@ void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
 	iowrite32(posted_index, &rq->ctrl->posted_index);
 
 	rq->to_use = rq->to_clean =
-		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
-			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
+		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
+			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
 }
 
 void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
@@ -184,6 +185,7 @@ void vnic_rq_clean(struct vnic_rq *rq,
 {
 	struct vnic_rq_buf *buf;
 	u32 fetch_index;
+	unsigned int count = rq->ring.desc_count;
 
 	BUG_ON(ioread32(&rq->ctrl->enable));
 
@@ -200,8 +202,8 @@ void vnic_rq_clean(struct vnic_rq *rq,
 	/* Use current fetch_index as the ring starting point */
 	fetch_index = ioread32(&rq->ctrl->fetch_index);
 	rq->to_use = rq->to_clean =
-		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
-			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
+		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
+			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
 	iowrite32(fetch_index, &rq->ctrl->posted_index);
 
 	vnic_dev_clear_desc_ring(&rq->ring);
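
The loop in vnic_rq_alloc_bufs() above threads every buffer's next pointer so
the per-block arrays behave as one circular list: within a block, next is
simply buf + 1; at a block boundary it jumps to the first entry of the next
block; and the entry at index count - 1 wraps back to bufs[0]. The user-space
sketch below reproduces the same linking pattern with a minimal stand-in
struct (the pointer advance in the final else branch falls outside the hunk
context shown above, so it is filled in here); it is an illustration under
those assumptions, not driver code.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct vnic_rq_buf: just the fields the linking needs. */
struct buf {
	unsigned int index;
	struct buf *next;
};

int main(void)
{
	unsigned int count = 80, blk_entries = 64;	/* 80-entry ring */
	unsigned int blks = (count + blk_entries - 1) / blk_entries;
	struct buf *bufs[2];	/* count = 80 gives exactly 2 blocks */
	struct buf *buf;
	unsigned int i, j;

	for (i = 0; i < blks; i++)	/* error handling omitted */
		bufs[i] = calloc(blk_entries, sizeof(struct buf));

	/* Same linking pattern as vnic_rq_alloc_bufs(): consecutive within
	 * a block, hop to the next block at a block boundary, wrap the
	 * entry at index count - 1 back to bufs[0]. */
	for (i = 0; i < blks; i++) {
		buf = bufs[i];
		for (j = 0; j < blk_entries; j++) {
			buf->index = i * blk_entries + j;
			if (buf->index + 1 == count) {
				buf->next = bufs[0];
				break;
			} else if (j + 1 == blk_entries) {
				buf->next = bufs[i + 1];
			} else {
				buf->next = buf + 1;
				buf++;
			}
		}
	}

	/* Walk count steps from bufs[0][0]; a closed ring lands back at
	 * index 0. */
	buf = bufs[0];
	for (i = 0; i < count; i++)
		buf = buf->next;
	printf("after %u steps: index %u (ring %s)\n", count, buf->index,
	       buf->index == 0 ? "closed" : "broken");

	for (i = 0; i < blks; i++)
		free(bufs[i]);
	return 0;
}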
diff --git a/drivers/net/enic/vnic_rq.h b/drivers/net/enic/vnic_rq.h
index 35e736cc2d88..8f0fb78f0cdf 100644
--- a/drivers/net/enic/vnic_rq.h
+++ b/drivers/net/enic/vnic_rq.h
@@ -52,12 +52,16 @@ struct vnic_rq_ctrl {
 	u32 pad10;
 };
 
-/* Break the vnic_rq_buf allocations into blocks of 64 entries */
-#define VNIC_RQ_BUF_BLK_ENTRIES 64
-#define VNIC_RQ_BUF_BLK_SZ \
-	(VNIC_RQ_BUF_BLK_ENTRIES * sizeof(struct vnic_rq_buf))
+/* Break the vnic_rq_buf allocations into blocks of 32/64 entries */
+#define VNIC_RQ_BUF_MIN_BLK_ENTRIES 32
+#define VNIC_RQ_BUF_DFLT_BLK_ENTRIES 64
+#define VNIC_RQ_BUF_BLK_ENTRIES(entries) \
+	((unsigned int)((entries < VNIC_RQ_BUF_DFLT_BLK_ENTRIES) ? \
+	VNIC_RQ_BUF_MIN_BLK_ENTRIES : VNIC_RQ_BUF_DFLT_BLK_ENTRIES))
+#define VNIC_RQ_BUF_BLK_SZ(entries) \
+	(VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf))
 #define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
-	DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES)
+	DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries))
 #define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
 
 struct vnic_rq_buf {
diff --git a/drivers/net/enic/vnic_wq.c b/drivers/net/enic/vnic_wq.c
index ed090a3d9319..6c4d4f7f100f 100644
--- a/drivers/net/enic/vnic_wq.c
+++ b/drivers/net/enic/vnic_wq.c
@@ -37,7 +37,7 @@ static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
 	vdev = wq->vdev;
 
 	for (i = 0; i < blks; i++) {
-		wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
+		wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC);
 		if (!wq->bufs[i]) {
 			pr_err("Failed to alloc wq_bufs\n");
 			return -ENOMEM;
@@ -46,14 +46,14 @@ static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
 
 	for (i = 0; i < blks; i++) {
 		buf = wq->bufs[i];
-		for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES; j++) {
-			buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES + j;
+		for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES(count); j++) {
+			buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES(count) + j;
 			buf->desc = (u8 *)wq->ring.descs +
 				wq->ring.desc_size * buf->index;
 			if (buf->index + 1 == count) {
 				buf->next = wq->bufs[0];
 				break;
-			} else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES) {
+			} else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES(count)) {
 				buf->next = wq->bufs[i + 1];
 			} else {
 				buf->next = buf + 1;
@@ -119,10 +119,11 @@ void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
 	unsigned int error_interrupt_offset)
 {
 	u64 paddr;
+	unsigned int count = wq->ring.desc_count;
 
 	paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
 	writeq(paddr, &wq->ctrl->ring_base);
-	iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
+	iowrite32(count, &wq->ctrl->ring_size);
 	iowrite32(fetch_index, &wq->ctrl->fetch_index);
 	iowrite32(posted_index, &wq->ctrl->posted_index);
 	iowrite32(cq_index, &wq->ctrl->cq_index);
@@ -131,8 +132,8 @@ void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
 	iowrite32(0, &wq->ctrl->error_status);
 
 	wq->to_use = wq->to_clean =
-		&wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES]
-			[fetch_index % VNIC_WQ_BUF_BLK_ENTRIES];
+		&wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)]
+			[fetch_index % VNIC_WQ_BUF_BLK_ENTRIES(count)];
 }
 
 void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
diff --git a/drivers/net/enic/vnic_wq.h b/drivers/net/enic/vnic_wq.h
index 9c34d41a887e..1c8213959fc4 100644
--- a/drivers/net/enic/vnic_wq.h
+++ b/drivers/net/enic/vnic_wq.h
@@ -60,12 +60,16 @@ struct vnic_wq_buf {
 	void *desc;
 };
 
-/* Break the vnic_wq_buf allocations into blocks of 64 entries */
-#define VNIC_WQ_BUF_BLK_ENTRIES 64
-#define VNIC_WQ_BUF_BLK_SZ \
-	(VNIC_WQ_BUF_BLK_ENTRIES * sizeof(struct vnic_wq_buf))
+/* Break the vnic_wq_buf allocations into blocks of 32/64 entries */
+#define VNIC_WQ_BUF_MIN_BLK_ENTRIES 32
+#define VNIC_WQ_BUF_DFLT_BLK_ENTRIES 64
+#define VNIC_WQ_BUF_BLK_ENTRIES(entries) \
+	((unsigned int)((entries < VNIC_WQ_BUF_DFLT_BLK_ENTRIES) ? \
+	VNIC_WQ_BUF_MIN_BLK_ENTRIES : VNIC_WQ_BUF_DFLT_BLK_ENTRIES))
+#define VNIC_WQ_BUF_BLK_SZ(entries) \
+	(VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf))
 #define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
-	DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES)
+	DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries))
 #define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
 
 struct vnic_wq {
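
The savings show up in the kzalloc calls in vnic_rq_alloc_bufs() and
vnic_wq_alloc_bufs(). Previously every block cost
VNIC_RQ_BUF_BLK_ENTRIES * sizeof(struct vnic_rq_buf)
= 64 * sizeof(struct vnic_rq_buf) bytes regardless of ring size; with this
patch a ring of fewer than 64 descriptors allocates
VNIC_RQ_BUF_BLK_SZ(count) = 32 * sizeof(struct vnic_rq_buf) bytes instead,
i.e. half the block. Assuming, hypothetically, a 64-byte struct vnic_rq_buf,
that is 2048 bytes per block instead of 4096; rings of 64 entries or more are
unaffected, and the same arithmetic applies to the WQ macros.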