author		David S. Miller <davem@davemloft.net>	2011-07-06 02:23:37 -0400
committer	David S. Miller <davem@davemloft.net>	2011-07-06 02:23:37 -0400
commit		e12fe68ce34d60c04bb1ddb1d3cc5c3022388fe4 (patch)
tree		83c0e192ccaa4752c80b6131a7d0aa8272b5d0d0 /drivers/net/vmxnet3
parent		7329f0d58de01878d9ce4f0be7a76e136f223eef (diff)
parent		712ae51afd55b20c04c5383d02ba5d10233313b1 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Diffstat (limited to 'drivers/net/vmxnet3')
-rw-r--r--	drivers/net/vmxnet3/vmxnet3_drv.c	135
-rw-r--r--	drivers/net/vmxnet3/vmxnet3_int.h	4
2 files changed, 96 insertions, 43 deletions
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 2c1473686abe..fabcded7c6a0 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -573,7 +573,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
 	struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
 	u32 val;
 
-	while (num_allocated < num_to_alloc) {
+	while (num_allocated <= num_to_alloc) {
 		struct vmxnet3_rx_buf_info *rbi;
 		union Vmxnet3_GenericDesc *gd;
 
@@ -619,9 +619,15 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
 
 		BUG_ON(rbi->dma_addr == 0);
 		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
-		gd->dword[2] = cpu_to_le32((ring->gen << VMXNET3_RXD_GEN_SHIFT)
+		gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
 				| val | rbi->len);
 
+		/* Fill the last buffer but dont mark it ready, or else the
+		 * device will think that the queue is full */
+		if (num_allocated == num_to_alloc)
+			break;
+
+		gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
 		num_allocated++;
 		vmxnet3_cmd_ring_adv_next2fill(ring);
 	}
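The two hunks above change the refill loop so it prepares one descriptor beyond what it publishes: each descriptor is first written with the inverted generation bit (which the device ignores), and only the descriptors before the last one are flipped to the live generation. As the in-diff comment says, the held-back buffer is filled but never marked ready, so the device cannot mistake the queue state. Below is a self-contained sketch of this generation-bit handshake; the struct and function names are illustrative stand-ins, not the driver's real types.

/* Standalone model of the generation-bit handshake used above.
 * All names are illustrative; they are not the driver's real types. */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8

struct desc {
        uint64_t addr;  /* DMA address of the posted buffer */
        uint8_t gen;    /* equals ring->gen => owned by the device */
};

struct ring {
        struct desc base[RING_SIZE];
        uint32_t next2fill;
        uint8_t gen;    /* flips on every wrap-around */
};

static void ring_adv_next2fill(struct ring *ring)
{
        if (++ring->next2fill == RING_SIZE) {
                ring->next2fill = 0;
                ring->gen ^= 1; /* new lap, new generation */
        }
}

/* Prepare num_to_alloc + 1 descriptors but publish only num_to_alloc:
 * the last one keeps a stale gen bit so the device will not consume it. */
static void fill(struct ring *ring, uint32_t num_to_alloc)
{
        uint32_t num_allocated = 0;

        while (num_allocated <= num_to_alloc) {
                struct desc *d = &ring->base[ring->next2fill];

                d->addr = 0x1000 + ring->next2fill; /* stand-in for a mapped buffer */
                d->gen = !ring->gen;                /* stale: invisible to the device */

                if (num_allocated == num_to_alloc)
                        break;                      /* filled, but not marked ready */

                d->gen = ring->gen;                 /* published: device owns it now */
                num_allocated++;
                ring_adv_next2fill(ring);
        }
}

int main(void)
{
        struct ring ring = { .gen = 1 };

        fill(&ring, 4);
        for (unsigned i = 0; i < RING_SIZE; i++)
                printf("desc %u: gen=%u (ring gen=%u)\n",
                       i, (unsigned)ring.base[i].gen, (unsigned)ring.gen);
        return 0;
}

The rcd_done hunk further down completes the handshake: once a replacement buffer is in place, the completion path sets rxd->gen = ring->gen to hand the held-back descriptor over to the device.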
@@ -1138,6 +1144,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 		VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
 	};
 	u32 num_rxd = 0;
+	bool skip_page_frags = false;
 	struct Vmxnet3_RxCompDesc *rcd;
 	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
 #ifdef __BIG_ENDIAN_BITFIELD
@@ -1148,11 +1155,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 			  &rxComp);
 	while (rcd->gen == rq->comp_ring.gen) {
 		struct vmxnet3_rx_buf_info *rbi;
-		struct sk_buff *skb;
+		struct sk_buff *skb, *new_skb = NULL;
+		struct page *new_page = NULL;
 		int num_to_alloc;
 		struct Vmxnet3_RxDesc *rxd;
 		u32 idx, ring_idx;
-
+		struct vmxnet3_cmd_ring *ring = NULL;
 		if (num_rxd >= quota) {
 			/* we may stop even before we see the EOP desc of
 			 * the current pkt
@@ -1163,6 +1171,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 		BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
 		idx = rcd->rxdIdx;
 		ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
+		ring = rq->rx_ring + ring_idx;
 		vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
 				  &rxCmdDesc);
 		rbi = rq->buf_info[ring_idx] + idx;
@@ -1191,37 +1200,80 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 				goto rcd_done;
 			}
 
+			skip_page_frags = false;
 			ctx->skb = rbi->skb;
-			rbi->skb = NULL;
+			new_skb = dev_alloc_skb(rbi->len + NET_IP_ALIGN);
+			if (new_skb == NULL) {
+				/* Skb allocation failed, do not handover this
+				 * skb to stack. Reuse it. Drop the existing pkt
+				 */
+				rq->stats.rx_buf_alloc_failure++;
+				ctx->skb = NULL;
+				rq->stats.drop_total++;
+				skip_page_frags = true;
+				goto rcd_done;
+			}
 
 			pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
 					 PCI_DMA_FROMDEVICE);
 
 			skb_put(ctx->skb, rcd->len);
+
+			/* Immediate refill */
+			new_skb->dev = adapter->netdev;
+			skb_reserve(new_skb, NET_IP_ALIGN);
+			rbi->skb = new_skb;
+			rbi->dma_addr = pci_map_single(adapter->pdev,
+						       rbi->skb->data, rbi->len,
+						       PCI_DMA_FROMDEVICE);
+			rxd->addr = cpu_to_le64(rbi->dma_addr);
+			rxd->len = rbi->len;
+
 		} else {
-			BUG_ON(ctx->skb == NULL);
+			BUG_ON(ctx->skb == NULL && !skip_page_frags);
+
 			/* non SOP buffer must be type 1 in most cases */
-			if (rbi->buf_type == VMXNET3_RX_BUF_PAGE) {
-				BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
+			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
+			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
 
-				if (rcd->len) {
-					pci_unmap_page(adapter->pdev,
-						       rbi->dma_addr, rbi->len,
-						       PCI_DMA_FROMDEVICE);
+			/* If an sop buffer was dropped, skip all
+			 * following non-sop fragments. They will be reused.
+			 */
+			if (skip_page_frags)
+				goto rcd_done;
 
-					vmxnet3_append_frag(ctx->skb, rcd, rbi);
-					rbi->page = NULL;
-				}
-			} else {
-				/*
-				 * The only time a non-SOP buffer is type 0 is
-				 * when it's EOP and error flag is raised, which
-				 * has already been handled.
+			new_page = alloc_page(GFP_ATOMIC);
+			if (unlikely(new_page == NULL)) {
+				/* Replacement page frag could not be allocated.
+				 * Reuse this page. Drop the pkt and free the
+				 * skb which contained this page as a frag. Skip
+				 * processing all the following non-sop frags.
 				 */
-				BUG_ON(true);
+				rq->stats.rx_buf_alloc_failure++;
+				dev_kfree_skb(ctx->skb);
+				ctx->skb = NULL;
+				skip_page_frags = true;
+				goto rcd_done;
+			}
+
+			if (rcd->len) {
+				pci_unmap_page(adapter->pdev,
+					       rbi->dma_addr, rbi->len,
+					       PCI_DMA_FROMDEVICE);
+
+				vmxnet3_append_frag(ctx->skb, rcd, rbi);
 			}
+
+			/* Immediate refill */
+			rbi->page = new_page;
+			rbi->dma_addr = pci_map_page(adapter->pdev, rbi->page,
+						     0, PAGE_SIZE,
+						     PCI_DMA_FROMDEVICE);
+			rxd->addr = cpu_to_le64(rbi->dma_addr);
+			rxd->len = rbi->len;
 		}
 
+
 		skb = ctx->skb;
 		if (rcd->eop) {
 			skb->len += skb->data_len;
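The rewritten SOP and non-SOP paths above share one pattern: a replacement buffer (skb or page) is allocated before the received one is handed to the stack, and on allocation failure the old buffer stays posted to the device while the packet is dropped and counted. The ring therefore never loses buffers under memory pressure, which is what previously starved it. A reduced sketch of that allocate-before-handoff rule, with made-up names standing in for dev_alloc_skb()/alloc_page() and the buf_info bookkeeping:

/* Reduced model of the allocate-before-handoff rule above; the names
 * and types here are made up for illustration. */
#include <stddef.h>
#include <stdlib.h>

struct rx_slot {
        void *buf;      /* buffer currently posted to the device */
        size_t len;
};

struct rx_stats {
        unsigned long rx_buf_alloc_failure;
        unsigned long drop_total;
};

/* Returns the completed buffer, or NULL if the packet was dropped. */
static void *receive_one(struct rx_slot *slot, struct rx_stats *stats)
{
        void *done = slot->buf;
        void *fresh = malloc(slot->len);        /* stands in for dev_alloc_skb() */

        if (!fresh) {
                /* No replacement available: leave the old buffer posted and
                 * drop this packet. The ring stays fully populated. */
                stats->rx_buf_alloc_failure++;
                stats->drop_total++;
                return NULL;
        }

        slot->buf = fresh;      /* immediate refill, as in the hunk above */
        return done;            /* now safe to hand to the stack */
}

skip_page_frags extends the same rule across a multi-descriptor packet: once the SOP buffer is dropped, the trailing page fragments are recycled rather than appended to an skb that no longer exists.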
@@ -1243,26 +1295,27 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 		}
 
 rcd_done:
-		/* device may skip some rx descs */
-		rq->rx_ring[ring_idx].next2comp = idx;
-		VMXNET3_INC_RING_IDX_ONLY(rq->rx_ring[ring_idx].next2comp,
-					  rq->rx_ring[ring_idx].size);
-
-		/* refill rx buffers frequently to avoid starving the h/w */
-		num_to_alloc = vmxnet3_cmd_ring_desc_avail(rq->rx_ring +
-							   ring_idx);
-		if (unlikely(num_to_alloc > VMXNET3_RX_ALLOC_THRESHOLD(rq,
-							ring_idx, adapter))) {
-			vmxnet3_rq_alloc_rx_buf(rq, ring_idx, num_to_alloc,
-						adapter);
-
-			/* if needed, update the register */
-			if (unlikely(rq->shared->updateRxProd)) {
-				VMXNET3_WRITE_BAR0_REG(adapter,
-					rxprod_reg[ring_idx] + rq->qid * 8,
-					rq->rx_ring[ring_idx].next2fill);
-				rq->uncommitted[ring_idx] = 0;
-			}
+		/* device may have skipped some rx descs */
+		ring->next2comp = idx;
+		num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
+		ring = rq->rx_ring + ring_idx;
+		while (num_to_alloc) {
+			vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
+					  &rxCmdDesc);
+			BUG_ON(!rxd->addr);
+
+			/* Recv desc is ready to be used by the device */
+			rxd->gen = ring->gen;
+			vmxnet3_cmd_ring_adv_next2fill(ring);
+			num_to_alloc--;
+		}
+
+		/* if needed, update the register */
+		if (unlikely(rq->shared->updateRxProd)) {
+			VMXNET3_WRITE_BAR0_REG(adapter,
+					       rxprod_reg[ring_idx] + rq->qid * 8,
+					       ring->next2fill);
+			rq->uncommitted[ring_idx] = 0;
 		}
 
 		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
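Because both receive paths now refill descriptors in place (addr and len are rewritten as each completion is processed), the rcd_done block no longer calls vmxnet3_rq_alloc_rx_buf(): it only publishes the already-refilled descriptors by flipping their gen bits in ring order, then writes the producer register once per batch. A compact sketch under the same illustrative naming as the earlier model; doorbell() stands in for the VMXNET3_WRITE_BAR0_REG MMIO write:

/* Sketch of the new rcd_done publication step; names are illustrative. */
#include <stdint.h>

#define RING_SIZE 8

struct desc { uint64_t addr; uint8_t gen; };

struct ring {
        struct desc base[RING_SIZE];
        uint32_t next2fill;
        uint8_t gen;
};

static void doorbell(uint32_t producer)
{
        (void)producer; /* an MMIO register write in the real driver */
}

static void publish_refilled(struct ring *ring, uint32_t avail)
{
        while (avail--) {
                struct desc *d = &ring->base[ring->next2fill];

                /* addr/len were rewritten at completion time; flipping the
                 * gen bit is what actually hands the desc to the device */
                d->gen = ring->gen;
                if (++ring->next2fill == RING_SIZE) {
                        ring->next2fill = 0;
                        ring->gen ^= 1;
                }
        }
        doorbell(ring->next2fill);      /* one write covers the whole batch */
}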
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 2e37985809d2..a9cb3fabb17f 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.1.9.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.1.14.0-k"
 
 /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01010900
+#define VMXNET3_DRIVER_VERSION_NUM      0x01010E00
 
 #if defined(CONFIG_PCI_MSI)
 /* RSS only makes sense if MSI-X is supported. */
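The two bumped defines encode the same version in different forms: each byte of the 32-bit constant holds one dotted component, so "1.1.14.0" packs to 0x01010E00 (0x0E = 14), consistent with the new string. A quick check, using a hypothetical helper macro that is not part of the driver:

/* Hypothetical helper showing how the packed constant lines up with
 * the version string; this macro is not in the vmxnet3 sources. */
#include <stdint.h>
#include <stdio.h>

#define VER_NUM(a, b, c, d) \
        (((uint32_t)(a) << 24) | ((uint32_t)(b) << 16) | \
         ((uint32_t)(c) << 8) | (uint32_t)(d))

int main(void)
{
        uint32_t v = VER_NUM(1, 1, 14, 0);

        printf("0x%08X\n", (unsigned)v);        /* prints 0x01010E00 */
        printf("%u.%u.%u.%u\n",                 /* prints 1.1.14.0 */
               (unsigned)(v >> 24), (unsigned)((v >> 16) & 0xff),
               (unsigned)((v >> 8) & 0xff), (unsigned)(v & 0xff));
        return 0;
}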