Diffstat (limited to 'drivers/net/vmxnet3/vmxnet3_drv.c')
 drivers/net/vmxnet3/vmxnet3_drv.c (-rw-r--r--) | 172
 1 file changed, 147 insertions(+), 25 deletions(-)

diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 61c0840c448c..46f4caddccbe 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -861,6 +861,9 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 			       , skb_headlen(skb));
 	}
 
+	if (skb->len <= VMXNET3_HDR_COPY_SIZE)
+		ctx->copy_size = skb->len;
+
 	/* make sure headers are accessible directly */
 	if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
 		goto err;
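The two added lines cap the header copy at the packet length: a frame shorter than VMXNET3_HDR_COPY_SIZE is copied in full, so the pskb_may_pull() below is never asked for more bytes than the skb actually holds. A minimal userspace sketch of the same clamp (the constant and the lengths are stand-ins, not driver values):

#include <stdio.h>

#define HDR_COPY_SIZE 128	/* stand-in for VMXNET3_HDR_COPY_SIZE */

/* Clamp the header-copy length the way the hunk does: never ask to
 * pull more bytes than the packet contains.
 */
static unsigned int clamp_copy_size(unsigned int pkt_len, unsigned int copy)
{
	return pkt_len <= HDR_COPY_SIZE ? pkt_len : copy;
}

int main(void)
{
	printf("%u\n", clamp_copy_size(60, 54));   /* 60: runt frame, copy it all */
	printf("%u\n", clamp_copy_size(1514, 54)); /* 54: parsed header length */
	return 0;
}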
@@ -1160,6 +1163,52 @@ vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
 }
 
 
+static u32
+vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
+		    union Vmxnet3_GenericDesc *gdesc)
+{
+	u32 hlen, maplen;
+	union {
+		void *ptr;
+		struct ethhdr *eth;
+		struct iphdr *ipv4;
+		struct ipv6hdr *ipv6;
+		struct tcphdr *tcp;
+	} hdr;
+	BUG_ON(gdesc->rcd.tcp == 0);
+
+	maplen = skb_headlen(skb);
+	if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen))
+		return 0;
+
+	hdr.eth = eth_hdr(skb);
+	if (gdesc->rcd.v4) {
+		BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP));
+		hdr.ptr += sizeof(struct ethhdr);
+		BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP);
+		hlen = hdr.ipv4->ihl << 2;
+		hdr.ptr += hdr.ipv4->ihl << 2;
+	} else if (gdesc->rcd.v6) {
+		BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6));
+		hdr.ptr += sizeof(struct ethhdr);
+		/* Use an estimated value, since we also need to handle
+		 * TSO case.
+		 */
+		if (hdr.ipv6->nexthdr != IPPROTO_TCP)
+			return sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
+		hlen = sizeof(struct ipv6hdr);
+		hdr.ptr += sizeof(struct ipv6hdr);
+	} else {
+		/* Non-IP pkt, dont estimate header length */
+		return 0;
+	}
+
+	if (hlen + sizeof(struct tcphdr) > maplen)
+		return 0;
+
+	return (hlen + (hdr.tcp->doff << 2));
+}
+
 static int
 vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 		       struct vmxnet3_adapter *adapter, int quota)
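The new vmxnet3_get_hdr_len() walks Ethernet, then IPv4/IPv6, then TCP through a single union of header pointers, advancing hdr.ptr by each header's length and returning the combined L3+L4 header size. A self-contained userspace sketch of the same pointer-union walk for the IPv4/TCP case, using standard socket headers instead of kernel types (a sketch, not the driver function):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>		/* htons */
#include <net/ethernet.h>	/* struct ether_header, ETHERTYPE_IP */
#include <netinet/ip.h>		/* struct iphdr */
#include <netinet/tcp.h>	/* struct tcphdr */

/* Walk eth -> ipv4 -> tcp with one union of typed views over the same
 * cursor. Returns the IP+TCP header length, or 0 if the frame is too
 * short or is not TCP over IPv4.
 */
static uint32_t hdr_len(const uint8_t *frame, size_t len)
{
	union {
		const void *ptr;
		const struct ether_header *eth;
		const struct iphdr *ipv4;
		const struct tcphdr *tcp;
	} hdr;
	uint32_t hlen;

	if (len < sizeof(struct ether_header) + sizeof(struct iphdr) +
		  sizeof(struct tcphdr))
		return 0;

	hdr.ptr = frame;
	if (hdr.eth->ether_type != htons(ETHERTYPE_IP))
		return 0;
	hdr.ptr = (const uint8_t *)hdr.ptr + sizeof(struct ether_header);
	if (hdr.ipv4->protocol != IPPROTO_TCP)
		return 0;
	hlen = hdr.ipv4->ihl << 2;		/* IP header, in 4-byte words */
	if (sizeof(struct ether_header) + hlen + sizeof(struct tcphdr) > len)
		return 0;
	hdr.ptr = (const uint8_t *)hdr.ptr + hlen;
	return hlen + (hdr.tcp->doff << 2);	/* + TCP header incl. options */
}

int main(void)
{
	uint8_t frame[64] = {0};
	struct ether_header *eth = (struct ether_header *)frame;
	struct iphdr *ip = (struct iphdr *)(frame + sizeof(*eth));
	struct tcphdr *tcp = (struct tcphdr *)(frame + sizeof(*eth) + 20);

	eth->ether_type = htons(ETHERTYPE_IP);
	ip->ihl = 5;		/* 20-byte IP header */
	ip->protocol = IPPROTO_TCP;
	tcp->doff = 5;		/* 20-byte TCP header */
	printf("%u\n", hdr_len(frame, sizeof(frame)));	/* prints 40 */
	return 0;
}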
@@ -1167,10 +1216,11 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 	static const u32 rxprod_reg[2] = {
 		VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
 	};
-	u32 num_rxd = 0;
+	u32 num_pkts = 0;
 	bool skip_page_frags = false;
 	struct Vmxnet3_RxCompDesc *rcd;
 	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
+	u16 segCnt = 0, mss = 0;
 #ifdef __BIG_ENDIAN_BITFIELD
 	struct Vmxnet3_RxDesc rxCmdDesc;
 	struct Vmxnet3_RxCompDesc rxComp;
@@ -1185,13 +1235,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 		struct Vmxnet3_RxDesc *rxd;
 		u32 idx, ring_idx;
 		struct vmxnet3_cmd_ring	*ring = NULL;
-		if (num_rxd >= quota) {
+		if (num_pkts >= quota) {
 			/* we may stop even before we see the EOP desc of
 			 * the current pkt
 			 */
 			break;
 		}
-		num_rxd++;
 		BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
 		idx = rcd->rxdIdx;
 		ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
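Renaming num_rxd to num_pkts and moving the increment out of the descriptor loop (it now happens only when an EOP descriptor completes an skb, see the later hunk) changes what the NAPI quota counts: packets rather than descriptors, so an LRO packet spanning many ring entries costs one unit of budget instead of several. A schematic of the accounting change, with made-up descriptor and budget types:

struct desc { int eop; };	/* hypothetical: 1 on the last frag of a pkt */

/* Consume descriptors but charge the budget per completed packet,
 * as vmxnet3_rq_rx_complete() does after this patch.
 */
static int poll(const struct desc *ring, int n, int budget)
{
	int num_pkts = 0;

	for (int i = 0; i < n; i++) {
		if (num_pkts >= budget)
			break;		/* may stop mid-packet, like the driver */
		/* ... process one descriptor (SOP, frag, or EOP) ... */
		if (ring[i].eop)
			num_pkts++;	/* only a finished packet costs budget */
	}
	return num_pkts;
}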
@@ -1259,7 +1308,19 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 					       PCI_DMA_FROMDEVICE);
 			rxd->addr = cpu_to_le64(rbi->dma_addr);
 			rxd->len = rbi->len;
-
+			if (adapter->version == 2 &&
+			    rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
+				struct Vmxnet3_RxCompDescExt *rcdlro;
+				rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
+
+				segCnt = rcdlro->segCnt;
+				BUG_ON(segCnt <= 1);
+				mss = rcdlro->mss;
+				if (unlikely(segCnt <= 1))
+					segCnt = 0;
+			} else {
+				segCnt = 0;
+			}
 		} else {
 			BUG_ON(ctx->skb == NULL && !skip_page_frags);
 
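On a version-2 device, an SOP completion of type VMXNET3_CDTYPE_RXCOMP_LRO is re-viewed as the extended descriptor layout to pull out segCnt and mss for the GSO setup later in the function. Note that BUG_ON(segCnt <= 1) asserts exactly the condition the unlikely() branch below it tries to recover from, so the soft fallback only matters when the assert is compiled out. A minimal sketch of the cast-by-type pattern, with invented descriptor layouts and type codes:

#include <stdint.h>
#include <stdio.h>

enum { CDTYPE_RXCOMP = 3, CDTYPE_RXCOMP_LRO = 4 };	/* invented codes */

struct rx_comp     { uint32_t type; uint32_t len; };
struct rx_comp_ext { uint32_t type; uint32_t len; uint16_t seg_cnt, mss; };

/* View the generic completion as the extended layout only when the
 * hardware version and the type field say it is one.
 */
static void read_lro_fields(const struct rx_comp *rcd, int hw_version,
			    uint16_t *seg_cnt, uint16_t *mss)
{
	*seg_cnt = 0;
	*mss = 0;
	if (hw_version >= 2 && rcd->type == CDTYPE_RXCOMP_LRO) {
		const struct rx_comp_ext *ext =
			(const struct rx_comp_ext *)rcd;

		*mss = ext->mss;
		if (ext->seg_cnt > 1)	/* a coalesced pkt has >1 segment */
			*seg_cnt = ext->seg_cnt;
	}
}

int main(void)
{
	struct rx_comp_ext d = { CDTYPE_RXCOMP_LRO, 4096, 3, 1448 };
	uint16_t segs, mss;

	read_lro_fields((const struct rx_comp *)&d, 2, &segs, &mss);
	printf("segs=%u mss=%u\n", segs, mss);	/* segs=3 mss=1448 */
	return 0;
}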
@@ -1273,47 +1334,75 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 			if (skip_page_frags)
 				goto rcd_done;
 
-			new_page = alloc_page(GFP_ATOMIC);
-			if (unlikely(new_page == NULL)) {
+			if (rcd->len) {
+				new_page = alloc_page(GFP_ATOMIC);
 				/* Replacement page frag could not be allocated.
 				 * Reuse this page. Drop the pkt and free the
 				 * skb which contained this page as a frag. Skip
 				 * processing all the following non-sop frags.
 				 */
-				rq->stats.rx_buf_alloc_failure++;
-				dev_kfree_skb(ctx->skb);
-				ctx->skb = NULL;
-				skip_page_frags = true;
-				goto rcd_done;
-			}
+				if (unlikely(!new_page)) {
+					rq->stats.rx_buf_alloc_failure++;
+					dev_kfree_skb(ctx->skb);
+					ctx->skb = NULL;
+					skip_page_frags = true;
+					goto rcd_done;
+				}
 
-			if (rcd->len) {
 				dma_unmap_page(&adapter->pdev->dev,
 					       rbi->dma_addr, rbi->len,
 					       PCI_DMA_FROMDEVICE);
 
 				vmxnet3_append_frag(ctx->skb, rcd, rbi);
-			}
 
-			/* Immediate refill */
-			rbi->page = new_page;
-			rbi->dma_addr = dma_map_page(&adapter->pdev->dev,
-						     rbi->page,
-						     0, PAGE_SIZE,
-						     PCI_DMA_FROMDEVICE);
-			rxd->addr = cpu_to_le64(rbi->dma_addr);
-			rxd->len = rbi->len;
+				/* Immediate refill */
+				rbi->page = new_page;
+				rbi->dma_addr = dma_map_page(&adapter->pdev->dev
+							, rbi->page,
+							0, PAGE_SIZE,
+							PCI_DMA_FROMDEVICE);
+				rxd->addr = cpu_to_le64(rbi->dma_addr);
+				rxd->len = rbi->len;
+			}
 		}
 
 
 		skb = ctx->skb;
 		if (rcd->eop) {
+			u32 mtu = adapter->netdev->mtu;
 			skb->len += skb->data_len;
 
 			vmxnet3_rx_csum(adapter, skb,
 					(union Vmxnet3_GenericDesc *)rcd);
 			skb->protocol = eth_type_trans(skb, adapter->netdev);
-
+			if (!rcd->tcp || !adapter->lro)
+				goto not_lro;
+
+			if (segCnt != 0 && mss != 0) {
+				skb_shinfo(skb)->gso_type = rcd->v4 ?
+					SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
+				skb_shinfo(skb)->gso_size = mss;
+				skb_shinfo(skb)->gso_segs = segCnt;
+			} else if (segCnt != 0 || skb->len > mtu) {
+				u32 hlen;
+
+				hlen = vmxnet3_get_hdr_len(adapter, skb,
+					(union Vmxnet3_GenericDesc *)rcd);
+				if (hlen == 0)
+					goto not_lro;
+
+				skb_shinfo(skb)->gso_type =
+					rcd->v4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
+				if (segCnt != 0) {
+					skb_shinfo(skb)->gso_segs = segCnt;
+					skb_shinfo(skb)->gso_size =
+						DIV_ROUND_UP(skb->len -
+							hlen, segCnt);
+				} else {
+					skb_shinfo(skb)->gso_size = mtu - hlen;
+				}
+			}
+not_lro:
 			if (unlikely(rcd->ts))
 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);
 
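When the device supplies both segCnt and mss they go straight into the shared-info GSO fields; otherwise the patch reconstructs gso_size from what it can see: the payload (total length minus header length) divided across segCnt segments, rounded up, or mtu - hlen when even segCnt is missing. The arithmetic as a standalone sketch (the parameter values below are illustrative, not from the driver):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Estimate the GSO segment size the way the rcd->eop branch does,
 * given whatever the device reported (0 = unknown).
 */
static unsigned int est_gso_size(unsigned int skb_len, unsigned int hlen,
				 unsigned int seg_cnt, unsigned int mss,
				 unsigned int mtu)
{
	if (seg_cnt && mss)
		return mss;				/* device told us */
	if (seg_cnt)
		return DIV_ROUND_UP(skb_len - hlen, seg_cnt); /* payload/segs */
	return mtu - hlen;				/* worst-case guess */
}

int main(void)
{
	/* 3 coalesced 1448-byte segments behind 54 bytes of headers */
	printf("%u\n", est_gso_size(54 + 3 * 1448, 54, 3, 0, 1500)); /* 1448 */
	printf("%u\n", est_gso_size(9000, 54, 0, 0, 1500));	     /* 1446 */
	return 0;
}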
@@ -1323,6 +1412,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 			napi_gro_receive(&rq->napi, skb);
 
 			ctx->skb = NULL;
+			num_pkts++;
 		}
 
 rcd_done:
@@ -1353,7 +1443,7 @@ rcd_done:
 			  &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
 	}
 
-	return num_rxd;
+	return num_pkts;
 }
 
 
@@ -3038,14 +3128,19 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 		goto err_alloc_pci;
 
 	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
-	if (ver & 1) {
+	if (ver & 2) {
+		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 2);
+		adapter->version = 2;
+	} else if (ver & 1) {
 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
+		adapter->version = 1;
 	} else {
 		dev_err(&pdev->dev,
 			"Incompatible h/w version (0x%x) for adapter\n", ver);
 		err = -EBUSY;
 		goto err_ver;
 	}
+	dev_dbg(&pdev->dev, "Using device version %d\n", adapter->version);
 
 	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
 	if (ver & 1) {
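VRRS is a bitmask of device-supported revisions; the probe now prefers bit 1 (version 2) over bit 0 (version 1), writes the chosen version back to activate it, and records it in adapter->version for the LRO path above. The same highest-common-version negotiation in miniature (function and values are illustrative):

#include <stdio.h>

/* Pick the newest version both sides speak from a supported-versions
 * bitmask (bit n-1 set => version n supported), as the VRRS probe does.
 * Returns 0 if nothing matches (the probe fails with -EBUSY there).
 */
static int negotiate(unsigned int dev_mask, int driver_max)
{
	for (int v = driver_max; v >= 1; v--)
		if (dev_mask & (1u << (v - 1)))
			return v;	/* the driver writes v back here */
	return 0;
}

int main(void)
{
	printf("%d\n", negotiate(0x3, 2));	/* 2: both ends know v2 */
	printf("%d\n", negotiate(0x1, 2));	/* 1: device is v1-only */
	printf("%d\n", negotiate(0x0, 2));	/* 0: incompatible */
	return 0;
}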
@@ -3184,6 +3279,32 @@ vmxnet3_remove_device(struct pci_dev *pdev)
 	free_netdev(netdev);
 }
 
+static void vmxnet3_shutdown_device(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+	unsigned long flags;
+
+	/* Reset_work may be in the middle of resetting the device, wait for its
+	 * completion.
+	 */
+	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
+		msleep(1);
+
+	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED,
+			     &adapter->state)) {
+		clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
+		return;
+	}
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
+	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+			       VMXNET3_CMD_QUIESCE_DEV);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+	vmxnet3_disable_all_intrs(adapter);
+
+	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
+}
+
 
 #ifdef CONFIG_PM
 
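The shutdown hook serializes against the reset worker with the RESETTING bit, then quiesces the device at most once via the QUIESCED bit, backing out cleanly if another path already quiesced it. The same two-flag pattern in portable form, using C11 atomics in place of the kernel's test_and_set_bit() (a sketch of the coordination only):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag resetting = ATOMIC_FLAG_INIT;
static atomic_flag quiesced  = ATOMIC_FLAG_INIT;

/* Mirror of the shutdown logic: wait until no reset is in flight, then
 * quiesce exactly once; if someone already quiesced, just back out.
 */
static void shutdown_device(void)
{
	while (atomic_flag_test_and_set(&resetting))
		;	/* the driver sleeps (msleep(1)) instead of spinning */

	if (atomic_flag_test_and_set(&quiesced)) {
		atomic_flag_clear(&resetting);	/* already quiesced: undo */
		return;
	}
	puts("quiesce device");	/* stands in for the QUIESCE_DEV command */
	atomic_flag_clear(&resetting);
}

int main(void)
{
	shutdown_device();	/* quiesces */
	shutdown_device();	/* no-op: QUIESCED already set */
	return 0;
}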
@@ -3360,6 +3481,7 @@ static struct pci_driver vmxnet3_driver = {
 	.id_table = vmxnet3_pciid_table,
 	.probe = vmxnet3_probe_device,
 	.remove = vmxnet3_remove_device,
+	.shutdown = vmxnet3_shutdown_device,
 #ifdef CONFIG_PM
 	.driver.pm = &vmxnet3_pm_ops,
 #endif
