aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/vxge/vxge-main.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/vxge/vxge-main.c')
-rw-r--r--drivers/net/vxge/vxge-main.c140
1 files changed, 64 insertions, 76 deletions
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 068d7a9d3e36..ba6d0da78c30 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -43,6 +43,7 @@
43 43
44#include <linux/if_vlan.h> 44#include <linux/if_vlan.h>
45#include <linux/pci.h> 45#include <linux/pci.h>
46#include <linux/slab.h>
46#include <linux/tcp.h> 47#include <linux/tcp.h>
47#include <net/ip.h> 48#include <net/ip.h>
48#include <linux/netdevice.h> 49#include <linux/netdevice.h>
@@ -54,7 +55,7 @@ MODULE_LICENSE("Dual BSD/GPL");
54MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O" 55MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O"
55 "Virtualized Server Adapter"); 56 "Virtualized Server Adapter");
56 57
57static struct pci_device_id vxge_id_table[] __devinitdata = { 58static DEFINE_PCI_DEVICE_TABLE(vxge_id_table) = {
58 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID, 59 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
59 PCI_ANY_ID}, 60 PCI_ANY_ID},
60 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID, 61 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
@@ -310,7 +311,7 @@ static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
310 dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data, 311 dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,
311 rx_priv->data_size, PCI_DMA_FROMDEVICE); 312 rx_priv->data_size, PCI_DMA_FROMDEVICE);
312 313
313 if (dma_addr == 0) { 314 if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
314 ring->stats.pci_map_fail++; 315 ring->stats.pci_map_fail++;
315 return -EIO; 316 return -EIO;
316 } 317 }
@@ -1178,11 +1179,11 @@ static void vxge_set_multicast(struct net_device *dev)
1178 1179
1179 memset(&mac_info, 0, sizeof(struct macInfo)); 1180 memset(&mac_info, 0, sizeof(struct macInfo));
1180 /* Update individual M_CAST address list */ 1181 /* Update individual M_CAST address list */
1181 if ((!vdev->all_multi_flg) && dev->mc_count) { 1182 if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {
1182 1183
1183 mcast_cnt = vdev->vpaths[0].mcast_addr_cnt; 1184 mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
1184 list_head = &vdev->vpaths[0].mac_addr_list; 1185 list_head = &vdev->vpaths[0].mac_addr_list;
1185 if ((dev->mc_count + 1186 if ((netdev_mc_count(dev) +
1186 (vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) > 1187 (vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
1187 vdev->vpaths[0].max_mac_addr_cnt) 1188 vdev->vpaths[0].max_mac_addr_cnt)
1188 goto _set_all_mcast; 1189 goto _set_all_mcast;
@@ -1217,9 +1218,7 @@ static void vxge_set_multicast(struct net_device *dev)
1217 } 1218 }
1218 1219
1219 /* Add new ones */ 1220 /* Add new ones */
1220 for (i = 0, mclist = dev->mc_list; i < dev->mc_count; 1221 netdev_for_each_mc_addr(mclist, dev) {
1221 i++, mclist = mclist->next) {
1222
1223 memcpy(mac_info.macaddr, mclist->dmi_addr, ETH_ALEN); 1222 memcpy(mac_info.macaddr, mclist->dmi_addr, ETH_ALEN);
1224 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; 1223 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
1225 vpath_idx++) { 1224 vpath_idx++) {
@@ -2435,7 +2434,6 @@ static int vxge_add_isr(struct vxgedev *vdev)
2435 int ret = 0; 2434 int ret = 0;
2436#ifdef CONFIG_PCI_MSI 2435#ifdef CONFIG_PCI_MSI
2437 int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0; 2436 int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
2438 u64 function_mode = vdev->config.device_hw_info.function_mode;
2439 int pci_fun = PCI_FUNC(vdev->pdev->devfn); 2437 int pci_fun = PCI_FUNC(vdev->pdev->devfn);
2440 2438
2441 if (vdev->config.intr_type == MSI_X) 2439 if (vdev->config.intr_type == MSI_X)
@@ -2444,20 +2442,9 @@ static int vxge_add_isr(struct vxgedev *vdev)
2444 if (ret) { 2442 if (ret) {
2445 vxge_debug_init(VXGE_ERR, 2443 vxge_debug_init(VXGE_ERR,
2446 "%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME); 2444 "%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME);
2447 if ((function_mode == VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) && 2445 vxge_debug_init(VXGE_ERR,
2448 test_and_set_bit(__VXGE_STATE_CARD_UP, 2446 "%s: Defaulting to INTA", VXGE_DRIVER_NAME);
2449 &driver_config->inta_dev_open)) 2447 vdev->config.intr_type = INTA;
2450 return VXGE_HW_FAIL;
2451 else {
2452 vxge_debug_init(VXGE_ERR,
2453 "%s: Defaulting to INTA", VXGE_DRIVER_NAME);
2454 vdev->config.intr_type = INTA;
2455 vxge_hw_device_set_intr_type(vdev->devh,
2456 VXGE_HW_INTR_MODE_IRQLINE);
2457 vxge_close_vpaths(vdev, 1);
2458 vdev->no_of_vpath = 1;
2459 vdev->stats.vpaths_open = 1;
2460 }
2461 } 2448 }
2462 2449
2463 if (vdev->config.intr_type == MSI_X) { 2450 if (vdev->config.intr_type == MSI_X) {
@@ -2505,24 +2492,11 @@ static int vxge_add_isr(struct vxgedev *vdev)
2505 "%s: MSIX - %d Registration failed", 2492 "%s: MSIX - %d Registration failed",
2506 vdev->ndev->name, intr_cnt); 2493 vdev->ndev->name, intr_cnt);
2507 vxge_rem_msix_isr(vdev); 2494 vxge_rem_msix_isr(vdev);
2508 if ((function_mode == 2495 vdev->config.intr_type = INTA;
2509 VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) && 2496 vxge_debug_init(VXGE_ERR,
2510 test_and_set_bit(__VXGE_STATE_CARD_UP, 2497 "%s: Defaulting to INTA"
2511 &driver_config->inta_dev_open)) 2498 , vdev->ndev->name);
2512 return VXGE_HW_FAIL;
2513 else {
2514 vxge_hw_device_set_intr_type(
2515 vdev->devh,
2516 VXGE_HW_INTR_MODE_IRQLINE);
2517 vdev->config.intr_type = INTA;
2518 vxge_debug_init(VXGE_ERR,
2519 "%s: Defaulting to INTA"
2520 , vdev->ndev->name);
2521 vxge_close_vpaths(vdev, 1);
2522 vdev->no_of_vpath = 1;
2523 vdev->stats.vpaths_open = 1;
2524 goto INTA_MODE; 2499 goto INTA_MODE;
2525 }
2526 } 2500 }
2527 2501
2528 if (irq_req) { 2502 if (irq_req) {
@@ -2535,9 +2509,9 @@ static int vxge_add_isr(struct vxgedev *vdev)
2535 } 2509 }
2536 2510
2537 /* Point to next vpath handler */ 2511 /* Point to next vpath handler */
2538 if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0) 2512 if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0) &&
2539 && (vp_idx < (vdev->no_of_vpath - 1))) 2513 (vp_idx < (vdev->no_of_vpath - 1)))
2540 vp_idx++; 2514 vp_idx++;
2541 } 2515 }
2542 2516
2543 intr_cnt = vdev->max_vpath_supported * 2; 2517 intr_cnt = vdev->max_vpath_supported * 2;
@@ -2555,23 +2529,11 @@ static int vxge_add_isr(struct vxgedev *vdev)
2555 "%s: MSIX - %d Registration failed", 2529 "%s: MSIX - %d Registration failed",
2556 vdev->ndev->name, intr_cnt); 2530 vdev->ndev->name, intr_cnt);
2557 vxge_rem_msix_isr(vdev); 2531 vxge_rem_msix_isr(vdev);
2558 if ((function_mode == 2532 vdev->config.intr_type = INTA;
2559 VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) && 2533 vxge_debug_init(VXGE_ERR,
2560 test_and_set_bit(__VXGE_STATE_CARD_UP, 2534 "%s: Defaulting to INTA",
2561 &driver_config->inta_dev_open)) 2535 vdev->ndev->name);
2562 return VXGE_HW_FAIL;
2563 else {
2564 vxge_hw_device_set_intr_type(vdev->devh,
2565 VXGE_HW_INTR_MODE_IRQLINE);
2566 vdev->config.intr_type = INTA;
2567 vxge_debug_init(VXGE_ERR,
2568 "%s: Defaulting to INTA",
2569 vdev->ndev->name);
2570 vxge_close_vpaths(vdev, 1);
2571 vdev->no_of_vpath = 1;
2572 vdev->stats.vpaths_open = 1;
2573 goto INTA_MODE; 2536 goto INTA_MODE;
2574 }
2575 } 2537 }
2576 2538
2577 vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle, 2539 vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
@@ -2584,6 +2546,10 @@ INTA_MODE:
2584 snprintf(vdev->desc[0], VXGE_INTR_STRLEN, "%s:vxge", vdev->ndev->name); 2546 snprintf(vdev->desc[0], VXGE_INTR_STRLEN, "%s:vxge", vdev->ndev->name);
2585 2547
2586 if (vdev->config.intr_type == INTA) { 2548 if (vdev->config.intr_type == INTA) {
2549 vxge_hw_device_set_intr_type(vdev->devh,
2550 VXGE_HW_INTR_MODE_IRQLINE);
2551 vxge_hw_vpath_tti_ci_set(vdev->devh,
2552 vdev->vpaths[0].device_id);
2587 ret = request_irq((int) vdev->pdev->irq, 2553 ret = request_irq((int) vdev->pdev->irq,
2588 vxge_isr_napi, 2554 vxge_isr_napi,
2589 IRQF_SHARED, vdev->desc[0], vdev); 2555 IRQF_SHARED, vdev->desc[0], vdev);
@@ -2688,13 +2654,6 @@ vxge_open(struct net_device *dev)
2688 * initialized */ 2654 * initialized */
2689 netif_carrier_off(dev); 2655 netif_carrier_off(dev);
2690 2656
2691 /* Check for another device already open with INTA */
2692 if ((function_mode == VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) &&
2693 test_bit(__VXGE_STATE_CARD_UP, &driver_config->inta_dev_open)) {
2694 ret = -EPERM;
2695 goto out0;
2696 }
2697
2698 /* Open VPATHs */ 2657 /* Open VPATHs */
2699 status = vxge_open_vpaths(vdev); 2658 status = vxge_open_vpaths(vdev);
2700 if (status != VXGE_HW_OK) { 2659 if (status != VXGE_HW_OK) {
@@ -2983,7 +2942,6 @@ int do_vxge_close(struct net_device *dev, int do_io)
2983 vxge_debug_entryexit(VXGE_TRACE, 2942 vxge_debug_entryexit(VXGE_TRACE,
2984 "%s: %s:%d Exiting...", dev->name, __func__, __LINE__); 2943 "%s: %s:%d Exiting...", dev->name, __func__, __LINE__);
2985 2944
2986 clear_bit(__VXGE_STATE_CARD_UP, &driver_config->inta_dev_open);
2987 clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state); 2945 clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
2988 2946
2989 return 0; 2947 return 0;
@@ -3653,11 +3611,12 @@ static int __devinit vxge_config_vpaths(
3653 device_config->vp_config[i].fifo.enable = 3611 device_config->vp_config[i].fifo.enable =
3654 VXGE_HW_FIFO_ENABLE; 3612 VXGE_HW_FIFO_ENABLE;
3655 device_config->vp_config[i].fifo.max_frags = 3613 device_config->vp_config[i].fifo.max_frags =
3656 MAX_SKB_FRAGS; 3614 MAX_SKB_FRAGS + 1;
3657 device_config->vp_config[i].fifo.memblock_size = 3615 device_config->vp_config[i].fifo.memblock_size =
3658 VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE; 3616 VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;
3659 3617
3660 txdl_size = MAX_SKB_FRAGS * sizeof(struct vxge_hw_fifo_txd); 3618 txdl_size = device_config->vp_config[i].fifo.max_frags *
3619 sizeof(struct vxge_hw_fifo_txd);
3661 txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size; 3620 txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;
3662 3621
3663 device_config->vp_config[i].fifo.fifo_blocks = 3622 device_config->vp_config[i].fifo.fifo_blocks =
@@ -4088,9 +4047,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4088 driver_config->config_dev_cnt = 0; 4047 driver_config->config_dev_cnt = 0;
4089 driver_config->total_dev_cnt = 0; 4048 driver_config->total_dev_cnt = 0;
4090 driver_config->g_no_cpus = 0; 4049 driver_config->g_no_cpus = 0;
4091 driver_config->vpath_per_dev = max_config_vpath;
4092 } 4050 }
4093 4051
4052 driver_config->vpath_per_dev = max_config_vpath;
4053
4094 driver_config->total_dev_cnt++; 4054 driver_config->total_dev_cnt++;
4095 if (++driver_config->config_dev_cnt > max_config_dev) { 4055 if (++driver_config->config_dev_cnt > max_config_dev) {
4096 ret = 0; 4056 ret = 0;
@@ -4126,21 +4086,21 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4126 goto _exit0; 4086 goto _exit0;
4127 } 4087 }
4128 4088
4129 if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) { 4089 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4130 vxge_debug_ll_config(VXGE_TRACE, 4090 vxge_debug_ll_config(VXGE_TRACE,
4131 "%s : using 64bit DMA", __func__); 4091 "%s : using 64bit DMA", __func__);
4132 4092
4133 high_dma = 1; 4093 high_dma = 1;
4134 4094
4135 if (pci_set_consistent_dma_mask(pdev, 4095 if (pci_set_consistent_dma_mask(pdev,
4136 0xffffffffffffffffULL)) { 4096 DMA_BIT_MASK(64))) {
4137 vxge_debug_init(VXGE_ERR, 4097 vxge_debug_init(VXGE_ERR,
4138 "%s : unable to obtain 64bit DMA for " 4098 "%s : unable to obtain 64bit DMA for "
4139 "consistent allocations", __func__); 4099 "consistent allocations", __func__);
4140 ret = -ENOMEM; 4100 ret = -ENOMEM;
4141 goto _exit1; 4101 goto _exit1;
4142 } 4102 }
4143 } else if (!pci_set_dma_mask(pdev, 0xffffffffUL)) { 4103 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
4144 vxge_debug_ll_config(VXGE_TRACE, 4104 vxge_debug_ll_config(VXGE_TRACE,
4145 "%s : using 32bit DMA", __func__); 4105 "%s : using 32bit DMA", __func__);
4146 } else { 4106 } else {
@@ -4243,6 +4203,15 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4243 goto _exit3; 4203 goto _exit3;
4244 } 4204 }
4245 4205
4206 /* if FCS stripping is not disabled in MAC fail driver load */
4207 if (vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask) != VXGE_HW_OK) {
4208 vxge_debug_init(VXGE_ERR,
4209 "%s: FCS stripping is not disabled in MAC"
4210 " failing driver load", VXGE_DRIVER_NAME);
4211 ret = -EINVAL;
4212 goto _exit4;
4213 }
4214
4246 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL); 4215 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
4247 4216
4248 /* set private device info */ 4217 /* set private device info */
@@ -4327,10 +4296,8 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4327 vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter", 4296 vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
4328 vdev->ndev->name, ll_config.device_hw_info.product_desc); 4297 vdev->ndev->name, ll_config.device_hw_info.product_desc);
4329 4298
4330 vxge_debug_init(VXGE_TRACE, 4299 vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
4331 "%s: MAC ADDR: %02X:%02X:%02X:%02X:%02X:%02X", 4300 vdev->ndev->name, macaddr);
4332 vdev->ndev->name, macaddr[0], macaddr[1], macaddr[2],
4333 macaddr[3], macaddr[4], macaddr[5]);
4334 4301
4335 vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d", 4302 vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
4336 vdev->ndev->name, vxge_hw_device_link_width_get(hldev)); 4303 vdev->ndev->name, vxge_hw_device_link_width_get(hldev));
@@ -4387,6 +4354,27 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4387 } 4354 }
4388 4355
4389 kfree(device_config); 4356 kfree(device_config);
4357
4358 /*
4359 * INTA is shared in multi-function mode. This is unlike the INTA
4360 * implementation in MR mode, where each VH has its own INTA message.
4361 * - INTA is masked (disabled) as long as at least one function sets
4362 * its TITAN_MASK_ALL_INT.ALARM bit.
4363 * - INTA is unmasked (enabled) when all enabled functions have cleared
4364 * their own TITAN_MASK_ALL_INT.ALARM bit.
4365 * The TITAN_MASK_ALL_INT ALARM & TRAFFIC bits are cleared on power up.
4366 * Though this driver leaves the top level interrupts unmasked while
4367 * leaving the required module interrupt bits masked on exit, there
4368 * could be a rogue driver around that does not follow this procedure
4369 * resulting in a failure to generate interrupts. The following code is
4370 * present to prevent such a failure.
4371 */
4372
4373 if (ll_config.device_hw_info.function_mode ==
4374 VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION)
4375 if (vdev->config.intr_type == INTA)
4376 vxge_hw_device_unmask_all(hldev);
4377
4390 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...", 4378 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
4391 vdev->ndev->name, __func__, __LINE__); 4379 vdev->ndev->name, __func__, __LINE__);
4392 4380