author     Ayaz Abdulla <aabdulla@nvidia.com>    2006-06-10 22:48:08 -0400
committer  Jeff Garzik <jeff@garzik.org>         2006-06-11 09:25:16 -0400
commit     7a1854b7977d36360fde4e06c2d9cedcc3dd0933
tree       65bc7ef5a9df892f35436847dd2769bf075bad14 /drivers/net
parent     52da35789c305f6f44d0e85b294a9845c1271898
[PATCH] forcedeth config: move functions
This patch moves a few functions (no logic change) so that the next
patch has these functions defined.
Signed-off-by: Ayaz Abdulla <aabdulla@nvidia.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/forcedeth.c  266

1 file changed, 133 insertions(+), 133 deletions(-)
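For readers skimming the relocated code below: set_msix_vector_map() packs a 4-bit MSI-X vector index into NvRegMSIXMap0/NvRegMSIXMap1, one nibble per interrupt bit, so each 32-bit register covers 8 interrupt sources. The standalone sketch that follows mirrors only that packing arithmetic; the helper name pack_vector_map() and the sample mask values are hypothetical and are not part of this patch.

/* Illustrative only: mimics the 4-bits-per-interrupt packing done by
 * set_msix_vector_map() in forcedeth.c. Names and values are made up for
 * this example; the real register writes live in the driver. */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_vector_map(uint32_t vector, uint32_t irqmask, int first_bit)
{
	uint32_t map = 0;
	int i;

	/* Each of 8 interrupt bits gets a 4-bit slot holding the vector index. */
	for (i = 0; i < 8; i++) {
		if ((irqmask >> (first_bit + i)) & 0x1)
			map |= vector << (i << 2);
	}
	return map;
}

int main(void)
{
	/* Example: route interrupt bits 0 and 2 to MSI-X vector 1. */
	printf("MSIXMap0 |= 0x%08x\n", pack_vector_map(1, 0x5, 0)); /* 0x00000101 */
	printf("MSIXMap1 |= 0x%08x\n", pack_vector_map(1, 0x5, 8)); /* 0x00000000 */
	return 0;
}

With 4 bits per interrupt source, up to 16 vectors can be encoded, which is why the driver splits the 16 interrupt bits across the two map registers.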
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 2b8fbebd44ac..8d7666856420 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -2502,6 +2502,139 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
 	return IRQ_RETVAL(i);
 }
 
+static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
+{
+	u8 __iomem *base = get_hwbase(dev);
+	int i;
+	u32 msixmap = 0;
+
+	/* Each interrupt bit can be mapped to a MSIX vector (4 bits).
+	 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
+	 * the remaining 8 interrupts.
+	 */
+	for (i = 0; i < 8; i++) {
+		if ((irqmask >> i) & 0x1) {
+			msixmap |= vector << (i << 2);
+		}
+	}
+	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
+
+	msixmap = 0;
+	for (i = 0; i < 8; i++) {
+		if ((irqmask >> (i + 8)) & 0x1) {
+			msixmap |= vector << (i << 2);
+		}
+	}
+	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
+}
+
+static int nv_request_irq(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+	int ret = 1;
+	int i;
+
+	if (np->msi_flags & NV_MSI_X_CAPABLE) {
+		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+			np->msi_x_entry[i].entry = i;
+		}
+		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
+			np->msi_flags |= NV_MSI_X_ENABLED;
+			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
+				/* Request irq for rx handling */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					goto out_err;
+				}
+				/* Request irq for tx handling */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					goto out_free_rx;
+				}
+				/* Request irq for link and timer handling */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					goto out_free_tx;
+				}
+				/* map interrupts to their respective vector */
+				writel(0, base + NvRegMSIXMap0);
+				writel(0, base + NvRegMSIXMap1);
+				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
+				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
+				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
+			} else {
+				/* Request irq for all interrupts */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					goto out_err;
+				}
+
+				/* map interrupts to vector 0 */
+				writel(0, base + NvRegMSIXMap0);
+				writel(0, base + NvRegMSIXMap1);
+			}
+		}
+	}
+	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
+		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
+			np->msi_flags |= NV_MSI_ENABLED;
+			if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
+				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+				pci_disable_msi(np->pci_dev);
+				np->msi_flags &= ~NV_MSI_ENABLED;
+				goto out_err;
+			}
+
+			/* map interrupts to vector 0 */
+			writel(0, base + NvRegMSIMap0);
+			writel(0, base + NvRegMSIMap1);
+			/* enable msi vector 0 */
+			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
+		}
+	}
+	if (ret != 0) {
+		if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
+			goto out_err;
+	}
+
+	return 0;
+out_free_tx:
+	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
+out_free_rx:
+	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
+out_err:
+	return 1;
+}
+
+static void nv_free_irq(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	int i;
+
+	if (np->msi_flags & NV_MSI_X_ENABLED) {
+		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+			free_irq(np->msi_x_entry[i].vector, dev);
+		}
+		pci_disable_msix(np->pci_dev);
+		np->msi_flags &= ~NV_MSI_X_ENABLED;
+	} else {
+		free_irq(np->pci_dev->irq, dev);
+		if (np->msi_flags & NV_MSI_ENABLED) {
+			pci_disable_msi(np->pci_dev);
+			np->msi_flags &= ~NV_MSI_ENABLED;
+		}
+	}
+}
+
 static void nv_do_nic_poll(unsigned long data)
 {
 	struct net_device *dev = (struct net_device *) data;
@@ -3319,139 +3452,6 @@ static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 	/* nothing to do */
 };
 
-static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
-{
-	u8 __iomem *base = get_hwbase(dev);
-	int i;
-	u32 msixmap = 0;
-
-	/* Each interrupt bit can be mapped to a MSIX vector (4 bits).
-	 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
-	 * the remaining 8 interrupts.
-	 */
-	for (i = 0; i < 8; i++) {
-		if ((irqmask >> i) & 0x1) {
-			msixmap |= vector << (i << 2);
-		}
-	}
-	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
-
-	msixmap = 0;
-	for (i = 0; i < 8; i++) {
-		if ((irqmask >> (i + 8)) & 0x1) {
-			msixmap |= vector << (i << 2);
-		}
-	}
-	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
-}
-
-static int nv_request_irq(struct net_device *dev)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-	u8 __iomem *base = get_hwbase(dev);
-	int ret = 1;
-	int i;
-
-	if (np->msi_flags & NV_MSI_X_CAPABLE) {
-		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
-			np->msi_x_entry[i].entry = i;
-		}
-		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
-			np->msi_flags |= NV_MSI_X_ENABLED;
-			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
-				/* Request irq for rx handling */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_err;
-				}
-				/* Request irq for tx handling */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_free_rx;
-				}
-				/* Request irq for link and timer handling */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_free_tx;
-				}
-				/* map interrupts to their respective vector */
-				writel(0, base + NvRegMSIXMap0);
-				writel(0, base + NvRegMSIXMap1);
-				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
-				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
-				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
-			} else {
-				/* Request irq for all interrupts */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_err;
-				}
-
-				/* map interrupts to vector 0 */
-				writel(0, base + NvRegMSIXMap0);
-				writel(0, base + NvRegMSIXMap1);
-			}
-		}
-	}
-	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
-		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
-			np->msi_flags |= NV_MSI_ENABLED;
-			if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
-				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
-				pci_disable_msi(np->pci_dev);
-				np->msi_flags &= ~NV_MSI_ENABLED;
-				goto out_err;
-			}
-
-			/* map interrupts to vector 0 */
-			writel(0, base + NvRegMSIMap0);
-			writel(0, base + NvRegMSIMap1);
-			/* enable msi vector 0 */
-			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
-		}
-	}
-	if (ret != 0) {
-		if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
-			goto out_err;
-	}
-
-	return 0;
-out_free_tx:
-	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
-out_free_rx:
-	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
-out_err:
-	return 1;
-}
-
-static void nv_free_irq(struct net_device *dev)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-	int i;
-
-	if (np->msi_flags & NV_MSI_X_ENABLED) {
-		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
-			free_irq(np->msi_x_entry[i].vector, dev);
-		}
-		pci_disable_msix(np->pci_dev);
-		np->msi_flags &= ~NV_MSI_X_ENABLED;
-	} else {
-		free_irq(np->pci_dev->irq, dev);
-		if (np->msi_flags & NV_MSI_ENABLED) {
-			pci_disable_msi(np->pci_dev);
-			np->msi_flags &= ~NV_MSI_ENABLED;
-		}
-	}
-}
-
 static int nv_open(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);