129 files changed, 11208 insertions, 495 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-usb_host b/Documentation/ABI/testing/sysfs-class-uwb_rc-wusbhc
index 46b66ad1f1b4..4e8106f7cfd9 100644
--- a/Documentation/ABI/testing/sysfs-class-usb_host
+++ b/Documentation/ABI/testing/sysfs-class-uwb_rc-wusbhc
@@ -1,4 +1,4 @@ | |||
1 | What: /sys/class/usb_host/usb_hostN/wusb_chid | 1 | What: /sys/class/uwb_rc/uwbN/wusbhc/wusb_chid |
2 | Date: July 2008 | 2 | Date: July 2008 |
3 | KernelVersion: 2.6.27 | 3 | KernelVersion: 2.6.27 |
4 | Contact: David Vrabel <david.vrabel@csr.com> | 4 | Contact: David Vrabel <david.vrabel@csr.com> |
@@ -9,7 +9,7 @@ Description: | |||
9 | 9 | ||
10 | Set an all zero CHID to stop the host controller. | 10 | Set an all zero CHID to stop the host controller. |
11 | 11 | ||
12 | What: /sys/class/usb_host/usb_hostN/wusb_trust_timeout | 12 | What: /sys/class/uwb_rc/uwbN/wusbhc/wusb_trust_timeout |
13 | Date: July 2008 | 13 | Date: July 2008 |
14 | KernelVersion: 2.6.27 | 14 | KernelVersion: 2.6.27 |
15 | Contact: David Vrabel <david.vrabel@csr.com> | 15 | Contact: David Vrabel <david.vrabel@csr.com> |
diff --git a/MAINTAINERS b/MAINTAINERS
index d5eb8c13ef05..88241154f4ce 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3667,6 +3667,7 @@ NETWORKING [GENERAL] | |||
3667 | M: "David S. Miller" <davem@davemloft.net> | 3667 | M: "David S. Miller" <davem@davemloft.net> |
3668 | L: netdev@vger.kernel.org | 3668 | L: netdev@vger.kernel.org |
3669 | W: http://www.linuxfoundation.org/en/Net | 3669 | W: http://www.linuxfoundation.org/en/Net |
3670 | W: http://patchwork.ozlabs.org/project/netdev/list/ | ||
3670 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git | 3671 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git |
3671 | S: Maintained | 3672 | S: Maintained |
3672 | F: net/ | 3673 | F: net/ |
@@ -5664,6 +5665,13 @@ S: Maintained | |||
5664 | F: drivers/vlynq/vlynq.c | 5665 | F: drivers/vlynq/vlynq.c |
5665 | F: include/linux/vlynq.h | 5666 | F: include/linux/vlynq.h |
5666 | 5667 | ||
5668 | VMWARE VMXNET3 ETHERNET DRIVER | ||
5669 | M: Shreyas Bhatewara <sbhatewara@vmware.com> | ||
5670 | M: VMware, Inc. <pv-drivers@vmware.com> | ||
5671 | L: netdev@vger.kernel.org | ||
5672 | S: Maintained | ||
5673 | F: drivers/net/vmxnet3/ | ||
5674 | |||
5667 | VOLTAGE AND CURRENT REGULATOR FRAMEWORK | 5675 | VOLTAGE AND CURRENT REGULATOR FRAMEWORK |
5668 | M: Liam Girdwood <lrg@slimlogic.co.uk> | 5676 | M: Liam Girdwood <lrg@slimlogic.co.uk> |
5669 | M: Mark Brown <broonie@opensource.wolfsonmicro.com> | 5677 | M: Mark Brown <broonie@opensource.wolfsonmicro.com> |
diff --git a/arch/arm/configs/omap3_beagle_defconfig b/arch/arm/configs/omap3_beagle_defconfig
index 357d4021e2d0..b3c8cce0f8fb 100644
--- a/arch/arm/configs/omap3_beagle_defconfig
+++ b/arch/arm/configs/omap3_beagle_defconfig
@@ -969,7 +969,6 @@ CONFIG_USB_ETH_RNDIS=y | |||
969 | # | 969 | # |
970 | CONFIG_USB_OTG_UTILS=y | 970 | CONFIG_USB_OTG_UTILS=y |
971 | # CONFIG_USB_GPIO_VBUS is not set | 971 | # CONFIG_USB_GPIO_VBUS is not set |
972 | # CONFIG_ISP1301_OMAP is not set | ||
973 | CONFIG_TWL4030_USB=y | 972 | CONFIG_TWL4030_USB=y |
974 | # CONFIG_NOP_USB_XCEIV is not set | 973 | # CONFIG_NOP_USB_XCEIV is not set |
975 | CONFIG_MMC=y | 974 | CONFIG_MMC=y |
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index c1af5326e92f..2b0eb1ba5d7f 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -444,7 +444,7 @@ static int __init rx51_i2c_init(void) | |||
444 | rx51_twldata.vaux3 = &rx51_vaux3_cam; | 444 | rx51_twldata.vaux3 = &rx51_vaux3_cam; |
445 | rx51_twldata.vmmc2 = &rx51_vmmc2; | 445 | rx51_twldata.vmmc2 = &rx51_vmmc2; |
446 | } | 446 | } |
447 | omap_register_i2c_bus(1, 2600, rx51_peripherals_i2c_board_info_1, | 447 | omap_register_i2c_bus(1, 2200, rx51_peripherals_i2c_board_info_1, |
448 | ARRAY_SIZE(rx51_peripherals_i2c_board_info_1)); | 448 | ARRAY_SIZE(rx51_peripherals_i2c_board_info_1)); |
449 | omap_register_i2c_bus(2, 100, NULL, 0); | 449 | omap_register_i2c_bus(2, 100, NULL, 0); |
450 | omap_register_i2c_bus(3, 400, NULL, 0); | 450 | omap_register_i2c_bus(3, 400, NULL, 0); |
diff --git a/arch/arm/mach-omap2/board-zoom2.c b/arch/arm/mach-omap2/board-zoom2.c
index b7b32208ced7..fd3369d5e5cb 100644
--- a/arch/arm/mach-omap2/board-zoom2.c
+++ b/arch/arm/mach-omap2/board-zoom2.c
@@ -25,6 +25,7 @@ | |||
25 | #include <mach/keypad.h> | 25 | #include <mach/keypad.h> |
26 | 26 | ||
27 | #include "mmc-twl4030.h" | 27 | #include "mmc-twl4030.h" |
28 | #include "sdram-micron-mt46h32m32lf-6.h" | ||
28 | 29 | ||
29 | /* Zoom2 has Qwerty keyboard*/ | 30 | /* Zoom2 has Qwerty keyboard*/ |
30 | static int board_keymap[] = { | 31 | static int board_keymap[] = { |
@@ -213,7 +214,8 @@ static void __init omap_zoom2_init_irq(void) | |||
213 | { | 214 | { |
214 | omap_board_config = zoom2_config; | 215 | omap_board_config = zoom2_config; |
215 | omap_board_config_size = ARRAY_SIZE(zoom2_config); | 216 | omap_board_config_size = ARRAY_SIZE(zoom2_config); |
216 | omap2_init_common_hw(NULL, NULL); | 217 | omap2_init_common_hw(mt46h32m32lf6_sdrc_params, |
218 | mt46h32m32lf6_sdrc_params); | ||
217 | omap_init_irq(); | 219 | omap_init_irq(); |
218 | omap_gpio_init(); | 220 | omap_gpio_init(); |
219 | } | 221 | } |
diff --git a/arch/arm/mach-omap2/clock24xx.c b/arch/arm/mach-omap2/clock24xx.c
index bc5d3ac66611..e2dbedd581e8 100644
--- a/arch/arm/mach-omap2/clock24xx.c
+++ b/arch/arm/mach-omap2/clock24xx.c
@@ -769,6 +769,7 @@ int __init omap2_clk_init(void) | |||
769 | if (c->cpu & cpu_mask) { | 769 | if (c->cpu & cpu_mask) { |
770 | clkdev_add(&c->lk); | 770 | clkdev_add(&c->lk); |
771 | clk_register(c->lk.clk); | 771 | clk_register(c->lk.clk); |
772 | omap2_init_clk_clkdm(c->lk.clk); | ||
772 | } | 773 | } |
773 | 774 | ||
774 | /* Check the MPU rate set by bootloader */ | 775 | /* Check the MPU rate set by bootloader */ |
diff --git a/arch/arm/mach-omap2/clockdomain.c b/arch/arm/mach-omap2/clockdomain.c
index 4ef7b4f5474e..58aff8485df9 100644
--- a/arch/arm/mach-omap2/clockdomain.c
+++ b/arch/arm/mach-omap2/clockdomain.c
@@ -137,6 +137,36 @@ static void _clkdm_del_autodeps(struct clockdomain *clkdm) | |||
137 | } | 137 | } |
138 | } | 138 | } |
139 | 139 | ||
140 | /* | ||
141 | * _omap2_clkdm_set_hwsup - set the hwsup idle transition bit | ||
142 | * @clkdm: struct clockdomain * | ||
143 | * @enable: int 0 to disable, 1 to enable | ||
144 | * | ||
145 | * Internal helper for actually switching the bit that controls hwsup | ||
146 | * idle transitions for clkdm. | ||
147 | */ | ||
148 | static void _omap2_clkdm_set_hwsup(struct clockdomain *clkdm, int enable) | ||
149 | { | ||
150 | u32 v; | ||
151 | |||
152 | if (cpu_is_omap24xx()) { | ||
153 | if (enable) | ||
154 | v = OMAP24XX_CLKSTCTRL_ENABLE_AUTO; | ||
155 | else | ||
156 | v = OMAP24XX_CLKSTCTRL_DISABLE_AUTO; | ||
157 | } else if (cpu_is_omap34xx()) { | ||
158 | if (enable) | ||
159 | v = OMAP34XX_CLKSTCTRL_ENABLE_AUTO; | ||
160 | else | ||
161 | v = OMAP34XX_CLKSTCTRL_DISABLE_AUTO; | ||
162 | } else { | ||
163 | BUG(); | ||
164 | } | ||
165 | |||
166 | cm_rmw_mod_reg_bits(clkdm->clktrctrl_mask, | ||
167 | v << __ffs(clkdm->clktrctrl_mask), | ||
168 | clkdm->pwrdm.ptr->prcm_offs, CM_CLKSTCTRL); | ||
169 | } | ||
140 | 170 | ||
141 | static struct clockdomain *_clkdm_lookup(const char *name) | 171 | static struct clockdomain *_clkdm_lookup(const char *name) |
142 | { | 172 | { |
@@ -456,8 +486,6 @@ int omap2_clkdm_wakeup(struct clockdomain *clkdm) | |||
456 | */ | 486 | */ |
457 | void omap2_clkdm_allow_idle(struct clockdomain *clkdm) | 487 | void omap2_clkdm_allow_idle(struct clockdomain *clkdm) |
458 | { | 488 | { |
459 | u32 v; | ||
460 | |||
461 | if (!clkdm) | 489 | if (!clkdm) |
462 | return; | 490 | return; |
463 | 491 | ||
@@ -473,18 +501,7 @@ void omap2_clkdm_allow_idle(struct clockdomain *clkdm) | |||
473 | if (atomic_read(&clkdm->usecount) > 0) | 501 | if (atomic_read(&clkdm->usecount) > 0) |
474 | _clkdm_add_autodeps(clkdm); | 502 | _clkdm_add_autodeps(clkdm); |
475 | 503 | ||
476 | if (cpu_is_omap24xx()) | 504 | _omap2_clkdm_set_hwsup(clkdm, 1); |
477 | v = OMAP24XX_CLKSTCTRL_ENABLE_AUTO; | ||
478 | else if (cpu_is_omap34xx()) | ||
479 | v = OMAP34XX_CLKSTCTRL_ENABLE_AUTO; | ||
480 | else | ||
481 | BUG(); | ||
482 | |||
483 | |||
484 | cm_rmw_mod_reg_bits(clkdm->clktrctrl_mask, | ||
485 | v << __ffs(clkdm->clktrctrl_mask), | ||
486 | clkdm->pwrdm.ptr->prcm_offs, | ||
487 | CM_CLKSTCTRL); | ||
488 | 505 | ||
489 | pwrdm_clkdm_state_switch(clkdm); | 506 | pwrdm_clkdm_state_switch(clkdm); |
490 | } | 507 | } |
@@ -500,8 +517,6 @@ void omap2_clkdm_allow_idle(struct clockdomain *clkdm) | |||
500 | */ | 517 | */ |
501 | void omap2_clkdm_deny_idle(struct clockdomain *clkdm) | 518 | void omap2_clkdm_deny_idle(struct clockdomain *clkdm) |
502 | { | 519 | { |
503 | u32 v; | ||
504 | |||
505 | if (!clkdm) | 520 | if (!clkdm) |
506 | return; | 521 | return; |
507 | 522 | ||
@@ -514,16 +529,7 @@ void omap2_clkdm_deny_idle(struct clockdomain *clkdm) | |||
514 | pr_debug("clockdomain: disabling automatic idle transitions for %s\n", | 529 | pr_debug("clockdomain: disabling automatic idle transitions for %s\n", |
515 | clkdm->name); | 530 | clkdm->name); |
516 | 531 | ||
517 | if (cpu_is_omap24xx()) | 532 | _omap2_clkdm_set_hwsup(clkdm, 0); |
518 | v = OMAP24XX_CLKSTCTRL_DISABLE_AUTO; | ||
519 | else if (cpu_is_omap34xx()) | ||
520 | v = OMAP34XX_CLKSTCTRL_DISABLE_AUTO; | ||
521 | else | ||
522 | BUG(); | ||
523 | |||
524 | cm_rmw_mod_reg_bits(clkdm->clktrctrl_mask, | ||
525 | v << __ffs(clkdm->clktrctrl_mask), | ||
526 | clkdm->pwrdm.ptr->prcm_offs, CM_CLKSTCTRL); | ||
527 | 533 | ||
528 | if (atomic_read(&clkdm->usecount) > 0) | 534 | if (atomic_read(&clkdm->usecount) > 0) |
529 | _clkdm_del_autodeps(clkdm); | 535 | _clkdm_del_autodeps(clkdm); |
@@ -569,10 +575,14 @@ int omap2_clkdm_clk_enable(struct clockdomain *clkdm, struct clk *clk) | |||
569 | v = omap2_clkdm_clktrctrl_read(clkdm); | 575 | v = omap2_clkdm_clktrctrl_read(clkdm); |
570 | 576 | ||
571 | if ((cpu_is_omap34xx() && v == OMAP34XX_CLKSTCTRL_ENABLE_AUTO) || | 577 | if ((cpu_is_omap34xx() && v == OMAP34XX_CLKSTCTRL_ENABLE_AUTO) || |
572 | (cpu_is_omap24xx() && v == OMAP24XX_CLKSTCTRL_ENABLE_AUTO)) | 578 | (cpu_is_omap24xx() && v == OMAP24XX_CLKSTCTRL_ENABLE_AUTO)) { |
579 | /* Disable HW transitions when we are changing deps */ | ||
580 | _omap2_clkdm_set_hwsup(clkdm, 0); | ||
573 | _clkdm_add_autodeps(clkdm); | 581 | _clkdm_add_autodeps(clkdm); |
574 | else | 582 | _omap2_clkdm_set_hwsup(clkdm, 1); |
583 | } else { | ||
575 | omap2_clkdm_wakeup(clkdm); | 584 | omap2_clkdm_wakeup(clkdm); |
585 | } | ||
576 | 586 | ||
577 | pwrdm_wait_transition(clkdm->pwrdm.ptr); | 587 | pwrdm_wait_transition(clkdm->pwrdm.ptr); |
578 | pwrdm_clkdm_state_switch(clkdm); | 588 | pwrdm_clkdm_state_switch(clkdm); |
@@ -623,10 +633,14 @@ int omap2_clkdm_clk_disable(struct clockdomain *clkdm, struct clk *clk) | |||
623 | v = omap2_clkdm_clktrctrl_read(clkdm); | 633 | v = omap2_clkdm_clktrctrl_read(clkdm); |
624 | 634 | ||
625 | if ((cpu_is_omap34xx() && v == OMAP34XX_CLKSTCTRL_ENABLE_AUTO) || | 635 | if ((cpu_is_omap34xx() && v == OMAP34XX_CLKSTCTRL_ENABLE_AUTO) || |
626 | (cpu_is_omap24xx() && v == OMAP24XX_CLKSTCTRL_ENABLE_AUTO)) | 636 | (cpu_is_omap24xx() && v == OMAP24XX_CLKSTCTRL_ENABLE_AUTO)) { |
637 | /* Disable HW transitions when we are changing deps */ | ||
638 | _omap2_clkdm_set_hwsup(clkdm, 0); | ||
627 | _clkdm_del_autodeps(clkdm); | 639 | _clkdm_del_autodeps(clkdm); |
628 | else | 640 | _omap2_clkdm_set_hwsup(clkdm, 1); |
641 | } else { | ||
629 | omap2_clkdm_sleep(clkdm); | 642 | omap2_clkdm_sleep(clkdm); |
643 | } | ||
630 | 644 | ||
631 | pwrdm_clkdm_state_switch(clkdm); | 645 | pwrdm_clkdm_state_switch(clkdm); |
632 | 646 | ||
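The clockdomain.c hunks above fold the duplicated 24xx/34xx CLKSTCTRL handling into a single _omap2_clkdm_set_hwsup() helper and bracket the autodep updates with hardware-supervised transitions switched off. A minimal user-space sketch of that read-modify-write pattern follows; the register array, the field mask and the ENABLE/DISABLE encodings are stand-ins for the real PRCM accessors, not the kernel API.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the CM_CLKSTCTRL register file; one word per domain. */
static uint32_t clkstctrl[4];

#define CLKSTCTRL_DISABLE_AUTO 0x0u   /* placeholder encodings */
#define CLKSTCTRL_ENABLE_AUTO  0x3u

/* Read-modify-write of the clktrctrl field, like cm_rmw_mod_reg_bits(). */
static void clkdm_set_hwsup(int dom, uint32_t field_mask, int enable)
{
	uint32_t v = enable ? CLKSTCTRL_ENABLE_AUTO : CLKSTCTRL_DISABLE_AUTO;
	int shift = __builtin_ctz(field_mask);          /* __ffs() equivalent */

	clkstctrl[dom] = (clkstctrl[dom] & ~field_mask) |
			 ((v << shift) & field_mask);
}

int main(void)
{
	/* Disable hwsup, "change dependencies", then re-enable, as the
	 * omap2_clkdm_clk_enable()/_disable() hunks now do. */
	clkdm_set_hwsup(0, 0x3u << 0, 0);
	/* ... add or delete autodeps here ... */
	clkdm_set_hwsup(0, 0x3u << 0, 1);

	printf("CLKSTCTRL[0] = 0x%08x\n", (unsigned int)clkstctrl[0]);
	return 0;
}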
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index fd3154ae69b1..0eb676d7e807 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -829,10 +829,10 @@ EXPORT_SYMBOL(omap_free_dma); | |||
829 | * | 829 | * |
830 | * @param arb_rate | 830 | * @param arb_rate |
831 | * @param max_fifo_depth | 831 | * @param max_fifo_depth |
832 | * @param tparams - Number of thereads to reserve : DMA_THREAD_RESERVE_NORM | 832 | * @param tparams - Number of threads to reserve : DMA_THREAD_RESERVE_NORM |
833 | * DMA_THREAD_RESERVE_ONET | 833 | * DMA_THREAD_RESERVE_ONET |
834 | * DMA_THREAD_RESERVE_TWOT | 834 | * DMA_THREAD_RESERVE_TWOT |
835 | * DMA_THREAD_RESERVE_THREET | 835 | * DMA_THREAD_RESERVE_THREET |
836 | */ | 836 | */ |
837 | void | 837 | void |
838 | omap_dma_set_global_params(int arb_rate, int max_fifo_depth, int tparams) | 838 | omap_dma_set_global_params(int arb_rate, int max_fifo_depth, int tparams) |
@@ -844,11 +844,14 @@ omap_dma_set_global_params(int arb_rate, int max_fifo_depth, int tparams) | |||
844 | return; | 844 | return; |
845 | } | 845 | } |
846 | 846 | ||
847 | if (max_fifo_depth == 0) | ||
848 | max_fifo_depth = 1; | ||
847 | if (arb_rate == 0) | 849 | if (arb_rate == 0) |
848 | arb_rate = 1; | 850 | arb_rate = 1; |
849 | 851 | ||
850 | reg = (arb_rate & 0xff) << 16; | 852 | reg = 0xff & max_fifo_depth; |
851 | reg |= (0xff & max_fifo_depth); | 853 | reg |= (0x3 & tparams) << 12; |
854 | reg |= (arb_rate & 0xff) << 16; | ||
852 | 855 | ||
853 | dma_write(reg, GCR); | 856 | dma_write(reg, GCR); |
854 | } | 857 | } |
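For reference, the reworked omap_dma_set_global_params() now clamps a zero max_fifo_depth to 1 (as it already did for arb_rate) and packs the GCR as: FIFO depth in bits 7:0, thread reservation in bits 13:12, arbitration rate in bits 23:16. A stand-alone sketch of that packing, with a printf in place of dma_write():

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_dma_gcr(int arb_rate, int max_fifo_depth, int tparams)
{
	uint32_t reg;

	if (max_fifo_depth == 0)
		max_fifo_depth = 1;
	if (arb_rate == 0)
		arb_rate = 1;

	reg  = 0xff & max_fifo_depth;        /* bits  7:0  */
	reg |= (0x3 & tparams) << 12;        /* bits 13:12 */
	reg |= (arb_rate & 0xff) << 16;      /* bits 23:16 */
	return reg;
}

int main(void)
{
	/* Same defaults the OMAP code falls back to when callers pass zeros. */
	printf("GCR = 0x%08x\n", (unsigned int)pack_dma_gcr(0, 0, 0));
	return 0;
}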
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c
index 88ac9768f1c1..e664b912d7bb 100644
--- a/arch/arm/plat-omap/mcbsp.c
+++ b/arch/arm/plat-omap/mcbsp.c
@@ -595,7 +595,7 @@ void omap_mcbsp_stop(unsigned int id, int tx, int rx) | |||
595 | rx &= 1; | 595 | rx &= 1; |
596 | if (cpu_is_omap2430() || cpu_is_omap34xx()) { | 596 | if (cpu_is_omap2430() || cpu_is_omap34xx()) { |
597 | w = OMAP_MCBSP_READ(io_base, RCCR); | 597 | w = OMAP_MCBSP_READ(io_base, RCCR); |
598 | w |= (tx ? RDISABLE : 0); | 598 | w |= (rx ? RDISABLE : 0); |
599 | OMAP_MCBSP_WRITE(io_base, RCCR, w); | 599 | OMAP_MCBSP_WRITE(io_base, RCCR, w); |
600 | } | 600 | } |
601 | w = OMAP_MCBSP_READ(io_base, SPCR1); | 601 | w = OMAP_MCBSP_READ(io_base, SPCR1); |
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index 6b36ee56e6fe..ed86d3bf249a 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -1532,7 +1532,7 @@ long vt_compat_ioctl(struct tty_struct *tty, struct file * file, | |||
1532 | 1532 | ||
1533 | case PIO_UNIMAP: | 1533 | case PIO_UNIMAP: |
1534 | case GIO_UNIMAP: | 1534 | case GIO_UNIMAP: |
1535 | ret = do_unimap_ioctl(cmd, up, perm, vc); | 1535 | ret = compat_unimap_ioctl(cmd, up, perm, vc); |
1536 | break; | 1536 | break; |
1537 | 1537 | ||
1538 | /* | 1538 | /* |
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 712776089b46..e19ca4bb7510 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1741,6 +1741,7 @@ config KS8851 | |||
1741 | config KS8851_MLL | 1741 | config KS8851_MLL |
1742 | tristate "Micrel KS8851 MLL" | 1742 | tristate "Micrel KS8851 MLL" |
1743 | depends on HAS_IOMEM | 1743 | depends on HAS_IOMEM |
1744 | select MII | ||
1744 | help | 1745 | help |
1745 | This platform driver is for Micrel KS8851 Address/data bus | 1746 | This platform driver is for Micrel KS8851 Address/data bus |
1746 | multiplexed network chip. | 1747 | multiplexed network chip. |
@@ -2482,6 +2483,8 @@ config S6GMAC | |||
2482 | To compile this driver as a module, choose M here. The module | 2483 | To compile this driver as a module, choose M here. The module |
2483 | will be called s6gmac. | 2484 | will be called s6gmac. |
2484 | 2485 | ||
2486 | source "drivers/net/stmmac/Kconfig" | ||
2487 | |||
2485 | endif # NETDEV_1000 | 2488 | endif # NETDEV_1000 |
2486 | 2489 | ||
2487 | # | 2490 | # |
@@ -3230,4 +3233,12 @@ config VIRTIO_NET | |||
3230 | This is the virtual network driver for virtio. It can be used with | 3233 | This is the virtual network driver for virtio. It can be used with |
3231 | lguest or QEMU based VMMs (like KVM or Xen). Say Y or M. | 3234 | lguest or QEMU based VMMs (like KVM or Xen). Say Y or M. |
3232 | 3235 | ||
3236 | config VMXNET3 | ||
3237 | tristate "VMware VMXNET3 ethernet driver" | ||
3238 | depends on PCI && X86 && INET | ||
3239 | help | ||
3240 | This driver supports VMware's vmxnet3 virtual ethernet NIC. | ||
3241 | To compile this driver as a module, choose M here: the | ||
3242 | module will be called vmxnet3. | ||
3243 | |||
3233 | endif # NETDEVICES | 3244 | endif # NETDEVICES |
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index d866b8cf65d1..246323d7f161 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -2,6 +2,10 @@ | |||
2 | # Makefile for the Linux network (ethercard) device drivers. | 2 | # Makefile for the Linux network (ethercard) device drivers. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-$(CONFIG_MII) += mii.o | ||
6 | obj-$(CONFIG_MDIO) += mdio.o | ||
7 | obj-$(CONFIG_PHYLIB) += phy/ | ||
8 | |||
5 | obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o | 9 | obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o |
6 | 10 | ||
7 | obj-$(CONFIG_E1000) += e1000/ | 11 | obj-$(CONFIG_E1000) += e1000/ |
@@ -26,6 +30,7 @@ obj-$(CONFIG_TEHUTI) += tehuti.o | |||
26 | obj-$(CONFIG_ENIC) += enic/ | 30 | obj-$(CONFIG_ENIC) += enic/ |
27 | obj-$(CONFIG_JME) += jme.o | 31 | obj-$(CONFIG_JME) += jme.o |
28 | obj-$(CONFIG_BE2NET) += benet/ | 32 | obj-$(CONFIG_BE2NET) += benet/ |
33 | obj-$(CONFIG_VMXNET3) += vmxnet3/ | ||
29 | 34 | ||
30 | gianfar_driver-objs := gianfar.o \ | 35 | gianfar_driver-objs := gianfar.o \ |
31 | gianfar_ethtool.o \ | 36 | gianfar_ethtool.o \ |
@@ -95,15 +100,12 @@ obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o | |||
95 | obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o | 100 | obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o |
96 | obj-$(CONFIG_RIONET) += rionet.o | 101 | obj-$(CONFIG_RIONET) += rionet.o |
97 | obj-$(CONFIG_SH_ETH) += sh_eth.o | 102 | obj-$(CONFIG_SH_ETH) += sh_eth.o |
103 | obj-$(CONFIG_STMMAC_ETH) += stmmac/ | ||
98 | 104 | ||
99 | # | 105 | # |
100 | # end link order section | 106 | # end link order section |
101 | # | 107 | # |
102 | 108 | ||
103 | obj-$(CONFIG_MII) += mii.o | ||
104 | obj-$(CONFIG_MDIO) += mdio.o | ||
105 | obj-$(CONFIG_PHYLIB) += phy/ | ||
106 | |||
107 | obj-$(CONFIG_SUNDANCE) += sundance.o | 109 | obj-$(CONFIG_SUNDANCE) += sundance.o |
108 | obj-$(CONFIG_HAMACHI) += hamachi.o | 110 | obj-$(CONFIG_HAMACHI) += hamachi.o |
109 | obj-$(CONFIG_NET) += Space.o loopback.o | 111 | obj-$(CONFIG_NET) += Space.o loopback.o |
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index 5f0b05c2d71f..d82a9a994753 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -1209,7 +1209,8 @@ static int __devinit ace_init(struct net_device *dev) | |||
1209 | memset(ap->info, 0, sizeof(struct ace_info)); | 1209 | memset(ap->info, 0, sizeof(struct ace_info)); |
1210 | memset(ap->skb, 0, sizeof(struct ace_skb)); | 1210 | memset(ap->skb, 0, sizeof(struct ace_skb)); |
1211 | 1211 | ||
1212 | if (ace_load_firmware(dev)) | 1212 | ecode = ace_load_firmware(dev); |
1213 | if (ecode) | ||
1213 | goto init_error; | 1214 | goto init_error; |
1214 | 1215 | ||
1215 | ap->fw_running = 0; | 1216 | ap->fw_running = 0; |
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 3373560405ba..9dd076a626a5 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -213,6 +213,7 @@ static struct of_device_id __devinitdata sja1000_ofp_table[] = { | |||
213 | {.compatible = "nxp,sja1000"}, | 213 | {.compatible = "nxp,sja1000"}, |
214 | {}, | 214 | {}, |
215 | }; | 215 | }; |
216 | MODULE_DEVICE_TABLE(of, sja1000_ofp_table); | ||
216 | 217 | ||
217 | static struct of_platform_driver sja1000_ofp_driver = { | 218 | static struct of_platform_driver sja1000_ofp_driver = { |
218 | .owner = THIS_MODULE, | 219 | .owner = THIS_MODULE, |
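This and several later drivers in the series get the same one-liner: exporting the of_device_id list with MODULE_DEVICE_TABLE(of, ...) so the alias ends up in the module image and modprobe can autoload the driver from the firmware's compatible string. The general shape of the pattern, as a kernel-build sketch around a hypothetical "vendor,foo" device rather than code from this patch:

#include <linux/module.h>
#include <linux/mod_devicetable.h>

/* Hypothetical driver: the table its of_platform_driver points at. */
static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo" },
	{ /* sentinel */ }
};
/* Emit the alias list into the module so userspace can map the OF
 * "compatible" string reported by the kernel to this module. */
MODULE_DEVICE_TABLE(of, foo_of_match);

MODULE_LICENSE("GPL");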
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 65a2d0ba64e2..f72c56dec33c 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -333,6 +333,9 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1"; | |||
333 | #define EMAC_DM646X_MAC_EOI_C0_RXEN (0x01) | 333 | #define EMAC_DM646X_MAC_EOI_C0_RXEN (0x01) |
334 | #define EMAC_DM646X_MAC_EOI_C0_TXEN (0x02) | 334 | #define EMAC_DM646X_MAC_EOI_C0_TXEN (0x02) |
335 | 335 | ||
336 | /* EMAC Stats Clear Mask */ | ||
337 | #define EMAC_STATS_CLR_MASK (0xFFFFFFFF) | ||
338 | |||
336 | /** net_buf_obj: EMAC network bufferdata structure | 339 | /** net_buf_obj: EMAC network bufferdata structure |
337 | * | 340 | * |
338 | * EMAC network buffer data structure | 341 | * EMAC network buffer data structure |
@@ -2548,40 +2551,49 @@ static int emac_dev_stop(struct net_device *ndev) | |||
2548 | static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev) | 2551 | static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev) |
2549 | { | 2552 | { |
2550 | struct emac_priv *priv = netdev_priv(ndev); | 2553 | struct emac_priv *priv = netdev_priv(ndev); |
2554 | u32 mac_control; | ||
2555 | u32 stats_clear_mask; | ||
2551 | 2556 | ||
2552 | /* update emac hardware stats and reset the registers*/ | 2557 | /* update emac hardware stats and reset the registers*/ |
2553 | 2558 | ||
2559 | mac_control = emac_read(EMAC_MACCONTROL); | ||
2560 | |||
2561 | if (mac_control & EMAC_MACCONTROL_GMIIEN) | ||
2562 | stats_clear_mask = EMAC_STATS_CLR_MASK; | ||
2563 | else | ||
2564 | stats_clear_mask = 0; | ||
2565 | |||
2554 | priv->net_dev_stats.multicast += emac_read(EMAC_RXMCASTFRAMES); | 2566 | priv->net_dev_stats.multicast += emac_read(EMAC_RXMCASTFRAMES); |
2555 | emac_write(EMAC_RXMCASTFRAMES, EMAC_ALL_MULTI_REG_VALUE); | 2567 | emac_write(EMAC_RXMCASTFRAMES, stats_clear_mask); |
2556 | 2568 | ||
2557 | priv->net_dev_stats.collisions += (emac_read(EMAC_TXCOLLISION) + | 2569 | priv->net_dev_stats.collisions += (emac_read(EMAC_TXCOLLISION) + |
2558 | emac_read(EMAC_TXSINGLECOLL) + | 2570 | emac_read(EMAC_TXSINGLECOLL) + |
2559 | emac_read(EMAC_TXMULTICOLL)); | 2571 | emac_read(EMAC_TXMULTICOLL)); |
2560 | emac_write(EMAC_TXCOLLISION, EMAC_ALL_MULTI_REG_VALUE); | 2572 | emac_write(EMAC_TXCOLLISION, stats_clear_mask); |
2561 | emac_write(EMAC_TXSINGLECOLL, EMAC_ALL_MULTI_REG_VALUE); | 2573 | emac_write(EMAC_TXSINGLECOLL, stats_clear_mask); |
2562 | emac_write(EMAC_TXMULTICOLL, EMAC_ALL_MULTI_REG_VALUE); | 2574 | emac_write(EMAC_TXMULTICOLL, stats_clear_mask); |
2563 | 2575 | ||
2564 | priv->net_dev_stats.rx_length_errors += (emac_read(EMAC_RXOVERSIZED) + | 2576 | priv->net_dev_stats.rx_length_errors += (emac_read(EMAC_RXOVERSIZED) + |
2565 | emac_read(EMAC_RXJABBER) + | 2577 | emac_read(EMAC_RXJABBER) + |
2566 | emac_read(EMAC_RXUNDERSIZED)); | 2578 | emac_read(EMAC_RXUNDERSIZED)); |
2567 | emac_write(EMAC_RXOVERSIZED, EMAC_ALL_MULTI_REG_VALUE); | 2579 | emac_write(EMAC_RXOVERSIZED, stats_clear_mask); |
2568 | emac_write(EMAC_RXJABBER, EMAC_ALL_MULTI_REG_VALUE); | 2580 | emac_write(EMAC_RXJABBER, stats_clear_mask); |
2569 | emac_write(EMAC_RXUNDERSIZED, EMAC_ALL_MULTI_REG_VALUE); | 2581 | emac_write(EMAC_RXUNDERSIZED, stats_clear_mask); |
2570 | 2582 | ||
2571 | priv->net_dev_stats.rx_over_errors += (emac_read(EMAC_RXSOFOVERRUNS) + | 2583 | priv->net_dev_stats.rx_over_errors += (emac_read(EMAC_RXSOFOVERRUNS) + |
2572 | emac_read(EMAC_RXMOFOVERRUNS)); | 2584 | emac_read(EMAC_RXMOFOVERRUNS)); |
2573 | emac_write(EMAC_RXSOFOVERRUNS, EMAC_ALL_MULTI_REG_VALUE); | 2585 | emac_write(EMAC_RXSOFOVERRUNS, stats_clear_mask); |
2574 | emac_write(EMAC_RXMOFOVERRUNS, EMAC_ALL_MULTI_REG_VALUE); | 2586 | emac_write(EMAC_RXMOFOVERRUNS, stats_clear_mask); |
2575 | 2587 | ||
2576 | priv->net_dev_stats.rx_fifo_errors += emac_read(EMAC_RXDMAOVERRUNS); | 2588 | priv->net_dev_stats.rx_fifo_errors += emac_read(EMAC_RXDMAOVERRUNS); |
2577 | emac_write(EMAC_RXDMAOVERRUNS, EMAC_ALL_MULTI_REG_VALUE); | 2589 | emac_write(EMAC_RXDMAOVERRUNS, stats_clear_mask); |
2578 | 2590 | ||
2579 | priv->net_dev_stats.tx_carrier_errors += | 2591 | priv->net_dev_stats.tx_carrier_errors += |
2580 | emac_read(EMAC_TXCARRIERSENSE); | 2592 | emac_read(EMAC_TXCARRIERSENSE); |
2581 | emac_write(EMAC_TXCARRIERSENSE, EMAC_ALL_MULTI_REG_VALUE); | 2593 | emac_write(EMAC_TXCARRIERSENSE, stats_clear_mask); |
2582 | 2594 | ||
2583 | priv->net_dev_stats.tx_fifo_errors = emac_read(EMAC_TXUNDERRUN); | 2595 | priv->net_dev_stats.tx_fifo_errors = emac_read(EMAC_TXUNDERRUN); |
2584 | emac_write(EMAC_TXUNDERRUN, EMAC_ALL_MULTI_REG_VALUE); | 2596 | emac_write(EMAC_TXUNDERRUN, stats_clear_mask); |
2585 | 2597 | ||
2586 | return &priv->net_dev_stats; | 2598 | return &priv->net_dev_stats; |
2587 | } | 2599 | } |
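The davinci_emac change stops writing the statistics clear value unconditionally: the write-one-to-clear mask is applied only while GMIIEN is set in MACCONTROL, otherwise zero is written and the counters are left untouched. A compressed, ordinary-C model of that accumulate-then-clear step (the GMIIEN bit position and register values here are placeholders):

#include <stdint.h>
#include <stdio.h>

#define MACCONTROL_GMIIEN  (1u << 5)     /* placeholder bit, not the real layout */
#define STATS_CLR_MASK     0xffffffffu

int main(void)
{
	uint32_t mac_control = MACCONTROL_GMIIEN;  /* pretend the MAC is enabled */
	uint32_t rx_mcast_reg = 42;                /* pretend hardware counter   */
	uint64_t multicast = 0;

	/* Pick the write value once: write-one-to-clear only while GMIIEN is
	 * set, otherwise 0 so the (disabled) block is not poked. */
	uint32_t clear = (mac_control & MACCONTROL_GMIIEN) ? STATS_CLR_MASK : 0;

	/* Accumulate into the software stats; in the driver the clear value is
	 * then written back with emac_write(EMAC_RXMCASTFRAMES, clear). */
	multicast += rx_mcast_reg;

	printf("multicast=%llu, clear value=0x%08x\n",
	       (unsigned long long)multicast, (unsigned int)clear);
	return 0;
}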
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index 96f5b2a2d2c5..9c950bb5e90c 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -664,7 +664,8 @@ static int ethoc_open(struct net_device *dev) | |||
664 | return ret; | 664 | return ret; |
665 | 665 | ||
666 | /* calculate the number of TX/RX buffers, maximum 128 supported */ | 666 | /* calculate the number of TX/RX buffers, maximum 128 supported */ |
667 | num_bd = min(128, (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ); | 667 | num_bd = min_t(unsigned int, |
668 | 128, (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ); | ||
668 | priv->num_tx = max(min_tx, num_bd / 4); | 669 | priv->num_tx = max(min_tx, num_bd / 4); |
669 | priv->num_rx = num_bd - priv->num_tx; | 670 | priv->num_rx = num_bd - priv->num_tx; |
670 | ethoc_write(priv, TX_BD_NUM, priv->num_tx); | 671 | ethoc_write(priv, TX_BD_NUM, priv->num_tx); |
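The ethoc fix swaps min() for min_t() because the kernel's type-checked min() will not compare an int constant against the unsigned long buffer count. The underlying hazard is the usual C promotion rule: once one operand is unsigned, a negative signed operand wraps before the comparison. A user-space illustration; the MIN/MIN_T macros below are local stand-ins, not the kernel's:

#include <stdio.h>

#define MIN(a, b)            ((a) < (b) ? (a) : (b))
/* min_t-style: force both operands to one explicit type first. */
#define MIN_T(type, a, b)    ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	unsigned long nbuf = 200;   /* e.g. (mem_end - mem_start + 1) / BUFSIZ */
	int limit = -1;             /* a signed value that can go negative     */

	/* -1 is converted to ULONG_MAX for the comparison, so the naive
	 * MIN() returns 200 instead of -1. */
	printf("naive MIN   = %lu\n", (unsigned long)MIN(limit, nbuf));

	/* Pinning the type the way min_t(unsigned int, ...) does keeps the
	 * comparison in one well-defined domain. */
	printf("typed MIN_T = %u\n", MIN_T(unsigned int, 128, nbuf));
	return 0;
}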
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index c40113f58963..66dace6d324f 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -759,12 +759,6 @@ static void mpc52xx_fec_reset(struct net_device *dev) | |||
759 | 759 | ||
760 | mpc52xx_fec_hw_init(dev); | 760 | mpc52xx_fec_hw_init(dev); |
761 | 761 | ||
762 | if (priv->phydev) { | ||
763 | phy_stop(priv->phydev); | ||
764 | phy_write(priv->phydev, MII_BMCR, BMCR_RESET); | ||
765 | phy_start(priv->phydev); | ||
766 | } | ||
767 | |||
768 | bcom_fec_rx_reset(priv->rx_dmatsk); | 762 | bcom_fec_rx_reset(priv->rx_dmatsk); |
769 | bcom_fec_tx_reset(priv->tx_dmatsk); | 763 | bcom_fec_tx_reset(priv->tx_dmatsk); |
770 | 764 | ||
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
index 31e6d62b785d..ee0f3c6d3f88 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -155,6 +155,7 @@ static struct of_device_id mpc52xx_fec_mdio_match[] = { | |||
155 | { .compatible = "mpc5200b-fec-phy", }, | 155 | { .compatible = "mpc5200b-fec-phy", }, |
156 | {} | 156 | {} |
157 | }; | 157 | }; |
158 | MODULE_DEVICE_TABLE(of, mpc52xx_fec_mdio_match); | ||
158 | 159 | ||
159 | struct of_platform_driver mpc52xx_fec_mdio_driver = { | 160 | struct of_platform_driver mpc52xx_fec_mdio_driver = { |
160 | .name = "mpc5200b-fec-phy", | 161 | .name = "mpc5200b-fec-phy", |
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 2bc2d2b20644..ec2f5034457f 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -1110,6 +1110,7 @@ static struct of_device_id fs_enet_match[] = { | |||
1110 | #endif | 1110 | #endif |
1111 | {} | 1111 | {} |
1112 | }; | 1112 | }; |
1113 | MODULE_DEVICE_TABLE(of, fs_enet_match); | ||
1113 | 1114 | ||
1114 | static struct of_platform_driver fs_enet_driver = { | 1115 | static struct of_platform_driver fs_enet_driver = { |
1115 | .name = "fs_enet", | 1116 | .name = "fs_enet", |
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c
index 93b481b0e3c7..24ff9f43a62b 100644
--- a/drivers/net/fs_enet/mii-bitbang.c
+++ b/drivers/net/fs_enet/mii-bitbang.c
@@ -221,6 +221,7 @@ static struct of_device_id fs_enet_mdio_bb_match[] = { | |||
221 | }, | 221 | }, |
222 | {}, | 222 | {}, |
223 | }; | 223 | }; |
224 | MODULE_DEVICE_TABLE(of, fs_enet_mdio_bb_match); | ||
224 | 225 | ||
225 | static struct of_platform_driver fs_enet_bb_mdio_driver = { | 226 | static struct of_platform_driver fs_enet_bb_mdio_driver = { |
226 | .name = "fsl-bb-mdio", | 227 | .name = "fsl-bb-mdio", |
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
index a2d69c1cd07e..96eba4280c5c 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -219,6 +219,7 @@ static struct of_device_id fs_enet_mdio_fec_match[] = { | |||
219 | #endif | 219 | #endif |
220 | {}, | 220 | {}, |
221 | }; | 221 | }; |
222 | MODULE_DEVICE_TABLE(of, fs_enet_mdio_fec_match); | ||
222 | 223 | ||
223 | static struct of_platform_driver fs_enet_fec_mdio_driver = { | 224 | static struct of_platform_driver fs_enet_fec_mdio_driver = { |
224 | .name = "fsl-fec-mdio", | 225 | .name = "fsl-fec-mdio", |
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index d167090248e2..6ac464866972 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -407,6 +407,7 @@ static struct of_device_id fsl_pq_mdio_match[] = { | |||
407 | }, | 407 | }, |
408 | {}, | 408 | {}, |
409 | }; | 409 | }; |
410 | MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match); | ||
410 | 411 | ||
411 | static struct of_platform_driver fsl_pq_mdio_driver = { | 412 | static struct of_platform_driver fsl_pq_mdio_driver = { |
412 | .name = "fsl-pq_mdio", | 413 | .name = "fsl-pq_mdio", |
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 1e5289ffef6f..5bf31f1509c9 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -2325,9 +2325,6 @@ static irqreturn_t gfar_error(int irq, void *dev_id) | |||
2325 | return IRQ_HANDLED; | 2325 | return IRQ_HANDLED; |
2326 | } | 2326 | } |
2327 | 2327 | ||
2328 | /* work with hotplug and coldplug */ | ||
2329 | MODULE_ALIAS("platform:fsl-gianfar"); | ||
2330 | |||
2331 | static struct of_device_id gfar_match[] = | 2328 | static struct of_device_id gfar_match[] = |
2332 | { | 2329 | { |
2333 | { | 2330 | { |
@@ -2336,6 +2333,7 @@ static struct of_device_id gfar_match[] = | |||
2336 | }, | 2333 | }, |
2337 | {}, | 2334 | {}, |
2338 | }; | 2335 | }; |
2336 | MODULE_DEVICE_TABLE(of, gfar_match); | ||
2339 | 2337 | ||
2340 | /* Structure for a device driver */ | 2338 | /* Structure for a device driver */ |
2341 | static struct of_platform_driver gfar_driver = { | 2339 | static struct of_platform_driver gfar_driver = { |
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 89c82c5e63e4..3fae87559791 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -24,6 +24,7 @@ | |||
24 | * | 24 | * |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <linux/module.h> | ||
27 | #include <linux/sched.h> | 28 | #include <linux/sched.h> |
28 | #include <linux/string.h> | 29 | #include <linux/string.h> |
29 | #include <linux/errno.h> | 30 | #include <linux/errno.h> |
@@ -443,7 +444,7 @@ static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_s | |||
443 | ret |= EMAC_MR1_TFS_2K; | 444 | ret |= EMAC_MR1_TFS_2K; |
444 | break; | 445 | break; |
445 | default: | 446 | default: |
446 | printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n", | 447 | printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n", |
447 | dev->ndev->name, tx_size); | 448 | dev->ndev->name, tx_size); |
448 | } | 449 | } |
449 | 450 | ||
@@ -470,6 +471,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_ | |||
470 | DBG2(dev, "__emac4_calc_base_mr1" NL); | 471 | DBG2(dev, "__emac4_calc_base_mr1" NL); |
471 | 472 | ||
472 | switch(tx_size) { | 473 | switch(tx_size) { |
474 | case 16384: | ||
475 | ret |= EMAC4_MR1_TFS_16K; | ||
476 | break; | ||
473 | case 4096: | 477 | case 4096: |
474 | ret |= EMAC4_MR1_TFS_4K; | 478 | ret |= EMAC4_MR1_TFS_4K; |
475 | break; | 479 | break; |
@@ -477,7 +481,7 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_ | |||
477 | ret |= EMAC4_MR1_TFS_2K; | 481 | ret |= EMAC4_MR1_TFS_2K; |
478 | break; | 482 | break; |
479 | default: | 483 | default: |
480 | printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n", | 484 | printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n", |
481 | dev->ndev->name, tx_size); | 485 | dev->ndev->name, tx_size); |
482 | } | 486 | } |
483 | 487 | ||
@@ -2985,6 +2989,7 @@ static struct of_device_id emac_match[] = | |||
2985 | }, | 2989 | }, |
2986 | {}, | 2990 | {}, |
2987 | }; | 2991 | }; |
2992 | MODULE_DEVICE_TABLE(of, emac_match); | ||
2988 | 2993 | ||
2989 | static struct of_platform_driver emac_driver = { | 2994 | static struct of_platform_driver emac_driver = { |
2990 | .name = "emac", | 2995 | .name = "emac", |
diff --git a/drivers/net/ibm_newemac/emac.h b/drivers/net/ibm_newemac/emac.h
index 0afc2cf5c52b..d34adf99fc6a 100644
--- a/drivers/net/ibm_newemac/emac.h
+++ b/drivers/net/ibm_newemac/emac.h
@@ -153,6 +153,7 @@ struct emac_regs { | |||
153 | #define EMAC4_MR1_RFS_16K 0x00280000 | 153 | #define EMAC4_MR1_RFS_16K 0x00280000 |
154 | #define EMAC4_MR1_TFS_2K 0x00020000 | 154 | #define EMAC4_MR1_TFS_2K 0x00020000 |
155 | #define EMAC4_MR1_TFS_4K 0x00030000 | 155 | #define EMAC4_MR1_TFS_4K 0x00030000 |
156 | #define EMAC4_MR1_TFS_16K 0x00050000 | ||
156 | #define EMAC4_MR1_TR 0x00008000 | 157 | #define EMAC4_MR1_TR 0x00008000 |
157 | #define EMAC4_MR1_MWSW_001 0x00001000 | 158 | #define EMAC4_MR1_MWSW_001 0x00001000 |
158 | #define EMAC4_MR1_JPSM 0x00000800 | 159 | #define EMAC4_MR1_JPSM 0x00000800 |
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index 38bf7cf2256d..c412e8026173 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -232,8 +232,11 @@ static int sa1100_irda_startup(struct sa1100_irda *si) | |||
232 | /* | 232 | /* |
233 | * Ensure that the ports for this device are setup correctly. | 233 | * Ensure that the ports for this device are setup correctly. |
234 | */ | 234 | */ |
235 | if (si->pdata->startup) | 235 | if (si->pdata->startup) { |
236 | si->pdata->startup(si->dev); | 236 | ret = si->pdata->startup(si->dev); |
237 | if (ret) | ||
238 | return ret; | ||
239 | } | ||
237 | 240 | ||
238 | /* | 241 | /* |
239 | * Configure PPC for IRDA - we want to drive TXD2 low. | 242 | * Configure PPC for IRDA - we want to drive TXD2 low. |
diff --git a/drivers/net/ixp2000/enp2611.c b/drivers/net/ixp2000/enp2611.c
index b02a981c87a8..34a6cfd17930 100644
--- a/drivers/net/ixp2000/enp2611.c
+++ b/drivers/net/ixp2000/enp2611.c
@@ -119,24 +119,9 @@ static struct ixp2400_msf_parameters enp2611_msf_parameters = | |||
119 | } | 119 | } |
120 | }; | 120 | }; |
121 | 121 | ||
122 | struct enp2611_ixpdev_priv | ||
123 | { | ||
124 | struct ixpdev_priv ixpdev_priv; | ||
125 | struct net_device_stats stats; | ||
126 | }; | ||
127 | |||
128 | static struct net_device *nds[3]; | 122 | static struct net_device *nds[3]; |
129 | static struct timer_list link_check_timer; | 123 | static struct timer_list link_check_timer; |
130 | 124 | ||
131 | static struct net_device_stats *enp2611_get_stats(struct net_device *dev) | ||
132 | { | ||
133 | struct enp2611_ixpdev_priv *ip = netdev_priv(dev); | ||
134 | |||
135 | pm3386_get_stats(ip->ixpdev_priv.channel, &(ip->stats)); | ||
136 | |||
137 | return &(ip->stats); | ||
138 | } | ||
139 | |||
140 | /* @@@ Poll the SFP moddef0 line too. */ | 125 | /* @@@ Poll the SFP moddef0 line too. */ |
141 | /* @@@ Try to use the pm3386 DOOL interrupt as well. */ | 126 | /* @@@ Try to use the pm3386 DOOL interrupt as well. */ |
142 | static void enp2611_check_link_status(unsigned long __dummy) | 127 | static void enp2611_check_link_status(unsigned long __dummy) |
@@ -203,14 +188,13 @@ static int __init enp2611_init_module(void) | |||
203 | 188 | ||
204 | ports = pm3386_port_count(); | 189 | ports = pm3386_port_count(); |
205 | for (i = 0; i < ports; i++) { | 190 | for (i = 0; i < ports; i++) { |
206 | nds[i] = ixpdev_alloc(i, sizeof(struct enp2611_ixpdev_priv)); | 191 | nds[i] = ixpdev_alloc(i, sizeof(struct ixpdev_priv)); |
207 | if (nds[i] == NULL) { | 192 | if (nds[i] == NULL) { |
208 | while (--i >= 0) | 193 | while (--i >= 0) |
209 | free_netdev(nds[i]); | 194 | free_netdev(nds[i]); |
210 | return -ENOMEM; | 195 | return -ENOMEM; |
211 | } | 196 | } |
212 | 197 | ||
213 | nds[i]->get_stats = enp2611_get_stats; | ||
214 | pm3386_init_port(i); | 198 | pm3386_init_port(i); |
215 | pm3386_get_mac(i, nds[i]->dev_addr); | 199 | pm3386_get_mac(i, nds[i]->dev_addr); |
216 | } | 200 | } |
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index 127243461a51..9aee0cc922c9 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -21,6 +21,7 @@ | |||
21 | #include "ixp2400_tx.ucode" | 21 | #include "ixp2400_tx.ucode" |
22 | #include "ixpdev_priv.h" | 22 | #include "ixpdev_priv.h" |
23 | #include "ixpdev.h" | 23 | #include "ixpdev.h" |
24 | #include "pm3386.h" | ||
24 | 25 | ||
25 | #define DRV_MODULE_VERSION "0.2" | 26 | #define DRV_MODULE_VERSION "0.2" |
26 | 27 | ||
@@ -271,6 +272,15 @@ static int ixpdev_close(struct net_device *dev) | |||
271 | return 0; | 272 | return 0; |
272 | } | 273 | } |
273 | 274 | ||
275 | static struct net_device_stats *ixpdev_get_stats(struct net_device *dev) | ||
276 | { | ||
277 | struct ixpdev_priv *ip = netdev_priv(dev); | ||
278 | |||
279 | pm3386_get_stats(ip->channel, &(dev->stats)); | ||
280 | |||
281 | return &(dev->stats); | ||
282 | } | ||
283 | |||
274 | static const struct net_device_ops ixpdev_netdev_ops = { | 284 | static const struct net_device_ops ixpdev_netdev_ops = { |
275 | .ndo_open = ixpdev_open, | 285 | .ndo_open = ixpdev_open, |
276 | .ndo_stop = ixpdev_close, | 286 | .ndo_stop = ixpdev_close, |
@@ -278,6 +288,7 @@ static const struct net_device_ops ixpdev_netdev_ops = { | |||
278 | .ndo_change_mtu = eth_change_mtu, | 288 | .ndo_change_mtu = eth_change_mtu, |
279 | .ndo_validate_addr = eth_validate_addr, | 289 | .ndo_validate_addr = eth_validate_addr, |
280 | .ndo_set_mac_address = eth_mac_addr, | 290 | .ndo_set_mac_address = eth_mac_addr, |
291 | .ndo_get_stats = ixpdev_get_stats, | ||
281 | #ifdef CONFIG_NET_POLL_CONTROLLER | 292 | #ifdef CONFIG_NET_POLL_CONTROLLER |
282 | .ndo_poll_controller = ixpdev_poll_controller, | 293 | .ndo_poll_controller = ixpdev_poll_controller, |
283 | #endif | 294 | #endif |
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 9b9eab107704..7fc15e9e8adb 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -595,7 +595,8 @@ netxen_setup_pci_map(struct netxen_adapter *adapter) | |||
595 | void __iomem *mem_ptr2 = NULL; | 595 | void __iomem *mem_ptr2 = NULL; |
596 | void __iomem *db_ptr = NULL; | 596 | void __iomem *db_ptr = NULL; |
597 | 597 | ||
598 | unsigned long mem_base, mem_len, db_base, db_len = 0, pci_len0 = 0; | 598 | resource_size_t mem_base, db_base; |
599 | unsigned long mem_len, db_len = 0, pci_len0 = 0; | ||
599 | 600 | ||
600 | struct pci_dev *pdev = adapter->pdev; | 601 | struct pci_dev *pdev = adapter->pdev; |
601 | int pci_func = adapter->ahw.pci_func; | 602 | int pci_func = adapter->ahw.pci_func; |
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index ee8ad3e180dd..b58965a2b3ae 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -251,6 +251,7 @@ static void el3_tx_timeout(struct net_device *dev); | |||
251 | static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | 251 | static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); |
252 | static const struct ethtool_ops netdev_ethtool_ops; | 252 | static const struct ethtool_ops netdev_ethtool_ops; |
253 | static void set_rx_mode(struct net_device *dev); | 253 | static void set_rx_mode(struct net_device *dev); |
254 | static void set_multicast_list(struct net_device *dev); | ||
254 | 255 | ||
255 | static void tc574_detach(struct pcmcia_device *p_dev); | 256 | static void tc574_detach(struct pcmcia_device *p_dev); |
256 | 257 | ||
@@ -266,7 +267,7 @@ static const struct net_device_ops el3_netdev_ops = { | |||
266 | .ndo_tx_timeout = el3_tx_timeout, | 267 | .ndo_tx_timeout = el3_tx_timeout, |
267 | .ndo_get_stats = el3_get_stats, | 268 | .ndo_get_stats = el3_get_stats, |
268 | .ndo_do_ioctl = el3_ioctl, | 269 | .ndo_do_ioctl = el3_ioctl, |
269 | .ndo_set_multicast_list = set_rx_mode, | 270 | .ndo_set_multicast_list = set_multicast_list, |
270 | .ndo_change_mtu = eth_change_mtu, | 271 | .ndo_change_mtu = eth_change_mtu, |
271 | .ndo_set_mac_address = eth_mac_addr, | 272 | .ndo_set_mac_address = eth_mac_addr, |
272 | .ndo_validate_addr = eth_validate_addr, | 273 | .ndo_validate_addr = eth_validate_addr, |
@@ -1161,6 +1162,16 @@ static void set_rx_mode(struct net_device *dev) | |||
1161 | outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD); | 1162 | outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD); |
1162 | } | 1163 | } |
1163 | 1164 | ||
1165 | static void set_multicast_list(struct net_device *dev) | ||
1166 | { | ||
1167 | struct el3_private *lp = netdev_priv(dev); | ||
1168 | unsigned long flags; | ||
1169 | |||
1170 | spin_lock_irqsave(&lp->window_lock, flags); | ||
1171 | set_rx_mode(dev); | ||
1172 | spin_unlock_irqrestore(&lp->window_lock, flags); | ||
1173 | } | ||
1174 | |||
1164 | static int el3_close(struct net_device *dev) | 1175 | static int el3_close(struct net_device *dev) |
1165 | { | 1176 | { |
1166 | unsigned int ioaddr = dev->base_addr; | 1177 | unsigned int ioaddr = dev->base_addr; |
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 250e10f2c35b..8659d341e769 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -238,6 +238,7 @@ static struct of_device_id mdio_ofgpio_match[] = { | |||
238 | }, | 238 | }, |
239 | {}, | 239 | {}, |
240 | }; | 240 | }; |
241 | MODULE_DEVICE_TABLE(of, mdio_ofgpio_match); | ||
241 | 242 | ||
242 | static struct of_platform_driver mdio_ofgpio_driver = { | 243 | static struct of_platform_driver mdio_ofgpio_driver = { |
243 | .name = "mdio-gpio", | 244 | .name = "mdio-gpio", |
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 3ec6e85587a2..e7285f01bd04 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -803,6 +803,12 @@ enum { | |||
803 | MB_CMD_SET_PORT_CFG = 0x00000122, | 803 | MB_CMD_SET_PORT_CFG = 0x00000122, |
804 | MB_CMD_GET_PORT_CFG = 0x00000123, | 804 | MB_CMD_GET_PORT_CFG = 0x00000123, |
805 | MB_CMD_GET_LINK_STS = 0x00000124, | 805 | MB_CMD_GET_LINK_STS = 0x00000124, |
806 | MB_CMD_SET_MGMNT_TFK_CTL = 0x00000160, /* Set Mgmnt Traffic Control */ | ||
807 | MB_SET_MPI_TFK_STOP = (1 << 0), | ||
808 | MB_SET_MPI_TFK_RESUME = (1 << 1), | ||
809 | MB_CMD_GET_MGMNT_TFK_CTL = 0x00000161, /* Get Mgmnt Traffic Control */ | ||
810 | MB_GET_MPI_TFK_STOPPED = (1 << 0), | ||
811 | MB_GET_MPI_TFK_FIFO_EMPTY = (1 << 1), | ||
806 | 812 | ||
807 | /* Mailbox Command Status. */ | 813 | /* Mailbox Command Status. */ |
808 | MB_CMD_STS_GOOD = 0x00004000, /* Success. */ | 814 | MB_CMD_STS_GOOD = 0x00004000, /* Success. */ |
@@ -1168,7 +1174,7 @@ struct ricb { | |||
1168 | #define RSS_RI6 0x40 | 1174 | #define RSS_RI6 0x40 |
1169 | #define RSS_RT6 0x80 | 1175 | #define RSS_RT6 0x80 |
1170 | __le16 mask; | 1176 | __le16 mask; |
1171 | __le32 hash_cq_id[256]; | 1177 | u8 hash_cq_id[1024]; |
1172 | __le32 ipv6_hash_key[10]; | 1178 | __le32 ipv6_hash_key[10]; |
1173 | __le32 ipv4_hash_key[4]; | 1179 | __le32 ipv4_hash_key[4]; |
1174 | } __attribute((packed)); | 1180 | } __attribute((packed)); |
@@ -1606,6 +1612,8 @@ int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data); | |||
1606 | int ql_mb_about_fw(struct ql_adapter *qdev); | 1612 | int ql_mb_about_fw(struct ql_adapter *qdev); |
1607 | void ql_link_on(struct ql_adapter *qdev); | 1613 | void ql_link_on(struct ql_adapter *qdev); |
1608 | void ql_link_off(struct ql_adapter *qdev); | 1614 | void ql_link_off(struct ql_adapter *qdev); |
1615 | int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control); | ||
1616 | int ql_wait_fifo_empty(struct ql_adapter *qdev); | ||
1609 | 1617 | ||
1610 | #if 1 | 1618 | #if 1 |
1611 | #define QL_ALL_DUMP | 1619 | #define QL_ALL_DUMP |
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 61680715cde0..48b45df85ec9 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -320,6 +320,37 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type, | |||
320 | 320 | ||
321 | switch (type) { | 321 | switch (type) { |
322 | case MAC_ADDR_TYPE_MULTI_MAC: | 322 | case MAC_ADDR_TYPE_MULTI_MAC: |
323 | { | ||
324 | u32 upper = (addr[0] << 8) | addr[1]; | ||
325 | u32 lower = (addr[2] << 24) | (addr[3] << 16) | | ||
326 | (addr[4] << 8) | (addr[5]); | ||
327 | |||
328 | status = | ||
329 | ql_wait_reg_rdy(qdev, | ||
330 | MAC_ADDR_IDX, MAC_ADDR_MW, 0); | ||
331 | if (status) | ||
332 | goto exit; | ||
333 | ql_write32(qdev, MAC_ADDR_IDX, (offset++) | | ||
334 | (index << MAC_ADDR_IDX_SHIFT) | | ||
335 | type | MAC_ADDR_E); | ||
336 | ql_write32(qdev, MAC_ADDR_DATA, lower); | ||
337 | status = | ||
338 | ql_wait_reg_rdy(qdev, | ||
339 | MAC_ADDR_IDX, MAC_ADDR_MW, 0); | ||
340 | if (status) | ||
341 | goto exit; | ||
342 | ql_write32(qdev, MAC_ADDR_IDX, (offset++) | | ||
343 | (index << MAC_ADDR_IDX_SHIFT) | | ||
344 | type | MAC_ADDR_E); | ||
345 | |||
346 | ql_write32(qdev, MAC_ADDR_DATA, upper); | ||
347 | status = | ||
348 | ql_wait_reg_rdy(qdev, | ||
349 | MAC_ADDR_IDX, MAC_ADDR_MW, 0); | ||
350 | if (status) | ||
351 | goto exit; | ||
352 | break; | ||
353 | } | ||
323 | case MAC_ADDR_TYPE_CAM_MAC: | 354 | case MAC_ADDR_TYPE_CAM_MAC: |
324 | { | 355 | { |
325 | u32 cam_output; | 356 | u32 cam_output; |
@@ -365,16 +396,14 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type, | |||
365 | and possibly the function id. Right now we hardcode | 396 | and possibly the function id. Right now we hardcode |
366 | the route field to NIC core. | 397 | the route field to NIC core. |
367 | */ | 398 | */ |
368 | if (type == MAC_ADDR_TYPE_CAM_MAC) { | 399 | cam_output = (CAM_OUT_ROUTE_NIC | |
369 | cam_output = (CAM_OUT_ROUTE_NIC | | 400 | (qdev-> |
370 | (qdev-> | 401 | func << CAM_OUT_FUNC_SHIFT) | |
371 | func << CAM_OUT_FUNC_SHIFT) | | 402 | (0 << CAM_OUT_CQ_ID_SHIFT)); |
372 | (0 << CAM_OUT_CQ_ID_SHIFT)); | 403 | if (qdev->vlgrp) |
373 | if (qdev->vlgrp) | 404 | cam_output |= CAM_OUT_RV; |
374 | cam_output |= CAM_OUT_RV; | 405 | /* route to NIC core */ |
375 | /* route to NIC core */ | 406 | ql_write32(qdev, MAC_ADDR_DATA, cam_output); |
376 | ql_write32(qdev, MAC_ADDR_DATA, cam_output); | ||
377 | } | ||
378 | break; | 407 | break; |
379 | } | 408 | } |
380 | case MAC_ADDR_TYPE_VLAN: | 409 | case MAC_ADDR_TYPE_VLAN: |
@@ -546,14 +575,14 @@ static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask, | |||
546 | } | 575 | } |
547 | case RT_IDX_MCAST: /* Pass up All Multicast frames. */ | 576 | case RT_IDX_MCAST: /* Pass up All Multicast frames. */ |
548 | { | 577 | { |
549 | value = RT_IDX_DST_CAM_Q | /* dest */ | 578 | value = RT_IDX_DST_DFLT_Q | /* dest */ |
550 | RT_IDX_TYPE_NICQ | /* type */ | 579 | RT_IDX_TYPE_NICQ | /* type */ |
551 | (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */ | 580 | (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */ |
552 | break; | 581 | break; |
553 | } | 582 | } |
554 | case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */ | 583 | case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */ |
555 | { | 584 | { |
556 | value = RT_IDX_DST_CAM_Q | /* dest */ | 585 | value = RT_IDX_DST_DFLT_Q | /* dest */ |
557 | RT_IDX_TYPE_NICQ | /* type */ | 586 | RT_IDX_TYPE_NICQ | /* type */ |
558 | (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */ | 587 | (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */ |
559 | break; | 588 | break; |
@@ -3077,6 +3106,12 @@ err_irq: | |||
3077 | 3106 | ||
3078 | static int ql_start_rss(struct ql_adapter *qdev) | 3107 | static int ql_start_rss(struct ql_adapter *qdev) |
3079 | { | 3108 | { |
3109 | u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, | ||
3110 | 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, | ||
3111 | 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, | ||
3112 | 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, | ||
3113 | 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, | ||
3114 | 0xbe, 0xac, 0x01, 0xfa}; | ||
3080 | struct ricb *ricb = &qdev->ricb; | 3115 | struct ricb *ricb = &qdev->ricb; |
3081 | int status = 0; | 3116 | int status = 0; |
3082 | int i; | 3117 | int i; |
@@ -3086,21 +3121,17 @@ static int ql_start_rss(struct ql_adapter *qdev) | |||
3086 | 3121 | ||
3087 | ricb->base_cq = RSS_L4K; | 3122 | ricb->base_cq = RSS_L4K; |
3088 | ricb->flags = | 3123 | ricb->flags = |
3089 | (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 | | 3124 | (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6); |
3090 | RSS_RT6); | 3125 | ricb->mask = cpu_to_le16((u16)(0x3ff)); |
3091 | ricb->mask = cpu_to_le16(qdev->rss_ring_count - 1); | ||
3092 | 3126 | ||
3093 | /* | 3127 | /* |
3094 | * Fill out the Indirection Table. | 3128 | * Fill out the Indirection Table. |
3095 | */ | 3129 | */ |
3096 | for (i = 0; i < 256; i++) | 3130 | for (i = 0; i < 1024; i++) |
3097 | hash_id[i] = i & (qdev->rss_ring_count - 1); | 3131 | hash_id[i] = (i & (qdev->rss_ring_count - 1)); |
3098 | 3132 | ||
3099 | /* | 3133 | memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40); |
3100 | * Random values for the IPv6 and IPv4 Hash Keys. | 3134 | memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16); |
3101 | */ | ||
3102 | get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40); | ||
3103 | get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16); | ||
3104 | 3135 | ||
3105 | QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n"); | 3136 | QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n"); |
3106 | 3137 | ||
@@ -3239,6 +3270,13 @@ static int ql_adapter_initialize(struct ql_adapter *qdev) | |||
3239 | ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP | | 3270 | ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP | |
3240 | min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE)); | 3271 | min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE)); |
3241 | 3272 | ||
3273 | /* Set RX packet routing to use port/pci function on which the | ||
3274 | * packet arrived on in addition to usual frame routing. | ||
3275 | * This is helpful on bonding where both interfaces can have | ||
3276 | * the same MAC address. | ||
3277 | */ | ||
3278 | ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ); | ||
3279 | |||
3242 | /* Start up the rx queues. */ | 3280 | /* Start up the rx queues. */ |
3243 | for (i = 0; i < qdev->rx_ring_count; i++) { | 3281 | for (i = 0; i < qdev->rx_ring_count; i++) { |
3244 | status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]); | 3282 | status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]); |
@@ -3311,6 +3349,13 @@ static int ql_adapter_reset(struct ql_adapter *qdev) | |||
3311 | 3349 | ||
3312 | end_jiffies = jiffies + | 3350 | end_jiffies = jiffies + |
3313 | max((unsigned long)1, usecs_to_jiffies(30)); | 3351 | max((unsigned long)1, usecs_to_jiffies(30)); |
3352 | |||
3353 | /* Stop management traffic. */ | ||
3354 | ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP); | ||
3355 | |||
3356 | /* Wait for the NIC and MGMNT FIFOs to empty. */ | ||
3357 | ql_wait_fifo_empty(qdev); | ||
3358 | |||
3314 | ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR); | 3359 | ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR); |
3315 | 3360 | ||
3316 | do { | 3361 | do { |
@@ -3326,6 +3371,8 @@ static int ql_adapter_reset(struct ql_adapter *qdev) | |||
3326 | status = -ETIMEDOUT; | 3371 | status = -ETIMEDOUT; |
3327 | } | 3372 | } |
3328 | 3373 | ||
3374 | /* Resume management traffic. */ | ||
3375 | ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME); | ||
3329 | return status; | 3376 | return status; |
3330 | } | 3377 | } |
3331 | 3378 | ||
@@ -3704,6 +3751,12 @@ static void ql_asic_reset_work(struct work_struct *work) | |||
3704 | status = ql_adapter_up(qdev); | 3751 | status = ql_adapter_up(qdev); |
3705 | if (status) | 3752 | if (status) |
3706 | goto error; | 3753 | goto error; |
3754 | |||
3755 | /* Restore rx mode. */ | ||
3756 | clear_bit(QL_ALLMULTI, &qdev->flags); | ||
3757 | clear_bit(QL_PROMISCUOUS, &qdev->flags); | ||
3758 | qlge_set_multicast_list(qdev->ndev); | ||
3759 | |||
3707 | rtnl_unlock(); | 3760 | rtnl_unlock(); |
3708 | return; | 3761 | return; |
3709 | error: | 3762 | error: |
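The ql_start_rss() rework above drops the per-boot random hash keys in favour of a fixed 40-byte key and widens the indirection table to 1024 byte-sized entries, each filled with i & (rss_ring_count - 1), which assumes a power-of-two ring count. A quick stand-alone model of that table fill and the resulting spread:

#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE 1024

int main(void)
{
	uint8_t hash_id[TABLE_SIZE];
	unsigned int rss_ring_count = 8;    /* must be a power of two */
	unsigned int per_ring[8] = { 0 };
	unsigned int i;

	/* Spread the 1024 hash buckets round-robin across the RX rings,
	 * exactly like the loop in ql_start_rss(). */
	for (i = 0; i < TABLE_SIZE; i++)
		hash_id[i] = i & (rss_ring_count - 1);

	/* Each ring should own TABLE_SIZE / rss_ring_count buckets. */
	for (i = 0; i < TABLE_SIZE; i++)
		per_ring[hash_id[i]]++;
	for (i = 0; i < rss_ring_count; i++)
		printf("ring %u: %u buckets\n", i, per_ring[i]);
	return 0;
}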
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index c2e43073047e..99e58e3f8e22 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -768,6 +768,95 @@ static int ql_idc_wait(struct ql_adapter *qdev) | |||
768 | return status; | 768 | return status; |
769 | } | 769 | } |
770 | 770 | ||
771 | int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control) | ||
772 | { | ||
773 | struct mbox_params mbc; | ||
774 | struct mbox_params *mbcp = &mbc; | ||
775 | int status; | ||
776 | |||
777 | memset(mbcp, 0, sizeof(struct mbox_params)); | ||
778 | |||
779 | mbcp->in_count = 1; | ||
780 | mbcp->out_count = 2; | ||
781 | |||
782 | mbcp->mbox_in[0] = MB_CMD_SET_MGMNT_TFK_CTL; | ||
783 | mbcp->mbox_in[1] = control; | ||
784 | |||
785 | status = ql_mailbox_command(qdev, mbcp); | ||
786 | if (status) | ||
787 | return status; | ||
788 | |||
789 | if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD) | ||
790 | return status; | ||
791 | |||
792 | if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) { | ||
793 | QPRINTK(qdev, DRV, ERR, | ||
794 | "Command not supported by firmware.\n"); | ||
795 | status = -EINVAL; | ||
796 | } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) { | ||
797 | /* This indicates that the firmware is | ||
798 | * already in the state we are trying to | ||
799 | * change it to. | ||
800 | */ | ||
801 | QPRINTK(qdev, DRV, ERR, | ||
802 | "Command parameters make no change.\n"); | ||
803 | } | ||
804 | return status; | ||
805 | } | ||
806 | |||
807 | /* Returns a negative error code or the mailbox command status. */ | ||
808 | static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control) | ||
809 | { | ||
810 | struct mbox_params mbc; | ||
811 | struct mbox_params *mbcp = &mbc; | ||
812 | int status; | ||
813 | |||
814 | memset(mbcp, 0, sizeof(struct mbox_params)); | ||
815 | *control = 0; | ||
816 | |||
817 | mbcp->in_count = 1; | ||
818 | mbcp->out_count = 1; | ||
819 | |||
820 | mbcp->mbox_in[0] = MB_CMD_GET_MGMNT_TFK_CTL; | ||
821 | |||
822 | status = ql_mailbox_command(qdev, mbcp); | ||
823 | if (status) | ||
824 | return status; | ||
825 | |||
826 | if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD) { | ||
827 | *control = mbcp->mbox_in[1]; | ||
828 | return status; | ||
829 | } | ||
830 | |||
831 | if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) { | ||
832 | QPRINTK(qdev, DRV, ERR, | ||
833 | "Command not supported by firmware.\n"); | ||
834 | status = -EINVAL; | ||
835 | } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) { | ||
836 | QPRINTK(qdev, DRV, ERR, | ||
837 | "Failed to get MPI traffic control.\n"); | ||
838 | status = -EIO; | ||
839 | } | ||
840 | return status; | ||
841 | } | ||
842 | |||
843 | int ql_wait_fifo_empty(struct ql_adapter *qdev) | ||
844 | { | ||
845 | int count = 5; | ||
846 | u32 mgmnt_fifo_empty; | ||
847 | u32 nic_fifo_empty; | ||
848 | |||
849 | do { | ||
850 | nic_fifo_empty = ql_read32(qdev, STS) & STS_NFE; | ||
851 | ql_mb_get_mgmnt_traffic_ctl(qdev, &mgmnt_fifo_empty); | ||
852 | mgmnt_fifo_empty &= MB_GET_MPI_TFK_FIFO_EMPTY; | ||
853 | if (nic_fifo_empty && mgmnt_fifo_empty) | ||
854 | return 0; | ||
855 | msleep(100); | ||
856 | } while (count-- > 0); | ||
857 | return -ETIMEDOUT; | ||
858 | } | ||
859 | |||
771 | /* API called in work thread context to set new TX/RX | 860 | /* API called in work thread context to set new TX/RX |
772 | * maximum frame size values to match MTU. | 861 | * maximum frame size values to match MTU. |
773 | */ | 862 | */ |
@@ -876,6 +965,8 @@ void ql_mpi_work(struct work_struct *work) | |||
876 | int err = 0; | 965 | int err = 0; |
877 | 966 | ||
878 | rtnl_lock(); | 967 | rtnl_lock(); |
968 | /* Begin polled mode for MPI */ | ||
969 | ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); | ||
879 | 970 | ||
880 | while (ql_read32(qdev, STS) & STS_PI) { | 971 | while (ql_read32(qdev, STS) & STS_PI) { |
881 | memset(mbcp, 0, sizeof(struct mbox_params)); | 972 | memset(mbcp, 0, sizeof(struct mbox_params)); |
@@ -888,6 +979,8 @@ void ql_mpi_work(struct work_struct *work) | |||
888 | break; | 979 | break; |
889 | } | 980 | } |
890 | 981 | ||
982 | /* End polled mode for MPI */ | ||
983 | ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); | ||
891 | rtnl_unlock(); | 984 | rtnl_unlock(); |
892 | ql_enable_completion_interrupt(qdev, 0); | 985 | ql_enable_completion_interrupt(qdev, 0); |
893 | } | 986 | } |
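The two new mailbox commands (MB_CMD_SET_MGMNT_TFK_CTL / MB_CMD_GET_MGMNT_TFK_CTL) plus ql_wait_fifo_empty() give the driver a way to pause MPI management traffic and confirm that both the NIC and management FIFOs have drained, while ql_mpi_work() now brackets its mailbox polling between writes that mask and then re-enable the MPI (PI) interrupt. A minimal sketch of how a reset path could combine the new helpers is shown below; it is not part of this patch, and the control flag name is an assumption (the real definition lives in qlge.h).

	/* Hypothetical caller, not from this patch.  MB_SET_MPI_TFK_STOP is an
	 * assumed name for the "stop management traffic" control value.
	 */
	static int ql_quiesce_mpi_traffic(struct ql_adapter *qdev)
	{
		int status;

		/* Ask the MPI firmware to stop generating management traffic. */
		status = ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
		if (status)
			return status;

		/* Poll (5 x 100 ms, as in ql_wait_fifo_empty()) until both the
		 * NIC and management FIFOs report empty.
		 */
		return ql_wait_fifo_empty(qdev);
	}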
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 50c6a3cfe439..83c47d95c3aa 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -115,7 +115,9 @@ enum mac_version { | |||
115 | RTL_GIGA_MAC_VER_22 = 0x16, // 8168C | 115 | RTL_GIGA_MAC_VER_22 = 0x16, // 8168C |
116 | RTL_GIGA_MAC_VER_23 = 0x17, // 8168CP | 116 | RTL_GIGA_MAC_VER_23 = 0x17, // 8168CP |
117 | RTL_GIGA_MAC_VER_24 = 0x18, // 8168CP | 117 | RTL_GIGA_MAC_VER_24 = 0x18, // 8168CP |
118 | RTL_GIGA_MAC_VER_25 = 0x19 // 8168D | 118 | RTL_GIGA_MAC_VER_25 = 0x19, // 8168D |
119 | RTL_GIGA_MAC_VER_26 = 0x1a, // 8168D | ||
120 | RTL_GIGA_MAC_VER_27 = 0x1b // 8168DP | ||
119 | }; | 121 | }; |
120 | 122 | ||
121 | #define _R(NAME,MAC,MASK) \ | 123 | #define _R(NAME,MAC,MASK) \ |
@@ -150,7 +152,9 @@ static const struct { | |||
150 | _R("RTL8168c/8111c", RTL_GIGA_MAC_VER_22, 0xff7e1880), // PCI-E | 152 | _R("RTL8168c/8111c", RTL_GIGA_MAC_VER_22, 0xff7e1880), // PCI-E |
151 | _R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_23, 0xff7e1880), // PCI-E | 153 | _R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_23, 0xff7e1880), // PCI-E |
152 | _R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_24, 0xff7e1880), // PCI-E | 154 | _R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_24, 0xff7e1880), // PCI-E |
153 | _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_25, 0xff7e1880) // PCI-E | 155 | _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_25, 0xff7e1880), // PCI-E |
156 | _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_26, 0xff7e1880), // PCI-E | ||
157 | _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_27, 0xff7e1880) // PCI-E | ||
154 | }; | 158 | }; |
155 | #undef _R | 159 | #undef _R |
156 | 160 | ||
@@ -253,6 +257,13 @@ enum rtl8168_8101_registers { | |||
253 | DBG_REG = 0xd1, | 257 | DBG_REG = 0xd1, |
254 | #define FIX_NAK_1 (1 << 4) | 258 | #define FIX_NAK_1 (1 << 4) |
255 | #define FIX_NAK_2 (1 << 3) | 259 | #define FIX_NAK_2 (1 << 3) |
260 | EFUSEAR = 0xdc, | ||
261 | #define EFUSEAR_FLAG 0x80000000 | ||
262 | #define EFUSEAR_WRITE_CMD 0x80000000 | ||
263 | #define EFUSEAR_READ_CMD 0x00000000 | ||
264 | #define EFUSEAR_REG_MASK 0x03ff | ||
265 | #define EFUSEAR_REG_SHIFT 8 | ||
266 | #define EFUSEAR_DATA_MASK 0xff | ||
256 | }; | 267 | }; |
257 | 268 | ||
258 | enum rtl_register_content { | 269 | enum rtl_register_content { |
@@ -568,6 +579,14 @@ static void mdio_patch(void __iomem *ioaddr, int reg_addr, int value) | |||
568 | mdio_write(ioaddr, reg_addr, mdio_read(ioaddr, reg_addr) | value); | 579 | mdio_write(ioaddr, reg_addr, mdio_read(ioaddr, reg_addr) | value); |
569 | } | 580 | } |
570 | 581 | ||
582 | static void mdio_plus_minus(void __iomem *ioaddr, int reg_addr, int p, int m) | ||
583 | { | ||
584 | int val; | ||
585 | |||
586 | val = mdio_read(ioaddr, reg_addr); | ||
587 | mdio_write(ioaddr, reg_addr, (val | p) & ~m); | ||
588 | } | ||
589 | |||
571 | static void rtl_mdio_write(struct net_device *dev, int phy_id, int location, | 590 | static void rtl_mdio_write(struct net_device *dev, int phy_id, int location, |
572 | int val) | 591 | int val) |
573 | { | 592 | { |
@@ -651,6 +670,24 @@ static u32 rtl_csi_read(void __iomem *ioaddr, int addr) | |||
651 | return value; | 670 | return value; |
652 | } | 671 | } |
653 | 672 | ||
673 | static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr) | ||
674 | { | ||
675 | u8 value = 0xff; | ||
676 | unsigned int i; | ||
677 | |||
678 | RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT); | ||
679 | |||
680 | for (i = 0; i < 300; i++) { | ||
681 | if (RTL_R32(EFUSEAR) & EFUSEAR_FLAG) { | ||
682 | value = RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK; | ||
683 | break; | ||
684 | } | ||
685 | udelay(100); | ||
686 | } | ||
687 | |||
688 | return value; | ||
689 | } | ||
690 | |||
654 | static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr) | 691 | static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr) |
655 | { | 692 | { |
656 | RTL_W16(IntrMask, 0x0000); | 693 | RTL_W16(IntrMask, 0x0000); |
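mdio_plus_minus() is a read-modify-write convenience: it sets the bits in p and clears the bits in m of a PHY register in one call, and rtl8168d_efuse_read() polls EFUSEAR_FLAG for up to roughly 30 ms (300 x 100 us) before falling back to 0xff. For clarity, a call such as mdio_plus_minus(ioaddr, 0x0b, 0x0010, 0x00ef) is equivalent to the open-coded sequence below, shown only to illustrate the helper's semantics.

	/* Open-coded equivalent of mdio_plus_minus(ioaddr, 0x0b, 0x0010, 0x00ef) */
	int val = mdio_read(ioaddr, 0x0b);	/* read the current value          */
	val |= 0x0010;				/* "plus": set the requested bits  */
	val &= ~0x00ef;				/* "minus": clear the masked bits  */
	mdio_write(ioaddr, 0x0b, val);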
@@ -1243,7 +1280,10 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, | |||
1243 | int mac_version; | 1280 | int mac_version; |
1244 | } mac_info[] = { | 1281 | } mac_info[] = { |
1245 | /* 8168D family. */ | 1282 | /* 8168D family. */ |
1246 | { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_25 }, | 1283 | { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 }, |
1284 | { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 }, | ||
1285 | { 0x7c800000, 0x28800000, RTL_GIGA_MAC_VER_27 }, | ||
1286 | { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 }, | ||
1247 | 1287 | ||
1248 | /* 8168C family. */ | 1288 | /* 8168C family. */ |
1249 | { 0x7cf00000, 0x3ca00000, RTL_GIGA_MAC_VER_24 }, | 1289 | { 0x7cf00000, 0x3ca00000, RTL_GIGA_MAC_VER_24 }, |
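The extra 8168D entries rely on the existing table-driven identification: the TxConfig value is masked and compared against each row, so the narrower 0x7cf00000 masks added here let the individual 8168D/8168DP revisions be told apart before the broader 0x7c800000 catch-all entries match. A simplified sketch of that lookup follows (names abbreviated; treat it as an approximation of rtl8169_get_mac_version(), not a verbatim copy).

	/* Approximate shape of the lookup in rtl8169_get_mac_version() */
	u32 reg = RTL_R32(TxConfig);
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(mac_info); i++) {
		if ((reg & mac_info[i].mask) == mac_info[i].val) {
			tp->mac_version = mac_info[i].mac_version;
			break;
		}
	}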
@@ -1648,74 +1688,903 @@ static void rtl8168c_4_hw_phy_config(void __iomem *ioaddr) | |||
1648 | rtl8168c_3_hw_phy_config(ioaddr); | 1688 | rtl8168c_3_hw_phy_config(ioaddr); |
1649 | } | 1689 | } |
1650 | 1690 | ||
1651 | static void rtl8168d_hw_phy_config(void __iomem *ioaddr) | 1691 | static void rtl8168d_1_hw_phy_config(void __iomem *ioaddr) |
1652 | { | 1692 | { |
1653 | struct phy_reg phy_reg_init_0[] = { | 1693 | static struct phy_reg phy_reg_init_0[] = { |
1654 | { 0x1f, 0x0001 }, | 1694 | { 0x1f, 0x0001 }, |
1655 | { 0x09, 0x2770 }, | 1695 | { 0x06, 0x4064 }, |
1656 | { 0x08, 0x04d0 }, | 1696 | { 0x07, 0x2863 }, |
1657 | { 0x0b, 0xad15 }, | 1697 | { 0x08, 0x059c }, |
1658 | { 0x0c, 0x5bf0 }, | 1698 | { 0x09, 0x26b4 }, |
1659 | { 0x1c, 0xf101 }, | 1699 | { 0x0a, 0x6a19 }, |
1700 | { 0x0b, 0xdcc8 }, | ||
1701 | { 0x10, 0xf06d }, | ||
1702 | { 0x14, 0x7f68 }, | ||
1703 | { 0x18, 0x7fd9 }, | ||
1704 | { 0x1c, 0xf0ff }, | ||
1705 | { 0x1d, 0x3d9c }, | ||
1660 | { 0x1f, 0x0003 }, | 1706 | { 0x1f, 0x0003 }, |
1661 | { 0x14, 0x94d7 }, | 1707 | { 0x12, 0xf49f }, |
1662 | { 0x12, 0xf4d6 }, | 1708 | { 0x13, 0x070b }, |
1663 | { 0x09, 0xca0f }, | 1709 | { 0x1a, 0x05ad }, |
1664 | { 0x1f, 0x0002 }, | 1710 | { 0x14, 0x94c0 } |
1665 | { 0x0b, 0x0b10 }, | 1711 | }; |
1666 | { 0x0c, 0xd1f7 }, | 1712 | static struct phy_reg phy_reg_init_1[] = { |
1667 | { 0x1f, 0x0002 }, | ||
1668 | { 0x06, 0x5461 }, | ||
1669 | { 0x1f, 0x0002 }, | 1713 | { 0x1f, 0x0002 }, |
1670 | { 0x05, 0x6662 }, | 1714 | { 0x06, 0x5561 }, |
1715 | { 0x1f, 0x0005 }, | ||
1716 | { 0x05, 0x8332 }, | ||
1717 | { 0x06, 0x5561 } | ||
1718 | }; | ||
1719 | static struct phy_reg phy_reg_init_2[] = { | ||
1720 | { 0x1f, 0x0005 }, | ||
1721 | { 0x05, 0xffc2 }, | ||
1722 | { 0x1f, 0x0005 }, | ||
1723 | { 0x05, 0x8000 }, | ||
1724 | { 0x06, 0xf8f9 }, | ||
1725 | { 0x06, 0xfaef }, | ||
1726 | { 0x06, 0x59ee }, | ||
1727 | { 0x06, 0xf8ea }, | ||
1728 | { 0x06, 0x00ee }, | ||
1729 | { 0x06, 0xf8eb }, | ||
1730 | { 0x06, 0x00e0 }, | ||
1731 | { 0x06, 0xf87c }, | ||
1732 | { 0x06, 0xe1f8 }, | ||
1733 | { 0x06, 0x7d59 }, | ||
1734 | { 0x06, 0x0fef }, | ||
1735 | { 0x06, 0x0139 }, | ||
1736 | { 0x06, 0x029e }, | ||
1737 | { 0x06, 0x06ef }, | ||
1738 | { 0x06, 0x1039 }, | ||
1739 | { 0x06, 0x089f }, | ||
1740 | { 0x06, 0x2aee }, | ||
1741 | { 0x06, 0xf8ea }, | ||
1742 | { 0x06, 0x00ee }, | ||
1743 | { 0x06, 0xf8eb }, | ||
1744 | { 0x06, 0x01e0 }, | ||
1745 | { 0x06, 0xf87c }, | ||
1746 | { 0x06, 0xe1f8 }, | ||
1747 | { 0x06, 0x7d58 }, | ||
1748 | { 0x06, 0x409e }, | ||
1749 | { 0x06, 0x0f39 }, | ||
1750 | { 0x06, 0x46aa }, | ||
1751 | { 0x06, 0x0bbf }, | ||
1752 | { 0x06, 0x8290 }, | ||
1753 | { 0x06, 0xd682 }, | ||
1754 | { 0x06, 0x9802 }, | ||
1755 | { 0x06, 0x014f }, | ||
1756 | { 0x06, 0xae09 }, | ||
1757 | { 0x06, 0xbf82 }, | ||
1758 | { 0x06, 0x98d6 }, | ||
1759 | { 0x06, 0x82a0 }, | ||
1760 | { 0x06, 0x0201 }, | ||
1761 | { 0x06, 0x4fef }, | ||
1762 | { 0x06, 0x95fe }, | ||
1763 | { 0x06, 0xfdfc }, | ||
1764 | { 0x06, 0x05f8 }, | ||
1765 | { 0x06, 0xf9fa }, | ||
1766 | { 0x06, 0xeef8 }, | ||
1767 | { 0x06, 0xea00 }, | ||
1768 | { 0x06, 0xeef8 }, | ||
1769 | { 0x06, 0xeb00 }, | ||
1770 | { 0x06, 0xe2f8 }, | ||
1771 | { 0x06, 0x7ce3 }, | ||
1772 | { 0x06, 0xf87d }, | ||
1773 | { 0x06, 0xa511 }, | ||
1774 | { 0x06, 0x1112 }, | ||
1775 | { 0x06, 0xd240 }, | ||
1776 | { 0x06, 0xd644 }, | ||
1777 | { 0x06, 0x4402 }, | ||
1778 | { 0x06, 0x8217 }, | ||
1779 | { 0x06, 0xd2a0 }, | ||
1780 | { 0x06, 0xd6aa }, | ||
1781 | { 0x06, 0xaa02 }, | ||
1782 | { 0x06, 0x8217 }, | ||
1783 | { 0x06, 0xae0f }, | ||
1784 | { 0x06, 0xa544 }, | ||
1785 | { 0x06, 0x4402 }, | ||
1786 | { 0x06, 0xae4d }, | ||
1787 | { 0x06, 0xa5aa }, | ||
1788 | { 0x06, 0xaa02 }, | ||
1789 | { 0x06, 0xae47 }, | ||
1790 | { 0x06, 0xaf82 }, | ||
1791 | { 0x06, 0x13ee }, | ||
1792 | { 0x06, 0x834e }, | ||
1793 | { 0x06, 0x00ee }, | ||
1794 | { 0x06, 0x834d }, | ||
1795 | { 0x06, 0x0fee }, | ||
1796 | { 0x06, 0x834c }, | ||
1797 | { 0x06, 0x0fee }, | ||
1798 | { 0x06, 0x834f }, | ||
1799 | { 0x06, 0x00ee }, | ||
1800 | { 0x06, 0x8351 }, | ||
1801 | { 0x06, 0x00ee }, | ||
1802 | { 0x06, 0x834a }, | ||
1803 | { 0x06, 0xffee }, | ||
1804 | { 0x06, 0x834b }, | ||
1805 | { 0x06, 0xffe0 }, | ||
1806 | { 0x06, 0x8330 }, | ||
1807 | { 0x06, 0xe183 }, | ||
1808 | { 0x06, 0x3158 }, | ||
1809 | { 0x06, 0xfee4 }, | ||
1810 | { 0x06, 0xf88a }, | ||
1811 | { 0x06, 0xe5f8 }, | ||
1812 | { 0x06, 0x8be0 }, | ||
1813 | { 0x06, 0x8332 }, | ||
1814 | { 0x06, 0xe183 }, | ||
1815 | { 0x06, 0x3359 }, | ||
1816 | { 0x06, 0x0fe2 }, | ||
1817 | { 0x06, 0x834d }, | ||
1818 | { 0x06, 0x0c24 }, | ||
1819 | { 0x06, 0x5af0 }, | ||
1820 | { 0x06, 0x1e12 }, | ||
1821 | { 0x06, 0xe4f8 }, | ||
1822 | { 0x06, 0x8ce5 }, | ||
1823 | { 0x06, 0xf88d }, | ||
1824 | { 0x06, 0xaf82 }, | ||
1825 | { 0x06, 0x13e0 }, | ||
1826 | { 0x06, 0x834f }, | ||
1827 | { 0x06, 0x10e4 }, | ||
1828 | { 0x06, 0x834f }, | ||
1829 | { 0x06, 0xe083 }, | ||
1830 | { 0x06, 0x4e78 }, | ||
1831 | { 0x06, 0x009f }, | ||
1832 | { 0x06, 0x0ae0 }, | ||
1833 | { 0x06, 0x834f }, | ||
1834 | { 0x06, 0xa010 }, | ||
1835 | { 0x06, 0xa5ee }, | ||
1836 | { 0x06, 0x834e }, | ||
1837 | { 0x06, 0x01e0 }, | ||
1838 | { 0x06, 0x834e }, | ||
1839 | { 0x06, 0x7805 }, | ||
1840 | { 0x06, 0x9e9a }, | ||
1841 | { 0x06, 0xe083 }, | ||
1842 | { 0x06, 0x4e78 }, | ||
1843 | { 0x06, 0x049e }, | ||
1844 | { 0x06, 0x10e0 }, | ||
1845 | { 0x06, 0x834e }, | ||
1846 | { 0x06, 0x7803 }, | ||
1847 | { 0x06, 0x9e0f }, | ||
1848 | { 0x06, 0xe083 }, | ||
1849 | { 0x06, 0x4e78 }, | ||
1850 | { 0x06, 0x019e }, | ||
1851 | { 0x06, 0x05ae }, | ||
1852 | { 0x06, 0x0caf }, | ||
1853 | { 0x06, 0x81f8 }, | ||
1854 | { 0x06, 0xaf81 }, | ||
1855 | { 0x06, 0xa3af }, | ||
1856 | { 0x06, 0x81dc }, | ||
1857 | { 0x06, 0xaf82 }, | ||
1858 | { 0x06, 0x13ee }, | ||
1859 | { 0x06, 0x8348 }, | ||
1860 | { 0x06, 0x00ee }, | ||
1861 | { 0x06, 0x8349 }, | ||
1862 | { 0x06, 0x00e0 }, | ||
1863 | { 0x06, 0x8351 }, | ||
1864 | { 0x06, 0x10e4 }, | ||
1865 | { 0x06, 0x8351 }, | ||
1866 | { 0x06, 0x5801 }, | ||
1867 | { 0x06, 0x9fea }, | ||
1868 | { 0x06, 0xd000 }, | ||
1869 | { 0x06, 0xd180 }, | ||
1870 | { 0x06, 0x1f66 }, | ||
1871 | { 0x06, 0xe2f8 }, | ||
1872 | { 0x06, 0xeae3 }, | ||
1873 | { 0x06, 0xf8eb }, | ||
1874 | { 0x06, 0x5af8 }, | ||
1875 | { 0x06, 0x1e20 }, | ||
1876 | { 0x06, 0xe6f8 }, | ||
1877 | { 0x06, 0xeae5 }, | ||
1878 | { 0x06, 0xf8eb }, | ||
1879 | { 0x06, 0xd302 }, | ||
1880 | { 0x06, 0xb3fe }, | ||
1881 | { 0x06, 0xe2f8 }, | ||
1882 | { 0x06, 0x7cef }, | ||
1883 | { 0x06, 0x325b }, | ||
1884 | { 0x06, 0x80e3 }, | ||
1885 | { 0x06, 0xf87d }, | ||
1886 | { 0x06, 0x9e03 }, | ||
1887 | { 0x06, 0x7dff }, | ||
1888 | { 0x06, 0xff0d }, | ||
1889 | { 0x06, 0x581c }, | ||
1890 | { 0x06, 0x551a }, | ||
1891 | { 0x06, 0x6511 }, | ||
1892 | { 0x06, 0xa190 }, | ||
1893 | { 0x06, 0xd3e2 }, | ||
1894 | { 0x06, 0x8348 }, | ||
1895 | { 0x06, 0xe383 }, | ||
1896 | { 0x06, 0x491b }, | ||
1897 | { 0x06, 0x56ab }, | ||
1898 | { 0x06, 0x08ef }, | ||
1899 | { 0x06, 0x56e6 }, | ||
1900 | { 0x06, 0x8348 }, | ||
1901 | { 0x06, 0xe783 }, | ||
1902 | { 0x06, 0x4910 }, | ||
1903 | { 0x06, 0xd180 }, | ||
1904 | { 0x06, 0x1f66 }, | ||
1905 | { 0x06, 0xa004 }, | ||
1906 | { 0x06, 0xb9e2 }, | ||
1907 | { 0x06, 0x8348 }, | ||
1908 | { 0x06, 0xe383 }, | ||
1909 | { 0x06, 0x49ef }, | ||
1910 | { 0x06, 0x65e2 }, | ||
1911 | { 0x06, 0x834a }, | ||
1912 | { 0x06, 0xe383 }, | ||
1913 | { 0x06, 0x4b1b }, | ||
1914 | { 0x06, 0x56aa }, | ||
1915 | { 0x06, 0x0eef }, | ||
1916 | { 0x06, 0x56e6 }, | ||
1917 | { 0x06, 0x834a }, | ||
1918 | { 0x06, 0xe783 }, | ||
1919 | { 0x06, 0x4be2 }, | ||
1920 | { 0x06, 0x834d }, | ||
1921 | { 0x06, 0xe683 }, | ||
1922 | { 0x06, 0x4ce0 }, | ||
1923 | { 0x06, 0x834d }, | ||
1924 | { 0x06, 0xa000 }, | ||
1925 | { 0x06, 0x0caf }, | ||
1926 | { 0x06, 0x81dc }, | ||
1927 | { 0x06, 0xe083 }, | ||
1928 | { 0x06, 0x4d10 }, | ||
1929 | { 0x06, 0xe483 }, | ||
1930 | { 0x06, 0x4dae }, | ||
1931 | { 0x06, 0x0480 }, | ||
1932 | { 0x06, 0xe483 }, | ||
1933 | { 0x06, 0x4de0 }, | ||
1934 | { 0x06, 0x834e }, | ||
1935 | { 0x06, 0x7803 }, | ||
1936 | { 0x06, 0x9e0b }, | ||
1937 | { 0x06, 0xe083 }, | ||
1938 | { 0x06, 0x4e78 }, | ||
1939 | { 0x06, 0x049e }, | ||
1940 | { 0x06, 0x04ee }, | ||
1941 | { 0x06, 0x834e }, | ||
1942 | { 0x06, 0x02e0 }, | ||
1943 | { 0x06, 0x8332 }, | ||
1944 | { 0x06, 0xe183 }, | ||
1945 | { 0x06, 0x3359 }, | ||
1946 | { 0x06, 0x0fe2 }, | ||
1947 | { 0x06, 0x834d }, | ||
1948 | { 0x06, 0x0c24 }, | ||
1949 | { 0x06, 0x5af0 }, | ||
1950 | { 0x06, 0x1e12 }, | ||
1951 | { 0x06, 0xe4f8 }, | ||
1952 | { 0x06, 0x8ce5 }, | ||
1953 | { 0x06, 0xf88d }, | ||
1954 | { 0x06, 0xe083 }, | ||
1955 | { 0x06, 0x30e1 }, | ||
1956 | { 0x06, 0x8331 }, | ||
1957 | { 0x06, 0x6801 }, | ||
1958 | { 0x06, 0xe4f8 }, | ||
1959 | { 0x06, 0x8ae5 }, | ||
1960 | { 0x06, 0xf88b }, | ||
1961 | { 0x06, 0xae37 }, | ||
1962 | { 0x06, 0xee83 }, | ||
1963 | { 0x06, 0x4e03 }, | ||
1964 | { 0x06, 0xe083 }, | ||
1965 | { 0x06, 0x4ce1 }, | ||
1966 | { 0x06, 0x834d }, | ||
1967 | { 0x06, 0x1b01 }, | ||
1968 | { 0x06, 0x9e04 }, | ||
1969 | { 0x06, 0xaaa1 }, | ||
1970 | { 0x06, 0xaea8 }, | ||
1971 | { 0x06, 0xee83 }, | ||
1972 | { 0x06, 0x4e04 }, | ||
1973 | { 0x06, 0xee83 }, | ||
1974 | { 0x06, 0x4f00 }, | ||
1975 | { 0x06, 0xaeab }, | ||
1976 | { 0x06, 0xe083 }, | ||
1977 | { 0x06, 0x4f78 }, | ||
1978 | { 0x06, 0x039f }, | ||
1979 | { 0x06, 0x14ee }, | ||
1980 | { 0x06, 0x834e }, | ||
1981 | { 0x06, 0x05d2 }, | ||
1982 | { 0x06, 0x40d6 }, | ||
1983 | { 0x06, 0x5554 }, | ||
1984 | { 0x06, 0x0282 }, | ||
1985 | { 0x06, 0x17d2 }, | ||
1986 | { 0x06, 0xa0d6 }, | ||
1987 | { 0x06, 0xba00 }, | ||
1988 | { 0x06, 0x0282 }, | ||
1989 | { 0x06, 0x17fe }, | ||
1990 | { 0x06, 0xfdfc }, | ||
1991 | { 0x06, 0x05f8 }, | ||
1992 | { 0x06, 0xe0f8 }, | ||
1993 | { 0x06, 0x60e1 }, | ||
1994 | { 0x06, 0xf861 }, | ||
1995 | { 0x06, 0x6802 }, | ||
1996 | { 0x06, 0xe4f8 }, | ||
1997 | { 0x06, 0x60e5 }, | ||
1998 | { 0x06, 0xf861 }, | ||
1999 | { 0x06, 0xe0f8 }, | ||
2000 | { 0x06, 0x48e1 }, | ||
2001 | { 0x06, 0xf849 }, | ||
2002 | { 0x06, 0x580f }, | ||
2003 | { 0x06, 0x1e02 }, | ||
2004 | { 0x06, 0xe4f8 }, | ||
2005 | { 0x06, 0x48e5 }, | ||
2006 | { 0x06, 0xf849 }, | ||
2007 | { 0x06, 0xd000 }, | ||
2008 | { 0x06, 0x0282 }, | ||
2009 | { 0x06, 0x5bbf }, | ||
2010 | { 0x06, 0x8350 }, | ||
2011 | { 0x06, 0xef46 }, | ||
2012 | { 0x06, 0xdc19 }, | ||
2013 | { 0x06, 0xddd0 }, | ||
2014 | { 0x06, 0x0102 }, | ||
2015 | { 0x06, 0x825b }, | ||
2016 | { 0x06, 0x0282 }, | ||
2017 | { 0x06, 0x77e0 }, | ||
2018 | { 0x06, 0xf860 }, | ||
2019 | { 0x06, 0xe1f8 }, | ||
2020 | { 0x06, 0x6158 }, | ||
2021 | { 0x06, 0xfde4 }, | ||
2022 | { 0x06, 0xf860 }, | ||
2023 | { 0x06, 0xe5f8 }, | ||
2024 | { 0x06, 0x61fc }, | ||
2025 | { 0x06, 0x04f9 }, | ||
2026 | { 0x06, 0xfafb }, | ||
2027 | { 0x06, 0xc6bf }, | ||
2028 | { 0x06, 0xf840 }, | ||
2029 | { 0x06, 0xbe83 }, | ||
2030 | { 0x06, 0x50a0 }, | ||
2031 | { 0x06, 0x0101 }, | ||
2032 | { 0x06, 0x071b }, | ||
2033 | { 0x06, 0x89cf }, | ||
2034 | { 0x06, 0xd208 }, | ||
2035 | { 0x06, 0xebdb }, | ||
2036 | { 0x06, 0x19b2 }, | ||
2037 | { 0x06, 0xfbff }, | ||
2038 | { 0x06, 0xfefd }, | ||
2039 | { 0x06, 0x04f8 }, | ||
2040 | { 0x06, 0xe0f8 }, | ||
2041 | { 0x06, 0x48e1 }, | ||
2042 | { 0x06, 0xf849 }, | ||
2043 | { 0x06, 0x6808 }, | ||
2044 | { 0x06, 0xe4f8 }, | ||
2045 | { 0x06, 0x48e5 }, | ||
2046 | { 0x06, 0xf849 }, | ||
2047 | { 0x06, 0x58f7 }, | ||
2048 | { 0x06, 0xe4f8 }, | ||
2049 | { 0x06, 0x48e5 }, | ||
2050 | { 0x06, 0xf849 }, | ||
2051 | { 0x06, 0xfc04 }, | ||
2052 | { 0x06, 0x4d20 }, | ||
2053 | { 0x06, 0x0002 }, | ||
2054 | { 0x06, 0x4e22 }, | ||
2055 | { 0x06, 0x0002 }, | ||
2056 | { 0x06, 0x4ddf }, | ||
2057 | { 0x06, 0xff01 }, | ||
2058 | { 0x06, 0x4edd }, | ||
2059 | { 0x06, 0xff01 }, | ||
2060 | { 0x05, 0x83d4 }, | ||
2061 | { 0x06, 0x8000 }, | ||
2062 | { 0x05, 0x83d8 }, | ||
2063 | { 0x06, 0x8051 }, | ||
2064 | { 0x02, 0x6010 }, | ||
2065 | { 0x03, 0xdc00 }, | ||
2066 | { 0x05, 0xfff6 }, | ||
2067 | { 0x06, 0x00fc }, | ||
1671 | { 0x1f, 0x0000 }, | 2068 | { 0x1f, 0x0000 }, |
1672 | { 0x14, 0x0060 }, | 2069 | |
1673 | { 0x1f, 0x0000 }, | 2070 | { 0x1f, 0x0000 }, |
1674 | { 0x0d, 0xf8a0 }, | 2071 | { 0x0d, 0xf880 }, |
2072 | { 0x1f, 0x0000 } | ||
2073 | }; | ||
2074 | |||
2075 | rtl_phy_write(ioaddr, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); | ||
2076 | |||
2077 | mdio_write(ioaddr, 0x1f, 0x0002); | ||
2078 | mdio_plus_minus(ioaddr, 0x0b, 0x0010, 0x00ef); | ||
2079 | mdio_plus_minus(ioaddr, 0x0c, 0xa200, 0x5d00); | ||
2080 | |||
2081 | rtl_phy_write(ioaddr, phy_reg_init_1, ARRAY_SIZE(phy_reg_init_1)); | ||
2082 | |||
2083 | if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) { | ||
2084 | struct phy_reg phy_reg_init[] = { | ||
2085 | { 0x1f, 0x0002 }, | ||
2086 | { 0x05, 0x669a }, | ||
2087 | { 0x1f, 0x0005 }, | ||
2088 | { 0x05, 0x8330 }, | ||
2089 | { 0x06, 0x669a }, | ||
2090 | { 0x1f, 0x0002 } | ||
2091 | }; | ||
2092 | int val; | ||
2093 | |||
2094 | rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); | ||
2095 | |||
2096 | val = mdio_read(ioaddr, 0x0d); | ||
2097 | |||
2098 | if ((val & 0x00ff) != 0x006c) { | ||
2099 | u32 set[] = { | ||
2100 | 0x0065, 0x0066, 0x0067, 0x0068, | ||
2101 | 0x0069, 0x006a, 0x006b, 0x006c | ||
2102 | }; | ||
2103 | int i; | ||
2104 | |||
2105 | mdio_write(ioaddr, 0x1f, 0x0002); | ||
2106 | |||
2107 | val &= 0xff00; | ||
2108 | for (i = 0; i < ARRAY_SIZE(set); i++) | ||
2109 | mdio_write(ioaddr, 0x0d, val | set[i]); | ||
2110 | } | ||
2111 | } else { | ||
2112 | struct phy_reg phy_reg_init[] = { | ||
2113 | { 0x1f, 0x0002 }, | ||
2114 | { 0x05, 0x6662 }, | ||
2115 | { 0x1f, 0x0005 }, | ||
2116 | { 0x05, 0x8330 }, | ||
2117 | { 0x06, 0x6662 } | ||
2118 | }; | ||
2119 | |||
2120 | rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); | ||
2121 | } | ||
2122 | |||
2123 | mdio_write(ioaddr, 0x1f, 0x0002); | ||
2124 | mdio_patch(ioaddr, 0x0d, 0x0300); | ||
2125 | mdio_patch(ioaddr, 0x0f, 0x0010); | ||
2126 | |||
2127 | mdio_write(ioaddr, 0x1f, 0x0002); | ||
2128 | mdio_plus_minus(ioaddr, 0x02, 0x0100, 0x0600); | ||
2129 | mdio_plus_minus(ioaddr, 0x03, 0x0000, 0xe000); | ||
2130 | |||
2131 | rtl_phy_write(ioaddr, phy_reg_init_2, ARRAY_SIZE(phy_reg_init_2)); | ||
2132 | } | ||
2133 | |||
2134 | static void rtl8168d_2_hw_phy_config(void __iomem *ioaddr) | ||
2135 | { | ||
2136 | static struct phy_reg phy_reg_init_0[] = { | ||
2137 | { 0x1f, 0x0001 }, | ||
2138 | { 0x06, 0x4064 }, | ||
2139 | { 0x07, 0x2863 }, | ||
2140 | { 0x08, 0x059c }, | ||
2141 | { 0x09, 0x26b4 }, | ||
2142 | { 0x0a, 0x6a19 }, | ||
2143 | { 0x0b, 0xdcc8 }, | ||
2144 | { 0x10, 0xf06d }, | ||
2145 | { 0x14, 0x7f68 }, | ||
2146 | { 0x18, 0x7fd9 }, | ||
2147 | { 0x1c, 0xf0ff }, | ||
2148 | { 0x1d, 0x3d9c }, | ||
2149 | { 0x1f, 0x0003 }, | ||
2150 | { 0x12, 0xf49f }, | ||
2151 | { 0x13, 0x070b }, | ||
2152 | { 0x1a, 0x05ad }, | ||
2153 | { 0x14, 0x94c0 }, | ||
2154 | |||
2155 | { 0x1f, 0x0002 }, | ||
2156 | { 0x06, 0x5561 }, | ||
2157 | { 0x1f, 0x0005 }, | ||
2158 | { 0x05, 0x8332 }, | ||
2159 | { 0x06, 0x5561 } | ||
2160 | }; | ||
2161 | static struct phy_reg phy_reg_init_1[] = { | ||
2162 | { 0x1f, 0x0005 }, | ||
2163 | { 0x05, 0xffc2 }, | ||
1675 | { 0x1f, 0x0005 }, | 2164 | { 0x1f, 0x0005 }, |
1676 | { 0x05, 0xffc2 } | 2165 | { 0x05, 0x8000 }, |
2166 | { 0x06, 0xf8f9 }, | ||
2167 | { 0x06, 0xfaee }, | ||
2168 | { 0x06, 0xf8ea }, | ||
2169 | { 0x06, 0x00ee }, | ||
2170 | { 0x06, 0xf8eb }, | ||
2171 | { 0x06, 0x00e2 }, | ||
2172 | { 0x06, 0xf87c }, | ||
2173 | { 0x06, 0xe3f8 }, | ||
2174 | { 0x06, 0x7da5 }, | ||
2175 | { 0x06, 0x1111 }, | ||
2176 | { 0x06, 0x12d2 }, | ||
2177 | { 0x06, 0x40d6 }, | ||
2178 | { 0x06, 0x4444 }, | ||
2179 | { 0x06, 0x0281 }, | ||
2180 | { 0x06, 0xc6d2 }, | ||
2181 | { 0x06, 0xa0d6 }, | ||
2182 | { 0x06, 0xaaaa }, | ||
2183 | { 0x06, 0x0281 }, | ||
2184 | { 0x06, 0xc6ae }, | ||
2185 | { 0x06, 0x0fa5 }, | ||
2186 | { 0x06, 0x4444 }, | ||
2187 | { 0x06, 0x02ae }, | ||
2188 | { 0x06, 0x4da5 }, | ||
2189 | { 0x06, 0xaaaa }, | ||
2190 | { 0x06, 0x02ae }, | ||
2191 | { 0x06, 0x47af }, | ||
2192 | { 0x06, 0x81c2 }, | ||
2193 | { 0x06, 0xee83 }, | ||
2194 | { 0x06, 0x4e00 }, | ||
2195 | { 0x06, 0xee83 }, | ||
2196 | { 0x06, 0x4d0f }, | ||
2197 | { 0x06, 0xee83 }, | ||
2198 | { 0x06, 0x4c0f }, | ||
2199 | { 0x06, 0xee83 }, | ||
2200 | { 0x06, 0x4f00 }, | ||
2201 | { 0x06, 0xee83 }, | ||
2202 | { 0x06, 0x5100 }, | ||
2203 | { 0x06, 0xee83 }, | ||
2204 | { 0x06, 0x4aff }, | ||
2205 | { 0x06, 0xee83 }, | ||
2206 | { 0x06, 0x4bff }, | ||
2207 | { 0x06, 0xe083 }, | ||
2208 | { 0x06, 0x30e1 }, | ||
2209 | { 0x06, 0x8331 }, | ||
2210 | { 0x06, 0x58fe }, | ||
2211 | { 0x06, 0xe4f8 }, | ||
2212 | { 0x06, 0x8ae5 }, | ||
2213 | { 0x06, 0xf88b }, | ||
2214 | { 0x06, 0xe083 }, | ||
2215 | { 0x06, 0x32e1 }, | ||
2216 | { 0x06, 0x8333 }, | ||
2217 | { 0x06, 0x590f }, | ||
2218 | { 0x06, 0xe283 }, | ||
2219 | { 0x06, 0x4d0c }, | ||
2220 | { 0x06, 0x245a }, | ||
2221 | { 0x06, 0xf01e }, | ||
2222 | { 0x06, 0x12e4 }, | ||
2223 | { 0x06, 0xf88c }, | ||
2224 | { 0x06, 0xe5f8 }, | ||
2225 | { 0x06, 0x8daf }, | ||
2226 | { 0x06, 0x81c2 }, | ||
2227 | { 0x06, 0xe083 }, | ||
2228 | { 0x06, 0x4f10 }, | ||
2229 | { 0x06, 0xe483 }, | ||
2230 | { 0x06, 0x4fe0 }, | ||
2231 | { 0x06, 0x834e }, | ||
2232 | { 0x06, 0x7800 }, | ||
2233 | { 0x06, 0x9f0a }, | ||
2234 | { 0x06, 0xe083 }, | ||
2235 | { 0x06, 0x4fa0 }, | ||
2236 | { 0x06, 0x10a5 }, | ||
2237 | { 0x06, 0xee83 }, | ||
2238 | { 0x06, 0x4e01 }, | ||
2239 | { 0x06, 0xe083 }, | ||
2240 | { 0x06, 0x4e78 }, | ||
2241 | { 0x06, 0x059e }, | ||
2242 | { 0x06, 0x9ae0 }, | ||
2243 | { 0x06, 0x834e }, | ||
2244 | { 0x06, 0x7804 }, | ||
2245 | { 0x06, 0x9e10 }, | ||
2246 | { 0x06, 0xe083 }, | ||
2247 | { 0x06, 0x4e78 }, | ||
2248 | { 0x06, 0x039e }, | ||
2249 | { 0x06, 0x0fe0 }, | ||
2250 | { 0x06, 0x834e }, | ||
2251 | { 0x06, 0x7801 }, | ||
2252 | { 0x06, 0x9e05 }, | ||
2253 | { 0x06, 0xae0c }, | ||
2254 | { 0x06, 0xaf81 }, | ||
2255 | { 0x06, 0xa7af }, | ||
2256 | { 0x06, 0x8152 }, | ||
2257 | { 0x06, 0xaf81 }, | ||
2258 | { 0x06, 0x8baf }, | ||
2259 | { 0x06, 0x81c2 }, | ||
2260 | { 0x06, 0xee83 }, | ||
2261 | { 0x06, 0x4800 }, | ||
2262 | { 0x06, 0xee83 }, | ||
2263 | { 0x06, 0x4900 }, | ||
2264 | { 0x06, 0xe083 }, | ||
2265 | { 0x06, 0x5110 }, | ||
2266 | { 0x06, 0xe483 }, | ||
2267 | { 0x06, 0x5158 }, | ||
2268 | { 0x06, 0x019f }, | ||
2269 | { 0x06, 0xead0 }, | ||
2270 | { 0x06, 0x00d1 }, | ||
2271 | { 0x06, 0x801f }, | ||
2272 | { 0x06, 0x66e2 }, | ||
2273 | { 0x06, 0xf8ea }, | ||
2274 | { 0x06, 0xe3f8 }, | ||
2275 | { 0x06, 0xeb5a }, | ||
2276 | { 0x06, 0xf81e }, | ||
2277 | { 0x06, 0x20e6 }, | ||
2278 | { 0x06, 0xf8ea }, | ||
2279 | { 0x06, 0xe5f8 }, | ||
2280 | { 0x06, 0xebd3 }, | ||
2281 | { 0x06, 0x02b3 }, | ||
2282 | { 0x06, 0xfee2 }, | ||
2283 | { 0x06, 0xf87c }, | ||
2284 | { 0x06, 0xef32 }, | ||
2285 | { 0x06, 0x5b80 }, | ||
2286 | { 0x06, 0xe3f8 }, | ||
2287 | { 0x06, 0x7d9e }, | ||
2288 | { 0x06, 0x037d }, | ||
2289 | { 0x06, 0xffff }, | ||
2290 | { 0x06, 0x0d58 }, | ||
2291 | { 0x06, 0x1c55 }, | ||
2292 | { 0x06, 0x1a65 }, | ||
2293 | { 0x06, 0x11a1 }, | ||
2294 | { 0x06, 0x90d3 }, | ||
2295 | { 0x06, 0xe283 }, | ||
2296 | { 0x06, 0x48e3 }, | ||
2297 | { 0x06, 0x8349 }, | ||
2298 | { 0x06, 0x1b56 }, | ||
2299 | { 0x06, 0xab08 }, | ||
2300 | { 0x06, 0xef56 }, | ||
2301 | { 0x06, 0xe683 }, | ||
2302 | { 0x06, 0x48e7 }, | ||
2303 | { 0x06, 0x8349 }, | ||
2304 | { 0x06, 0x10d1 }, | ||
2305 | { 0x06, 0x801f }, | ||
2306 | { 0x06, 0x66a0 }, | ||
2307 | { 0x06, 0x04b9 }, | ||
2308 | { 0x06, 0xe283 }, | ||
2309 | { 0x06, 0x48e3 }, | ||
2310 | { 0x06, 0x8349 }, | ||
2311 | { 0x06, 0xef65 }, | ||
2312 | { 0x06, 0xe283 }, | ||
2313 | { 0x06, 0x4ae3 }, | ||
2314 | { 0x06, 0x834b }, | ||
2315 | { 0x06, 0x1b56 }, | ||
2316 | { 0x06, 0xaa0e }, | ||
2317 | { 0x06, 0xef56 }, | ||
2318 | { 0x06, 0xe683 }, | ||
2319 | { 0x06, 0x4ae7 }, | ||
2320 | { 0x06, 0x834b }, | ||
2321 | { 0x06, 0xe283 }, | ||
2322 | { 0x06, 0x4de6 }, | ||
2323 | { 0x06, 0x834c }, | ||
2324 | { 0x06, 0xe083 }, | ||
2325 | { 0x06, 0x4da0 }, | ||
2326 | { 0x06, 0x000c }, | ||
2327 | { 0x06, 0xaf81 }, | ||
2328 | { 0x06, 0x8be0 }, | ||
2329 | { 0x06, 0x834d }, | ||
2330 | { 0x06, 0x10e4 }, | ||
2331 | { 0x06, 0x834d }, | ||
2332 | { 0x06, 0xae04 }, | ||
2333 | { 0x06, 0x80e4 }, | ||
2334 | { 0x06, 0x834d }, | ||
2335 | { 0x06, 0xe083 }, | ||
2336 | { 0x06, 0x4e78 }, | ||
2337 | { 0x06, 0x039e }, | ||
2338 | { 0x06, 0x0be0 }, | ||
2339 | { 0x06, 0x834e }, | ||
2340 | { 0x06, 0x7804 }, | ||
2341 | { 0x06, 0x9e04 }, | ||
2342 | { 0x06, 0xee83 }, | ||
2343 | { 0x06, 0x4e02 }, | ||
2344 | { 0x06, 0xe083 }, | ||
2345 | { 0x06, 0x32e1 }, | ||
2346 | { 0x06, 0x8333 }, | ||
2347 | { 0x06, 0x590f }, | ||
2348 | { 0x06, 0xe283 }, | ||
2349 | { 0x06, 0x4d0c }, | ||
2350 | { 0x06, 0x245a }, | ||
2351 | { 0x06, 0xf01e }, | ||
2352 | { 0x06, 0x12e4 }, | ||
2353 | { 0x06, 0xf88c }, | ||
2354 | { 0x06, 0xe5f8 }, | ||
2355 | { 0x06, 0x8de0 }, | ||
2356 | { 0x06, 0x8330 }, | ||
2357 | { 0x06, 0xe183 }, | ||
2358 | { 0x06, 0x3168 }, | ||
2359 | { 0x06, 0x01e4 }, | ||
2360 | { 0x06, 0xf88a }, | ||
2361 | { 0x06, 0xe5f8 }, | ||
2362 | { 0x06, 0x8bae }, | ||
2363 | { 0x06, 0x37ee }, | ||
2364 | { 0x06, 0x834e }, | ||
2365 | { 0x06, 0x03e0 }, | ||
2366 | { 0x06, 0x834c }, | ||
2367 | { 0x06, 0xe183 }, | ||
2368 | { 0x06, 0x4d1b }, | ||
2369 | { 0x06, 0x019e }, | ||
2370 | { 0x06, 0x04aa }, | ||
2371 | { 0x06, 0xa1ae }, | ||
2372 | { 0x06, 0xa8ee }, | ||
2373 | { 0x06, 0x834e }, | ||
2374 | { 0x06, 0x04ee }, | ||
2375 | { 0x06, 0x834f }, | ||
2376 | { 0x06, 0x00ae }, | ||
2377 | { 0x06, 0xabe0 }, | ||
2378 | { 0x06, 0x834f }, | ||
2379 | { 0x06, 0x7803 }, | ||
2380 | { 0x06, 0x9f14 }, | ||
2381 | { 0x06, 0xee83 }, | ||
2382 | { 0x06, 0x4e05 }, | ||
2383 | { 0x06, 0xd240 }, | ||
2384 | { 0x06, 0xd655 }, | ||
2385 | { 0x06, 0x5402 }, | ||
2386 | { 0x06, 0x81c6 }, | ||
2387 | { 0x06, 0xd2a0 }, | ||
2388 | { 0x06, 0xd6ba }, | ||
2389 | { 0x06, 0x0002 }, | ||
2390 | { 0x06, 0x81c6 }, | ||
2391 | { 0x06, 0xfefd }, | ||
2392 | { 0x06, 0xfc05 }, | ||
2393 | { 0x06, 0xf8e0 }, | ||
2394 | { 0x06, 0xf860 }, | ||
2395 | { 0x06, 0xe1f8 }, | ||
2396 | { 0x06, 0x6168 }, | ||
2397 | { 0x06, 0x02e4 }, | ||
2398 | { 0x06, 0xf860 }, | ||
2399 | { 0x06, 0xe5f8 }, | ||
2400 | { 0x06, 0x61e0 }, | ||
2401 | { 0x06, 0xf848 }, | ||
2402 | { 0x06, 0xe1f8 }, | ||
2403 | { 0x06, 0x4958 }, | ||
2404 | { 0x06, 0x0f1e }, | ||
2405 | { 0x06, 0x02e4 }, | ||
2406 | { 0x06, 0xf848 }, | ||
2407 | { 0x06, 0xe5f8 }, | ||
2408 | { 0x06, 0x49d0 }, | ||
2409 | { 0x06, 0x0002 }, | ||
2410 | { 0x06, 0x820a }, | ||
2411 | { 0x06, 0xbf83 }, | ||
2412 | { 0x06, 0x50ef }, | ||
2413 | { 0x06, 0x46dc }, | ||
2414 | { 0x06, 0x19dd }, | ||
2415 | { 0x06, 0xd001 }, | ||
2416 | { 0x06, 0x0282 }, | ||
2417 | { 0x06, 0x0a02 }, | ||
2418 | { 0x06, 0x8226 }, | ||
2419 | { 0x06, 0xe0f8 }, | ||
2420 | { 0x06, 0x60e1 }, | ||
2421 | { 0x06, 0xf861 }, | ||
2422 | { 0x06, 0x58fd }, | ||
2423 | { 0x06, 0xe4f8 }, | ||
2424 | { 0x06, 0x60e5 }, | ||
2425 | { 0x06, 0xf861 }, | ||
2426 | { 0x06, 0xfc04 }, | ||
2427 | { 0x06, 0xf9fa }, | ||
2428 | { 0x06, 0xfbc6 }, | ||
2429 | { 0x06, 0xbff8 }, | ||
2430 | { 0x06, 0x40be }, | ||
2431 | { 0x06, 0x8350 }, | ||
2432 | { 0x06, 0xa001 }, | ||
2433 | { 0x06, 0x0107 }, | ||
2434 | { 0x06, 0x1b89 }, | ||
2435 | { 0x06, 0xcfd2 }, | ||
2436 | { 0x06, 0x08eb }, | ||
2437 | { 0x06, 0xdb19 }, | ||
2438 | { 0x06, 0xb2fb }, | ||
2439 | { 0x06, 0xfffe }, | ||
2440 | { 0x06, 0xfd04 }, | ||
2441 | { 0x06, 0xf8e0 }, | ||
2442 | { 0x06, 0xf848 }, | ||
2443 | { 0x06, 0xe1f8 }, | ||
2444 | { 0x06, 0x4968 }, | ||
2445 | { 0x06, 0x08e4 }, | ||
2446 | { 0x06, 0xf848 }, | ||
2447 | { 0x06, 0xe5f8 }, | ||
2448 | { 0x06, 0x4958 }, | ||
2449 | { 0x06, 0xf7e4 }, | ||
2450 | { 0x06, 0xf848 }, | ||
2451 | { 0x06, 0xe5f8 }, | ||
2452 | { 0x06, 0x49fc }, | ||
2453 | { 0x06, 0x044d }, | ||
2454 | { 0x06, 0x2000 }, | ||
2455 | { 0x06, 0x024e }, | ||
2456 | { 0x06, 0x2200 }, | ||
2457 | { 0x06, 0x024d }, | ||
2458 | { 0x06, 0xdfff }, | ||
2459 | { 0x06, 0x014e }, | ||
2460 | { 0x06, 0xddff }, | ||
2461 | { 0x06, 0x0100 }, | ||
2462 | { 0x05, 0x83d8 }, | ||
2463 | { 0x06, 0x8000 }, | ||
2464 | { 0x03, 0xdc00 }, | ||
2465 | { 0x05, 0xfff6 }, | ||
2466 | { 0x06, 0x00fc }, | ||
2467 | { 0x1f, 0x0000 }, | ||
2468 | |||
2469 | { 0x1f, 0x0000 }, | ||
2470 | { 0x0d, 0xf880 }, | ||
2471 | { 0x1f, 0x0000 } | ||
1677 | }; | 2472 | }; |
1678 | 2473 | ||
1679 | rtl_phy_write(ioaddr, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); | 2474 | rtl_phy_write(ioaddr, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); |
1680 | 2475 | ||
1681 | if (mdio_read(ioaddr, 0x06) == 0xc400) { | 2476 | if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) { |
1682 | struct phy_reg phy_reg_init_1[] = { | 2477 | struct phy_reg phy_reg_init[] = { |
2478 | { 0x1f, 0x0002 }, | ||
2479 | { 0x05, 0x669a }, | ||
1683 | { 0x1f, 0x0005 }, | 2480 | { 0x1f, 0x0005 }, |
1684 | { 0x01, 0x0300 }, | 2481 | { 0x05, 0x8330 }, |
1685 | { 0x1f, 0x0000 }, | 2482 | { 0x06, 0x669a }, |
1686 | { 0x11, 0x401c }, | 2483 | |
1687 | { 0x16, 0x4100 }, | 2484 | { 0x1f, 0x0002 } |
2485 | }; | ||
2486 | int val; | ||
2487 | |||
2488 | rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); | ||
2489 | |||
2490 | val = mdio_read(ioaddr, 0x0d); | ||
2491 | if ((val & 0x00ff) != 0x006c) { | ||
2492 | u32 set[] = { | ||
2493 | 0x0065, 0x0066, 0x0067, 0x0068, | ||
2494 | 0x0069, 0x006a, 0x006b, 0x006c | ||
2495 | }; | ||
2496 | int i; | ||
2497 | |||
2498 | mdio_write(ioaddr, 0x1f, 0x0002); | ||
2499 | |||
2500 | val &= 0xff00; | ||
2501 | for (i = 0; i < ARRAY_SIZE(set); i++) | ||
2502 | mdio_write(ioaddr, 0x0d, val | set[i]); | ||
2503 | } | ||
2504 | } else { | ||
2505 | struct phy_reg phy_reg_init[] = { | ||
2506 | { 0x1f, 0x0002 }, | ||
2507 | { 0x05, 0x2642 }, | ||
1688 | { 0x1f, 0x0005 }, | 2508 | { 0x1f, 0x0005 }, |
1689 | { 0x07, 0x0010 }, | 2509 | { 0x05, 0x8330 }, |
1690 | { 0x05, 0x83dc }, | 2510 | { 0x06, 0x2642 } |
1691 | { 0x06, 0x087d }, | ||
1692 | { 0x05, 0x8300 }, | ||
1693 | { 0x06, 0x0101 }, | ||
1694 | { 0x06, 0x05f8 }, | ||
1695 | { 0x06, 0xf9fa }, | ||
1696 | { 0x06, 0xfbef }, | ||
1697 | { 0x06, 0x79e2 }, | ||
1698 | { 0x06, 0x835f }, | ||
1699 | { 0x06, 0xe0f8 }, | ||
1700 | { 0x06, 0x9ae1 }, | ||
1701 | { 0x06, 0xf89b }, | ||
1702 | { 0x06, 0xef31 }, | ||
1703 | { 0x06, 0x3b65 }, | ||
1704 | { 0x06, 0xaa07 }, | ||
1705 | { 0x06, 0x81e4 }, | ||
1706 | { 0x06, 0xf89a }, | ||
1707 | { 0x06, 0xe5f8 }, | ||
1708 | { 0x06, 0x9baf }, | ||
1709 | { 0x06, 0x06ae }, | ||
1710 | { 0x05, 0x83dc }, | ||
1711 | { 0x06, 0x8300 }, | ||
1712 | }; | 2511 | }; |
1713 | 2512 | ||
1714 | rtl_phy_write(ioaddr, phy_reg_init_1, | 2513 | rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); |
1715 | ARRAY_SIZE(phy_reg_init_1)); | ||
1716 | } | 2514 | } |
1717 | 2515 | ||
1718 | mdio_write(ioaddr, 0x1f, 0x0000); | 2516 | mdio_write(ioaddr, 0x1f, 0x0002); |
2517 | mdio_plus_minus(ioaddr, 0x02, 0x0100, 0x0600); | ||
2518 | mdio_plus_minus(ioaddr, 0x03, 0x0000, 0xe000); | ||
2519 | |||
2520 | mdio_write(ioaddr, 0x1f, 0x0001); | ||
2521 | mdio_write(ioaddr, 0x17, 0x0cc0); | ||
2522 | |||
2523 | mdio_write(ioaddr, 0x1f, 0x0002); | ||
2524 | mdio_patch(ioaddr, 0x0f, 0x0017); | ||
2525 | |||
2526 | rtl_phy_write(ioaddr, phy_reg_init_1, ARRAY_SIZE(phy_reg_init_1)); | ||
2527 | } | ||
2528 | |||
2529 | static void rtl8168d_3_hw_phy_config(void __iomem *ioaddr) | ||
2530 | { | ||
2531 | struct phy_reg phy_reg_init[] = { | ||
2532 | { 0x1f, 0x0002 }, | ||
2533 | { 0x10, 0x0008 }, | ||
2534 | { 0x0d, 0x006c }, | ||
2535 | |||
2536 | { 0x1f, 0x0000 }, | ||
2537 | { 0x0d, 0xf880 }, | ||
2538 | |||
2539 | { 0x1f, 0x0001 }, | ||
2540 | { 0x17, 0x0cc0 }, | ||
2541 | |||
2542 | { 0x1f, 0x0001 }, | ||
2543 | { 0x0b, 0xa4d8 }, | ||
2544 | { 0x09, 0x281c }, | ||
2545 | { 0x07, 0x2883 }, | ||
2546 | { 0x0a, 0x6b35 }, | ||
2547 | { 0x1d, 0x3da4 }, | ||
2548 | { 0x1c, 0xeffd }, | ||
2549 | { 0x14, 0x7f52 }, | ||
2550 | { 0x18, 0x7fc6 }, | ||
2551 | { 0x08, 0x0601 }, | ||
2552 | { 0x06, 0x4063 }, | ||
2553 | { 0x10, 0xf074 }, | ||
2554 | { 0x1f, 0x0003 }, | ||
2555 | { 0x13, 0x0789 }, | ||
2556 | { 0x12, 0xf4bd }, | ||
2557 | { 0x1a, 0x04fd }, | ||
2558 | { 0x14, 0x84b0 }, | ||
2559 | { 0x1f, 0x0000 }, | ||
2560 | { 0x00, 0x9200 }, | ||
2561 | |||
2562 | { 0x1f, 0x0005 }, | ||
2563 | { 0x01, 0x0340 }, | ||
2564 | { 0x1f, 0x0001 }, | ||
2565 | { 0x04, 0x4000 }, | ||
2566 | { 0x03, 0x1d21 }, | ||
2567 | { 0x02, 0x0c32 }, | ||
2568 | { 0x01, 0x0200 }, | ||
2569 | { 0x00, 0x5554 }, | ||
2570 | { 0x04, 0x4800 }, | ||
2571 | { 0x04, 0x4000 }, | ||
2572 | { 0x04, 0xf000 }, | ||
2573 | { 0x03, 0xdf01 }, | ||
2574 | { 0x02, 0xdf20 }, | ||
2575 | { 0x01, 0x101a }, | ||
2576 | { 0x00, 0xa0ff }, | ||
2577 | { 0x04, 0xf800 }, | ||
2578 | { 0x04, 0xf000 }, | ||
2579 | { 0x1f, 0x0000 }, | ||
2580 | |||
2581 | { 0x1f, 0x0007 }, | ||
2582 | { 0x1e, 0x0023 }, | ||
2583 | { 0x16, 0x0000 }, | ||
2584 | { 0x1f, 0x0000 } | ||
2585 | }; | ||
2586 | |||
2587 | rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); | ||
1719 | } | 2588 | } |
1720 | 2589 | ||
1721 | static void rtl8102e_hw_phy_config(void __iomem *ioaddr) | 2590 | static void rtl8102e_hw_phy_config(void __iomem *ioaddr) |
@@ -1792,7 +2661,13 @@ static void rtl_hw_phy_config(struct net_device *dev) | |||
1792 | rtl8168cp_2_hw_phy_config(ioaddr); | 2661 | rtl8168cp_2_hw_phy_config(ioaddr); |
1793 | break; | 2662 | break; |
1794 | case RTL_GIGA_MAC_VER_25: | 2663 | case RTL_GIGA_MAC_VER_25: |
1795 | rtl8168d_hw_phy_config(ioaddr); | 2664 | rtl8168d_1_hw_phy_config(ioaddr); |
2665 | break; | ||
2666 | case RTL_GIGA_MAC_VER_26: | ||
2667 | rtl8168d_2_hw_phy_config(ioaddr); | ||
2668 | break; | ||
2669 | case RTL_GIGA_MAC_VER_27: | ||
2670 | rtl8168d_3_hw_phy_config(ioaddr); | ||
1796 | break; | 2671 | break; |
1797 | 2672 | ||
1798 | default: | 2673 | default: |
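Each of the new rtl8168d_{1,2,3}_hw_phy_config() routines is mostly a replay of { register, value } tables through rtl_phy_write(), with a few conditional tweaks keyed off the e-fuse byte read above. For reference, rtl_phy_write() (defined earlier in r8169.c) simply walks such a table and issues one MDIO write per entry; the sketch below is reconstructed from memory, so the exact form may differ slightly.

	static void rtl_phy_write(void __iomem *ioaddr, struct phy_reg *regs, int len)
	{
		while (len-- > 0) {
			mdio_write(ioaddr, regs->reg, regs->val);	/* one entry = one write */
			regs++;
		}
	}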
@@ -2863,6 +3738,8 @@ static void rtl_hw_start_8168(struct net_device *dev) | |||
2863 | break; | 3738 | break; |
2864 | 3739 | ||
2865 | case RTL_GIGA_MAC_VER_25: | 3740 | case RTL_GIGA_MAC_VER_25: |
3741 | case RTL_GIGA_MAC_VER_26: | ||
3742 | case RTL_GIGA_MAC_VER_27: | ||
2866 | rtl_hw_start_8168d(ioaddr, pdev); | 3743 | rtl_hw_start_8168d(ioaddr, pdev); |
2867 | break; | 3744 | break; |
2868 | 3745 | ||
diff --git a/drivers/net/stmmac/Kconfig b/drivers/net/stmmac/Kconfig new file mode 100644 index 000000000000..35eaa5251d7f --- /dev/null +++ b/drivers/net/stmmac/Kconfig | |||
@@ -0,0 +1,53 @@ | |||
1 | config STMMAC_ETH | ||
2 | tristate "STMicroelectronics 10/100/1000 Ethernet driver" | ||
3 | select MII | ||
4 | select PHYLIB | ||
5 | depends on NETDEVICES && CPU_SUBTYPE_ST40 | ||
6 | help | ||
7 | This is the driver for the ST MAC 10/100/1000 on-chip Ethernet | ||
8 | controllers. ST Ethernet IPs are built around a Synopsys IP Core. | ||
9 | |||
10 | if STMMAC_ETH | ||
11 | |||
12 | config STMMAC_DA | ||
13 | bool "STMMAC DMA arbitration scheme" | ||
14 | default n | ||
15 | help | ||
16 | Selecting this option gives RX priority over TX (Gigabit | ||
17 | Ethernet devices only). | ||
18 | By default, the DMA arbitration scheme is based on Round-robin | ||
19 | (rx:tx priority is 1:1). | ||
20 | |||
21 | config STMMAC_DUAL_MAC | ||
22 | bool "STMMAC: dual mac support (EXPERIMENTAL)" | ||
23 | default n | ||
24 | depends on EXPERIMENTAL && STMMAC_ETH && !STMMAC_TIMER | ||
25 | help | ||
26 | Some ST SoCs (for example the stx7141 and stx7200c2) have two | ||
27 | Ethernet Controllers. This option turns on the second Ethernet | ||
28 | device on such platforms. | ||
29 | |||
30 | config STMMAC_TIMER | ||
31 | bool "STMMAC Timer optimisation" | ||
32 | default n | ||
33 | help | ||
34 | Use an external timer to reduce the number of network | ||
35 | interrupts. | ||
36 | |||
37 | choice | ||
38 | prompt "Select Timer device" | ||
39 | depends on STMMAC_TIMER | ||
40 | |||
41 | config STMMAC_TMU_TIMER | ||
42 | bool "TMU channel 2" | ||
43 | depends on CPU_SH4 | ||
44 | help | ||
45 | |||
46 | config STMMAC_RTC_TIMER | ||
47 | bool "Real time clock" | ||
48 | depends on RTC_CLASS | ||
49 | help | ||
50 | |||
51 | endchoice | ||
52 | |||
53 | endif | ||
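For reference, a configuration fragment that enables the new driver on an ST40 platform with the RTC-based interrupt mitigation could look like the following; this is illustrative only, and the exact set of options depends on the board's defconfig.

	CONFIG_STMMAC_ETH=y
	# CONFIG_STMMAC_DA is not set
	# CONFIG_STMMAC_DUAL_MAC is not set
	CONFIG_STMMAC_TIMER=y
	# CONFIG_STMMAC_TMU_TIMER is not set
	CONFIG_STMMAC_RTC_TIMER=y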
diff --git a/drivers/net/stmmac/Makefile b/drivers/net/stmmac/Makefile new file mode 100644 index 000000000000..b2d7a5564dfa --- /dev/null +++ b/drivers/net/stmmac/Makefile | |||
@@ -0,0 +1,4 @@ | |||
1 | obj-$(CONFIG_STMMAC_ETH) += stmmac.o | ||
2 | stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o | ||
3 | stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \ | ||
4 | mac100.o gmac.o $(stmmac-y) | ||
diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h new file mode 100644 index 000000000000..e49e5188e887 --- /dev/null +++ b/drivers/net/stmmac/common.h | |||
@@ -0,0 +1,330 @@ | |||
1 | /******************************************************************************* | ||
2 | STMMAC Common Header File | ||
3 | |||
4 | Copyright (C) 2007-2009 STMicroelectronics Ltd | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> | ||
23 | *******************************************************************************/ | ||
24 | |||
25 | #include "descs.h" | ||
26 | #include <linux/io.h> | ||
27 | |||
28 | /* ********************************************* | ||
29 | DMA CRS Control and Status Register Mapping | ||
30 | * *********************************************/ | ||
31 | #define DMA_BUS_MODE 0x00001000 /* Bus Mode */ | ||
32 | #define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */ | ||
33 | #define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */ | ||
34 | #define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */ | ||
35 | #define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */ | ||
36 | #define DMA_STATUS 0x00001014 /* Status Register */ | ||
37 | #define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */ | ||
38 | #define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */ | ||
39 | #define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */ | ||
40 | #define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */ | ||
41 | #define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */ | ||
42 | |||
43 | /* ******************************** | ||
44 | DMA Control register defines | ||
45 | * ********************************/ | ||
46 | #define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */ | ||
47 | #define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */ | ||
48 | |||
49 | /* ************************************** | ||
50 | DMA Interrupt Enable register defines | ||
51 | * **************************************/ | ||
52 | /**** NORMAL INTERRUPT ****/ | ||
53 | #define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */ | ||
54 | #define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */ | ||
55 | #define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */ | ||
56 | #define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */ | ||
57 | #define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */ | ||
58 | |||
59 | #define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \ | ||
60 | DMA_INTR_ENA_TIE) | ||
61 | |||
62 | /**** ABNORMAL INTERRUPT ****/ | ||
63 | #define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */ | ||
64 | #define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */ | ||
65 | #define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */ | ||
66 | #define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */ | ||
67 | #define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */ | ||
68 | #define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */ | ||
69 | #define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */ | ||
70 | #define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */ | ||
71 | #define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */ | ||
72 | #define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */ | ||
73 | |||
74 | #define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \ | ||
75 | DMA_INTR_ENA_UNE) | ||
76 | |||
77 | /* DMA default interrupt mask */ | ||
78 | #define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL) | ||
79 | |||
80 | /* **************************** | ||
81 | * DMA Status register defines | ||
82 | * ****************************/ | ||
83 | #define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */ | ||
84 | #define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */ | ||
85 | #define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int. */ | ||
86 | #define DMA_STATUS_GMI 0x08000000 | ||
87 | #define DMA_STATUS_GLI 0x04000000 | ||
88 | #define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */ | ||
89 | #define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */ | ||
90 | #define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */ | ||
91 | #define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */ | ||
92 | #define DMA_STATUS_TS_SHIFT 20 | ||
93 | #define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */ | ||
94 | #define DMA_STATUS_RS_SHIFT 17 | ||
95 | #define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */ | ||
96 | #define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */ | ||
97 | #define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */ | ||
98 | #define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */ | ||
99 | #define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */ | ||
100 | #define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */ | ||
101 | #define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */ | ||
102 | #define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */ | ||
103 | #define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */ | ||
104 | #define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */ | ||
105 | #define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */ | ||
106 | #define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */ | ||
107 | #define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */ | ||
108 | #define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */ | ||
109 | #define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */ | ||
110 | |||
111 | /* Other defines */ | ||
112 | #define HASH_TABLE_SIZE 64 | ||
113 | #define PAUSE_TIME 0x200 | ||
114 | |||
115 | /* Flow Control defines */ | ||
116 | #define FLOW_OFF 0 | ||
117 | #define FLOW_RX 1 | ||
118 | #define FLOW_TX 2 | ||
119 | #define FLOW_AUTO (FLOW_TX | FLOW_RX) | ||
120 | |||
121 | /* DMA STORE-AND-FORWARD Operation Mode */ | ||
122 | #define SF_DMA_MODE 1 | ||
123 | |||
124 | #define HW_CSUM 1 | ||
125 | #define NO_HW_CSUM 0 | ||
126 | |||
127 | /* GMAC TX FIFO is 8K, Rx FIFO is 16K */ | ||
128 | #define BUF_SIZE_16KiB 16384 | ||
129 | #define BUF_SIZE_8KiB 8192 | ||
130 | #define BUF_SIZE_4KiB 4096 | ||
131 | #define BUF_SIZE_2KiB 2048 | ||
132 | |||
133 | /* Power Down and WOL */ | ||
134 | #define PMT_NOT_SUPPORTED 0 | ||
135 | #define PMT_SUPPORTED 1 | ||
136 | |||
137 | /* Common MAC defines */ | ||
138 | #define MAC_CTRL_REG 0x00000000 /* MAC Control */ | ||
139 | #define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */ | ||
140 | #define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */ | ||
141 | |||
142 | /* MAC Management Counters register */ | ||
143 | #define MMC_CONTROL 0x00000100 /* MMC Control */ | ||
144 | #define MMC_HIGH_INTR 0x00000104 /* MMC High Interrupt */ | ||
145 | #define MMC_LOW_INTR 0x00000108 /* MMC Low Interrupt */ | ||
146 | #define MMC_HIGH_INTR_MASK 0x0000010c /* MMC High Interrupt Mask */ | ||
147 | #define MMC_LOW_INTR_MASK 0x00000110 /* MMC Low Interrupt Mask */ | ||
148 | |||
149 | #define MMC_CONTROL_MAX_FRM_MASK 0x0003ff8 /* Maximum Frame Size */ | ||
150 | #define MMC_CONTROL_MAX_FRM_SHIFT 3 | ||
151 | #define MMC_CONTROL_MAX_FRAME 0x7FF | ||
152 | |||
153 | struct stmmac_extra_stats { | ||
154 | /* Transmit errors */ | ||
155 | unsigned long tx_underflow ____cacheline_aligned; | ||
156 | unsigned long tx_carrier; | ||
157 | unsigned long tx_losscarrier; | ||
158 | unsigned long tx_heartbeat; | ||
159 | unsigned long tx_deferred; | ||
160 | unsigned long tx_vlan; | ||
161 | unsigned long tx_jabber; | ||
162 | unsigned long tx_frame_flushed; | ||
163 | unsigned long tx_payload_error; | ||
164 | unsigned long tx_ip_header_error; | ||
165 | /* Receive errors */ | ||
166 | unsigned long rx_desc; | ||
167 | unsigned long rx_partial; | ||
168 | unsigned long rx_runt; | ||
169 | unsigned long rx_toolong; | ||
170 | unsigned long rx_collision; | ||
171 | unsigned long rx_crc; | ||
172 | unsigned long rx_lenght; | ||
173 | unsigned long rx_mii; | ||
174 | unsigned long rx_multicast; | ||
175 | unsigned long rx_gmac_overflow; | ||
176 | unsigned long rx_watchdog; | ||
177 | unsigned long da_rx_filter_fail; | ||
178 | unsigned long sa_rx_filter_fail; | ||
179 | unsigned long rx_missed_cntr; | ||
180 | unsigned long rx_overflow_cntr; | ||
181 | unsigned long rx_vlan; | ||
182 | /* Tx/Rx IRQ errors */ | ||
183 | unsigned long tx_undeflow_irq; | ||
184 | unsigned long tx_process_stopped_irq; | ||
185 | unsigned long tx_jabber_irq; | ||
186 | unsigned long rx_overflow_irq; | ||
187 | unsigned long rx_buf_unav_irq; | ||
188 | unsigned long rx_process_stopped_irq; | ||
189 | unsigned long rx_watchdog_irq; | ||
190 | unsigned long tx_early_irq; | ||
191 | unsigned long fatal_bus_error_irq; | ||
192 | /* Extra info */ | ||
193 | unsigned long threshold; | ||
194 | unsigned long tx_pkt_n; | ||
195 | unsigned long rx_pkt_n; | ||
196 | unsigned long poll_n; | ||
197 | unsigned long sched_timer_n; | ||
198 | unsigned long normal_irq_n; | ||
199 | }; | ||
200 | |||
201 | /* GMAC core can compute the checksums in HW. */ | ||
202 | enum rx_frame_status { | ||
203 | good_frame = 0, | ||
204 | discard_frame = 1, | ||
205 | csum_none = 2, | ||
206 | }; | ||
207 | |||
208 | static inline void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6], | ||
209 | unsigned int high, unsigned int low) | ||
210 | { | ||
211 | unsigned long data; | ||
212 | |||
213 | data = (addr[5] << 8) | addr[4]; | ||
214 | writel(data, ioaddr + high); | ||
215 | data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; | ||
216 | writel(data, ioaddr + low); | ||
217 | |||
218 | return; | ||
219 | } | ||
220 | |||
221 | static inline void stmmac_get_mac_addr(unsigned long ioaddr, | ||
222 | unsigned char *addr, unsigned int high, | ||
223 | unsigned int low) | ||
224 | { | ||
225 | unsigned int hi_addr, lo_addr; | ||
226 | |||
227 | /* Read the MAC address from the hardware */ | ||
228 | hi_addr = readl(ioaddr + high); | ||
229 | lo_addr = readl(ioaddr + low); | ||
230 | |||
231 | /* Extract the MAC address from the high and low words */ | ||
232 | addr[0] = lo_addr & 0xff; | ||
233 | addr[1] = (lo_addr >> 8) & 0xff; | ||
234 | addr[2] = (lo_addr >> 16) & 0xff; | ||
235 | addr[3] = (lo_addr >> 24) & 0xff; | ||
236 | addr[4] = hi_addr & 0xff; | ||
237 | addr[5] = (hi_addr >> 8) & 0xff; | ||
238 | |||
239 | return; | ||
240 | } | ||
241 | |||
242 | struct stmmac_ops { | ||
243 | /* MAC core initialization */ | ||
244 | void (*core_init) (unsigned long ioaddr) ____cacheline_aligned; | ||
245 | /* DMA core initialization */ | ||
246 | int (*dma_init) (unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx); | ||
247 | /* Dump MAC registers */ | ||
248 | void (*dump_mac_regs) (unsigned long ioaddr); | ||
249 | /* Dump DMA registers */ | ||
250 | void (*dump_dma_regs) (unsigned long ioaddr); | ||
251 | /* Set tx/rx threshold in the csr6 register | ||
252 | * An invalid value enables the store-and-forward mode */ | ||
253 | void (*dma_mode) (unsigned long ioaddr, int txmode, int rxmode); | ||
254 | /* To track extra statistic (if supported) */ | ||
255 | void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x, | ||
256 | unsigned long ioaddr); | ||
257 | /* RX descriptor ring initialization */ | ||
258 | void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size, | ||
259 | int disable_rx_ic); | ||
260 | /* TX descriptor ring initialization */ | ||
261 | void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size); | ||
262 | |||
263 | /* Invoked by the xmit function to prepare the tx descriptor */ | ||
264 | void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len, | ||
265 | int csum_flag); | ||
266 | /* Set/get the owner of the descriptor */ | ||
267 | void (*set_tx_owner) (struct dma_desc *p); | ||
268 | int (*get_tx_owner) (struct dma_desc *p); | ||
269 | /* Invoked by the xmit function to close the tx descriptor */ | ||
270 | void (*close_tx_desc) (struct dma_desc *p); | ||
271 | /* Clean the tx descriptor as soon as the tx irq is received */ | ||
272 | void (*release_tx_desc) (struct dma_desc *p); | ||
273 | /* Clear interrupt on tx frame completion. When this bit is | ||
274 | * set an interrupt happens as soon as the frame is transmitted */ | ||
275 | void (*clear_tx_ic) (struct dma_desc *p); | ||
276 | /* Last tx segment reports the transmit status */ | ||
277 | int (*get_tx_ls) (struct dma_desc *p); | ||
278 | /* Return the transmit status looking at the TDES1 */ | ||
279 | int (*tx_status) (void *data, struct stmmac_extra_stats *x, | ||
280 | struct dma_desc *p, unsigned long ioaddr); | ||
281 | /* Get the buffer size from the descriptor */ | ||
282 | int (*get_tx_len) (struct dma_desc *p); | ||
283 | /* Handle extra events on specific interrupts (hw dependent) */ | ||
284 | void (*host_irq_status) (unsigned long ioaddr); | ||
285 | int (*get_rx_owner) (struct dma_desc *p); | ||
286 | void (*set_rx_owner) (struct dma_desc *p); | ||
287 | /* Get the receive frame size */ | ||
288 | int (*get_rx_frame_len) (struct dma_desc *p); | ||
289 | /* Return the reception status looking at the RDES1 */ | ||
290 | int (*rx_status) (void *data, struct stmmac_extra_stats *x, | ||
291 | struct dma_desc *p); | ||
292 | /* Multicast filter setting */ | ||
293 | void (*set_filter) (struct net_device *dev); | ||
294 | /* Flow control setting */ | ||
295 | void (*flow_ctrl) (unsigned long ioaddr, unsigned int duplex, | ||
296 | unsigned int fc, unsigned int pause_time); | ||
297 | /* Set power management mode (e.g. magic frame) */ | ||
298 | void (*pmt) (unsigned long ioaddr, unsigned long mode); | ||
299 | /* Set/Get Unicast MAC addresses */ | ||
300 | void (*set_umac_addr) (unsigned long ioaddr, unsigned char *addr, | ||
301 | unsigned int reg_n); | ||
302 | void (*get_umac_addr) (unsigned long ioaddr, unsigned char *addr, | ||
303 | unsigned int reg_n); | ||
304 | }; | ||
305 | |||
306 | struct mac_link { | ||
307 | int port; | ||
308 | int duplex; | ||
309 | int speed; | ||
310 | }; | ||
311 | |||
312 | struct mii_regs { | ||
313 | unsigned int addr; /* MII Address */ | ||
314 | unsigned int data; /* MII Data */ | ||
315 | }; | ||
316 | |||
317 | struct hw_cap { | ||
318 | unsigned int version; /* Core Version register (GMAC) */ | ||
319 | unsigned int pmt; /* Power-Down mode (GMAC) */ | ||
320 | struct mac_link link; | ||
321 | struct mii_regs mii; | ||
322 | }; | ||
323 | |||
324 | struct mac_device_info { | ||
325 | struct hw_cap hw; | ||
326 | struct stmmac_ops *ops; | ||
327 | }; | ||
328 | |||
329 | struct mac_device_info *gmac_setup(unsigned long addr); | ||
330 | struct mac_device_info *mac100_setup(unsigned long addr); | ||
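stmmac_set_mac_addr() and stmmac_get_mac_addr() follow the usual Synopsys split of the station address across a "high" register (two most significant bytes) and a "low" register (four least significant bytes), so a backend's set_umac_addr hook can be a thin wrapper around them. A hedged sketch is below; the 0x40/0x44 offsets and the example_ prefix are assumptions for illustration only, as the real offsets come from the gmac/mac100 headers.

	/* Hypothetical backend hook; register offsets 0x40/0x44 are assumed. */
	static void example_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
					  unsigned int reg_n)
	{
		stmmac_set_mac_addr(ioaddr, addr,
				    0x40 + reg_n * 8,	/* ADDR_HIGH register */
				    0x44 + reg_n * 8);	/* ADDR_LOW register  */
	}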
diff --git a/drivers/net/stmmac/descs.h b/drivers/net/stmmac/descs.h new file mode 100644 index 000000000000..6d2a0b2f5e57 --- /dev/null +++ b/drivers/net/stmmac/descs.h | |||
@@ -0,0 +1,163 @@ | |||
1 | /******************************************************************************* | ||
2 | Header File to describe the DMA descriptors | ||
3 | Use enhanced descriptors when a GMAC core is present. | ||
4 | |||
5 | This program is free software; you can redistribute it and/or modify it | ||
6 | under the terms and conditions of the GNU General Public License, | ||
7 | version 2, as published by the Free Software Foundation. | ||
8 | |||
9 | This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | more details. | ||
13 | |||
14 | You should have received a copy of the GNU General Public License along with | ||
15 | this program; if not, write to the Free Software Foundation, Inc., | ||
16 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
17 | |||
18 | The full GNU General Public License is included in this distribution in | ||
19 | the file called "COPYING". | ||
20 | |||
21 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> | ||
22 | *******************************************************************************/ | ||
23 | struct dma_desc { | ||
24 | /* Receive descriptor */ | ||
25 | union { | ||
26 | struct { | ||
27 | /* RDES0 */ | ||
28 | u32 reserved1:1; | ||
29 | u32 crc_error:1; | ||
30 | u32 dribbling:1; | ||
31 | u32 mii_error:1; | ||
32 | u32 receive_watchdog:1; | ||
33 | u32 frame_type:1; | ||
34 | u32 collision:1; | ||
35 | u32 frame_too_long:1; | ||
36 | u32 last_descriptor:1; | ||
37 | u32 first_descriptor:1; | ||
38 | u32 multicast_frame:1; | ||
39 | u32 run_frame:1; | ||
40 | u32 length_error:1; | ||
41 | u32 partial_frame_error:1; | ||
42 | u32 descriptor_error:1; | ||
43 | u32 error_summary:1; | ||
44 | u32 frame_length:14; | ||
45 | u32 filtering_fail:1; | ||
46 | u32 own:1; | ||
47 | /* RDES1 */ | ||
48 | u32 buffer1_size:11; | ||
49 | u32 buffer2_size:11; | ||
50 | u32 reserved2:2; | ||
51 | u32 second_address_chained:1; | ||
52 | u32 end_ring:1; | ||
53 | u32 reserved3:5; | ||
54 | u32 disable_ic:1; | ||
55 | } rx; | ||
56 | struct { | ||
57 | /* RDES0 */ | ||
58 | u32 payload_csum_error:1; | ||
59 | u32 crc_error:1; | ||
60 | u32 dribbling:1; | ||
61 | u32 error_gmii:1; | ||
62 | u32 receive_watchdog:1; | ||
63 | u32 frame_type:1; | ||
64 | u32 late_collision:1; | ||
65 | u32 ipc_csum_error:1; | ||
66 | u32 last_descriptor:1; | ||
67 | u32 first_descriptor:1; | ||
68 | u32 vlan_tag:1; | ||
69 | u32 overflow_error:1; | ||
70 | u32 length_error:1; | ||
71 | u32 sa_filter_fail:1; | ||
72 | u32 descriptor_error:1; | ||
73 | u32 error_summary:1; | ||
74 | u32 frame_length:14; | ||
75 | u32 da_filter_fail:1; | ||
76 | u32 own:1; | ||
77 | /* RDES1 */ | ||
78 | u32 buffer1_size:13; | ||
79 | u32 reserved1:1; | ||
80 | u32 second_address_chained:1; | ||
81 | u32 end_ring:1; | ||
82 | u32 buffer2_size:13; | ||
83 | u32 reserved2:2; | ||
84 | u32 disable_ic:1; | ||
85 | } erx; /* -- enhanced -- */ | ||
86 | |||
87 | /* Transmit descriptor */ | ||
88 | struct { | ||
89 | /* TDES0 */ | ||
90 | u32 deferred:1; | ||
91 | u32 underflow_error:1; | ||
92 | u32 excessive_deferral:1; | ||
93 | u32 collision_count:4; | ||
94 | u32 heartbeat_fail:1; | ||
95 | u32 excessive_collisions:1; | ||
96 | u32 late_collision:1; | ||
97 | u32 no_carrier:1; | ||
98 | u32 loss_carrier:1; | ||
99 | u32 reserved1:3; | ||
100 | u32 error_summary:1; | ||
101 | u32 reserved2:15; | ||
102 | u32 own:1; | ||
103 | /* TDES1 */ | ||
104 | u32 buffer1_size:11; | ||
105 | u32 buffer2_size:11; | ||
106 | u32 reserved3:1; | ||
107 | u32 disable_padding:1; | ||
108 | u32 second_address_chained:1; | ||
109 | u32 end_ring:1; | ||
110 | u32 crc_disable:1; | ||
111 | u32 reserved4:2; | ||
112 | u32 first_segment:1; | ||
113 | u32 last_segment:1; | ||
114 | u32 interrupt:1; | ||
115 | } tx; | ||
116 | struct { | ||
117 | /* TDES0 */ | ||
118 | u32 deferred:1; | ||
119 | u32 underflow_error:1; | ||
120 | u32 excessive_deferral:1; | ||
121 | u32 collision_count:4; | ||
122 | u32 vlan_frame:1; | ||
123 | u32 excessive_collisions:1; | ||
124 | u32 late_collision:1; | ||
125 | u32 no_carrier:1; | ||
126 | u32 loss_carrier:1; | ||
127 | u32 payload_error:1; | ||
128 | u32 frame_flushed:1; | ||
129 | u32 jabber_timeout:1; | ||
130 | u32 error_summary:1; | ||
131 | u32 ip_header_error:1; | ||
132 | u32 time_stamp_status:1; | ||
133 | u32 reserved1:2; | ||
134 | u32 second_address_chained:1; | ||
135 | u32 end_ring:1; | ||
136 | u32 checksum_insertion:2; | ||
137 | u32 reserved2:1; | ||
138 | u32 time_stamp_enable:1; | ||
139 | u32 disable_padding:1; | ||
140 | u32 crc_disable:1; | ||
141 | u32 first_segment:1; | ||
142 | u32 last_segment:1; | ||
143 | u32 interrupt:1; | ||
144 | u32 own:1; | ||
145 | /* TDES1 */ | ||
146 | u32 buffer1_size:13; | ||
147 | u32 reserved3:3; | ||
148 | u32 buffer2_size:13; | ||
149 | u32 reserved4:3; | ||
150 | } etx; /* -- enhanced -- */ | ||
151 | } des01; | ||
152 | unsigned int des2; | ||
153 | unsigned int des3; | ||
154 | }; | ||
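All four views in the union above alias the same pair of 32-bit words (DES0/DES1); which view is meaningful depends on the ring (RX or TX) and on whether the core uses the enhanced layout, while the OWN flag always sits in bit 31 of DES0. A minimal userspace sketch of the same overlay pattern (reduced field set, purely illustrative), assuming the LSB-first bitfield allocation the driver itself relies on:

#include <stdio.h>
#include <stdint.h>

/* Reduced, illustrative model of des01: both views overlay one 32-bit
 * word and keep the OWN flag in bit 31. */
struct mini_desc {
	union {
		struct {
			uint32_t frame_length:14;
			uint32_t other_status:17;
			uint32_t own:1;
		} rx;
		struct {
			uint32_t status:31;
			uint32_t own:1;
		} tx;
	} des01;
	uint32_t des2;	/* buffer 1 address in the real descriptor */
	uint32_t des3;	/* buffer 2 (or next descriptor) address */
};

int main(void)
{
	struct mini_desc d = { 0 };

	d.des01.rx.own = 1;	/* hand the descriptor back to the DMA */
	/* the TX view of the same word sees the same OWN bit */
	printf("own via tx view: %u\n", (unsigned)d.des01.tx.own);
	return 0;
}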
155 | |||
156 | /* Transmit checksum insertion control */ | ||
157 | enum tdes_csum_insertion { | ||
158 | cic_disabled = 0, /* Checksum Insertion Control */ | ||
159 | cic_only_ip = 1, /* Only IP header */ | ||
160 | cic_no_pseudoheader = 2, /* IP header but pseudoheader | ||
161 | * is not calculated */ | ||
162 | cic_full = 3, /* IP header and pseudoheader */ | ||
163 | }; | ||
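The two checksum_insertion bits of the enhanced TX descriptor take exactly these four values. A small sketch of how a transmit path might stamp the field (the helper and struct names here are hypothetical, not part of the driver):

#include <stdio.h>

/* Trimmed-down copy of enum tdes_csum_insertion from descs.h */
enum tdes_csum_insertion {
	cic_disabled = 0,
	cic_only_ip = 1,
	cic_no_pseudoheader = 2,
	cic_full = 3,
};

/* Hypothetical stand-in for the checksum bits of the enhanced TX descriptor */
struct etx_view {
	unsigned int checksum_insertion:2;
	unsigned int first_segment:1;
	unsigned int last_segment:1;
};

/* Pick full IP + payload offload when the stack asked for hardware csum,
 * otherwise leave insertion disabled (mirrors the csum_flag test in the
 * driver's prepare_tx_desc callback). */
static void set_tx_csum(struct etx_view *etx, int hw_csum_requested)
{
	etx->checksum_insertion = hw_csum_requested ? cic_full : cic_disabled;
}

int main(void)
{
	struct etx_view d = { 0 };

	set_tx_csum(&d, 1);
	printf("checksum_insertion = %u\n", (unsigned)d.checksum_insertion); /* 3 */
	return 0;
}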
diff --git a/drivers/net/stmmac/gmac.c b/drivers/net/stmmac/gmac.c new file mode 100644 index 000000000000..b624bb5bae0a --- /dev/null +++ b/drivers/net/stmmac/gmac.c | |||
@@ -0,0 +1,693 @@ | |||
1 | /******************************************************************************* | ||
2 | This is the driver for the GMAC on-chip Ethernet controller for ST SoCs. | ||
3 | DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for | ||
4 | developing this code. | ||
5 | |||
6 | Copyright (C) 2007-2009 STMicroelectronics Ltd | ||
7 | |||
8 | This program is free software; you can redistribute it and/or modify it | ||
9 | under the terms and conditions of the GNU General Public License, | ||
10 | version 2, as published by the Free Software Foundation. | ||
11 | |||
12 | This program is distributed in the hope it will be useful, but WITHOUT | ||
13 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | more details. | ||
16 | |||
17 | You should have received a copy of the GNU General Public License along with | ||
18 | this program; if not, write to the Free Software Foundation, Inc., | ||
19 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
20 | |||
21 | The full GNU General Public License is included in this distribution in | ||
22 | the file called "COPYING". | ||
23 | |||
24 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> | ||
25 | *******************************************************************************/ | ||
26 | |||
27 | #include <linux/netdevice.h> | ||
28 | #include <linux/crc32.h> | ||
29 | #include <linux/mii.h> | ||
30 | #include <linux/phy.h> | ||
31 | |||
32 | #include "stmmac.h" | ||
33 | #include "gmac.h" | ||
34 | |||
35 | #undef GMAC_DEBUG | ||
36 | /*#define GMAC_DEBUG*/ | ||
37 | #undef FRAME_FILTER_DEBUG | ||
38 | /*#define FRAME_FILTER_DEBUG*/ | ||
39 | #ifdef GMAC_DEBUG | ||
40 | #define DBG(fmt, args...) printk(fmt, ## args) | ||
41 | #else | ||
42 | #define DBG(fmt, args...) do { } while (0) | ||
43 | #endif | ||
44 | |||
45 | static void gmac_dump_regs(unsigned long ioaddr) | ||
46 | { | ||
47 | int i; | ||
48 | pr_info("\t----------------------------------------------\n" | ||
49 | "\t GMAC registers (base addr = 0x%8x)\n" | ||
50 | "\t----------------------------------------------\n", | ||
51 | (unsigned int)ioaddr); | ||
52 | |||
53 | for (i = 0; i < 55; i++) { | ||
54 | int offset = i * 4; | ||
55 | pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i, | ||
56 | offset, readl(ioaddr + offset)); | ||
57 | } | ||
58 | return; | ||
59 | } | ||
60 | |||
61 | static int gmac_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx) | ||
62 | { | ||
63 | u32 value = readl(ioaddr + DMA_BUS_MODE); | ||
64 | /* DMA SW reset */ | ||
65 | value |= DMA_BUS_MODE_SFT_RESET; | ||
66 | writel(value, ioaddr + DMA_BUS_MODE); | ||
67 | do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)); | ||
68 | |||
69 | value = /* DMA_BUS_MODE_FB | */ DMA_BUS_MODE_4PBL | | ||
70 | ((pbl << DMA_BUS_MODE_PBL_SHIFT) | | ||
71 | (pbl << DMA_BUS_MODE_RPBL_SHIFT)); | ||
72 | |||
73 | #ifdef CONFIG_STMMAC_DA | ||
74 | value |= DMA_BUS_MODE_DA; /* Rx has priority over tx */ | ||
75 | #endif | ||
76 | writel(value, ioaddr + DMA_BUS_MODE); | ||
77 | |||
78 | /* Mask interrupts by writing to CSR7 */ | ||
79 | writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); | ||
80 | |||
81 | /* The base address of the RX/TX descriptor lists must be written into | ||
82 | * DMA CSR3 and CSR4, respectively. */ | ||
83 | writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR); | ||
84 | writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR); | ||
85 | |||
86 | return 0; | ||
87 | } | ||
88 | |||
89 | /* Transmit FIFO flush operation */ | ||
90 | static void gmac_flush_tx_fifo(unsigned long ioaddr) | ||
91 | { | ||
92 | u32 csr6 = readl(ioaddr + DMA_CONTROL); | ||
93 | writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL); | ||
94 | |||
95 | do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF)); | ||
96 | } | ||
97 | |||
98 | static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode, | ||
99 | int rxmode) | ||
100 | { | ||
101 | u32 csr6 = readl(ioaddr + DMA_CONTROL); | ||
102 | |||
103 | if (txmode == SF_DMA_MODE) { | ||
104 | DBG(KERN_DEBUG "GMAC: enabling TX store and forward mode\n"); | ||
105 | /* Transmit COE type 2 cannot be done in cut-through mode. */ | ||
106 | csr6 |= DMA_CONTROL_TSF; | ||
107 | /* Operating on the second frame increases the performance | ||
108 | * especially when transmit store-and-forward is used. */ | ||
109 | csr6 |= DMA_CONTROL_OSF; | ||
110 | } else { | ||
111 | DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode" | ||
112 | " (threshold = %d)\n", txmode); | ||
113 | csr6 &= ~DMA_CONTROL_TSF; | ||
114 | csr6 &= DMA_CONTROL_TC_TX_MASK; | ||
115 | /* Set the transmit threshold */ | ||
116 | if (txmode <= 32) | ||
117 | csr6 |= DMA_CONTROL_TTC_32; | ||
118 | else if (txmode <= 64) | ||
119 | csr6 |= DMA_CONTROL_TTC_64; | ||
120 | else if (txmode <= 128) | ||
121 | csr6 |= DMA_CONTROL_TTC_128; | ||
122 | else if (txmode <= 192) | ||
123 | csr6 |= DMA_CONTROL_TTC_192; | ||
124 | else | ||
125 | csr6 |= DMA_CONTROL_TTC_256; | ||
126 | } | ||
127 | |||
128 | if (rxmode == SF_DMA_MODE) { | ||
129 | DBG(KERN_DEBUG "GMAC: enabling RX store and forward mode\n"); | ||
130 | csr6 |= DMA_CONTROL_RSF; | ||
131 | } else { | ||
132 | DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode" | ||
133 | " (threshold = %d)\n", rxmode); | ||
134 | csr6 &= ~DMA_CONTROL_RSF; | ||
135 | csr6 &= DMA_CONTROL_TC_RX_MASK; | ||
136 | if (rxmode <= 32) | ||
137 | csr6 |= DMA_CONTROL_RTC_32; | ||
138 | else if (rxmode <= 64) | ||
139 | csr6 |= DMA_CONTROL_RTC_64; | ||
140 | else if (rxmode <= 96) | ||
141 | csr6 |= DMA_CONTROL_RTC_96; | ||
142 | else | ||
143 | csr6 |= DMA_CONTROL_RTC_128; | ||
144 | } | ||
145 | |||
146 | writel(csr6, ioaddr + DMA_CONTROL); | ||
147 | return; | ||
148 | } | ||
149 | |||
150 | /* Not yet implemented --- no RMON module */ | ||
151 | static void gmac_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x, | ||
152 | unsigned long ioaddr) | ||
153 | { | ||
154 | return; | ||
155 | } | ||
156 | |||
157 | static void gmac_dump_dma_regs(unsigned long ioaddr) | ||
158 | { | ||
159 | int i; | ||
160 | pr_info(" DMA registers\n"); | ||
161 | for (i = 0; i < 22; i++) { | ||
162 | if ((i < 9) || (i > 17)) { | ||
163 | int offset = i * 4; | ||
164 | pr_err("\t Reg No. %d (offset 0x%x): 0x%08x\n", i, | ||
165 | (DMA_BUS_MODE + offset), | ||
166 | readl(ioaddr + DMA_BUS_MODE + offset)); | ||
167 | } | ||
168 | } | ||
169 | return; | ||
170 | } | ||
171 | |||
172 | static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x, | ||
173 | struct dma_desc *p, unsigned long ioaddr) | ||
174 | { | ||
175 | int ret = 0; | ||
176 | struct net_device_stats *stats = (struct net_device_stats *)data; | ||
177 | |||
178 | if (unlikely(p->des01.etx.error_summary)) { | ||
179 | DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx); | ||
180 | if (unlikely(p->des01.etx.jabber_timeout)) { | ||
181 | DBG(KERN_ERR "\tjabber_timeout error\n"); | ||
182 | x->tx_jabber++; | ||
183 | } | ||
184 | |||
185 | if (unlikely(p->des01.etx.frame_flushed)) { | ||
186 | DBG(KERN_ERR "\tframe_flushed error\n"); | ||
187 | x->tx_frame_flushed++; | ||
188 | gmac_flush_tx_fifo(ioaddr); | ||
189 | } | ||
190 | |||
191 | if (unlikely(p->des01.etx.loss_carrier)) { | ||
192 | DBG(KERN_ERR "\tloss_carrier error\n"); | ||
193 | x->tx_losscarrier++; | ||
194 | stats->tx_carrier_errors++; | ||
195 | } | ||
196 | if (unlikely(p->des01.etx.no_carrier)) { | ||
197 | DBG(KERN_ERR "\tno_carrier error\n"); | ||
198 | x->tx_carrier++; | ||
199 | stats->tx_carrier_errors++; | ||
200 | } | ||
201 | if (unlikely(p->des01.etx.late_collision)) { | ||
202 | DBG(KERN_ERR "\tlate_collision error\n"); | ||
203 | stats->collisions += p->des01.etx.collision_count; | ||
204 | } | ||
205 | if (unlikely(p->des01.etx.excessive_collisions)) { | ||
206 | DBG(KERN_ERR "\texcessive_collisions\n"); | ||
207 | stats->collisions += p->des01.etx.collision_count; | ||
208 | } | ||
209 | if (unlikely(p->des01.etx.excessive_deferral)) { | ||
210 | DBG(KERN_INFO "\texcessive tx_deferral\n"); | ||
211 | x->tx_deferred++; | ||
212 | } | ||
213 | |||
214 | if (unlikely(p->des01.etx.underflow_error)) { | ||
215 | DBG(KERN_ERR "\tunderflow error\n"); | ||
216 | gmac_flush_tx_fifo(ioaddr); | ||
217 | x->tx_underflow++; | ||
218 | } | ||
219 | |||
220 | if (unlikely(p->des01.etx.ip_header_error)) { | ||
221 | DBG(KERN_ERR "\tTX IP header csum error\n"); | ||
222 | x->tx_ip_header_error++; | ||
223 | } | ||
224 | |||
225 | if (unlikely(p->des01.etx.payload_error)) { | ||
226 | DBG(KERN_ERR "\tAddr/Payload csum error\n"); | ||
227 | x->tx_payload_error++; | ||
228 | gmac_flush_tx_fifo(ioaddr); | ||
229 | } | ||
230 | |||
231 | ret = -1; | ||
232 | } | ||
233 | |||
234 | if (unlikely(p->des01.etx.deferred)) { | ||
235 | DBG(KERN_INFO "GMAC TX status: tx deferred\n"); | ||
236 | x->tx_deferred++; | ||
237 | } | ||
238 | #ifdef STMMAC_VLAN_TAG_USED | ||
239 | if (p->des01.etx.vlan_frame) { | ||
240 | DBG(KERN_INFO "GMAC TX status: VLAN frame\n"); | ||
241 | x->tx_vlan++; | ||
242 | } | ||
243 | #endif | ||
244 | |||
245 | return ret; | ||
246 | } | ||
247 | |||
248 | static int gmac_get_tx_len(struct dma_desc *p) | ||
249 | { | ||
250 | return p->des01.etx.buffer1_size; | ||
251 | } | ||
252 | |||
253 | static int gmac_coe_rdes0(int ipc_err, int type, int payload_err) | ||
254 | { | ||
255 | int ret = good_frame; | ||
256 | u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7; | ||
257 | |||
258 | /* bits 5 7 0 | Frame status | ||
259 | * ---------------------------------------------------------- | ||
260 | * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octets) | ||
261 | * 1 0 0 | IPv4/6 No CSUM errors. | ||
262 | * 1 0 1 | IPv4/6 CSUM PAYLOAD error | ||
263 | * 1 1 0 | IPv4/6 CSUM IP HDR error | ||
264 | * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errors | ||
265 | * 0 0 1 | IPv4/6 unsupported IP PAYLOAD | ||
266 | * 0 1 1 | COE bypassed: no IPv4/6 frame | ||
267 | * 0 1 0 | Reserved. | ||
268 | */ | ||
269 | if (status == 0x0) { | ||
270 | DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n"); | ||
271 | ret = good_frame; | ||
272 | } else if (status == 0x4) { | ||
273 | DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n"); | ||
274 | ret = good_frame; | ||
275 | } else if (status == 0x5) { | ||
276 | DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n"); | ||
277 | ret = csum_none; | ||
278 | } else if (status == 0x6) { | ||
279 | DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n"); | ||
280 | ret = csum_none; | ||
281 | } else if (status == 0x7) { | ||
282 | DBG(KERN_ERR | ||
283 | "RX Des0 status: IPv4/6 Header and Payload Error.\n"); | ||
284 | ret = csum_none; | ||
285 | } else if (status == 0x1) { | ||
286 | DBG(KERN_ERR | ||
287 | "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n"); | ||
288 | ret = discard_frame; | ||
289 | } else if (status == 0x3) { | ||
290 | DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n"); | ||
291 | ret = discard_frame; | ||
292 | } | ||
293 | return ret; | ||
294 | } | ||
295 | |||
296 | static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x, | ||
297 | struct dma_desc *p) | ||
298 | { | ||
299 | int ret = good_frame; | ||
300 | struct net_device_stats *stats = (struct net_device_stats *)data; | ||
301 | |||
302 | if (unlikely(p->des01.erx.error_summary)) { | ||
303 | DBG(KERN_ERR "GMAC RX Error Summary... 0x%08x\n", p->des01.erx); | ||
304 | if (unlikely(p->des01.erx.descriptor_error)) { | ||
305 | DBG(KERN_ERR "\tdescriptor error\n"); | ||
306 | x->rx_desc++; | ||
307 | stats->rx_length_errors++; | ||
308 | } | ||
309 | if (unlikely(p->des01.erx.overflow_error)) { | ||
310 | DBG(KERN_ERR "\toverflow error\n"); | ||
311 | x->rx_gmac_overflow++; | ||
312 | } | ||
313 | |||
314 | if (unlikely(p->des01.erx.ipc_csum_error)) | ||
315 | DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n"); | ||
316 | |||
317 | if (unlikely(p->des01.erx.late_collision)) { | ||
318 | DBG(KERN_ERR "\tlate_collision error\n"); | ||
319 | stats->collisions++; | ||
320 | stats->collisions++; | ||
321 | } | ||
322 | if (unlikely(p->des01.erx.receive_watchdog)) { | ||
323 | DBG(KERN_ERR "\treceive_watchdog error\n"); | ||
324 | x->rx_watchdog++; | ||
325 | } | ||
326 | if (unlikely(p->des01.erx.error_gmii)) { | ||
327 | DBG(KERN_ERR "\tReceive Error\n"); | ||
328 | x->rx_mii++; | ||
329 | } | ||
330 | if (unlikely(p->des01.erx.crc_error)) { | ||
331 | DBG(KERN_ERR "\tCRC error\n"); | ||
332 | x->rx_crc++; | ||
333 | stats->rx_crc_errors++; | ||
334 | } | ||
335 | ret = discard_frame; | ||
336 | } | ||
337 | |||
338 | /* After a payload csum error, the ES bit is set. | ||
339 | * This does not match the information reported in the databook. | ||
340 | * At any rate, we need to understand if the CSUM hw computation is ok | ||
341 | * and report this info to the upper layers. */ | ||
342 | ret = gmac_coe_rdes0(p->des01.erx.ipc_csum_error, | ||
343 | p->des01.erx.frame_type, p->des01.erx.payload_csum_error); | ||
344 | |||
345 | if (unlikely(p->des01.erx.dribbling)) { | ||
346 | DBG(KERN_ERR "GMAC RX: dribbling error\n"); | ||
347 | ret = discard_frame; | ||
348 | } | ||
349 | if (unlikely(p->des01.erx.sa_filter_fail)) { | ||
350 | DBG(KERN_ERR "GMAC RX : Source Address filter fail\n"); | ||
351 | x->sa_rx_filter_fail++; | ||
352 | ret = discard_frame; | ||
353 | } | ||
354 | if (unlikely(p->des01.erx.da_filter_fail)) { | ||
355 | DBG(KERN_ERR "GMAC RX : Destination Address filter fail\n"); | ||
356 | x->da_rx_filter_fail++; | ||
357 | ret = discard_frame; | ||
358 | } | ||
359 | if (unlikely(p->des01.erx.length_error)) { | ||
360 | DBG(KERN_ERR "GMAC RX: length_error error\n"); | ||
361 | x->rx_lenght++; | ||
362 | ret = discard_frame; | ||
363 | } | ||
364 | #ifdef STMMAC_VLAN_TAG_USED | ||
365 | if (p->des01.erx.vlan_tag) { | ||
366 | DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n"); | ||
367 | x->rx_vlan++; | ||
368 | } | ||
369 | #endif | ||
370 | return ret; | ||
371 | } | ||
372 | |||
373 | static void gmac_irq_status(unsigned long ioaddr) | ||
374 | { | ||
375 | u32 intr_status = readl(ioaddr + GMAC_INT_STATUS); | ||
376 | |||
377 | /* Not used events (e.g. MMC interrupts) are not handled. */ | ||
378 | if ((intr_status & mmc_tx_irq)) | ||
379 | DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n", | ||
380 | readl(ioaddr + GMAC_MMC_TX_INTR)); | ||
381 | if (unlikely(intr_status & mmc_rx_irq)) | ||
382 | DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n", | ||
383 | readl(ioaddr + GMAC_MMC_RX_INTR)); | ||
384 | if (unlikely(intr_status & mmc_rx_csum_offload_irq)) | ||
385 | DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n", | ||
386 | readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD)); | ||
387 | if (unlikely(intr_status & pmt_irq)) { | ||
388 | DBG(KERN_DEBUG "GMAC: received Magic frame\n"); | ||
389 | /* clear the PMT bits 5 and 6 by reading the PMT | ||
390 | * status register. */ | ||
391 | readl(ioaddr + GMAC_PMT); | ||
392 | } | ||
393 | |||
394 | return; | ||
395 | } | ||
396 | |||
397 | static void gmac_core_init(unsigned long ioaddr) | ||
398 | { | ||
399 | u32 value = readl(ioaddr + GMAC_CONTROL); | ||
400 | value |= GMAC_CORE_INIT; | ||
401 | writel(value, ioaddr + GMAC_CONTROL); | ||
402 | |||
403 | /* STBus Bridge Configuration */ | ||
404 | /*writel(0xc5608, ioaddr + 0x00007000);*/ | ||
405 | |||
406 | /* Freeze MMC counters */ | ||
407 | writel(0x8, ioaddr + GMAC_MMC_CTRL); | ||
408 | /* Mask GMAC interrupts */ | ||
409 | writel(0x207, ioaddr + GMAC_INT_MASK); | ||
410 | |||
411 | #ifdef STMMAC_VLAN_TAG_USED | ||
412 | /* Tag detection without filtering */ | ||
413 | writel(0x0, ioaddr + GMAC_VLAN_TAG); | ||
414 | #endif | ||
415 | return; | ||
416 | } | ||
417 | |||
418 | static void gmac_set_umac_addr(unsigned long ioaddr, unsigned char *addr, | ||
419 | unsigned int reg_n) | ||
420 | { | ||
421 | stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n), | ||
422 | GMAC_ADDR_LOW(reg_n)); | ||
423 | } | ||
424 | |||
425 | static void gmac_get_umac_addr(unsigned long ioaddr, unsigned char *addr, | ||
426 | unsigned int reg_n) | ||
427 | { | ||
428 | stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n), | ||
429 | GMAC_ADDR_LOW(reg_n)); | ||
430 | } | ||
431 | |||
432 | static void gmac_set_filter(struct net_device *dev) | ||
433 | { | ||
434 | unsigned long ioaddr = dev->base_addr; | ||
435 | unsigned int value = 0; | ||
436 | |||
437 | DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n", | ||
438 | __func__, dev->mc_count, dev->uc_count); | ||
439 | |||
440 | if (dev->flags & IFF_PROMISC) | ||
441 | value = GMAC_FRAME_FILTER_PR; | ||
442 | else if ((dev->mc_count > HASH_TABLE_SIZE) | ||
443 | || (dev->flags & IFF_ALLMULTI)) { | ||
444 | value = GMAC_FRAME_FILTER_PM; /* pass all multi */ | ||
445 | writel(0xffffffff, ioaddr + GMAC_HASH_HIGH); | ||
446 | writel(0xffffffff, ioaddr + GMAC_HASH_LOW); | ||
447 | } else if (dev->mc_count > 0) { | ||
448 | int i; | ||
449 | u32 mc_filter[2]; | ||
450 | struct dev_mc_list *mclist; | ||
451 | |||
452 | /* Hash filter for multicast */ | ||
453 | value = GMAC_FRAME_FILTER_HMC; | ||
454 | |||
455 | memset(mc_filter, 0, sizeof(mc_filter)); | ||
456 | for (i = 0, mclist = dev->mc_list; | ||
457 | mclist && i < dev->mc_count; i++, mclist = mclist->next) { | ||
458 | /* The upper 6 bits of the calculated CRC are used to | ||
459 | index the contents of the hash table */ | ||
460 | int bit_nr = | ||
461 | bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26; | ||
462 | /* The most significant bit determines the register to | ||
463 | * use (H/L) while the other 5 bits determine the bit | ||
464 | * within the register. */ | ||
465 | mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); | ||
466 | } | ||
467 | writel(mc_filter[0], ioaddr + GMAC_HASH_LOW); | ||
468 | writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH); | ||
469 | } | ||
470 | |||
471 | /* Handle multiple unicast addresses (perfect filtering)*/ | ||
472 | if (dev->uc_count > GMAC_MAX_UNICAST_ADDRESSES) | ||
473 | /* Switch to promiscuous mode if more than 16 addrs | ||
474 | are required */ | ||
475 | value |= GMAC_FRAME_FILTER_PR; | ||
476 | else { | ||
477 | int i; | ||
478 | struct dev_addr_list *uc_ptr = dev->uc_list; | ||
479 | |||
480 | for (i = 0; i < dev->uc_count; i++) { | ||
481 | gmac_set_umac_addr(ioaddr, uc_ptr->da_addr, | ||
482 | i + 1); | ||
483 | |||
484 | DBG(KERN_INFO "\t%d " | ||
485 | "- Unicast addr %02x:%02x:%02x:%02x:%02x:" | ||
486 | "%02x\n", i + 1, | ||
487 | uc_ptr->da_addr[0], uc_ptr->da_addr[1], | ||
488 | uc_ptr->da_addr[2], uc_ptr->da_addr[3], | ||
489 | uc_ptr->da_addr[4], uc_ptr->da_addr[5]); | ||
490 | uc_ptr = uc_ptr->next; | ||
491 | } | ||
492 | } | ||
493 | |||
494 | #ifdef FRAME_FILTER_DEBUG | ||
495 | /* Enable Receive all mode (to debug filtering_fail errors) */ | ||
496 | value |= GMAC_FRAME_FILTER_RA; | ||
497 | #endif | ||
498 | writel(value, ioaddr + GMAC_FRAME_FILTER); | ||
499 | |||
500 | DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: " | ||
501 | "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER), | ||
502 | readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW)); | ||
503 | |||
504 | return; | ||
505 | } | ||
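The CRC-to-bucket mapping used for multicast filtering in gmac_set_filter() above is easier to follow in isolation. The sketch below is plain userspace C with local stand-ins for the kernel's crc32_le() and bitrev32(); the multicast address is an arbitrary example, not one taken from the patch:

#include <stdio.h>
#include <stdint.h>

/* Reflected CRC-32 (poly 0xEDB88320), matching the convention of the
 * kernel's crc32_le() with a ~0 seed and no final inversion. */
static uint32_t crc32_le_sketch(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1u) ? 0xEDB88320u : 0u);
	}
	return crc;
}

static uint32_t bitrev32_sketch(uint32_t x)
{
	uint32_t r = 0;
	for (int i = 0; i < 32; i++)
		r |= ((x >> i) & 1u) << (31 - i);
	return r;
}

int main(void)
{
	/* sample multicast address, purely for illustration */
	const uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t mc_filter[2] = { 0, 0 };

	/* upper 6 bits of the bit-reversed, inverted CRC pick the bucket */
	int bit_nr = bitrev32_sketch(~crc32_le_sketch(~0u, addr, 6)) >> 26;

	/* MSB selects HASH_HIGH vs HASH_LOW, low 5 bits select the bit */
	mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 31);

	printf("bucket %d -> GMAC_HASH_%s, bit %d\n", bit_nr,
	       (bit_nr >> 5) ? "HIGH" : "LOW", bit_nr & 31);
	return 0;
}

The MAC 10/100 variant later in this patch derives the bucket from ether_crc() (the big-endian CRC) shifted right by 26 instead, so only the CRC convention differs between the two cores.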
506 | |||
507 | static void gmac_flow_ctrl(unsigned long ioaddr, unsigned int duplex, | ||
508 | unsigned int fc, unsigned int pause_time) | ||
509 | { | ||
510 | unsigned int flow = 0; | ||
511 | |||
512 | DBG(KERN_DEBUG "GMAC Flow-Control:\n"); | ||
513 | if (fc & FLOW_RX) { | ||
514 | DBG(KERN_DEBUG "\tReceive Flow-Control ON\n"); | ||
515 | flow |= GMAC_FLOW_CTRL_RFE; | ||
516 | } | ||
517 | if (fc & FLOW_TX) { | ||
518 | DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n"); | ||
519 | flow |= GMAC_FLOW_CTRL_TFE; | ||
520 | } | ||
521 | |||
522 | if (duplex) { | ||
523 | DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time); | ||
524 | flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT); | ||
525 | } | ||
526 | |||
527 | writel(flow, ioaddr + GMAC_FLOW_CTRL); | ||
528 | return; | ||
529 | } | ||
530 | |||
531 | static void gmac_pmt(unsigned long ioaddr, unsigned long mode) | ||
532 | { | ||
533 | unsigned int pmt = 0; | ||
534 | |||
535 | if (mode == WAKE_MAGIC) { | ||
536 | DBG(KERN_DEBUG "GMAC: WOL Magic frame\n"); | ||
537 | pmt |= power_down | magic_pkt_en; | ||
538 | } else if (mode == WAKE_UCAST) { | ||
539 | DBG(KERN_DEBUG "GMAC: WOL on global unicast\n"); | ||
540 | pmt |= global_unicast; | ||
541 | } | ||
542 | |||
543 | writel(pmt, ioaddr + GMAC_PMT); | ||
544 | return; | ||
545 | } | ||
546 | |||
547 | static void gmac_init_rx_desc(struct dma_desc *p, unsigned int ring_size, | ||
548 | int disable_rx_ic) | ||
549 | { | ||
550 | int i; | ||
551 | for (i = 0; i < ring_size; i++) { | ||
552 | p->des01.erx.own = 1; | ||
553 | p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1; | ||
554 | /* To support jumbo frames */ | ||
555 | p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1; | ||
556 | if (i == ring_size - 1) | ||
557 | p->des01.erx.end_ring = 1; | ||
558 | if (disable_rx_ic) | ||
559 | p->des01.erx.disable_ic = 1; | ||
560 | p++; | ||
561 | } | ||
562 | return; | ||
563 | } | ||
564 | |||
565 | static void gmac_init_tx_desc(struct dma_desc *p, unsigned int ring_size) | ||
566 | { | ||
567 | int i; | ||
568 | |||
569 | for (i = 0; i < ring_size; i++) { | ||
570 | p->des01.etx.own = 0; | ||
571 | if (i == ring_size - 1) | ||
572 | p->des01.etx.end_ring = 1; | ||
573 | p++; | ||
574 | } | ||
575 | |||
576 | return; | ||
577 | } | ||
578 | |||
579 | static int gmac_get_tx_owner(struct dma_desc *p) | ||
580 | { | ||
581 | return p->des01.etx.own; | ||
582 | } | ||
583 | |||
584 | static int gmac_get_rx_owner(struct dma_desc *p) | ||
585 | { | ||
586 | return p->des01.erx.own; | ||
587 | } | ||
588 | |||
589 | static void gmac_set_tx_owner(struct dma_desc *p) | ||
590 | { | ||
591 | p->des01.etx.own = 1; | ||
592 | } | ||
593 | |||
594 | static void gmac_set_rx_owner(struct dma_desc *p) | ||
595 | { | ||
596 | p->des01.erx.own = 1; | ||
597 | } | ||
598 | |||
599 | static int gmac_get_tx_ls(struct dma_desc *p) | ||
600 | { | ||
601 | return p->des01.etx.last_segment; | ||
602 | } | ||
603 | |||
604 | static void gmac_release_tx_desc(struct dma_desc *p) | ||
605 | { | ||
606 | int ter = p->des01.etx.end_ring; | ||
607 | |||
608 | memset(p, 0, sizeof(struct dma_desc)); | ||
609 | p->des01.etx.end_ring = ter; | ||
610 | |||
611 | return; | ||
612 | } | ||
613 | |||
614 | static void gmac_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, | ||
615 | int csum_flag) | ||
616 | { | ||
617 | p->des01.etx.first_segment = is_fs; | ||
618 | if (unlikely(len > BUF_SIZE_4KiB)) { | ||
619 | p->des01.etx.buffer1_size = BUF_SIZE_4KiB; | ||
620 | p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB; | ||
621 | } else { | ||
622 | p->des01.etx.buffer1_size = len; | ||
623 | } | ||
624 | if (likely(csum_flag)) | ||
625 | p->des01.etx.checksum_insertion = cic_full; | ||
626 | } | ||
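When a payload exceeds 4 KiB, gmac_prepare_tx_desc() above spreads it across the descriptor's two buffers. A quick arithmetic check with an illustrative length, assuming BUF_SIZE_4KiB is 4096 as its name suggests (the constant itself is defined elsewhere in the patch):

#include <stdio.h>

#define BUF_SIZE_4KiB 4096	/* assumed value of the driver's constant */

int main(void)
{
	int len = 6000;		/* illustrative jumbo-frame payload length */
	int b1 = len > BUF_SIZE_4KiB ? BUF_SIZE_4KiB : len;
	int b2 = len > BUF_SIZE_4KiB ? len - BUF_SIZE_4KiB : 0;

	printf("buffer1_size = %d, buffer2_size = %d\n", b1, b2); /* 4096, 1904 */
	return 0;
}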
627 | |||
628 | static void gmac_clear_tx_ic(struct dma_desc *p) | ||
629 | { | ||
630 | p->des01.etx.interrupt = 0; | ||
631 | } | ||
632 | |||
633 | static void gmac_close_tx_desc(struct dma_desc *p) | ||
634 | { | ||
635 | p->des01.etx.last_segment = 1; | ||
636 | p->des01.etx.interrupt = 1; | ||
637 | } | ||
638 | |||
639 | static int gmac_get_rx_frame_len(struct dma_desc *p) | ||
640 | { | ||
641 | return p->des01.erx.frame_length; | ||
642 | } | ||
643 | |||
644 | struct stmmac_ops gmac_driver = { | ||
645 | .core_init = gmac_core_init, | ||
646 | .dump_mac_regs = gmac_dump_regs, | ||
647 | .dma_init = gmac_dma_init, | ||
648 | .dump_dma_regs = gmac_dump_dma_regs, | ||
649 | .dma_mode = gmac_dma_operation_mode, | ||
650 | .dma_diagnostic_fr = gmac_dma_diagnostic_fr, | ||
651 | .tx_status = gmac_get_tx_frame_status, | ||
652 | .rx_status = gmac_get_rx_frame_status, | ||
653 | .get_tx_len = gmac_get_tx_len, | ||
654 | .set_filter = gmac_set_filter, | ||
655 | .flow_ctrl = gmac_flow_ctrl, | ||
656 | .pmt = gmac_pmt, | ||
657 | .init_rx_desc = gmac_init_rx_desc, | ||
658 | .init_tx_desc = gmac_init_tx_desc, | ||
659 | .get_tx_owner = gmac_get_tx_owner, | ||
660 | .get_rx_owner = gmac_get_rx_owner, | ||
661 | .release_tx_desc = gmac_release_tx_desc, | ||
662 | .prepare_tx_desc = gmac_prepare_tx_desc, | ||
663 | .clear_tx_ic = gmac_clear_tx_ic, | ||
664 | .close_tx_desc = gmac_close_tx_desc, | ||
665 | .get_tx_ls = gmac_get_tx_ls, | ||
666 | .set_tx_owner = gmac_set_tx_owner, | ||
667 | .set_rx_owner = gmac_set_rx_owner, | ||
668 | .get_rx_frame_len = gmac_get_rx_frame_len, | ||
669 | .host_irq_status = gmac_irq_status, | ||
670 | .set_umac_addr = gmac_set_umac_addr, | ||
671 | .get_umac_addr = gmac_get_umac_addr, | ||
672 | }; | ||
673 | |||
674 | struct mac_device_info *gmac_setup(unsigned long ioaddr) | ||
675 | { | ||
676 | struct mac_device_info *mac; | ||
677 | u32 uid = readl(ioaddr + GMAC_VERSION); | ||
678 | |||
679 | pr_info("\tGMAC - user ID: 0x%x, Synopsys ID: 0x%x\n", | ||
680 | ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff)); | ||
681 | |||
682 | mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); | ||
683 | |||
684 | mac->ops = &gmac_driver; | ||
685 | mac->hw.pmt = PMT_SUPPORTED; | ||
686 | mac->hw.link.port = GMAC_CONTROL_PS; | ||
687 | mac->hw.link.duplex = GMAC_CONTROL_DM; | ||
688 | mac->hw.link.speed = GMAC_CONTROL_FES; | ||
689 | mac->hw.mii.addr = GMAC_MII_ADDR; | ||
690 | mac->hw.mii.data = GMAC_MII_DATA; | ||
691 | |||
692 | return mac; | ||
693 | } | ||
diff --git a/drivers/net/stmmac/gmac.h b/drivers/net/stmmac/gmac.h new file mode 100644 index 000000000000..684a363120a9 --- /dev/null +++ b/drivers/net/stmmac/gmac.h | |||
@@ -0,0 +1,204 @@ | |||
1 | /******************************************************************************* | ||
2 | Copyright (C) 2007-2009 STMicroelectronics Ltd | ||
3 | |||
4 | This program is free software; you can redistribute it and/or modify it | ||
5 | under the terms and conditions of the GNU General Public License, | ||
6 | version 2, as published by the Free Software Foundation. | ||
7 | |||
8 | This program is distributed in the hope it will be useful, but WITHOUT | ||
9 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
11 | more details. | ||
12 | |||
13 | You should have received a copy of the GNU General Public License along with | ||
14 | this program; if not, write to the Free Software Foundation, Inc., | ||
15 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
16 | |||
17 | The full GNU General Public License is included in this distribution in | ||
18 | the file called "COPYING". | ||
19 | |||
20 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> | ||
21 | *******************************************************************************/ | ||
22 | |||
23 | #define GMAC_CONTROL 0x00000000 /* Configuration */ | ||
24 | #define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */ | ||
25 | #define GMAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */ | ||
26 | #define GMAC_HASH_LOW 0x0000000c /* Multicast Hash Table Low */ | ||
27 | #define GMAC_MII_ADDR 0x00000010 /* MII Address */ | ||
28 | #define GMAC_MII_DATA 0x00000014 /* MII Data */ | ||
29 | #define GMAC_FLOW_CTRL 0x00000018 /* Flow Control */ | ||
30 | #define GMAC_VLAN_TAG 0x0000001c /* VLAN Tag */ | ||
31 | #define GMAC_VERSION 0x00000020 /* GMAC CORE Version */ | ||
32 | #define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */ | ||
33 | |||
34 | #define GMAC_INT_STATUS 0x00000038 /* interrupt status register */ | ||
35 | enum gmac_irq_status { | ||
36 | time_stamp_irq = 0x0200, | ||
37 | mmc_rx_csum_offload_irq = 0x0080, | ||
38 | mmc_tx_irq = 0x0040, | ||
39 | mmc_rx_irq = 0x0020, | ||
40 | mmc_irq = 0x0010, | ||
41 | pmt_irq = 0x0008, | ||
42 | pcs_ane_irq = 0x0004, | ||
43 | pcs_link_irq = 0x0002, | ||
44 | rgmii_irq = 0x0001, | ||
45 | }; | ||
46 | #define GMAC_INT_MASK 0x0000003c /* interrupt mask register */ | ||
47 | |||
48 | /* PMT Control and Status */ | ||
49 | #define GMAC_PMT 0x0000002c | ||
50 | enum power_event { | ||
51 | pointer_reset = 0x80000000, | ||
52 | global_unicast = 0x00000200, | ||
53 | wake_up_rx_frame = 0x00000040, | ||
54 | magic_frame = 0x00000020, | ||
55 | wake_up_frame_en = 0x00000004, | ||
56 | magic_pkt_en = 0x00000002, | ||
57 | power_down = 0x00000001, | ||
58 | }; | ||
59 | |||
60 | /* GMAC HW ADDR regs */ | ||
61 | #define GMAC_ADDR_HIGH(reg) (0x00000040+(reg * 8)) | ||
62 | #define GMAC_ADDR_LOW(reg) (0x00000044+(reg * 8)) | ||
63 | #define GMAC_MAX_UNICAST_ADDRESSES 16 | ||
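Each perfect-filter slot is a HIGH/LOW register pair at the offsets above; the actual writes are done by the shared stmmac_set_mac_addr() helper, which is outside this hunk. The sketch below shows the byte packing commonly used by these cores (last two address bytes in the HIGH word, first four in the LOW word) and should be read as an assumption about that helper rather than a copy of it:

#include <stdio.h>
#include <stdint.h>

#define GMAC_ADDR_HIGH(reg) (0x00000040 + (reg) * 8)
#define GMAC_ADDR_LOW(reg)  (0x00000044 + (reg) * 8)

int main(void)
{
	/* example station address, illustration only */
	const uint8_t a[6] = { 0x00, 0x80, 0xe1, 0x12, 0x34, 0x56 };

	/* assumed packing: a[5..4] in the HIGH register, a[3..0] in LOW */
	uint32_t hi = (a[5] << 8) | a[4];
	uint32_t lo = ((uint32_t)a[3] << 24) | (a[2] << 16) | (a[1] << 8) | a[0];

	printf("write 0x%08x to offset 0x%02x, 0x%08x to offset 0x%02x\n",
	       hi, (unsigned)GMAC_ADDR_HIGH(0), lo, (unsigned)GMAC_ADDR_LOW(0));
	return 0;
}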
64 | |||
65 | #define GMAC_AN_CTRL 0x000000c0 /* AN control */ | ||
66 | #define GMAC_AN_STATUS 0x000000c4 /* AN status */ | ||
67 | #define GMAC_ANE_ADV 0x000000c8 /* Auto-Neg. Advertisement */ | ||
68 | #define GMAC_ANE_LINK 0x000000cc /* Auto-Neg. link partner ability */ | ||
69 | #define GMAC_ANE_EXP 0x000000d0 /* ANE expansion */ | ||
70 | #define GMAC_TBI 0x000000d4 /* TBI extend status */ | ||
71 | #define GMAC_GMII_STATUS 0x000000d8 /* S/R-GMII status */ | ||
72 | |||
73 | /* GMAC Configuration defines */ | ||
74 | #define GMAC_CONTROL_TC 0x01000000 /* Transmit Conf. in RGMII/SGMII */ | ||
75 | #define GMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on receive */ | ||
76 | #define GMAC_CONTROL_JD 0x00400000 /* Jabber disable */ | ||
77 | #define GMAC_CONTROL_BE 0x00200000 /* Frame Burst Enable */ | ||
78 | #define GMAC_CONTROL_JE 0x00100000 /* Jumbo frame */ | ||
79 | enum inter_frame_gap { | ||
80 | GMAC_CONTROL_IFG_88 = 0x00040000, | ||
81 | GMAC_CONTROL_IFG_80 = 0x00020000, | ||
82 | GMAC_CONTROL_IFG_40 = 0x000e0000, | ||
83 | }; | ||
84 | #define GMAC_CONTROL_DCRS 0x00010000 /* Disable carrier sense during tx */ | ||
85 | #define GMAC_CONTROL_PS 0x00008000 /* Port Select 0:GMII 1:MII */ | ||
86 | #define GMAC_CONTROL_FES 0x00004000 /* Speed 0:10 1:100 */ | ||
87 | #define GMAC_CONTROL_DO 0x00002000 /* Disable Rx Own */ | ||
88 | #define GMAC_CONTROL_LM 0x00001000 /* Loop-back mode */ | ||
89 | #define GMAC_CONTROL_DM 0x00000800 /* Duplex Mode */ | ||
90 | #define GMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */ | ||
91 | #define GMAC_CONTROL_DR 0x00000200 /* Disable Retry */ | ||
92 | #define GMAC_CONTROL_LUD 0x00000100 /* Link up/down */ | ||
93 | #define GMAC_CONTROL_ACS 0x00000080 /* Automatic Pad Stripping */ | ||
94 | #define GMAC_CONTROL_DC 0x00000010 /* Deferral Check */ | ||
95 | #define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */ | ||
96 | #define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */ | ||
97 | |||
98 | #define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \ | ||
99 | GMAC_CONTROL_IPC | GMAC_CONTROL_JE | GMAC_CONTROL_BE) | ||
100 | |||
101 | /* GMAC Frame Filter defines */ | ||
102 | #define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */ | ||
103 | #define GMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */ | ||
104 | #define GMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */ | ||
105 | #define GMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */ | ||
106 | #define GMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */ | ||
107 | #define GMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */ | ||
108 | #define GMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */ | ||
109 | #define GMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */ | ||
110 | #define GMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */ | ||
111 | #define GMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */ | ||
112 | /* GMII ADDR defines */ | ||
113 | #define GMAC_MII_ADDR_WRITE 0x00000002 /* MII Write */ | ||
114 | #define GMAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */ | ||
115 | /* GMAC FLOW CTRL defines */ | ||
116 | #define GMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */ | ||
117 | #define GMAC_FLOW_CTRL_PT_SHIFT 16 | ||
118 | #define GMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */ | ||
119 | #define GMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */ | ||
120 | #define GMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */ | ||
121 | |||
122 | /*--- DMA BLOCK defines ---*/ | ||
123 | /* DMA Bus Mode register defines */ | ||
124 | #define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */ | ||
125 | #define DMA_BUS_MODE_DA 0x00000002 /* Arbitration scheme */ | ||
126 | #define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */ | ||
127 | #define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */ | ||
128 | /* Programmable burst length (passed through platform) */ | ||
129 | #define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */ | ||
130 | #define DMA_BUS_MODE_PBL_SHIFT 8 | ||
131 | |||
132 | enum rx_tx_priority_ratio { | ||
133 | double_ratio = 0x00004000, /*2:1 */ | ||
134 | triple_ratio = 0x00008000, /*3:1 */ | ||
135 | quadruple_ratio = 0x0000c000, /*4:1 */ | ||
136 | }; | ||
137 | |||
138 | #define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */ | ||
139 | #define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */ | ||
140 | #define DMA_BUS_MODE_RPBL_SHIFT 17 | ||
141 | #define DMA_BUS_MODE_USP 0x00800000 | ||
142 | #define DMA_BUS_MODE_4PBL 0x01000000 | ||
143 | #define DMA_BUS_MODE_AAL 0x02000000 | ||
144 | |||
145 | /* DMA CRS Control and Status Register Mapping */ | ||
146 | #define DMA_HOST_TX_DESC 0x00001048 /* Current Host Tx descriptor */ | ||
147 | #define DMA_HOST_RX_DESC 0x0000104c /* Current Host Rx descriptor */ | ||
148 | /* DMA Bus Mode register defines */ | ||
149 | #define DMA_BUS_PR_RATIO_MASK 0x0000c000 /* Rx/Tx priority ratio */ | ||
150 | #define DMA_BUS_PR_RATIO_SHIFT 14 | ||
151 | #define DMA_BUS_FB 0x00010000 /* Fixed Burst */ | ||
152 | |||
153 | /* DMA operation mode defines (start/stop tx/rx are placed in common header)*/ | ||
154 | #define DMA_CONTROL_DT 0x04000000 /* Disable Drop TCP/IP csum error */ | ||
155 | #define DMA_CONTROL_RSF 0x02000000 /* Receive Store and Forward */ | ||
156 | #define DMA_CONTROL_DFF 0x01000000 /* Disable flushing */ | ||
157 | /* Threshold for Activating the FC */ | ||
158 | enum rfa { | ||
159 | act_full_minus_1 = 0x00800000, | ||
160 | act_full_minus_2 = 0x00800200, | ||
161 | act_full_minus_3 = 0x00800400, | ||
162 | act_full_minus_4 = 0x00800600, | ||
163 | }; | ||
164 | /* Threshold for Deactivating the FC */ | ||
165 | enum rfd { | ||
166 | deac_full_minus_1 = 0x00400000, | ||
167 | deac_full_minus_2 = 0x00400800, | ||
168 | deac_full_minus_3 = 0x00401000, | ||
169 | deac_full_minus_4 = 0x00401800, | ||
170 | }; | ||
171 | #define DMA_CONTROL_TSF 0x00200000 /* Transmit Store and Forward */ | ||
172 | #define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */ | ||
173 | |||
174 | enum ttc_control { | ||
175 | DMA_CONTROL_TTC_64 = 0x00000000, | ||
176 | DMA_CONTROL_TTC_128 = 0x00004000, | ||
177 | DMA_CONTROL_TTC_192 = 0x00008000, | ||
178 | DMA_CONTROL_TTC_256 = 0x0000c000, | ||
179 | DMA_CONTROL_TTC_40 = 0x00010000, | ||
180 | DMA_CONTROL_TTC_32 = 0x00014000, | ||
181 | DMA_CONTROL_TTC_24 = 0x00018000, | ||
182 | DMA_CONTROL_TTC_16 = 0x0001c000, | ||
183 | }; | ||
184 | #define DMA_CONTROL_TC_TX_MASK 0xfffe3fff | ||
185 | |||
186 | #define DMA_CONTROL_EFC 0x00000100 | ||
187 | #define DMA_CONTROL_FEF 0x00000080 | ||
188 | #define DMA_CONTROL_FUF 0x00000040 | ||
189 | |||
190 | enum rtc_control { | ||
191 | DMA_CONTROL_RTC_64 = 0x00000000, | ||
192 | DMA_CONTROL_RTC_32 = 0x00000008, | ||
193 | DMA_CONTROL_RTC_96 = 0x00000010, | ||
194 | DMA_CONTROL_RTC_128 = 0x00000018, | ||
195 | }; | ||
196 | #define DMA_CONTROL_TC_RX_MASK 0xffffffe7 | ||
197 | |||
198 | #define DMA_CONTROL_OSF 0x00000004 /* Operate on second frame */ | ||
199 | |||
200 | /* MMC registers offset */ | ||
201 | #define GMAC_MMC_CTRL 0x100 | ||
202 | #define GMAC_MMC_RX_INTR 0x104 | ||
203 | #define GMAC_MMC_TX_INTR 0x108 | ||
204 | #define GMAC_MMC_RX_CSUM_OFFLOAD 0x208 | ||
diff --git a/drivers/net/stmmac/mac100.c b/drivers/net/stmmac/mac100.c new file mode 100644 index 000000000000..625171b6062b --- /dev/null +++ b/drivers/net/stmmac/mac100.c | |||
@@ -0,0 +1,517 @@ | |||
1 | /******************************************************************************* | ||
2 | This is the driver for the MAC 10/100 on-chip Ethernet controller | ||
3 | currently tested on all the ST boards based on STb7109 and stx7200 SoCs. | ||
4 | |||
5 | DWC Ether MAC 10/100 Universal version 4.0 has been used for developing | ||
6 | this code. | ||
7 | |||
8 | Copyright (C) 2007-2009 STMicroelectronics Ltd | ||
9 | |||
10 | This program is free software; you can redistribute it and/or modify it | ||
11 | under the terms and conditions of the GNU General Public License, | ||
12 | version 2, as published by the Free Software Foundation. | ||
13 | |||
14 | This program is distributed in the hope it will be useful, but WITHOUT | ||
15 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
16 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
17 | more details. | ||
18 | |||
19 | You should have received a copy of the GNU General Public License along with | ||
20 | this program; if not, write to the Free Software Foundation, Inc., | ||
21 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
22 | |||
23 | The full GNU General Public License is included in this distribution in | ||
24 | the file called "COPYING". | ||
25 | |||
26 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> | ||
27 | *******************************************************************************/ | ||
28 | |||
29 | #include <linux/netdevice.h> | ||
30 | #include <linux/crc32.h> | ||
31 | #include <linux/mii.h> | ||
32 | #include <linux/phy.h> | ||
33 | |||
34 | #include "common.h" | ||
35 | #include "mac100.h" | ||
36 | |||
37 | #undef MAC100_DEBUG | ||
38 | /*#define MAC100_DEBUG*/ | ||
39 | #ifdef MAC100_DEBUG | ||
40 | #define DBG(fmt, args...) printk(fmt, ## args) | ||
41 | #else | ||
42 | #define DBG(fmt, args...) do { } while (0) | ||
43 | #endif | ||
44 | |||
45 | static void mac100_core_init(unsigned long ioaddr) | ||
46 | { | ||
47 | u32 value = readl(ioaddr + MAC_CONTROL); | ||
48 | |||
49 | writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL); | ||
50 | |||
51 | #ifdef STMMAC_VLAN_TAG_USED | ||
52 | writel(ETH_P_8021Q, ioaddr + MAC_VLAN1); | ||
53 | #endif | ||
54 | return; | ||
55 | } | ||
56 | |||
57 | static void mac100_dump_mac_regs(unsigned long ioaddr) | ||
58 | { | ||
59 | pr_info("\t----------------------------------------------\n" | ||
60 | "\t MAC100 CSR (base addr = 0x%8x)\n" | ||
61 | "\t----------------------------------------------\n", | ||
62 | (unsigned int)ioaddr); | ||
63 | pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL, | ||
64 | readl(ioaddr + MAC_CONTROL)); | ||
65 | pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH, | ||
66 | readl(ioaddr + MAC_ADDR_HIGH)); | ||
67 | pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW, | ||
68 | readl(ioaddr + MAC_ADDR_LOW)); | ||
69 | pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n", | ||
70 | MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH)); | ||
71 | pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n", | ||
72 | MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW)); | ||
73 | pr_info("\tflow control (offset 0x%x): 0x%08x\n", | ||
74 | MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL)); | ||
75 | pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1, | ||
76 | readl(ioaddr + MAC_VLAN1)); | ||
77 | pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2, | ||
78 | readl(ioaddr + MAC_VLAN2)); | ||
79 | pr_info("\n\tMAC management counter registers\n"); | ||
80 | pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n", | ||
81 | MMC_CONTROL, readl(ioaddr + MMC_CONTROL)); | ||
82 | pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n", | ||
83 | MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR)); | ||
84 | pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n", | ||
85 | MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR)); | ||
86 | pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n", | ||
87 | MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK)); | ||
88 | pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n", | ||
89 | MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK)); | ||
90 | return; | ||
91 | } | ||
92 | |||
93 | static int mac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, | ||
94 | u32 dma_rx) | ||
95 | { | ||
96 | u32 value = readl(ioaddr + DMA_BUS_MODE); | ||
97 | /* DMA SW reset */ | ||
98 | value |= DMA_BUS_MODE_SFT_RESET; | ||
99 | writel(value, ioaddr + DMA_BUS_MODE); | ||
100 | do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)); | ||
101 | |||
102 | /* Enable Application Access by writing to DMA CSR0 */ | ||
103 | writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT), | ||
104 | ioaddr + DMA_BUS_MODE); | ||
105 | |||
106 | /* Mask interrupts by writing to CSR7 */ | ||
107 | writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); | ||
108 | |||
109 | /* The base address of the RX/TX descriptor lists must be written into | ||
110 | * DMA CSR3 and CSR4, respectively. */ | ||
111 | writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR); | ||
112 | writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR); | ||
113 | |||
114 | return 0; | ||
115 | } | ||
116 | |||
117 | /* Store and Forward capability is not used at all. | ||
118 | * The transmit threshold can be programmed by | ||
119 | * setting the TTC bits in the DMA control register. */ | ||
120 | static void mac100_dma_operation_mode(unsigned long ioaddr, int txmode, | ||
121 | int rxmode) | ||
122 | { | ||
123 | u32 csr6 = readl(ioaddr + DMA_CONTROL); | ||
124 | |||
125 | if (txmode <= 32) | ||
126 | csr6 |= DMA_CONTROL_TTC_32; | ||
127 | else if (txmode <= 64) | ||
128 | csr6 |= DMA_CONTROL_TTC_64; | ||
129 | else | ||
130 | csr6 |= DMA_CONTROL_TTC_128; | ||
131 | |||
132 | writel(csr6, ioaddr + DMA_CONTROL); | ||
133 | |||
134 | return; | ||
135 | } | ||
136 | |||
137 | static void mac100_dump_dma_regs(unsigned long ioaddr) | ||
138 | { | ||
139 | int i; | ||
140 | |||
141 | DBG(KERN_DEBUG "MAC100 DMA CSR \n"); | ||
142 | for (i = 0; i < 9; i++) | ||
143 | pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i, | ||
144 | (DMA_BUS_MODE + i * 4), | ||
145 | readl(ioaddr + DMA_BUS_MODE + i * 4)); | ||
146 | DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n", | ||
147 | DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR)); | ||
148 | DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n", | ||
149 | DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR)); | ||
150 | return; | ||
151 | } | ||
152 | |||
153 | /* The DMA controller has two counters to track the number | ||
154 | of missed receive frames. */ | ||
155 | static void mac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x, | ||
156 | unsigned long ioaddr) | ||
157 | { | ||
158 | struct net_device_stats *stats = (struct net_device_stats *)data; | ||
159 | u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR); | ||
160 | |||
161 | if (unlikely(csr8)) { | ||
162 | if (csr8 & DMA_MISSED_FRAME_OVE) { | ||
163 | stats->rx_over_errors += 0x800; | ||
164 | x->rx_overflow_cntr += 0x800; | ||
165 | } else { | ||
166 | unsigned int ove_cntr; | ||
167 | ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17); | ||
168 | stats->rx_over_errors += ove_cntr; | ||
169 | x->rx_overflow_cntr += ove_cntr; | ||
170 | } | ||
171 | |||
172 | if (csr8 & DMA_MISSED_FRAME_OVE_M) { | ||
173 | stats->rx_missed_errors += 0xffff; | ||
174 | x->rx_missed_cntr += 0xffff; | ||
175 | } else { | ||
176 | unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR); | ||
177 | stats->rx_missed_errors += miss_f; | ||
178 | x->rx_missed_cntr += miss_f; | ||
179 | } | ||
180 | } | ||
181 | return; | ||
182 | } | ||
183 | |||
184 | static int mac100_get_tx_frame_status(void *data, struct stmmac_extra_stats *x, | ||
185 | struct dma_desc *p, unsigned long ioaddr) | ||
186 | { | ||
187 | int ret = 0; | ||
188 | struct net_device_stats *stats = (struct net_device_stats *)data; | ||
189 | |||
190 | if (unlikely(p->des01.tx.error_summary)) { | ||
191 | if (unlikely(p->des01.tx.underflow_error)) { | ||
192 | x->tx_underflow++; | ||
193 | stats->tx_fifo_errors++; | ||
194 | } | ||
195 | if (unlikely(p->des01.tx.no_carrier)) { | ||
196 | x->tx_carrier++; | ||
197 | stats->tx_carrier_errors++; | ||
198 | } | ||
199 | if (unlikely(p->des01.tx.loss_carrier)) { | ||
200 | x->tx_losscarrier++; | ||
201 | stats->tx_carrier_errors++; | ||
202 | } | ||
203 | if (unlikely((p->des01.tx.excessive_deferral) || | ||
204 | (p->des01.tx.excessive_collisions) || | ||
205 | (p->des01.tx.late_collision))) | ||
206 | stats->collisions += p->des01.tx.collision_count; | ||
207 | ret = -1; | ||
208 | } | ||
209 | if (unlikely(p->des01.tx.heartbeat_fail)) { | ||
210 | x->tx_heartbeat++; | ||
211 | stats->tx_heartbeat_errors++; | ||
212 | ret = -1; | ||
213 | } | ||
214 | if (unlikely(p->des01.tx.deferred)) | ||
215 | x->tx_deferred++; | ||
216 | |||
217 | return ret; | ||
218 | } | ||
219 | |||
220 | static int mac100_get_tx_len(struct dma_desc *p) | ||
221 | { | ||
222 | return p->des01.tx.buffer1_size; | ||
223 | } | ||
224 | |||
225 | /* This function verifies if each incoming frame has some errors | ||
226 | * and, if required, updates the multicast statistics. | ||
227 | * In case of success, it returns csum_none because the device | ||
228 | * is not able to compute the csum in HW. */ | ||
229 | static int mac100_get_rx_frame_status(void *data, struct stmmac_extra_stats *x, | ||
230 | struct dma_desc *p) | ||
231 | { | ||
232 | int ret = csum_none; | ||
233 | struct net_device_stats *stats = (struct net_device_stats *)data; | ||
234 | |||
235 | if (unlikely(p->des01.rx.last_descriptor == 0)) { | ||
236 | pr_warning("mac100 Error: Oversized Ethernet " | ||
237 | "frame spanned multiple buffers\n"); | ||
238 | stats->rx_length_errors++; | ||
239 | return discard_frame; | ||
240 | } | ||
241 | |||
242 | if (unlikely(p->des01.rx.error_summary)) { | ||
243 | if (unlikely(p->des01.rx.descriptor_error)) | ||
244 | x->rx_desc++; | ||
245 | if (unlikely(p->des01.rx.partial_frame_error)) | ||
246 | x->rx_partial++; | ||
247 | if (unlikely(p->des01.rx.run_frame)) | ||
248 | x->rx_runt++; | ||
249 | if (unlikely(p->des01.rx.frame_too_long)) | ||
250 | x->rx_toolong++; | ||
251 | if (unlikely(p->des01.rx.collision)) { | ||
252 | x->rx_collision++; | ||
253 | stats->collisions++; | ||
254 | } | ||
255 | if (unlikely(p->des01.rx.crc_error)) { | ||
256 | x->rx_crc++; | ||
257 | stats->rx_crc_errors++; | ||
258 | } | ||
259 | ret = discard_frame; | ||
260 | } | ||
261 | if (unlikely(p->des01.rx.dribbling)) | ||
262 | ret = discard_frame; | ||
263 | |||
264 | if (unlikely(p->des01.rx.length_error)) { | ||
265 | x->rx_lenght++; | ||
266 | ret = discard_frame; | ||
267 | } | ||
268 | if (unlikely(p->des01.rx.mii_error)) { | ||
269 | x->rx_mii++; | ||
270 | ret = discard_frame; | ||
271 | } | ||
272 | if (p->des01.rx.multicast_frame) { | ||
273 | x->rx_multicast++; | ||
274 | stats->multicast++; | ||
275 | } | ||
276 | return ret; | ||
277 | } | ||
278 | |||
279 | static void mac100_irq_status(unsigned long ioaddr) | ||
280 | { | ||
281 | return; | ||
282 | } | ||
283 | |||
284 | static void mac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr, | ||
285 | unsigned int reg_n) | ||
286 | { | ||
287 | stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW); | ||
288 | } | ||
289 | |||
290 | static void mac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr, | ||
291 | unsigned int reg_n) | ||
292 | { | ||
293 | stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW); | ||
294 | } | ||
295 | |||
296 | static void mac100_set_filter(struct net_device *dev) | ||
297 | { | ||
298 | unsigned long ioaddr = dev->base_addr; | ||
299 | u32 value = readl(ioaddr + MAC_CONTROL); | ||
300 | |||
301 | if (dev->flags & IFF_PROMISC) { | ||
302 | value |= MAC_CONTROL_PR; | ||
303 | value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO | | ||
304 | MAC_CONTROL_HP); | ||
305 | } else if ((dev->mc_count > HASH_TABLE_SIZE) | ||
306 | || (dev->flags & IFF_ALLMULTI)) { | ||
307 | value |= MAC_CONTROL_PM; | ||
308 | value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO); | ||
309 | writel(0xffffffff, ioaddr + MAC_HASH_HIGH); | ||
310 | writel(0xffffffff, ioaddr + MAC_HASH_LOW); | ||
311 | } else if (dev->mc_count == 0) { /* no multicast */ | ||
312 | value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF | | ||
313 | MAC_CONTROL_HO | MAC_CONTROL_HP); | ||
314 | } else { | ||
315 | int i; | ||
316 | u32 mc_filter[2]; | ||
317 | struct dev_mc_list *mclist; | ||
318 | |||
319 | /* Perfect filter mode for physical address and Hash | ||
320 | filter for multicast */ | ||
321 | value |= MAC_CONTROL_HP; | ||
322 | value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF | ||
323 | | MAC_CONTROL_HO); | ||
324 | |||
325 | memset(mc_filter, 0, sizeof(mc_filter)); | ||
326 | for (i = 0, mclist = dev->mc_list; | ||
327 | mclist && i < dev->mc_count; i++, mclist = mclist->next) { | ||
328 | /* The upper 6 bits of the calculated CRC are used to | ||
329 | * index the contents of the hash table */ | ||
330 | int bit_nr = | ||
331 | ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; | ||
332 | /* The most significant bit determines the register to | ||
333 | * use (H/L) while the other 5 bits determine the bit | ||
334 | * within the register. */ | ||
335 | mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); | ||
336 | } | ||
337 | writel(mc_filter[0], ioaddr + MAC_HASH_LOW); | ||
338 | writel(mc_filter[1], ioaddr + MAC_HASH_HIGH); | ||
339 | } | ||
340 | |||
341 | writel(value, ioaddr + MAC_CONTROL); | ||
342 | |||
343 | DBG(KERN_INFO "%s: CTRL reg: 0x%08x Hash regs: " | ||
344 | "HI 0x%08x, LO 0x%08x\n", | ||
345 | __func__, readl(ioaddr + MAC_CONTROL), | ||
346 | readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW)); | ||
347 | return; | ||
348 | } | ||
349 | |||
350 | static void mac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex, | ||
351 | unsigned int fc, unsigned int pause_time) | ||
352 | { | ||
353 | unsigned int flow = MAC_FLOW_CTRL_ENABLE; | ||
354 | |||
355 | if (duplex) | ||
356 | flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT); | ||
357 | writel(flow, ioaddr + MAC_FLOW_CTRL); | ||
358 | |||
359 | return; | ||
360 | } | ||
361 | |||
362 | /* No PMT module supported in our SoC for the Ethernet Controller. */ | ||
363 | static void mac100_pmt(unsigned long ioaddr, unsigned long mode) | ||
364 | { | ||
365 | return; | ||
366 | } | ||
367 | |||
368 | static void mac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size, | ||
369 | int disable_rx_ic) | ||
370 | { | ||
371 | int i; | ||
372 | for (i = 0; i < ring_size; i++) { | ||
373 | p->des01.rx.own = 1; | ||
374 | p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1; | ||
375 | if (i == ring_size - 1) | ||
376 | p->des01.rx.end_ring = 1; | ||
377 | if (disable_rx_ic) | ||
378 | p->des01.rx.disable_ic = 1; | ||
379 | p++; | ||
380 | } | ||
381 | return; | ||
382 | } | ||
383 | |||
384 | static void mac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size) | ||
385 | { | ||
386 | int i; | ||
387 | for (i = 0; i < ring_size; i++) { | ||
388 | p->des01.tx.own = 0; | ||
389 | if (i == ring_size - 1) | ||
390 | p->des01.tx.end_ring = 1; | ||
391 | p++; | ||
392 | } | ||
393 | return; | ||
394 | } | ||
395 | |||
396 | static int mac100_get_tx_owner(struct dma_desc *p) | ||
397 | { | ||
398 | return p->des01.tx.own; | ||
399 | } | ||
400 | |||
401 | static int mac100_get_rx_owner(struct dma_desc *p) | ||
402 | { | ||
403 | return p->des01.rx.own; | ||
404 | } | ||
405 | |||
406 | static void mac100_set_tx_owner(struct dma_desc *p) | ||
407 | { | ||
408 | p->des01.tx.own = 1; | ||
409 | } | ||
410 | |||
411 | static void mac100_set_rx_owner(struct dma_desc *p) | ||
412 | { | ||
413 | p->des01.rx.own = 1; | ||
414 | } | ||
415 | |||
416 | static int mac100_get_tx_ls(struct dma_desc *p) | ||
417 | { | ||
418 | return p->des01.tx.last_segment; | ||
419 | } | ||
420 | |||
421 | static void mac100_release_tx_desc(struct dma_desc *p) | ||
422 | { | ||
423 | int ter = p->des01.tx.end_ring; | ||
424 | |||
425 | /* clean the fields used within the xmit */ | ||
426 | p->des01.tx.first_segment = 0; | ||
427 | p->des01.tx.last_segment = 0; | ||
428 | p->des01.tx.buffer1_size = 0; | ||
429 | |||
430 | /* clean status reported */ | ||
431 | p->des01.tx.error_summary = 0; | ||
432 | p->des01.tx.underflow_error = 0; | ||
433 | p->des01.tx.no_carrier = 0; | ||
434 | p->des01.tx.loss_carrier = 0; | ||
435 | p->des01.tx.excessive_deferral = 0; | ||
436 | p->des01.tx.excessive_collisions = 0; | ||
437 | p->des01.tx.late_collision = 0; | ||
438 | p->des01.tx.heartbeat_fail = 0; | ||
439 | p->des01.tx.deferred = 0; | ||
440 | |||
441 | /* set termination field */ | ||
442 | p->des01.tx.end_ring = ter; | ||
443 | |||
444 | return; | ||
445 | } | ||
446 | |||
447 | static void mac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, | ||
448 | int csum_flag) | ||
449 | { | ||
450 | p->des01.tx.first_segment = is_fs; | ||
451 | p->des01.tx.buffer1_size = len; | ||
452 | } | ||
453 | |||
454 | static void mac100_clear_tx_ic(struct dma_desc *p) | ||
455 | { | ||
456 | p->des01.tx.interrupt = 0; | ||
457 | } | ||
458 | |||
459 | static void mac100_close_tx_desc(struct dma_desc *p) | ||
460 | { | ||
461 | p->des01.tx.last_segment = 1; | ||
462 | p->des01.tx.interrupt = 1; | ||
463 | } | ||
464 | |||
465 | static int mac100_get_rx_frame_len(struct dma_desc *p) | ||
466 | { | ||
467 | return p->des01.rx.frame_length; | ||
468 | } | ||
469 | |||
470 | struct stmmac_ops mac100_driver = { | ||
471 | .core_init = mac100_core_init, | ||
472 | .dump_mac_regs = mac100_dump_mac_regs, | ||
473 | .dma_init = mac100_dma_init, | ||
474 | .dump_dma_regs = mac100_dump_dma_regs, | ||
475 | .dma_mode = mac100_dma_operation_mode, | ||
476 | .dma_diagnostic_fr = mac100_dma_diagnostic_fr, | ||
477 | .tx_status = mac100_get_tx_frame_status, | ||
478 | .rx_status = mac100_get_rx_frame_status, | ||
479 | .get_tx_len = mac100_get_tx_len, | ||
480 | .set_filter = mac100_set_filter, | ||
481 | .flow_ctrl = mac100_flow_ctrl, | ||
482 | .pmt = mac100_pmt, | ||
483 | .init_rx_desc = mac100_init_rx_desc, | ||
484 | .init_tx_desc = mac100_init_tx_desc, | ||
485 | .get_tx_owner = mac100_get_tx_owner, | ||
486 | .get_rx_owner = mac100_get_rx_owner, | ||
487 | .release_tx_desc = mac100_release_tx_desc, | ||
488 | .prepare_tx_desc = mac100_prepare_tx_desc, | ||
489 | .clear_tx_ic = mac100_clear_tx_ic, | ||
490 | .close_tx_desc = mac100_close_tx_desc, | ||
491 | .get_tx_ls = mac100_get_tx_ls, | ||
492 | .set_tx_owner = mac100_set_tx_owner, | ||
493 | .set_rx_owner = mac100_set_rx_owner, | ||
494 | .get_rx_frame_len = mac100_get_rx_frame_len, | ||
495 | .host_irq_status = mac100_irq_status, | ||
496 | .set_umac_addr = mac100_set_umac_addr, | ||
497 | .get_umac_addr = mac100_get_umac_addr, | ||
498 | }; | ||
499 | |||
500 | struct mac_device_info *mac100_setup(unsigned long ioaddr) | ||
501 | { | ||
502 | struct mac_device_info *mac; | ||
503 | |||
504 | mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); | ||
505 | |||
506 | pr_info("\tMAC 10/100\n"); | ||
507 | |||
508 | mac->ops = &mac100_driver; | ||
509 | mac->hw.pmt = PMT_NOT_SUPPORTED; | ||
510 | mac->hw.link.port = MAC_CONTROL_PS; | ||
511 | mac->hw.link.duplex = MAC_CONTROL_F; | ||
512 | mac->hw.link.speed = 0; | ||
513 | mac->hw.mii.addr = MAC_MII_ADDR; | ||
514 | mac->hw.mii.data = MAC_MII_DATA; | ||
515 | |||
516 | return mac; | ||
517 | } | ||
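mac100_setup() above returns a kzalloc()'d mac_device_info with its ops pointer wired to the mac100_driver table; note that the allocation result is used without a NULL check. Below is a minimal stand-alone sketch (user-space C, hypothetical names, not part of the patch) of the same ops-table pattern with the failure path handled; the kernel code follows the same shape, only with kzalloc()/GFP_KERNEL in place of calloc().

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical analogue of struct stmmac_ops: a table of function pointers. */
struct demo_ops {
	void (*core_init)(void);
};

struct demo_device_info {
	const struct demo_ops *ops;
	int pmt_supported;
};

static void demo_core_init(void)
{
	printf("core init for MAC 10/100\n");
}

static const struct demo_ops demo_driver = {
	.core_init = demo_core_init,
};

/* Same shape as mac100_setup(), but with the allocation checked. */
static struct demo_device_info *demo_setup(void)
{
	struct demo_device_info *mac = calloc(1, sizeof(*mac));

	if (!mac)
		return NULL;

	mac->ops = &demo_driver;
	mac->pmt_supported = 0;	/* as with PMT_NOT_SUPPORTED above */
	return mac;
}

int main(void)
{
	struct demo_device_info *mac = demo_setup();

	if (!mac)
		return 1;
	mac->ops->core_init();	/* dispatch through the ops table */
	free(mac);
	return 0;
}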
diff --git a/drivers/net/stmmac/mac100.h b/drivers/net/stmmac/mac100.h new file mode 100644 index 000000000000..0f8f110d004a --- /dev/null +++ b/drivers/net/stmmac/mac100.h | |||
@@ -0,0 +1,116 @@ | |||
1 | /******************************************************************************* | ||
2 | MAC 10/100 Header File | ||
3 | |||
4 | Copyright (C) 2007-2009 STMicroelectronics Ltd | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> | ||
23 | *******************************************************************************/ | ||
24 | |||
25 | /*---------------------------------------------------------------------------- | ||
26 | * MAC BLOCK defines | ||
27 | *---------------------------------------------------------------------------*/ | ||
28 | /* MAC CSR offset */ | ||
29 | #define MAC_CONTROL 0x00000000 /* MAC Control */ | ||
30 | #define MAC_ADDR_HIGH 0x00000004 /* MAC Address High */ | ||
31 | #define MAC_ADDR_LOW 0x00000008 /* MAC Address Low */ | ||
32 | #define MAC_HASH_HIGH 0x0000000c /* Multicast Hash Table High */ | ||
33 | #define MAC_HASH_LOW 0x00000010 /* Multicast Hash Table Low */ | ||
34 | #define MAC_MII_ADDR 0x00000014 /* MII Address */ | ||
35 | #define MAC_MII_DATA 0x00000018 /* MII Data */ | ||
36 | #define MAC_FLOW_CTRL 0x0000001c /* Flow Control */ | ||
37 | #define MAC_VLAN1 0x00000020 /* VLAN1 Tag */ | ||
38 | #define MAC_VLAN2 0x00000024 /* VLAN2 Tag */ | ||
39 | |||
40 | /* MAC CTRL defines */ | ||
41 | #define MAC_CONTROL_RA 0x80000000 /* Receive All Mode */ | ||
42 | #define MAC_CONTROL_BLE 0x40000000 /* Endian Mode */ | ||
43 | #define MAC_CONTROL_HBD 0x10000000 /* Heartbeat Disable */ | ||
44 | #define MAC_CONTROL_PS 0x08000000 /* Port Select */ | ||
45 | #define MAC_CONTROL_DRO 0x00800000 /* Disable Receive Own */ | ||
46 | #define MAC_CONTROL_EXT_LOOPBACK 0x00400000 /* Reserved (ext loopback?) */ | ||
47 | #define MAC_CONTROL_OM 0x00200000 /* Loopback Operating Mode */ | ||
48 | #define MAC_CONTROL_F 0x00100000 /* Full Duplex Mode */ | ||
49 | #define MAC_CONTROL_PM 0x00080000 /* Pass All Multicast */ | ||
50 | #define MAC_CONTROL_PR 0x00040000 /* Promiscuous Mode */ | ||
51 | #define MAC_CONTROL_IF 0x00020000 /* Inverse Filtering */ | ||
52 | #define MAC_CONTROL_PB 0x00010000 /* Pass Bad Frames */ | ||
53 | #define MAC_CONTROL_HO 0x00008000 /* Hash Only Filtering Mode */ | ||
54 | #define MAC_CONTROL_HP 0x00002000 /* Hash/Perfect Filtering Mode */ | ||
55 | #define MAC_CONTROL_LCC 0x00001000 /* Late Collision Control */ | ||
56 | #define MAC_CONTROL_DBF 0x00000800 /* Disable Broadcast Frames */ | ||
57 | #define MAC_CONTROL_DRTY 0x00000400 /* Disable Retry */ | ||
58 | #define MAC_CONTROL_ASTP 0x00000100 /* Automatic Pad Stripping */ | ||
59 | #define MAC_CONTROL_BOLMT_10 0x00000000 /* Back Off Limit 10 */ | ||
60 | #define MAC_CONTROL_BOLMT_8 0x00000040 /* Back Off Limit 8 */ | ||
61 | #define MAC_CONTROL_BOLMT_4 0x00000080 /* Back Off Limit 4 */ | ||
62 | #define MAC_CONTROL_BOLMT_1 0x000000c0 /* Back Off Limit 1 */ | ||
63 | #define MAC_CONTROL_DC 0x00000020 /* Deferral Check */ | ||
64 | #define MAC_CONTROL_TE 0x00000008 /* Transmitter Enable */ | ||
65 | #define MAC_CONTROL_RE 0x00000004 /* Receiver Enable */ | ||
66 | |||
67 | #define MAC_CORE_INIT (MAC_CONTROL_HBD | MAC_CONTROL_ASTP) | ||
68 | |||
69 | /* MAC FLOW CTRL defines */ | ||
70 | #define MAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */ | ||
71 | #define MAC_FLOW_CTRL_PT_SHIFT 16 | ||
72 | #define MAC_FLOW_CTRL_PASS 0x00000004 /* Pass Control Frames */ | ||
73 | #define MAC_FLOW_CTRL_ENABLE 0x00000002 /* Flow Control Enable */ | ||
74 | #define MAC_FLOW_CTRL_PAUSE 0x00000001 /* Flow Control Busy ... */ | ||
75 | |||
76 | /* MII ADDR defines */ | ||
77 | #define MAC_MII_ADDR_WRITE 0x00000002 /* MII Write */ | ||
78 | #define MAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */ | ||
79 | |||
80 | /*---------------------------------------------------------------------------- | ||
81 | * DMA BLOCK defines | ||
82 | *---------------------------------------------------------------------------*/ | ||
83 | |||
84 | /* DMA Bus Mode register defines */ | ||
85 | #define DMA_BUS_MODE_DBO 0x00100000 /* Descriptor Byte Ordering */ | ||
86 | #define DMA_BUS_MODE_BLE 0x00000080 /* Big Endian/Little Endian */ | ||
87 | #define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */ | ||
88 | #define DMA_BUS_MODE_PBL_SHIFT 8 | ||
89 | #define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */ | ||
90 | #define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */ | ||
91 | #define DMA_BUS_MODE_BAR_BUS 0x00000002 /* Bar-Bus Arbitration */ | ||
92 | #define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */ | ||
93 | #define DMA_BUS_MODE_DEFAULT 0x00000000 | ||
94 | |||
95 | /* DMA Control register defines */ | ||
96 | #define DMA_CONTROL_SF 0x00200000 /* Store And Forward */ | ||
97 | |||
98 | /* Transmit Threshold Control */ | ||
99 | enum ttc_control { | ||
100 | DMA_CONTROL_TTC_DEFAULT = 0x00000000, /* Threshold is 32 DWORDS */ | ||
101 | DMA_CONTROL_TTC_64 = 0x00004000, /* Threshold is 64 DWORDS */ | ||
102 | DMA_CONTROL_TTC_128 = 0x00008000, /* Threshold is 128 DWORDS */ | ||
103 | DMA_CONTROL_TTC_256 = 0x0000c000, /* Threshold is 256 DWORDS */ | ||
104 | DMA_CONTROL_TTC_18 = 0x00400000, /* Threshold is 18 DWORDS */ | ||
105 | DMA_CONTROL_TTC_24 = 0x00404000, /* Threshold is 24 DWORDS */ | ||
106 | DMA_CONTROL_TTC_32 = 0x00408000, /* Threshold is 32 DWORDS */ | ||
107 | DMA_CONTROL_TTC_40 = 0x0040c000, /* Threshold is 40 DWORDS */ | ||
108 | DMA_CONTROL_SE = 0x00000008, /* Stop On Empty */ | ||
109 | DMA_CONTROL_OSF = 0x00000004, /* Operate On 2nd Frame */ | ||
110 | }; | ||
111 | |||
112 | /* STMAC110 DMA Missed Frame Counter register defines */ | ||
113 | #define DMA_MISSED_FRAME_OVE 0x10000000 /* FIFO Overflow Counter Overflow */ | ||
114 | #define DMA_MISSED_FRAME_OVE_CNTR 0x0ffe0000 /* Overflow Frame Counter */ | ||
115 | #define DMA_MISSED_FRAME_OVE_M 0x00010000 /* Missed Frame Overflow */ | ||
116 | #define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Counter */ | ||
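As a side note on the flow-control defines above: the pause time occupies the upper 16 bits of the Flow Control register (MAC_FLOW_CTRL_PT_SHIFT) and bit 1 enables flow control. A stand-alone sketch of how such a register value might be composed (defines copied locally so it compiles on its own; the driver's actual flow_ctrl op is not shown in this hunk):

#include <stdio.h>
#include <stdint.h>

#define MAC_FLOW_CTRL_PT_MASK	0xffff0000	/* Pause Time Mask */
#define MAC_FLOW_CTRL_PT_SHIFT	16
#define MAC_FLOW_CTRL_ENABLE	0x00000002	/* Flow Control Enable */

/* Compose a Flow Control register value for a given pause time. */
static uint32_t flow_ctrl_value(unsigned int pause_time)
{
	uint32_t flow = MAC_FLOW_CTRL_ENABLE;

	flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT) & MAC_FLOW_CTRL_PT_MASK;
	return flow;
}

int main(void)
{
	/* A pause time of 0xffff gives 0xffff0002. */
	printf("0x%08x\n", (unsigned int)flow_ctrl_value(0xffff));
	return 0;
}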
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h new file mode 100644 index 000000000000..6d2eae3040e5 --- /dev/null +++ b/drivers/net/stmmac/stmmac.h | |||
@@ -0,0 +1,98 @@ | |||
1 | /******************************************************************************* | ||
2 | Copyright (C) 2007-2009 STMicroelectronics Ltd | ||
3 | |||
4 | This program is free software; you can redistribute it and/or modify it | ||
5 | under the terms and conditions of the GNU General Public License, | ||
6 | version 2, as published by the Free Software Foundation. | ||
7 | |||
8 | This program is distributed in the hope it will be useful, but WITHOUT | ||
9 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
11 | more details. | ||
12 | |||
13 | You should have received a copy of the GNU General Public License along with | ||
14 | this program; if not, write to the Free Software Foundation, Inc., | ||
15 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
16 | |||
17 | The full GNU General Public License is included in this distribution in | ||
18 | the file called "COPYING". | ||
19 | |||
20 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> | ||
21 | *******************************************************************************/ | ||
22 | |||
23 | #define DRV_MODULE_VERSION "Oct_09" | ||
24 | |||
25 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | ||
26 | #define STMMAC_VLAN_TAG_USED | ||
27 | #include <linux/if_vlan.h> | ||
28 | #endif | ||
29 | |||
30 | #include "common.h" | ||
31 | #ifdef CONFIG_STMMAC_TIMER | ||
32 | #include "stmmac_timer.h" | ||
33 | #endif | ||
34 | |||
35 | struct stmmac_priv { | ||
36 | /* Frequently used values are kept adjacent for cache effect */ | ||
37 | struct dma_desc *dma_tx ____cacheline_aligned; | ||
38 | dma_addr_t dma_tx_phy; | ||
39 | struct sk_buff **tx_skbuff; | ||
40 | unsigned int cur_tx; | ||
41 | unsigned int dirty_tx; | ||
42 | unsigned int dma_tx_size; | ||
43 | int tx_coe; | ||
44 | int tx_coalesce; | ||
45 | |||
46 | struct dma_desc *dma_rx; | ||
47 | unsigned int cur_rx; | ||
48 | unsigned int dirty_rx; | ||
49 | struct sk_buff **rx_skbuff; | ||
50 | dma_addr_t *rx_skbuff_dma; | ||
51 | struct sk_buff_head rx_recycle; | ||
52 | |||
53 | struct net_device *dev; | ||
54 | int is_gmac; | ||
55 | dma_addr_t dma_rx_phy; | ||
56 | unsigned int dma_rx_size; | ||
57 | int rx_csum; | ||
58 | unsigned int dma_buf_sz; | ||
59 | struct device *device; | ||
60 | struct mac_device_info *mac_type; | ||
61 | |||
62 | struct stmmac_extra_stats xstats; | ||
63 | struct napi_struct napi; | ||
64 | |||
65 | phy_interface_t phy_interface; | ||
66 | int pbl; | ||
67 | int bus_id; | ||
68 | int phy_addr; | ||
69 | int phy_mask; | ||
70 | int (*phy_reset) (void *priv); | ||
71 | void (*fix_mac_speed) (void *priv, unsigned int speed); | ||
72 | void *bsp_priv; | ||
73 | |||
74 | int phy_irq; | ||
75 | struct phy_device *phydev; | ||
76 | int oldlink; | ||
77 | int speed; | ||
78 | int oldduplex; | ||
79 | unsigned int flow_ctrl; | ||
80 | unsigned int pause; | ||
81 | struct mii_bus *mii; | ||
82 | |||
83 | u32 msg_enable; | ||
84 | spinlock_t lock; | ||
85 | int wolopts; | ||
86 | int wolenabled; | ||
87 | int shutdown; | ||
88 | #ifdef CONFIG_STMMAC_TIMER | ||
89 | struct stmmac_timer *tm; | ||
90 | #endif | ||
91 | #ifdef STMMAC_VLAN_TAG_USED | ||
92 | struct vlan_group *vlgrp; | ||
93 | #endif | ||
94 | }; | ||
95 | |||
96 | extern int stmmac_mdio_unregister(struct net_device *ndev); | ||
97 | extern int stmmac_mdio_register(struct net_device *ndev); | ||
98 | extern void stmmac_set_ethtool_ops(struct net_device *netdev); | ||
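The cur_tx/dirty_tx (and cur_rx/dirty_rx) fields above are free-running producer/consumer indices over rings of dma_tx_size/dma_rx_size entries: the slot in use is the index modulo the ring size, and the number of free TX slots is dirty_tx + dma_tx_size - cur_tx - 1, as computed by stmmac_tx_avail() later in this patch. A tiny stand-alone C sketch of that arithmetic:

#include <stdio.h>

#define DMA_TX_SIZE 256	/* same default ring size as the dma_txsize parameter */

/* Free descriptors, as in stmmac_tx_avail(); the "- 1" effectively keeps one
 * slot unused so that cur_tx == dirty_tx unambiguously means "ring empty". */
static unsigned int tx_avail(unsigned int cur_tx, unsigned int dirty_tx)
{
	return dirty_tx + DMA_TX_SIZE - cur_tx - 1;
}

int main(void)
{
	unsigned int cur = 0, dirty = 0;

	printf("empty ring: %u free\n", tx_avail(cur, dirty));		/* 255 */

	cur += 10;	/* queue ten frames */
	printf("after queuing 10: %u free\n", tx_avail(cur, dirty));	/* 245 */

	dirty += 10;	/* reclaim them on TX completion */
	printf("after reclaim: %u free, next slot = %u\n",
	       tx_avail(cur, dirty), cur % DMA_TX_SIZE);		/* 255, 10 */
	return 0;
}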
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c new file mode 100644 index 000000000000..694ebe6a0758 --- /dev/null +++ b/drivers/net/stmmac/stmmac_ethtool.c | |||
@@ -0,0 +1,395 @@ | |||
1 | /******************************************************************************* | ||
2 | STMMAC Ethtool support | ||
3 | |||
4 | Copyright (C) 2007-2009 STMicroelectronics Ltd | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> | ||
23 | *******************************************************************************/ | ||
24 | |||
25 | #include <linux/etherdevice.h> | ||
26 | #include <linux/ethtool.h> | ||
27 | #include <linux/mii.h> | ||
28 | #include <linux/phy.h> | ||
29 | |||
30 | #include "stmmac.h" | ||
31 | |||
32 | #define REG_SPACE_SIZE 0x1054 | ||
33 | #define MAC100_ETHTOOL_NAME "st_mac100" | ||
34 | #define GMAC_ETHTOOL_NAME "st_gmac" | ||
35 | |||
36 | struct stmmac_stats { | ||
37 | char stat_string[ETH_GSTRING_LEN]; | ||
38 | int sizeof_stat; | ||
39 | int stat_offset; | ||
40 | }; | ||
41 | |||
42 | #define STMMAC_STAT(m) \ | ||
43 | { #m, FIELD_SIZEOF(struct stmmac_extra_stats, m), \ | ||
44 | offsetof(struct stmmac_priv, xstats.m)} | ||
45 | |||
46 | static const struct stmmac_stats stmmac_gstrings_stats[] = { | ||
47 | STMMAC_STAT(tx_underflow), | ||
48 | STMMAC_STAT(tx_carrier), | ||
49 | STMMAC_STAT(tx_losscarrier), | ||
50 | STMMAC_STAT(tx_heartbeat), | ||
51 | STMMAC_STAT(tx_deferred), | ||
52 | STMMAC_STAT(tx_vlan), | ||
53 | STMMAC_STAT(rx_vlan), | ||
54 | STMMAC_STAT(tx_jabber), | ||
55 | STMMAC_STAT(tx_frame_flushed), | ||
56 | STMMAC_STAT(tx_payload_error), | ||
57 | STMMAC_STAT(tx_ip_header_error), | ||
58 | STMMAC_STAT(rx_desc), | ||
59 | STMMAC_STAT(rx_partial), | ||
60 | STMMAC_STAT(rx_runt), | ||
61 | STMMAC_STAT(rx_toolong), | ||
62 | STMMAC_STAT(rx_collision), | ||
63 | STMMAC_STAT(rx_crc), | ||
64 | STMMAC_STAT(rx_lenght), | ||
65 | STMMAC_STAT(rx_mii), | ||
66 | STMMAC_STAT(rx_multicast), | ||
67 | STMMAC_STAT(rx_gmac_overflow), | ||
68 | STMMAC_STAT(rx_watchdog), | ||
69 | STMMAC_STAT(da_rx_filter_fail), | ||
70 | STMMAC_STAT(sa_rx_filter_fail), | ||
71 | STMMAC_STAT(rx_missed_cntr), | ||
72 | STMMAC_STAT(rx_overflow_cntr), | ||
73 | STMMAC_STAT(tx_undeflow_irq), | ||
74 | STMMAC_STAT(tx_process_stopped_irq), | ||
75 | STMMAC_STAT(tx_jabber_irq), | ||
76 | STMMAC_STAT(rx_overflow_irq), | ||
77 | STMMAC_STAT(rx_buf_unav_irq), | ||
78 | STMMAC_STAT(rx_process_stopped_irq), | ||
79 | STMMAC_STAT(rx_watchdog_irq), | ||
80 | STMMAC_STAT(tx_early_irq), | ||
81 | STMMAC_STAT(fatal_bus_error_irq), | ||
82 | STMMAC_STAT(threshold), | ||
83 | STMMAC_STAT(tx_pkt_n), | ||
84 | STMMAC_STAT(rx_pkt_n), | ||
85 | STMMAC_STAT(poll_n), | ||
86 | STMMAC_STAT(sched_timer_n), | ||
87 | STMMAC_STAT(normal_irq_n), | ||
88 | }; | ||
89 | #define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats) | ||
90 | |||
91 | void stmmac_ethtool_getdrvinfo(struct net_device *dev, | ||
92 | struct ethtool_drvinfo *info) | ||
93 | { | ||
94 | struct stmmac_priv *priv = netdev_priv(dev); | ||
95 | |||
96 | if (!priv->is_gmac) | ||
97 | strcpy(info->driver, MAC100_ETHTOOL_NAME); | ||
98 | else | ||
99 | strcpy(info->driver, GMAC_ETHTOOL_NAME); | ||
100 | |||
101 | strcpy(info->version, DRV_MODULE_VERSION); | ||
102 | info->fw_version[0] = '\0'; | ||
103 | info->n_stats = STMMAC_STATS_LEN; | ||
104 | return; | ||
105 | } | ||
106 | |||
107 | int stmmac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
108 | { | ||
109 | struct stmmac_priv *priv = netdev_priv(dev); | ||
110 | struct phy_device *phy = priv->phydev; | ||
111 | int rc; | ||
112 | if (phy == NULL) { | ||
113 | pr_err("%s: %s: PHY is not registered\n", | ||
114 | __func__, dev->name); | ||
115 | return -ENODEV; | ||
116 | } | ||
117 | if (!netif_running(dev)) { | ||
118 | pr_err("%s: interface is disabled: we cannot track " | ||
119 | "link speed / duplex setting\n", dev->name); | ||
120 | return -EBUSY; | ||
121 | } | ||
122 | cmd->transceiver = XCVR_INTERNAL; | ||
123 | spin_lock_irq(&priv->lock); | ||
124 | rc = phy_ethtool_gset(phy, cmd); | ||
125 | spin_unlock_irq(&priv->lock); | ||
126 | return rc; | ||
127 | } | ||
128 | |||
129 | int stmmac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
130 | { | ||
131 | struct stmmac_priv *priv = netdev_priv(dev); | ||
132 | struct phy_device *phy = priv->phydev; | ||
133 | int rc; | ||
134 | |||
135 | spin_lock(&priv->lock); | ||
136 | rc = phy_ethtool_sset(phy, cmd); | ||
137 | spin_unlock(&priv->lock); | ||
138 | |||
139 | return rc; | ||
140 | } | ||
141 | |||
142 | u32 stmmac_ethtool_getmsglevel(struct net_device *dev) | ||
143 | { | ||
144 | struct stmmac_priv *priv = netdev_priv(dev); | ||
145 | return priv->msg_enable; | ||
146 | } | ||
147 | |||
148 | void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level) | ||
149 | { | ||
150 | struct stmmac_priv *priv = netdev_priv(dev); | ||
151 | priv->msg_enable = level; | ||
152 | |||
153 | } | ||
154 | |||
155 | int stmmac_check_if_running(struct net_device *dev) | ||
156 | { | ||
157 | if (!netif_running(dev)) | ||
158 | return -EBUSY; | ||
159 | return 0; | ||
160 | } | ||
161 | |||
162 | int stmmac_ethtool_get_regs_len(struct net_device *dev) | ||
163 | { | ||
164 | return REG_SPACE_SIZE; | ||
165 | } | ||
166 | |||
167 | void stmmac_ethtool_gregs(struct net_device *dev, | ||
168 | struct ethtool_regs *regs, void *space) | ||
169 | { | ||
170 | int i; | ||
171 | u32 *reg_space = (u32 *) space; | ||
172 | |||
173 | struct stmmac_priv *priv = netdev_priv(dev); | ||
174 | |||
175 | memset(reg_space, 0x0, REG_SPACE_SIZE); | ||
176 | |||
177 | if (!priv->is_gmac) { | ||
178 | /* MAC registers */ | ||
179 | for (i = 0; i < 12; i++) | ||
180 | reg_space[i] = readl(dev->base_addr + (i * 4)); | ||
181 | /* DMA registers */ | ||
182 | for (i = 0; i < 9; i++) | ||
183 | reg_space[i + 12] = | ||
184 | readl(dev->base_addr + (DMA_BUS_MODE + (i * 4))); | ||
185 | reg_space[22] = readl(dev->base_addr + DMA_CUR_TX_BUF_ADDR); | ||
186 | reg_space[23] = readl(dev->base_addr + DMA_CUR_RX_BUF_ADDR); | ||
187 | } else { | ||
188 | /* MAC registers */ | ||
189 | for (i = 0; i < 55; i++) | ||
190 | reg_space[i] = readl(dev->base_addr + (i * 4)); | ||
191 | /* DMA registers */ | ||
192 | for (i = 0; i < 22; i++) | ||
193 | reg_space[i + 55] = | ||
194 | readl(dev->base_addr + (DMA_BUS_MODE + (i * 4))); | ||
195 | } | ||
196 | |||
197 | return; | ||
198 | } | ||
199 | |||
200 | int stmmac_ethtool_set_tx_csum(struct net_device *netdev, u32 data) | ||
201 | { | ||
202 | if (data) | ||
203 | netdev->features |= NETIF_F_HW_CSUM; | ||
204 | else | ||
205 | netdev->features &= ~NETIF_F_HW_CSUM; | ||
206 | |||
207 | return 0; | ||
208 | } | ||
209 | |||
210 | u32 stmmac_ethtool_get_rx_csum(struct net_device *dev) | ||
211 | { | ||
212 | struct stmmac_priv *priv = netdev_priv(dev); | ||
213 | |||
214 | return priv->rx_csum; | ||
215 | } | ||
216 | |||
217 | static void | ||
218 | stmmac_get_pauseparam(struct net_device *netdev, | ||
219 | struct ethtool_pauseparam *pause) | ||
220 | { | ||
221 | struct stmmac_priv *priv = netdev_priv(netdev); | ||
222 | |||
223 | spin_lock(&priv->lock); | ||
224 | |||
225 | pause->rx_pause = 0; | ||
226 | pause->tx_pause = 0; | ||
227 | pause->autoneg = priv->phydev->autoneg; | ||
228 | |||
229 | if (priv->flow_ctrl & FLOW_RX) | ||
230 | pause->rx_pause = 1; | ||
231 | if (priv->flow_ctrl & FLOW_TX) | ||
232 | pause->tx_pause = 1; | ||
233 | |||
234 | spin_unlock(&priv->lock); | ||
235 | return; | ||
236 | } | ||
237 | |||
238 | static int | ||
239 | stmmac_set_pauseparam(struct net_device *netdev, | ||
240 | struct ethtool_pauseparam *pause) | ||
241 | { | ||
242 | struct stmmac_priv *priv = netdev_priv(netdev); | ||
243 | struct phy_device *phy = priv->phydev; | ||
244 | int new_pause = FLOW_OFF; | ||
245 | int ret = 0; | ||
246 | |||
247 | spin_lock(&priv->lock); | ||
248 | |||
249 | if (pause->rx_pause) | ||
250 | new_pause |= FLOW_RX; | ||
251 | if (pause->tx_pause) | ||
252 | new_pause |= FLOW_TX; | ||
253 | |||
254 | priv->flow_ctrl = new_pause; | ||
255 | |||
256 | if (phy->autoneg) { | ||
257 | if (netif_running(netdev)) { | ||
258 | struct ethtool_cmd cmd; | ||
259 | /* auto-negotiation automatically restarted */ | ||
260 | cmd.cmd = ETHTOOL_NWAY_RST; | ||
261 | cmd.supported = phy->supported; | ||
262 | cmd.advertising = phy->advertising; | ||
263 | cmd.autoneg = phy->autoneg; | ||
264 | cmd.speed = phy->speed; | ||
265 | cmd.duplex = phy->duplex; | ||
266 | cmd.phy_address = phy->addr; | ||
267 | ret = phy_ethtool_sset(phy, &cmd); | ||
268 | } | ||
269 | } else { | ||
270 | unsigned long ioaddr = netdev->base_addr; | ||
271 | priv->mac_type->ops->flow_ctrl(ioaddr, phy->duplex, | ||
272 | priv->flow_ctrl, priv->pause); | ||
273 | } | ||
274 | spin_unlock(&priv->lock); | ||
275 | return ret; | ||
276 | } | ||
277 | |||
278 | static void stmmac_get_ethtool_stats(struct net_device *dev, | ||
279 | struct ethtool_stats *dummy, u64 *data) | ||
280 | { | ||
281 | struct stmmac_priv *priv = netdev_priv(dev); | ||
282 | unsigned long ioaddr = dev->base_addr; | ||
283 | int i; | ||
284 | |||
285 | /* Update HW stats if supported */ | ||
286 | priv->mac_type->ops->dma_diagnostic_fr(&dev->stats, &priv->xstats, | ||
287 | ioaddr); | ||
288 | |||
289 | for (i = 0; i < STMMAC_STATS_LEN; i++) { | ||
290 | char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset; | ||
291 | data[i] = (stmmac_gstrings_stats[i].sizeof_stat == | ||
292 | sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p); | ||
293 | } | ||
294 | |||
295 | return; | ||
296 | } | ||
297 | |||
298 | static int stmmac_get_sset_count(struct net_device *netdev, int sset) | ||
299 | { | ||
300 | switch (sset) { | ||
301 | case ETH_SS_STATS: | ||
302 | return STMMAC_STATS_LEN; | ||
303 | default: | ||
304 | return -EOPNOTSUPP; | ||
305 | } | ||
306 | } | ||
307 | |||
308 | static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data) | ||
309 | { | ||
310 | int i; | ||
311 | u8 *p = data; | ||
312 | |||
313 | switch (stringset) { | ||
314 | case ETH_SS_STATS: | ||
315 | for (i = 0; i < STMMAC_STATS_LEN; i++) { | ||
316 | memcpy(p, stmmac_gstrings_stats[i].stat_string, | ||
317 | ETH_GSTRING_LEN); | ||
318 | p += ETH_GSTRING_LEN; | ||
319 | } | ||
320 | break; | ||
321 | default: | ||
322 | WARN_ON(1); | ||
323 | break; | ||
324 | } | ||
325 | return; | ||
326 | } | ||
327 | |||
328 | /* Currently, only WoL through a magic packet is supported. */ | ||
329 | static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | ||
330 | { | ||
331 | struct stmmac_priv *priv = netdev_priv(dev); | ||
332 | |||
333 | spin_lock_irq(&priv->lock); | ||
334 | if (priv->wolenabled == PMT_SUPPORTED) { | ||
335 | wol->supported = WAKE_MAGIC; | ||
336 | wol->wolopts = priv->wolopts; | ||
337 | } | ||
338 | spin_unlock_irq(&priv->lock); | ||
339 | } | ||
340 | |||
341 | static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | ||
342 | { | ||
343 | struct stmmac_priv *priv = netdev_priv(dev); | ||
344 | u32 support = WAKE_MAGIC; | ||
345 | |||
346 | if (priv->wolenabled == PMT_NOT_SUPPORTED) | ||
347 | return -EINVAL; | ||
348 | |||
349 | if (wol->wolopts & ~support) | ||
350 | return -EINVAL; | ||
351 | |||
352 | if (wol->wolopts == 0) | ||
353 | device_set_wakeup_enable(priv->device, 0); | ||
354 | else | ||
355 | device_set_wakeup_enable(priv->device, 1); | ||
356 | |||
357 | spin_lock_irq(&priv->lock); | ||
358 | priv->wolopts = wol->wolopts; | ||
359 | spin_unlock_irq(&priv->lock); | ||
360 | |||
361 | return 0; | ||
362 | } | ||
363 | |||
364 | static struct ethtool_ops stmmac_ethtool_ops = { | ||
365 | .begin = stmmac_check_if_running, | ||
366 | .get_drvinfo = stmmac_ethtool_getdrvinfo, | ||
367 | .get_settings = stmmac_ethtool_getsettings, | ||
368 | .set_settings = stmmac_ethtool_setsettings, | ||
369 | .get_msglevel = stmmac_ethtool_getmsglevel, | ||
370 | .set_msglevel = stmmac_ethtool_setmsglevel, | ||
371 | .get_regs = stmmac_ethtool_gregs, | ||
372 | .get_regs_len = stmmac_ethtool_get_regs_len, | ||
373 | .get_link = ethtool_op_get_link, | ||
374 | .get_rx_csum = stmmac_ethtool_get_rx_csum, | ||
375 | .get_tx_csum = ethtool_op_get_tx_csum, | ||
376 | .set_tx_csum = stmmac_ethtool_set_tx_csum, | ||
377 | .get_sg = ethtool_op_get_sg, | ||
378 | .set_sg = ethtool_op_set_sg, | ||
379 | .get_pauseparam = stmmac_get_pauseparam, | ||
380 | .set_pauseparam = stmmac_set_pauseparam, | ||
381 | .get_ethtool_stats = stmmac_get_ethtool_stats, | ||
382 | .get_strings = stmmac_get_strings, | ||
383 | .get_wol = stmmac_get_wol, | ||
384 | .set_wol = stmmac_set_wol, | ||
385 | .get_sset_count = stmmac_get_sset_count, | ||
386 | #ifdef NETIF_F_TSO | ||
387 | .get_tso = ethtool_op_get_tso, | ||
388 | .set_tso = ethtool_op_set_tso, | ||
389 | #endif | ||
390 | }; | ||
391 | |||
392 | void stmmac_set_ethtool_ops(struct net_device *netdev) | ||
393 | { | ||
394 | SET_ETHTOOL_OPS(netdev, &stmmac_ethtool_ops); | ||
395 | } | ||
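The STMMAC_STAT() macro records, for each extra-statistics field, its name, size and byte offset within stmmac_priv, and stmmac_get_ethtool_stats() walks that table with plain pointer arithmetic; a tool such as ethtool -S then prints the strings registered through stmmac_get_strings(). A self-contained sketch of the same offsetof()-table technique, using hypothetical structure and field names:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical stats structure standing in for stmmac_extra_stats. */
struct demo_stats {
	uint32_t tx_underflow;
	uint32_t rx_crc;
	uint64_t rx_pkt_n;
};

struct demo_stat_desc {
	const char *name;
	size_t size;
	size_t offset;
};

#define DEMO_STAT(m) { #m, sizeof(((struct demo_stats *)0)->m), \
		       offsetof(struct demo_stats, m) }

static const struct demo_stat_desc demo_table[] = {
	DEMO_STAT(tx_underflow),
	DEMO_STAT(rx_crc),
	DEMO_STAT(rx_pkt_n),
};

int main(void)
{
	struct demo_stats stats = { .tx_underflow = 3, .rx_crc = 1, .rx_pkt_n = 42 };
	size_t i;

	/* Same walk as stmmac_get_ethtool_stats(): base pointer + offset,
	 * width chosen from the recorded size. */
	for (i = 0; i < sizeof(demo_table) / sizeof(demo_table[0]); i++) {
		const char *p = (const char *)&stats + demo_table[i].offset;
		uint64_t v = (demo_table[i].size == sizeof(uint64_t)) ?
			      *(const uint64_t *)p : *(const uint32_t *)p;

		printf("%-16s %llu\n", demo_table[i].name,
		       (unsigned long long)v);
	}
	return 0;
}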
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c new file mode 100644 index 000000000000..c2f14dc9ba28 --- /dev/null +++ b/drivers/net/stmmac/stmmac_main.c | |||
@@ -0,0 +1,2204 @@ | |||
1 | /******************************************************************************* | ||
2 | This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers. | ||
3 | ST Ethernet IPs are built around a Synopsys IP Core. | ||
4 | |||
5 | Copyright (C) 2007-2009 STMicroelectronics Ltd | ||
6 | |||
7 | This program is free software; you can redistribute it and/or modify it | ||
8 | under the terms and conditions of the GNU General Public License, | ||
9 | version 2, as published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope it will be useful, but WITHOUT | ||
12 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
13 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
14 | more details. | ||
15 | |||
16 | You should have received a copy of the GNU General Public License along with | ||
17 | this program; if not, write to the Free Software Foundation, Inc., | ||
18 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | |||
20 | The full GNU General Public License is included in this distribution in | ||
21 | the file called "COPYING". | ||
22 | |||
23 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> | ||
24 | |||
25 | Documentation available at: | ||
26 | http://www.stlinux.com | ||
27 | Support available at: | ||
28 | https://bugzilla.stlinux.com/ | ||
29 | *******************************************************************************/ | ||
30 | |||
31 | #include <linux/module.h> | ||
32 | #include <linux/init.h> | ||
33 | #include <linux/kernel.h> | ||
34 | #include <linux/interrupt.h> | ||
35 | #include <linux/netdevice.h> | ||
36 | #include <linux/etherdevice.h> | ||
37 | #include <linux/platform_device.h> | ||
38 | #include <linux/ip.h> | ||
39 | #include <linux/tcp.h> | ||
40 | #include <linux/skbuff.h> | ||
41 | #include <linux/ethtool.h> | ||
42 | #include <linux/if_ether.h> | ||
43 | #include <linux/crc32.h> | ||
44 | #include <linux/mii.h> | ||
45 | #include <linux/phy.h> | ||
46 | #include <linux/if_vlan.h> | ||
47 | #include <linux/dma-mapping.h> | ||
48 | #include <linux/stm/soc.h> | ||
49 | #include "stmmac.h" | ||
50 | |||
51 | #define STMMAC_RESOURCE_NAME "stmmaceth" | ||
52 | #define PHY_RESOURCE_NAME "stmmacphy" | ||
53 | |||
54 | #undef STMMAC_DEBUG | ||
55 | /*#define STMMAC_DEBUG*/ | ||
56 | #ifdef STMMAC_DEBUG | ||
57 | #define DBG(nlevel, klevel, fmt, args...) \ | ||
58 | ((void)(netif_msg_##nlevel(priv) && \ | ||
59 | printk(KERN_##klevel fmt, ## args))) | ||
60 | #else | ||
61 | #define DBG(nlevel, klevel, fmt, args...) do { } while (0) | ||
62 | #endif | ||
63 | |||
64 | #undef STMMAC_RX_DEBUG | ||
65 | /*#define STMMAC_RX_DEBUG*/ | ||
66 | #ifdef STMMAC_RX_DEBUG | ||
67 | #define RX_DBG(fmt, args...) printk(fmt, ## args) | ||
68 | #else | ||
69 | #define RX_DBG(fmt, args...) do { } while (0) | ||
70 | #endif | ||
71 | |||
72 | #undef STMMAC_XMIT_DEBUG | ||
73 | /*#define STMMAC_XMIT_DEBUG*/ | ||
74 | #ifdef STMMAC_XMIT_DEBUG | ||
75 | #define TX_DBG(fmt, args...) printk(fmt, ## args) | ||
76 | #else | ||
77 | #define TX_DBG(fmt, args...) do { } while (0) | ||
78 | #endif | ||
79 | |||
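The DBG()/RX_DBG()/TX_DBG() macros above compile away to an empty do { } while (0) statement unless the matching STMMAC_*_DEBUG symbol is defined, and DBG() additionally gates each message at run time on the per-device priv->msg_enable bitmap through token pasting (netif_msg_##nlevel). A stand-alone sketch of the same compile-time plus run-time gating, with hypothetical names:

#include <stdio.h>

/* Compile-time switch, mirroring STMMAC_DEBUG. */
#define DEMO_DEBUG

/* Run-time level bits, standing in for the netif_msg_* levels. */
#define DEMO_MSG_PROBE	0x1
#define DEMO_MSG_INTR	0x2

struct demo_priv {
	unsigned int msg_enable;
};

/* demo_msg_##nlevel(p) expands to a bit test, as netif_msg_##nlevel does. */
#define demo_msg_probe(p)	((p)->msg_enable & DEMO_MSG_PROBE)
#define demo_msg_intr(p)	((p)->msg_enable & DEMO_MSG_INTR)

#ifdef DEMO_DEBUG
#define DEMO_DBG(priv, nlevel, fmt, args...) \
	((void)(demo_msg_##nlevel(priv) && printf(fmt, ## args)))
#else
#define DEMO_DBG(priv, nlevel, fmt, args...) do { } while (0)
#endif

int main(void)
{
	struct demo_priv priv = { .msg_enable = DEMO_MSG_PROBE };

	DEMO_DBG(&priv, probe, "probe messages are enabled (%d)\n", 1);
	DEMO_DBG(&priv, intr, "this message is filtered out at run time\n");
	return 0;
}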
80 | #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x) | ||
81 | #define JUMBO_LEN 9000 | ||
82 | |||
83 | /* Module parameters */ | ||
84 | #define TX_TIMEO 5000 /* default 5 seconds */ | ||
85 | static int watchdog = TX_TIMEO; | ||
86 | module_param(watchdog, int, S_IRUGO | S_IWUSR); | ||
87 | MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds"); | ||
88 | |||
89 | static int debug = -1; /* -1: default, 0: no output, 16: all */ | ||
90 | module_param(debug, int, S_IRUGO | S_IWUSR); | ||
91 | MODULE_PARM_DESC(debug, "Message Level (0: no output, 16: all)"); | ||
92 | |||
93 | static int phyaddr = -1; | ||
94 | module_param(phyaddr, int, S_IRUGO); | ||
95 | MODULE_PARM_DESC(phyaddr, "Physical device address"); | ||
96 | |||
97 | #define DMA_TX_SIZE 256 | ||
98 | static int dma_txsize = DMA_TX_SIZE; | ||
99 | module_param(dma_txsize, int, S_IRUGO | S_IWUSR); | ||
100 | MODULE_PARM_DESC(dma_txsize, "Number of descriptors in the TX list"); | ||
101 | |||
102 | #define DMA_RX_SIZE 256 | ||
103 | static int dma_rxsize = DMA_RX_SIZE; | ||
104 | module_param(dma_rxsize, int, S_IRUGO | S_IWUSR); | ||
105 | MODULE_PARM_DESC(dma_rxsize, "Number of descriptors in the RX list"); | ||
106 | |||
107 | static int flow_ctrl = FLOW_OFF; | ||
108 | module_param(flow_ctrl, int, S_IRUGO | S_IWUSR); | ||
109 | MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]"); | ||
110 | |||
111 | static int pause = PAUSE_TIME; | ||
112 | module_param(pause, int, S_IRUGO | S_IWUSR); | ||
113 | MODULE_PARM_DESC(pause, "Flow Control Pause Time"); | ||
114 | |||
115 | #define TC_DEFAULT 64 | ||
116 | static int tc = TC_DEFAULT; | ||
117 | module_param(tc, int, S_IRUGO | S_IWUSR); | ||
118 | MODULE_PARM_DESC(tc, "DMA threshold control value"); | ||
119 | |||
120 | #define RX_NO_COALESCE 1 /* Always interrupt on completion */ | ||
121 | #define TX_NO_COALESCE -1 /* No moderation by default */ | ||
122 | |||
123 | /* Take care when tuning this parameter; consider both the | ||
124 | * hardware capability and the network stability/performance impact. | ||
125 | * Many tests showed that a latency of ~4ms seems to be good enough. */ | ||
126 | #ifdef CONFIG_STMMAC_TIMER | ||
127 | #define DEFAULT_PERIODIC_RATE 256 | ||
128 | static int tmrate = DEFAULT_PERIODIC_RATE; | ||
129 | module_param(tmrate, int, S_IRUGO | S_IWUSR); | ||
130 | MODULE_PARM_DESC(tmrate, "External timer freq. (default: 256Hz)"); | ||
131 | #endif | ||
132 | |||
133 | #define DMA_BUFFER_SIZE BUF_SIZE_2KiB | ||
134 | static int buf_sz = DMA_BUFFER_SIZE; | ||
135 | module_param(buf_sz, int, S_IRUGO | S_IWUSR); | ||
136 | MODULE_PARM_DESC(buf_sz, "DMA buffer size"); | ||
137 | |||
138 | /* In case of Giga ETH, we can enable/disable the COE for the | ||
139 | * transmit HW checksum computation. | ||
140 | * Note that, if tx csum is off in HW, SG will be still supported. */ | ||
141 | static int tx_coe = HW_CSUM; | ||
142 | module_param(tx_coe, int, S_IRUGO | S_IWUSR); | ||
143 | MODULE_PARM_DESC(tx_coe, "GMAC COE type 2 [on/off]"); | ||
144 | |||
145 | static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | | ||
146 | NETIF_MSG_LINK | NETIF_MSG_IFUP | | ||
147 | NETIF_MSG_IFDOWN | NETIF_MSG_TIMER); | ||
148 | |||
149 | static irqreturn_t stmmac_interrupt(int irq, void *dev_id); | ||
150 | static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev); | ||
151 | |||
152 | /** | ||
153 | * stmmac_verify_args - verify the driver parameters. | ||
154 | * Description: it checks whether any invalid parameter was passed to the driver. | ||
155 | * Note that wrong parameters are replaced with the default values. | ||
156 | */ | ||
157 | static void stmmac_verify_args(void) | ||
158 | { | ||
159 | if (unlikely(watchdog < 0)) | ||
160 | watchdog = TX_TIMEO; | ||
161 | if (unlikely(dma_rxsize < 0)) | ||
162 | dma_rxsize = DMA_RX_SIZE; | ||
163 | if (unlikely(dma_txsize < 0)) | ||
164 | dma_txsize = DMA_TX_SIZE; | ||
165 | if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB))) | ||
166 | buf_sz = DMA_BUFFER_SIZE; | ||
167 | if (unlikely(flow_ctrl > 1)) | ||
168 | flow_ctrl = FLOW_AUTO; | ||
169 | else if (likely(flow_ctrl < 0)) | ||
170 | flow_ctrl = FLOW_OFF; | ||
171 | if (unlikely((pause < 0) || (pause > 0xffff))) | ||
172 | pause = PAUSE_TIME; | ||
173 | |||
174 | return; | ||
175 | } | ||
176 | |||
177 | #if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG) | ||
178 | static void print_pkt(unsigned char *buf, int len) | ||
179 | { | ||
180 | int j; | ||
181 | pr_info("len = %d byte, buf addr: 0x%p", len, buf); | ||
182 | for (j = 0; j < len; j++) { | ||
183 | if ((j % 16) == 0) | ||
184 | pr_info("\n %03x:", j); | ||
185 | pr_info(" %02x", buf[j]); | ||
186 | } | ||
187 | pr_info("\n"); | ||
188 | return; | ||
189 | } | ||
190 | #endif | ||
191 | |||
192 | /* minimum number of free TX descriptors required to wake up TX process */ | ||
193 | #define STMMAC_TX_THRESH(x) (x->dma_tx_size/4) | ||
194 | |||
195 | static inline u32 stmmac_tx_avail(struct stmmac_priv *priv) | ||
196 | { | ||
197 | return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1; | ||
198 | } | ||
199 | |||
200 | /** | ||
201 | * stmmac_adjust_link | ||
202 | * @dev: net device structure | ||
203 | * Description: it adjusts the link parameters. | ||
204 | */ | ||
205 | static void stmmac_adjust_link(struct net_device *dev) | ||
206 | { | ||
207 | struct stmmac_priv *priv = netdev_priv(dev); | ||
208 | struct phy_device *phydev = priv->phydev; | ||
209 | unsigned long ioaddr = dev->base_addr; | ||
210 | unsigned long flags; | ||
211 | int new_state = 0; | ||
212 | unsigned int fc = priv->flow_ctrl, pause_time = priv->pause; | ||
213 | |||
214 | if (phydev == NULL) | ||
215 | return; | ||
216 | |||
217 | DBG(probe, DEBUG, "stmmac_adjust_link: called. address %d link %d\n", | ||
218 | phydev->addr, phydev->link); | ||
219 | |||
220 | spin_lock_irqsave(&priv->lock, flags); | ||
221 | if (phydev->link) { | ||
222 | u32 ctrl = readl(ioaddr + MAC_CTRL_REG); | ||
223 | |||
224 | /* Now we make sure that we can be in full duplex mode. | ||
225 | * If not, we operate in half-duplex mode. */ | ||
226 | if (phydev->duplex != priv->oldduplex) { | ||
227 | new_state = 1; | ||
228 | if (!(phydev->duplex)) | ||
229 | ctrl &= ~priv->mac_type->hw.link.duplex; | ||
230 | else | ||
231 | ctrl |= priv->mac_type->hw.link.duplex; | ||
232 | priv->oldduplex = phydev->duplex; | ||
233 | } | ||
234 | /* Flow Control operation */ | ||
235 | if (phydev->pause) | ||
236 | priv->mac_type->ops->flow_ctrl(ioaddr, phydev->duplex, | ||
237 | fc, pause_time); | ||
238 | |||
239 | if (phydev->speed != priv->speed) { | ||
240 | new_state = 1; | ||
241 | switch (phydev->speed) { | ||
242 | case 1000: | ||
243 | if (likely(priv->is_gmac)) | ||
244 | ctrl &= ~priv->mac_type->hw.link.port; | ||
245 | break; | ||
246 | case 100: | ||
247 | case 10: | ||
248 | if (priv->is_gmac) { | ||
249 | ctrl |= priv->mac_type->hw.link.port; | ||
250 | if (phydev->speed == SPEED_100) { | ||
251 | ctrl |= | ||
252 | priv->mac_type->hw.link. | ||
253 | speed; | ||
254 | } else { | ||
255 | ctrl &= | ||
256 | ~(priv->mac_type->hw. | ||
257 | link.speed); | ||
258 | } | ||
259 | } else { | ||
260 | ctrl &= ~priv->mac_type->hw.link.port; | ||
261 | } | ||
262 | priv->fix_mac_speed(priv->bsp_priv, | ||
263 | phydev->speed); | ||
264 | break; | ||
265 | default: | ||
266 | if (netif_msg_link(priv)) | ||
267 | pr_warning("%s: Speed (%d) is not 10," | ||
268 | " 100 or 1000!\n", dev->name, phydev->speed); | ||
269 | break; | ||
270 | } | ||
271 | |||
272 | priv->speed = phydev->speed; | ||
273 | } | ||
274 | |||
275 | writel(ctrl, ioaddr + MAC_CTRL_REG); | ||
276 | |||
277 | if (!priv->oldlink) { | ||
278 | new_state = 1; | ||
279 | priv->oldlink = 1; | ||
280 | } | ||
281 | } else if (priv->oldlink) { | ||
282 | new_state = 1; | ||
283 | priv->oldlink = 0; | ||
284 | priv->speed = 0; | ||
285 | priv->oldduplex = -1; | ||
286 | } | ||
287 | |||
288 | if (new_state && netif_msg_link(priv)) | ||
289 | phy_print_status(phydev); | ||
290 | |||
291 | spin_unlock_irqrestore(&priv->lock, flags); | ||
292 | |||
293 | DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n"); | ||
294 | } | ||
295 | |||
296 | /** | ||
297 | * stmmac_init_phy - PHY initialization | ||
298 | * @dev: net device structure | ||
299 | * Description: it initializes the driver's PHY state, and attaches the PHY | ||
300 | * to the mac driver. | ||
301 | * Return value: | ||
302 | * 0 on success | ||
303 | */ | ||
304 | static int stmmac_init_phy(struct net_device *dev) | ||
305 | { | ||
306 | struct stmmac_priv *priv = netdev_priv(dev); | ||
307 | struct phy_device *phydev; | ||
308 | char phy_id[BUS_ID_SIZE]; /* PHY to connect */ | ||
309 | char bus_id[BUS_ID_SIZE]; | ||
310 | |||
311 | priv->oldlink = 0; | ||
312 | priv->speed = 0; | ||
313 | priv->oldduplex = -1; | ||
314 | |||
315 | if (priv->phy_addr == -1) { | ||
316 | /* We don't have a PHY, so do nothing */ | ||
317 | return 0; | ||
318 | } | ||
319 | |||
320 | snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->bus_id); | ||
321 | snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, bus_id, priv->phy_addr); | ||
322 | pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id); | ||
323 | |||
324 | phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0, | ||
325 | priv->phy_interface); | ||
326 | |||
327 | if (IS_ERR(phydev)) { | ||
328 | pr_err("%s: Could not attach to PHY\n", dev->name); | ||
329 | return PTR_ERR(phydev); | ||
330 | } | ||
331 | |||
332 | /* | ||
333 | * Broken HW is sometimes missing the pull-up resistor on the | ||
334 | * MDIO line, which results in reads to non-existent devices returning | ||
335 | * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent | ||
336 | * device as well. | ||
337 | * Note: phydev->phy_id is the result of reading the UID PHY registers. | ||
338 | */ | ||
339 | if (phydev->phy_id == 0) { | ||
340 | phy_disconnect(phydev); | ||
341 | return -ENODEV; | ||
342 | } | ||
343 | pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)" | ||
344 | " Link = %d\n", dev->name, phydev->phy_id, phydev->link); | ||
345 | |||
346 | priv->phydev = phydev; | ||
347 | |||
348 | return 0; | ||
349 | } | ||
350 | |||
351 | static inline void stmmac_mac_enable_rx(unsigned long ioaddr) | ||
352 | { | ||
353 | u32 value = readl(ioaddr + MAC_CTRL_REG); | ||
354 | value |= MAC_RNABLE_RX; | ||
355 | /* Set the RE (receive enable bit into the MAC CTRL register). */ | ||
356 | writel(value, ioaddr + MAC_CTRL_REG); | ||
357 | } | ||
358 | |||
359 | static inline void stmmac_mac_enable_tx(unsigned long ioaddr) | ||
360 | { | ||
361 | u32 value = readl(ioaddr + MAC_CTRL_REG); | ||
362 | value |= MAC_ENABLE_TX; | ||
363 | /* Set the TE (transmit enable bit into the MAC CTRL register). */ | ||
364 | writel(value, ioaddr + MAC_CTRL_REG); | ||
365 | } | ||
366 | |||
367 | static inline void stmmac_mac_disable_rx(unsigned long ioaddr) | ||
368 | { | ||
369 | u32 value = readl(ioaddr + MAC_CTRL_REG); | ||
370 | value &= ~MAC_RNABLE_RX; | ||
371 | writel(value, ioaddr + MAC_CTRL_REG); | ||
372 | } | ||
373 | |||
374 | static inline void stmmac_mac_disable_tx(unsigned long ioaddr) | ||
375 | { | ||
376 | u32 value = readl(ioaddr + MAC_CTRL_REG); | ||
377 | value &= ~MAC_ENABLE_TX; | ||
378 | writel(value, ioaddr + MAC_CTRL_REG); | ||
379 | } | ||
380 | |||
381 | /** | ||
382 | * display_ring | ||
383 | * @p: pointer to the ring. | ||
384 | * @size: size of the ring. | ||
385 | * Description: display all the descriptors within the ring. | ||
386 | */ | ||
387 | static void display_ring(struct dma_desc *p, int size) | ||
388 | { | ||
389 | struct tmp_s { | ||
390 | u64 a; | ||
391 | unsigned int b; | ||
392 | unsigned int c; | ||
393 | }; | ||
394 | int i; | ||
395 | for (i = 0; i < size; i++) { | ||
396 | struct tmp_s *x = (struct tmp_s *)(p + i); | ||
397 | pr_info("\t%d [0x%x]: DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x", | ||
398 | i, (unsigned int)virt_to_phys(&p[i]), | ||
399 | (unsigned int)(x->a), (unsigned int)((x->a) >> 32), | ||
400 | x->b, x->c); | ||
401 | pr_info("\n"); | ||
402 | } | ||
403 | } | ||
404 | |||
405 | /** | ||
406 | * init_dma_desc_rings - init the RX/TX descriptor rings | ||
407 | * @dev: net device structure | ||
408 | * Description: this function initializes the DMA RX/TX descriptors | ||
409 | * and allocates the socket buffers. | ||
410 | */ | ||
411 | static void init_dma_desc_rings(struct net_device *dev) | ||
412 | { | ||
413 | int i; | ||
414 | struct stmmac_priv *priv = netdev_priv(dev); | ||
415 | struct sk_buff *skb; | ||
416 | unsigned int txsize = priv->dma_tx_size; | ||
417 | unsigned int rxsize = priv->dma_rx_size; | ||
418 | unsigned int bfsize = priv->dma_buf_sz; | ||
419 | int buff2_needed = 0; | ||
420 | int dis_ic = 0; | ||
421 | |||
422 | #ifdef CONFIG_STMMAC_TIMER | ||
423 | /* When using timers, disable the interrupt on completion for reception */ | ||
424 | dis_ic = 1; | ||
425 | #endif | ||
426 | /* Set the buffer size according to the MTU; | ||
427 | * in the jumbo-frame case the buffer sizes need to be bumped up. | ||
428 | */ | ||
429 | if (unlikely(dev->mtu >= BUF_SIZE_8KiB)) | ||
430 | bfsize = BUF_SIZE_16KiB; | ||
431 | else if (unlikely(dev->mtu >= BUF_SIZE_4KiB)) | ||
432 | bfsize = BUF_SIZE_8KiB; | ||
433 | else if (unlikely(dev->mtu >= BUF_SIZE_2KiB)) | ||
434 | bfsize = BUF_SIZE_4KiB; | ||
435 | else if (unlikely(dev->mtu >= DMA_BUFFER_SIZE)) | ||
436 | bfsize = BUF_SIZE_2KiB; | ||
437 | else | ||
438 | bfsize = DMA_BUFFER_SIZE; | ||
439 | |||
440 | /* Buffers of 8KiB or more need the second buffer in the chain */ | ||
441 | if (bfsize >= BUF_SIZE_8KiB) | ||
442 | buff2_needed = 1; | ||
443 | |||
444 | DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n", | ||
445 | txsize, rxsize, bfsize); | ||
446 | |||
447 | priv->rx_skbuff_dma = kmalloc(rxsize * sizeof(dma_addr_t), GFP_KERNEL); | ||
448 | priv->rx_skbuff = | ||
449 | kmalloc(sizeof(struct sk_buff *) * rxsize, GFP_KERNEL); | ||
450 | priv->dma_rx = | ||
451 | (struct dma_desc *)dma_alloc_coherent(priv->device, | ||
452 | rxsize * | ||
453 | sizeof(struct dma_desc), | ||
454 | &priv->dma_rx_phy, | ||
455 | GFP_KERNEL); | ||
456 | priv->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * txsize, | ||
457 | GFP_KERNEL); | ||
458 | priv->dma_tx = | ||
459 | (struct dma_desc *)dma_alloc_coherent(priv->device, | ||
460 | txsize * | ||
461 | sizeof(struct dma_desc), | ||
462 | &priv->dma_tx_phy, | ||
463 | GFP_KERNEL); | ||
464 | |||
465 | if ((priv->dma_rx == NULL) || (priv->dma_tx == NULL)) { | ||
466 | pr_err("%s:ERROR allocating the DMA Tx/Rx desc\n", __func__); | ||
467 | return; | ||
468 | } | ||
469 | |||
470 | DBG(probe, INFO, "stmmac (%s) DMA desc rings: virt addr (Rx %p, " | ||
471 | "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n", | ||
472 | dev->name, priv->dma_rx, priv->dma_tx, | ||
473 | (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy); | ||
474 | |||
475 | /* RX INITIALIZATION */ | ||
476 | DBG(probe, INFO, "stmmac: SKB addresses:\n" | ||
477 | "skb\t\tskb data\tdma data\n"); | ||
478 | |||
479 | for (i = 0; i < rxsize; i++) { | ||
480 | struct dma_desc *p = priv->dma_rx + i; | ||
481 | |||
482 | skb = netdev_alloc_skb_ip_align(dev, bfsize); | ||
483 | if (unlikely(skb == NULL)) { | ||
484 | pr_err("%s: Rx init fails; skb is NULL\n", __func__); | ||
485 | break; | ||
486 | } | ||
487 | priv->rx_skbuff[i] = skb; | ||
488 | priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, | ||
489 | bfsize, DMA_FROM_DEVICE); | ||
490 | |||
491 | p->des2 = priv->rx_skbuff_dma[i]; | ||
492 | if (unlikely(buff2_needed)) | ||
493 | p->des3 = p->des2 + BUF_SIZE_8KiB; | ||
494 | DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i], | ||
495 | priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]); | ||
496 | } | ||
497 | priv->cur_rx = 0; | ||
498 | priv->dirty_rx = (unsigned int)(i - rxsize); | ||
499 | priv->dma_buf_sz = bfsize; | ||
500 | buf_sz = bfsize; | ||
501 | |||
502 | /* TX INITIALIZATION */ | ||
503 | for (i = 0; i < txsize; i++) { | ||
504 | priv->tx_skbuff[i] = NULL; | ||
505 | priv->dma_tx[i].des2 = 0; | ||
506 | } | ||
507 | priv->dirty_tx = 0; | ||
508 | priv->cur_tx = 0; | ||
509 | |||
510 | /* Clear the Rx/Tx descriptors */ | ||
511 | priv->mac_type->ops->init_rx_desc(priv->dma_rx, rxsize, dis_ic); | ||
512 | priv->mac_type->ops->init_tx_desc(priv->dma_tx, txsize); | ||
513 | |||
514 | if (netif_msg_hw(priv)) { | ||
515 | pr_info("RX descriptor ring:\n"); | ||
516 | display_ring(priv->dma_rx, rxsize); | ||
517 | pr_info("TX descriptor ring:\n"); | ||
518 | display_ring(priv->dma_tx, txsize); | ||
519 | } | ||
520 | return; | ||
521 | } | ||
522 | |||
523 | static void dma_free_rx_skbufs(struct stmmac_priv *priv) | ||
524 | { | ||
525 | int i; | ||
526 | |||
527 | for (i = 0; i < priv->dma_rx_size; i++) { | ||
528 | if (priv->rx_skbuff[i]) { | ||
529 | dma_unmap_single(priv->device, priv->rx_skbuff_dma[i], | ||
530 | priv->dma_buf_sz, DMA_FROM_DEVICE); | ||
531 | dev_kfree_skb_any(priv->rx_skbuff[i]); | ||
532 | } | ||
533 | priv->rx_skbuff[i] = NULL; | ||
534 | } | ||
535 | return; | ||
536 | } | ||
537 | |||
538 | static void dma_free_tx_skbufs(struct stmmac_priv *priv) | ||
539 | { | ||
540 | int i; | ||
541 | |||
542 | for (i = 0; i < priv->dma_tx_size; i++) { | ||
543 | if (priv->tx_skbuff[i] != NULL) { | ||
544 | struct dma_desc *p = priv->dma_tx + i; | ||
545 | if (p->des2) | ||
546 | dma_unmap_single(priv->device, p->des2, | ||
547 | priv->mac_type->ops->get_tx_len(p), | ||
548 | DMA_TO_DEVICE); | ||
549 | dev_kfree_skb_any(priv->tx_skbuff[i]); | ||
550 | priv->tx_skbuff[i] = NULL; | ||
551 | } | ||
552 | } | ||
553 | return; | ||
554 | } | ||
555 | |||
556 | static void free_dma_desc_resources(struct stmmac_priv *priv) | ||
557 | { | ||
558 | /* Release the DMA TX/RX socket buffers */ | ||
559 | dma_free_rx_skbufs(priv); | ||
560 | dma_free_tx_skbufs(priv); | ||
561 | |||
562 | /* Free the region of consistent memory previously allocated for | ||
563 | * the DMA */ | ||
564 | dma_free_coherent(priv->device, | ||
565 | priv->dma_tx_size * sizeof(struct dma_desc), | ||
566 | priv->dma_tx, priv->dma_tx_phy); | ||
567 | dma_free_coherent(priv->device, | ||
568 | priv->dma_rx_size * sizeof(struct dma_desc), | ||
569 | priv->dma_rx, priv->dma_rx_phy); | ||
570 | kfree(priv->rx_skbuff_dma); | ||
571 | kfree(priv->rx_skbuff); | ||
572 | kfree(priv->tx_skbuff); | ||
573 | |||
574 | return; | ||
575 | } | ||
576 | |||
577 | /** | ||
578 | * stmmac_dma_start_tx | ||
579 | * @ioaddr: device I/O address | ||
580 | * Description: this function starts the DMA tx process. | ||
581 | */ | ||
582 | static void stmmac_dma_start_tx(unsigned long ioaddr) | ||
583 | { | ||
584 | u32 value = readl(ioaddr + DMA_CONTROL); | ||
585 | value |= DMA_CONTROL_ST; | ||
586 | writel(value, ioaddr + DMA_CONTROL); | ||
587 | return; | ||
588 | } | ||
589 | |||
590 | static void stmmac_dma_stop_tx(unsigned long ioaddr) | ||
591 | { | ||
592 | u32 value = readl(ioaddr + DMA_CONTROL); | ||
593 | value &= ~DMA_CONTROL_ST; | ||
594 | writel(value, ioaddr + DMA_CONTROL); | ||
595 | return; | ||
596 | } | ||
597 | |||
598 | /** | ||
599 | * stmmac_dma_start_rx | ||
600 | * @ioaddr: device I/O address | ||
601 | * Description: this function starts the DMA rx process. | ||
602 | */ | ||
603 | static void stmmac_dma_start_rx(unsigned long ioaddr) | ||
604 | { | ||
605 | u32 value = readl(ioaddr + DMA_CONTROL); | ||
606 | value |= DMA_CONTROL_SR; | ||
607 | writel(value, ioaddr + DMA_CONTROL); | ||
608 | |||
609 | return; | ||
610 | } | ||
611 | |||
612 | static void stmmac_dma_stop_rx(unsigned long ioaddr) | ||
613 | { | ||
614 | u32 value = readl(ioaddr + DMA_CONTROL); | ||
615 | value &= ~DMA_CONTROL_SR; | ||
616 | writel(value, ioaddr + DMA_CONTROL); | ||
617 | |||
618 | return; | ||
619 | } | ||
620 | |||
621 | /** | ||
622 | * stmmac_dma_operation_mode - HW DMA operation mode | ||
623 | * @priv : pointer to the private device structure. | ||
624 | * Description: it sets the DMA operation mode: tx/rx DMA thresholds | ||
625 | * or Store-And-Forward capability. It also verifies the COE for the | ||
626 | * transmission in case of Giga ETH. | ||
627 | */ | ||
628 | static void stmmac_dma_operation_mode(struct stmmac_priv *priv) | ||
629 | { | ||
630 | if (!priv->is_gmac) { | ||
631 | /* MAC 10/100 */ | ||
632 | priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc, 0); | ||
633 | priv->tx_coe = NO_HW_CSUM; | ||
634 | } else { | ||
635 | if ((priv->dev->mtu <= ETH_DATA_LEN) && (tx_coe)) { | ||
636 | priv->mac_type->ops->dma_mode(priv->dev->base_addr, | ||
637 | SF_DMA_MODE, SF_DMA_MODE); | ||
638 | tc = SF_DMA_MODE; | ||
639 | priv->tx_coe = HW_CSUM; | ||
640 | } else { | ||
641 | /* Checksum computation is performed in software. */ | ||
642 | priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc, | ||
643 | SF_DMA_MODE); | ||
644 | priv->tx_coe = NO_HW_CSUM; | ||
645 | } | ||
646 | } | ||
647 | tx_coe = priv->tx_coe; | ||
648 | |||
649 | return; | ||
650 | } | ||
651 | |||
652 | #ifdef STMMAC_DEBUG | ||
653 | /** | ||
654 | * show_tx_process_state | ||
655 | * @status: tx descriptor status field | ||
656 | * Description: it shows the Transmit Process State for CSR5[22:20] | ||
657 | */ | ||
658 | static void show_tx_process_state(unsigned int status) | ||
659 | { | ||
660 | unsigned int state; | ||
661 | state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT; | ||
662 | |||
663 | switch (state) { | ||
664 | case 0: | ||
665 | pr_info("- TX (Stopped): Reset or Stop command\n"); | ||
666 | break; | ||
667 | case 1: | ||
668 | pr_info("- TX (Running):Fetching the Tx desc\n"); | ||
669 | break; | ||
670 | case 2: | ||
671 | pr_info("- TX (Running): Waiting for end of tx\n"); | ||
672 | break; | ||
673 | case 3: | ||
674 | pr_info("- TX (Running): Reading the data " | ||
675 | "and queuing the data into the Tx buf\n"); | ||
676 | break; | ||
677 | case 6: | ||
678 | pr_info("- TX (Suspended): Tx Buff Underflow " | ||
679 | "or an unavailable Transmit descriptor\n"); | ||
680 | break; | ||
681 | case 7: | ||
682 | pr_info("- TX (Running): Closing Tx descriptor\n"); | ||
683 | break; | ||
684 | default: | ||
685 | break; | ||
686 | } | ||
687 | return; | ||
688 | } | ||
689 | |||
690 | /** | ||
691 | * show_rx_process_state | ||
692 | * @status: rx descriptor status field | ||
693 | * Description: it shows the Receive Process State for CSR5[19:17] | ||
694 | */ | ||
695 | static void show_rx_process_state(unsigned int status) | ||
696 | { | ||
697 | unsigned int state; | ||
698 | state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT; | ||
699 | |||
700 | switch (state) { | ||
701 | case 0: | ||
702 | pr_info("- RX (Stopped): Reset or Stop command\n"); | ||
703 | break; | ||
704 | case 1: | ||
705 | pr_info("- RX (Running): Fetching the Rx desc\n"); | ||
706 | break; | ||
707 | case 2: | ||
708 | pr_info("- RX (Running):Checking for end of pkt\n"); | ||
709 | break; | ||
710 | case 3: | ||
711 | pr_info("- RX (Running): Waiting for Rx pkt\n"); | ||
712 | break; | ||
713 | case 4: | ||
714 | pr_info("- RX (Suspended): Unavailable Rx buf\n"); | ||
715 | break; | ||
716 | case 5: | ||
717 | pr_info("- RX (Running): Closing Rx descriptor\n"); | ||
718 | break; | ||
719 | case 6: | ||
720 | pr_info("- RX(Running): Flushing the current frame" | ||
721 | " from the Rx buf\n"); | ||
722 | break; | ||
723 | case 7: | ||
724 | pr_info("- RX (Running): Queuing the Rx frame" | ||
725 | " from the Rx buf into memory\n"); | ||
726 | break; | ||
727 | default: | ||
728 | break; | ||
729 | } | ||
730 | return; | ||
731 | } | ||
732 | #endif | ||
733 | |||
734 | /** | ||
735 | * stmmac_tx: | ||
736 | * @priv: private driver structure | ||
737 | * Description: it reclaims resources after transmission completes. | ||
738 | */ | ||
739 | static void stmmac_tx(struct stmmac_priv *priv) | ||
740 | { | ||
741 | unsigned int txsize = priv->dma_tx_size; | ||
742 | unsigned long ioaddr = priv->dev->base_addr; | ||
743 | |||
744 | while (priv->dirty_tx != priv->cur_tx) { | ||
745 | int last; | ||
746 | unsigned int entry = priv->dirty_tx % txsize; | ||
747 | struct sk_buff *skb = priv->tx_skbuff[entry]; | ||
748 | struct dma_desc *p = priv->dma_tx + entry; | ||
749 | |||
750 | /* Check if the descriptor is owned by the DMA. */ | ||
751 | if (priv->mac_type->ops->get_tx_owner(p)) | ||
752 | break; | ||
753 | |||
754 | /* Verify tx error by looking at the last segment */ | ||
755 | last = priv->mac_type->ops->get_tx_ls(p); | ||
756 | if (likely(last)) { | ||
757 | int tx_error = | ||
758 | priv->mac_type->ops->tx_status(&priv->dev->stats, | ||
759 | &priv->xstats, | ||
760 | p, ioaddr); | ||
761 | if (likely(tx_error == 0)) { | ||
762 | priv->dev->stats.tx_packets++; | ||
763 | priv->xstats.tx_pkt_n++; | ||
764 | } else | ||
765 | priv->dev->stats.tx_errors++; | ||
766 | } | ||
767 | TX_DBG("%s: curr %d, dirty %d\n", __func__, | ||
768 | priv->cur_tx, priv->dirty_tx); | ||
769 | |||
770 | if (likely(p->des2)) | ||
771 | dma_unmap_single(priv->device, p->des2, | ||
772 | priv->mac_type->ops->get_tx_len(p), | ||
773 | DMA_TO_DEVICE); | ||
774 | if (unlikely(p->des3)) | ||
775 | p->des3 = 0; | ||
776 | |||
777 | if (likely(skb != NULL)) { | ||
778 | /* | ||
779 | * If there is room in the recycle queue (bounded by the RX | ||
780 | * ring size), add this skb back into the pool, provided it | ||
781 | * is the right size. | ||
782 | */ | ||
783 | if ((skb_queue_len(&priv->rx_recycle) < | ||
784 | priv->dma_rx_size) && | ||
785 | skb_recycle_check(skb, priv->dma_buf_sz)) | ||
786 | __skb_queue_head(&priv->rx_recycle, skb); | ||
787 | else | ||
788 | dev_kfree_skb(skb); | ||
789 | |||
790 | priv->tx_skbuff[entry] = NULL; | ||
791 | } | ||
792 | |||
793 | priv->mac_type->ops->release_tx_desc(p); | ||
794 | |||
795 | entry = (++priv->dirty_tx) % txsize; | ||
796 | } | ||
797 | if (unlikely(netif_queue_stopped(priv->dev) && | ||
798 | stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) { | ||
799 | netif_tx_lock(priv->dev); | ||
800 | if (netif_queue_stopped(priv->dev) && | ||
801 | stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) { | ||
802 | TX_DBG("%s: restart transmit\n", __func__); | ||
803 | netif_wake_queue(priv->dev); | ||
804 | } | ||
805 | netif_tx_unlock(priv->dev); | ||
806 | } | ||
807 | return; | ||
808 | } | ||
809 | |||
810 | static inline void stmmac_enable_irq(struct stmmac_priv *priv) | ||
811 | { | ||
812 | #ifndef CONFIG_STMMAC_TIMER | ||
813 | writel(DMA_INTR_DEFAULT_MASK, priv->dev->base_addr + DMA_INTR_ENA); | ||
814 | #else | ||
815 | priv->tm->timer_start(tmrate); | ||
816 | #endif | ||
817 | } | ||
818 | |||
819 | static inline void stmmac_disable_irq(struct stmmac_priv *priv) | ||
820 | { | ||
821 | #ifndef CONFIG_STMMAC_TIMER | ||
822 | writel(0, priv->dev->base_addr + DMA_INTR_ENA); | ||
823 | #else | ||
824 | priv->tm->timer_stop(); | ||
825 | #endif | ||
826 | } | ||
827 | |||
828 | static int stmmac_has_work(struct stmmac_priv *priv) | ||
829 | { | ||
830 | unsigned int has_work = 0; | ||
831 | int rxret, tx_work = 0; | ||
832 | |||
833 | rxret = priv->mac_type->ops->get_rx_owner(priv->dma_rx + | ||
834 | (priv->cur_rx % priv->dma_rx_size)); | ||
835 | |||
836 | if (priv->dirty_tx != priv->cur_tx) | ||
837 | tx_work = 1; | ||
838 | |||
839 | if (likely(!rxret || tx_work)) | ||
840 | has_work = 1; | ||
841 | |||
842 | return has_work; | ||
843 | } | ||
844 | |||
845 | static inline void _stmmac_schedule(struct stmmac_priv *priv) | ||
846 | { | ||
847 | if (likely(stmmac_has_work(priv))) { | ||
848 | stmmac_disable_irq(priv); | ||
849 | napi_schedule(&priv->napi); | ||
850 | } | ||
851 | } | ||
852 | |||
853 | #ifdef CONFIG_STMMAC_TIMER | ||
854 | void stmmac_schedule(struct net_device *dev) | ||
855 | { | ||
856 | struct stmmac_priv *priv = netdev_priv(dev); | ||
857 | |||
858 | priv->xstats.sched_timer_n++; | ||
859 | |||
860 | _stmmac_schedule(priv); | ||
861 | |||
862 | return; | ||
863 | } | ||
864 | |||
865 | static void stmmac_no_timer_started(unsigned int x) | ||
866 | { | ||
867 | } | ||
868 | |||
869 | static void stmmac_no_timer_stopped(void) | ||
870 | { | ||
871 | } | ||
872 | #endif | ||
873 | |||
874 | /** | ||
875 | * stmmac_tx_err: | ||
876 | * @priv: pointer to the private device structure | ||
877 | * Description: it cleans the descriptors and restarts the transmission | ||
878 | * in case of errors. | ||
879 | */ | ||
880 | static void stmmac_tx_err(struct stmmac_priv *priv) | ||
881 | { | ||
882 | netif_stop_queue(priv->dev); | ||
883 | |||
884 | stmmac_dma_stop_tx(priv->dev->base_addr); | ||
885 | dma_free_tx_skbufs(priv); | ||
886 | priv->mac_type->ops->init_tx_desc(priv->dma_tx, priv->dma_tx_size); | ||
887 | priv->dirty_tx = 0; | ||
888 | priv->cur_tx = 0; | ||
889 | stmmac_dma_start_tx(priv->dev->base_addr); | ||
890 | |||
891 | priv->dev->stats.tx_errors++; | ||
892 | netif_wake_queue(priv->dev); | ||
893 | |||
894 | return; | ||
895 | } | ||
896 | |||
897 | /** | ||
898 | * stmmac_dma_interrupt - Interrupt handler for the driver | ||
899 | * @dev: net device structure | ||
900 | * Description: Interrupt handler for the driver (DMA). | ||
901 | */ | ||
902 | static void stmmac_dma_interrupt(struct net_device *dev) | ||
903 | { | ||
904 | unsigned long ioaddr = dev->base_addr; | ||
905 | struct stmmac_priv *priv = netdev_priv(dev); | ||
906 | /* read the status register (CSR5) */ | ||
907 | u32 intr_status = readl(ioaddr + DMA_STATUS); | ||
908 | |||
909 | DBG(intr, INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status); | ||
910 | |||
911 | #ifdef STMMAC_DEBUG | ||
912 | /* It displays the DMA transmit process state (CSR5 register) */ | ||
913 | if (netif_msg_tx_done(priv)) | ||
914 | show_tx_process_state(intr_status); | ||
915 | if (netif_msg_rx_status(priv)) | ||
916 | show_rx_process_state(intr_status); | ||
917 | #endif | ||
918 | /* ABNORMAL interrupts */ | ||
919 | if (unlikely(intr_status & DMA_STATUS_AIS)) { | ||
920 | DBG(intr, INFO, "CSR5[15] DMA ABNORMAL IRQ: "); | ||
921 | if (unlikely(intr_status & DMA_STATUS_UNF)) { | ||
922 | DBG(intr, INFO, "transmit underflow\n"); | ||
923 | if (unlikely(tc != SF_DMA_MODE) | ||
924 | && (tc <= 256)) { | ||
925 | /* Try to bump up the threshold */ | ||
926 | tc += 64; | ||
927 | priv->mac_type->ops->dma_mode(ioaddr, tc, | ||
928 | SF_DMA_MODE); | ||
929 | priv->xstats.threshold = tc; | ||
930 | } | ||
931 | stmmac_tx_err(priv); | ||
932 | priv->xstats.tx_undeflow_irq++; | ||
933 | } | ||
934 | if (unlikely(intr_status & DMA_STATUS_TJT)) { | ||
935 | DBG(intr, INFO, "transmit jabber\n"); | ||
936 | priv->xstats.tx_jabber_irq++; | ||
937 | } | ||
938 | if (unlikely(intr_status & DMA_STATUS_OVF)) { | ||
939 | DBG(intr, INFO, "recv overflow\n"); | ||
940 | priv->xstats.rx_overflow_irq++; | ||
941 | } | ||
942 | if (unlikely(intr_status & DMA_STATUS_RU)) { | ||
943 | DBG(intr, INFO, "receive buffer unavailable\n"); | ||
944 | priv->xstats.rx_buf_unav_irq++; | ||
945 | } | ||
946 | if (unlikely(intr_status & DMA_STATUS_RPS)) { | ||
947 | DBG(intr, INFO, "receive process stopped\n"); | ||
948 | priv->xstats.rx_process_stopped_irq++; | ||
949 | } | ||
950 | if (unlikely(intr_status & DMA_STATUS_RWT)) { | ||
951 | DBG(intr, INFO, "receive watchdog\n"); | ||
952 | priv->xstats.rx_watchdog_irq++; | ||
953 | } | ||
954 | if (unlikely(intr_status & DMA_STATUS_ETI)) { | ||
955 | DBG(intr, INFO, "transmit early interrupt\n"); | ||
956 | priv->xstats.tx_early_irq++; | ||
957 | } | ||
958 | if (unlikely(intr_status & DMA_STATUS_TPS)) { | ||
959 | DBG(intr, INFO, "transmit process stopped\n"); | ||
960 | priv->xstats.tx_process_stopped_irq++; | ||
961 | stmmac_tx_err(priv); | ||
962 | } | ||
963 | if (unlikely(intr_status & DMA_STATUS_FBI)) { | ||
964 | DBG(intr, INFO, "fatal bus error\n"); | ||
965 | priv->xstats.fatal_bus_error_irq++; | ||
966 | stmmac_tx_err(priv); | ||
967 | } | ||
968 | } | ||
969 | |||
970 | /* TX/RX NORMAL interrupts */ | ||
971 | if (intr_status & DMA_STATUS_NIS) { | ||
972 | priv->xstats.normal_irq_n++; | ||
973 | if (likely((intr_status & DMA_STATUS_RI) || | ||
974 | (intr_status & (DMA_STATUS_TI)))) | ||
975 | _stmmac_schedule(priv); | ||
976 | } | ||
977 | |||
978 | /* Optional hardware blocks, interrupts should be disabled */ | ||
979 | if (unlikely(intr_status & | ||
980 | (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI))) | ||
981 | pr_info("%s: unexpected status %08x\n", __func__, intr_status); | ||
982 | |||
983 | /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */ | ||
984 | writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS); | ||
985 | |||
986 | DBG(intr, INFO, "\n\n"); | ||
987 | |||
988 | return; | ||
989 | } | ||
990 | |||
991 | /** | ||
992 | * stmmac_open - open entry point of the driver | ||
993 | * @dev : pointer to the device structure. | ||
994 | * Description: | ||
995 | * This function is the open entry point of the driver. | ||
996 | * Return value: | ||
997 | * 0 on success and an appropriate (-)ve integer as defined in errno.h | ||
998 | * file on failure. | ||
999 | */ | ||
1000 | static int stmmac_open(struct net_device *dev) | ||
1001 | { | ||
1002 | struct stmmac_priv *priv = netdev_priv(dev); | ||
1003 | unsigned long ioaddr = dev->base_addr; | ||
1004 | int ret; | ||
1005 | |||
1006 | /* Check that the MAC address is valid. If it is not valid, a | ||
1007 | * random one is generated and a warning is printed. The address | ||
1008 | * can be set later with the following linux command: | ||
1009 | * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */ | ||
1010 | if (!is_valid_ether_addr(dev->dev_addr)) { | ||
1011 | random_ether_addr(dev->dev_addr); | ||
1012 | pr_warning("%s: generated random MAC address %pM\n", dev->name, | ||
1013 | dev->dev_addr); | ||
1014 | } | ||
1015 | |||
1016 | stmmac_verify_args(); | ||
1017 | |||
1018 | ret = stmmac_init_phy(dev); | ||
1019 | if (unlikely(ret)) { | ||
1020 | pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret); | ||
1021 | return ret; | ||
1022 | } | ||
1023 | |||
1024 | /* Request the IRQ lines */ | ||
1025 | ret = request_irq(dev->irq, &stmmac_interrupt, | ||
1026 | IRQF_SHARED, dev->name, dev); | ||
1027 | if (unlikely(ret < 0)) { | ||
1028 | pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n", | ||
1029 | __func__, dev->irq, ret); | ||
1030 | return ret; | ||
1031 | } | ||
1032 | |||
1033 | #ifdef CONFIG_STMMAC_TIMER | ||
1034 | priv->tm = kmalloc(sizeof(struct stmmac_timer), GFP_KERNEL); | ||
1035 | if (unlikely(priv->tm == NULL)) { | ||
1036 | pr_err("%s: ERROR: timer memory alloc failed\n", __func__); | ||
1037 | return -ENOMEM; | ||
1038 | } | ||
1039 | priv->tm->freq = tmrate; | ||
1040 | |||
1041 | /* Test if the HW timer can be actually used. | ||
1042 | * In case of failure continue with no timer. */ | ||
1043 | if (unlikely((stmmac_open_ext_timer(dev, priv->tm)) < 0)) { | ||
1044 | pr_warning("stmmaceth: cannot attach the HW timer\n"); | ||
1045 | tmrate = 0; | ||
1046 | priv->tm->freq = 0; | ||
1047 | priv->tm->timer_start = stmmac_no_timer_started; | ||
1048 | priv->tm->timer_stop = stmmac_no_timer_stopped; | ||
1049 | } | ||
1050 | #endif | ||
1051 | |||
1052 | /* Create and initialize the TX/RX descriptors chains. */ | ||
1053 | priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); | ||
1054 | priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize); | ||
1055 | priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); | ||
1056 | init_dma_desc_rings(dev); | ||
1057 | |||
1058 | /* DMA initialization and SW reset */ | ||
1059 | if (unlikely(priv->mac_type->ops->dma_init(ioaddr, | ||
1060 | priv->pbl, priv->dma_tx_phy, priv->dma_rx_phy) < 0)) { | ||
1061 | |||
1062 | pr_err("%s: DMA initialization failed\n", __func__); | ||
1063 | return -1; | ||
1064 | } | ||
1065 | |||
1066 | /* Copy the MAC addr into the HW */ | ||
1067 | priv->mac_type->ops->set_umac_addr(ioaddr, dev->dev_addr, 0); | ||
1068 | /* Initialize the MAC Core */ | ||
1069 | priv->mac_type->ops->core_init(ioaddr); | ||
1070 | |||
1071 | priv->shutdown = 0; | ||
1072 | |||
1073 | /* Initialise the MMC (if present) to disable all interrupts. */ | ||
1074 | writel(0xffffffff, ioaddr + MMC_HIGH_INTR_MASK); | ||
1075 | writel(0xffffffff, ioaddr + MMC_LOW_INTR_MASK); | ||
1076 | |||
1077 | /* Enable the MAC Rx/Tx */ | ||
1078 | stmmac_mac_enable_rx(ioaddr); | ||
1079 | stmmac_mac_enable_tx(ioaddr); | ||
1080 | |||
1081 | /* Set the HW DMA mode and the COE */ | ||
1082 | stmmac_dma_operation_mode(priv); | ||
1083 | |||
1084 | /* Extra statistics */ | ||
1085 | memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); | ||
1086 | priv->xstats.threshold = tc; | ||
1087 | |||
1088 | /* Start the ball rolling... */ | ||
1089 | DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name); | ||
1090 | stmmac_dma_start_tx(ioaddr); | ||
1091 | stmmac_dma_start_rx(ioaddr); | ||
1092 | |||
1093 | #ifdef CONFIG_STMMAC_TIMER | ||
1094 | priv->tm->timer_start(tmrate); | ||
1095 | #endif | ||
1096 | /* Dump DMA/MAC registers */ | ||
1097 | if (netif_msg_hw(priv)) { | ||
1098 | priv->mac_type->ops->dump_mac_regs(ioaddr); | ||
1099 | priv->mac_type->ops->dump_dma_regs(ioaddr); | ||
1100 | } | ||
1101 | |||
1102 | if (priv->phydev) | ||
1103 | phy_start(priv->phydev); | ||
1104 | |||
1105 | napi_enable(&priv->napi); | ||
1106 | skb_queue_head_init(&priv->rx_recycle); | ||
1107 | netif_start_queue(dev); | ||
1108 | return 0; | ||
1109 | } | ||
1110 | |||
1111 | /** | ||
1112 | * stmmac_release - close entry point of the driver | ||
1113 | * @dev : device pointer. | ||
1114 | * Description: | ||
1115 | * This is the stop entry point of the driver. | ||
1116 | */ | ||
1117 | static int stmmac_release(struct net_device *dev) | ||
1118 | { | ||
1119 | struct stmmac_priv *priv = netdev_priv(dev); | ||
1120 | |||
1121 | /* Stop and disconnect the PHY */ | ||
1122 | if (priv->phydev) { | ||
1123 | phy_stop(priv->phydev); | ||
1124 | phy_disconnect(priv->phydev); | ||
1125 | priv->phydev = NULL; | ||
1126 | } | ||
1127 | |||
1128 | netif_stop_queue(dev); | ||
1129 | |||
1130 | #ifdef CONFIG_STMMAC_TIMER | ||
1131 | /* Stop and release the timer */ | ||
1132 | stmmac_close_ext_timer(); | ||
1133 | if (priv->tm != NULL) | ||
1134 | kfree(priv->tm); | ||
1135 | #endif | ||
1136 | napi_disable(&priv->napi); | ||
1137 | skb_queue_purge(&priv->rx_recycle); | ||
1138 | |||
1139 | /* Free the IRQ lines */ | ||
1140 | free_irq(dev->irq, dev); | ||
1141 | |||
1142 | /* Stop TX/RX DMA and clear the descriptors */ | ||
1143 | stmmac_dma_stop_tx(dev->base_addr); | ||
1144 | stmmac_dma_stop_rx(dev->base_addr); | ||
1145 | |||
1146 | /* Release and free the Rx/Tx resources */ | ||
1147 | free_dma_desc_resources(priv); | ||
1148 | |||
1149 | /* Disable the MAC core */ | ||
1150 | stmmac_mac_disable_tx(dev->base_addr); | ||
1151 | stmmac_mac_disable_rx(dev->base_addr); | ||
1152 | |||
1153 | netif_carrier_off(dev); | ||
1154 | |||
1155 | return 0; | ||
1156 | } | ||
1157 | |||
1158 | /* | ||
1159 | * Perform software segmentation (emulated TSO) on the skb. | ||
1160 | */ | ||
1161 | static int stmmac_sw_tso(struct stmmac_priv *priv, struct sk_buff *skb) | ||
1162 | { | ||
1163 | struct sk_buff *segs, *curr_skb; | ||
1164 | int gso_segs = skb_shinfo(skb)->gso_segs; | ||
1165 | |||
1166 | /* Estimate the number of fragments in the worst case */ | ||
1167 | if (unlikely(stmmac_tx_avail(priv) < gso_segs)) { | ||
1168 | netif_stop_queue(priv->dev); | ||
1169 | TX_DBG(KERN_ERR "%s: TSO BUG! Tx Ring full when queue awake\n", | ||
1170 | __func__); | ||
1171 | if (stmmac_tx_avail(priv) < gso_segs) | ||
1172 | return NETDEV_TX_BUSY; | ||
1173 | |||
1174 | netif_wake_queue(priv->dev); | ||
1175 | } | ||
1176 | TX_DBG("\tstmmac_sw_tso: segmenting: skb %p (len %d)\n", | ||
1177 | skb, skb->len); | ||
1178 | |||
1179 | segs = skb_gso_segment(skb, priv->dev->features & ~NETIF_F_TSO); | ||
1180 | if (unlikely(IS_ERR(segs))) | ||
1181 | goto sw_tso_end; | ||
1182 | |||
1183 | do { | ||
1184 | curr_skb = segs; | ||
1185 | segs = segs->next; | ||
1186 | TX_DBG("\t\tcurrent skb->len: %d, *curr %p," | ||
1187 | "*next %p\n", curr_skb->len, curr_skb, segs); | ||
1188 | curr_skb->next = NULL; | ||
1189 | stmmac_xmit(curr_skb, priv->dev); | ||
1190 | } while (segs); | ||
1191 | |||
1192 | sw_tso_end: | ||
1193 | dev_kfree_skb(skb); | ||
1194 | |||
1195 | return NETDEV_TX_OK; | ||
1196 | } | ||
1197 | |||
1198 | static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb, | ||
1199 | struct net_device *dev, | ||
1200 | int csum_insertion) | ||
1201 | { | ||
1202 | struct stmmac_priv *priv = netdev_priv(dev); | ||
1203 | unsigned int nopaged_len = skb_headlen(skb); | ||
1204 | unsigned int txsize = priv->dma_tx_size; | ||
1205 | unsigned int entry = priv->cur_tx % txsize; | ||
1206 | struct dma_desc *desc = priv->dma_tx + entry; | ||
1207 | |||
1208 | if (nopaged_len > BUF_SIZE_8KiB) { | ||
1209 | |||
1210 | int buf2_size = nopaged_len - BUF_SIZE_8KiB; | ||
1211 | |||
1212 | desc->des2 = dma_map_single(priv->device, skb->data, | ||
1213 | BUF_SIZE_8KiB, DMA_TO_DEVICE); | ||
1214 | desc->des3 = desc->des2 + BUF_SIZE_4KiB; | ||
1215 | priv->mac_type->ops->prepare_tx_desc(desc, 1, BUF_SIZE_8KiB, | ||
1216 | csum_insertion); | ||
1217 | |||
1218 | entry = (++priv->cur_tx) % txsize; | ||
1219 | desc = priv->dma_tx + entry; | ||
1220 | |||
1221 | desc->des2 = dma_map_single(priv->device, | ||
1222 | skb->data + BUF_SIZE_8KiB, | ||
1223 | buf2_size, DMA_TO_DEVICE); | ||
1224 | desc->des3 = desc->des2 + BUF_SIZE_4KiB; | ||
1225 | priv->mac_type->ops->prepare_tx_desc(desc, 0, | ||
1226 | buf2_size, csum_insertion); | ||
1227 | priv->mac_type->ops->set_tx_owner(desc); | ||
1228 | priv->tx_skbuff[entry] = NULL; | ||
1229 | } else { | ||
1230 | desc->des2 = dma_map_single(priv->device, skb->data, | ||
1231 | nopaged_len, DMA_TO_DEVICE); | ||
1232 | desc->des3 = desc->des2 + BUF_SIZE_4KiB; | ||
1233 | priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len, | ||
1234 | csum_insertion); | ||
1235 | } | ||
1236 | return entry; | ||
1237 | } | ||
1238 | |||
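For illustration, a hedged walk-through of the split above for a hypothetical 10000-byte linear skb; the 4096/8192 figures stand in for the driver's BUF_SIZE_4KiB/BUF_SIZE_8KiB constants.

/* Hypothetical example: skb_headlen(skb) == 10000 (> BUF_SIZE_8KiB)        */
/* desc[n]  : des2 maps bytes 0..8191 in one dma_map_single() call,         */
/*            des3 = des2 + 4096, so the HW sees two 4 KiB buffer pointers  */
/* desc[n+1]: des2 maps bytes 8192..9999 (buf2_size = 10000 - 8192 = 1808), */
/*            des3 = des2 + 4096; the prepared length (1808) presumably     */
/*            bounds what is actually transmitted                           */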
1239 | /** | ||
1240 | * stmmac_xmit: | ||
1241 | * @skb : the socket buffer | ||
1242 | * @dev : device pointer | ||
1243 | * Description : Tx entry point of the driver. | ||
1244 | */ | ||
1245 | static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | ||
1246 | { | ||
1247 | struct stmmac_priv *priv = netdev_priv(dev); | ||
1248 | unsigned int txsize = priv->dma_tx_size; | ||
1249 | unsigned int entry; | ||
1250 | int i, csum_insertion = 0; | ||
1251 | int nfrags = skb_shinfo(skb)->nr_frags; | ||
1252 | struct dma_desc *desc, *first; | ||
1253 | |||
1254 | if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) { | ||
1255 | if (!netif_queue_stopped(dev)) { | ||
1256 | netif_stop_queue(dev); | ||
1257 | /* This is a hard error, log it. */ | ||
1258 | pr_err("%s: BUG! Tx Ring full when queue awake\n", | ||
1259 | __func__); | ||
1260 | } | ||
1261 | return NETDEV_TX_BUSY; | ||
1262 | } | ||
1263 | |||
1264 | entry = priv->cur_tx % txsize; | ||
1265 | |||
1266 | #ifdef STMMAC_XMIT_DEBUG | ||
1267 | if ((skb->len > ETH_FRAME_LEN) || nfrags) | ||
1268 | pr_info("stmmac xmit:\n" | ||
1269 | "\tskb addr %p - len: %d - nopaged_len: %d\n" | ||
1270 | "\tn_frags: %d - ip_summed: %d - %s gso\n", | ||
1271 | skb, skb->len, skb_headlen(skb), nfrags, skb->ip_summed, | ||
1272 | !skb_is_gso(skb) ? "isn't" : "is"); | ||
1273 | #endif | ||
1274 | |||
1275 | if (unlikely(skb_is_gso(skb))) | ||
1276 | return stmmac_sw_tso(priv, skb); | ||
1277 | |||
1278 | if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) { | ||
1279 | if (likely(priv->tx_coe == NO_HW_CSUM)) | ||
1280 | skb_checksum_help(skb); | ||
1281 | else | ||
1282 | csum_insertion = 1; | ||
1283 | } | ||
1284 | |||
1285 | desc = priv->dma_tx + entry; | ||
1286 | first = desc; | ||
1287 | |||
1288 | #ifdef STMMAC_XMIT_DEBUG | ||
1289 | if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN)) | ||
1290 | pr_debug("stmmac xmit: skb len: %d, nopaged_len: %d,\n" | ||
1291 | "\t\tn_frags: %d, ip_summed: %d\n", | ||
1292 | skb->len, skb_headlen(skb), nfrags, skb->ip_summed); | ||
1293 | #endif | ||
1294 | priv->tx_skbuff[entry] = skb; | ||
1295 | if (unlikely(skb->len >= BUF_SIZE_4KiB)) { | ||
1296 | entry = stmmac_handle_jumbo_frames(skb, dev, csum_insertion); | ||
1297 | desc = priv->dma_tx + entry; | ||
1298 | } else { | ||
1299 | unsigned int nopaged_len = skb_headlen(skb); | ||
1300 | desc->des2 = dma_map_single(priv->device, skb->data, | ||
1301 | nopaged_len, DMA_TO_DEVICE); | ||
1302 | priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len, | ||
1303 | csum_insertion); | ||
1304 | } | ||
1305 | |||
1306 | for (i = 0; i < nfrags; i++) { | ||
1307 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | ||
1308 | int len = frag->size; | ||
1309 | |||
1310 | entry = (++priv->cur_tx) % txsize; | ||
1311 | desc = priv->dma_tx + entry; | ||
1312 | |||
1313 | TX_DBG("\t[entry %d] segment len: %d\n", entry, len); | ||
1314 | desc->des2 = dma_map_page(priv->device, frag->page, | ||
1315 | frag->page_offset, | ||
1316 | len, DMA_TO_DEVICE); | ||
1317 | priv->tx_skbuff[entry] = NULL; | ||
1318 | priv->mac_type->ops->prepare_tx_desc(desc, 0, len, | ||
1319 | csum_insertion); | ||
1320 | priv->mac_type->ops->set_tx_owner(desc); | ||
1321 | } | ||
1322 | |||
1323 | /* Interrupt on completion only for the last segment */ | ||
1324 | priv->mac_type->ops->close_tx_desc(desc); | ||
1325 | #ifdef CONFIG_STMMAC_TIMER | ||
1326 | /* Clean IC while using timers */ | ||
1327 | priv->mac_type->ops->clear_tx_ic(desc); | ||
1328 | #endif | ||
1329 | /* Set the first descriptor's owner bit last, to avoid a race with the DMA */ | ||
1330 | priv->mac_type->ops->set_tx_owner(first); | ||
1331 | |||
1332 | priv->cur_tx++; | ||
1333 | |||
1334 | #ifdef STMMAC_XMIT_DEBUG | ||
1335 | if (netif_msg_pktdata(priv)) { | ||
1336 | pr_info("stmmac xmit: current=%d, dirty=%d, entry=%d, " | ||
1337 | "first=%p, nfrags=%d\n", | ||
1338 | (priv->cur_tx % txsize), (priv->dirty_tx % txsize), | ||
1339 | entry, first, nfrags); | ||
1340 | display_ring(priv->dma_tx, txsize); | ||
1341 | pr_info(">>> frame to be transmitted: "); | ||
1342 | print_pkt(skb->data, skb->len); | ||
1343 | } | ||
1344 | #endif | ||
1345 | if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) { | ||
1346 | TX_DBG("%s: stop transmitted packets\n", __func__); | ||
1347 | netif_stop_queue(dev); | ||
1348 | } | ||
1349 | |||
1350 | dev->stats.tx_bytes += skb->len; | ||
1351 | |||
1352 | /* CSR1 enables the transmit DMA to check for new descriptor */ | ||
1353 | writel(1, dev->base_addr + DMA_XMT_POLL_DEMAND); | ||
1354 | |||
1355 | return NETDEV_TX_OK; | ||
1356 | } | ||
1357 | |||
1358 | static inline void stmmac_rx_refill(struct stmmac_priv *priv) | ||
1359 | { | ||
1360 | unsigned int rxsize = priv->dma_rx_size; | ||
1361 | int bfsize = priv->dma_buf_sz; | ||
1362 | struct dma_desc *p = priv->dma_rx; | ||
1363 | |||
1364 | for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) { | ||
1365 | unsigned int entry = priv->dirty_rx % rxsize; | ||
1366 | if (likely(priv->rx_skbuff[entry] == NULL)) { | ||
1367 | struct sk_buff *skb; | ||
1368 | |||
1369 | skb = __skb_dequeue(&priv->rx_recycle); | ||
1370 | if (skb == NULL) | ||
1371 | skb = netdev_alloc_skb_ip_align(priv->dev, | ||
1372 | bfsize); | ||
1373 | |||
1374 | if (unlikely(skb == NULL)) | ||
1375 | break; | ||
1376 | |||
1377 | priv->rx_skbuff[entry] = skb; | ||
1378 | priv->rx_skbuff_dma[entry] = | ||
1379 | dma_map_single(priv->device, skb->data, bfsize, | ||
1380 | DMA_FROM_DEVICE); | ||
1381 | |||
1382 | (p + entry)->des2 = priv->rx_skbuff_dma[entry]; | ||
1383 | if (unlikely(priv->is_gmac)) { | ||
1384 | if (bfsize >= BUF_SIZE_8KiB) | ||
1385 | (p + entry)->des3 = | ||
1386 | (p + entry)->des2 + BUF_SIZE_8KiB; | ||
1387 | } | ||
1388 | RX_DBG(KERN_INFO "\trefill entry #%d\n", entry); | ||
1389 | } | ||
1390 | priv->mac_type->ops->set_rx_owner(p + entry); | ||
1391 | } | ||
1392 | return; | ||
1393 | } | ||
1394 | |||
1395 | static int stmmac_rx(struct stmmac_priv *priv, int limit) | ||
1396 | { | ||
1397 | unsigned int rxsize = priv->dma_rx_size; | ||
1398 | unsigned int entry = priv->cur_rx % rxsize; | ||
1399 | unsigned int next_entry; | ||
1400 | unsigned int count = 0; | ||
1401 | struct dma_desc *p = priv->dma_rx + entry; | ||
1402 | struct dma_desc *p_next; | ||
1403 | |||
1404 | #ifdef STMMAC_RX_DEBUG | ||
1405 | if (netif_msg_hw(priv)) { | ||
1406 | pr_debug(">>> stmmac_rx: descriptor ring:\n"); | ||
1407 | display_ring(priv->dma_rx, rxsize); | ||
1408 | } | ||
1409 | #endif | ||
1410 | count = 0; | ||
1411 | while (!priv->mac_type->ops->get_rx_owner(p)) { | ||
1412 | int status; | ||
1413 | |||
1414 | if (count >= limit) | ||
1415 | break; | ||
1416 | |||
1417 | count++; | ||
1418 | |||
1419 | next_entry = (++priv->cur_rx) % rxsize; | ||
1420 | p_next = priv->dma_rx + next_entry; | ||
1421 | prefetch(p_next); | ||
1422 | |||
1423 | /* read the status of the incoming frame */ | ||
1424 | status = (priv->mac_type->ops->rx_status(&priv->dev->stats, | ||
1425 | &priv->xstats, p)); | ||
1426 | if (unlikely(status == discard_frame)) | ||
1427 | priv->dev->stats.rx_errors++; | ||
1428 | else { | ||
1429 | struct sk_buff *skb; | ||
1430 | /* Length should omit the CRC */ | ||
1431 | int frame_len = | ||
1432 | priv->mac_type->ops->get_rx_frame_len(p) - 4; | ||
1433 | |||
1434 | #ifdef STMMAC_RX_DEBUG | ||
1435 | if (frame_len > ETH_FRAME_LEN) | ||
1436 | pr_debug("\tRX frame size %d, COE status: %d\n", | ||
1437 | frame_len, status); | ||
1438 | |||
1439 | if (netif_msg_hw(priv)) | ||
1440 | pr_debug("\tdesc: %p [entry %d] buff=0x%x\n", | ||
1441 | p, entry, p->des2); | ||
1442 | #endif | ||
1443 | skb = priv->rx_skbuff[entry]; | ||
1444 | if (unlikely(!skb)) { | ||
1445 | pr_err("%s: Inconsistent Rx descriptor chain\n", | ||
1446 | priv->dev->name); | ||
1447 | priv->dev->stats.rx_dropped++; | ||
1448 | break; | ||
1449 | } | ||
1450 | prefetch(skb->data - NET_IP_ALIGN); | ||
1451 | priv->rx_skbuff[entry] = NULL; | ||
1452 | |||
1453 | skb_put(skb, frame_len); | ||
1454 | dma_unmap_single(priv->device, | ||
1455 | priv->rx_skbuff_dma[entry], | ||
1456 | priv->dma_buf_sz, DMA_FROM_DEVICE); | ||
1457 | #ifdef STMMAC_RX_DEBUG | ||
1458 | if (netif_msg_pktdata(priv)) { | ||
1459 | pr_info(" frame received (%dbytes)", frame_len); | ||
1460 | print_pkt(skb->data, frame_len); | ||
1461 | } | ||
1462 | #endif | ||
1463 | skb->protocol = eth_type_trans(skb, priv->dev); | ||
1464 | |||
1465 | if (unlikely(status == csum_none)) { | ||
1466 | /* always for the old mac 10/100 */ | ||
1467 | skb->ip_summed = CHECKSUM_NONE; | ||
1468 | netif_receive_skb(skb); | ||
1469 | } else { | ||
1470 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
1471 | napi_gro_receive(&priv->napi, skb); | ||
1472 | } | ||
1473 | |||
1474 | priv->dev->stats.rx_packets++; | ||
1475 | priv->dev->stats.rx_bytes += frame_len; | ||
1476 | priv->dev->last_rx = jiffies; | ||
1477 | } | ||
1478 | entry = next_entry; | ||
1479 | p = p_next; /* use prefetched values */ | ||
1480 | } | ||
1481 | |||
1482 | stmmac_rx_refill(priv); | ||
1483 | |||
1484 | priv->xstats.rx_pkt_n += count; | ||
1485 | |||
1486 | return count; | ||
1487 | } | ||
1488 | |||
1489 | /** | ||
1490 | * stmmac_poll - stmmac poll method (NAPI) | ||
1491 | * @napi : pointer to the napi structure. | ||
1492 | * @budget : maximum number of packets that the current CPU can receive from | ||
1493 | * all interfaces. | ||
1494 | * Description : | ||
1495 | * This function implements the reception process. | ||
1496 | * It also reclaims resources for completed transmissions. | ||
1497 | */ | ||
1498 | static int stmmac_poll(struct napi_struct *napi, int budget) | ||
1499 | { | ||
1500 | struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi); | ||
1501 | int work_done = 0; | ||
1502 | |||
1503 | priv->xstats.poll_n++; | ||
1504 | stmmac_tx(priv); | ||
1505 | work_done = stmmac_rx(priv, budget); | ||
1506 | |||
1507 | if (work_done < budget) { | ||
1508 | napi_complete(napi); | ||
1509 | stmmac_enable_irq(priv); | ||
1510 | } | ||
1511 | return work_done; | ||
1512 | } | ||
1513 | |||
1514 | /** | ||
1515 | * stmmac_tx_timeout | ||
1516 | * @dev : Pointer to net device structure | ||
1517 | * Description: this function is called when a packet transmission fails to | ||
1518 | * complete within a reasonable time. The driver records the error in the | ||
1519 | * device statistics and arranges for the TX path to be reset to a sane state | ||
1520 | * in order to transmit a new packet. | ||
1521 | */ | ||
1522 | static void stmmac_tx_timeout(struct net_device *dev) | ||
1523 | { | ||
1524 | struct stmmac_priv *priv = netdev_priv(dev); | ||
1525 | |||
1526 | /* Clear Tx resources and restart transmitting again */ | ||
1527 | stmmac_tx_err(priv); | ||
1528 | return; | ||
1529 | } | ||
1530 | |||
1531 | /* Configuration changes (passed on by ifconfig) */ | ||
1532 | static int stmmac_config(struct net_device *dev, struct ifmap *map) | ||
1533 | { | ||
1534 | if (dev->flags & IFF_UP) /* can't act on a running interface */ | ||
1535 | return -EBUSY; | ||
1536 | |||
1537 | /* Don't allow changing the I/O address */ | ||
1538 | if (map->base_addr != dev->base_addr) { | ||
1539 | pr_warning("%s: can't change I/O address\n", dev->name); | ||
1540 | return -EOPNOTSUPP; | ||
1541 | } | ||
1542 | |||
1543 | /* Don't allow changing the IRQ */ | ||
1544 | if (map->irq != dev->irq) { | ||
1545 | pr_warning("%s: can't change IRQ number %d\n", | ||
1546 | dev->name, dev->irq); | ||
1547 | return -EOPNOTSUPP; | ||
1548 | } | ||
1549 | |||
1550 | /* ignore other fields */ | ||
1551 | return 0; | ||
1552 | } | ||
1553 | |||
1554 | /** | ||
1555 | * stmmac_multicast_list - entry point for multicast addressing | ||
1556 | * @dev : pointer to the device structure | ||
1557 | * Description: | ||
1558 | * This function is a driver entry point which gets called by the kernel | ||
1559 | * whenever multicast addresses must be enabled/disabled. | ||
1560 | * Return value: | ||
1561 | * void. | ||
1562 | */ | ||
1563 | static void stmmac_multicast_list(struct net_device *dev) | ||
1564 | { | ||
1565 | struct stmmac_priv *priv = netdev_priv(dev); | ||
1566 | |||
1567 | spin_lock(&priv->lock); | ||
1568 | priv->mac_type->ops->set_filter(dev); | ||
1569 | spin_unlock(&priv->lock); | ||
1570 | return; | ||
1571 | } | ||
1572 | |||
1573 | /** | ||
1574 | * stmmac_change_mtu - entry point to change MTU size for the device. | ||
1575 | * @dev : device pointer. | ||
1576 | * @new_mtu : the new MTU size for the device. | ||
1577 | * Description: the Maximum Transfer Unit (MTU) is used by the network layer | ||
1578 | * to drive packet transmission. Ethernet has an MTU of 1500 octets | ||
1579 | * (ETH_DATA_LEN). This value can be changed with ifconfig. | ||
1580 | * Return value: | ||
1581 | * 0 on success and an appropriate (-)ve integer as defined in errno.h | ||
1582 | * file on failure. | ||
1583 | */ | ||
1584 | static int stmmac_change_mtu(struct net_device *dev, int new_mtu) | ||
1585 | { | ||
1586 | struct stmmac_priv *priv = netdev_priv(dev); | ||
1587 | int max_mtu; | ||
1588 | |||
1589 | if (netif_running(dev)) { | ||
1590 | pr_err("%s: must be stopped to change its MTU\n", dev->name); | ||
1591 | return -EBUSY; | ||
1592 | } | ||
1593 | |||
1594 | if (priv->is_gmac) | ||
1595 | max_mtu = JUMBO_LEN; | ||
1596 | else | ||
1597 | max_mtu = ETH_DATA_LEN; | ||
1598 | |||
1599 | if ((new_mtu < 46) || (new_mtu > max_mtu)) { | ||
1600 | pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu); | ||
1601 | return -EINVAL; | ||
1602 | } | ||
1603 | |||
1604 | dev->mtu = new_mtu; | ||
1605 | |||
1606 | return 0; | ||
1607 | } | ||
1608 | |||
1609 | static irqreturn_t stmmac_interrupt(int irq, void *dev_id) | ||
1610 | { | ||
1611 | struct net_device *dev = (struct net_device *)dev_id; | ||
1612 | struct stmmac_priv *priv = netdev_priv(dev); | ||
1613 | |||
1614 | if (unlikely(!dev)) { | ||
1615 | pr_err("%s: invalid dev pointer\n", __func__); | ||
1616 | return IRQ_NONE; | ||
1617 | } | ||
1618 | |||
1619 | if (priv->is_gmac) { | ||
1620 | unsigned long ioaddr = dev->base_addr; | ||
1621 | /* To handle GMAC own interrupts */ | ||
1622 | priv->mac_type->ops->host_irq_status(ioaddr); | ||
1623 | } | ||
1624 | stmmac_dma_interrupt(dev); | ||
1625 | |||
1626 | return IRQ_HANDLED; | ||
1627 | } | ||
1628 | |||
1629 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1630 | /* Polling receive - used by NETCONSOLE and other diagnostic tools | ||
1631 | * to allow network I/O with interrupts disabled. */ | ||
1632 | static void stmmac_poll_controller(struct net_device *dev) | ||
1633 | { | ||
1634 | disable_irq(dev->irq); | ||
1635 | stmmac_interrupt(dev->irq, dev); | ||
1636 | enable_irq(dev->irq); | ||
1637 | } | ||
1638 | #endif | ||
1639 | |||
1640 | /** | ||
1641 | * stmmac_ioctl - Entry point for the Ioctl | ||
1642 | * @dev: Device pointer. | ||
1643 | * @rq: An IOCTL specific structure, that can contain a pointer to | ||
1644 | * a proprietary structure used to pass information to the driver. | ||
1645 | * @cmd: IOCTL command | ||
1646 | * Description: | ||
1647 | * Currently no special functionality is supported in the IOCTL; only | ||
1648 | * phy_mii_ioctl(...) can be invoked. | ||
1649 | */ | ||
1650 | static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | ||
1651 | { | ||
1652 | struct stmmac_priv *priv = netdev_priv(dev); | ||
1653 | int ret = -EOPNOTSUPP; | ||
1654 | |||
1655 | if (!netif_running(dev)) | ||
1656 | return -EINVAL; | ||
1657 | |||
1658 | switch (cmd) { | ||
1659 | case SIOCGMIIPHY: | ||
1660 | case SIOCGMIIREG: | ||
1661 | case SIOCSMIIREG: | ||
1662 | if (!priv->phydev) | ||
1663 | return -EINVAL; | ||
1664 | |||
1665 | spin_lock(&priv->lock); | ||
1666 | ret = phy_mii_ioctl(priv->phydev, if_mii(rq), cmd); | ||
1667 | spin_unlock(&priv->lock); | ||
1668 | default: | ||
1669 | break; | ||
1670 | } | ||
1671 | return ret; | ||
1672 | } | ||
1673 | |||
1674 | #ifdef STMMAC_VLAN_TAG_USED | ||
1675 | static void stmmac_vlan_rx_register(struct net_device *dev, | ||
1676 | struct vlan_group *grp) | ||
1677 | { | ||
1678 | struct stmmac_priv *priv = netdev_priv(dev); | ||
1679 | |||
1680 | DBG(probe, INFO, "%s: Setting vlgrp to %p\n", dev->name, grp); | ||
1681 | |||
1682 | spin_lock(&priv->lock); | ||
1683 | priv->vlgrp = grp; | ||
1684 | spin_unlock(&priv->lock); | ||
1685 | |||
1686 | return; | ||
1687 | } | ||
1688 | #endif | ||
1689 | |||
1690 | static const struct net_device_ops stmmac_netdev_ops = { | ||
1691 | .ndo_open = stmmac_open, | ||
1692 | .ndo_start_xmit = stmmac_xmit, | ||
1693 | .ndo_stop = stmmac_release, | ||
1694 | .ndo_change_mtu = stmmac_change_mtu, | ||
1695 | .ndo_set_multicast_list = stmmac_multicast_list, | ||
1696 | .ndo_tx_timeout = stmmac_tx_timeout, | ||
1697 | .ndo_do_ioctl = stmmac_ioctl, | ||
1698 | .ndo_set_config = stmmac_config, | ||
1699 | #ifdef STMMAC_VLAN_TAG_USED | ||
1700 | .ndo_vlan_rx_register = stmmac_vlan_rx_register, | ||
1701 | #endif | ||
1702 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1703 | .ndo_poll_controller = stmmac_poll_controller, | ||
1704 | #endif | ||
1705 | .ndo_set_mac_address = eth_mac_addr, | ||
1706 | }; | ||
1707 | |||
1708 | /** | ||
1709 | * stmmac_probe - Initialization of the adapter. | ||
1710 | * @dev : device pointer | ||
1711 | * Description: The function initializes the network device structure for | ||
1712 | * the STMMAC driver. It also calls the low level routines | ||
1713 | * in order to init the HW (i.e. the DMA engine) | ||
1714 | */ | ||
1715 | static int stmmac_probe(struct net_device *dev) | ||
1716 | { | ||
1717 | int ret = 0; | ||
1718 | struct stmmac_priv *priv = netdev_priv(dev); | ||
1719 | |||
1720 | ether_setup(dev); | ||
1721 | |||
1722 | dev->netdev_ops = &stmmac_netdev_ops; | ||
1723 | stmmac_set_ethtool_ops(dev); | ||
1724 | |||
1725 | dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA); | ||
1726 | dev->watchdog_timeo = msecs_to_jiffies(watchdog); | ||
1727 | #ifdef STMMAC_VLAN_TAG_USED | ||
1728 | /* Both mac100 and gmac support receive VLAN tag detection */ | ||
1729 | dev->features |= NETIF_F_HW_VLAN_RX; | ||
1730 | #endif | ||
1731 | priv->msg_enable = netif_msg_init(debug, default_msg_level); | ||
1732 | |||
1733 | if (priv->is_gmac) | ||
1734 | priv->rx_csum = 1; | ||
1735 | |||
1736 | if (flow_ctrl) | ||
1737 | priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ | ||
1738 | |||
1739 | priv->pause = pause; | ||
1740 | netif_napi_add(dev, &priv->napi, stmmac_poll, 64); | ||
1741 | |||
1742 | /* Get the MAC address */ | ||
1743 | priv->mac_type->ops->get_umac_addr(dev->base_addr, dev->dev_addr, 0); | ||
1744 | |||
1745 | if (!is_valid_ether_addr(dev->dev_addr)) | ||
1746 | pr_warning("\tno valid MAC address; " | ||
1747 | "please use ifconfig or nwhwconfig!\n"); | ||
1748 | |||
1749 | ret = register_netdev(dev); | ||
1750 | if (ret) { | ||
1751 | pr_err("%s: ERROR %i registering the device\n", | ||
1752 | __func__, ret); | ||
1753 | return -ENODEV; | ||
1754 | } | ||
1755 | |||
1756 | DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n", | ||
1757 | dev->name, (dev->features & NETIF_F_SG) ? "on" : "off", | ||
1758 | (dev->features & NETIF_F_HW_CSUM) ? "on" : "off"); | ||
1759 | |||
1760 | spin_lock_init(&priv->lock); | ||
1761 | |||
1762 | return ret; | ||
1763 | } | ||
1764 | |||
1765 | /** | ||
1766 | * stmmac_mac_device_setup | ||
1767 | * @dev : device pointer | ||
1768 | * Description: select and initialise the mac device (mac100 or Gmac). | ||
1769 | */ | ||
1770 | static int stmmac_mac_device_setup(struct net_device *dev) | ||
1771 | { | ||
1772 | struct stmmac_priv *priv = netdev_priv(dev); | ||
1773 | unsigned long ioaddr = dev->base_addr; | ||
1774 | |||
1775 | struct mac_device_info *device; | ||
1776 | |||
1777 | if (priv->is_gmac) | ||
1778 | device = gmac_setup(ioaddr); | ||
1779 | else | ||
1780 | device = mac100_setup(ioaddr); | ||
1781 | |||
1782 | if (!device) | ||
1783 | return -ENOMEM; | ||
1784 | |||
1785 | priv->mac_type = device; | ||
1786 | |||
1787 | priv->wolenabled = priv->mac_type->hw.pmt; /* PMT supported */ | ||
1788 | if (priv->wolenabled == PMT_SUPPORTED) | ||
1789 | priv->wolopts = WAKE_MAGIC; /* Magic Frame */ | ||
1790 | |||
1791 | return 0; | ||
1792 | } | ||
1793 | |||
1794 | static int stmmacphy_dvr_probe(struct platform_device *pdev) | ||
1795 | { | ||
1796 | struct plat_stmmacphy_data *plat_dat; | ||
1797 | plat_dat = (struct plat_stmmacphy_data *)((pdev->dev).platform_data); | ||
1798 | |||
1799 | pr_debug("stmmacphy_dvr_probe: added phy for bus %d\n", | ||
1800 | plat_dat->bus_id); | ||
1801 | |||
1802 | return 0; | ||
1803 | } | ||
1804 | |||
1805 | static int stmmacphy_dvr_remove(struct platform_device *pdev) | ||
1806 | { | ||
1807 | return 0; | ||
1808 | } | ||
1809 | |||
1810 | static struct platform_driver stmmacphy_driver = { | ||
1811 | .driver = { | ||
1812 | .name = PHY_RESOURCE_NAME, | ||
1813 | }, | ||
1814 | .probe = stmmacphy_dvr_probe, | ||
1815 | .remove = stmmacphy_dvr_remove, | ||
1816 | }; | ||
1817 | |||
1818 | /** | ||
1819 | * stmmac_associate_phy | ||
1820 | * @dev: pointer to device structure | ||
1821 | * @data: points to the private structure. | ||
1822 | * Description: Scans through all the PHYs we have registered and checks if | ||
1823 | * any are associated with our MAC. If so, then just fill in | ||
1824 | * the blanks in our local context structure | ||
1825 | */ | ||
1826 | static int stmmac_associate_phy(struct device *dev, void *data) | ||
1827 | { | ||
1828 | struct stmmac_priv *priv = (struct stmmac_priv *)data; | ||
1829 | struct plat_stmmacphy_data *plat_dat; | ||
1830 | |||
1831 | plat_dat = (struct plat_stmmacphy_data *)(dev->platform_data); | ||
1832 | |||
1833 | DBG(probe, DEBUG, "%s: checking phy for bus %d\n", __func__, | ||
1834 | plat_dat->bus_id); | ||
1835 | |||
1836 | /* Check that this phy is for the MAC being initialised */ | ||
1837 | if (priv->bus_id != plat_dat->bus_id) | ||
1838 | return 0; | ||
1839 | |||
1840 | /* OK, this PHY is connected to the MAC. | ||
1841 | Go ahead and get the parameters */ | ||
1842 | DBG(probe, DEBUG, "%s: OK. Found PHY config\n", __func__); | ||
1843 | priv->phy_irq = | ||
1844 | platform_get_irq_byname(to_platform_device(dev), "phyirq"); | ||
1845 | DBG(probe, DEBUG, "%s: PHY irq on bus %d is %d\n", __func__, | ||
1846 | plat_dat->bus_id, priv->phy_irq); | ||
1847 | |||
1848 | /* Override with kernel parameters if supplied XXX CRS XXX | ||
1849 | * this needs to have multiple instances */ | ||
1850 | if ((phyaddr >= 0) && (phyaddr <= 31)) | ||
1851 | plat_dat->phy_addr = phyaddr; | ||
1852 | |||
1853 | priv->phy_addr = plat_dat->phy_addr; | ||
1854 | priv->phy_mask = plat_dat->phy_mask; | ||
1855 | priv->phy_interface = plat_dat->interface; | ||
1856 | priv->phy_reset = plat_dat->phy_reset; | ||
1857 | |||
1858 | DBG(probe, DEBUG, "%s: exiting\n", __func__); | ||
1859 | return 1; /* forces exit of driver_for_each_device() */ | ||
1860 | } | ||
1861 | |||
1862 | /** | ||
1863 | * stmmac_dvr_probe | ||
1864 | * @pdev: platform device pointer | ||
1865 | * Description: the driver is initialized through platform_device. | ||
1866 | */ | ||
1867 | static int stmmac_dvr_probe(struct platform_device *pdev) | ||
1868 | { | ||
1869 | int ret = 0; | ||
1870 | struct resource *res; | ||
1871 | unsigned int *addr = NULL; | ||
1872 | struct net_device *ndev = NULL; | ||
1873 | struct stmmac_priv *priv; | ||
1874 | struct plat_stmmacenet_data *plat_dat; | ||
1875 | |||
1876 | pr_info("STMMAC driver:\n\tplatform registration... "); | ||
1877 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1878 | if (!res) { | ||
1879 | ret = -ENODEV; | ||
1880 | goto out; | ||
1881 | } | ||
1882 | pr_info("done!\n"); | ||
1883 | |||
1884 | if (!request_mem_region(res->start, resource_size(res), | ||
1885 | pdev->name)) { | ||
1886 | pr_err("%s: ERROR: memory allocation failed, " | ||
1887 | "cannot get the I/O addr 0x%x\n", | ||
1888 | __func__, (unsigned int)res->start); | ||
1889 | ret = -EBUSY; | ||
1890 | goto out; | ||
1891 | } | ||
1892 | |||
1893 | addr = ioremap(res->start, resource_size(res)); | ||
1894 | if (!addr) { | ||
1895 | pr_err("%s: ERROR: memory mapping failed\n", __func__); | ||
1896 | ret = -ENOMEM; | ||
1897 | goto out; | ||
1898 | } | ||
1899 | |||
1900 | ndev = alloc_etherdev(sizeof(struct stmmac_priv)); | ||
1901 | if (!ndev) { | ||
1902 | pr_err("%s: ERROR: allocating the device\n", __func__); | ||
1903 | ret = -ENOMEM; | ||
1904 | goto out; | ||
1905 | } | ||
1906 | |||
1907 | SET_NETDEV_DEV(ndev, &pdev->dev); | ||
1908 | |||
1909 | /* Get the MAC information */ | ||
1910 | ndev->irq = platform_get_irq_byname(pdev, "macirq"); | ||
1911 | if (ndev->irq == -ENXIO) { | ||
1912 | pr_err("%s: ERROR: MAC IRQ configuration " | ||
1913 | "information not found\n", __func__); | ||
1914 | ret = -ENODEV; | ||
1915 | goto out; | ||
1916 | } | ||
1917 | |||
1918 | priv = netdev_priv(ndev); | ||
1919 | priv->device = &(pdev->dev); | ||
1920 | priv->dev = ndev; | ||
1921 | plat_dat = (struct plat_stmmacenet_data *)((pdev->dev).platform_data); | ||
1922 | priv->bus_id = plat_dat->bus_id; | ||
1923 | priv->pbl = plat_dat->pbl; /* TLI */ | ||
1924 | priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */ | ||
1925 | |||
1926 | platform_set_drvdata(pdev, ndev); | ||
1927 | |||
1928 | /* Set the I/O base addr */ | ||
1929 | ndev->base_addr = (unsigned long)addr; | ||
1930 | |||
1931 | /* MAC HW device detection */ | ||
1932 | ret = stmmac_mac_device_setup(ndev); | ||
1933 | if (ret < 0) | ||
1934 | goto out; | ||
1935 | |||
1936 | /* Network Device Registration */ | ||
1937 | ret = stmmac_probe(ndev); | ||
1938 | if (ret < 0) | ||
1939 | goto out; | ||
1940 | |||
1941 | /* associate a PHY - it is provided by another platform bus */ | ||
1942 | if (!driver_for_each_device | ||
1943 | (&(stmmacphy_driver.driver), NULL, (void *)priv, | ||
1944 | stmmac_associate_phy)) { | ||
1945 | pr_err("No PHY device is associated with this MAC!\n"); | ||
1946 | ret = -ENODEV; | ||
1947 | goto out; | ||
1948 | } | ||
1949 | |||
1950 | priv->fix_mac_speed = plat_dat->fix_mac_speed; | ||
1951 | priv->bsp_priv = plat_dat->bsp_priv; | ||
1952 | |||
1953 | pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n" | ||
1954 | "\tIO base addr: 0x%08x)\n", ndev->name, pdev->name, | ||
1955 | pdev->id, ndev->irq, (unsigned int)addr); | ||
1956 | |||
1957 | /* MDIO bus Registration */ | ||
1958 | pr_debug("\tMDIO bus (id: %d)...", priv->bus_id); | ||
1959 | ret = stmmac_mdio_register(ndev); | ||
1960 | if (ret < 0) | ||
1961 | goto out; | ||
1962 | pr_debug("registered!\n"); | ||
1963 | |||
1964 | out: | ||
1965 | if (ret < 0) { | ||
1966 | platform_set_drvdata(pdev, NULL); | ||
1967 | release_mem_region(res->start, resource_size(res)); | ||
1968 | if (addr != NULL) | ||
1969 | iounmap(addr); | ||
1970 | } | ||
1971 | |||
1972 | return ret; | ||
1973 | } | ||
1974 | |||
1975 | /** | ||
1976 | * stmmac_dvr_remove | ||
1977 | * @pdev: platform device pointer | ||
1978 | * Description: this function resets the TX/RX processes, disables the MAC RX/TX, | ||
1979 | * changes the link status, releases the DMA descriptor rings, | ||
1980 | * unregisters the MDIO bus and unmaps the allocated memory. | ||
1981 | */ | ||
1982 | static int stmmac_dvr_remove(struct platform_device *pdev) | ||
1983 | { | ||
1984 | struct net_device *ndev = platform_get_drvdata(pdev); | ||
1985 | struct resource *res; | ||
1986 | |||
1987 | pr_info("%s:\n\tremoving driver\n", __func__); | ||
1988 | |||
1989 | stmmac_dma_stop_rx(ndev->base_addr); | ||
1990 | stmmac_dma_stop_tx(ndev->base_addr); | ||
1991 | |||
1992 | stmmac_mac_disable_rx(ndev->base_addr); | ||
1993 | stmmac_mac_disable_tx(ndev->base_addr); | ||
1994 | |||
1995 | netif_carrier_off(ndev); | ||
1996 | |||
1997 | stmmac_mdio_unregister(ndev); | ||
1998 | |||
1999 | platform_set_drvdata(pdev, NULL); | ||
2000 | unregister_netdev(ndev); | ||
2001 | |||
2002 | iounmap((void *)ndev->base_addr); | ||
2003 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
2004 | release_mem_region(res->start, resource_size(res)); | ||
2005 | |||
2006 | free_netdev(ndev); | ||
2007 | |||
2008 | return 0; | ||
2009 | } | ||
2010 | |||
2011 | #ifdef CONFIG_PM | ||
2012 | static int stmmac_suspend(struct platform_device *pdev, pm_message_t state) | ||
2013 | { | ||
2014 | struct net_device *dev = platform_get_drvdata(pdev); | ||
2015 | struct stmmac_priv *priv = netdev_priv(dev); | ||
2016 | int dis_ic = 0; | ||
2017 | |||
2018 | if (!dev || !netif_running(dev)) | ||
2019 | return 0; | ||
2020 | |||
2021 | spin_lock(&priv->lock); | ||
2022 | |||
2023 | if (state.event == PM_EVENT_SUSPEND) { | ||
2024 | netif_device_detach(dev); | ||
2025 | netif_stop_queue(dev); | ||
2026 | if (priv->phydev) | ||
2027 | phy_stop(priv->phydev); | ||
2028 | |||
2029 | #ifdef CONFIG_STMMAC_TIMER | ||
2030 | priv->tm->timer_stop(); | ||
2031 | dis_ic = 1; | ||
2032 | #endif | ||
2033 | napi_disable(&priv->napi); | ||
2034 | |||
2035 | /* Stop TX/RX DMA */ | ||
2036 | stmmac_dma_stop_tx(dev->base_addr); | ||
2037 | stmmac_dma_stop_rx(dev->base_addr); | ||
2038 | /* Clear the Rx/Tx descriptors */ | ||
2039 | priv->mac_type->ops->init_rx_desc(priv->dma_rx, | ||
2040 | priv->dma_rx_size, dis_ic); | ||
2041 | priv->mac_type->ops->init_tx_desc(priv->dma_tx, | ||
2042 | priv->dma_tx_size); | ||
2043 | |||
2044 | stmmac_mac_disable_tx(dev->base_addr); | ||
2045 | |||
2046 | if (device_may_wakeup(&(pdev->dev))) { | ||
2047 | /* Enable Power down mode by programming the PMT regs */ | ||
2048 | if (priv->wolenabled == PMT_SUPPORTED) | ||
2049 | priv->mac_type->ops->pmt(dev->base_addr, | ||
2050 | priv->wolopts); | ||
2051 | } else { | ||
2052 | stmmac_mac_disable_rx(dev->base_addr); | ||
2053 | } | ||
2054 | } else { | ||
2055 | priv->shutdown = 1; | ||
2056 | /* Although this can appear slightly redundant, it actually | ||
2057 | * speeds up the standby operation and guarantees that the | ||
2058 | * driver keeps working when hibernating to disk. */ | ||
2059 | stmmac_release(dev); | ||
2060 | } | ||
2061 | |||
2062 | spin_unlock(&priv->lock); | ||
2063 | return 0; | ||
2064 | } | ||
2065 | |||
2066 | static int stmmac_resume(struct platform_device *pdev) | ||
2067 | { | ||
2068 | struct net_device *dev = platform_get_drvdata(pdev); | ||
2069 | struct stmmac_priv *priv = netdev_priv(dev); | ||
2070 | unsigned long ioaddr = dev->base_addr; | ||
2071 | |||
2072 | if (!netif_running(dev)) | ||
2073 | return 0; | ||
2074 | |||
2075 | spin_lock(&priv->lock); | ||
2076 | |||
2077 | if (priv->shutdown) { | ||
2078 | /* Re-open the interface and re-init the MAC/DMA | ||
2079 | and the rings. */ | ||
2080 | stmmac_open(dev); | ||
2081 | goto out_resume; | ||
2082 | } | ||
2083 | |||
2084 | /* The Power Down bit in the PMT register is cleared | ||
2085 | * automatically as soon as a magic packet or a Wake-up frame | ||
2086 | * is received. Even so, it is better to clear this bit | ||
2087 | * manually because it can cause problems while resuming | ||
2088 | * from other devices (e.g. a serial console). */ | ||
2089 | if (device_may_wakeup(&(pdev->dev))) | ||
2090 | if (priv->wolenabled == PMT_SUPPORTED) | ||
2091 | priv->mac_type->ops->pmt(dev->base_addr, 0); | ||
2092 | |||
2093 | netif_device_attach(dev); | ||
2094 | |||
2095 | /* Enable the MAC and DMA */ | ||
2096 | stmmac_mac_enable_rx(ioaddr); | ||
2097 | stmmac_mac_enable_tx(ioaddr); | ||
2098 | stmmac_dma_start_tx(ioaddr); | ||
2099 | stmmac_dma_start_rx(ioaddr); | ||
2100 | |||
2101 | #ifdef CONFIG_STMMAC_TIMER | ||
2102 | priv->tm->timer_start(tmrate); | ||
2103 | #endif | ||
2104 | napi_enable(&priv->napi); | ||
2105 | |||
2106 | if (priv->phydev) | ||
2107 | phy_start(priv->phydev); | ||
2108 | |||
2109 | netif_start_queue(dev); | ||
2110 | |||
2111 | out_resume: | ||
2112 | spin_unlock(&priv->lock); | ||
2113 | return 0; | ||
2114 | } | ||
2115 | #endif | ||
2116 | |||
2117 | static struct platform_driver stmmac_driver = { | ||
2118 | .driver = { | ||
2119 | .name = STMMAC_RESOURCE_NAME, | ||
2120 | }, | ||
2121 | .probe = stmmac_dvr_probe, | ||
2122 | .remove = stmmac_dvr_remove, | ||
2123 | #ifdef CONFIG_PM | ||
2124 | .suspend = stmmac_suspend, | ||
2125 | .resume = stmmac_resume, | ||
2126 | #endif | ||
2127 | |||
2128 | }; | ||
2129 | |||
2130 | /** | ||
2131 | * stmmac_init_module - Entry point for the driver | ||
2132 | * Description: This function is the entry point for the driver. | ||
2133 | */ | ||
2134 | static int __init stmmac_init_module(void) | ||
2135 | { | ||
2136 | int ret; | ||
2137 | |||
2138 | if (platform_driver_register(&stmmacphy_driver)) { | ||
2139 | pr_err("No PHY devices registered!\n"); | ||
2140 | return -ENODEV; | ||
2141 | } | ||
2142 | |||
2143 | ret = platform_driver_register(&stmmac_driver); | ||
2144 | return ret; | ||
2145 | } | ||
2146 | |||
2147 | /** | ||
2148 | * stmmac_cleanup_module - Cleanup routine for the driver | ||
2149 | * Description: This function is the cleanup routine for the driver. | ||
2150 | */ | ||
2151 | static void __exit stmmac_cleanup_module(void) | ||
2152 | { | ||
2153 | platform_driver_unregister(&stmmacphy_driver); | ||
2154 | platform_driver_unregister(&stmmac_driver); | ||
2155 | } | ||
2156 | |||
2157 | #ifndef MODULE | ||
2158 | static int __init stmmac_cmdline_opt(char *str) | ||
2159 | { | ||
2160 | char *opt; | ||
2161 | |||
2162 | if (!str || !*str) | ||
2163 | return -EINVAL; | ||
2164 | while ((opt = strsep(&str, ",")) != NULL) { | ||
2165 | if (!strncmp(opt, "debug:", 6)) | ||
2166 | strict_strtoul(opt + 6, 0, (unsigned long *)&debug); | ||
2167 | else if (!strncmp(opt, "phyaddr:", 8)) | ||
2168 | strict_strtoul(opt + 8, 0, (unsigned long *)&phyaddr); | ||
2169 | else if (!strncmp(opt, "dma_txsize:", 11)) | ||
2170 | strict_strtoul(opt + 11, 0, | ||
2171 | (unsigned long *)&dma_txsize); | ||
2172 | else if (!strncmp(opt, "dma_rxsize:", 11)) | ||
2173 | strict_strtoul(opt + 11, 0, | ||
2174 | (unsigned long *)&dma_rxsize); | ||
2175 | else if (!strncmp(opt, "buf_sz:", 7)) | ||
2176 | strict_strtoul(opt + 7, 0, (unsigned long *)&buf_sz); | ||
2177 | else if (!strncmp(opt, "tc:", 3)) | ||
2178 | strict_strtoul(opt + 3, 0, (unsigned long *)&tc); | ||
2179 | else if (!strncmp(opt, "tx_coe:", 7)) | ||
2180 | strict_strtoul(opt + 7, 0, (unsigned long *)&tx_coe); | ||
2181 | else if (!strncmp(opt, "watchdog:", 9)) | ||
2182 | strict_strtoul(opt + 9, 0, (unsigned long *)&watchdog); | ||
2183 | else if (!strncmp(opt, "flow_ctrl:", 10)) | ||
2184 | strict_strtoul(opt + 10, 0, | ||
2185 | (unsigned long *)&flow_ctrl); | ||
2186 | else if (!strncmp(opt, "pause:", 6)) | ||
2187 | strict_strtoul(opt + 6, 0, (unsigned long *)&pause); | ||
2188 | #ifdef CONFIG_STMMAC_TIMER | ||
2189 | else if (!strncmp(opt, "tmrate:", 7)) | ||
2190 | strict_strtoul(opt + 7, 0, (unsigned long *)&tmrate); | ||
2191 | #endif | ||
2192 | } | ||
2193 | return 0; | ||
2194 | } | ||
2195 | |||
2196 | __setup("stmmaceth=", stmmac_cmdline_opt); | ||
2197 | #endif | ||
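As a usage note, the parser above takes a comma-separated list on the kernel command line; the values below are purely illustrative, and each one is parsed with strict_strtoul() base 0, so decimal or 0x-prefixed hex is accepted:

    stmmaceth=debug:16,phyaddr:1,dma_txsize:256,dma_rxsize:256,buf_sz:2048,tc:128

(the tmrate: option is only recognised when CONFIG_STMMAC_TIMER is enabled).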
2198 | |||
2199 | module_init(stmmac_init_module); | ||
2200 | module_exit(stmmac_cleanup_module); | ||
2201 | |||
2202 | MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet driver"); | ||
2203 | MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); | ||
2204 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/net/stmmac/stmmac_mdio.c b/drivers/net/stmmac/stmmac_mdio.c new file mode 100644 index 000000000000..8498552a22fc --- /dev/null +++ b/drivers/net/stmmac/stmmac_mdio.c | |||
@@ -0,0 +1,217 @@ | |||
1 | /******************************************************************************* | ||
2 | STMMAC Ethernet Driver -- MDIO bus implementation | ||
3 | Provides Bus interface for MII registers | ||
4 | |||
5 | Copyright (C) 2007-2009 STMicroelectronics Ltd | ||
6 | |||
7 | This program is free software; you can redistribute it and/or modify it | ||
8 | under the terms and conditions of the GNU General Public License, | ||
9 | version 2, as published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope it will be useful, but WITHOUT | ||
12 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
13 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
14 | more details. | ||
15 | |||
16 | You should have received a copy of the GNU General Public License along with | ||
17 | this program; if not, write to the Free Software Foundation, Inc., | ||
18 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | |||
20 | The full GNU General Public License is included in this distribution in | ||
21 | the file called "COPYING". | ||
22 | |||
23 | Author: Carl Shaw <carl.shaw@st.com> | ||
24 | Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com> | ||
25 | *******************************************************************************/ | ||
26 | |||
27 | #include <linux/netdevice.h> | ||
28 | #include <linux/mii.h> | ||
29 | #include <linux/phy.h> | ||
30 | |||
31 | #include "stmmac.h" | ||
32 | |||
33 | #define MII_BUSY 0x00000001 | ||
34 | #define MII_WRITE 0x00000002 | ||
35 | |||
36 | /** | ||
37 | * stmmac_mdio_read | ||
38 | * @bus: points to the mii_bus structure | ||
39 | * @phyaddr: MII addr reg bits 15-11 | ||
40 | * @phyreg: MII addr reg bits 10-6 | ||
41 | * Description: it reads data from the MII register from within the phy device. | ||
42 | * For the 7111 GMAC, we must set the bit 0 in the MII address register while | ||
43 | * accessing the PHY registers. | ||
44 | * Fortunately, it seems this has no drawback for the 7109 MAC. | ||
45 | */ | ||
46 | static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) | ||
47 | { | ||
48 | struct net_device *ndev = bus->priv; | ||
49 | struct stmmac_priv *priv = netdev_priv(ndev); | ||
50 | unsigned long ioaddr = ndev->base_addr; | ||
51 | unsigned int mii_address = priv->mac_type->hw.mii.addr; | ||
52 | unsigned int mii_data = priv->mac_type->hw.mii.data; | ||
53 | |||
54 | int data; | ||
55 | u16 regValue = (((phyaddr << 11) & (0x0000F800)) | | ||
56 | ((phyreg << 6) & (0x000007C0))); | ||
57 | regValue |= MII_BUSY; /* in case of GMAC */ | ||
58 | |||
59 | do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1); | ||
60 | writel(regValue, ioaddr + mii_address); | ||
61 | do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1); | ||
62 | |||
63 | /* Read the data from the MII data register */ | ||
64 | data = (int)readl(ioaddr + mii_data); | ||
65 | |||
66 | return data; | ||
67 | } | ||
68 | |||
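A worked example of the address-register encoding used above, assuming a PHY at address 1 and the standard MII_BMSR (0x01) status register:

/* regValue = ((1 << 11) & 0x0000F800) | ((0x01 << 6) & 0x000007C0) | MII_BUSY
 *          = 0x0800 | 0x0040 | 0x0001
 *          = 0x0841
 * bits 15-11 carry the PHY address, bits 10-6 the register number,
 * and bit 0 (MII_BUSY) starts the transaction.
 */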
69 | /** | ||
70 | * stmmac_mdio_write | ||
71 | * @bus: points to the mii_bus structure | ||
72 | * @phyaddr: MII addr reg bits 15-11 | ||
73 | * @phyreg: MII addr reg bits 10-6 | ||
74 | * @phydata: phy data | ||
75 | * Description: it writes the data into the MII register of the phy device. | ||
76 | */ | ||
77 | static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, | ||
78 | u16 phydata) | ||
79 | { | ||
80 | struct net_device *ndev = bus->priv; | ||
81 | struct stmmac_priv *priv = netdev_priv(ndev); | ||
82 | unsigned long ioaddr = ndev->base_addr; | ||
83 | unsigned int mii_address = priv->mac_type->hw.mii.addr; | ||
84 | unsigned int mii_data = priv->mac_type->hw.mii.data; | ||
85 | |||
86 | u16 value = | ||
87 | (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0))) | ||
88 | | MII_WRITE; | ||
89 | |||
90 | value |= MII_BUSY; | ||
91 | |||
92 | /* Wait until any existing MII operation is complete */ | ||
93 | do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1); | ||
94 | |||
95 | /* Write the data, then set the MII address register to trigger the write */ | ||
96 | writel(phydata, ioaddr + mii_data); | ||
97 | writel(value, ioaddr + mii_address); | ||
98 | |||
99 | /* Wait until any existing MII operation is complete */ | ||
100 | do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1); | ||
101 | |||
102 | return 0; | ||
103 | } | ||
104 | |||
105 | /** | ||
106 | * stmmac_mdio_reset | ||
107 | * @bus: points to the mii_bus structure | ||
108 | * Description: reset the MII bus | ||
109 | */ | ||
110 | static int stmmac_mdio_reset(struct mii_bus *bus) | ||
111 | { | ||
112 | struct net_device *ndev = bus->priv; | ||
113 | struct stmmac_priv *priv = netdev_priv(ndev); | ||
114 | unsigned long ioaddr = ndev->base_addr; | ||
115 | unsigned int mii_address = priv->mac_type->hw.mii.addr; | ||
116 | |||
117 | if (priv->phy_reset) { | ||
118 | pr_debug("stmmac_mdio_reset: calling phy_reset\n"); | ||
119 | priv->phy_reset(priv->bsp_priv); | ||
120 | } | ||
121 | |||
122 | /* This is a workaround for problems with the STE101P PHY. | ||
123 | * It doesn't complete its reset until at least one clock cycle | ||
124 | * on MDC, so perform a dummy mdio read. | ||
125 | */ | ||
126 | writel(0, ioaddr + mii_address); | ||
127 | |||
128 | return 0; | ||
129 | } | ||
130 | |||
131 | /** | ||
132 | * stmmac_mdio_register | ||
133 | * @ndev: net device structure | ||
134 | * Description: it registers the MII bus | ||
135 | */ | ||
136 | int stmmac_mdio_register(struct net_device *ndev) | ||
137 | { | ||
138 | int err = 0; | ||
139 | struct mii_bus *new_bus; | ||
140 | int *irqlist; | ||
141 | struct stmmac_priv *priv = netdev_priv(ndev); | ||
142 | int addr, found; | ||
143 | |||
144 | new_bus = mdiobus_alloc(); | ||
145 | if (new_bus == NULL) | ||
146 | return -ENOMEM; | ||
147 | |||
148 | irqlist = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); | ||
149 | if (irqlist == NULL) { | ||
150 | err = -ENOMEM; | ||
151 | goto irqlist_alloc_fail; | ||
152 | } | ||
153 | |||
154 | /* Assign IRQ to phy at address phy_addr */ | ||
155 | if (priv->phy_addr != -1) | ||
156 | irqlist[priv->phy_addr] = priv->phy_irq; | ||
157 | |||
158 | new_bus->name = "STMMAC MII Bus"; | ||
159 | new_bus->read = &stmmac_mdio_read; | ||
160 | new_bus->write = &stmmac_mdio_write; | ||
161 | new_bus->reset = &stmmac_mdio_reset; | ||
162 | snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", priv->bus_id); | ||
163 | new_bus->priv = ndev; | ||
164 | new_bus->irq = irqlist; | ||
165 | new_bus->phy_mask = priv->phy_mask; | ||
166 | new_bus->parent = priv->device; | ||
167 | err = mdiobus_register(new_bus); | ||
168 | if (err != 0) { | ||
169 | pr_err("%s: Cannot register as MDIO bus\n", new_bus->name); | ||
170 | goto bus_register_fail; | ||
171 | } | ||
172 | |||
173 | priv->mii = new_bus; | ||
174 | |||
175 | found = 0; | ||
176 | for (addr = 0; addr < 32; addr++) { | ||
177 | struct phy_device *phydev = new_bus->phy_map[addr]; | ||
178 | if (phydev) { | ||
179 | if (priv->phy_addr == -1) { | ||
180 | priv->phy_addr = addr; | ||
181 | phydev->irq = priv->phy_irq; | ||
182 | irqlist[addr] = priv->phy_irq; | ||
183 | } | ||
184 | pr_info("%s: PHY ID %08x at %d IRQ %d (%s)%s\n", | ||
185 | ndev->name, phydev->phy_id, addr, | ||
186 | phydev->irq, dev_name(&phydev->dev), | ||
187 | (addr == priv->phy_addr) ? " active" : ""); | ||
188 | found = 1; | ||
189 | } | ||
190 | } | ||
191 | |||
192 | if (!found) | ||
193 | pr_warning("%s: No PHY found\n", ndev->name); | ||
194 | |||
195 | return 0; | ||
196 | bus_register_fail: | ||
197 | kfree(irqlist); | ||
198 | irqlist_alloc_fail: | ||
199 | kfree(new_bus); | ||
200 | return err; | ||
201 | } | ||
202 | |||
203 | /** | ||
204 | * stmmac_mdio_unregister | ||
205 | * @ndev: net device structure | ||
206 | * Description: it unregisters the MII bus | ||
207 | */ | ||
208 | int stmmac_mdio_unregister(struct net_device *ndev) | ||
209 | { | ||
210 | struct stmmac_priv *priv = netdev_priv(ndev); | ||
211 | |||
212 | mdiobus_unregister(priv->mii); | ||
213 | priv->mii->priv = NULL; | ||
214 | kfree(priv->mii); | ||
215 | |||
216 | return 0; | ||
217 | } | ||
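The address word used by both stmmac_mdio_read and stmmac_mdio_write packs the PHY address into bits 15-11 and the register number into bits 10-6, with MII_BUSY starting the transaction and MII_WRITE selecting the direction. A minimal sketch of that composition, assuming the MII_BUSY and MII_WRITE flag definitions from the driver's MDIO header (the helper itself is hypothetical, not part of the patch):

/* Hypothetical helper: compose the MII address word the way
 * stmmac_mdio_read/stmmac_mdio_write do above. MII_BUSY and MII_WRITE
 * are assumed to be the flag bits defined by the driver. */
static inline u16 stmmac_mii_addr_word(int phyaddr, int phyreg, bool do_write)
{
	u16 value = ((phyaddr << 11) & 0x0000F800) |	/* PHY address, bits 15-11 */
		    ((phyreg << 6) & 0x000007C0);	/* register,    bits 10-6  */

	if (do_write)
		value |= MII_WRITE;			/* direction: write */

	return value | MII_BUSY;			/* start the transaction */
}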
diff --git a/drivers/net/stmmac/stmmac_timer.c b/drivers/net/stmmac/stmmac_timer.c new file mode 100644 index 000000000000..b838c6582077 --- /dev/null +++ b/drivers/net/stmmac/stmmac_timer.c | |||
@@ -0,0 +1,140 @@ | |||
1 | /******************************************************************************* | ||
2 | STMMAC external timer support. | ||
3 | |||
4 | Copyright (C) 2007-2009 STMicroelectronics Ltd | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> | ||
23 | *******************************************************************************/ | ||
24 | |||
25 | #include <linux/kernel.h> | ||
26 | #include <linux/etherdevice.h> | ||
27 | #include "stmmac_timer.h" | ||
28 | |||
29 | static void stmmac_timer_handler(void *data) | ||
30 | { | ||
31 | struct net_device *dev = (struct net_device *)data; | ||
32 | |||
33 | stmmac_schedule(dev); | ||
34 | |||
35 | return; | ||
36 | } | ||
37 | |||
38 | #define STMMAC_TIMER_MSG(timer, freq) \ | ||
39 | printk(KERN_INFO "stmmac_timer: %s Timer ON (freq %dHz)\n", timer, freq) | ||
40 | |||
41 | #if defined(CONFIG_STMMAC_RTC_TIMER) | ||
42 | #include <linux/rtc.h> | ||
43 | static struct rtc_device *stmmac_rtc; | ||
44 | static rtc_task_t stmmac_task; | ||
45 | |||
46 | static void stmmac_rtc_start(unsigned int new_freq) | ||
47 | { | ||
48 | rtc_irq_set_freq(stmmac_rtc, &stmmac_task, new_freq); | ||
49 | rtc_irq_set_state(stmmac_rtc, &stmmac_task, 1); | ||
50 | return; | ||
51 | } | ||
52 | |||
53 | static void stmmac_rtc_stop(void) | ||
54 | { | ||
55 | rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0); | ||
56 | return; | ||
57 | } | ||
58 | |||
59 | int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm) | ||
60 | { | ||
61 | stmmac_task.private_data = dev; | ||
62 | stmmac_task.func = stmmac_timer_handler; | ||
63 | |||
64 | stmmac_rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE); | ||
65 | if (stmmac_rtc == NULL) { | ||
66 | pr_err("open rtc device failed\n"); | ||
67 | return -ENODEV; | ||
68 | } | ||
69 | |||
70 | rtc_irq_register(stmmac_rtc, &stmmac_task); | ||
71 | |||
72 | /* If setting the frequency fails, periodic mode is not supported */ | ||
73 | if ((rtc_irq_set_freq(stmmac_rtc, &stmmac_task, tm->freq) < 0)) { | ||
74 | pr_err("set periodic failed\n"); | ||
75 | rtc_irq_unregister(stmmac_rtc, &stmmac_task); | ||
76 | rtc_class_close(stmmac_rtc); | ||
77 | return -1; | ||
78 | } | ||
79 | |||
80 | STMMAC_TIMER_MSG(CONFIG_RTC_HCTOSYS_DEVICE, tm->freq); | ||
81 | |||
82 | tm->timer_start = stmmac_rtc_start; | ||
83 | tm->timer_stop = stmmac_rtc_stop; | ||
84 | |||
85 | return 0; | ||
86 | } | ||
87 | |||
88 | int stmmac_close_ext_timer(void) | ||
89 | { | ||
90 | rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0); | ||
91 | rtc_irq_unregister(stmmac_rtc, &stmmac_task); | ||
92 | rtc_class_close(stmmac_rtc); | ||
93 | return 0; | ||
94 | } | ||
95 | |||
96 | #elif defined(CONFIG_STMMAC_TMU_TIMER) | ||
97 | #include <linux/clk.h> | ||
98 | #define TMU_CHANNEL "tmu2_clk" | ||
99 | static struct clk *timer_clock; | ||
100 | |||
101 | static void stmmac_tmu_start(unsigned int new_freq) | ||
102 | { | ||
103 | clk_set_rate(timer_clock, new_freq); | ||
104 | clk_enable(timer_clock); | ||
105 | return; | ||
106 | } | ||
107 | |||
108 | static void stmmac_tmu_stop(void) | ||
109 | { | ||
110 | clk_disable(timer_clock); | ||
111 | return; | ||
112 | } | ||
113 | |||
114 | int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm) | ||
115 | { | ||
116 | timer_clock = clk_get(NULL, TMU_CHANNEL); | ||
117 | |||
118 | if (timer_clock == NULL) | ||
119 | return -1; | ||
120 | |||
121 | if (tmu2_register_user(stmmac_timer_handler, (void *)dev) < 0) { | ||
122 | timer_clock = NULL; | ||
123 | return -1; | ||
124 | } | ||
125 | |||
126 | STMMAC_TIMER_MSG("TMU2", tm->freq); | ||
127 | tm->timer_start = stmmac_tmu_start; | ||
128 | tm->timer_stop = stmmac_tmu_stop; | ||
129 | |||
130 | return 0; | ||
131 | } | ||
132 | |||
133 | int stmmac_close_ext_timer(void) | ||
134 | { | ||
135 | clk_disable(timer_clock); | ||
136 | tmu2_unregister_user(); | ||
137 | clk_put(timer_clock); | ||
138 | return 0; | ||
139 | } | ||
140 | #endif | ||
diff --git a/drivers/net/stmmac/stmmac_timer.h b/drivers/net/stmmac/stmmac_timer.h new file mode 100644 index 000000000000..f795cae33725 --- /dev/null +++ b/drivers/net/stmmac/stmmac_timer.h | |||
@@ -0,0 +1,41 @@ | |||
1 | /******************************************************************************* | ||
2 | STMMAC external timer Header File. | ||
3 | |||
4 | Copyright (C) 2007-2009 STMicroelectronics Ltd | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> | ||
23 | *******************************************************************************/ | ||
24 | |||
25 | struct stmmac_timer { | ||
26 | void (*timer_start) (unsigned int new_freq); | ||
27 | void (*timer_stop) (void); | ||
28 | unsigned int freq; | ||
29 | }; | ||
30 | |||
31 | /* Open the HW timer device and return 0 in case of success */ | ||
32 | int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm); | ||
33 | /* Stop the timer and release it */ | ||
34 | int stmmac_close_ext_timer(void); | ||
35 | /* Function used for scheduling task within the stmmac */ | ||
36 | void stmmac_schedule(struct net_device *dev); | ||
37 | |||
38 | #if defined(CONFIG_STMMAC_TMU_TIMER) | ||
39 | extern int tmu2_register_user(void *fnt, void *data); | ||
40 | extern void tmu2_unregister_user(void); | ||
41 | #endif | ||
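Taken together, stmmac_timer.c and this header give the main driver a small open/start/stop/close interface around whichever hardware timer is configured. A usage sketch, assuming `dev` is the driver's net_device and interrupts are wanted at `freq` Hz (the example functions are hypothetical callers, not part of the patch):

/* Hypothetical caller of the stmmac external-timer API declared above. */
static struct stmmac_timer tm;

static int example_enable_timer(struct net_device *dev, unsigned int freq)
{
	tm.freq = freq;
	if (stmmac_open_ext_timer(dev, &tm) < 0)
		return -ENODEV;		/* no RTC/TMU timer available */

	tm.timer_start(tm.freq);	/* start periodic timer interrupts */
	return 0;
}

static void example_disable_timer(void)
{
	tm.timer_stop();
	stmmac_close_ext_timer();
}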
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index 6fdaba8674b9..ed4a508ef262 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c | |||
@@ -62,8 +62,11 @@ static char *devid=NULL; | |||
62 | static struct usb_eth_dev usb_dev_id[] = { | 62 | static struct usb_eth_dev usb_dev_id[] = { |
63 | #define PEGASUS_DEV(pn, vid, pid, flags) \ | 63 | #define PEGASUS_DEV(pn, vid, pid, flags) \ |
64 | {.name = pn, .vendor = vid, .device = pid, .private = flags}, | 64 | {.name = pn, .vendor = vid, .device = pid, .private = flags}, |
65 | #define PEGASUS_DEV_CLASS(pn, vid, pid, dclass, flags) \ | ||
66 | PEGASUS_DEV(pn, vid, pid, flags) | ||
65 | #include "pegasus.h" | 67 | #include "pegasus.h" |
66 | #undef PEGASUS_DEV | 68 | #undef PEGASUS_DEV |
69 | #undef PEGASUS_DEV_CLASS | ||
67 | {NULL, 0, 0, 0}, | 70 | {NULL, 0, 0, 0}, |
68 | {NULL, 0, 0, 0} | 71 | {NULL, 0, 0, 0} |
69 | }; | 72 | }; |
@@ -71,8 +74,18 @@ static struct usb_eth_dev usb_dev_id[] = { | |||
71 | static struct usb_device_id pegasus_ids[] = { | 74 | static struct usb_device_id pegasus_ids[] = { |
72 | #define PEGASUS_DEV(pn, vid, pid, flags) \ | 75 | #define PEGASUS_DEV(pn, vid, pid, flags) \ |
73 | {.match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = vid, .idProduct = pid}, | 76 | {.match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = vid, .idProduct = pid}, |
77 | /* | ||
78 | * The Belkin F8T012xx1 bluetooth adaptor has the same vendor and product | ||
79 | * IDs as the Belkin F5D5050, so we need to teach the pegasus driver to | ||
80 | * ignore adaptors belonging to the "Wireless" class 0xE0. For this one | ||
81 | * case anyway, seeing as the pegasus is for "Wired" adaptors. | ||
82 | */ | ||
83 | #define PEGASUS_DEV_CLASS(pn, vid, pid, dclass, flags) \ | ||
84 | {.match_flags = (USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_DEV_CLASS), \ | ||
85 | .idVendor = vid, .idProduct = pid, .bDeviceClass = dclass}, | ||
74 | #include "pegasus.h" | 86 | #include "pegasus.h" |
75 | #undef PEGASUS_DEV | 87 | #undef PEGASUS_DEV |
88 | #undef PEGASUS_DEV_CLASS | ||
76 | {}, | 89 | {}, |
77 | {} | 90 | {} |
78 | }; | 91 | }; |
diff --git a/drivers/net/usb/pegasus.h b/drivers/net/usb/pegasus.h index f968c834ff63..5d02f0200737 100644 --- a/drivers/net/usb/pegasus.h +++ b/drivers/net/usb/pegasus.h | |||
@@ -202,7 +202,11 @@ PEGASUS_DEV( "AEI USB Fast Ethernet Adapter", VENDOR_AEILAB, 0x1701, | |||
202 | DEFAULT_GPIO_RESET | PEGASUS_II ) | 202 | DEFAULT_GPIO_RESET | PEGASUS_II ) |
203 | PEGASUS_DEV( "Allied Telesyn Int. AT-USB100", VENDOR_ALLIEDTEL, 0xb100, | 203 | PEGASUS_DEV( "Allied Telesyn Int. AT-USB100", VENDOR_ALLIEDTEL, 0xb100, |
204 | DEFAULT_GPIO_RESET | PEGASUS_II ) | 204 | DEFAULT_GPIO_RESET | PEGASUS_II ) |
205 | PEGASUS_DEV( "Belkin F5D5050 USB Ethernet", VENDOR_BELKIN, 0x0121, | 205 | /* |
206 | * Distinguish between this Belkin adaptor and the Belkin bluetooth adaptors | ||
207 | * with the same product IDs by checking the device class too. | ||
208 | */ | ||
209 | PEGASUS_DEV_CLASS( "Belkin F5D5050 USB Ethernet", VENDOR_BELKIN, 0x0121, 0x00, | ||
206 | DEFAULT_GPIO_RESET | PEGASUS_II ) | 210 | DEFAULT_GPIO_RESET | PEGASUS_II ) |
207 | PEGASUS_DEV( "Billionton USB-100", VENDOR_BILLIONTON, 0x0986, | 211 | PEGASUS_DEV( "Billionton USB-100", VENDOR_BILLIONTON, 0x0986, |
208 | DEFAULT_GPIO_RESET ) | 212 | DEFAULT_GPIO_RESET ) |
diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile new file mode 100644 index 000000000000..880f5098eac9 --- /dev/null +++ b/drivers/net/vmxnet3/Makefile | |||
@@ -0,0 +1,35 @@ | |||
1 | ################################################################################ | ||
2 | # | ||
3 | # Linux driver for VMware's vmxnet3 ethernet NIC. | ||
4 | # | ||
5 | # Copyright (C) 2007-2009, VMware, Inc. All Rights Reserved. | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify it | ||
8 | # under the terms of the GNU General Public License as published by the | ||
9 | # Free Software Foundation; version 2 of the License and no later version. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, but | ||
12 | # WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
14 | # NON INFRINGEMENT. See the GNU General Public License for more | ||
15 | # details. | ||
16 | # | ||
17 | # You should have received a copy of the GNU General Public License | ||
18 | # along with this program; if not, write to the Free Software | ||
19 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | ||
20 | # | ||
21 | # The full GNU General Public License is included in this distribution in | ||
22 | # the file called "COPYING". | ||
23 | # | ||
24 | # Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> | ||
25 | # | ||
26 | # | ||
27 | ################################################################################ | ||
28 | |||
29 | # | ||
30 | # Makefile for the VMware vmxnet3 ethernet NIC driver | ||
31 | # | ||
32 | |||
33 | obj-$(CONFIG_VMXNET3) += vmxnet3.o | ||
34 | |||
35 | vmxnet3-objs := vmxnet3_drv.o vmxnet3_ethtool.o | ||
diff --git a/drivers/net/vmxnet3/upt1_defs.h b/drivers/net/vmxnet3/upt1_defs.h new file mode 100644 index 000000000000..37108fb226d3 --- /dev/null +++ b/drivers/net/vmxnet3/upt1_defs.h | |||
@@ -0,0 +1,96 @@ | |||
1 | /* | ||
2 | * Linux driver for VMware's vmxnet3 ethernet NIC. | ||
3 | * | ||
4 | * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; version 2 of the License and no later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
13 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
14 | * details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | * | ||
20 | * The full GNU General Public License is included in this distribution in | ||
21 | * the file called "COPYING". | ||
22 | * | ||
23 | * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #ifndef _UPT1_DEFS_H | ||
28 | #define _UPT1_DEFS_H | ||
29 | |||
30 | struct UPT1_TxStats { | ||
31 | u64 TSOPktsTxOK; /* TSO pkts post-segmentation */ | ||
32 | u64 TSOBytesTxOK; | ||
33 | u64 ucastPktsTxOK; | ||
34 | u64 ucastBytesTxOK; | ||
35 | u64 mcastPktsTxOK; | ||
36 | u64 mcastBytesTxOK; | ||
37 | u64 bcastPktsTxOK; | ||
38 | u64 bcastBytesTxOK; | ||
39 | u64 pktsTxError; | ||
40 | u64 pktsTxDiscard; | ||
41 | }; | ||
42 | |||
43 | struct UPT1_RxStats { | ||
44 | u64 LROPktsRxOK; /* LRO pkts */ | ||
45 | u64 LROBytesRxOK; /* bytes from LRO pkts */ | ||
46 | /* the following counters are for pkts from the wire, i.e., pre-LRO */ | ||
47 | u64 ucastPktsRxOK; | ||
48 | u64 ucastBytesRxOK; | ||
49 | u64 mcastPktsRxOK; | ||
50 | u64 mcastBytesRxOK; | ||
51 | u64 bcastPktsRxOK; | ||
52 | u64 bcastBytesRxOK; | ||
53 | u64 pktsRxOutOfBuf; | ||
54 | u64 pktsRxError; | ||
55 | }; | ||
56 | |||
57 | /* interrupt moderation level */ | ||
58 | enum { | ||
59 | UPT1_IML_NONE = 0, /* no interrupt moderation */ | ||
60 | UPT1_IML_HIGHEST = 7, /* least intr generated */ | ||
61 | UPT1_IML_ADAPTIVE = 8, /* adaptive intr moderation */ | ||
62 | }; | ||
63 | /* values for UPT1_RSSConf.hashFunc */ | ||
64 | enum { | ||
65 | UPT1_RSS_HASH_TYPE_NONE = 0x0, | ||
66 | UPT1_RSS_HASH_TYPE_IPV4 = 0x01, | ||
67 | UPT1_RSS_HASH_TYPE_TCP_IPV4 = 0x02, | ||
68 | UPT1_RSS_HASH_TYPE_IPV6 = 0x04, | ||
69 | UPT1_RSS_HASH_TYPE_TCP_IPV6 = 0x08, | ||
70 | }; | ||
71 | |||
72 | enum { | ||
73 | UPT1_RSS_HASH_FUNC_NONE = 0x0, | ||
74 | UPT1_RSS_HASH_FUNC_TOEPLITZ = 0x01, | ||
75 | }; | ||
76 | |||
77 | #define UPT1_RSS_MAX_KEY_SIZE 40 | ||
78 | #define UPT1_RSS_MAX_IND_TABLE_SIZE 128 | ||
79 | |||
80 | struct UPT1_RSSConf { | ||
81 | u16 hashType; | ||
82 | u16 hashFunc; | ||
83 | u16 hashKeySize; | ||
84 | u16 indTableSize; | ||
85 | u8 hashKey[UPT1_RSS_MAX_KEY_SIZE]; | ||
86 | u8 indTable[UPT1_RSS_MAX_IND_TABLE_SIZE]; | ||
87 | }; | ||
88 | |||
89 | /* features */ | ||
90 | enum { | ||
91 | UPT1_F_RXCSUM = 0x0001, /* rx csum verification */ | ||
92 | UPT1_F_RSS = 0x0002, | ||
93 | UPT1_F_RXVLAN = 0x0004, /* VLAN tag stripping */ | ||
94 | UPT1_F_LRO = 0x0008, | ||
95 | }; | ||
96 | #endif | ||
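The UPT1_RSSConf layout above is filled in by the driver before being handed to the device. A sketch of what a Toeplitz configuration over TCP/IPv4 and TCP/IPv6 flows might look like (illustrative only; the key and indirection table contents are chosen by the caller):

/* Illustrative only: populate a UPT1_RSSConf for Toeplitz hashing of
 * TCP/IPv4 and TCP/IPv6 flows using the constants defined above. */
static void example_fill_rss_conf(struct UPT1_RSSConf *conf)
{
	conf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 | UPT1_RSS_HASH_TYPE_TCP_IPV6;
	conf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
	conf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
	conf->indTableSize = UPT1_RSS_MAX_IND_TABLE_SIZE;
	/* conf->hashKey[] and conf->indTable[] would then be filled in */
}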
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h new file mode 100644 index 000000000000..dc8ee4438a4f --- /dev/null +++ b/drivers/net/vmxnet3/vmxnet3_defs.h | |||
@@ -0,0 +1,535 @@ | |||
1 | /* | ||
2 | * Linux driver for VMware's vmxnet3 ethernet NIC. | ||
3 | * | ||
4 | * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; version 2 of the License and no later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
13 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
14 | * details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | * | ||
20 | * The full GNU General Public License is included in this distribution in | ||
21 | * the file called "COPYING". | ||
22 | * | ||
23 | * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #ifndef _VMXNET3_DEFS_H_ | ||
28 | #define _VMXNET3_DEFS_H_ | ||
29 | |||
30 | #include "upt1_defs.h" | ||
31 | |||
32 | /* all registers are 32 bit wide */ | ||
33 | /* BAR 1 */ | ||
34 | enum { | ||
35 | VMXNET3_REG_VRRS = 0x0, /* Vmxnet3 Revision Report Selection */ | ||
36 | VMXNET3_REG_UVRS = 0x8, /* UPT Version Report Selection */ | ||
37 | VMXNET3_REG_DSAL = 0x10, /* Driver Shared Address Low */ | ||
38 | VMXNET3_REG_DSAH = 0x18, /* Driver Shared Address High */ | ||
39 | VMXNET3_REG_CMD = 0x20, /* Command */ | ||
40 | VMXNET3_REG_MACL = 0x28, /* MAC Address Low */ | ||
41 | VMXNET3_REG_MACH = 0x30, /* MAC Address High */ | ||
42 | VMXNET3_REG_ICR = 0x38, /* Interrupt Cause Register */ | ||
43 | VMXNET3_REG_ECR = 0x40 /* Event Cause Register */ | ||
44 | }; | ||
45 | |||
46 | /* BAR 0 */ | ||
47 | enum { | ||
48 | VMXNET3_REG_IMR = 0x0, /* Interrupt Mask Register */ | ||
49 | VMXNET3_REG_TXPROD = 0x600, /* Tx Producer Index */ | ||
50 | VMXNET3_REG_RXPROD = 0x800, /* Rx Producer Index for ring 1 */ | ||
51 | VMXNET3_REG_RXPROD2 = 0xA00 /* Rx Producer Index for ring 2 */ | ||
52 | }; | ||
53 | |||
54 | #define VMXNET3_PT_REG_SIZE 4096 /* BAR 0 */ | ||
55 | #define VMXNET3_VD_REG_SIZE 4096 /* BAR 1 */ | ||
56 | |||
57 | #define VMXNET3_REG_ALIGN 8 /* All registers are 8-byte aligned. */ | ||
58 | #define VMXNET3_REG_ALIGN_MASK 0x7 | ||
59 | |||
60 | /* I/O Mapped access to registers */ | ||
61 | #define VMXNET3_IO_TYPE_PT 0 | ||
62 | #define VMXNET3_IO_TYPE_VD 1 | ||
63 | #define VMXNET3_IO_ADDR(type, reg) (((type) << 24) | ((reg) & 0xFFFFFF)) | ||
64 | #define VMXNET3_IO_TYPE(addr) ((addr) >> 24) | ||
65 | #define VMXNET3_IO_REG(addr) ((addr) & 0xFFFFFF) | ||
66 | |||
67 | enum { | ||
68 | VMXNET3_CMD_FIRST_SET = 0xCAFE0000, | ||
69 | VMXNET3_CMD_ACTIVATE_DEV = VMXNET3_CMD_FIRST_SET, | ||
70 | VMXNET3_CMD_QUIESCE_DEV, | ||
71 | VMXNET3_CMD_RESET_DEV, | ||
72 | VMXNET3_CMD_UPDATE_RX_MODE, | ||
73 | VMXNET3_CMD_UPDATE_MAC_FILTERS, | ||
74 | VMXNET3_CMD_UPDATE_VLAN_FILTERS, | ||
75 | VMXNET3_CMD_UPDATE_RSSIDT, | ||
76 | VMXNET3_CMD_UPDATE_IML, | ||
77 | VMXNET3_CMD_UPDATE_PMCFG, | ||
78 | VMXNET3_CMD_UPDATE_FEATURE, | ||
79 | VMXNET3_CMD_LOAD_PLUGIN, | ||
80 | |||
81 | VMXNET3_CMD_FIRST_GET = 0xF00D0000, | ||
82 | VMXNET3_CMD_GET_QUEUE_STATUS = VMXNET3_CMD_FIRST_GET, | ||
83 | VMXNET3_CMD_GET_STATS, | ||
84 | VMXNET3_CMD_GET_LINK, | ||
85 | VMXNET3_CMD_GET_PERM_MAC_LO, | ||
86 | VMXNET3_CMD_GET_PERM_MAC_HI, | ||
87 | VMXNET3_CMD_GET_DID_LO, | ||
88 | VMXNET3_CMD_GET_DID_HI, | ||
89 | VMXNET3_CMD_GET_DEV_EXTRA_INFO, | ||
90 | VMXNET3_CMD_GET_CONF_INTR | ||
91 | }; | ||
92 | |||
93 | struct Vmxnet3_TxDesc { | ||
94 | u64 addr; | ||
95 | |||
96 | u32 len:14; | ||
97 | u32 gen:1; /* generation bit */ | ||
98 | u32 rsvd:1; | ||
99 | u32 dtype:1; /* descriptor type */ | ||
100 | u32 ext1:1; | ||
101 | u32 msscof:14; /* MSS, checksum offset, flags */ | ||
102 | |||
103 | u32 hlen:10; /* header len */ | ||
104 | u32 om:2; /* offload mode */ | ||
105 | u32 eop:1; /* End Of Packet */ | ||
106 | u32 cq:1; /* completion request */ | ||
107 | u32 ext2:1; | ||
108 | u32 ti:1; /* VLAN Tag Insertion */ | ||
109 | u32 tci:16; /* Tag to Insert */ | ||
110 | }; | ||
111 | |||
112 | /* TxDesc.OM values */ | ||
113 | #define VMXNET3_OM_NONE 0 | ||
114 | #define VMXNET3_OM_CSUM 2 | ||
115 | #define VMXNET3_OM_TSO 3 | ||
116 | |||
117 | /* fields in TxDesc we access w/o using bit fields */ | ||
118 | #define VMXNET3_TXD_EOP_SHIFT 12 | ||
119 | #define VMXNET3_TXD_CQ_SHIFT 13 | ||
120 | #define VMXNET3_TXD_GEN_SHIFT 14 | ||
121 | |||
122 | #define VMXNET3_TXD_CQ (1 << VMXNET3_TXD_CQ_SHIFT) | ||
123 | #define VMXNET3_TXD_EOP (1 << VMXNET3_TXD_EOP_SHIFT) | ||
124 | #define VMXNET3_TXD_GEN (1 << VMXNET3_TXD_GEN_SHIFT) | ||
125 | |||
126 | #define VMXNET3_HDR_COPY_SIZE 128 | ||
127 | |||
128 | |||
129 | struct Vmxnet3_TxDataDesc { | ||
130 | u8 data[VMXNET3_HDR_COPY_SIZE]; | ||
131 | }; | ||
132 | |||
133 | |||
134 | struct Vmxnet3_TxCompDesc { | ||
135 | u32 txdIdx:12; /* Index of the EOP TxDesc */ | ||
136 | u32 ext1:20; | ||
137 | |||
138 | u32 ext2; | ||
139 | u32 ext3; | ||
140 | |||
141 | u32 rsvd:24; | ||
142 | u32 type:7; /* completion type */ | ||
143 | u32 gen:1; /* generation bit */ | ||
144 | }; | ||
145 | |||
146 | |||
147 | struct Vmxnet3_RxDesc { | ||
148 | u64 addr; | ||
149 | |||
150 | u32 len:14; | ||
151 | u32 btype:1; /* Buffer Type */ | ||
152 | u32 dtype:1; /* Descriptor type */ | ||
153 | u32 rsvd:15; | ||
154 | u32 gen:1; /* Generation bit */ | ||
155 | |||
156 | u32 ext1; | ||
157 | }; | ||
158 | |||
159 | /* values of RXD.BTYPE */ | ||
160 | #define VMXNET3_RXD_BTYPE_HEAD 0 /* head only */ | ||
161 | #define VMXNET3_RXD_BTYPE_BODY 1 /* body only */ | ||
162 | |||
163 | /* fields in RxDesc we access w/o using bit fields */ | ||
164 | #define VMXNET3_RXD_BTYPE_SHIFT 14 | ||
165 | #define VMXNET3_RXD_GEN_SHIFT 31 | ||
166 | |||
167 | |||
168 | struct Vmxnet3_RxCompDesc { | ||
169 | u32 rxdIdx:12; /* Index of the RxDesc */ | ||
170 | u32 ext1:2; | ||
171 | u32 eop:1; /* End of Packet */ | ||
172 | u32 sop:1; /* Start of Packet */ | ||
173 | u32 rqID:10; /* rx queue/ring ID */ | ||
174 | u32 rssType:4; /* RSS hash type used */ | ||
175 | u32 cnc:1; /* Checksum Not Calculated */ | ||
176 | u32 ext2:1; | ||
177 | |||
178 | u32 rssHash; /* RSS hash value */ | ||
179 | |||
180 | u32 len:14; /* data length */ | ||
181 | u32 err:1; /* Error */ | ||
182 | u32 ts:1; /* Tag is stripped */ | ||
183 | u32 tci:16; /* Tag stripped */ | ||
184 | |||
185 | u32 csum:16; | ||
186 | u32 tuc:1; /* TCP/UDP Checksum Correct */ | ||
187 | u32 udp:1; /* UDP packet */ | ||
188 | u32 tcp:1; /* TCP packet */ | ||
189 | u32 ipc:1; /* IP Checksum Correct */ | ||
190 | u32 v6:1; /* IPv6 */ | ||
191 | u32 v4:1; /* IPv4 */ | ||
192 | u32 frg:1; /* IP Fragment */ | ||
193 | u32 fcs:1; /* Frame CRC correct */ | ||
194 | u32 type:7; /* completion type */ | ||
195 | u32 gen:1; /* generation bit */ | ||
196 | }; | ||
197 | |||
198 | /* fields in RxCompDesc we access via Vmxnet3_GenericDesc.dword[3] */ | ||
199 | #define VMXNET3_RCD_TUC_SHIFT 16 | ||
200 | #define VMXNET3_RCD_IPC_SHIFT 19 | ||
201 | |||
202 | /* fields in RxCompDesc we access via Vmxnet3_GenericDesc.qword[1] */ | ||
203 | #define VMXNET3_RCD_TYPE_SHIFT 56 | ||
204 | #define VMXNET3_RCD_GEN_SHIFT 63 | ||
205 | |||
206 | /* csum OK for TCP/UDP pkts over IP */ | ||
207 | #define VMXNET3_RCD_CSUM_OK (1 << VMXNET3_RCD_TUC_SHIFT | \ | ||
208 | 1 << VMXNET3_RCD_IPC_SHIFT) | ||
209 | |||
210 | /* value of RxCompDesc.rssType */ | ||
211 | enum { | ||
212 | VMXNET3_RCD_RSS_TYPE_NONE = 0, | ||
213 | VMXNET3_RCD_RSS_TYPE_IPV4 = 1, | ||
214 | VMXNET3_RCD_RSS_TYPE_TCPIPV4 = 2, | ||
215 | VMXNET3_RCD_RSS_TYPE_IPV6 = 3, | ||
216 | VMXNET3_RCD_RSS_TYPE_TCPIPV6 = 4, | ||
217 | }; | ||
218 | |||
219 | |||
220 | /* a union for accessing all cmd/completion descriptors */ | ||
221 | union Vmxnet3_GenericDesc { | ||
222 | u64 qword[2]; | ||
223 | u32 dword[4]; | ||
224 | u16 word[8]; | ||
225 | struct Vmxnet3_TxDesc txd; | ||
226 | struct Vmxnet3_RxDesc rxd; | ||
227 | struct Vmxnet3_TxCompDesc tcd; | ||
228 | struct Vmxnet3_RxCompDesc rcd; | ||
229 | }; | ||
230 | |||
231 | #define VMXNET3_INIT_GEN 1 | ||
232 | |||
233 | /* Max size of a single tx buffer */ | ||
234 | #define VMXNET3_MAX_TX_BUF_SIZE (1 << 14) | ||
235 | |||
236 | /* # of tx desc needed for a tx buffer size */ | ||
237 | #define VMXNET3_TXD_NEEDED(size) (((size) + VMXNET3_MAX_TX_BUF_SIZE - 1) / \ | ||
238 | VMXNET3_MAX_TX_BUF_SIZE) | ||
239 | |||
240 | /* max # of tx descs for a non-tso pkt */ | ||
241 | #define VMXNET3_MAX_TXD_PER_PKT 16 | ||
242 | |||
243 | /* Max size of a single rx buffer */ | ||
244 | #define VMXNET3_MAX_RX_BUF_SIZE ((1 << 14) - 1) | ||
245 | /* Minimum size of a type 0 buffer */ | ||
246 | #define VMXNET3_MIN_T0_BUF_SIZE 128 | ||
247 | #define VMXNET3_MAX_CSUM_OFFSET 1024 | ||
248 | |||
249 | /* Ring base address alignment */ | ||
250 | #define VMXNET3_RING_BA_ALIGN 512 | ||
251 | #define VMXNET3_RING_BA_MASK (VMXNET3_RING_BA_ALIGN - 1) | ||
252 | |||
253 | /* Ring size must be a multiple of 32 */ | ||
254 | #define VMXNET3_RING_SIZE_ALIGN 32 | ||
255 | #define VMXNET3_RING_SIZE_MASK (VMXNET3_RING_SIZE_ALIGN - 1) | ||
256 | |||
257 | /* Max ring size */ | ||
258 | #define VMXNET3_TX_RING_MAX_SIZE 4096 | ||
259 | #define VMXNET3_TC_RING_MAX_SIZE 4096 | ||
260 | #define VMXNET3_RX_RING_MAX_SIZE 4096 | ||
261 | #define VMXNET3_RC_RING_MAX_SIZE 8192 | ||
262 | |||
263 | /* a list of reasons for queue stop */ | ||
264 | |||
265 | enum { | ||
266 | VMXNET3_ERR_NOEOP = 0x80000000, /* cannot find the EOP desc of a pkt */ | ||
267 | VMXNET3_ERR_TXD_REUSE = 0x80000001, /* reuse TxDesc before tx completion */ | ||
268 | VMXNET3_ERR_BIG_PKT = 0x80000002, /* too many TxDesc for a pkt */ | ||
269 | VMXNET3_ERR_DESC_NOT_SPT = 0x80000003, /* descriptor type not supported */ | ||
270 | VMXNET3_ERR_SMALL_BUF = 0x80000004, /* type 0 buffer too small */ | ||
271 | VMXNET3_ERR_STRESS = 0x80000005, /* stress option firing in vmkernel */ | ||
272 | VMXNET3_ERR_SWITCH = 0x80000006, /* mode switch failure */ | ||
273 | VMXNET3_ERR_TXD_INVALID = 0x80000007, /* invalid TxDesc */ | ||
274 | }; | ||
275 | |||
276 | /* completion descriptor types */ | ||
277 | #define VMXNET3_CDTYPE_TXCOMP 0 /* Tx Completion Descriptor */ | ||
278 | #define VMXNET3_CDTYPE_RXCOMP 3 /* Rx Completion Descriptor */ | ||
279 | |||
280 | enum { | ||
281 | VMXNET3_GOS_BITS_UNK = 0, /* unknown */ | ||
282 | VMXNET3_GOS_BITS_32 = 1, | ||
283 | VMXNET3_GOS_BITS_64 = 2, | ||
284 | }; | ||
285 | |||
286 | #define VMXNET3_GOS_TYPE_LINUX 1 | ||
287 | |||
288 | |||
289 | struct Vmxnet3_GOSInfo { | ||
290 | u32 gosBits:2; /* 32-bit or 64-bit? */ | ||
291 | u32 gosType:4; /* which guest */ | ||
292 | u32 gosVer:16; /* gos version */ | ||
293 | u32 gosMisc:10; /* other info about gos */ | ||
294 | }; | ||
295 | |||
296 | |||
297 | struct Vmxnet3_DriverInfo { | ||
298 | u32 version; | ||
299 | struct Vmxnet3_GOSInfo gos; | ||
300 | u32 vmxnet3RevSpt; | ||
301 | u32 uptVerSpt; | ||
302 | }; | ||
303 | |||
304 | |||
305 | #define VMXNET3_REV1_MAGIC 0xbabefee1 | ||
306 | |||
307 | /* | ||
308 | * QueueDescPA must be 128-byte aligned. It points to an array of | ||
309 | * Vmxnet3_TxQueueDesc followed by an array of Vmxnet3_RxQueueDesc. | ||
310 | * The number of Vmxnet3_TxQueueDesc/Vmxnet3_RxQueueDesc is specified by | ||
311 | * Vmxnet3_MiscConf.numTxQueues/numRxQueues, respectively. | ||
312 | */ | ||
313 | #define VMXNET3_QUEUE_DESC_ALIGN 128 | ||
314 | |||
315 | |||
316 | struct Vmxnet3_MiscConf { | ||
317 | struct Vmxnet3_DriverInfo driverInfo; | ||
318 | u64 uptFeatures; | ||
319 | u64 ddPA; /* driver data PA */ | ||
320 | u64 queueDescPA; /* queue descriptor table PA */ | ||
321 | u32 ddLen; /* driver data len */ | ||
322 | u32 queueDescLen; /* queue desc. table len in bytes */ | ||
323 | u32 mtu; | ||
324 | u16 maxNumRxSG; | ||
325 | u8 numTxQueues; | ||
326 | u8 numRxQueues; | ||
327 | u32 reserved[4]; | ||
328 | }; | ||
329 | |||
330 | |||
331 | struct Vmxnet3_TxQueueConf { | ||
332 | u64 txRingBasePA; | ||
333 | u64 dataRingBasePA; | ||
334 | u64 compRingBasePA; | ||
335 | u64 ddPA; /* driver data */ | ||
336 | u64 reserved; | ||
337 | u32 txRingSize; /* # of tx desc */ | ||
338 | u32 dataRingSize; /* # of data desc */ | ||
339 | u32 compRingSize; /* # of comp desc */ | ||
340 | u32 ddLen; /* size of driver data */ | ||
341 | u8 intrIdx; | ||
342 | u8 _pad[7]; | ||
343 | }; | ||
344 | |||
345 | |||
346 | struct Vmxnet3_RxQueueConf { | ||
347 | u64 rxRingBasePA[2]; | ||
348 | u64 compRingBasePA; | ||
349 | u64 ddPA; /* driver data */ | ||
350 | u64 reserved; | ||
351 | u32 rxRingSize[2]; /* # of rx desc */ | ||
352 | u32 compRingSize; /* # of rx comp desc */ | ||
353 | u32 ddLen; /* size of driver data */ | ||
354 | u8 intrIdx; | ||
355 | u8 _pad[7]; | ||
356 | }; | ||
357 | |||
358 | |||
359 | enum vmxnet3_intr_mask_mode { | ||
360 | VMXNET3_IMM_AUTO = 0, | ||
361 | VMXNET3_IMM_ACTIVE = 1, | ||
362 | VMXNET3_IMM_LAZY = 2 | ||
363 | }; | ||
364 | |||
365 | enum vmxnet3_intr_type { | ||
366 | VMXNET3_IT_AUTO = 0, | ||
367 | VMXNET3_IT_INTX = 1, | ||
368 | VMXNET3_IT_MSI = 2, | ||
369 | VMXNET3_IT_MSIX = 3 | ||
370 | }; | ||
371 | |||
372 | #define VMXNET3_MAX_TX_QUEUES 8 | ||
373 | #define VMXNET3_MAX_RX_QUEUES 16 | ||
374 | /* plus 1 for events */ | ||
375 | #define VMXNET3_MAX_INTRS 25 | ||
376 | |||
377 | |||
378 | struct Vmxnet3_IntrConf { | ||
379 | bool autoMask; | ||
380 | u8 numIntrs; /* # of interrupts */ | ||
381 | u8 eventIntrIdx; | ||
382 | u8 modLevels[VMXNET3_MAX_INTRS]; /* moderation level for | ||
383 | * each intr */ | ||
384 | u32 reserved[3]; | ||
385 | }; | ||
386 | |||
387 | /* one bit per VLAN ID, the size is in the units of u32 */ | ||
388 | #define VMXNET3_VFT_SIZE (4096 / (sizeof(u32) * 8)) | ||
389 | |||
390 | |||
391 | struct Vmxnet3_QueueStatus { | ||
392 | bool stopped; | ||
393 | u8 _pad[3]; | ||
394 | u32 error; | ||
395 | }; | ||
396 | |||
397 | |||
398 | struct Vmxnet3_TxQueueCtrl { | ||
399 | u32 txNumDeferred; | ||
400 | u32 txThreshold; | ||
401 | u64 reserved; | ||
402 | }; | ||
403 | |||
404 | |||
405 | struct Vmxnet3_RxQueueCtrl { | ||
406 | bool updateRxProd; | ||
407 | u8 _pad[7]; | ||
408 | u64 reserved; | ||
409 | }; | ||
410 | |||
411 | enum { | ||
412 | VMXNET3_RXM_UCAST = 0x01, /* unicast only */ | ||
413 | VMXNET3_RXM_MCAST = 0x02, /* multicast passing the filters */ | ||
414 | VMXNET3_RXM_BCAST = 0x04, /* broadcast only */ | ||
415 | VMXNET3_RXM_ALL_MULTI = 0x08, /* all multicast */ | ||
416 | VMXNET3_RXM_PROMISC = 0x10 /* promiscuous */ | ||
417 | }; | ||
418 | |||
419 | struct Vmxnet3_RxFilterConf { | ||
420 | u32 rxMode; /* VMXNET3_RXM_xxx */ | ||
421 | u16 mfTableLen; /* size of the multicast filter table */ | ||
422 | u16 _pad1; | ||
423 | u64 mfTablePA; /* PA of the multicast filters table */ | ||
424 | u32 vfTable[VMXNET3_VFT_SIZE]; /* vlan filter */ | ||
425 | }; | ||
426 | |||
427 | |||
428 | #define VMXNET3_PM_MAX_FILTERS 6 | ||
429 | #define VMXNET3_PM_MAX_PATTERN_SIZE 128 | ||
430 | #define VMXNET3_PM_MAX_MASK_SIZE (VMXNET3_PM_MAX_PATTERN_SIZE / 8) | ||
431 | |||
432 | #define VMXNET3_PM_WAKEUP_MAGIC 0x01 /* wake up on magic pkts */ | ||
433 | #define VMXNET3_PM_WAKEUP_FILTER 0x02 /* wake up on pkts matching | ||
434 | * filters */ | ||
435 | |||
436 | |||
437 | struct Vmxnet3_PM_PktFilter { | ||
438 | u8 maskSize; | ||
439 | u8 patternSize; | ||
440 | u8 mask[VMXNET3_PM_MAX_MASK_SIZE]; | ||
441 | u8 pattern[VMXNET3_PM_MAX_PATTERN_SIZE]; | ||
442 | u8 pad[6]; | ||
443 | }; | ||
444 | |||
445 | |||
446 | struct Vmxnet3_PMConf { | ||
447 | u16 wakeUpEvents; /* VMXNET3_PM_WAKEUP_xxx */ | ||
448 | u8 numFilters; | ||
449 | u8 pad[5]; | ||
450 | struct Vmxnet3_PM_PktFilter filters[VMXNET3_PM_MAX_FILTERS]; | ||
451 | }; | ||
452 | |||
453 | |||
454 | struct Vmxnet3_VariableLenConfDesc { | ||
455 | u32 confVer; | ||
456 | u32 confLen; | ||
457 | u64 confPA; | ||
458 | }; | ||
459 | |||
460 | |||
461 | struct Vmxnet3_TxQueueDesc { | ||
462 | struct Vmxnet3_TxQueueCtrl ctrl; | ||
463 | struct Vmxnet3_TxQueueConf conf; | ||
464 | |||
465 | /* Driver read after a GET command */ | ||
466 | struct Vmxnet3_QueueStatus status; | ||
467 | struct UPT1_TxStats stats; | ||
468 | u8 _pad[88]; /* 128 aligned */ | ||
469 | }; | ||
470 | |||
471 | |||
472 | struct Vmxnet3_RxQueueDesc { | ||
473 | struct Vmxnet3_RxQueueCtrl ctrl; | ||
474 | struct Vmxnet3_RxQueueConf conf; | ||
475 | /* Driver read after a GET command */ | ||
476 | struct Vmxnet3_QueueStatus status; | ||
477 | struct UPT1_RxStats stats; | ||
478 | u8 __pad[88]; /* 128 aligned */ | ||
479 | }; | ||
480 | |||
481 | |||
482 | struct Vmxnet3_DSDevRead { | ||
483 | /* read-only region for device, read by dev in response to a SET cmd */ | ||
484 | struct Vmxnet3_MiscConf misc; | ||
485 | struct Vmxnet3_IntrConf intrConf; | ||
486 | struct Vmxnet3_RxFilterConf rxFilterConf; | ||
487 | struct Vmxnet3_VariableLenConfDesc rssConfDesc; | ||
488 | struct Vmxnet3_VariableLenConfDesc pmConfDesc; | ||
489 | struct Vmxnet3_VariableLenConfDesc pluginConfDesc; | ||
490 | }; | ||
491 | |||
492 | /* All structures in DriverShared are padded to multiples of 8 bytes */ | ||
493 | struct Vmxnet3_DriverShared { | ||
494 | u32 magic; | ||
495 | /* make devRead start at 64bit boundaries */ | ||
496 | u32 pad; | ||
497 | struct Vmxnet3_DSDevRead devRead; | ||
498 | u32 ecr; | ||
499 | u32 reserved[5]; | ||
500 | }; | ||
501 | |||
502 | |||
503 | #define VMXNET3_ECR_RQERR (1 << 0) | ||
504 | #define VMXNET3_ECR_TQERR (1 << 1) | ||
505 | #define VMXNET3_ECR_LINK (1 << 2) | ||
506 | #define VMXNET3_ECR_DIC (1 << 3) | ||
507 | #define VMXNET3_ECR_DEBUG (1 << 4) | ||
508 | |||
509 | /* flip the gen bit of a ring */ | ||
510 | #define VMXNET3_FLIP_RING_GEN(gen) ((gen) = (gen) ^ 0x1) | ||
511 | |||
512 | /* only use this if moving the idx won't affect the gen bit */ | ||
513 | #define VMXNET3_INC_RING_IDX_ONLY(idx, ring_size) \ | ||
514 | do {\ | ||
515 | (idx)++;\ | ||
516 | if (unlikely((idx) == (ring_size))) {\ | ||
517 | (idx) = 0;\ | ||
518 | } \ | ||
519 | } while (0) | ||
520 | |||
521 | #define VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid) \ | ||
522 | (vfTable[vid >> 5] |= (1 << (vid & 31))) | ||
523 | #define VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid) \ | ||
524 | (vfTable[vid >> 5] &= ~(1 << (vid & 31))) | ||
525 | |||
526 | #define VMXNET3_VFTABLE_ENTRY_IS_SET(vfTable, vid) \ | ||
527 | ((vfTable[vid >> 5] & (1 << (vid & 31))) != 0) | ||
528 | |||
529 | #define VMXNET3_MAX_MTU 9000 | ||
530 | #define VMXNET3_MIN_MTU 60 | ||
531 | |||
532 | #define VMXNET3_LINK_UP (10000 << 16 | 1) /* 10 Gbps, up */ | ||
533 | #define VMXNET3_LINK_DOWN 0 | ||
534 | |||
535 | #endif /* _VMXNET3_DEFS_H_ */ | ||
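The VLAN filter table declared above is a 4096-bit bitmap stored as VMXNET3_VFT_SIZE u32 words, one bit per VLAN ID. A short sketch of the accessor macros in use (the helper function and the VLAN ID are illustrative, not part of the driver):

/* Illustrative use of the VLAN filter table macros defined above. */
static void example_vlan_filter(void)
{
	u32 vfTable[VMXNET3_VFT_SIZE] = { 0 };
	u16 vid = 100;

	VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);	/* allow VLAN 100 */

	if (VMXNET3_VFTABLE_ENTRY_IS_SET(vfTable, vid))
		VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid); /* and drop it again */
}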
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c new file mode 100644 index 000000000000..6a16f76f277e --- /dev/null +++ b/drivers/net/vmxnet3/vmxnet3_drv.c | |||
@@ -0,0 +1,2565 @@ | |||
1 | /* | ||
2 | * Linux driver for VMware's vmxnet3 ethernet NIC. | ||
3 | * | ||
4 | * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; version 2 of the License and no later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
13 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
14 | * details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | * | ||
20 | * The full GNU General Public License is included in this distribution in | ||
21 | * the file called "COPYING". | ||
22 | * | ||
23 | * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #include "vmxnet3_int.h" | ||
28 | |||
29 | char vmxnet3_driver_name[] = "vmxnet3"; | ||
30 | #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver" | ||
31 | |||
32 | |||
33 | /* | ||
34 | * PCI Device ID Table | ||
35 | * Last entry must be all 0s | ||
36 | */ | ||
37 | static const struct pci_device_id vmxnet3_pciid_table[] = { | ||
38 | {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)}, | ||
39 | {0} | ||
40 | }; | ||
41 | |||
42 | MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table); | ||
43 | |||
44 | static atomic_t devices_found; | ||
45 | |||
46 | |||
47 | /* | ||
48 | * Enable/Disable the given intr | ||
49 | */ | ||
50 | static void | ||
51 | vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx) | ||
52 | { | ||
53 | VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0); | ||
54 | } | ||
55 | |||
56 | |||
57 | static void | ||
58 | vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx) | ||
59 | { | ||
60 | VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1); | ||
61 | } | ||
62 | |||
63 | |||
64 | /* | ||
65 | * Enable/Disable all intrs used by the device | ||
66 | */ | ||
67 | static void | ||
68 | vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter) | ||
69 | { | ||
70 | int i; | ||
71 | |||
72 | for (i = 0; i < adapter->intr.num_intrs; i++) | ||
73 | vmxnet3_enable_intr(adapter, i); | ||
74 | } | ||
75 | |||
76 | |||
77 | static void | ||
78 | vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter) | ||
79 | { | ||
80 | int i; | ||
81 | |||
82 | for (i = 0; i < adapter->intr.num_intrs; i++) | ||
83 | vmxnet3_disable_intr(adapter, i); | ||
84 | } | ||
85 | |||
86 | |||
87 | static void | ||
88 | vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events) | ||
89 | { | ||
90 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events); | ||
91 | } | ||
92 | |||
93 | |||
94 | static bool | ||
95 | vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) | ||
96 | { | ||
97 | return netif_queue_stopped(adapter->netdev); | ||
98 | } | ||
99 | |||
100 | |||
101 | static void | ||
102 | vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) | ||
103 | { | ||
104 | tq->stopped = false; | ||
105 | netif_start_queue(adapter->netdev); | ||
106 | } | ||
107 | |||
108 | |||
109 | static void | ||
110 | vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) | ||
111 | { | ||
112 | tq->stopped = false; | ||
113 | netif_wake_queue(adapter->netdev); | ||
114 | } | ||
115 | |||
116 | |||
117 | static void | ||
118 | vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) | ||
119 | { | ||
120 | tq->stopped = true; | ||
121 | tq->num_stop++; | ||
122 | netif_stop_queue(adapter->netdev); | ||
123 | } | ||
124 | |||
125 | |||
126 | /* | ||
127 | * Check the link state. This may start or stop the tx queue. | ||
128 | */ | ||
129 | static void | ||
130 | vmxnet3_check_link(struct vmxnet3_adapter *adapter) | ||
131 | { | ||
132 | u32 ret; | ||
133 | |||
134 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK); | ||
135 | ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); | ||
136 | adapter->link_speed = ret >> 16; | ||
137 | if (ret & 1) { /* Link is up. */ | ||
138 | printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n", | ||
139 | adapter->netdev->name, adapter->link_speed); | ||
140 | if (!netif_carrier_ok(adapter->netdev)) | ||
141 | netif_carrier_on(adapter->netdev); | ||
142 | |||
143 | vmxnet3_tq_start(&adapter->tx_queue, adapter); | ||
144 | } else { | ||
145 | printk(KERN_INFO "%s: NIC Link is Down\n", | ||
146 | adapter->netdev->name); | ||
147 | if (netif_carrier_ok(adapter->netdev)) | ||
148 | netif_carrier_off(adapter->netdev); | ||
149 | |||
150 | vmxnet3_tq_stop(&adapter->tx_queue, adapter); | ||
151 | } | ||
152 | } | ||
153 | |||
154 | |||
155 | static void | ||
156 | vmxnet3_process_events(struct vmxnet3_adapter *adapter) | ||
157 | { | ||
158 | u32 events = adapter->shared->ecr; | ||
159 | if (!events) | ||
160 | return; | ||
161 | |||
162 | vmxnet3_ack_events(adapter, events); | ||
163 | |||
164 | /* Check if link state has changed */ | ||
165 | if (events & VMXNET3_ECR_LINK) | ||
166 | vmxnet3_check_link(adapter); | ||
167 | |||
168 | /* Check if there is an error on xmit/recv queues */ | ||
169 | if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { | ||
170 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | ||
171 | VMXNET3_CMD_GET_QUEUE_STATUS); | ||
172 | |||
173 | if (adapter->tqd_start->status.stopped) { | ||
174 | printk(KERN_ERR "%s: tq error 0x%x\n", | ||
175 | adapter->netdev->name, | ||
176 | adapter->tqd_start->status.error); | ||
177 | } | ||
178 | if (adapter->rqd_start->status.stopped) { | ||
179 | printk(KERN_ERR "%s: rq error 0x%x\n", | ||
180 | adapter->netdev->name, | ||
181 | adapter->rqd_start->status.error); | ||
182 | } | ||
183 | |||
184 | schedule_work(&adapter->work); | ||
185 | } | ||
186 | } | ||
187 | |||
188 | |||
189 | static void | ||
190 | vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi, | ||
191 | struct pci_dev *pdev) | ||
192 | { | ||
193 | if (tbi->map_type == VMXNET3_MAP_SINGLE) | ||
194 | pci_unmap_single(pdev, tbi->dma_addr, tbi->len, | ||
195 | PCI_DMA_TODEVICE); | ||
196 | else if (tbi->map_type == VMXNET3_MAP_PAGE) | ||
197 | pci_unmap_page(pdev, tbi->dma_addr, tbi->len, | ||
198 | PCI_DMA_TODEVICE); | ||
199 | else | ||
200 | BUG_ON(tbi->map_type != VMXNET3_MAP_NONE); | ||
201 | |||
202 | tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */ | ||
203 | } | ||
204 | |||
205 | |||
206 | static int | ||
207 | vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq, | ||
208 | struct pci_dev *pdev, struct vmxnet3_adapter *adapter) | ||
209 | { | ||
210 | struct sk_buff *skb; | ||
211 | int entries = 0; | ||
212 | |||
213 | /* no out of order completion */ | ||
214 | BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp); | ||
215 | BUG_ON(tq->tx_ring.base[eop_idx].txd.eop != 1); | ||
216 | |||
217 | skb = tq->buf_info[eop_idx].skb; | ||
218 | BUG_ON(skb == NULL); | ||
219 | tq->buf_info[eop_idx].skb = NULL; | ||
220 | |||
221 | VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size); | ||
222 | |||
223 | while (tq->tx_ring.next2comp != eop_idx) { | ||
224 | vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp, | ||
225 | pdev); | ||
226 | |||
227 | /* update next2comp w/o tx_lock. Since we only ever mark more, | ||
228 | * never fewer, tx ring entries available, the worst case is | ||
229 | * that the tx routine incorrectly re-queues a pkt because it | ||
230 | * sees too few free tx ring entries. | ||
231 | */ | ||
232 | vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring); | ||
233 | entries++; | ||
234 | } | ||
235 | |||
236 | dev_kfree_skb_any(skb); | ||
237 | return entries; | ||
238 | } | ||
239 | |||
240 | |||
241 | static int | ||
242 | vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq, | ||
243 | struct vmxnet3_adapter *adapter) | ||
244 | { | ||
245 | int completed = 0; | ||
246 | union Vmxnet3_GenericDesc *gdesc; | ||
247 | |||
248 | gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; | ||
249 | while (gdesc->tcd.gen == tq->comp_ring.gen) { | ||
250 | completed += vmxnet3_unmap_pkt(gdesc->tcd.txdIdx, tq, | ||
251 | adapter->pdev, adapter); | ||
252 | |||
253 | vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring); | ||
254 | gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; | ||
255 | } | ||
256 | |||
257 | if (completed) { | ||
258 | spin_lock(&tq->tx_lock); | ||
259 | if (unlikely(vmxnet3_tq_stopped(tq, adapter) && | ||
260 | vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) > | ||
261 | VMXNET3_WAKE_QUEUE_THRESHOLD(tq) && | ||
262 | netif_carrier_ok(adapter->netdev))) { | ||
263 | vmxnet3_tq_wake(tq, adapter); | ||
264 | } | ||
265 | spin_unlock(&tq->tx_lock); | ||
266 | } | ||
267 | return completed; | ||
268 | } | ||
269 | |||
270 | |||
271 | static void | ||
272 | vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq, | ||
273 | struct vmxnet3_adapter *adapter) | ||
274 | { | ||
275 | int i; | ||
276 | |||
277 | while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) { | ||
278 | struct vmxnet3_tx_buf_info *tbi; | ||
279 | union Vmxnet3_GenericDesc *gdesc; | ||
280 | |||
281 | tbi = tq->buf_info + tq->tx_ring.next2comp; | ||
282 | gdesc = tq->tx_ring.base + tq->tx_ring.next2comp; | ||
283 | |||
284 | vmxnet3_unmap_tx_buf(tbi, adapter->pdev); | ||
285 | if (tbi->skb) { | ||
286 | dev_kfree_skb_any(tbi->skb); | ||
287 | tbi->skb = NULL; | ||
288 | } | ||
289 | vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring); | ||
290 | } | ||
291 | |||
292 | /* sanity check, verify all buffers are indeed unmapped and freed */ | ||
293 | for (i = 0; i < tq->tx_ring.size; i++) { | ||
294 | BUG_ON(tq->buf_info[i].skb != NULL || | ||
295 | tq->buf_info[i].map_type != VMXNET3_MAP_NONE); | ||
296 | } | ||
297 | |||
298 | tq->tx_ring.gen = VMXNET3_INIT_GEN; | ||
299 | tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0; | ||
300 | |||
301 | tq->comp_ring.gen = VMXNET3_INIT_GEN; | ||
302 | tq->comp_ring.next2proc = 0; | ||
303 | } | ||
304 | |||
305 | |||
306 | void | ||
307 | vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq, | ||
308 | struct vmxnet3_adapter *adapter) | ||
309 | { | ||
310 | if (tq->tx_ring.base) { | ||
311 | pci_free_consistent(adapter->pdev, tq->tx_ring.size * | ||
312 | sizeof(struct Vmxnet3_TxDesc), | ||
313 | tq->tx_ring.base, tq->tx_ring.basePA); | ||
314 | tq->tx_ring.base = NULL; | ||
315 | } | ||
316 | if (tq->data_ring.base) { | ||
317 | pci_free_consistent(adapter->pdev, tq->data_ring.size * | ||
318 | sizeof(struct Vmxnet3_TxDataDesc), | ||
319 | tq->data_ring.base, tq->data_ring.basePA); | ||
320 | tq->data_ring.base = NULL; | ||
321 | } | ||
322 | if (tq->comp_ring.base) { | ||
323 | pci_free_consistent(adapter->pdev, tq->comp_ring.size * | ||
324 | sizeof(struct Vmxnet3_TxCompDesc), | ||
325 | tq->comp_ring.base, tq->comp_ring.basePA); | ||
326 | tq->comp_ring.base = NULL; | ||
327 | } | ||
328 | kfree(tq->buf_info); | ||
329 | tq->buf_info = NULL; | ||
330 | } | ||
331 | |||
332 | |||
333 | static void | ||
334 | vmxnet3_tq_init(struct vmxnet3_tx_queue *tq, | ||
335 | struct vmxnet3_adapter *adapter) | ||
336 | { | ||
337 | int i; | ||
338 | |||
339 | /* reset the tx ring contents to 0 and reset the tx ring states */ | ||
340 | memset(tq->tx_ring.base, 0, tq->tx_ring.size * | ||
341 | sizeof(struct Vmxnet3_TxDesc)); | ||
342 | tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0; | ||
343 | tq->tx_ring.gen = VMXNET3_INIT_GEN; | ||
344 | |||
345 | memset(tq->data_ring.base, 0, tq->data_ring.size * | ||
346 | sizeof(struct Vmxnet3_TxDataDesc)); | ||
347 | |||
348 | /* reset the tx comp ring contents to 0 and reset comp ring states */ | ||
349 | memset(tq->comp_ring.base, 0, tq->comp_ring.size * | ||
350 | sizeof(struct Vmxnet3_TxCompDesc)); | ||
351 | tq->comp_ring.next2proc = 0; | ||
352 | tq->comp_ring.gen = VMXNET3_INIT_GEN; | ||
353 | |||
354 | /* reset the bookkeeping data */ | ||
355 | memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size); | ||
356 | for (i = 0; i < tq->tx_ring.size; i++) | ||
357 | tq->buf_info[i].map_type = VMXNET3_MAP_NONE; | ||
358 | |||
359 | /* stats are not reset */ | ||
360 | } | ||
361 | |||
362 | |||
363 | static int | ||
364 | vmxnet3_tq_create(struct vmxnet3_tx_queue *tq, | ||
365 | struct vmxnet3_adapter *adapter) | ||
366 | { | ||
367 | BUG_ON(tq->tx_ring.base || tq->data_ring.base || | ||
368 | tq->comp_ring.base || tq->buf_info); | ||
369 | |||
370 | tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size | ||
371 | * sizeof(struct Vmxnet3_TxDesc), | ||
372 | &tq->tx_ring.basePA); | ||
373 | if (!tq->tx_ring.base) { | ||
374 | printk(KERN_ERR "%s: failed to allocate tx ring\n", | ||
375 | adapter->netdev->name); | ||
376 | goto err; | ||
377 | } | ||
378 | |||
379 | tq->data_ring.base = pci_alloc_consistent(adapter->pdev, | ||
380 | tq->data_ring.size * | ||
381 | sizeof(struct Vmxnet3_TxDataDesc), | ||
382 | &tq->data_ring.basePA); | ||
383 | if (!tq->data_ring.base) { | ||
384 | printk(KERN_ERR "%s: failed to allocate data ring\n", | ||
385 | adapter->netdev->name); | ||
386 | goto err; | ||
387 | } | ||
388 | |||
389 | tq->comp_ring.base = pci_alloc_consistent(adapter->pdev, | ||
390 | tq->comp_ring.size * | ||
391 | sizeof(struct Vmxnet3_TxCompDesc), | ||
392 | &tq->comp_ring.basePA); | ||
393 | if (!tq->comp_ring.base) { | ||
394 | printk(KERN_ERR "%s: failed to allocate tx comp ring\n", | ||
395 | adapter->netdev->name); | ||
396 | goto err; | ||
397 | } | ||
398 | |||
399 | tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]), | ||
400 | GFP_KERNEL); | ||
401 | if (!tq->buf_info) { | ||
402 | printk(KERN_ERR "%s: failed to allocate tx bufinfo\n", | ||
403 | adapter->netdev->name); | ||
404 | goto err; | ||
405 | } | ||
406 | |||
407 | return 0; | ||
408 | |||
409 | err: | ||
410 | vmxnet3_tq_destroy(tq, adapter); | ||
411 | return -ENOMEM; | ||
412 | } | ||
413 | |||
414 | |||
415 | /* | ||
416 | * starting from ring->next2fill, allocate rx buffers for the given ring | ||
417 | * of the rx queue and update the rx desc. stop after @num_to_alloc buffers | ||
418 | * are allocated or allocation fails | ||
419 | */ | ||
420 | |||
421 | static int | ||
422 | vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, | ||
423 | int num_to_alloc, struct vmxnet3_adapter *adapter) | ||
424 | { | ||
425 | int num_allocated = 0; | ||
426 | struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx]; | ||
427 | struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx]; | ||
428 | u32 val; | ||
429 | |||
430 | while (num_allocated < num_to_alloc) { | ||
431 | struct vmxnet3_rx_buf_info *rbi; | ||
432 | union Vmxnet3_GenericDesc *gd; | ||
433 | |||
434 | rbi = rbi_base + ring->next2fill; | ||
435 | gd = ring->base + ring->next2fill; | ||
436 | |||
437 | if (rbi->buf_type == VMXNET3_RX_BUF_SKB) { | ||
438 | if (rbi->skb == NULL) { | ||
439 | rbi->skb = dev_alloc_skb(rbi->len + | ||
440 | NET_IP_ALIGN); | ||
441 | if (unlikely(rbi->skb == NULL)) { | ||
442 | rq->stats.rx_buf_alloc_failure++; | ||
443 | break; | ||
444 | } | ||
445 | rbi->skb->dev = adapter->netdev; | ||
446 | |||
447 | skb_reserve(rbi->skb, NET_IP_ALIGN); | ||
448 | rbi->dma_addr = pci_map_single(adapter->pdev, | ||
449 | rbi->skb->data, rbi->len, | ||
450 | PCI_DMA_FROMDEVICE); | ||
451 | } else { | ||
452 | /* rx buffer skipped by the device */ | ||
453 | } | ||
454 | val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT; | ||
455 | } else { | ||
456 | BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE || | ||
457 | rbi->len != PAGE_SIZE); | ||
458 | |||
459 | if (rbi->page == NULL) { | ||
460 | rbi->page = alloc_page(GFP_ATOMIC); | ||
461 | if (unlikely(rbi->page == NULL)) { | ||
462 | rq->stats.rx_buf_alloc_failure++; | ||
463 | break; | ||
464 | } | ||
465 | rbi->dma_addr = pci_map_page(adapter->pdev, | ||
466 | rbi->page, 0, PAGE_SIZE, | ||
467 | PCI_DMA_FROMDEVICE); | ||
468 | } else { | ||
469 | /* rx buffers skipped by the device */ | ||
470 | } | ||
471 | val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT; | ||
472 | } | ||
473 | |||
474 | BUG_ON(rbi->dma_addr == 0); | ||
475 | gd->rxd.addr = rbi->dma_addr; | ||
476 | gd->dword[2] = (ring->gen << VMXNET3_RXD_GEN_SHIFT) | val | | ||
477 | rbi->len; | ||
478 | |||
479 | num_allocated++; | ||
480 | vmxnet3_cmd_ring_adv_next2fill(ring); | ||
481 | } | ||
482 | rq->uncommitted[ring_idx] += num_allocated; | ||
483 | |||
484 | dprintk(KERN_ERR "alloc_rx_buf: %d allocated, next2fill %u, next2comp " | ||
485 | "%u, uncommitted %u\n", num_allocated, ring->next2fill, | ||
486 | ring->next2comp, rq->uncommitted[ring_idx]); | ||
487 | |||
488 | /* so that the device can distinguish a full ring and an empty ring */ | ||
489 | BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp); | ||
490 | |||
491 | return num_allocated; | ||
492 | } | ||
493 | |||
494 | |||
495 | static void | ||
496 | vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd, | ||
497 | struct vmxnet3_rx_buf_info *rbi) | ||
498 | { | ||
499 | struct skb_frag_struct *frag = skb_shinfo(skb)->frags + | ||
500 | skb_shinfo(skb)->nr_frags; | ||
501 | |||
502 | BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS); | ||
503 | |||
504 | frag->page = rbi->page; | ||
505 | frag->page_offset = 0; | ||
506 | frag->size = rcd->len; | ||
507 | skb->data_len += frag->size; | ||
508 | skb_shinfo(skb)->nr_frags++; | ||
509 | } | ||
510 | |||
511 | |||
512 | static void | ||
513 | vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, | ||
514 | struct vmxnet3_tx_queue *tq, struct pci_dev *pdev, | ||
515 | struct vmxnet3_adapter *adapter) | ||
516 | { | ||
517 | u32 dw2, len; | ||
518 | unsigned long buf_offset; | ||
519 | int i; | ||
520 | union Vmxnet3_GenericDesc *gdesc; | ||
521 | struct vmxnet3_tx_buf_info *tbi = NULL; | ||
522 | |||
523 | BUG_ON(ctx->copy_size > skb_headlen(skb)); | ||
524 | |||
525 | /* use the previous gen bit for the SOP desc */ | ||
526 | dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT; | ||
527 | |||
528 | ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill; | ||
529 | gdesc = ctx->sop_txd; /* both loops below can be skipped */ | ||
530 | |||
531 | /* no need to map the buffer if headers are copied */ | ||
532 | if (ctx->copy_size) { | ||
533 | ctx->sop_txd->txd.addr = tq->data_ring.basePA + | ||
534 | tq->tx_ring.next2fill * | ||
535 | sizeof(struct Vmxnet3_TxDataDesc); | ||
536 | ctx->sop_txd->dword[2] = dw2 | ctx->copy_size; | ||
537 | ctx->sop_txd->dword[3] = 0; | ||
538 | |||
539 | tbi = tq->buf_info + tq->tx_ring.next2fill; | ||
540 | tbi->map_type = VMXNET3_MAP_NONE; | ||
541 | |||
542 | dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n", | ||
543 | tq->tx_ring.next2fill, ctx->sop_txd->txd.addr, | ||
544 | ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]); | ||
545 | vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); | ||
546 | |||
547 | /* use the right gen for non-SOP desc */ | ||
548 | dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; | ||
549 | } | ||
550 | |||
551 | /* linear part can use multiple tx desc if it's big */ | ||
552 | len = skb_headlen(skb) - ctx->copy_size; | ||
553 | buf_offset = ctx->copy_size; | ||
554 | while (len) { | ||
555 | u32 buf_size; | ||
556 | |||
557 | buf_size = len > VMXNET3_MAX_TX_BUF_SIZE ? | ||
558 | VMXNET3_MAX_TX_BUF_SIZE : len; | ||
559 | |||
560 | tbi = tq->buf_info + tq->tx_ring.next2fill; | ||
561 | tbi->map_type = VMXNET3_MAP_SINGLE; | ||
562 | tbi->dma_addr = pci_map_single(adapter->pdev, | ||
563 | skb->data + buf_offset, buf_size, | ||
564 | PCI_DMA_TODEVICE); | ||
565 | |||
566 | tbi->len = buf_size; /* this automatically converts 2^14 to 0 */ | ||
567 | |||
568 | gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; | ||
569 | BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); | ||
570 | |||
571 | gdesc->txd.addr = tbi->dma_addr; | ||
572 | gdesc->dword[2] = dw2 | buf_size; | ||
573 | gdesc->dword[3] = 0; | ||
574 | |||
575 | dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n", | ||
576 | tq->tx_ring.next2fill, gdesc->txd.addr, | ||
577 | gdesc->dword[2], gdesc->dword[3]); | ||
578 | vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); | ||
579 | dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; | ||
580 | |||
581 | len -= buf_size; | ||
582 | buf_offset += buf_size; | ||
583 | } | ||
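/*
 * Worked example (assuming VMXNET3_MAX_TX_BUF_SIZE == 1 << 14, as the comment
 * above suggests): a 20000-byte linear area with copy_size 0 is split into two
 * descriptors of 16384 and 3616 bytes.  The 16384-byte chunk is written as 0
 * into the 14-bit len field, which the device reads back as the maximum size.
 */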
584 | |||
585 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | ||
586 | struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; | ||
587 | |||
588 | tbi = tq->buf_info + tq->tx_ring.next2fill; | ||
589 | tbi->map_type = VMXNET3_MAP_PAGE; | ||
590 | tbi->dma_addr = pci_map_page(adapter->pdev, frag->page, | ||
591 | frag->page_offset, frag->size, | ||
592 | PCI_DMA_TODEVICE); | ||
593 | |||
594 | tbi->len = frag->size; | ||
595 | |||
596 | gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; | ||
597 | BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); | ||
598 | |||
599 | gdesc->txd.addr = tbi->dma_addr; | ||
600 | gdesc->dword[2] = dw2 | frag->size; | ||
601 | gdesc->dword[3] = 0; | ||
602 | |||
603 | dprintk(KERN_ERR "txd[%u]: 0x%llu %u %u\n", | ||
604 | tq->tx_ring.next2fill, gdesc->txd.addr, | ||
605 | gdesc->dword[2], gdesc->dword[3]); | ||
606 | vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); | ||
607 | dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; | ||
608 | } | ||
609 | |||
610 | ctx->eop_txd = gdesc; | ||
611 | |||
612 | /* set the last buf_info for the pkt */ | ||
613 | tbi->skb = skb; | ||
614 | tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base; | ||
615 | } | ||
616 | |||
617 | |||
618 | /* | ||
619 | * parse and copy relevant protocol headers: | ||
620 | * For a tso pkt, relevant headers are L2/3/4 including options | ||
621 | * For a pkt requesting csum offloading, they are L2/3 and may include L4 | ||
622 | * if it's a TCP/UDP pkt | ||
623 | * | ||
624 | * Returns: | ||
625 | * -1: error happens during parsing | ||
626 | * 0: protocol headers parsed, but too big to be copied | ||
627 | * 1: protocol headers parsed and copied | ||
628 | * | ||
629 | * Other effects: | ||
630 | * 1. related *ctx fields are updated. | ||
631 | * 2. ctx->copy_size is # of bytes copied | ||
632 | * 3. the portion copied is guaranteed to be in the linear part | ||
633 | * | ||
634 | */ | ||
635 | static int | ||
636 | vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, | ||
637 | struct vmxnet3_tx_ctx *ctx, | ||
638 | struct vmxnet3_adapter *adapter) | ||
639 | { | ||
640 | struct Vmxnet3_TxDataDesc *tdd; | ||
641 | |||
642 | if (ctx->mss) { | ||
643 | ctx->eth_ip_hdr_size = skb_transport_offset(skb); | ||
644 | ctx->l4_hdr_size = ((struct tcphdr *) | ||
645 | skb_transport_header(skb))->doff * 4; | ||
646 | ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size; | ||
647 | } else { | ||
648 | unsigned int pull_size; | ||
649 | |||
650 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
651 | ctx->eth_ip_hdr_size = skb_transport_offset(skb); | ||
652 | |||
653 | if (ctx->ipv4) { | ||
654 | struct iphdr *iph = (struct iphdr *) | ||
655 | skb_network_header(skb); | ||
656 | if (iph->protocol == IPPROTO_TCP) { | ||
657 | pull_size = ctx->eth_ip_hdr_size + | ||
658 | sizeof(struct tcphdr); | ||
659 | |||
660 | if (unlikely(!pskb_may_pull(skb, | ||
661 | pull_size))) { | ||
662 | goto err; | ||
663 | } | ||
664 | ctx->l4_hdr_size = ((struct tcphdr *) | ||
665 | skb_transport_header(skb))->doff * 4; | ||
666 | } else if (iph->protocol == IPPROTO_UDP) { | ||
667 | ctx->l4_hdr_size = | ||
668 | sizeof(struct udphdr); | ||
669 | } else { | ||
670 | ctx->l4_hdr_size = 0; | ||
671 | } | ||
672 | } else { | ||
673 | /* for simplicity, don't copy L4 headers */ | ||
674 | ctx->l4_hdr_size = 0; | ||
675 | } | ||
676 | ctx->copy_size = ctx->eth_ip_hdr_size + | ||
677 | ctx->l4_hdr_size; | ||
678 | } else { | ||
679 | ctx->eth_ip_hdr_size = 0; | ||
680 | ctx->l4_hdr_size = 0; | ||
681 | /* copy as much as allowed */ | ||
682 | ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE | ||
683 | , skb_headlen(skb)); | ||
684 | } | ||
685 | |||
686 | /* make sure headers are accessible directly */ | ||
687 | if (unlikely(!pskb_may_pull(skb, ctx->copy_size))) | ||
688 | goto err; | ||
689 | } | ||
690 | |||
691 | if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) { | ||
692 | tq->stats.oversized_hdr++; | ||
693 | ctx->copy_size = 0; | ||
694 | return 0; | ||
695 | } | ||
696 | |||
697 | tdd = tq->data_ring.base + tq->tx_ring.next2fill; | ||
698 | |||
699 | memcpy(tdd->data, skb->data, ctx->copy_size); | ||
700 | dprintk(KERN_ERR "copy %u bytes to dataRing[%u]\n", | ||
701 | ctx->copy_size, tq->tx_ring.next2fill); | ||
702 | return 1; | ||
703 | |||
704 | err: | ||
705 | return -1; | ||
706 | } | ||
707 | |||
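/*
 * Worked example: for a TSO TCP/IPv4 packet with no IP or TCP options,
 * eth_ip_hdr_size is 14 + 20 = 34 and l4_hdr_size is doff * 4 = 20, so
 * copy_size comes out to 54 bytes and the headers are copied into the data
 * ring instead of being DMA-mapped separately.
 */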
708 | |||
709 | static void | ||
710 | vmxnet3_prepare_tso(struct sk_buff *skb, | ||
711 | struct vmxnet3_tx_ctx *ctx) | ||
712 | { | ||
713 | struct tcphdr *tcph = (struct tcphdr *)skb_transport_header(skb); | ||
714 | if (ctx->ipv4) { | ||
715 | struct iphdr *iph = (struct iphdr *)skb_network_header(skb); | ||
716 | iph->check = 0; | ||
717 | tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, | ||
718 | IPPROTO_TCP, 0); | ||
719 | } else { | ||
720 | struct ipv6hdr *iph = (struct ipv6hdr *)skb_network_header(skb); | ||
721 | tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0, | ||
722 | IPPROTO_TCP, 0); | ||
723 | } | ||
724 | } | ||
725 | |||
726 | |||
727 | /* | ||
728 | * Transmits a pkt thru a given tq | ||
729 | * Returns: | ||
730 | * NETDEV_TX_OK: descriptors are setup successfully | ||
731 | * NETDEV_TX_OK: error occurred, the pkt is dropped | ||
732 | * NETDEV_TX_BUSY: tx ring is full, queue is stopped | ||
733 | * | ||
734 | * Side-effects: | ||
735 | * 1. tx ring may be changed | ||
736 | * 2. tq stats may be updated accordingly | ||
737 | * 3. shared->txNumDeferred may be updated | ||
738 | */ | ||
739 | |||
740 | static int | ||
741 | vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, | ||
742 | struct vmxnet3_adapter *adapter, struct net_device *netdev) | ||
743 | { | ||
744 | int ret; | ||
745 | u32 count; | ||
746 | unsigned long flags; | ||
747 | struct vmxnet3_tx_ctx ctx; | ||
748 | union Vmxnet3_GenericDesc *gdesc; | ||
749 | |||
750 | /* conservatively estimate # of descriptors to use */ | ||
751 | count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + | ||
752 | skb_shinfo(skb)->nr_frags + 1; | ||
753 | |||
754 | ctx.ipv4 = (skb->protocol == __constant_ntohs(ETH_P_IP)); | ||
755 | |||
756 | ctx.mss = skb_shinfo(skb)->gso_size; | ||
757 | if (ctx.mss) { | ||
758 | if (skb_header_cloned(skb)) { | ||
759 | if (unlikely(pskb_expand_head(skb, 0, 0, | ||
760 | GFP_ATOMIC) != 0)) { | ||
761 | tq->stats.drop_tso++; | ||
762 | goto drop_pkt; | ||
763 | } | ||
764 | tq->stats.copy_skb_header++; | ||
765 | } | ||
766 | vmxnet3_prepare_tso(skb, &ctx); | ||
767 | } else { | ||
768 | if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) { | ||
769 | |||
770 | /* non-tso pkts must not use more than | ||
771 | * VMXNET3_MAX_TXD_PER_PKT entries | ||
772 | */ | ||
773 | if (skb_linearize(skb) != 0) { | ||
774 | tq->stats.drop_too_many_frags++; | ||
775 | goto drop_pkt; | ||
776 | } | ||
777 | tq->stats.linearized++; | ||
778 | |||
779 | /* recalculate the # of descriptors to use */ | ||
780 | count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1; | ||
781 | } | ||
782 | } | ||
783 | |||
784 | ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter); | ||
785 | if (ret >= 0) { | ||
786 | BUG_ON(ret <= 0 && ctx.copy_size != 0); | ||
787 | /* hdrs parsed, check against other limits */ | ||
788 | if (ctx.mss) { | ||
789 | if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size > | ||
790 | VMXNET3_MAX_TX_BUF_SIZE)) { | ||
791 | goto hdr_too_big; | ||
792 | } | ||
793 | } else { | ||
794 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
795 | if (unlikely(ctx.eth_ip_hdr_size + | ||
796 | skb->csum_offset > | ||
797 | VMXNET3_MAX_CSUM_OFFSET)) { | ||
798 | goto hdr_too_big; | ||
799 | } | ||
800 | } | ||
801 | } | ||
802 | } else { | ||
803 | tq->stats.drop_hdr_inspect_err++; | ||
804 | goto drop_pkt; | ||
805 | } | ||
806 | |||
807 | spin_lock_irqsave(&tq->tx_lock, flags); | ||
808 | |||
809 | if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) { | ||
810 | tq->stats.tx_ring_full++; | ||
811 | dprintk(KERN_ERR "tx queue stopped on %s, next2comp %u" | ||
812 | " next2fill %u\n", adapter->netdev->name, | ||
813 | tq->tx_ring.next2comp, tq->tx_ring.next2fill); | ||
814 | |||
815 | vmxnet3_tq_stop(tq, adapter); | ||
816 | spin_unlock_irqrestore(&tq->tx_lock, flags); | ||
817 | return NETDEV_TX_BUSY; | ||
818 | } | ||
819 | |||
820 | /* fill tx descs related to addr & len */ | ||
821 | vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter); | ||
822 | |||
823 | /* setup the EOP desc */ | ||
824 | ctx.eop_txd->dword[3] = VMXNET3_TXD_CQ | VMXNET3_TXD_EOP; | ||
825 | |||
826 | /* setup the SOP desc */ | ||
827 | gdesc = ctx.sop_txd; | ||
828 | if (ctx.mss) { | ||
829 | gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size; | ||
830 | gdesc->txd.om = VMXNET3_OM_TSO; | ||
831 | gdesc->txd.msscof = ctx.mss; | ||
832 | tq->shared->txNumDeferred += (skb->len - gdesc->txd.hlen + | ||
833 | ctx.mss - 1) / ctx.mss; | ||
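/*
 * Worked example: this is a ceiling division over the TSO payload.  With
 * skb->len - hlen == 65520 and an MSS of 1460, (65520 + 1459) / 1460 == 45,
 * i.e. the packet is counted as 45 deferred segments before the doorbell
 * decision below.
 */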
834 | } else { | ||
835 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
836 | gdesc->txd.hlen = ctx.eth_ip_hdr_size; | ||
837 | gdesc->txd.om = VMXNET3_OM_CSUM; | ||
838 | gdesc->txd.msscof = ctx.eth_ip_hdr_size + | ||
839 | skb->csum_offset; | ||
840 | } else { | ||
841 | gdesc->txd.om = 0; | ||
842 | gdesc->txd.msscof = 0; | ||
843 | } | ||
844 | tq->shared->txNumDeferred++; | ||
845 | } | ||
846 | |||
847 | if (vlan_tx_tag_present(skb)) { | ||
848 | gdesc->txd.ti = 1; | ||
849 | gdesc->txd.tci = vlan_tx_tag_get(skb); | ||
850 | } | ||
851 | |||
852 | wmb(); | ||
853 | |||
854 | /* finally flips the GEN bit of the SOP desc */ | ||
855 | gdesc->dword[2] ^= VMXNET3_TXD_GEN; | ||
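/*
 * Illustrative note: the device only considers a descriptor valid once its
 * gen bit matches the ring's current generation, so the SOP descriptor is
 * published last -- after the wmb() above has made the rest of the packet's
 * descriptors visible.  Flipping this single bit hands the whole chain over
 * atomically.
 */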
856 | dprintk(KERN_ERR "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n", | ||
857 | (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd - | ||
858 | tq->tx_ring.base), gdesc->txd.addr, gdesc->dword[2], | ||
859 | gdesc->dword[3]); | ||
860 | |||
861 | spin_unlock_irqrestore(&tq->tx_lock, flags); | ||
862 | |||
863 | if (tq->shared->txNumDeferred >= tq->shared->txThreshold) { | ||
864 | tq->shared->txNumDeferred = 0; | ||
865 | VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD, | ||
866 | tq->tx_ring.next2fill); | ||
867 | } | ||
868 | netdev->trans_start = jiffies; | ||
869 | |||
870 | return NETDEV_TX_OK; | ||
871 | |||
872 | hdr_too_big: | ||
873 | tq->stats.drop_oversized_hdr++; | ||
874 | drop_pkt: | ||
875 | tq->stats.drop_total++; | ||
876 | dev_kfree_skb(skb); | ||
877 | return NETDEV_TX_OK; | ||
878 | } | ||
879 | |||
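/*
 * Illustrative note (assuming VMXNET3_TXD_NEEDED() rounds the linear length up
 * to whole VMXNET3_MAX_TX_BUF_SIZE chunks): the "conservative estimate" above
 * reserves one descriptor per chunk of the linear area, one per page fragment
 * and one for the copied headers.  A non-TSO skb with a 200-byte linear part
 * and 3 fragments therefore reserves 1 + 3 + 1 = 5 descriptors, even if fewer
 * end up being used.
 */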
880 | |||
881 | static netdev_tx_t | ||
882 | vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | ||
883 | { | ||
884 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
885 | struct vmxnet3_tx_queue *tq = &adapter->tx_queue; | ||
886 | |||
887 | return vmxnet3_tq_xmit(skb, tq, adapter, netdev); | ||
888 | } | ||
889 | |||
890 | |||
891 | static void | ||
892 | vmxnet3_rx_csum(struct vmxnet3_adapter *adapter, | ||
893 | struct sk_buff *skb, | ||
894 | union Vmxnet3_GenericDesc *gdesc) | ||
895 | { | ||
896 | if (!gdesc->rcd.cnc && adapter->rxcsum) { | ||
897 | /* typical case: TCP/UDP over IP and both csums are correct */ | ||
898 | if ((gdesc->dword[3] & VMXNET3_RCD_CSUM_OK) == | ||
899 | VMXNET3_RCD_CSUM_OK) { | ||
900 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
901 | BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp)); | ||
902 | BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6)); | ||
903 | BUG_ON(gdesc->rcd.frg); | ||
904 | } else { | ||
905 | if (gdesc->rcd.csum) { | ||
906 | skb->csum = htons(gdesc->rcd.csum); | ||
907 | skb->ip_summed = CHECKSUM_PARTIAL; | ||
908 | } else { | ||
909 | skb->ip_summed = CHECKSUM_NONE; | ||
910 | } | ||
911 | } | ||
912 | } else { | ||
913 | skb->ip_summed = CHECKSUM_NONE; | ||
914 | } | ||
915 | } | ||
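/*
 * Assumption about the completion descriptor: VMXNET3_RCD_CSUM_OK appears to
 * combine the IP- and L4-checksum-valid bits of dword[3], so the
 * CHECKSUM_UNNECESSARY path is taken only when the device validated both.
 * The BUG_ONs then assert what that implies: the frame is TCP or UDP, IPv4 or
 * IPv6, and not an IP fragment.
 */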
916 | |||
917 | |||
918 | static void | ||
919 | vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd, | ||
920 | struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter) | ||
921 | { | ||
922 | rq->stats.drop_err++; | ||
923 | if (!rcd->fcs) | ||
924 | rq->stats.drop_fcs++; | ||
925 | |||
926 | rq->stats.drop_total++; | ||
927 | |||
928 | /* | ||
929 | * We do not unmap and chain the rx buffer to the skb. | ||
930 | * We basically pretend this buffer is not used and will be recycled | ||
931 | * by vmxnet3_rq_alloc_rx_buf() | ||
932 | */ | ||
933 | |||
934 | /* | ||
935 | * ctx->skb may be NULL if this is the first and the only one | ||
936 | * desc for the pkt | ||
937 | */ | ||
938 | if (ctx->skb) | ||
939 | dev_kfree_skb_irq(ctx->skb); | ||
940 | |||
941 | ctx->skb = NULL; | ||
942 | } | ||
943 | |||
944 | |||
945 | static int | ||
946 | vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, | ||
947 | struct vmxnet3_adapter *adapter, int quota) | ||
948 | { | ||
949 | static u32 rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2}; | ||
950 | u32 num_rxd = 0; | ||
951 | struct Vmxnet3_RxCompDesc *rcd; | ||
952 | struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx; | ||
953 | |||
954 | rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd; | ||
955 | while (rcd->gen == rq->comp_ring.gen) { | ||
956 | struct vmxnet3_rx_buf_info *rbi; | ||
957 | struct sk_buff *skb; | ||
958 | int num_to_alloc; | ||
959 | struct Vmxnet3_RxDesc *rxd; | ||
960 | u32 idx, ring_idx; | ||
961 | |||
962 | if (num_rxd >= quota) { | ||
963 | /* we may stop even before we see the EOP desc of | ||
964 | * the current pkt | ||
965 | */ | ||
966 | break; | ||
967 | } | ||
968 | num_rxd++; | ||
969 | |||
970 | idx = rcd->rxdIdx; | ||
971 | ring_idx = rcd->rqID == rq->qid ? 0 : 1; | ||
972 | |||
973 | rxd = &rq->rx_ring[ring_idx].base[idx].rxd; | ||
974 | rbi = rq->buf_info[ring_idx] + idx; | ||
975 | |||
976 | BUG_ON(rxd->addr != rbi->dma_addr || rxd->len != rbi->len); | ||
977 | |||
978 | if (unlikely(rcd->eop && rcd->err)) { | ||
979 | vmxnet3_rx_error(rq, rcd, ctx, adapter); | ||
980 | goto rcd_done; | ||
981 | } | ||
982 | |||
983 | if (rcd->sop) { /* first buf of the pkt */ | ||
984 | BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD || | ||
985 | rcd->rqID != rq->qid); | ||
986 | |||
987 | BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB); | ||
988 | BUG_ON(ctx->skb != NULL || rbi->skb == NULL); | ||
989 | |||
990 | if (unlikely(rcd->len == 0)) { | ||
991 | /* Pretend the rx buffer is skipped. */ | ||
992 | BUG_ON(!(rcd->sop && rcd->eop)); | ||
993 | dprintk(KERN_ERR "rxRing[%u][%u] 0 length\n", | ||
994 | ring_idx, idx); | ||
995 | goto rcd_done; | ||
996 | } | ||
997 | |||
998 | ctx->skb = rbi->skb; | ||
999 | rbi->skb = NULL; | ||
1000 | |||
1001 | pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len, | ||
1002 | PCI_DMA_FROMDEVICE); | ||
1003 | |||
1004 | skb_put(ctx->skb, rcd->len); | ||
1005 | } else { | ||
1006 | BUG_ON(ctx->skb == NULL); | ||
1007 | /* a non-SOP buffer must be type 1 in most cases */ | ||
1008 | if (rbi->buf_type == VMXNET3_RX_BUF_PAGE) { | ||
1009 | BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY); | ||
1010 | |||
1011 | if (rcd->len) { | ||
1012 | pci_unmap_page(adapter->pdev, | ||
1013 | rbi->dma_addr, rbi->len, | ||
1014 | PCI_DMA_FROMDEVICE); | ||
1015 | |||
1016 | vmxnet3_append_frag(ctx->skb, rcd, rbi); | ||
1017 | rbi->page = NULL; | ||
1018 | } | ||
1019 | } else { | ||
1020 | /* | ||
1021 | * The only time a non-SOP buffer is type 0 is | ||
1022 | * when it's EOP and error flag is raised, which | ||
1023 | * has already been handled. | ||
1024 | */ | ||
1025 | BUG_ON(true); | ||
1026 | } | ||
1027 | } | ||
1028 | |||
1029 | skb = ctx->skb; | ||
1030 | if (rcd->eop) { | ||
1031 | skb->len += skb->data_len; | ||
1032 | skb->truesize += skb->data_len; | ||
1033 | |||
1034 | vmxnet3_rx_csum(adapter, skb, | ||
1035 | (union Vmxnet3_GenericDesc *)rcd); | ||
1036 | skb->protocol = eth_type_trans(skb, adapter->netdev); | ||
1037 | |||
1038 | if (unlikely(adapter->vlan_grp && rcd->ts)) { | ||
1039 | vlan_hwaccel_receive_skb(skb, | ||
1040 | adapter->vlan_grp, rcd->tci); | ||
1041 | } else { | ||
1042 | netif_receive_skb(skb); | ||
1043 | } | ||
1044 | |||
1045 | adapter->netdev->last_rx = jiffies; | ||
1046 | ctx->skb = NULL; | ||
1047 | } | ||
1048 | |||
1049 | rcd_done: | ||
1050 | /* device may skip some rx descs */ | ||
1051 | rq->rx_ring[ring_idx].next2comp = idx; | ||
1052 | VMXNET3_INC_RING_IDX_ONLY(rq->rx_ring[ring_idx].next2comp, | ||
1053 | rq->rx_ring[ring_idx].size); | ||
1054 | |||
1055 | /* refill rx buffers frequently to avoid starving the h/w */ | ||
1056 | num_to_alloc = vmxnet3_cmd_ring_desc_avail(rq->rx_ring + | ||
1057 | ring_idx); | ||
1058 | if (unlikely(num_to_alloc > VMXNET3_RX_ALLOC_THRESHOLD(rq, | ||
1059 | ring_idx, adapter))) { | ||
1060 | vmxnet3_rq_alloc_rx_buf(rq, ring_idx, num_to_alloc, | ||
1061 | adapter); | ||
1062 | |||
1063 | /* if needed, update the register */ | ||
1064 | if (unlikely(rq->shared->updateRxProd)) { | ||
1065 | VMXNET3_WRITE_BAR0_REG(adapter, | ||
1066 | rxprod_reg[ring_idx] + rq->qid * 8, | ||
1067 | rq->rx_ring[ring_idx].next2fill); | ||
1068 | rq->uncommitted[ring_idx] = 0; | ||
1069 | } | ||
1070 | } | ||
1071 | |||
1072 | vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring); | ||
1073 | rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd; | ||
1074 | } | ||
1075 | |||
1076 | return num_rxd; | ||
1077 | } | ||
1078 | |||
1079 | |||
1080 | static void | ||
1081 | vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq, | ||
1082 | struct vmxnet3_adapter *adapter) | ||
1083 | { | ||
1084 | u32 i, ring_idx; | ||
1085 | struct Vmxnet3_RxDesc *rxd; | ||
1086 | |||
1087 | for (ring_idx = 0; ring_idx < 2; ring_idx++) { | ||
1088 | for (i = 0; i < rq->rx_ring[ring_idx].size; i++) { | ||
1089 | rxd = &rq->rx_ring[ring_idx].base[i].rxd; | ||
1090 | |||
1091 | if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD && | ||
1092 | rq->buf_info[ring_idx][i].skb) { | ||
1093 | pci_unmap_single(adapter->pdev, rxd->addr, | ||
1094 | rxd->len, PCI_DMA_FROMDEVICE); | ||
1095 | dev_kfree_skb(rq->buf_info[ring_idx][i].skb); | ||
1096 | rq->buf_info[ring_idx][i].skb = NULL; | ||
1097 | } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY && | ||
1098 | rq->buf_info[ring_idx][i].page) { | ||
1099 | pci_unmap_page(adapter->pdev, rxd->addr, | ||
1100 | rxd->len, PCI_DMA_FROMDEVICE); | ||
1101 | put_page(rq->buf_info[ring_idx][i].page); | ||
1102 | rq->buf_info[ring_idx][i].page = NULL; | ||
1103 | } | ||
1104 | } | ||
1105 | |||
1106 | rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN; | ||
1107 | rq->rx_ring[ring_idx].next2fill = | ||
1108 | rq->rx_ring[ring_idx].next2comp = 0; | ||
1109 | rq->uncommitted[ring_idx] = 0; | ||
1110 | } | ||
1111 | |||
1112 | rq->comp_ring.gen = VMXNET3_INIT_GEN; | ||
1113 | rq->comp_ring.next2proc = 0; | ||
1114 | } | ||
1115 | |||
1116 | |||
1117 | void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, | ||
1118 | struct vmxnet3_adapter *adapter) | ||
1119 | { | ||
1120 | int i; | ||
1121 | int j; | ||
1122 | |||
1123 | /* all rx buffers must have already been freed */ | ||
1124 | for (i = 0; i < 2; i++) { | ||
1125 | if (rq->buf_info[i]) { | ||
1126 | for (j = 0; j < rq->rx_ring[i].size; j++) | ||
1127 | BUG_ON(rq->buf_info[i][j].page != NULL); | ||
1128 | } | ||
1129 | } | ||
1130 | |||
1131 | |||
1132 | kfree(rq->buf_info[0]); | ||
1133 | |||
1134 | for (i = 0; i < 2; i++) { | ||
1135 | if (rq->rx_ring[i].base) { | ||
1136 | pci_free_consistent(adapter->pdev, rq->rx_ring[i].size | ||
1137 | * sizeof(struct Vmxnet3_RxDesc), | ||
1138 | rq->rx_ring[i].base, | ||
1139 | rq->rx_ring[i].basePA); | ||
1140 | rq->rx_ring[i].base = NULL; | ||
1141 | } | ||
1142 | rq->buf_info[i] = NULL; | ||
1143 | } | ||
1144 | |||
1145 | if (rq->comp_ring.base) { | ||
1146 | pci_free_consistent(adapter->pdev, rq->comp_ring.size * | ||
1147 | sizeof(struct Vmxnet3_RxCompDesc), | ||
1148 | rq->comp_ring.base, rq->comp_ring.basePA); | ||
1149 | rq->comp_ring.base = NULL; | ||
1150 | } | ||
1151 | } | ||
1152 | |||
1153 | |||
1154 | static int | ||
1155 | vmxnet3_rq_init(struct vmxnet3_rx_queue *rq, | ||
1156 | struct vmxnet3_adapter *adapter) | ||
1157 | { | ||
1158 | int i; | ||
1159 | |||
1160 | /* initialize buf_info */ | ||
1161 | for (i = 0; i < rq->rx_ring[0].size; i++) { | ||
1162 | |||
1163 | /* 1st buf for a pkt is skbuff */ | ||
1164 | if (i % adapter->rx_buf_per_pkt == 0) { | ||
1165 | rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB; | ||
1166 | rq->buf_info[0][i].len = adapter->skb_buf_size; | ||
1167 | } else { /* subsequent bufs for a pkt are frags */ | ||
1168 | rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE; | ||
1169 | rq->buf_info[0][i].len = PAGE_SIZE; | ||
1170 | } | ||
1171 | } | ||
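/*
 * Illustrative note: with rx_buf_per_pkt == 3 (e.g. a jumbo MTU on a system
 * with 4 KB pages), ring 0 is laid out as SKB, PAGE, PAGE, SKB, PAGE, PAGE,
 * ... so every packet starts in an skb buffer and overflows into page
 * buffers.
 */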
1172 | for (i = 0; i < rq->rx_ring[1].size; i++) { | ||
1173 | rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE; | ||
1174 | rq->buf_info[1][i].len = PAGE_SIZE; | ||
1175 | } | ||
1176 | |||
1177 | /* reset internal state and allocate buffers for both rings */ | ||
1178 | for (i = 0; i < 2; i++) { | ||
1179 | rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0; | ||
1180 | rq->uncommitted[i] = 0; | ||
1181 | |||
1182 | memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size * | ||
1183 | sizeof(struct Vmxnet3_RxDesc)); | ||
1184 | rq->rx_ring[i].gen = VMXNET3_INIT_GEN; | ||
1185 | } | ||
1186 | if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1, | ||
1187 | adapter) == 0) { | ||
1188 | /* need at least 1 rx buffer for the 1st ring */ | ||
1189 | return -ENOMEM; | ||
1190 | } | ||
1191 | vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter); | ||
1192 | |||
1193 | /* reset the comp ring */ | ||
1194 | rq->comp_ring.next2proc = 0; | ||
1195 | memset(rq->comp_ring.base, 0, rq->comp_ring.size * | ||
1196 | sizeof(struct Vmxnet3_RxCompDesc)); | ||
1197 | rq->comp_ring.gen = VMXNET3_INIT_GEN; | ||
1198 | |||
1199 | /* reset rxctx */ | ||
1200 | rq->rx_ctx.skb = NULL; | ||
1201 | |||
1202 | /* stats are not reset */ | ||
1203 | return 0; | ||
1204 | } | ||
1205 | |||
1206 | |||
1207 | static int | ||
1208 | vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) | ||
1209 | { | ||
1210 | int i; | ||
1211 | size_t sz; | ||
1212 | struct vmxnet3_rx_buf_info *bi; | ||
1213 | |||
1214 | for (i = 0; i < 2; i++) { | ||
1215 | |||
1216 | sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc); | ||
1217 | rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz, | ||
1218 | &rq->rx_ring[i].basePA); | ||
1219 | if (!rq->rx_ring[i].base) { | ||
1220 | printk(KERN_ERR "%s: failed to allocate rx ring %d\n", | ||
1221 | adapter->netdev->name, i); | ||
1222 | goto err; | ||
1223 | } | ||
1224 | } | ||
1225 | |||
1226 | sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc); | ||
1227 | rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz, | ||
1228 | &rq->comp_ring.basePA); | ||
1229 | if (!rq->comp_ring.base) { | ||
1230 | printk(KERN_ERR "%s: failed to allocate rx comp ring\n", | ||
1231 | adapter->netdev->name); | ||
1232 | goto err; | ||
1233 | } | ||
1234 | |||
1235 | sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size + | ||
1236 | rq->rx_ring[1].size); | ||
1237 | bi = kmalloc(sz, GFP_KERNEL); | ||
1238 | if (!bi) { | ||
1239 | printk(KERN_ERR "%s: failed to allocate rx bufinfo\n", | ||
1240 | adapter->netdev->name); | ||
1241 | goto err; | ||
1242 | } | ||
1243 | memset(bi, 0, sz); | ||
1244 | rq->buf_info[0] = bi; | ||
1245 | rq->buf_info[1] = bi + rq->rx_ring[0].size; | ||
1246 | |||
1247 | return 0; | ||
1248 | |||
1249 | err: | ||
1250 | vmxnet3_rq_destroy(rq, adapter); | ||
1251 | return -ENOMEM; | ||
1252 | } | ||
1253 | |||
1254 | |||
1255 | static int | ||
1256 | vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget) | ||
1257 | { | ||
1258 | if (unlikely(adapter->shared->ecr)) | ||
1259 | vmxnet3_process_events(adapter); | ||
1260 | |||
1261 | vmxnet3_tq_tx_complete(&adapter->tx_queue, adapter); | ||
1262 | return vmxnet3_rq_rx_complete(&adapter->rx_queue, adapter, budget); | ||
1263 | } | ||
1264 | |||
1265 | |||
1266 | static int | ||
1267 | vmxnet3_poll(struct napi_struct *napi, int budget) | ||
1268 | { | ||
1269 | struct vmxnet3_adapter *adapter = container_of(napi, | ||
1270 | struct vmxnet3_adapter, napi); | ||
1271 | int rxd_done; | ||
1272 | |||
1273 | rxd_done = vmxnet3_do_poll(adapter, budget); | ||
1274 | |||
1275 | if (rxd_done < budget) { | ||
1276 | napi_complete(napi); | ||
1277 | vmxnet3_enable_intr(adapter, 0); | ||
1278 | } | ||
1279 | return rxd_done; | ||
1280 | } | ||
1281 | |||
1282 | |||
1283 | /* Interrupt handler for vmxnet3 */ | ||
1284 | static irqreturn_t | ||
1285 | vmxnet3_intr(int irq, void *dev_id) | ||
1286 | { | ||
1287 | struct net_device *dev = dev_id; | ||
1288 | struct vmxnet3_adapter *adapter = netdev_priv(dev); | ||
1289 | |||
1290 | if (unlikely(adapter->intr.type == VMXNET3_IT_INTX)) { | ||
1291 | u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR); | ||
1292 | if (unlikely(icr == 0)) | ||
1293 | /* not ours */ | ||
1294 | return IRQ_NONE; | ||
1295 | } | ||
1296 | |||
1297 | |||
1298 | /* disable intr if needed */ | ||
1299 | if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) | ||
1300 | vmxnet3_disable_intr(adapter, 0); | ||
1301 | |||
1302 | napi_schedule(&adapter->napi); | ||
1303 | |||
1304 | return IRQ_HANDLED; | ||
1305 | } | ||
1306 | |||
1307 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1308 | |||
1309 | |||
1310 | /* netpoll callback. */ | ||
1311 | static void | ||
1312 | vmxnet3_netpoll(struct net_device *netdev) | ||
1313 | { | ||
1314 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
1315 | int irq; | ||
1316 | |||
1317 | #ifdef CONFIG_PCI_MSI | ||
1318 | if (adapter->intr.type == VMXNET3_IT_MSIX) | ||
1319 | irq = adapter->intr.msix_entries[0].vector; | ||
1320 | else | ||
1321 | #endif | ||
1322 | irq = adapter->pdev->irq; | ||
1323 | |||
1324 | disable_irq(irq); | ||
1325 | vmxnet3_intr(irq, netdev); | ||
1326 | enable_irq(irq); | ||
1327 | } | ||
1328 | #endif | ||
1329 | |||
1330 | static int | ||
1331 | vmxnet3_request_irqs(struct vmxnet3_adapter *adapter) | ||
1332 | { | ||
1333 | int err; | ||
1334 | |||
1335 | #ifdef CONFIG_PCI_MSI | ||
1336 | if (adapter->intr.type == VMXNET3_IT_MSIX) { | ||
1337 | /* we only use 1 MSI-X vector */ | ||
1338 | err = request_irq(adapter->intr.msix_entries[0].vector, | ||
1339 | vmxnet3_intr, 0, adapter->netdev->name, | ||
1340 | adapter->netdev); | ||
1341 | } else | ||
1342 | #endif | ||
1343 | if (adapter->intr.type == VMXNET3_IT_MSI) { | ||
1344 | err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0, | ||
1345 | adapter->netdev->name, adapter->netdev); | ||
1346 | } else { | ||
1347 | err = request_irq(adapter->pdev->irq, vmxnet3_intr, | ||
1348 | IRQF_SHARED, adapter->netdev->name, | ||
1349 | adapter->netdev); | ||
1350 | } | ||
1351 | |||
1352 | if (err) | ||
1353 | printk(KERN_ERR "Failed to request irq %s (intr type:%d), error" | ||
1354 | ":%d\n", adapter->netdev->name, adapter->intr.type, err); | ||
1355 | |||
1356 | |||
1357 | if (!err) { | ||
1358 | int i; | ||
1359 | /* init our intr settings */ | ||
1360 | for (i = 0; i < adapter->intr.num_intrs; i++) | ||
1361 | adapter->intr.mod_levels[i] = UPT1_IML_ADAPTIVE; | ||
1362 | |||
1363 | /* next setup intr index for all intr sources */ | ||
1364 | adapter->tx_queue.comp_ring.intr_idx = 0; | ||
1365 | adapter->rx_queue.comp_ring.intr_idx = 0; | ||
1366 | adapter->intr.event_intr_idx = 0; | ||
1367 | |||
1368 | printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors " | ||
1369 | "allocated\n", adapter->netdev->name, adapter->intr.type, | ||
1370 | adapter->intr.mask_mode, adapter->intr.num_intrs); | ||
1371 | } | ||
1372 | |||
1373 | return err; | ||
1374 | } | ||
1375 | |||
1376 | |||
1377 | static void | ||
1378 | vmxnet3_free_irqs(struct vmxnet3_adapter *adapter) | ||
1379 | { | ||
1380 | BUG_ON(adapter->intr.type == VMXNET3_IT_AUTO || | ||
1381 | adapter->intr.num_intrs <= 0); | ||
1382 | |||
1383 | switch (adapter->intr.type) { | ||
1384 | #ifdef CONFIG_PCI_MSI | ||
1385 | case VMXNET3_IT_MSIX: | ||
1386 | { | ||
1387 | int i; | ||
1388 | |||
1389 | for (i = 0; i < adapter->intr.num_intrs; i++) | ||
1390 | free_irq(adapter->intr.msix_entries[i].vector, | ||
1391 | adapter->netdev); | ||
1392 | break; | ||
1393 | } | ||
1394 | #endif | ||
1395 | case VMXNET3_IT_MSI: | ||
1396 | free_irq(adapter->pdev->irq, adapter->netdev); | ||
1397 | break; | ||
1398 | case VMXNET3_IT_INTX: | ||
1399 | free_irq(adapter->pdev->irq, adapter->netdev); | ||
1400 | break; | ||
1401 | default: | ||
1402 | BUG_ON(true); | ||
1403 | } | ||
1404 | } | ||
1405 | |||
1406 | |||
1407 | static void | ||
1408 | vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) | ||
1409 | { | ||
1410 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
1411 | struct Vmxnet3_DriverShared *shared = adapter->shared; | ||
1412 | u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; | ||
1413 | |||
1414 | if (grp) { | ||
1415 | /* add vlan rx stripping. */ | ||
1416 | if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) { | ||
1417 | int i; | ||
1418 | struct Vmxnet3_DSDevRead *devRead = &shared->devRead; | ||
1419 | adapter->vlan_grp = grp; | ||
1420 | |||
1421 | /* update FEATURES to device */ | ||
1422 | devRead->misc.uptFeatures |= UPT1_F_RXVLAN; | ||
1423 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | ||
1424 | VMXNET3_CMD_UPDATE_FEATURE); | ||
1425 | /* | ||
1426 | * Clear entire vfTable; then enable untagged pkts. | ||
1427 | * Note: setting one entry in vfTable to non-zero turns | ||
1428 | * on VLAN rx filtering. | ||
1429 | */ | ||
1430 | for (i = 0; i < VMXNET3_VFT_SIZE; i++) | ||
1431 | vfTable[i] = 0; | ||
1432 | |||
1433 | VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0); | ||
1434 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | ||
1435 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); | ||
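/*
 * Assumption about the layout: vfTable looks like a per-VID bitmap, so
 * VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid) presumably does something along the
 * lines of vfTable[vid >> 5] |= 1 << (vid & 31).  Setting entry 0 here keeps
 * untagged frames flowing while per-VID filtering is active.
 */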
1436 | } else { | ||
1437 | printk(KERN_ERR "%s: vlan_rx_register when device has " | ||
1438 | "no NETIF_F_HW_VLAN_RX\n", netdev->name); | ||
1439 | } | ||
1440 | } else { | ||
1441 | /* remove vlan rx stripping. */ | ||
1442 | struct Vmxnet3_DSDevRead *devRead = &shared->devRead; | ||
1443 | adapter->vlan_grp = NULL; | ||
1444 | |||
1445 | if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) { | ||
1446 | int i; | ||
1447 | |||
1448 | for (i = 0; i < VMXNET3_VFT_SIZE; i++) { | ||
1449 | /* clear entire vfTable; this also disables | ||
1450 | * VLAN rx filtering | ||
1451 | */ | ||
1452 | vfTable[i] = 0; | ||
1453 | } | ||
1454 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | ||
1455 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); | ||
1456 | |||
1457 | /* update FEATURES to device */ | ||
1458 | devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN; | ||
1459 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | ||
1460 | VMXNET3_CMD_UPDATE_FEATURE); | ||
1461 | } | ||
1462 | } | ||
1463 | } | ||
1464 | |||
1465 | |||
1466 | static void | ||
1467 | vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter) | ||
1468 | { | ||
1469 | if (adapter->vlan_grp) { | ||
1470 | u16 vid; | ||
1471 | u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; | ||
1472 | bool activeVlan = false; | ||
1473 | |||
1474 | for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { | ||
1475 | if (vlan_group_get_device(adapter->vlan_grp, vid)) { | ||
1476 | VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); | ||
1477 | activeVlan = true; | ||
1478 | } | ||
1479 | } | ||
1480 | if (activeVlan) { | ||
1481 | /* continue to allow untagged pkts */ | ||
1482 | VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0); | ||
1483 | } | ||
1484 | } | ||
1485 | } | ||
1486 | |||
1487 | |||
1488 | static void | ||
1489 | vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid) | ||
1490 | { | ||
1491 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
1492 | u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; | ||
1493 | |||
1494 | VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); | ||
1495 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | ||
1496 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); | ||
1497 | } | ||
1498 | |||
1499 | |||
1500 | static void | ||
1501 | vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | ||
1502 | { | ||
1503 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
1504 | u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; | ||
1505 | |||
1506 | VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid); | ||
1507 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | ||
1508 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); | ||
1509 | } | ||
1510 | |||
1511 | |||
1512 | static u8 * | ||
1513 | vmxnet3_copy_mc(struct net_device *netdev) | ||
1514 | { | ||
1515 | u8 *buf = NULL; | ||
1516 | u32 sz = netdev->mc_count * ETH_ALEN; | ||
1517 | |||
1518 | /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */ | ||
1519 | if (sz <= 0xffff) { | ||
1520 | /* We may be called with BH disabled */ | ||
1521 | buf = kmalloc(sz, GFP_ATOMIC); | ||
1522 | if (buf) { | ||
1523 | int i; | ||
1524 | struct dev_mc_list *mc = netdev->mc_list; | ||
1525 | |||
1526 | for (i = 0; i < netdev->mc_count; i++) { | ||
1527 | BUG_ON(!mc); | ||
1528 | memcpy(buf + i * ETH_ALEN, mc->dmi_addr, | ||
1529 | ETH_ALEN); | ||
1530 | mc = mc->next; | ||
1531 | } | ||
1532 | } | ||
1533 | } | ||
1534 | return buf; | ||
1535 | } | ||
1536 | |||
1537 | |||
1538 | static void | ||
1539 | vmxnet3_set_mc(struct net_device *netdev) | ||
1540 | { | ||
1541 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
1542 | struct Vmxnet3_RxFilterConf *rxConf = | ||
1543 | &adapter->shared->devRead.rxFilterConf; | ||
1544 | u8 *new_table = NULL; | ||
1545 | u32 new_mode = VMXNET3_RXM_UCAST; | ||
1546 | |||
1547 | if (netdev->flags & IFF_PROMISC) | ||
1548 | new_mode |= VMXNET3_RXM_PROMISC; | ||
1549 | |||
1550 | if (netdev->flags & IFF_BROADCAST) | ||
1551 | new_mode |= VMXNET3_RXM_BCAST; | ||
1552 | |||
1553 | if (netdev->flags & IFF_ALLMULTI) | ||
1554 | new_mode |= VMXNET3_RXM_ALL_MULTI; | ||
1555 | else | ||
1556 | if (netdev->mc_count > 0) { | ||
1557 | new_table = vmxnet3_copy_mc(netdev); | ||
1558 | if (new_table) { | ||
1559 | new_mode |= VMXNET3_RXM_MCAST; | ||
1560 | rxConf->mfTableLen = netdev->mc_count * | ||
1561 | ETH_ALEN; | ||
1562 | rxConf->mfTablePA = virt_to_phys(new_table); | ||
1563 | } else { | ||
1564 | printk(KERN_INFO "%s: failed to copy mcast list" | ||
1565 | ", setting ALL_MULTI\n", netdev->name); | ||
1566 | new_mode |= VMXNET3_RXM_ALL_MULTI; | ||
1567 | } | ||
1568 | } | ||
1569 | |||
1570 | |||
1571 | if (!(new_mode & VMXNET3_RXM_MCAST)) { | ||
1572 | rxConf->mfTableLen = 0; | ||
1573 | rxConf->mfTablePA = 0; | ||
1574 | } | ||
1575 | |||
1576 | if (new_mode != rxConf->rxMode) { | ||
1577 | rxConf->rxMode = new_mode; | ||
1578 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | ||
1579 | VMXNET3_CMD_UPDATE_RX_MODE); | ||
1580 | } | ||
1581 | |||
1582 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | ||
1583 | VMXNET3_CMD_UPDATE_MAC_FILTERS); | ||
1584 | |||
1585 | kfree(new_table); | ||
1586 | } | ||
1587 | |||
1588 | |||
1589 | /* | ||
1590 | * Set up driver_shared based on settings in adapter. | ||
1591 | */ | ||
1592 | |||
1593 | static void | ||
1594 | vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) | ||
1595 | { | ||
1596 | struct Vmxnet3_DriverShared *shared = adapter->shared; | ||
1597 | struct Vmxnet3_DSDevRead *devRead = &shared->devRead; | ||
1598 | struct Vmxnet3_TxQueueConf *tqc; | ||
1599 | struct Vmxnet3_RxQueueConf *rqc; | ||
1600 | int i; | ||
1601 | |||
1602 | memset(shared, 0, sizeof(*shared)); | ||
1603 | |||
1604 | /* driver settings */ | ||
1605 | shared->magic = VMXNET3_REV1_MAGIC; | ||
1606 | devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM; | ||
1607 | devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ? | ||
1608 | VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64); | ||
1609 | devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX; | ||
1610 | devRead->misc.driverInfo.vmxnet3RevSpt = 1; | ||
1611 | devRead->misc.driverInfo.uptVerSpt = 1; | ||
1612 | |||
1613 | devRead->misc.ddPA = virt_to_phys(adapter); | ||
1614 | devRead->misc.ddLen = sizeof(struct vmxnet3_adapter); | ||
1615 | |||
1616 | /* set up feature flags */ | ||
1617 | if (adapter->rxcsum) | ||
1618 | devRead->misc.uptFeatures |= UPT1_F_RXCSUM; | ||
1619 | |||
1620 | if (adapter->lro) { | ||
1621 | devRead->misc.uptFeatures |= UPT1_F_LRO; | ||
1622 | devRead->misc.maxNumRxSG = 1 + MAX_SKB_FRAGS; | ||
1623 | } | ||
1624 | if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) | ||
1625 | && adapter->vlan_grp) { | ||
1626 | devRead->misc.uptFeatures |= UPT1_F_RXVLAN; | ||
1627 | } | ||
1628 | |||
1629 | devRead->misc.mtu = adapter->netdev->mtu; | ||
1630 | devRead->misc.queueDescPA = adapter->queue_desc_pa; | ||
1631 | devRead->misc.queueDescLen = sizeof(struct Vmxnet3_TxQueueDesc) + | ||
1632 | sizeof(struct Vmxnet3_RxQueueDesc); | ||
1633 | |||
1634 | /* tx queue settings */ | ||
1635 | BUG_ON(adapter->tx_queue.tx_ring.base == NULL); | ||
1636 | |||
1637 | devRead->misc.numTxQueues = 1; | ||
1638 | tqc = &adapter->tqd_start->conf; | ||
1639 | tqc->txRingBasePA = adapter->tx_queue.tx_ring.basePA; | ||
1640 | tqc->dataRingBasePA = adapter->tx_queue.data_ring.basePA; | ||
1641 | tqc->compRingBasePA = adapter->tx_queue.comp_ring.basePA; | ||
1642 | tqc->ddPA = virt_to_phys(adapter->tx_queue.buf_info); | ||
1643 | tqc->txRingSize = adapter->tx_queue.tx_ring.size; | ||
1644 | tqc->dataRingSize = adapter->tx_queue.data_ring.size; | ||
1645 | tqc->compRingSize = adapter->tx_queue.comp_ring.size; | ||
1646 | tqc->ddLen = sizeof(struct vmxnet3_tx_buf_info) * | ||
1647 | tqc->txRingSize; | ||
1648 | tqc->intrIdx = adapter->tx_queue.comp_ring.intr_idx; | ||
1649 | |||
1650 | /* rx queue settings */ | ||
1651 | devRead->misc.numRxQueues = 1; | ||
1652 | rqc = &adapter->rqd_start->conf; | ||
1653 | rqc->rxRingBasePA[0] = adapter->rx_queue.rx_ring[0].basePA; | ||
1654 | rqc->rxRingBasePA[1] = adapter->rx_queue.rx_ring[1].basePA; | ||
1655 | rqc->compRingBasePA = adapter->rx_queue.comp_ring.basePA; | ||
1656 | rqc->ddPA = virt_to_phys(adapter->rx_queue.buf_info); | ||
1657 | rqc->rxRingSize[0] = adapter->rx_queue.rx_ring[0].size; | ||
1658 | rqc->rxRingSize[1] = adapter->rx_queue.rx_ring[1].size; | ||
1659 | rqc->compRingSize = adapter->rx_queue.comp_ring.size; | ||
1660 | rqc->ddLen = sizeof(struct vmxnet3_rx_buf_info) * | ||
1661 | (rqc->rxRingSize[0] + rqc->rxRingSize[1]); | ||
1662 | rqc->intrIdx = adapter->rx_queue.comp_ring.intr_idx; | ||
1663 | |||
1664 | /* intr settings */ | ||
1665 | devRead->intrConf.autoMask = adapter->intr.mask_mode == | ||
1666 | VMXNET3_IMM_AUTO; | ||
1667 | devRead->intrConf.numIntrs = adapter->intr.num_intrs; | ||
1668 | for (i = 0; i < adapter->intr.num_intrs; i++) | ||
1669 | devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i]; | ||
1670 | |||
1671 | devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx; | ||
1672 | |||
1673 | /* rx filter settings */ | ||
1674 | devRead->rxFilterConf.rxMode = 0; | ||
1675 | vmxnet3_restore_vlan(adapter); | ||
1676 | /* the rest are already zeroed */ | ||
1677 | } | ||
1678 | |||
1679 | |||
1680 | int | ||
1681 | vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) | ||
1682 | { | ||
1683 | int err; | ||
1684 | u32 ret; | ||
1685 | |||
1686 | dprintk(KERN_ERR "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes" | ||
1687 | " %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size, | ||
1688 | adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size, | ||
1689 | adapter->rx_queue.rx_ring[0].size, | ||
1690 | adapter->rx_queue.rx_ring[1].size); | ||
1691 | |||
1692 | vmxnet3_tq_init(&adapter->tx_queue, adapter); | ||
1693 | err = vmxnet3_rq_init(&adapter->rx_queue, adapter); | ||
1694 | if (err) { | ||
1695 | printk(KERN_ERR "Failed to init rx queue for %s: error %d\n", | ||
1696 | adapter->netdev->name, err); | ||
1697 | goto rq_err; | ||
1698 | } | ||
1699 | |||
1700 | err = vmxnet3_request_irqs(adapter); | ||
1701 | if (err) { | ||
1702 | printk(KERN_ERR "Failed to setup irq for %s: error %d\n", | ||
1703 | adapter->netdev->name, err); | ||
1704 | goto irq_err; | ||
1705 | } | ||
1706 | |||
1707 | vmxnet3_setup_driver_shared(adapter); | ||
1708 | |||
1709 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, | ||
1710 | VMXNET3_GET_ADDR_LO(adapter->shared_pa)); | ||
1711 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, | ||
1712 | VMXNET3_GET_ADDR_HI(adapter->shared_pa)); | ||
1713 | |||
1714 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | ||
1715 | VMXNET3_CMD_ACTIVATE_DEV); | ||
1716 | ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); | ||
1717 | |||
1718 | if (ret != 0) { | ||
1719 | printk(KERN_ERR "Failed to activate dev %s: error %u\n", | ||
1720 | adapter->netdev->name, ret); | ||
1721 | err = -EINVAL; | ||
1722 | goto activate_err; | ||
1723 | } | ||
1724 | VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD, | ||
1725 | adapter->rx_queue.rx_ring[0].next2fill); | ||
1726 | VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD2, | ||
1727 | adapter->rx_queue.rx_ring[1].next2fill); | ||
1728 | |||
1729 | /* Apply the rx filter settings last. */ | ||
1730 | vmxnet3_set_mc(adapter->netdev); | ||
1731 | |||
1732 | /* | ||
1733 | * Check link state when first activating device. It will start the | ||
1734 | * tx queue if the link is up. | ||
1735 | */ | ||
1736 | vmxnet3_check_link(adapter); | ||
1737 | |||
1738 | napi_enable(&adapter->napi); | ||
1739 | vmxnet3_enable_all_intrs(adapter); | ||
1740 | clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); | ||
1741 | return 0; | ||
1742 | |||
1743 | activate_err: | ||
1744 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0); | ||
1745 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0); | ||
1746 | vmxnet3_free_irqs(adapter); | ||
1747 | irq_err: | ||
1748 | rq_err: | ||
1749 | /* free up buffers we allocated */ | ||
1750 | vmxnet3_rq_cleanup(&adapter->rx_queue, adapter); | ||
1751 | return err; | ||
1752 | } | ||
1753 | |||
1754 | |||
1755 | void | ||
1756 | vmxnet3_reset_dev(struct vmxnet3_adapter *adapter) | ||
1757 | { | ||
1758 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV); | ||
1759 | } | ||
1760 | |||
1761 | |||
1762 | int | ||
1763 | vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter) | ||
1764 | { | ||
1765 | if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)) | ||
1766 | return 0; | ||
1767 | |||
1768 | |||
1769 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | ||
1770 | VMXNET3_CMD_QUIESCE_DEV); | ||
1771 | vmxnet3_disable_all_intrs(adapter); | ||
1772 | |||
1773 | napi_disable(&adapter->napi); | ||
1774 | netif_tx_disable(adapter->netdev); | ||
1775 | adapter->link_speed = 0; | ||
1776 | netif_carrier_off(adapter->netdev); | ||
1777 | |||
1778 | vmxnet3_tq_cleanup(&adapter->tx_queue, adapter); | ||
1779 | vmxnet3_rq_cleanup(&adapter->rx_queue, adapter); | ||
1780 | vmxnet3_free_irqs(adapter); | ||
1781 | return 0; | ||
1782 | } | ||
1783 | |||
1784 | |||
1785 | static void | ||
1786 | vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac) | ||
1787 | { | ||
1788 | u32 tmp; | ||
1789 | |||
1790 | tmp = *(u32 *)mac; | ||
1791 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp); | ||
1792 | |||
1793 | tmp = (mac[5] << 8) | mac[4]; | ||
1794 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp); | ||
1795 | } | ||
1796 | |||
1797 | |||
1798 | static int | ||
1799 | vmxnet3_set_mac_addr(struct net_device *netdev, void *p) | ||
1800 | { | ||
1801 | struct sockaddr *addr = p; | ||
1802 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
1803 | |||
1804 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | ||
1805 | vmxnet3_write_mac_addr(adapter, addr->sa_data); | ||
1806 | |||
1807 | return 0; | ||
1808 | } | ||
1809 | |||
1810 | |||
1811 | /* ==================== initialization and cleanup routines ============ */ | ||
1812 | |||
1813 | static int | ||
1814 | vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64) | ||
1815 | { | ||
1816 | int err; | ||
1817 | unsigned long mmio_start, mmio_len; | ||
1818 | struct pci_dev *pdev = adapter->pdev; | ||
1819 | |||
1820 | err = pci_enable_device(pdev); | ||
1821 | if (err) { | ||
1822 | printk(KERN_ERR "Failed to enable adapter %s: error %d\n", | ||
1823 | pci_name(pdev), err); | ||
1824 | return err; | ||
1825 | } | ||
1826 | |||
1827 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) { | ||
1828 | if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { | ||
1829 | printk(KERN_ERR "pci_set_consistent_dma_mask failed " | ||
1830 | "for adapter %s\n", pci_name(pdev)); | ||
1831 | err = -EIO; | ||
1832 | goto err_set_mask; | ||
1833 | } | ||
1834 | *dma64 = true; | ||
1835 | } else { | ||
1836 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { | ||
1837 | printk(KERN_ERR "pci_set_dma_mask failed for adapter " | ||
1838 | "%s\n", pci_name(pdev)); | ||
1839 | err = -EIO; | ||
1840 | goto err_set_mask; | ||
1841 | } | ||
1842 | *dma64 = false; | ||
1843 | } | ||
1844 | |||
1845 | err = pci_request_selected_regions(pdev, (1 << 2) - 1, | ||
1846 | vmxnet3_driver_name); | ||
1847 | if (err) { | ||
1848 | printk(KERN_ERR "Failed to request region for adapter %s: " | ||
1849 | "error %d\n", pci_name(pdev), err); | ||
1850 | goto err_set_mask; | ||
1851 | } | ||
1852 | |||
1853 | pci_set_master(pdev); | ||
1854 | |||
1855 | mmio_start = pci_resource_start(pdev, 0); | ||
1856 | mmio_len = pci_resource_len(pdev, 0); | ||
1857 | adapter->hw_addr0 = ioremap(mmio_start, mmio_len); | ||
1858 | if (!adapter->hw_addr0) { | ||
1859 | printk(KERN_ERR "Failed to map bar0 for adapter %s\n", | ||
1860 | pci_name(pdev)); | ||
1861 | err = -EIO; | ||
1862 | goto err_ioremap; | ||
1863 | } | ||
1864 | |||
1865 | mmio_start = pci_resource_start(pdev, 1); | ||
1866 | mmio_len = pci_resource_len(pdev, 1); | ||
1867 | adapter->hw_addr1 = ioremap(mmio_start, mmio_len); | ||
1868 | if (!adapter->hw_addr1) { | ||
1869 | printk(KERN_ERR "Failed to map bar1 for adapter %s\n", | ||
1870 | pci_name(pdev)); | ||
1871 | err = -EIO; | ||
1872 | goto err_bar1; | ||
1873 | } | ||
1874 | return 0; | ||
1875 | |||
1876 | err_bar1: | ||
1877 | iounmap(adapter->hw_addr0); | ||
1878 | err_ioremap: | ||
1879 | pci_release_selected_regions(pdev, (1 << 2) - 1); | ||
1880 | err_set_mask: | ||
1881 | pci_disable_device(pdev); | ||
1882 | return err; | ||
1883 | } | ||
1884 | |||
1885 | |||
1886 | static void | ||
1887 | vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter) | ||
1888 | { | ||
1889 | BUG_ON(!adapter->pdev); | ||
1890 | |||
1891 | iounmap(adapter->hw_addr0); | ||
1892 | iounmap(adapter->hw_addr1); | ||
1893 | pci_release_selected_regions(adapter->pdev, (1 << 2) - 1); | ||
1894 | pci_disable_device(adapter->pdev); | ||
1895 | } | ||
1896 | |||
1897 | |||
1898 | static void | ||
1899 | vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter) | ||
1900 | { | ||
1901 | size_t sz; | ||
1902 | |||
1903 | if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE - | ||
1904 | VMXNET3_MAX_ETH_HDR_SIZE) { | ||
1905 | adapter->skb_buf_size = adapter->netdev->mtu + | ||
1906 | VMXNET3_MAX_ETH_HDR_SIZE; | ||
1907 | if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE) | ||
1908 | adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE; | ||
1909 | |||
1910 | adapter->rx_buf_per_pkt = 1; | ||
1911 | } else { | ||
1912 | adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE; | ||
1913 | sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE + | ||
1914 | VMXNET3_MAX_ETH_HDR_SIZE; | ||
1915 | adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE; | ||
1916 | } | ||
1917 | |||
1918 | /* | ||
1919 | * for simplicity, force the ring0 size to be a multiple of | ||
1920 | * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN | ||
1921 | */ | ||
1922 | sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; | ||
1923 | adapter->rx_queue.rx_ring[0].size = (adapter->rx_queue.rx_ring[0].size + | ||
1924 | sz - 1) / sz * sz; | ||
1925 | adapter->rx_queue.rx_ring[0].size = min_t(u32, | ||
1926 | adapter->rx_queue.rx_ring[0].size, | ||
1927 | VMXNET3_RX_RING_MAX_SIZE / sz * sz); | ||
1928 | } | ||
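/*
 * Worked example (assuming VMXNET3_RING_SIZE_ALIGN == 32): with
 * rx_buf_per_pkt == 3, sz is 96, so a requested ring0 size of 256 is rounded
 * up to (256 + 95) / 96 * 96 == 288 descriptors, and the min_t() then caps
 * the result at the largest multiple of 96 not exceeding
 * VMXNET3_RX_RING_MAX_SIZE.
 */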
1929 | |||
1930 | |||
1931 | int | ||
1932 | vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size, | ||
1933 | u32 rx_ring_size, u32 rx_ring2_size) | ||
1934 | { | ||
1935 | int err; | ||
1936 | |||
1937 | adapter->tx_queue.tx_ring.size = tx_ring_size; | ||
1938 | adapter->tx_queue.data_ring.size = tx_ring_size; | ||
1939 | adapter->tx_queue.comp_ring.size = tx_ring_size; | ||
1940 | adapter->tx_queue.shared = &adapter->tqd_start->ctrl; | ||
1941 | adapter->tx_queue.stopped = true; | ||
1942 | err = vmxnet3_tq_create(&adapter->tx_queue, adapter); | ||
1943 | if (err) | ||
1944 | return err; | ||
1945 | |||
1946 | adapter->rx_queue.rx_ring[0].size = rx_ring_size; | ||
1947 | adapter->rx_queue.rx_ring[1].size = rx_ring2_size; | ||
1948 | vmxnet3_adjust_rx_ring_size(adapter); | ||
1949 | adapter->rx_queue.comp_ring.size = adapter->rx_queue.rx_ring[0].size + | ||
1950 | adapter->rx_queue.rx_ring[1].size; | ||
1951 | adapter->rx_queue.qid = 0; | ||
1952 | adapter->rx_queue.qid2 = 1; | ||
1953 | adapter->rx_queue.shared = &adapter->rqd_start->ctrl; | ||
1954 | err = vmxnet3_rq_create(&adapter->rx_queue, adapter); | ||
1955 | if (err) | ||
1956 | vmxnet3_tq_destroy(&adapter->tx_queue, adapter); | ||
1957 | |||
1958 | return err; | ||
1959 | } | ||
1960 | |||
1961 | static int | ||
1962 | vmxnet3_open(struct net_device *netdev) | ||
1963 | { | ||
1964 | struct vmxnet3_adapter *adapter; | ||
1965 | int err; | ||
1966 | |||
1967 | adapter = netdev_priv(netdev); | ||
1968 | |||
1969 | spin_lock_init(&adapter->tx_queue.tx_lock); | ||
1970 | |||
1971 | err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE, | ||
1972 | VMXNET3_DEF_RX_RING_SIZE, | ||
1973 | VMXNET3_DEF_RX_RING_SIZE); | ||
1974 | if (err) | ||
1975 | goto queue_err; | ||
1976 | |||
1977 | err = vmxnet3_activate_dev(adapter); | ||
1978 | if (err) | ||
1979 | goto activate_err; | ||
1980 | |||
1981 | return 0; | ||
1982 | |||
1983 | activate_err: | ||
1984 | vmxnet3_rq_destroy(&adapter->rx_queue, adapter); | ||
1985 | vmxnet3_tq_destroy(&adapter->tx_queue, adapter); | ||
1986 | queue_err: | ||
1987 | return err; | ||
1988 | } | ||
1989 | |||
1990 | |||
1991 | static int | ||
1992 | vmxnet3_close(struct net_device *netdev) | ||
1993 | { | ||
1994 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
1995 | |||
1996 | /* | ||
1997 | * Reset_work may be in the middle of resetting the device, wait for its | ||
1998 | * completion. | ||
1999 | */ | ||
2000 | while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) | ||
2001 | msleep(1); | ||
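/*
 * Illustrative note: VMXNET3_STATE_BIT_RESETTING doubles as a simple
 * test-and-set lock shared with vmxnet3_change_mtu() and
 * vmxnet3_reset_work(); waiting here ensures the device is not torn down
 * while a reset is still in flight.
 */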
2002 | |||
2003 | vmxnet3_quiesce_dev(adapter); | ||
2004 | |||
2005 | vmxnet3_rq_destroy(&adapter->rx_queue, adapter); | ||
2006 | vmxnet3_tq_destroy(&adapter->tx_queue, adapter); | ||
2007 | |||
2008 | clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); | ||
2009 | |||
2010 | |||
2011 | return 0; | ||
2012 | } | ||
2013 | |||
2014 | |||
2015 | void | ||
2016 | vmxnet3_force_close(struct vmxnet3_adapter *adapter) | ||
2017 | { | ||
2018 | /* | ||
2019 | * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise | ||
2020 | * vmxnet3_close() will deadlock. | ||
2021 | */ | ||
2022 | BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)); | ||
2023 | |||
2024 | /* we need to enable NAPI, otherwise dev_close will deadlock */ | ||
2025 | napi_enable(&adapter->napi); | ||
2026 | dev_close(adapter->netdev); | ||
2027 | } | ||
2028 | |||
2029 | |||
2030 | static int | ||
2031 | vmxnet3_change_mtu(struct net_device *netdev, int new_mtu) | ||
2032 | { | ||
2033 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
2034 | int err = 0; | ||
2035 | |||
2036 | if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU) | ||
2037 | return -EINVAL; | ||
2038 | |||
2039 | if (new_mtu > 1500 && !adapter->jumbo_frame) | ||
2040 | return -EINVAL; | ||
2041 | |||
2042 | netdev->mtu = new_mtu; | ||
2043 | |||
2044 | /* | ||
2045 | * Reset_work may be in the middle of resetting the device, wait for its | ||
2046 | * completion. | ||
2047 | */ | ||
2048 | while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) | ||
2049 | msleep(1); | ||
2050 | |||
2051 | if (netif_running(netdev)) { | ||
2052 | vmxnet3_quiesce_dev(adapter); | ||
2053 | vmxnet3_reset_dev(adapter); | ||
2054 | |||
2055 | /* we need to re-create the rx queue based on the new mtu */ | ||
2056 | vmxnet3_rq_destroy(&adapter->rx_queue, adapter); | ||
2057 | vmxnet3_adjust_rx_ring_size(adapter); | ||
2058 | adapter->rx_queue.comp_ring.size = | ||
2059 | adapter->rx_queue.rx_ring[0].size + | ||
2060 | adapter->rx_queue.rx_ring[1].size; | ||
2061 | err = vmxnet3_rq_create(&adapter->rx_queue, adapter); | ||
2062 | if (err) { | ||
2063 | printk(KERN_ERR "%s: failed to re-create rx queue," | ||
2064 | " error %d. Closing it.\n", netdev->name, err); | ||
2065 | goto out; | ||
2066 | } | ||
2067 | |||
2068 | err = vmxnet3_activate_dev(adapter); | ||
2069 | if (err) { | ||
2070 | printk(KERN_ERR "%s: failed to re-activate, error %d. " | ||
2071 | "Closing it\n", netdev->name, err); | ||
2072 | goto out; | ||
2073 | } | ||
2074 | } | ||
2075 | |||
2076 | out: | ||
2077 | clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); | ||
2078 | if (err) | ||
2079 | vmxnet3_force_close(adapter); | ||
2080 | |||
2081 | return err; | ||
2082 | } | ||
2083 | |||
2084 | |||
2085 | static void | ||
2086 | vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64) | ||
2087 | { | ||
2088 | struct net_device *netdev = adapter->netdev; | ||
2089 | |||
2090 | netdev->features = NETIF_F_SG | | ||
2091 | NETIF_F_HW_CSUM | | ||
2092 | NETIF_F_HW_VLAN_TX | | ||
2093 | NETIF_F_HW_VLAN_RX | | ||
2094 | NETIF_F_HW_VLAN_FILTER | | ||
2095 | NETIF_F_TSO | | ||
2096 | NETIF_F_TSO6 | | ||
2097 | NETIF_F_LRO; | ||
2098 | |||
2099 | printk(KERN_INFO "features: sg csum vlan jf tso tsoIPv6 lro"); | ||
2100 | |||
2101 | adapter->rxcsum = true; | ||
2102 | adapter->jumbo_frame = true; | ||
2103 | adapter->lro = true; | ||
2104 | |||
2105 | if (dma64) { | ||
2106 | netdev->features |= NETIF_F_HIGHDMA; | ||
2107 | printk(" highDMA"); | ||
2108 | } | ||
2109 | |||
2110 | netdev->vlan_features = netdev->features; | ||
2111 | printk("\n"); | ||
2112 | } | ||
2113 | |||
2114 | |||
2115 | static void | ||
2116 | vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac) | ||
2117 | { | ||
2118 | u32 tmp; | ||
2119 | |||
2120 | tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL); | ||
2121 | *(u32 *)mac = tmp; | ||
2122 | |||
2123 | tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH); | ||
2124 | mac[4] = tmp & 0xff; | ||
2125 | mac[5] = (tmp >> 8) & 0xff; | ||
2126 | } | ||
2127 | |||
2128 | |||
2129 | static void | ||
2130 | vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) | ||
2131 | { | ||
2132 | u32 cfg; | ||
2133 | |||
2134 | /* intr settings */ | ||
2135 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | ||
2136 | VMXNET3_CMD_GET_CONF_INTR); | ||
2137 | cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); | ||
2138 | adapter->intr.type = cfg & 0x3; | ||
2139 | adapter->intr.mask_mode = (cfg >> 2) & 0x3; | ||
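/*
 * Illustrative note: the device reports its preferred interrupt setup in a
 * single register; as decoded above, bits 1:0 carry the interrupt type and
 * bits 3:2 the interrupt mask mode.
 */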
2140 | |||
2141 | if (adapter->intr.type == VMXNET3_IT_AUTO) { | ||
2142 | int err; | ||
2143 | |||
2144 | #ifdef CONFIG_PCI_MSI | ||
2145 | adapter->intr.msix_entries[0].entry = 0; | ||
2146 | err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries, | ||
2147 | VMXNET3_LINUX_MAX_MSIX_VECT); | ||
2148 | if (!err) { | ||
2149 | adapter->intr.num_intrs = 1; | ||
2150 | adapter->intr.type = VMXNET3_IT_MSIX; | ||
2151 | return; | ||
2152 | } | ||
2153 | #endif | ||
2154 | |||
2155 | err = pci_enable_msi(adapter->pdev); | ||
2156 | if (!err) { | ||
2157 | adapter->intr.num_intrs = 1; | ||
2158 | adapter->intr.type = VMXNET3_IT_MSI; | ||
2159 | return; | ||
2160 | } | ||
2161 | } | ||
2162 | |||
2163 | adapter->intr.type = VMXNET3_IT_INTX; | ||
2164 | |||
2165 | /* INT-X related setting */ | ||
2166 | adapter->intr.num_intrs = 1; | ||
2167 | } | ||
2168 | |||
2169 | |||
2170 | static void | ||
2171 | vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter) | ||
2172 | { | ||
2173 | if (adapter->intr.type == VMXNET3_IT_MSIX) | ||
2174 | pci_disable_msix(adapter->pdev); | ||
2175 | else if (adapter->intr.type == VMXNET3_IT_MSI) | ||
2176 | pci_disable_msi(adapter->pdev); | ||
2177 | else | ||
2178 | BUG_ON(adapter->intr.type != VMXNET3_IT_INTX); | ||
2179 | } | ||
2180 | |||
2181 | |||
2182 | static void | ||
2183 | vmxnet3_tx_timeout(struct net_device *netdev) | ||
2184 | { | ||
2185 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
2186 | adapter->tx_timeout_count++; | ||
2187 | |||
2188 | printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name); | ||
2189 | schedule_work(&adapter->work); | ||
2190 | } | ||
2191 | |||
2192 | |||
2193 | static void | ||
2194 | vmxnet3_reset_work(struct work_struct *data) | ||
2195 | { | ||
2196 | struct vmxnet3_adapter *adapter; | ||
2197 | |||
2198 | adapter = container_of(data, struct vmxnet3_adapter, work); | ||
2199 | |||
2200 | /* if another thread is resetting the device, no need to proceed */ | ||
2201 | if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) | ||
2202 | return; | ||
2203 | |||
2204 | /* if the device is closed, we must leave it alone */ | ||
2205 | if (netif_running(adapter->netdev)) { | ||
2206 | printk(KERN_INFO "%s: resetting\n", adapter->netdev->name); | ||
2207 | vmxnet3_quiesce_dev(adapter); | ||
2208 | vmxnet3_reset_dev(adapter); | ||
2209 | vmxnet3_activate_dev(adapter); | ||
2210 | } else { | ||
2211 | printk(KERN_INFO "%s: already closed\n", adapter->netdev->name); | ||
2212 | } | ||
2213 | |||
2214 | clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); | ||
2215 | } | ||
2216 | |||
2217 | |||
2218 | static int __devinit | ||
2219 | vmxnet3_probe_device(struct pci_dev *pdev, | ||
2220 | const struct pci_device_id *id) | ||
2221 | { | ||
2222 | static const struct net_device_ops vmxnet3_netdev_ops = { | ||
2223 | .ndo_open = vmxnet3_open, | ||
2224 | .ndo_stop = vmxnet3_close, | ||
2225 | .ndo_start_xmit = vmxnet3_xmit_frame, | ||
2226 | .ndo_set_mac_address = vmxnet3_set_mac_addr, | ||
2227 | .ndo_change_mtu = vmxnet3_change_mtu, | ||
2228 | .ndo_get_stats = vmxnet3_get_stats, | ||
2229 | .ndo_tx_timeout = vmxnet3_tx_timeout, | ||
2230 | .ndo_set_multicast_list = vmxnet3_set_mc, | ||
2231 | .ndo_vlan_rx_register = vmxnet3_vlan_rx_register, | ||
2232 | .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid, | ||
2233 | .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid, | ||
2234 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
2235 | .ndo_poll_controller = vmxnet3_netpoll, | ||
2236 | #endif | ||
2237 | }; | ||
2238 | int err; | ||
2239 | bool dma64 = false; /* initialized only to quiet a gcc "may be used uninitialized" warning */ | ||
2240 | u32 ver; | ||
2241 | struct net_device *netdev; | ||
2242 | struct vmxnet3_adapter *adapter; | ||
2243 | u8 mac[ETH_ALEN]; | ||
2244 | |||
2245 | netdev = alloc_etherdev(sizeof(struct vmxnet3_adapter)); | ||
2246 | if (!netdev) { | ||
2247 | printk(KERN_ERR "Failed to alloc ethernet device for adapter " | ||
2248 | "%s\n", pci_name(pdev)); | ||
2249 | return -ENOMEM; | ||
2250 | } | ||
2251 | |||
2252 | pci_set_drvdata(pdev, netdev); | ||
2253 | adapter = netdev_priv(netdev); | ||
2254 | adapter->netdev = netdev; | ||
2255 | adapter->pdev = pdev; | ||
2256 | |||
2257 | adapter->shared = pci_alloc_consistent(adapter->pdev, | ||
2258 | sizeof(struct Vmxnet3_DriverShared), | ||
2259 | &adapter->shared_pa); | ||
2260 | if (!adapter->shared) { | ||
2261 | printk(KERN_ERR "Failed to allocate memory for %s\n", | ||
2262 | pci_name(pdev)); | ||
2263 | err = -ENOMEM; | ||
2264 | goto err_alloc_shared; | ||
2265 | } | ||
2266 | |||
2267 | adapter->tqd_start = pci_alloc_consistent(adapter->pdev, | ||
2268 | sizeof(struct Vmxnet3_TxQueueDesc) + | ||
2269 | sizeof(struct Vmxnet3_RxQueueDesc), | ||
2270 | &adapter->queue_desc_pa); | ||
2271 | |||
2272 | if (!adapter->tqd_start) { | ||
2273 | printk(KERN_ERR "Failed to allocate memory for %s\n", | ||
2274 | pci_name(pdev)); | ||
2275 | err = -ENOMEM; | ||
2276 | goto err_alloc_queue_desc; | ||
2277 | } | ||
2278 | adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start | ||
2279 | + 1); | ||
2280 | |||
2281 | adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL); | ||
2282 | if (adapter->pm_conf == NULL) { | ||
2283 | printk(KERN_ERR "Failed to allocate memory for %s\n", | ||
2284 | pci_name(pdev)); | ||
2285 | err = -ENOMEM; | ||
2286 | goto err_alloc_pm; | ||
2287 | } | ||
2288 | |||
2289 | err = vmxnet3_alloc_pci_resources(adapter, &dma64); | ||
2290 | if (err < 0) | ||
2291 | goto err_alloc_pci; | ||
2292 | |||
2293 | ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS); | ||
2294 | if (ver & 1) { | ||
2295 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1); | ||
2296 | } else { | ||
2297 | printk(KERN_ERR "Incompatible h/w version (0x%x) for adapter" | ||
2298 | " %s\n", ver, pci_name(pdev)); | ||
2299 | err = -EBUSY; | ||
2300 | goto err_ver; | ||
2301 | } | ||
2302 | |||
2303 | ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS); | ||
2304 | if (ver & 1) { | ||
2305 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1); | ||
2306 | } else { | ||
2307 | printk(KERN_ERR "Incompatible upt version (0x%x) for " | ||
2308 | "adapter %s\n", ver, pci_name(pdev)); | ||
2309 | err = -EBUSY; | ||
2310 | goto err_ver; | ||
2311 | } | ||
2312 | |||
2313 | vmxnet3_declare_features(adapter, dma64); | ||
2314 | |||
2315 | adapter->dev_number = atomic_read(&devices_found); | ||
2316 | vmxnet3_alloc_intr_resources(adapter); | ||
2317 | |||
2318 | vmxnet3_read_mac_addr(adapter, mac); | ||
2319 | memcpy(netdev->dev_addr, mac, netdev->addr_len); | ||
2320 | |||
2321 | netdev->netdev_ops = &vmxnet3_netdev_ops; | ||
2322 | netdev->watchdog_timeo = 5 * HZ; | ||
2323 | vmxnet3_set_ethtool_ops(netdev); | ||
2324 | |||
2325 | INIT_WORK(&adapter->work, vmxnet3_reset_work); | ||
2326 | |||
2327 | netif_napi_add(netdev, &adapter->napi, vmxnet3_poll, 64); | ||
2328 | SET_NETDEV_DEV(netdev, &pdev->dev); | ||
2329 | err = register_netdev(netdev); | ||
2330 | |||
2331 | if (err) { | ||
2332 | printk(KERN_ERR "Failed to register adapter %s\n", | ||
2333 | pci_name(pdev)); | ||
2334 | goto err_register; | ||
2335 | } | ||
2336 | |||
2337 | set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); | ||
2338 | atomic_inc(&devices_found); | ||
2339 | return 0; | ||
2340 | |||
2341 | err_register: | ||
2342 | vmxnet3_free_intr_resources(adapter); | ||
2343 | err_ver: | ||
2344 | vmxnet3_free_pci_resources(adapter); | ||
2345 | err_alloc_pci: | ||
2346 | kfree(adapter->pm_conf); | ||
2347 | err_alloc_pm: | ||
2348 | pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) + | ||
2349 | sizeof(struct Vmxnet3_RxQueueDesc), | ||
2350 | adapter->tqd_start, adapter->queue_desc_pa); | ||
2351 | err_alloc_queue_desc: | ||
2352 | pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared), | ||
2353 | adapter->shared, adapter->shared_pa); | ||
2354 | err_alloc_shared: | ||
2355 | pci_set_drvdata(pdev, NULL); | ||
2356 | free_netdev(netdev); | ||
2357 | return err; | ||
2358 | } | ||
2359 | |||
2360 | |||
2361 | static void __devexit | ||
2362 | vmxnet3_remove_device(struct pci_dev *pdev) | ||
2363 | { | ||
2364 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
2365 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
2366 | |||
2367 | flush_scheduled_work(); | ||
2368 | |||
2369 | unregister_netdev(netdev); | ||
2370 | |||
2371 | vmxnet3_free_intr_resources(adapter); | ||
2372 | vmxnet3_free_pci_resources(adapter); | ||
2373 | kfree(adapter->pm_conf); | ||
2374 | pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) + | ||
2375 | sizeof(struct Vmxnet3_RxQueueDesc), | ||
2376 | adapter->tqd_start, adapter->queue_desc_pa); | ||
2377 | pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared), | ||
2378 | adapter->shared, adapter->shared_pa); | ||
2379 | free_netdev(netdev); | ||
2380 | } | ||
2381 | |||
2382 | |||
2383 | #ifdef CONFIG_PM | ||
2384 | |||
2385 | static int | ||
2386 | vmxnet3_suspend(struct device *device) | ||
2387 | { | ||
2388 | struct pci_dev *pdev = to_pci_dev(device); | ||
2389 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
2390 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
2391 | struct Vmxnet3_PMConf *pmConf; | ||
2392 | struct ethhdr *ehdr; | ||
2393 | struct arphdr *ahdr; | ||
2394 | u8 *arpreq; | ||
2395 | struct in_device *in_dev; | ||
2396 | struct in_ifaddr *ifa; | ||
2397 | int i = 0; | ||
2398 | |||
2399 | if (!netif_running(netdev)) | ||
2400 | return 0; | ||
2401 | |||
2402 | vmxnet3_disable_all_intrs(adapter); | ||
2403 | vmxnet3_free_irqs(adapter); | ||
2404 | vmxnet3_free_intr_resources(adapter); | ||
2405 | |||
2406 | netif_device_detach(netdev); | ||
2407 | netif_stop_queue(netdev); | ||
2408 | |||
2409 | /* Create wake-up filters. */ | ||
2410 | pmConf = adapter->pm_conf; | ||
2411 | memset(pmConf, 0, sizeof(*pmConf)); | ||
2412 | |||
2413 | if (adapter->wol & WAKE_UCAST) { | ||
2414 | pmConf->filters[i].patternSize = ETH_ALEN; | ||
2415 | pmConf->filters[i].maskSize = 1; | ||
2416 | memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN); | ||
2417 | pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */ | ||
2418 | |||
2419 | pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER; | ||
2420 | i++; | ||
2421 | } | ||
2422 | |||
2423 | if (adapter->wol & WAKE_ARP) { | ||
2424 | in_dev = in_dev_get(netdev); | ||
2425 | if (!in_dev) | ||
2426 | goto skip_arp; | ||
2427 | |||
2428 | ifa = (struct in_ifaddr *)in_dev->ifa_list; | ||
2429 | if (!ifa) | ||
2430 | goto skip_arp; | ||
2431 | |||
2432 | pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/ | ||
2433 | sizeof(struct arphdr) + /* ARP header */ | ||
2434 | 2 * ETH_ALEN + /* 2 Ethernet addresses*/ | ||
2435 | 2 * sizeof(u32); /*2 IPv4 addresses */ | ||
2436 | pmConf->filters[i].maskSize = | ||
2437 | (pmConf->filters[i].patternSize - 1) / 8 + 1; | ||
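| /* each mask bit covers one pattern byte, hence ceil(patternSize / 8) mask bytes */ | ||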
2438 | |||
2439 | /* ETH_P_ARP in Ethernet header. */ | ||
2440 | ehdr = (struct ethhdr *)pmConf->filters[i].pattern; | ||
2441 | ehdr->h_proto = htons(ETH_P_ARP); | ||
2442 | |||
2443 | /* ARPOP_REQUEST in ARP header. */ | ||
2444 | ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN]; | ||
2445 | ahdr->ar_op = htons(ARPOP_REQUEST); | ||
2446 | arpreq = (u8 *)(ahdr + 1); | ||
2447 | |||
2448 | /* The Unicast IPv4 address in 'tip' field. */ | ||
2449 | arpreq += 2 * ETH_ALEN + sizeof(u32); | ||
2450 | *(u32 *)arpreq = ifa->ifa_address; | ||
2451 | |||
2452 | /* The mask for the relevant bits. */ | ||
2453 | pmConf->filters[i].mask[0] = 0x00; | ||
2454 | pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */ | ||
2455 | pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */ | ||
2456 | pmConf->filters[i].mask[3] = 0x00; | ||
2457 | pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */ | ||
2458 | pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */ | ||
2459 | in_dev_put(in_dev); | ||
2460 | |||
2461 | pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER; | ||
2462 | i++; | ||
2463 | } | ||
2464 | |||
2465 | skip_arp: | ||
2466 | if (adapter->wol & WAKE_MAGIC) | ||
2467 | pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC; | ||
2468 | |||
2469 | pmConf->numFilters = i; | ||
2470 | |||
2471 | adapter->shared->devRead.pmConfDesc.confVer = 1; | ||
2472 | adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf); | ||
2473 | adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf); | ||
2474 | |||
2475 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | ||
2476 | VMXNET3_CMD_UPDATE_PMCFG); | ||
2477 | |||
2478 | pci_save_state(pdev); | ||
2479 | pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND), | ||
2480 | adapter->wol); | ||
2481 | pci_disable_device(pdev); | ||
2482 | pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND)); | ||
2483 | |||
2484 | return 0; | ||
2485 | } | ||
2486 | |||
2487 | |||
2488 | static int | ||
2489 | vmxnet3_resume(struct device *device) | ||
2490 | { | ||
2491 | int err; | ||
2492 | struct pci_dev *pdev = to_pci_dev(device); | ||
2493 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
2494 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
2495 | struct Vmxnet3_PMConf *pmConf; | ||
2496 | |||
2497 | if (!netif_running(netdev)) | ||
2498 | return 0; | ||
2499 | |||
2500 | /* Destroy wake-up filters. */ | ||
2501 | pmConf = adapter->pm_conf; | ||
2502 | memset(pmConf, 0, sizeof(*pmConf)); | ||
2503 | |||
2504 | adapter->shared->devRead.pmConfDesc.confVer = 1; | ||
2505 | adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf); | ||
2506 | adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf); | ||
2507 | |||
2508 | netif_device_attach(netdev); | ||
2509 | pci_set_power_state(pdev, PCI_D0); | ||
2510 | pci_restore_state(pdev); | ||
2511 | err = pci_enable_device_mem(pdev); | ||
2512 | if (err != 0) | ||
2513 | return err; | ||
2514 | |||
2515 | pci_enable_wake(pdev, PCI_D0, 0); | ||
2516 | |||
2517 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | ||
2518 | VMXNET3_CMD_UPDATE_PMCFG); | ||
2519 | vmxnet3_alloc_intr_resources(adapter); | ||
2520 | vmxnet3_request_irqs(adapter); | ||
2521 | vmxnet3_enable_all_intrs(adapter); | ||
2522 | |||
2523 | return 0; | ||
2524 | } | ||
2525 | |||
2526 | static struct dev_pm_ops vmxnet3_pm_ops = { | ||
2527 | .suspend = vmxnet3_suspend, | ||
2528 | .resume = vmxnet3_resume, | ||
2529 | }; | ||
2530 | #endif | ||
2531 | |||
2532 | static struct pci_driver vmxnet3_driver = { | ||
2533 | .name = vmxnet3_driver_name, | ||
2534 | .id_table = vmxnet3_pciid_table, | ||
2535 | .probe = vmxnet3_probe_device, | ||
2536 | .remove = __devexit_p(vmxnet3_remove_device), | ||
2537 | #ifdef CONFIG_PM | ||
2538 | .driver.pm = &vmxnet3_pm_ops, | ||
2539 | #endif | ||
2540 | }; | ||
2541 | |||
2542 | |||
2543 | static int __init | ||
2544 | vmxnet3_init_module(void) | ||
2545 | { | ||
2546 | printk(KERN_INFO "%s - version %s\n", VMXNET3_DRIVER_DESC, | ||
2547 | VMXNET3_DRIVER_VERSION_REPORT); | ||
2548 | return pci_register_driver(&vmxnet3_driver); | ||
2549 | } | ||
2550 | |||
2551 | module_init(vmxnet3_init_module); | ||
2552 | |||
2553 | |||
2554 | static void | ||
2555 | vmxnet3_exit_module(void) | ||
2556 | { | ||
2557 | pci_unregister_driver(&vmxnet3_driver); | ||
2558 | } | ||
2559 | |||
2560 | module_exit(vmxnet3_exit_module); | ||
2561 | |||
2562 | MODULE_AUTHOR("VMware, Inc."); | ||
2563 | MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC); | ||
2564 | MODULE_LICENSE("GPL v2"); | ||
2565 | MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING); | ||
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c new file mode 100644 index 000000000000..c2c15e4cafc7 --- /dev/null +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c | |||
@@ -0,0 +1,566 @@ | |||
1 | /* | ||
2 | * Linux driver for VMware's vmxnet3 ethernet NIC. | ||
3 | * | ||
4 | * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; version 2 of the License and no later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
13 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
14 | * details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | * | ||
20 | * The full GNU General Public License is included in this distribution in | ||
21 | * the file called "COPYING". | ||
22 | * | ||
23 | * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | |||
28 | #include "vmxnet3_int.h" | ||
29 | |||
30 | struct vmxnet3_stat_desc { | ||
31 | char desc[ETH_GSTRING_LEN]; | ||
32 | int offset; | ||
33 | }; | ||
34 | |||
35 | |||
36 | static u32 | ||
37 | vmxnet3_get_rx_csum(struct net_device *netdev) | ||
38 | { | ||
39 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
40 | return adapter->rxcsum; | ||
41 | } | ||
42 | |||
43 | |||
44 | static int | ||
45 | vmxnet3_set_rx_csum(struct net_device *netdev, u32 val) | ||
46 | { | ||
47 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
48 | |||
49 | if (adapter->rxcsum != val) { | ||
50 | adapter->rxcsum = val; | ||
51 | if (netif_running(netdev)) { | ||
52 | if (val) | ||
53 | adapter->shared->devRead.misc.uptFeatures |= | ||
54 | UPT1_F_RXCSUM; | ||
55 | else | ||
56 | adapter->shared->devRead.misc.uptFeatures &= | ||
57 | ~UPT1_F_RXCSUM; | ||
58 | |||
59 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | ||
60 | VMXNET3_CMD_UPDATE_FEATURE); | ||
61 | } | ||
62 | } | ||
63 | return 0; | ||
64 | } | ||
65 | |||
66 | |||
67 | /* per tq stats maintained by the device */ | ||
68 | static const struct vmxnet3_stat_desc | ||
69 | vmxnet3_tq_dev_stats[] = { | ||
70 | /* description, offset */ | ||
71 | { "TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) }, | ||
72 | { "TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) }, | ||
73 | { "ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) }, | ||
74 | { "ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) }, | ||
75 | { "mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) }, | ||
76 | { "mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) }, | ||
77 | { "bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) }, | ||
78 | { "bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) }, | ||
79 | { "pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) }, | ||
80 | { "pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) }, | ||
81 | }; | ||
82 | |||
83 | /* per tq stats maintained by the driver */ | ||
84 | static const struct vmxnet3_stat_desc | ||
85 | vmxnet3_tq_driver_stats[] = { | ||
86 | /* description, offset */ | ||
87 | {"drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats, | ||
88 | drop_total) }, | ||
89 | { " too many frags", offsetof(struct vmxnet3_tq_driver_stats, | ||
90 | drop_too_many_frags) }, | ||
91 | { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats, | ||
92 | drop_oversized_hdr) }, | ||
93 | { " hdr err", offsetof(struct vmxnet3_tq_driver_stats, | ||
94 | drop_hdr_inspect_err) }, | ||
95 | { " tso", offsetof(struct vmxnet3_tq_driver_stats, | ||
96 | drop_tso) }, | ||
97 | { "ring full", offsetof(struct vmxnet3_tq_driver_stats, | ||
98 | tx_ring_full) }, | ||
99 | { "pkts linearized", offsetof(struct vmxnet3_tq_driver_stats, | ||
100 | linearized) }, | ||
101 | { "hdr cloned", offsetof(struct vmxnet3_tq_driver_stats, | ||
102 | copy_skb_header) }, | ||
103 | { "giant hdr", offsetof(struct vmxnet3_tq_driver_stats, | ||
104 | oversized_hdr) }, | ||
105 | }; | ||
106 | |||
107 | /* per rq stats maintained by the device */ | ||
108 | static const struct vmxnet3_stat_desc | ||
109 | vmxnet3_rq_dev_stats[] = { | ||
110 | { "LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) }, | ||
111 | { "LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) }, | ||
112 | { "ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) }, | ||
113 | { "ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) }, | ||
114 | { "mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) }, | ||
115 | { "mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) }, | ||
116 | { "bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) }, | ||
117 | { "bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) }, | ||
118 | { "pkts rx out of buf", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) }, | ||
119 | { "pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) }, | ||
120 | }; | ||
121 | |||
122 | /* per rq stats maintained by the driver */ | ||
123 | static const struct vmxnet3_stat_desc | ||
124 | vmxnet3_rq_driver_stats[] = { | ||
125 | /* description, offset */ | ||
126 | { "drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats, | ||
127 | drop_total) }, | ||
128 | { " err", offsetof(struct vmxnet3_rq_driver_stats, | ||
129 | drop_err) }, | ||
130 | { " fcs", offsetof(struct vmxnet3_rq_driver_stats, | ||
131 | drop_fcs) }, | ||
132 | { "rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats, | ||
133 | rx_buf_alloc_failure) }, | ||
134 | }; | ||
135 | |||
136 | /* global stats maintained by the driver */ | ||
137 | static const struct vmxnet3_stat_desc | ||
138 | vmxnet3_global_stats[] = { | ||
139 | /* description, offset */ | ||
140 | { "tx timeout count", offsetof(struct vmxnet3_adapter, | ||
141 | tx_timeout_count) } | ||
142 | }; | ||
143 | |||
144 | |||
145 | struct net_device_stats * | ||
146 | vmxnet3_get_stats(struct net_device *netdev) | ||
147 | { | ||
148 | struct vmxnet3_adapter *adapter; | ||
149 | struct vmxnet3_tq_driver_stats *drvTxStats; | ||
150 | struct vmxnet3_rq_driver_stats *drvRxStats; | ||
151 | struct UPT1_TxStats *devTxStats; | ||
152 | struct UPT1_RxStats *devRxStats; | ||
153 | struct net_device_stats *net_stats = &netdev->stats; | ||
154 | |||
155 | adapter = netdev_priv(netdev); | ||
156 | |||
157 | /* Collect the dev stats into the shared area */ | ||
158 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); | ||
159 | |||
160 | /* Assuming that we have a single queue device */ | ||
161 | devTxStats = &adapter->tqd_start->stats; | ||
162 | devRxStats = &adapter->rqd_start->stats; | ||
163 | |||
164 | /* Get access to the driver stats per queue */ | ||
165 | drvTxStats = &adapter->tx_queue.stats; | ||
166 | drvRxStats = &adapter->rx_queue.stats; | ||
167 | |||
168 | memset(net_stats, 0, sizeof(*net_stats)); | ||
169 | |||
170 | net_stats->rx_packets = devRxStats->ucastPktsRxOK + | ||
171 | devRxStats->mcastPktsRxOK + | ||
172 | devRxStats->bcastPktsRxOK; | ||
173 | |||
174 | net_stats->tx_packets = devTxStats->ucastPktsTxOK + | ||
175 | devTxStats->mcastPktsTxOK + | ||
176 | devTxStats->bcastPktsTxOK; | ||
177 | |||
178 | net_stats->rx_bytes = devRxStats->ucastBytesRxOK + | ||
179 | devRxStats->mcastBytesRxOK + | ||
180 | devRxStats->bcastBytesRxOK; | ||
181 | |||
182 | net_stats->tx_bytes = devTxStats->ucastBytesTxOK + | ||
183 | devTxStats->mcastBytesTxOK + | ||
184 | devTxStats->bcastBytesTxOK; | ||
185 | |||
186 | net_stats->rx_errors = devRxStats->pktsRxError; | ||
187 | net_stats->tx_errors = devTxStats->pktsTxError; | ||
188 | net_stats->rx_dropped = drvRxStats->drop_total; | ||
189 | net_stats->tx_dropped = drvTxStats->drop_total; | ||
190 | net_stats->multicast = devRxStats->mcastPktsRxOK; | ||
191 | |||
192 | return net_stats; | ||
193 | } | ||
194 | |||
195 | static int | ||
196 | vmxnet3_get_sset_count(struct net_device *netdev, int sset) | ||
197 | { | ||
198 | switch (sset) { | ||
199 | case ETH_SS_STATS: | ||
200 | return ARRAY_SIZE(vmxnet3_tq_dev_stats) + | ||
201 | ARRAY_SIZE(vmxnet3_tq_driver_stats) + | ||
202 | ARRAY_SIZE(vmxnet3_rq_dev_stats) + | ||
203 | ARRAY_SIZE(vmxnet3_rq_driver_stats) + | ||
204 | ARRAY_SIZE(vmxnet3_global_stats); | ||
205 | default: | ||
206 | return -EOPNOTSUPP; | ||
207 | } | ||
208 | } | ||
209 | |||
210 | |||
211 | static int | ||
212 | vmxnet3_get_regs_len(struct net_device *netdev) | ||
213 | { | ||
214 | return 20 * sizeof(u32); | ||
215 | } | ||
216 | |||
217 | |||
218 | static void | ||
219 | vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) | ||
220 | { | ||
221 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
222 | |||
223 | strlcpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver)); | ||
224 | drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0'; | ||
225 | |||
226 | strlcpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT, | ||
227 | sizeof(drvinfo->version)); | ||
228 | drvinfo->version[sizeof(drvinfo->version) - 1] = '\0'; | ||
229 | |||
230 | strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); | ||
231 | drvinfo->fw_version[sizeof(drvinfo->fw_version) - 1] = '\0'; | ||
232 | |||
233 | strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), | ||
234 | ETHTOOL_BUSINFO_LEN); | ||
235 | drvinfo->n_stats = vmxnet3_get_sset_count(netdev, ETH_SS_STATS); | ||
236 | drvinfo->testinfo_len = 0; | ||
237 | drvinfo->eedump_len = 0; | ||
238 | drvinfo->regdump_len = vmxnet3_get_regs_len(netdev); | ||
239 | } | ||
240 | |||
241 | |||
242 | static void | ||
243 | vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) | ||
244 | { | ||
245 | if (stringset == ETH_SS_STATS) { | ||
246 | int i; | ||
247 | |||
248 | for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) { | ||
249 | memcpy(buf, vmxnet3_tq_dev_stats[i].desc, | ||
250 | ETH_GSTRING_LEN); | ||
251 | buf += ETH_GSTRING_LEN; | ||
252 | } | ||
253 | for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) { | ||
254 | memcpy(buf, vmxnet3_tq_driver_stats[i].desc, | ||
255 | ETH_GSTRING_LEN); | ||
256 | buf += ETH_GSTRING_LEN; | ||
257 | } | ||
258 | for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) { | ||
259 | memcpy(buf, vmxnet3_rq_dev_stats[i].desc, | ||
260 | ETH_GSTRING_LEN); | ||
261 | buf += ETH_GSTRING_LEN; | ||
262 | } | ||
263 | for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) { | ||
264 | memcpy(buf, vmxnet3_rq_driver_stats[i].desc, | ||
265 | ETH_GSTRING_LEN); | ||
266 | buf += ETH_GSTRING_LEN; | ||
267 | } | ||
268 | for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) { | ||
269 | memcpy(buf, vmxnet3_global_stats[i].desc, | ||
270 | ETH_GSTRING_LEN); | ||
271 | buf += ETH_GSTRING_LEN; | ||
272 | } | ||
273 | } | ||
274 | } | ||
275 | |||
276 | static u32 | ||
277 | vmxnet3_get_flags(struct net_device *netdev) | ||
| { | ||
278 | return netdev->features; | ||
279 | } | ||
280 | |||
281 | static int | ||
282 | vmxnet3_set_flags(struct net_device *netdev, u32 data) | ||
| { | ||
283 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
284 | u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1; | ||
285 | u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1; | ||
286 | |||
287 | if (lro_requested ^ lro_present) { | ||
288 | /* toggle the LRO feature*/ | ||
289 | netdev->features ^= NETIF_F_LRO; | ||
290 | |||
291 | /* update hardware LRO capability accordingly */ | ||
292 | if (lro_requested) | ||
293 | adapter->shared->devRead.misc.uptFeatures |= UPT1_F_LRO; | ||
294 | else | ||
295 | adapter->shared->devRead.misc.uptFeatures &= | ||
296 | ~UPT1_F_LRO; | ||
297 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | ||
298 | VMXNET3_CMD_UPDATE_FEATURE); | ||
299 | } | ||
300 | return 0; | ||
301 | } | ||
302 | |||
303 | static void | ||
304 | vmxnet3_get_ethtool_stats(struct net_device *netdev, | ||
305 | struct ethtool_stats *stats, u64 *buf) | ||
306 | { | ||
307 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
308 | u8 *base; | ||
309 | int i; | ||
310 | |||
311 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); | ||
312 | |||
313 | /* this does assume each counter is 64-bit wide */ | ||
314 | |||
315 | base = (u8 *)&adapter->tqd_start->stats; | ||
316 | for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) | ||
317 | *buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset); | ||
318 | |||
319 | base = (u8 *)&adapter->tx_queue.stats; | ||
320 | for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) | ||
321 | *buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset); | ||
322 | |||
323 | base = (u8 *)&adapter->rqd_start->stats; | ||
324 | for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) | ||
325 | *buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset); | ||
326 | |||
327 | base = (u8 *)&adapter->rx_queue.stats; | ||
328 | for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) | ||
329 | *buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset); | ||
330 | |||
331 | base = (u8 *)adapter; | ||
332 | for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) | ||
333 | *buf++ = *(u64 *)(base + vmxnet3_global_stats[i].offset); | ||
334 | } | ||
335 | |||
336 | |||
337 | static void | ||
338 | vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) | ||
339 | { | ||
340 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
341 | u32 *buf = p; | ||
342 | |||
343 | memset(p, 0, vmxnet3_get_regs_len(netdev)); | ||
344 | |||
345 | regs->version = 1; | ||
346 | |||
347 | /* Update vmxnet3_get_regs_len if we want to dump more registers */ | ||
348 | |||
349 | /* each ring's state is dumped as four u32s (16 bytes) */ | ||
350 | buf[0] = adapter->tx_queue.tx_ring.next2fill; | ||
351 | buf[1] = adapter->tx_queue.tx_ring.next2comp; | ||
352 | buf[2] = adapter->tx_queue.tx_ring.gen; | ||
353 | buf[3] = 0; | ||
354 | |||
355 | buf[4] = adapter->tx_queue.comp_ring.next2proc; | ||
356 | buf[5] = adapter->tx_queue.comp_ring.gen; | ||
357 | buf[6] = adapter->tx_queue.stopped; | ||
358 | buf[7] = 0; | ||
359 | |||
360 | buf[8] = adapter->rx_queue.rx_ring[0].next2fill; | ||
361 | buf[9] = adapter->rx_queue.rx_ring[0].next2comp; | ||
362 | buf[10] = adapter->rx_queue.rx_ring[0].gen; | ||
363 | buf[11] = 0; | ||
364 | |||
365 | buf[12] = adapter->rx_queue.rx_ring[1].next2fill; | ||
366 | buf[13] = adapter->rx_queue.rx_ring[1].next2comp; | ||
367 | buf[14] = adapter->rx_queue.rx_ring[1].gen; | ||
368 | buf[15] = 0; | ||
369 | |||
370 | buf[16] = adapter->rx_queue.comp_ring.next2proc; | ||
371 | buf[17] = adapter->rx_queue.comp_ring.gen; | ||
372 | buf[18] = 0; | ||
373 | buf[19] = 0; | ||
374 | } | ||
375 | |||
376 | |||
377 | static void | ||
378 | vmxnet3_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | ||
379 | { | ||
380 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
381 | |||
382 | wol->supported = WAKE_UCAST | WAKE_ARP | WAKE_MAGIC; | ||
383 | wol->wolopts = adapter->wol; | ||
384 | } | ||
385 | |||
386 | |||
387 | static int | ||
388 | vmxnet3_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | ||
389 | { | ||
390 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
391 | |||
392 | if (wol->wolopts & (WAKE_PHY | WAKE_MCAST | WAKE_BCAST | | ||
393 | WAKE_MAGICSECURE)) { | ||
394 | return -EOPNOTSUPP; | ||
395 | } | ||
396 | |||
397 | adapter->wol = wol->wolopts; | ||
398 | |||
399 | device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); | ||
400 | |||
401 | return 0; | ||
402 | } | ||
403 | |||
404 | |||
405 | static int | ||
406 | vmxnet3_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | ||
407 | { | ||
408 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
409 | |||
410 | ecmd->supported = SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full | | ||
411 | SUPPORTED_TP; | ||
412 | ecmd->advertising = ADVERTISED_TP; | ||
413 | ecmd->port = PORT_TP; | ||
414 | ecmd->transceiver = XCVR_INTERNAL; | ||
415 | |||
416 | if (adapter->link_speed) { | ||
417 | ecmd->speed = adapter->link_speed; | ||
418 | ecmd->duplex = DUPLEX_FULL; | ||
419 | } else { | ||
420 | ecmd->speed = -1; | ||
421 | ecmd->duplex = -1; | ||
422 | } | ||
423 | return 0; | ||
424 | } | ||
425 | |||
426 | |||
427 | static void | ||
428 | vmxnet3_get_ringparam(struct net_device *netdev, | ||
429 | struct ethtool_ringparam *param) | ||
430 | { | ||
431 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
432 | |||
433 | param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE; | ||
434 | param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE; | ||
435 | param->rx_mini_max_pending = 0; | ||
436 | param->rx_jumbo_max_pending = 0; | ||
437 | |||
438 | param->rx_pending = adapter->rx_queue.rx_ring[0].size; | ||
439 | param->tx_pending = adapter->tx_queue.tx_ring.size; | ||
440 | param->rx_mini_pending = 0; | ||
441 | param->rx_jumbo_pending = 0; | ||
442 | } | ||
443 | |||
444 | |||
445 | static int | ||
446 | vmxnet3_set_ringparam(struct net_device *netdev, | ||
447 | struct ethtool_ringparam *param) | ||
448 | { | ||
449 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
450 | u32 new_tx_ring_size, new_rx_ring_size; | ||
451 | u32 sz; | ||
452 | int err = 0; | ||
453 | |||
454 | if (param->tx_pending == 0 || param->tx_pending > | ||
455 | VMXNET3_TX_RING_MAX_SIZE) | ||
456 | return -EINVAL; | ||
457 | |||
458 | if (param->rx_pending == 0 || param->rx_pending > | ||
459 | VMXNET3_RX_RING_MAX_SIZE) | ||
460 | return -EINVAL; | ||
461 | |||
462 | |||
463 | /* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */ | ||
464 | new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) & | ||
465 | ~VMXNET3_RING_SIZE_MASK; | ||
466 | new_tx_ring_size = min_t(u32, new_tx_ring_size, | ||
467 | VMXNET3_TX_RING_MAX_SIZE); | ||
468 | if (new_tx_ring_size > VMXNET3_TX_RING_MAX_SIZE || (new_tx_ring_size % | ||
469 | VMXNET3_RING_SIZE_ALIGN) != 0) | ||
470 | return -EINVAL; | ||
471 | |||
472 | /* ring0 has to be a multiple of | ||
473 | * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN | ||
474 | */ | ||
475 | sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; | ||
476 | new_rx_ring_size = (param->rx_pending + sz - 1) / sz * sz; | ||
477 | new_rx_ring_size = min_t(u32, new_rx_ring_size, | ||
478 | VMXNET3_RX_RING_MAX_SIZE / sz * sz); | ||
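| /* new_rx_ring_size is now the smallest multiple of sz >= rx_pending, capped at the largest multiple of sz within the max ring size */ | ||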
479 | if (new_rx_ring_size > VMXNET3_RX_RING_MAX_SIZE || (new_rx_ring_size % | ||
480 | sz) != 0) | ||
481 | return -EINVAL; | ||
482 | |||
483 | if (new_tx_ring_size == adapter->tx_queue.tx_ring.size && | ||
484 | new_rx_ring_size == adapter->rx_queue.rx_ring[0].size) { | ||
485 | return 0; | ||
486 | } | ||
487 | |||
488 | /* | ||
489 | * Reset_work may be in the middle of resetting the device, wait for its | ||
490 | * completion. | ||
491 | */ | ||
492 | while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) | ||
493 | msleep(1); | ||
494 | |||
495 | if (netif_running(netdev)) { | ||
496 | vmxnet3_quiesce_dev(adapter); | ||
497 | vmxnet3_reset_dev(adapter); | ||
498 | |||
499 | /* recreate the rx queue and the tx queue based on the | ||
500 | * new sizes */ | ||
501 | vmxnet3_tq_destroy(&adapter->tx_queue, adapter); | ||
502 | vmxnet3_rq_destroy(&adapter->rx_queue, adapter); | ||
503 | |||
504 | err = vmxnet3_create_queues(adapter, new_tx_ring_size, | ||
505 | new_rx_ring_size, VMXNET3_DEF_RX_RING_SIZE); | ||
506 | if (err) { | ||
507 | /* failed, most likely because of OOM, try default | ||
508 | * size */ | ||
509 | printk(KERN_ERR "%s: failed to apply new sizes, try the" | ||
510 | " default ones\n", netdev->name); | ||
511 | err = vmxnet3_create_queues(adapter, | ||
512 | VMXNET3_DEF_TX_RING_SIZE, | ||
513 | VMXNET3_DEF_RX_RING_SIZE, | ||
514 | VMXNET3_DEF_RX_RING_SIZE); | ||
515 | if (err) { | ||
516 | printk(KERN_ERR "%s: failed to create queues " | ||
517 | "with default sizes. Closing it\n", | ||
518 | netdev->name); | ||
519 | goto out; | ||
520 | } | ||
521 | } | ||
522 | |||
523 | err = vmxnet3_activate_dev(adapter); | ||
524 | if (err) | ||
525 | printk(KERN_ERR "%s: failed to re-activate, error %d." | ||
526 | " Closing it\n", netdev->name, err); | ||
527 | } | ||
528 | |||
529 | out: | ||
530 | clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); | ||
531 | if (err) | ||
532 | vmxnet3_force_close(adapter); | ||
533 | |||
534 | return err; | ||
535 | } | ||
536 | |||
537 | |||
538 | static struct ethtool_ops vmxnet3_ethtool_ops = { | ||
539 | .get_settings = vmxnet3_get_settings, | ||
540 | .get_drvinfo = vmxnet3_get_drvinfo, | ||
541 | .get_regs_len = vmxnet3_get_regs_len, | ||
542 | .get_regs = vmxnet3_get_regs, | ||
543 | .get_wol = vmxnet3_get_wol, | ||
544 | .set_wol = vmxnet3_set_wol, | ||
545 | .get_link = ethtool_op_get_link, | ||
546 | .get_rx_csum = vmxnet3_get_rx_csum, | ||
547 | .set_rx_csum = vmxnet3_set_rx_csum, | ||
548 | .get_tx_csum = ethtool_op_get_tx_csum, | ||
549 | .set_tx_csum = ethtool_op_set_tx_hw_csum, | ||
550 | .get_sg = ethtool_op_get_sg, | ||
551 | .set_sg = ethtool_op_set_sg, | ||
552 | .get_tso = ethtool_op_get_tso, | ||
553 | .set_tso = ethtool_op_set_tso, | ||
554 | .get_strings = vmxnet3_get_strings, | ||
555 | .get_flags = vmxnet3_get_flags, | ||
556 | .set_flags = vmxnet3_set_flags, | ||
557 | .get_sset_count = vmxnet3_get_sset_count, | ||
558 | .get_ethtool_stats = vmxnet3_get_ethtool_stats, | ||
559 | .get_ringparam = vmxnet3_get_ringparam, | ||
560 | .set_ringparam = vmxnet3_set_ringparam, | ||
561 | }; | ||
562 | |||
563 | void vmxnet3_set_ethtool_ops(struct net_device *netdev) | ||
564 | { | ||
565 | SET_ETHTOOL_OPS(netdev, &vmxnet3_ethtool_ops); | ||
566 | } | ||
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h new file mode 100644 index 000000000000..6bb91576e999 --- /dev/null +++ b/drivers/net/vmxnet3/vmxnet3_int.h | |||
@@ -0,0 +1,389 @@ | |||
1 | /* | ||
2 | * Linux driver for VMware's vmxnet3 ethernet NIC. | ||
3 | * | ||
4 | * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; version 2 of the License and no later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
13 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
14 | * details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | * | ||
20 | * The full GNU General Public License is included in this distribution in | ||
21 | * the file called "COPYING". | ||
22 | * | ||
23 | * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #ifndef _VMXNET3_INT_H | ||
28 | #define _VMXNET3_INT_H | ||
29 | |||
30 | #include <linux/types.h> | ||
31 | #include <linux/ethtool.h> | ||
32 | #include <linux/delay.h> | ||
33 | #include <linux/netdevice.h> | ||
34 | #include <linux/pci.h> | ||
36 | #include <linux/compiler.h> | ||
37 | #include <linux/module.h> | ||
38 | #include <linux/moduleparam.h> | ||
39 | #include <linux/slab.h> | ||
40 | #include <linux/spinlock.h> | ||
41 | #include <linux/ioport.h> | ||
42 | #include <linux/highmem.h> | ||
43 | #include <linux/init.h> | ||
44 | #include <linux/timer.h> | ||
45 | #include <linux/skbuff.h> | ||
46 | #include <linux/interrupt.h> | ||
47 | #include <linux/workqueue.h> | ||
48 | #include <linux/uaccess.h> | ||
49 | #include <asm/dma.h> | ||
50 | #include <asm/page.h> | ||
51 | |||
52 | #include <linux/tcp.h> | ||
53 | #include <linux/udp.h> | ||
54 | #include <linux/ip.h> | ||
55 | #include <linux/ipv6.h> | ||
56 | #include <linux/in.h> | ||
57 | #include <linux/etherdevice.h> | ||
58 | #include <asm/checksum.h> | ||
59 | #include <linux/if_vlan.h> | ||
60 | #include <linux/if_arp.h> | ||
61 | #include <linux/inetdevice.h> | ||
62 | #include <linux/dst.h> | ||
63 | |||
64 | #include "vmxnet3_defs.h" | ||
65 | |||
66 | #ifdef DEBUG | ||
67 | # define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI(debug)" | ||
68 | #else | ||
69 | # define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI" | ||
70 | #endif | ||
71 | |||
72 | |||
73 | /* | ||
74 | * Version numbers | ||
75 | */ | ||
76 | #define VMXNET3_DRIVER_VERSION_STRING "1.0.5.0-k" | ||
77 | |||
78 | /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ | ||
79 | #define VMXNET3_DRIVER_VERSION_NUM 0x01000500 | ||
80 | |||
81 | |||
82 | /* | ||
83 | * Capabilities | ||
84 | */ | ||
85 | |||
86 | enum { | ||
87 | VMNET_CAP_SG = 0x0001, /* Can do scatter-gather transmits. */ | ||
88 | VMNET_CAP_IP4_CSUM = 0x0002, /* Can checksum only TCP/UDP over | ||
89 | * IPv4 */ | ||
90 | VMNET_CAP_HW_CSUM = 0x0004, /* Can checksum all packets. */ | ||
91 | VMNET_CAP_HIGH_DMA = 0x0008, /* Can DMA to high memory. */ | ||
92 | VMNET_CAP_TOE = 0x0010, /* Supports TCP/IP offload. */ | ||
93 | VMNET_CAP_TSO = 0x0020, /* Supports TCP Segmentation | ||
94 | * offload */ | ||
95 | VMNET_CAP_SW_TSO = 0x0040, /* Supports SW TCP Segmentation */ | ||
96 | VMNET_CAP_VMXNET_APROM = 0x0080, /* Vmxnet APROM support */ | ||
97 | VMNET_CAP_HW_TX_VLAN = 0x0100, /* Can we do VLAN tagging in HW */ | ||
98 | VMNET_CAP_HW_RX_VLAN = 0x0200, /* Can we do VLAN untagging in HW */ | ||
99 | VMNET_CAP_SW_VLAN = 0x0400, /* VLAN tagging/untagging in SW */ | ||
100 | VMNET_CAP_WAKE_PCKT_RCV = 0x0800, /* Can wake on network packet recv? */ | ||
101 | VMNET_CAP_ENABLE_INT_INLINE = 0x1000, /* Enable Interrupt Inline */ | ||
102 | VMNET_CAP_ENABLE_HEADER_COPY = 0x2000, /* copy header for vmkernel */ | ||
103 | VMNET_CAP_TX_CHAIN = 0x4000, /* Guest can use multiple tx entries | ||
104 | * for a pkt */ | ||
105 | VMNET_CAP_RX_CHAIN = 0x8000, /* pkt can span multiple rx entries */ | ||
106 | VMNET_CAP_LPD = 0x10000, /* large pkt delivery */ | ||
107 | VMNET_CAP_BPF = 0x20000, /* BPF Support in VMXNET Virtual HW*/ | ||
108 | VMNET_CAP_SG_SPAN_PAGES = 0x40000, /* Scatter-gather transmits can */ | ||
109 | /* span multiple pages. */ | ||
110 | VMNET_CAP_IP6_CSUM = 0x80000, /* Can do IPv6 csum offload. */ | ||
111 | VMNET_CAP_TSO6 = 0x100000, /* TSO seg. offload for IPv6 pkts. */ | ||
112 | VMNET_CAP_TSO256k = 0x200000, /* Can do TSO seg offload for */ | ||
113 | /* pkts up to 256kB. */ | ||
114 | VMNET_CAP_UPT = 0x400000 /* Support UPT */ | ||
115 | }; | ||
116 | |||
117 | /* | ||
118 | * PCI vendor and device IDs. | ||
119 | */ | ||
120 | #define PCI_VENDOR_ID_VMWARE 0x15AD | ||
121 | #define PCI_DEVICE_ID_VMWARE_VMXNET3 0x07B0 | ||
122 | #define MAX_ETHERNET_CARDS 10 | ||
123 | #define MAX_PCI_PASSTHRU_DEVICE 6 | ||
124 | |||
125 | struct vmxnet3_cmd_ring { | ||
126 | union Vmxnet3_GenericDesc *base; | ||
127 | u32 size; | ||
128 | u32 next2fill; | ||
129 | u32 next2comp; | ||
130 | u8 gen; | ||
131 | dma_addr_t basePA; | ||
132 | }; | ||
133 | |||
134 | static inline void | ||
135 | vmxnet3_cmd_ring_adv_next2fill(struct vmxnet3_cmd_ring *ring) | ||
136 | { | ||
137 | ring->next2fill++; | ||
138 | if (unlikely(ring->next2fill == ring->size)) { | ||
139 | ring->next2fill = 0; | ||
140 | VMXNET3_FLIP_RING_GEN(ring->gen); | ||
141 | } | ||
142 | } | ||
143 | |||
144 | static inline void | ||
145 | vmxnet3_cmd_ring_adv_next2comp(struct vmxnet3_cmd_ring *ring) | ||
146 | { | ||
147 | VMXNET3_INC_RING_IDX_ONLY(ring->next2comp, ring->size); | ||
148 | } | ||
149 | |||
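| /* number of free descriptors; one slot is always left unused so a full ring can be told apart from an empty one */ | ||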
150 | static inline int | ||
151 | vmxnet3_cmd_ring_desc_avail(struct vmxnet3_cmd_ring *ring) | ||
152 | { | ||
153 | return (ring->next2comp > ring->next2fill ? 0 : ring->size) + | ||
154 | ring->next2comp - ring->next2fill - 1; | ||
155 | } | ||
156 | |||
157 | struct vmxnet3_comp_ring { | ||
158 | union Vmxnet3_GenericDesc *base; | ||
159 | u32 size; | ||
160 | u32 next2proc; | ||
161 | u8 gen; | ||
162 | u8 intr_idx; | ||
163 | dma_addr_t basePA; | ||
164 | }; | ||
165 | |||
166 | static inline void | ||
167 | vmxnet3_comp_ring_adv_next2proc(struct vmxnet3_comp_ring *ring) | ||
168 | { | ||
169 | ring->next2proc++; | ||
170 | if (unlikely(ring->next2proc == ring->size)) { | ||
171 | ring->next2proc = 0; | ||
172 | VMXNET3_FLIP_RING_GEN(ring->gen); | ||
173 | } | ||
174 | } | ||
175 | |||
176 | struct vmxnet3_tx_data_ring { | ||
177 | struct Vmxnet3_TxDataDesc *base; | ||
178 | u32 size; | ||
179 | dma_addr_t basePA; | ||
180 | }; | ||
181 | |||
182 | enum vmxnet3_buf_map_type { | ||
183 | VMXNET3_MAP_INVALID = 0, | ||
184 | VMXNET3_MAP_NONE, | ||
185 | VMXNET3_MAP_SINGLE, | ||
186 | VMXNET3_MAP_PAGE, | ||
187 | }; | ||
188 | |||
189 | struct vmxnet3_tx_buf_info { | ||
190 | u32 map_type; | ||
191 | u16 len; | ||
192 | u16 sop_idx; | ||
193 | dma_addr_t dma_addr; | ||
194 | struct sk_buff *skb; | ||
195 | }; | ||
196 | |||
197 | struct vmxnet3_tq_driver_stats { | ||
198 | u64 drop_total; /* # of pkts dropped by the driver, the | ||
199 | * counters below track droppings due to | ||
200 | * different reasons | ||
201 | */ | ||
202 | u64 drop_too_many_frags; | ||
203 | u64 drop_oversized_hdr; | ||
204 | u64 drop_hdr_inspect_err; | ||
205 | u64 drop_tso; | ||
206 | |||
207 | u64 tx_ring_full; | ||
208 | u64 linearized; /* # of pkts linearized */ | ||
209 | u64 copy_skb_header; /* # of times we have to copy skb header */ | ||
210 | u64 oversized_hdr; | ||
211 | }; | ||
212 | |||
213 | struct vmxnet3_tx_ctx { | ||
214 | bool ipv4; | ||
215 | u16 mss; | ||
216 | u32 eth_ip_hdr_size; /* only valid for pkts requesting tso or csum | ||
217 | * offloading | ||
218 | */ | ||
219 | u32 l4_hdr_size; /* only valid if mss != 0 */ | ||
220 | u32 copy_size; /* # of bytes copied into the data ring */ | ||
221 | union Vmxnet3_GenericDesc *sop_txd; | ||
222 | union Vmxnet3_GenericDesc *eop_txd; | ||
223 | }; | ||
224 | |||
225 | struct vmxnet3_tx_queue { | ||
226 | spinlock_t tx_lock; | ||
227 | struct vmxnet3_cmd_ring tx_ring; | ||
228 | struct vmxnet3_tx_buf_info *buf_info; | ||
229 | struct vmxnet3_tx_data_ring data_ring; | ||
230 | struct vmxnet3_comp_ring comp_ring; | ||
231 | struct Vmxnet3_TxQueueCtrl *shared; | ||
232 | struct vmxnet3_tq_driver_stats stats; | ||
233 | bool stopped; | ||
234 | int num_stop; /* # of times the queue is | ||
235 | * stopped */ | ||
236 | } __attribute__((__aligned__(SMP_CACHE_BYTES))); | ||
237 | |||
238 | enum vmxnet3_rx_buf_type { | ||
239 | VMXNET3_RX_BUF_NONE = 0, | ||
240 | VMXNET3_RX_BUF_SKB = 1, | ||
241 | VMXNET3_RX_BUF_PAGE = 2 | ||
242 | }; | ||
243 | |||
244 | struct vmxnet3_rx_buf_info { | ||
245 | enum vmxnet3_rx_buf_type buf_type; | ||
246 | u16 len; | ||
247 | union { | ||
248 | struct sk_buff *skb; | ||
249 | struct page *page; | ||
250 | }; | ||
251 | dma_addr_t dma_addr; | ||
252 | }; | ||
253 | |||
254 | struct vmxnet3_rx_ctx { | ||
255 | struct sk_buff *skb; | ||
256 | u32 sop_idx; | ||
257 | }; | ||
258 | |||
259 | struct vmxnet3_rq_driver_stats { | ||
260 | u64 drop_total; | ||
261 | u64 drop_err; | ||
262 | u64 drop_fcs; | ||
263 | u64 rx_buf_alloc_failure; | ||
264 | }; | ||
265 | |||
266 | struct vmxnet3_rx_queue { | ||
267 | struct vmxnet3_cmd_ring rx_ring[2]; | ||
268 | struct vmxnet3_comp_ring comp_ring; | ||
269 | struct vmxnet3_rx_ctx rx_ctx; | ||
270 | u32 qid; /* rqID in RCD for buffer from 1st ring */ | ||
271 | u32 qid2; /* rqID in RCD for buffer from 2nd ring */ | ||
272 | u32 uncommitted[2]; /* # of buffers allocated since last RXPROD | ||
273 | * update */ | ||
274 | struct vmxnet3_rx_buf_info *buf_info[2]; | ||
275 | struct Vmxnet3_RxQueueCtrl *shared; | ||
276 | struct vmxnet3_rq_driver_stats stats; | ||
277 | } __attribute__((__aligned__(SMP_CACHE_BYTES))); | ||
278 | |||
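| /* the driver currently uses at most one interrupt vector, whether MSI-X, MSI or INTx */ | ||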
279 | #define VMXNET3_LINUX_MAX_MSIX_VECT 1 | ||
280 | |||
281 | struct vmxnet3_intr { | ||
282 | enum vmxnet3_intr_mask_mode mask_mode; | ||
283 | enum vmxnet3_intr_type type; /* MSI-X, MSI, or INTx? */ | ||
284 | u8 num_intrs; /* # of intr vectors */ | ||
285 | u8 event_intr_idx; /* idx of the intr vector for event */ | ||
286 | u8 mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */ | ||
287 | #ifdef CONFIG_PCI_MSI | ||
288 | struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT]; | ||
289 | #endif | ||
290 | }; | ||
291 | |||
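| /* bit numbers for adapter->state, manipulated with set_bit/clear_bit/test_and_set_bit */ | ||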
292 | #define VMXNET3_STATE_BIT_RESETTING 0 | ||
293 | #define VMXNET3_STATE_BIT_QUIESCED 1 | ||
294 | struct vmxnet3_adapter { | ||
295 | struct vmxnet3_tx_queue tx_queue; | ||
296 | struct vmxnet3_rx_queue rx_queue; | ||
297 | struct napi_struct napi; | ||
298 | struct vlan_group *vlan_grp; | ||
299 | |||
300 | struct vmxnet3_intr intr; | ||
301 | |||
302 | struct Vmxnet3_DriverShared *shared; | ||
303 | struct Vmxnet3_PMConf *pm_conf; | ||
304 | struct Vmxnet3_TxQueueDesc *tqd_start; /* first tx queue desc */ | ||
305 | struct Vmxnet3_RxQueueDesc *rqd_start; /* first rx queue desc */ | ||
306 | struct net_device *netdev; | ||
307 | struct pci_dev *pdev; | ||
308 | |||
309 | u8 *hw_addr0; /* for BAR 0 */ | ||
310 | u8 *hw_addr1; /* for BAR 1 */ | ||
311 | |||
312 | /* feature control */ | ||
313 | bool rxcsum; | ||
314 | bool lro; | ||
315 | bool jumbo_frame; | ||
316 | |||
317 | /* rx buffer related */ | ||
318 | unsigned skb_buf_size; | ||
319 | int rx_buf_per_pkt; /* only applies to the 1st ring */ | ||
320 | dma_addr_t shared_pa; | ||
321 | dma_addr_t queue_desc_pa; | ||
322 | |||
323 | /* Wake-on-LAN */ | ||
324 | u32 wol; | ||
325 | |||
326 | /* Link speed */ | ||
327 | u32 link_speed; /* in mbps */ | ||
328 | |||
329 | u64 tx_timeout_count; | ||
330 | struct work_struct work; | ||
331 | |||
332 | unsigned long state; /* VMXNET3_STATE_BIT_xxx */ | ||
333 | |||
334 | int dev_number; | ||
335 | }; | ||
336 | |||
337 | #define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \ | ||
338 | writel((val), (adapter)->hw_addr0 + (reg)) | ||
339 | #define VMXNET3_READ_BAR0_REG(adapter, reg) \ | ||
340 | readl((adapter)->hw_addr0 + (reg)) | ||
341 | |||
342 | #define VMXNET3_WRITE_BAR1_REG(adapter, reg, val) \ | ||
343 | writel((val), (adapter)->hw_addr1 + (reg)) | ||
344 | #define VMXNET3_READ_BAR1_REG(adapter, reg) \ | ||
345 | readl((adapter)->hw_addr1 + (reg)) | ||
346 | |||
347 | #define VMXNET3_WAKE_QUEUE_THRESHOLD(tq) (5) | ||
348 | #define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \ | ||
349 | ((rq)->rx_ring[ring_idx].size >> 3) | ||
350 | |||
351 | #define VMXNET3_GET_ADDR_LO(dma) ((u32)(dma)) | ||
352 | #define VMXNET3_GET_ADDR_HI(dma) ((u32)(((u64)(dma)) >> 32)) | ||
353 | |||
354 | /* must be a multiple of VMXNET3_RING_SIZE_ALIGN */ | ||
355 | #define VMXNET3_DEF_TX_RING_SIZE 512 | ||
356 | #define VMXNET3_DEF_RX_RING_SIZE 256 | ||
357 | |||
358 | #define VMXNET3_MAX_ETH_HDR_SIZE 22 | ||
359 | #define VMXNET3_MAX_SKB_BUF_SIZE (3*1024) | ||
360 | |||
361 | int | ||
362 | vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter); | ||
363 | |||
364 | int | ||
365 | vmxnet3_activate_dev(struct vmxnet3_adapter *adapter); | ||
366 | |||
367 | void | ||
368 | vmxnet3_force_close(struct vmxnet3_adapter *adapter); | ||
369 | |||
370 | void | ||
371 | vmxnet3_reset_dev(struct vmxnet3_adapter *adapter); | ||
372 | |||
373 | void | ||
374 | vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq, | ||
375 | struct vmxnet3_adapter *adapter); | ||
376 | |||
377 | void | ||
378 | vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, | ||
379 | struct vmxnet3_adapter *adapter); | ||
380 | |||
381 | int | ||
382 | vmxnet3_create_queues(struct vmxnet3_adapter *adapter, | ||
383 | u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size); | ||
384 | |||
385 | extern void vmxnet3_set_ethtool_ops(struct net_device *netdev); | ||
386 | extern struct net_device_stats *vmxnet3_get_stats(struct net_device *netdev); | ||
387 | |||
388 | extern char vmxnet3_driver_name[]; | ||
389 | #endif | ||
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index cf5fd17ad707..f1bff98acd1f 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c | |||
@@ -58,8 +58,7 @@ struct cisco_state { | |||
58 | spinlock_t lock; | 58 | spinlock_t lock; |
59 | unsigned long last_poll; | 59 | unsigned long last_poll; |
60 | int up; | 60 | int up; |
61 | int request_sent; | 61 | u32 txseq; /* TX sequence number, 0 = none */ |
62 | u32 txseq; /* TX sequence number */ | ||
63 | u32 rxseq; /* RX sequence number */ | 62 | u32 rxseq; /* RX sequence number */ |
64 | }; | 63 | }; |
65 | 64 | ||
@@ -163,6 +162,7 @@ static int cisco_rx(struct sk_buff *skb) | |||
163 | struct cisco_packet *cisco_data; | 162 | struct cisco_packet *cisco_data; |
164 | struct in_device *in_dev; | 163 | struct in_device *in_dev; |
165 | __be32 addr, mask; | 164 | __be32 addr, mask; |
165 | u32 ack; | ||
166 | 166 | ||
167 | if (skb->len < sizeof(struct hdlc_header)) | 167 | if (skb->len < sizeof(struct hdlc_header)) |
168 | goto rx_error; | 168 | goto rx_error; |
@@ -223,8 +223,10 @@ static int cisco_rx(struct sk_buff *skb) | |||
223 | case CISCO_KEEPALIVE_REQ: | 223 | case CISCO_KEEPALIVE_REQ: |
224 | spin_lock(&st->lock); | 224 | spin_lock(&st->lock); |
225 | st->rxseq = ntohl(cisco_data->par1); | 225 | st->rxseq = ntohl(cisco_data->par1); |
226 | if (st->request_sent && | 226 | ack = ntohl(cisco_data->par2); |
227 | ntohl(cisco_data->par2) == st->txseq) { | 227 | if (ack && (ack == st->txseq || |
228 | /* our current REQ may be in transit */ | ||
229 | ack == st->txseq - 1)) { | ||
228 | st->last_poll = jiffies; | 230 | st->last_poll = jiffies; |
229 | if (!st->up) { | 231 | if (!st->up) { |
230 | u32 sec, min, hrs, days; | 232 | u32 sec, min, hrs, days; |
@@ -275,7 +277,6 @@ static void cisco_timer(unsigned long arg) | |||
275 | 277 | ||
276 | cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, htonl(++st->txseq), | 278 | cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, htonl(++st->txseq), |
277 | htonl(st->rxseq)); | 279 | htonl(st->rxseq)); |
278 | st->request_sent = 1; | ||
279 | spin_unlock(&st->lock); | 280 | spin_unlock(&st->lock); |
280 | 281 | ||
281 | st->timer.expires = jiffies + st->settings.interval * HZ; | 282 | st->timer.expires = jiffies + st->settings.interval * HZ; |
@@ -293,9 +294,7 @@ static void cisco_start(struct net_device *dev) | |||
293 | unsigned long flags; | 294 | unsigned long flags; |
294 | 295 | ||
295 | spin_lock_irqsave(&st->lock, flags); | 296 | spin_lock_irqsave(&st->lock, flags); |
296 | st->up = 0; | 297 | st->up = st->txseq = st->rxseq = 0; |
297 | st->request_sent = 0; | ||
298 | st->txseq = st->rxseq = 0; | ||
299 | spin_unlock_irqrestore(&st->lock, flags); | 298 | spin_unlock_irqrestore(&st->lock, flags); |
300 | 299 | ||
301 | init_timer(&st->timer); | 300 | init_timer(&st->timer); |
@@ -317,8 +316,7 @@ static void cisco_stop(struct net_device *dev) | |||
317 | 316 | ||
318 | spin_lock_irqsave(&st->lock, flags); | 317 | spin_lock_irqsave(&st->lock, flags); |
319 | netif_dormant_on(dev); | 318 | netif_dormant_on(dev); |
320 | st->up = 0; | 319 | st->up = st->txseq = 0; |
321 | st->request_sent = 0; | ||
322 | spin_unlock_irqrestore(&st->lock, flags); | 320 | spin_unlock_irqrestore(&st->lock, flags); |
323 | } | 321 | } |
324 | 322 | ||
diff --git a/drivers/net/wireless/adm8211.h b/drivers/net/wireless/adm8211.h index 4f6ab1322189..b07e4d3a6b4d 100644 --- a/drivers/net/wireless/adm8211.h +++ b/drivers/net/wireless/adm8211.h | |||
@@ -266,7 +266,7 @@ do { \ | |||
266 | #define ADM8211_SYNCTL_CS1 (1 << 28) | 266 | #define ADM8211_SYNCTL_CS1 (1 << 28) |
267 | #define ADM8211_SYNCTL_CAL (1 << 27) | 267 | #define ADM8211_SYNCTL_CAL (1 << 27) |
268 | #define ADM8211_SYNCTL_SELCAL (1 << 26) | 268 | #define ADM8211_SYNCTL_SELCAL (1 << 26) |
269 | #define ADM8211_SYNCTL_RFtype ((1 << 24) || (1 << 23) || (1 << 22)) | 269 | #define ADM8211_SYNCTL_RFtype ((1 << 24) | (1 << 23) | (1 << 22)) |
270 | #define ADM8211_SYNCTL_RFMD (1 << 22) | 270 | #define ADM8211_SYNCTL_RFMD (1 << 22) |
271 | #define ADM8211_SYNCTL_GENERAL (0x7 << 22) | 271 | #define ADM8211_SYNCTL_GENERAL (0x7 << 22) |
272 | /* SYNCTL 21:0 Data (Si4126: 18-bit data, 4-bit address) */ | 272 | /* SYNCTL 21:0 Data (Si4126: 18-bit data, 4-bit address) */ |
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h index fa1549a03c71..660716214d49 100644 --- a/drivers/net/wireless/b43/b43.h +++ b/drivers/net/wireless/b43/b43.h | |||
@@ -607,82 +607,7 @@ struct b43_qos_params { | |||
607 | struct ieee80211_tx_queue_params p; | 607 | struct ieee80211_tx_queue_params p; |
608 | }; | 608 | }; |
609 | 609 | ||
610 | struct b43_wldev; | 610 | struct b43_wl; |
611 | |||
612 | /* Data structure for the WLAN parts (802.11 cores) of the b43 chip. */ | ||
613 | struct b43_wl { | ||
614 | /* Pointer to the active wireless device on this chip */ | ||
615 | struct b43_wldev *current_dev; | ||
616 | /* Pointer to the ieee80211 hardware data structure */ | ||
617 | struct ieee80211_hw *hw; | ||
618 | |||
619 | /* Global driver mutex. Every operation must run with this mutex locked. */ | ||
620 | struct mutex mutex; | ||
621 | /* Hard-IRQ spinlock. This lock protects things used in the hard-IRQ | ||
622 | * handler, only. This basically is just the IRQ mask register. */ | ||
623 | spinlock_t hardirq_lock; | ||
624 | |||
625 | /* The number of queues that were registered with the mac80211 subsystem | ||
626 | * initially. This is a backup copy of hw->queues in case hw->queues has | ||
627 | * to be dynamically lowered at runtime (Firmware does not support QoS). | ||
628 | * hw->queues has to be restored to the original value before unregistering | ||
629 | * from the mac80211 subsystem. */ | ||
630 | u16 mac80211_initially_registered_queues; | ||
631 | |||
632 | /* We can only have one operating interface (802.11 core) | ||
633 | * at a time. General information about this interface follows. | ||
634 | */ | ||
635 | |||
636 | struct ieee80211_vif *vif; | ||
637 | /* The MAC address of the operating interface. */ | ||
638 | u8 mac_addr[ETH_ALEN]; | ||
639 | /* Current BSSID */ | ||
640 | u8 bssid[ETH_ALEN]; | ||
641 | /* Interface type. (NL80211_IFTYPE_XXX) */ | ||
642 | int if_type; | ||
643 | /* Is the card operating in AP, STA or IBSS mode? */ | ||
644 | bool operating; | ||
645 | /* filter flags */ | ||
646 | unsigned int filter_flags; | ||
647 | /* Stats about the wireless interface */ | ||
648 | struct ieee80211_low_level_stats ieee_stats; | ||
649 | |||
650 | #ifdef CONFIG_B43_HWRNG | ||
651 | struct hwrng rng; | ||
652 | bool rng_initialized; | ||
653 | char rng_name[30 + 1]; | ||
654 | #endif /* CONFIG_B43_HWRNG */ | ||
655 | |||
656 | /* List of all wireless devices on this chip */ | ||
657 | struct list_head devlist; | ||
658 | u8 nr_devs; | ||
659 | |||
660 | bool radiotap_enabled; | ||
661 | bool radio_enabled; | ||
662 | |||
663 | /* The beacon we are currently using (AP or IBSS mode). */ | ||
664 | struct sk_buff *current_beacon; | ||
665 | bool beacon0_uploaded; | ||
666 | bool beacon1_uploaded; | ||
667 | bool beacon_templates_virgin; /* Never wrote the templates? */ | ||
668 | struct work_struct beacon_update_trigger; | ||
669 | |||
670 | /* The current QOS parameters for the 4 queues. */ | ||
671 | struct b43_qos_params qos_params[4]; | ||
672 | |||
673 | /* Work for adjustment of the transmission power. | ||
674 | * This is scheduled when we determine that the actual TX output | ||
675 | * power doesn't match what we want. */ | ||
676 | struct work_struct txpower_adjust_work; | ||
677 | |||
678 | /* Packet transmit work */ | ||
679 | struct work_struct tx_work; | ||
680 | /* Queue of packets to be transmitted. */ | ||
681 | struct sk_buff_head tx_queue; | ||
682 | |||
683 | /* The device LEDs. */ | ||
684 | struct b43_leds leds; | ||
685 | }; | ||
686 | 611 | ||
687 | /* The type of the firmware file. */ | 612 | /* The type of the firmware file. */ |
688 | enum b43_firmware_file_type { | 613 | enum b43_firmware_file_type { |
@@ -824,6 +749,97 @@ struct b43_wldev { | |||
824 | #endif | 749 | #endif |
825 | }; | 750 | }; |
826 | 751 | ||
752 | /* | ||
753 | * Include goes here to avoid a dependency problem. | ||
754 | * A better fix would be to integrate xmit.h into b43.h. | ||
755 | */ | ||
756 | #include "xmit.h" | ||
757 | |||
758 | /* Data structure for the WLAN parts (802.11 cores) of the b43 chip. */ | ||
759 | struct b43_wl { | ||
760 | /* Pointer to the active wireless device on this chip */ | ||
761 | struct b43_wldev *current_dev; | ||
762 | /* Pointer to the ieee80211 hardware data structure */ | ||
763 | struct ieee80211_hw *hw; | ||
764 | |||
765 | /* Global driver mutex. Every operation must run with this mutex locked. */ | ||
766 | struct mutex mutex; | ||
767 | /* Hard-IRQ spinlock. This lock protects things used in the hard-IRQ | ||
768 | * handler, only. This basically is just the IRQ mask register. */ | ||
769 | spinlock_t hardirq_lock; | ||
770 | |||
771 | /* The number of queues that were registered with the mac80211 subsystem | ||
772 | * initially. This is a backup copy of hw->queues in case hw->queues has | ||
773 | * to be dynamically lowered at runtime (Firmware does not support QoS). | ||
774 | * hw->queues has to be restored to the original value before unregistering | ||
775 | * from the mac80211 subsystem. */ | ||
776 | u16 mac80211_initially_registered_queues; | ||
777 | |||
778 | /* We can only have one operating interface (802.11 core) | ||
779 | * at a time. General information about this interface follows. | ||
780 | */ | ||
781 | |||
782 | struct ieee80211_vif *vif; | ||
783 | /* The MAC address of the operating interface. */ | ||
784 | u8 mac_addr[ETH_ALEN]; | ||
785 | /* Current BSSID */ | ||
786 | u8 bssid[ETH_ALEN]; | ||
787 | /* Interface type. (NL80211_IFTYPE_XXX) */ | ||
788 | int if_type; | ||
789 | /* Is the card operating in AP, STA or IBSS mode? */ | ||
790 | bool operating; | ||
791 | /* filter flags */ | ||
792 | unsigned int filter_flags; | ||
793 | /* Stats about the wireless interface */ | ||
794 | struct ieee80211_low_level_stats ieee_stats; | ||
795 | |||
796 | #ifdef CONFIG_B43_HWRNG | ||
797 | struct hwrng rng; | ||
798 | bool rng_initialized; | ||
799 | char rng_name[30 + 1]; | ||
800 | #endif /* CONFIG_B43_HWRNG */ | ||
801 | |||
802 | /* List of all wireless devices on this chip */ | ||
803 | struct list_head devlist; | ||
804 | u8 nr_devs; | ||
805 | |||
806 | bool radiotap_enabled; | ||
807 | bool radio_enabled; | ||
808 | |||
809 | /* The beacon we are currently using (AP or IBSS mode). */ | ||
810 | struct sk_buff *current_beacon; | ||
811 | bool beacon0_uploaded; | ||
812 | bool beacon1_uploaded; | ||
813 | bool beacon_templates_virgin; /* Never wrote the templates? */ | ||
814 | struct work_struct beacon_update_trigger; | ||
815 | |||
816 | /* The current QOS parameters for the 4 queues. */ | ||
817 | struct b43_qos_params qos_params[4]; | ||
818 | |||
819 | /* Work for adjustment of the transmission power. | ||
820 | * This is scheduled when we determine that the actual TX output | ||
821 | * power doesn't match what we want. */ | ||
822 | struct work_struct txpower_adjust_work; | ||
823 | |||
824 | /* Packet transmit work */ | ||
825 | struct work_struct tx_work; | ||
826 | /* Queue of packets to be transmitted. */ | ||
827 | struct sk_buff_head tx_queue; | ||
828 | |||
829 | /* The device LEDs. */ | ||
830 | struct b43_leds leds; | ||
831 | |||
832 | #ifdef CONFIG_B43_PIO | ||
833 | /* | ||
834 | * RX/TX header/tail buffers used by the frame transmit functions. | ||
835 | */ | ||
836 | struct b43_rxhdr_fw4 rxhdr; | ||
837 | struct b43_txhdr txhdr; | ||
838 | u8 rx_tail[4]; | ||
839 | u8 tx_tail[4]; | ||
840 | #endif /* CONFIG_B43_PIO */ | ||
841 | }; | ||
842 | |||
827 | static inline struct b43_wl *hw_to_b43_wl(struct ieee80211_hw *hw) | 843 | static inline struct b43_wl *hw_to_b43_wl(struct ieee80211_hw *hw) |
828 | { | 844 | { |
829 | return hw->priv; | 845 | return hw->priv; |
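The b43.h reshuffle moves struct b43_wl below struct b43_wldev and pulls in xmit.h so the new PIO scratch members (rxhdr, txhdr, rx_tail, tx_tail, under CONFIG_B43_PIO) can be embedded by value; the pio.c hunk further down switches its on-stack buffers over to these fields. The ordering matters because C needs only a forward declaration to hold a pointer, but embedding a member requires the complete type. A tiny sketch of that rule (types invented for illustration):

    #include <stdio.h>

    struct hdr { unsigned short frame_len; }; /* stands in for b43_rxhdr_fw4 */

    struct wldev;                             /* forward declaration only */

    struct wl {
        struct wldev *current_dev; /* a pointer needs only the forward decl */
        struct hdr    rxhdr;       /* an embedded member needs the full type,
                                      hence the #include "xmit.h" before the
                                      struct definition */
    };

    int main(void)
    {
        struct wl w = { 0 };
        printf("sizeof(w.rxhdr) = %zu\n", sizeof(w.rxhdr));
        return 0;
    }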
diff --git a/drivers/net/wireless/b43/leds.c b/drivers/net/wireless/b43/leds.c index fbe3d4f62ce2..1e8dba488004 100644 --- a/drivers/net/wireless/b43/leds.c +++ b/drivers/net/wireless/b43/leds.c | |||
@@ -348,9 +348,9 @@ void b43_leds_register(struct b43_wldev *dev) | |||
348 | } | 348 | } |
349 | } | 349 | } |
350 | 350 | ||
351 | void b43_leds_unregister(struct b43_wldev *dev) | 351 | void b43_leds_unregister(struct b43_wl *wl) |
352 | { | 352 | { |
353 | struct b43_leds *leds = &dev->wl->leds; | 353 | struct b43_leds *leds = &wl->leds; |
354 | 354 | ||
355 | b43_unregister_led(&leds->led_tx); | 355 | b43_unregister_led(&leds->led_tx); |
356 | b43_unregister_led(&leds->led_rx); | 356 | b43_unregister_led(&leds->led_rx); |
diff --git a/drivers/net/wireless/b43/leds.h b/drivers/net/wireless/b43/leds.h index 9592e4c5a5f5..4c56187810fc 100644 --- a/drivers/net/wireless/b43/leds.h +++ b/drivers/net/wireless/b43/leds.h | |||
@@ -60,7 +60,7 @@ enum b43_led_behaviour { | |||
60 | }; | 60 | }; |
61 | 61 | ||
62 | void b43_leds_register(struct b43_wldev *dev); | 62 | void b43_leds_register(struct b43_wldev *dev); |
63 | void b43_leds_unregister(struct b43_wldev *dev); | 63 | void b43_leds_unregister(struct b43_wl *wl); |
64 | void b43_leds_init(struct b43_wldev *dev); | 64 | void b43_leds_init(struct b43_wldev *dev); |
65 | void b43_leds_exit(struct b43_wldev *dev); | 65 | void b43_leds_exit(struct b43_wldev *dev); |
66 | void b43_leds_stop(struct b43_wldev *dev); | 66 | void b43_leds_stop(struct b43_wldev *dev); |
@@ -76,7 +76,7 @@ struct b43_leds { | |||
76 | static inline void b43_leds_register(struct b43_wldev *dev) | 76 | static inline void b43_leds_register(struct b43_wldev *dev) |
77 | { | 77 | { |
78 | } | 78 | } |
79 | static inline void b43_leds_unregister(struct b43_wldev *dev) | 79 | static inline void b43_leds_unregister(struct b43_wl *wl) |
80 | { | 80 | { |
81 | } | 81 | } |
82 | static inline void b43_leds_init(struct b43_wldev *dev) | 82 | static inline void b43_leds_init(struct b43_wldev *dev) |
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 9b907a36bb8c..df6b26a0c05e 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c | |||
@@ -3874,6 +3874,7 @@ static struct b43_wldev * b43_wireless_core_stop(struct b43_wldev *dev) | |||
3874 | { | 3874 | { |
3875 | struct b43_wl *wl = dev->wl; | 3875 | struct b43_wl *wl = dev->wl; |
3876 | struct b43_wldev *orig_dev; | 3876 | struct b43_wldev *orig_dev; |
3877 | u32 mask; | ||
3877 | 3878 | ||
3878 | redo: | 3879 | redo: |
3879 | if (!dev || b43_status(dev) < B43_STAT_STARTED) | 3880 | if (!dev || b43_status(dev) < B43_STAT_STARTED) |
@@ -3920,7 +3921,8 @@ redo: | |||
3920 | goto redo; | 3921 | goto redo; |
3921 | return dev; | 3922 | return dev; |
3922 | } | 3923 | } |
3923 | B43_WARN_ON(b43_read32(dev, B43_MMIO_GEN_IRQ_MASK)); | 3924 | mask = b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); |
3925 | B43_WARN_ON(mask != 0xFFFFFFFF && mask); | ||
3924 | 3926 | ||
3925 | /* Drain the TX queue */ | 3927 | /* Drain the TX queue */ |
3926 | while (skb_queue_len(&wl->tx_queue)) | 3928 | while (skb_queue_len(&wl->tx_queue)) |
@@ -4499,6 +4501,7 @@ static void b43_op_stop(struct ieee80211_hw *hw) | |||
4499 | 4501 | ||
4500 | cancel_work_sync(&(wl->beacon_update_trigger)); | 4502 | cancel_work_sync(&(wl->beacon_update_trigger)); |
4501 | 4503 | ||
4504 | wiphy_rfkill_stop_polling(hw->wiphy); | ||
4502 | mutex_lock(&wl->mutex); | 4505 | mutex_lock(&wl->mutex); |
4503 | if (b43_status(dev) >= B43_STAT_STARTED) { | 4506 | if (b43_status(dev) >= B43_STAT_STARTED) { |
4504 | dev = b43_wireless_core_stop(dev); | 4507 | dev = b43_wireless_core_stop(dev); |
@@ -4997,7 +5000,7 @@ static void b43_remove(struct ssb_device *dev) | |||
4997 | 5000 | ||
4998 | if (list_empty(&wl->devlist)) { | 5001 | if (list_empty(&wl->devlist)) { |
4999 | b43_rng_exit(wl); | 5002 | b43_rng_exit(wl); |
5000 | b43_leds_unregister(wldev); | 5003 | b43_leds_unregister(wl); |
5001 | /* Last core on the chip unregistered. | 5004 | /* Last core on the chip unregistered. |
5002 | * We can destroy common struct b43_wl. | 5005 | * We can destroy common struct b43_wl. |
5003 | */ | 5006 | */ |
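Two behavioural notes on the main.c hunks: b43_op_stop() now stops wiphy rfkill polling before taking the driver mutex, and the shutdown-time check on B43_MMIO_GEN_IRQ_MASK tolerates a value of 0xFFFFFFFF, which is what register reads typically return once the card has been unplugged, so only a genuinely half-cleared mask triggers the warning. A sketch of that kind of check (helper name is mine):

    #include <assert.h>
    #include <stdint.h>

    /* Reads from a removed PCI/SSB device usually come back as all-ones,
     * so an "IRQ mask should be clear" check must accept that case too. */
    static int irq_mask_is_sane(uint32_t mask)
    {
        return mask == 0 || mask == 0xFFFFFFFFu;
    }

    int main(void)
    {
        assert(irq_mask_is_sane(0));            /* mask cleared as expected */
        assert(irq_mask_is_sane(0xFFFFFFFFu));  /* card gone: reads float high */
        assert(!irq_mask_is_sane(0x00000400u)); /* a real leftover IRQ source */
        return 0;
    }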
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c index 5e87650b07fb..9b9044400218 100644 --- a/drivers/net/wireless/b43/pio.c +++ b/drivers/net/wireless/b43/pio.c | |||
@@ -332,6 +332,7 @@ static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q, | |||
332 | unsigned int data_len) | 332 | unsigned int data_len) |
333 | { | 333 | { |
334 | struct b43_wldev *dev = q->dev; | 334 | struct b43_wldev *dev = q->dev; |
335 | struct b43_wl *wl = dev->wl; | ||
335 | const u8 *data = _data; | 336 | const u8 *data = _data; |
336 | 337 | ||
337 | ctl |= B43_PIO_TXCTL_WRITELO | B43_PIO_TXCTL_WRITEHI; | 338 | ctl |= B43_PIO_TXCTL_WRITELO | B43_PIO_TXCTL_WRITEHI; |
@@ -341,13 +342,12 @@ static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q, | |||
341 | q->mmio_base + B43_PIO_TXDATA, | 342 | q->mmio_base + B43_PIO_TXDATA, |
342 | sizeof(u16)); | 343 | sizeof(u16)); |
343 | if (data_len & 1) { | 344 | if (data_len & 1) { |
344 | u8 tail[2] = { 0, }; | ||
345 | |||
346 | /* Write the last byte. */ | 345 | /* Write the last byte. */ |
347 | ctl &= ~B43_PIO_TXCTL_WRITEHI; | 346 | ctl &= ~B43_PIO_TXCTL_WRITEHI; |
348 | b43_piotx_write16(q, B43_PIO_TXCTL, ctl); | 347 | b43_piotx_write16(q, B43_PIO_TXCTL, ctl); |
349 | tail[0] = data[data_len - 1]; | 348 | wl->tx_tail[0] = data[data_len - 1]; |
350 | ssb_block_write(dev->dev, tail, 2, | 349 | wl->tx_tail[1] = 0; |
350 | ssb_block_write(dev->dev, wl->tx_tail, 2, | ||
351 | q->mmio_base + B43_PIO_TXDATA, | 351 | q->mmio_base + B43_PIO_TXDATA, |
352 | sizeof(u16)); | 352 | sizeof(u16)); |
353 | } | 353 | } |
@@ -382,6 +382,7 @@ static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q, | |||
382 | unsigned int data_len) | 382 | unsigned int data_len) |
383 | { | 383 | { |
384 | struct b43_wldev *dev = q->dev; | 384 | struct b43_wldev *dev = q->dev; |
385 | struct b43_wl *wl = dev->wl; | ||
385 | const u8 *data = _data; | 386 | const u8 *data = _data; |
386 | 387 | ||
387 | ctl |= B43_PIO8_TXCTL_0_7 | B43_PIO8_TXCTL_8_15 | | 388 | ctl |= B43_PIO8_TXCTL_0_7 | B43_PIO8_TXCTL_8_15 | |
@@ -392,29 +393,31 @@ static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q, | |||
392 | q->mmio_base + B43_PIO8_TXDATA, | 393 | q->mmio_base + B43_PIO8_TXDATA, |
393 | sizeof(u32)); | 394 | sizeof(u32)); |
394 | if (data_len & 3) { | 395 | if (data_len & 3) { |
395 | u8 tail[4] = { 0, }; | 396 | wl->tx_tail[3] = 0; |
396 | |||
397 | /* Write the last few bytes. */ | 397 | /* Write the last few bytes. */ |
398 | ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 | | 398 | ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 | |
399 | B43_PIO8_TXCTL_24_31); | 399 | B43_PIO8_TXCTL_24_31); |
400 | switch (data_len & 3) { | 400 | switch (data_len & 3) { |
401 | case 3: | 401 | case 3: |
402 | ctl |= B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_8_15; | 402 | ctl |= B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_8_15; |
403 | tail[0] = data[data_len - 3]; | 403 | wl->tx_tail[0] = data[data_len - 3]; |
404 | tail[1] = data[data_len - 2]; | 404 | wl->tx_tail[1] = data[data_len - 2]; |
405 | tail[2] = data[data_len - 1]; | 405 | wl->tx_tail[2] = data[data_len - 1]; |
406 | break; | 406 | break; |
407 | case 2: | 407 | case 2: |
408 | ctl |= B43_PIO8_TXCTL_8_15; | 408 | ctl |= B43_PIO8_TXCTL_8_15; |
409 | tail[0] = data[data_len - 2]; | 409 | wl->tx_tail[0] = data[data_len - 2]; |
410 | tail[1] = data[data_len - 1]; | 410 | wl->tx_tail[1] = data[data_len - 1]; |
411 | wl->tx_tail[2] = 0; | ||
411 | break; | 412 | break; |
412 | case 1: | 413 | case 1: |
413 | tail[0] = data[data_len - 1]; | 414 | wl->tx_tail[0] = data[data_len - 1]; |
415 | wl->tx_tail[1] = 0; | ||
416 | wl->tx_tail[2] = 0; | ||
414 | break; | 417 | break; |
415 | } | 418 | } |
416 | b43_piotx_write32(q, B43_PIO8_TXCTL, ctl); | 419 | b43_piotx_write32(q, B43_PIO8_TXCTL, ctl); |
417 | ssb_block_write(dev->dev, tail, 4, | 420 | ssb_block_write(dev->dev, wl->tx_tail, 4, |
418 | q->mmio_base + B43_PIO8_TXDATA, | 421 | q->mmio_base + B43_PIO8_TXDATA, |
419 | sizeof(u32)); | 422 | sizeof(u32)); |
420 | } | 423 | } |
@@ -446,8 +449,9 @@ static void pio_tx_frame_4byte_queue(struct b43_pio_txpacket *pack, | |||
446 | static int pio_tx_frame(struct b43_pio_txqueue *q, | 449 | static int pio_tx_frame(struct b43_pio_txqueue *q, |
447 | struct sk_buff *skb) | 450 | struct sk_buff *skb) |
448 | { | 451 | { |
452 | struct b43_wldev *dev = q->dev; | ||
453 | struct b43_wl *wl = dev->wl; | ||
449 | struct b43_pio_txpacket *pack; | 454 | struct b43_pio_txpacket *pack; |
450 | struct b43_txhdr txhdr; | ||
451 | u16 cookie; | 455 | u16 cookie; |
452 | int err; | 456 | int err; |
453 | unsigned int hdrlen; | 457 | unsigned int hdrlen; |
@@ -458,8 +462,8 @@ static int pio_tx_frame(struct b43_pio_txqueue *q, | |||
458 | struct b43_pio_txpacket, list); | 462 | struct b43_pio_txpacket, list); |
459 | 463 | ||
460 | cookie = generate_cookie(q, pack); | 464 | cookie = generate_cookie(q, pack); |
461 | hdrlen = b43_txhdr_size(q->dev); | 465 | hdrlen = b43_txhdr_size(dev); |
462 | err = b43_generate_txhdr(q->dev, (u8 *)&txhdr, skb, | 466 | err = b43_generate_txhdr(dev, (u8 *)&wl->txhdr, skb, |
463 | info, cookie); | 467 | info, cookie); |
464 | if (err) | 468 | if (err) |
465 | return err; | 469 | return err; |
@@ -467,15 +471,15 @@ static int pio_tx_frame(struct b43_pio_txqueue *q, | |||
467 | if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { | 471 | if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { |
468 | /* Tell the firmware about the cookie of the last | 472 | /* Tell the firmware about the cookie of the last |
469 | * mcast frame, so it can clear the more-data bit in it. */ | 473 | * mcast frame, so it can clear the more-data bit in it. */ |
470 | b43_shm_write16(q->dev, B43_SHM_SHARED, | 474 | b43_shm_write16(dev, B43_SHM_SHARED, |
471 | B43_SHM_SH_MCASTCOOKIE, cookie); | 475 | B43_SHM_SH_MCASTCOOKIE, cookie); |
472 | } | 476 | } |
473 | 477 | ||
474 | pack->skb = skb; | 478 | pack->skb = skb; |
475 | if (q->rev >= 8) | 479 | if (q->rev >= 8) |
476 | pio_tx_frame_4byte_queue(pack, (const u8 *)&txhdr, hdrlen); | 480 | pio_tx_frame_4byte_queue(pack, (const u8 *)&wl->txhdr, hdrlen); |
477 | else | 481 | else |
478 | pio_tx_frame_2byte_queue(pack, (const u8 *)&txhdr, hdrlen); | 482 | pio_tx_frame_2byte_queue(pack, (const u8 *)&wl->txhdr, hdrlen); |
479 | 483 | ||
480 | /* Remove it from the list of available packet slots. | 484 | /* Remove it from the list of available packet slots. |
481 | * It will be put back when we receive the status report. */ | 485 | * It will be put back when we receive the status report. */ |
@@ -615,14 +619,14 @@ void b43_pio_get_tx_stats(struct b43_wldev *dev, | |||
615 | static bool pio_rx_frame(struct b43_pio_rxqueue *q) | 619 | static bool pio_rx_frame(struct b43_pio_rxqueue *q) |
616 | { | 620 | { |
617 | struct b43_wldev *dev = q->dev; | 621 | struct b43_wldev *dev = q->dev; |
618 | struct b43_rxhdr_fw4 rxhdr; | 622 | struct b43_wl *wl = dev->wl; |
619 | u16 len; | 623 | u16 len; |
620 | u32 macstat; | 624 | u32 macstat; |
621 | unsigned int i, padding; | 625 | unsigned int i, padding; |
622 | struct sk_buff *skb; | 626 | struct sk_buff *skb; |
623 | const char *err_msg = NULL; | 627 | const char *err_msg = NULL; |
624 | 628 | ||
625 | memset(&rxhdr, 0, sizeof(rxhdr)); | 629 | memset(&wl->rxhdr, 0, sizeof(wl->rxhdr)); |
626 | 630 | ||
627 | /* Check if we have data and wait for it to get ready. */ | 631 | /* Check if we have data and wait for it to get ready. */ |
628 | if (q->rev >= 8) { | 632 | if (q->rev >= 8) { |
@@ -660,16 +664,16 @@ data_ready: | |||
660 | 664 | ||
661 | /* Get the preamble (RX header) */ | 665 | /* Get the preamble (RX header) */ |
662 | if (q->rev >= 8) { | 666 | if (q->rev >= 8) { |
663 | ssb_block_read(dev->dev, &rxhdr, sizeof(rxhdr), | 667 | ssb_block_read(dev->dev, &wl->rxhdr, sizeof(wl->rxhdr), |
664 | q->mmio_base + B43_PIO8_RXDATA, | 668 | q->mmio_base + B43_PIO8_RXDATA, |
665 | sizeof(u32)); | 669 | sizeof(u32)); |
666 | } else { | 670 | } else { |
667 | ssb_block_read(dev->dev, &rxhdr, sizeof(rxhdr), | 671 | ssb_block_read(dev->dev, &wl->rxhdr, sizeof(wl->rxhdr), |
668 | q->mmio_base + B43_PIO_RXDATA, | 672 | q->mmio_base + B43_PIO_RXDATA, |
669 | sizeof(u16)); | 673 | sizeof(u16)); |
670 | } | 674 | } |
671 | /* Sanity checks. */ | 675 | /* Sanity checks. */ |
672 | len = le16_to_cpu(rxhdr.frame_len); | 676 | len = le16_to_cpu(wl->rxhdr.frame_len); |
673 | if (unlikely(len > 0x700)) { | 677 | if (unlikely(len > 0x700)) { |
674 | err_msg = "len > 0x700"; | 678 | err_msg = "len > 0x700"; |
675 | goto rx_error; | 679 | goto rx_error; |
@@ -679,7 +683,7 @@ data_ready: | |||
679 | goto rx_error; | 683 | goto rx_error; |
680 | } | 684 | } |
681 | 685 | ||
682 | macstat = le32_to_cpu(rxhdr.mac_status); | 686 | macstat = le32_to_cpu(wl->rxhdr.mac_status); |
683 | if (macstat & B43_RX_MAC_FCSERR) { | 687 | if (macstat & B43_RX_MAC_FCSERR) { |
684 | if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) { | 688 | if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) { |
685 | /* Drop frames with failed FCS. */ | 689 | /* Drop frames with failed FCS. */ |
@@ -704,24 +708,22 @@ data_ready: | |||
704 | q->mmio_base + B43_PIO8_RXDATA, | 708 | q->mmio_base + B43_PIO8_RXDATA, |
705 | sizeof(u32)); | 709 | sizeof(u32)); |
706 | if (len & 3) { | 710 | if (len & 3) { |
707 | u8 tail[4] = { 0, }; | ||
708 | |||
709 | /* Read the last few bytes. */ | 711 | /* Read the last few bytes. */ |
710 | ssb_block_read(dev->dev, tail, 4, | 712 | ssb_block_read(dev->dev, wl->rx_tail, 4, |
711 | q->mmio_base + B43_PIO8_RXDATA, | 713 | q->mmio_base + B43_PIO8_RXDATA, |
712 | sizeof(u32)); | 714 | sizeof(u32)); |
713 | switch (len & 3) { | 715 | switch (len & 3) { |
714 | case 3: | 716 | case 3: |
715 | skb->data[len + padding - 3] = tail[0]; | 717 | skb->data[len + padding - 3] = wl->rx_tail[0]; |
716 | skb->data[len + padding - 2] = tail[1]; | 718 | skb->data[len + padding - 2] = wl->rx_tail[1]; |
717 | skb->data[len + padding - 1] = tail[2]; | 719 | skb->data[len + padding - 1] = wl->rx_tail[2]; |
718 | break; | 720 | break; |
719 | case 2: | 721 | case 2: |
720 | skb->data[len + padding - 2] = tail[0]; | 722 | skb->data[len + padding - 2] = wl->rx_tail[0]; |
721 | skb->data[len + padding - 1] = tail[1]; | 723 | skb->data[len + padding - 1] = wl->rx_tail[1]; |
722 | break; | 724 | break; |
723 | case 1: | 725 | case 1: |
724 | skb->data[len + padding - 1] = tail[0]; | 726 | skb->data[len + padding - 1] = wl->rx_tail[0]; |
725 | break; | 727 | break; |
726 | } | 728 | } |
727 | } | 729 | } |
@@ -730,17 +732,15 @@ data_ready: | |||
730 | q->mmio_base + B43_PIO_RXDATA, | 732 | q->mmio_base + B43_PIO_RXDATA, |
731 | sizeof(u16)); | 733 | sizeof(u16)); |
732 | if (len & 1) { | 734 | if (len & 1) { |
733 | u8 tail[2] = { 0, }; | ||
734 | |||
735 | /* Read the last byte. */ | 735 | /* Read the last byte. */ |
736 | ssb_block_read(dev->dev, tail, 2, | 736 | ssb_block_read(dev->dev, wl->rx_tail, 2, |
737 | q->mmio_base + B43_PIO_RXDATA, | 737 | q->mmio_base + B43_PIO_RXDATA, |
738 | sizeof(u16)); | 738 | sizeof(u16)); |
739 | skb->data[len + padding - 1] = tail[0]; | 739 | skb->data[len + padding - 1] = wl->rx_tail[0]; |
740 | } | 740 | } |
741 | } | 741 | } |
742 | 742 | ||
743 | b43_rx(q->dev, skb, &rxhdr); | 743 | b43_rx(q->dev, skb, &wl->rxhdr); |
744 | 744 | ||
745 | return 1; | 745 | return 1; |
746 | 746 | ||
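The pio.c changes keep the transmit header and the odd trailing bytes in the new wl->txhdr / wl->tx_tail / wl->rx_tail buffers instead of short on-stack arrays; the tail handling itself is unchanged: when the payload length is not a multiple of the FIFO word size, the last 1-3 bytes go out as one zero-padded word. A standalone sketch of that padding step (function name is mine):

    #include <stdio.h>
    #include <string.h>

    /* Copy the trailing (len % 4) bytes of data into a zero-padded 4-byte
     * word, mirroring what the driver does before the final 32-bit write. */
    static size_t fill_tail_word(unsigned char tail[4],
                                 const unsigned char *data, size_t len)
    {
        size_t rem = len & 3;

        memset(tail, 0, 4);
        memcpy(tail, data + (len - rem), rem);
        return rem; /* 0 means no extra word is needed */
    }

    int main(void)
    {
        unsigned char frame[7] = { 1, 2, 3, 4, 5, 6, 7 };
        unsigned char tail[4];

        if (fill_tail_word(tail, frame, sizeof(frame)))
            printf("tail: %02x %02x %02x %02x\n",
                   tail[0], tail[1], tail[2], tail[3]); /* 05 06 07 00 */
        return 0;
    }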
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c index ac9f600995e4..f4e9695ec186 100644 --- a/drivers/net/wireless/b43/xmit.c +++ b/drivers/net/wireless/b43/xmit.c | |||
@@ -27,7 +27,7 @@ | |||
27 | 27 | ||
28 | */ | 28 | */ |
29 | 29 | ||
30 | #include "xmit.h" | 30 | #include "b43.h" |
31 | #include "phy_common.h" | 31 | #include "phy_common.h" |
32 | #include "dma.h" | 32 | #include "dma.h" |
33 | #include "pio.h" | 33 | #include "pio.h" |
@@ -690,7 +690,10 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr) | |||
690 | } | 690 | } |
691 | 691 | ||
692 | memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); | 692 | memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); |
693 | |||
694 | local_bh_disable(); | ||
693 | ieee80211_rx(dev->wl->hw, skb); | 695 | ieee80211_rx(dev->wl->hw, skb); |
696 | local_bh_enable(); | ||
694 | 697 | ||
695 | #if B43_DEBUG | 698 | #if B43_DEBUG |
696 | dev->rx_count++; | 699 | dev->rx_count++; |
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c index a16bd4147eac..cbb0585083a9 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c | |||
@@ -702,7 +702,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, | |||
702 | u8 sta_id = iwl_find_station(priv, hdr->addr1); | 702 | u8 sta_id = iwl_find_station(priv, hdr->addr1); |
703 | 703 | ||
704 | if (sta_id == IWL_INVALID_STATION) { | 704 | if (sta_id == IWL_INVALID_STATION) { |
705 | IWL_DEBUG_RATE(priv, "LQ: ADD station %pm\n", | 705 | IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n", |
706 | hdr->addr1); | 706 | hdr->addr1); |
707 | sta_id = iwl_add_station(priv, hdr->addr1, false, | 707 | sta_id = iwl_add_station(priv, hdr->addr1, false, |
708 | CMD_ASYNC, NULL); | 708 | CMD_ASYNC, NULL); |
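The only change in iwl-3945-rs.c is the format specifier: the kernel's pointer extension for MAC addresses is %pM (capital M); lowercase %pm is not that extension, so the debug line did not print the station address as intended. Userspace C has no such extension; the usual equivalent is explicit formatting:

    #include <stdio.h>

    /* Userspace stand-in for the kernel's "%pM" MAC-address format. */
    static void print_mac(const unsigned char mac[6])
    {
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
    }

    int main(void)
    {
        unsigned char addr[6] = { 0x00, 0x1b, 0x77, 0x00, 0x00, 0x01 };
        print_mac(addr);
        return 0;
    }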
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c index 68136172b823..f059b49dc691 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c | |||
@@ -611,7 +611,7 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv, | |||
611 | if (rx_status.band == IEEE80211_BAND_5GHZ) | 611 | if (rx_status.band == IEEE80211_BAND_5GHZ) |
612 | rx_status.rate_idx -= IWL_FIRST_OFDM_RATE; | 612 | rx_status.rate_idx -= IWL_FIRST_OFDM_RATE; |
613 | 613 | ||
614 | rx_status.antenna = le16_to_cpu(rx_hdr->phy_flags & | 614 | rx_status.antenna = (le16_to_cpu(rx_hdr->phy_flags) & |
615 | RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4; | 615 | RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4; |
616 | 616 | ||
617 | /* set the preamble flag if appropriate */ | 617 | /* set the preamble flag if appropriate */ |
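The antenna fix byte-swaps the whole phy_flags field first and masks afterwards; the companion hunks below drop cpu_to_le16() from RX_RES_PHY_FLAGS_ANTENNA_MSK (iwl-commands.h) and apply the same ordering in iwl-rx.c. Masking a little-endian value with a host-order constant before conversion extracts the wrong bits on big-endian machines. A self-contained illustration (helper name is mine):

    #include <stdint.h>
    #include <stdio.h>

    #define ANTENNA_MSK 0xf0u
    #define ANTENNA_POS 4

    /* Interpret two bytes stored little-endian as a host-order value. */
    static uint16_t le16_to_host(const uint8_t b[2])
    {
        return (uint16_t)(b[0] | (b[1] << 8));
    }

    int main(void)
    {
        uint8_t raw[2] = { 0xb0, 0x00 };   /* phy_flags as stored: 0x00b0 LE */
        uint16_t flags = le16_to_host(raw);

        /* Convert first, then mask and shift: (0xb0 & 0xf0) >> 4 = 0xb. */
        printf("antenna bitmap: 0x%x\n", (flags & ANTENNA_MSK) >> ANTENNA_POS);
        return 0;
    }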
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index d6bc0e051043..6e6f516ba404 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c | |||
@@ -318,7 +318,7 @@ static void iwl5000_gain_computation(struct iwl_priv *priv, | |||
318 | (s32)average_noise[i])) / 1500; | 318 | (s32)average_noise[i])) / 1500; |
319 | /* bound gain by 2 bits value max, 3rd bit is sign */ | 319 | /* bound gain by 2 bits value max, 3rd bit is sign */ |
320 | data->delta_gain_code[i] = | 320 | data->delta_gain_code[i] = |
321 | min(abs(delta_g), CHAIN_NOISE_MAX_DELTA_GAIN_CODE); | 321 | min(abs(delta_g), (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE); |
322 | 322 | ||
323 | if (delta_g < 0) | 323 | if (delta_g < 0) |
324 | /* set negative sign */ | 324 | /* set negative sign */ |
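The min() tweak is about types rather than values: the kernel's min() requires both operands to have the same type, and abs(delta_g) does not match the plain-int gain-code constant here, hence the (long) cast. A GCC-style sketch of such a type-checked minimum (this is not the kernel macro, just the same idea):

    #include <stdio.h>

    /* Forcing both operands through typeof and comparing their addresses
     * makes the compiler warn when the two types differ. */
    #define min_checked(x, y) ({            \
            typeof(x) _x = (x);             \
            typeof(y) _y = (y);             \
            (void)(&_x == &_y);             \
            _x < _y ? _x : _y; })

    int main(void)
    {
        long delta = 7;
        /* long vs long: clean.  Without the cast on 3 the pointer
         * comparison warns, much as the iwl-5000 build presumably did. */
        printf("%ld\n", min_checked(delta, (long)3));
        return 0;
    }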
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index 313d3e5ee84b..eaafae091f5b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c | |||
@@ -3106,8 +3106,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3106 | out_pci_disable_device: | 3106 | out_pci_disable_device: |
3107 | pci_disable_device(pdev); | 3107 | pci_disable_device(pdev); |
3108 | out_ieee80211_free_hw: | 3108 | out_ieee80211_free_hw: |
3109 | ieee80211_free_hw(priv->hw); | ||
3110 | iwl_free_traffic_mem(priv); | 3109 | iwl_free_traffic_mem(priv); |
3110 | ieee80211_free_hw(priv->hw); | ||
3111 | out: | 3111 | out: |
3112 | return err; | 3112 | return err; |
3113 | } | 3113 | } |
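The probe error path now frees the traffic log before ieee80211_free_hw(): priv is the private area embedded in the hw allocation, so releasing hw first would leave iwl_free_traffic_mem(priv) working on freed memory (the same swap appears in iwl3945-base.c further down). The general pattern is to release members before the allocation that contains them; a minimal sketch with invented names:

    #include <stdlib.h>

    struct hw   { char priv[64]; };   /* private area lives inside hw */
    struct priv { void *traffic_log; };

    static int register_device(void) { return -1; } /* pretend a later step fails */

    static int probe(void)
    {
        struct hw *hw = calloc(1, sizeof(*hw));
        struct priv *priv;

        if (!hw)
            return -1;
        priv = (struct priv *)hw->priv;

        priv->traffic_log = malloc(4096);
        if (!priv->traffic_log)
            goto out_free_hw;

        if (register_device() < 0)
            goto out_free_traffic;
        return 0;

    out_free_traffic:
        free(priv->traffic_log); /* member first: hw (and thus priv) still valid */
    out_free_hw:
        free(hw);                /* container last */
        return -1;
    }

    int main(void) { return probe() ? 1 : 0; }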
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h index 2c5c88fc38f5..4afaf773aeac 100644 --- a/drivers/net/wireless/iwlwifi/iwl-commands.h +++ b/drivers/net/wireless/iwlwifi/iwl-commands.h | |||
@@ -1154,7 +1154,7 @@ struct iwl_wep_cmd { | |||
1154 | #define RX_RES_PHY_FLAGS_MOD_CCK_MSK cpu_to_le16(1 << 1) | 1154 | #define RX_RES_PHY_FLAGS_MOD_CCK_MSK cpu_to_le16(1 << 1) |
1155 | #define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK cpu_to_le16(1 << 2) | 1155 | #define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK cpu_to_le16(1 << 2) |
1156 | #define RX_RES_PHY_FLAGS_NARROW_BAND_MSK cpu_to_le16(1 << 3) | 1156 | #define RX_RES_PHY_FLAGS_NARROW_BAND_MSK cpu_to_le16(1 << 3) |
1157 | #define RX_RES_PHY_FLAGS_ANTENNA_MSK cpu_to_le16(0xf0) | 1157 | #define RX_RES_PHY_FLAGS_ANTENNA_MSK 0xf0 |
1158 | #define RX_RES_PHY_FLAGS_ANTENNA_POS 4 | 1158 | #define RX_RES_PHY_FLAGS_ANTENNA_POS 4 |
1159 | 1159 | ||
1160 | #define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8) | 1160 | #define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8) |
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c index 3d2b93a61e62..e14c9952a935 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c | |||
@@ -410,7 +410,6 @@ static int iwl_find_otp_image(struct iwl_priv *priv, | |||
410 | u16 *validblockaddr) | 410 | u16 *validblockaddr) |
411 | { | 411 | { |
412 | u16 next_link_addr = 0, link_value = 0, valid_addr; | 412 | u16 next_link_addr = 0, link_value = 0, valid_addr; |
413 | int ret = 0; | ||
414 | int usedblocks = 0; | 413 | int usedblocks = 0; |
415 | 414 | ||
416 | /* set addressing mode to absolute to traverse the link list */ | 415 | /* set addressing mode to absolute to traverse the link list */ |
@@ -430,29 +429,29 @@ static int iwl_find_otp_image(struct iwl_priv *priv, | |||
430 | * check for more block on the link list | 429 | * check for more block on the link list |
431 | */ | 430 | */ |
432 | valid_addr = next_link_addr; | 431 | valid_addr = next_link_addr; |
433 | next_link_addr = link_value; | 432 | next_link_addr = link_value * sizeof(u16); |
434 | IWL_DEBUG_INFO(priv, "OTP blocks %d addr 0x%x\n", | 433 | IWL_DEBUG_INFO(priv, "OTP blocks %d addr 0x%x\n", |
435 | usedblocks, next_link_addr); | 434 | usedblocks, next_link_addr); |
436 | if (iwl_read_otp_word(priv, next_link_addr, &link_value)) | 435 | if (iwl_read_otp_word(priv, next_link_addr, &link_value)) |
437 | return -EINVAL; | 436 | return -EINVAL; |
438 | if (!link_value) { | 437 | if (!link_value) { |
439 | /* | 438 | /* |
440 | * reach the end of link list, | 439 | * reach the end of link list, return success and |
441 | * set address point to the starting address | 440 | * set address point to the starting address |
442 | * of the image | 441 | * of the image |
443 | */ | 442 | */ |
444 | goto done; | 443 | *validblockaddr = valid_addr; |
444 | /* skip first 2 bytes (link list pointer) */ | ||
445 | *validblockaddr += 2; | ||
446 | return 0; | ||
445 | } | 447 | } |
446 | /* more in the link list, continue */ | 448 | /* more in the link list, continue */ |
447 | usedblocks++; | 449 | usedblocks++; |
448 | } while (usedblocks < priv->cfg->max_ll_items); | 450 | } while (usedblocks <= priv->cfg->max_ll_items); |
449 | /* OTP full, use last block */ | 451 | |
450 | IWL_DEBUG_INFO(priv, "OTP is full, use last block\n"); | 452 | /* OTP has no valid blocks */ |
451 | done: | 453 | IWL_DEBUG_INFO(priv, "OTP has no valid blocks\n"); |
452 | *validblockaddr = valid_addr; | 454 | return -EINVAL; |
453 | /* skip first 2 bytes (link list pointer) */ | ||
454 | *validblockaddr += 2; | ||
455 | return ret; | ||
456 | } | 455 | } |
457 | 456 | ||
458 | /** | 457 | /** |
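Three things change in the OTP traversal: the stored link value is scaled by sizeof(u16) when forming the next byte address (it is evidently a word index), hitting the terminating zero link now returns success on the spot with validblockaddr pointing just past the 2-byte link word, and exhausting max_ll_items is treated as "no valid blocks" instead of silently using the last block. A loose userspace model that mirrors only the patched control flow (array layout and names are mine):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_LL_ITEMS 4

    /* otp[] is an array of 16-bit words; the first word of each block holds
     * the word index of the next block, and 0 terminates the chain. */
    static int find_otp_image(const uint16_t *otp, uint16_t *validblockaddr)
    {
        uint16_t next_link_addr = 0, link_value = 0, valid_addr;
        int usedblocks = 0;

        do {
            valid_addr = next_link_addr;
            next_link_addr = link_value * sizeof(uint16_t); /* words -> bytes */
            link_value = otp[next_link_addr / sizeof(uint16_t)];
            if (!link_value) {
                /* End of chain: data starts after the 2-byte link word. */
                *validblockaddr = valid_addr + 2;
                return 0;
            }
            usedblocks++;
        } while (usedblocks <= MAX_LL_ITEMS);

        return -1; /* ran past the allowed block count: no valid image */
    }

    int main(void)
    {
        uint16_t otp[16] = { [0] = 4, [4] = 0 }; /* block 0 -> word 4 -> end */
        uint16_t addr;

        if (!find_otp_image(otp, &addr))
            printf("image starts at byte offset %u\n", addr);
        return 0;
    }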
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h index 6b68db7b1b81..80b9e45d9b9c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h | |||
@@ -220,35 +220,35 @@ struct iwl_eeprom_enhanced_txpwr { | |||
220 | * Section 10: 2.4 GHz 40MHz channels: 132, 44 (_above_) | 220 | * Section 10: 2.4 GHz 40MHz channels: 132, 44 (_above_) |
221 | */ | 221 | */ |
222 | /* 2.4 GHz band: CCK */ | 222 | /* 2.4 GHz band: CCK */ |
223 | #define EEPROM_LB_CCK_20_COMMON ((0xAA)\ | 223 | #define EEPROM_LB_CCK_20_COMMON ((0xA8)\ |
224 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 8 bytes */ | 224 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 8 bytes */ |
225 | /* 2.4 GHz band: 20MHz-Legacy, 20MHz-HT, 40MHz-HT */ | 225 | /* 2.4 GHz band: 20MHz-Legacy, 20MHz-HT, 40MHz-HT */ |
226 | #define EEPROM_LB_OFDM_COMMON ((0xB2)\ | 226 | #define EEPROM_LB_OFDM_COMMON ((0xB0)\ |
227 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */ | 227 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */ |
228 | /* 5.2 GHz band: 20MHz-Legacy, 20MHz-HT, 40MHz-HT */ | 228 | /* 5.2 GHz band: 20MHz-Legacy, 20MHz-HT, 40MHz-HT */ |
229 | #define EEPROM_HB_OFDM_COMMON ((0xCA)\ | 229 | #define EEPROM_HB_OFDM_COMMON ((0xC8)\ |
230 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */ | 230 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */ |
231 | /* 2.4GHz band channels: | 231 | /* 2.4GHz band channels: |
232 | * 1Legacy, 1HT, 2Legacy, 2HT, 10Legacy, 10HT, 11Legacy, 11HT */ | 232 | * 1Legacy, 1HT, 2Legacy, 2HT, 10Legacy, 10HT, 11Legacy, 11HT */ |
233 | #define EEPROM_LB_OFDM_20_BAND ((0xE2)\ | 233 | #define EEPROM_LB_OFDM_20_BAND ((0xE0)\ |
234 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 64 bytes */ | 234 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 64 bytes */ |
235 | /* 2.4 GHz band HT40 channels: (1,+1) (2,+1) (6,+1) (7,+1) (9,+1) */ | 235 | /* 2.4 GHz band HT40 channels: (1,+1) (2,+1) (6,+1) (7,+1) (9,+1) */ |
236 | #define EEPROM_LB_OFDM_HT40_BAND ((0x122)\ | 236 | #define EEPROM_LB_OFDM_HT40_BAND ((0x120)\ |
237 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 40 bytes */ | 237 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 40 bytes */ |
238 | /* 5.2GHz band channels: 36Legacy, 36HT, 64Legacy, 64HT, 100Legacy, 100HT */ | 238 | /* 5.2GHz band channels: 36Legacy, 36HT, 64Legacy, 64HT, 100Legacy, 100HT */ |
239 | #define EEPROM_HB_OFDM_20_BAND ((0x14A)\ | 239 | #define EEPROM_HB_OFDM_20_BAND ((0x148)\ |
240 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 48 bytes */ | 240 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 48 bytes */ |
241 | /* 5.2 GHz band HT40 channels: (36,+1) (60,+1) (100,+1) */ | 241 | /* 5.2 GHz band HT40 channels: (36,+1) (60,+1) (100,+1) */ |
242 | #define EEPROM_HB_OFDM_HT40_BAND ((0x17A)\ | 242 | #define EEPROM_HB_OFDM_HT40_BAND ((0x178)\ |
243 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */ | 243 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */ |
244 | /* 2.4 GHz band, channnel 13: Legacy, HT */ | 244 | /* 2.4 GHz band, channnel 13: Legacy, HT */ |
245 | #define EEPROM_LB_OFDM_20_CHANNEL_13 ((0x192)\ | 245 | #define EEPROM_LB_OFDM_20_CHANNEL_13 ((0x190)\ |
246 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 16 bytes */ | 246 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 16 bytes */ |
247 | /* 5.2 GHz band, channnel 140: Legacy, HT */ | 247 | /* 5.2 GHz band, channnel 140: Legacy, HT */ |
248 | #define EEPROM_HB_OFDM_20_CHANNEL_140 ((0x1A2)\ | 248 | #define EEPROM_HB_OFDM_20_CHANNEL_140 ((0x1A0)\ |
249 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 16 bytes */ | 249 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 16 bytes */ |
250 | /* 5.2 GHz band, HT40 channnels (132,+1) (44,+1) */ | 250 | /* 5.2 GHz band, HT40 channnels (132,+1) (44,+1) */ |
251 | #define EEPROM_HB_OFDM_HT40_BAND_1 ((0x1B2)\ | 251 | #define EEPROM_HB_OFDM_HT40_BAND_1 ((0x1B0)\ |
252 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 16 bytes */ | 252 | | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 16 bytes */ |
253 | 253 | ||
254 | 254 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c index 8e1bb53c0aa3..493626bcd3ec 100644 --- a/drivers/net/wireless/iwlwifi/iwl-rx.c +++ b/drivers/net/wireless/iwlwifi/iwl-rx.c | |||
@@ -1044,7 +1044,7 @@ void iwl_rx_reply_rx(struct iwl_priv *priv, | |||
1044 | * as a bitmask. | 1044 | * as a bitmask. |
1045 | */ | 1045 | */ |
1046 | rx_status.antenna = | 1046 | rx_status.antenna = |
1047 | le16_to_cpu(phy_res->phy_flags & RX_RES_PHY_FLAGS_ANTENNA_MSK) | 1047 | (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) |
1048 | >> RX_RES_PHY_FLAGS_ANTENNA_POS; | 1048 | >> RX_RES_PHY_FLAGS_ANTENNA_POS; |
1049 | 1049 | ||
1050 | /* set the preamble flag if appropriate */ | 1050 | /* set the preamble flag if appropriate */ |
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index aa49230422f3..d00a80334095 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c | |||
@@ -4097,8 +4097,8 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e | |||
4097 | pci_set_drvdata(pdev, NULL); | 4097 | pci_set_drvdata(pdev, NULL); |
4098 | pci_disable_device(pdev); | 4098 | pci_disable_device(pdev); |
4099 | out_ieee80211_free_hw: | 4099 | out_ieee80211_free_hw: |
4100 | ieee80211_free_hw(priv->hw); | ||
4101 | iwl_free_traffic_mem(priv); | 4100 | iwl_free_traffic_mem(priv); |
4101 | ieee80211_free_hw(priv->hw); | ||
4102 | out: | 4102 | out: |
4103 | return err; | 4103 | return err; |
4104 | } | 4104 | } |
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c index c42d3faa2660..23f684337fdd 100644 --- a/drivers/net/wireless/libertas/cmdresp.c +++ b/drivers/net/wireless/libertas/cmdresp.c | |||
@@ -3,6 +3,7 @@ | |||
3 | * responses as well as events generated by firmware. | 3 | * responses as well as events generated by firmware. |
4 | */ | 4 | */ |
5 | #include <linux/delay.h> | 5 | #include <linux/delay.h> |
6 | #include <linux/sched.h> | ||
6 | #include <linux/if_arp.h> | 7 | #include <linux/if_arp.h> |
7 | #include <linux/netdevice.h> | 8 | #include <linux/netdevice.h> |
8 | #include <asm/unaligned.h> | 9 | #include <asm/unaligned.h> |
diff --git a/drivers/net/znet.c b/drivers/net/znet.c index a0384b6f09b6..b42347333750 100644 --- a/drivers/net/znet.c +++ b/drivers/net/znet.c | |||
@@ -169,7 +169,6 @@ static void znet_tx_timeout (struct net_device *dev); | |||
169 | static int znet_request_resources (struct net_device *dev) | 169 | static int znet_request_resources (struct net_device *dev) |
170 | { | 170 | { |
171 | struct znet_private *znet = netdev_priv(dev); | 171 | struct znet_private *znet = netdev_priv(dev); |
172 | unsigned long flags; | ||
173 | 172 | ||
174 | if (request_irq (dev->irq, &znet_interrupt, 0, "ZNet", dev)) | 173 | if (request_irq (dev->irq, &znet_interrupt, 0, "ZNet", dev)) |
175 | goto failed; | 174 | goto failed; |
@@ -187,13 +186,9 @@ static int znet_request_resources (struct net_device *dev) | |||
187 | free_sia: | 186 | free_sia: |
188 | release_region (znet->sia_base, znet->sia_size); | 187 | release_region (znet->sia_base, znet->sia_size); |
189 | free_tx_dma: | 188 | free_tx_dma: |
190 | flags = claim_dma_lock(); | ||
191 | free_dma (znet->tx_dma); | 189 | free_dma (znet->tx_dma); |
192 | release_dma_lock (flags); | ||
193 | free_rx_dma: | 190 | free_rx_dma: |
194 | flags = claim_dma_lock(); | ||
195 | free_dma (znet->rx_dma); | 191 | free_dma (znet->rx_dma); |
196 | release_dma_lock (flags); | ||
197 | free_irq: | 192 | free_irq: |
198 | free_irq (dev->irq, dev); | 193 | free_irq (dev->irq, dev); |
199 | failed: | 194 | failed: |
@@ -203,14 +198,11 @@ static int znet_request_resources (struct net_device *dev) | |||
203 | static void znet_release_resources (struct net_device *dev) | 198 | static void znet_release_resources (struct net_device *dev) |
204 | { | 199 | { |
205 | struct znet_private *znet = netdev_priv(dev); | 200 | struct znet_private *znet = netdev_priv(dev); |
206 | unsigned long flags; | ||
207 | 201 | ||
208 | release_region (znet->sia_base, znet->sia_size); | 202 | release_region (znet->sia_base, znet->sia_size); |
209 | release_region (dev->base_addr, znet->io_size); | 203 | release_region (dev->base_addr, znet->io_size); |
210 | flags = claim_dma_lock(); | ||
211 | free_dma (znet->tx_dma); | 204 | free_dma (znet->tx_dma); |
212 | free_dma (znet->rx_dma); | 205 | free_dma (znet->rx_dma); |
213 | release_dma_lock (flags); | ||
214 | free_irq (dev->irq, dev); | 206 | free_irq (dev->irq, dev); |
215 | } | 207 | } |
216 | 208 | ||
diff --git a/drivers/staging/b3dfg/b3dfg.c b/drivers/staging/b3dfg/b3dfg.c index 94c5d27d24d7..cda26bb493b3 100644 --- a/drivers/staging/b3dfg/b3dfg.c +++ b/drivers/staging/b3dfg/b3dfg.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/wait.h> | 36 | #include <linux/wait.h> |
37 | #include <linux/mm.h> | 37 | #include <linux/mm.h> |
38 | #include <linux/uaccess.h> | 38 | #include <linux/uaccess.h> |
39 | #include <linux/sched.h> | ||
39 | 40 | ||
40 | static unsigned int b3dfg_nbuf = 2; | 41 | static unsigned int b3dfg_nbuf = 2; |
41 | 42 | ||
diff --git a/drivers/staging/comedi/drivers/me_daq.c b/drivers/staging/comedi/drivers/me_daq.c index 2cda7ad1d32f..80e192d2e77e 100644 --- a/drivers/staging/comedi/drivers/me_daq.c +++ b/drivers/staging/comedi/drivers/me_daq.c | |||
@@ -51,6 +51,7 @@ from http://www.comedi.org | |||
51 | */ | 51 | */ |
52 | 52 | ||
53 | #include <linux/interrupt.h> | 53 | #include <linux/interrupt.h> |
54 | #include <linux/sched.h> | ||
54 | #include "../comedidev.h" | 55 | #include "../comedidev.h" |
55 | 56 | ||
56 | #include "comedi_pci.h" | 57 | #include "comedi_pci.h" |
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c index e3ffb067ead1..753ee0512342 100644 --- a/drivers/staging/comedi/drivers/ni_mio_common.c +++ b/drivers/staging/comedi/drivers/ni_mio_common.c | |||
@@ -62,6 +62,7 @@ | |||
62 | /* #define DEBUG_STATUS_B */ | 62 | /* #define DEBUG_STATUS_B */ |
63 | 63 | ||
64 | #include <linux/interrupt.h> | 64 | #include <linux/interrupt.h> |
65 | #include <linux/sched.h> | ||
65 | #include "8255.h" | 66 | #include "8255.h" |
66 | #include "mite.h" | 67 | #include "mite.h" |
67 | #include "comedi_fc.h" | 68 | #include "comedi_fc.h" |
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c index 52b2eca9e73d..d544698f2414 100644 --- a/drivers/staging/comedi/drivers/ni_pcidio.c +++ b/drivers/staging/comedi/drivers/ni_pcidio.c | |||
@@ -70,6 +70,7 @@ comedi_nonfree_firmware tarball available from http://www.comedi.org | |||
70 | /* #define DEBUG_FLAGS */ | 70 | /* #define DEBUG_FLAGS */ |
71 | 71 | ||
72 | #include <linux/interrupt.h> | 72 | #include <linux/interrupt.h> |
73 | #include <linux/sched.h> | ||
73 | #include "../comedidev.h" | 74 | #include "../comedidev.h" |
74 | 75 | ||
75 | #include "mite.h" | 76 | #include "mite.h" |
diff --git a/drivers/staging/et131x/et1310_address_map.h b/drivers/staging/et131x/et1310_address_map.h index 6294d3814e72..2c3d65a622a7 100644 --- a/drivers/staging/et131x/et1310_address_map.h +++ b/drivers/staging/et131x/et1310_address_map.h | |||
@@ -223,7 +223,7 @@ typedef union _TXDMA_PR_NUM_DES_t { | |||
223 | 223 | ||
224 | extern inline void add_10bit(u32 *v, int n) | 224 | extern inline void add_10bit(u32 *v, int n) |
225 | { | 225 | { |
226 | *v = INDEX10(*v + n); | 226 | *v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP); |
227 | } | 227 | } |
228 | 228 | ||
229 | /* | 229 | /* |
diff --git a/drivers/staging/et131x/et1310_rx.c b/drivers/staging/et131x/et1310_rx.c index 8f2e91fa0a86..10e21db57ac3 100644 --- a/drivers/staging/et131x/et1310_rx.c +++ b/drivers/staging/et131x/et1310_rx.c | |||
@@ -1177,12 +1177,20 @@ void et131x_handle_recv_interrupt(struct et131x_adapter *etdev) | |||
1177 | 1177 | ||
1178 | static inline u32 bump_fbr(u32 *fbr, u32 limit) | 1178 | static inline u32 bump_fbr(u32 *fbr, u32 limit) |
1179 | { | 1179 | { |
1180 | u32 v = *fbr; | 1180 | u32 v = *fbr; |
1181 | add_10bit(&v, 1); | 1181 | v++; |
1182 | if (v > limit) | 1182 | /* This works for all cases where limit < 1024. The 1023 case |
1183 | v = (*fbr & ~ET_DMA10_MASK) ^ ET_DMA10_WRAP; | 1183 | works because 1023++ is 1024 which means the if condition is not |
1184 | *fbr = v; | 1184 | taken but the carry of the bit into the wrap bit toggles the wrap |
1185 | return v; | 1185 | value correctly */ |
1186 | if ((v & ET_DMA10_MASK) > limit) { | ||
1187 | v &= ~ET_DMA10_MASK; | ||
1188 | v ^= ET_DMA10_WRAP; | ||
1189 | } | ||
1190 | /* For the 1023 case */ | ||
1191 | v &= (ET_DMA10_MASK|ET_DMA10_WRAP); | ||
1192 | *fbr = v; | ||
1193 | return v; | ||
1186 | } | 1194 | } |
1187 | 1195 | ||
1188 | /** | 1196 | /** |
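Both ET131x hunks deal with the free-buffer-ring index, a 10-bit counter plus a separate wrap bit that has to survive rollover: add_10bit() now preserves the wrap bit it used to discard, and bump_fbr() open-codes the increment so that passing the ring limit toggles the wrap bit and clears the index, with the final mask covering the limit == 1023 case where the carry out of the low bits flips the wrap bit on its own. A standalone model, assuming the usual 0x3FF index mask and 0x400 wrap bit:

    #include <assert.h>
    #include <stdio.h>

    #define DMA10_MASK 0x3ff /* low 10 bits: ring index */
    #define DMA10_WRAP 0x400 /* toggled every time the index rolls over */

    /* Increment a 10-bit index while keeping/toggling the wrap bit, the
     * behaviour the patched bump_fbr() implements. */
    static unsigned bump(unsigned v, unsigned limit)
    {
        v++;
        if ((v & DMA10_MASK) > limit) {  /* rolled past the last valid slot */
            v &= ~DMA10_MASK;
            v ^= DMA10_WRAP;
        }
        v &= (DMA10_MASK | DMA10_WRAP);  /* limit == 1023: the carry already
                                            toggled the wrap bit, just trim */
        return v;
    }

    int main(void)
    {
        assert(bump(3, 3) == DMA10_WRAP);           /* 3 -> 0, wrap set */
        assert(bump(DMA10_WRAP | 1023, 1023) == 0); /* 1023 -> 0, wrap cleared */
        printf("ok\n");
        return 0;
    }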
diff --git a/drivers/staging/hv/osd.c b/drivers/staging/hv/osd.c index 8fe543bd9910..3a4793a0fd05 100644 --- a/drivers/staging/hv/osd.c +++ b/drivers/staging/hv/osd.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/ioport.h> | 30 | #include <linux/ioport.h> |
31 | #include <linux/irq.h> | 31 | #include <linux/irq.h> |
32 | #include <linux/interrupt.h> | 32 | #include <linux/interrupt.h> |
33 | #include <linux/sched.h> | ||
33 | #include <linux/wait.h> | 34 | #include <linux/wait.h> |
34 | #include <linux/spinlock.h> | 35 | #include <linux/spinlock.h> |
35 | #include <linux/workqueue.h> | 36 | #include <linux/workqueue.h> |
diff --git a/drivers/staging/iio/industrialio-core.c b/drivers/staging/iio/industrialio-core.c index 1fa18f255814..768f44894d08 100644 --- a/drivers/staging/iio/industrialio-core.c +++ b/drivers/staging/iio/industrialio-core.c | |||
@@ -18,6 +18,8 @@ | |||
18 | #include <linux/fs.h> | 18 | #include <linux/fs.h> |
19 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
20 | #include <linux/poll.h> | 20 | #include <linux/poll.h> |
21 | #include <linux/sched.h> | ||
22 | #include <linux/wait.h> | ||
21 | #include <linux/cdev.h> | 23 | #include <linux/cdev.h> |
22 | #include "iio.h" | 24 | #include "iio.h" |
23 | #include "trigger_consumer.h" | 25 | #include "trigger_consumer.h" |
diff --git a/drivers/staging/poch/poch.c b/drivers/staging/poch/poch.c index 0d111ddfabb2..2eb8e3d43c4d 100644 --- a/drivers/staging/poch/poch.c +++ b/drivers/staging/poch/poch.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | #include <linux/ioctl.h> | 21 | #include <linux/ioctl.h> |
22 | #include <linux/io.h> | 22 | #include <linux/io.h> |
23 | #include <linux/sched.h> | ||
23 | 24 | ||
24 | #include "poch.h" | 25 | #include "poch.h" |
25 | 26 | ||
diff --git a/drivers/staging/rt2860/common/cmm_data_2860.c b/drivers/staging/rt2860/common/cmm_data_2860.c index fb1735533b74..857ff450b6c9 100644 --- a/drivers/staging/rt2860/common/cmm_data_2860.c +++ b/drivers/staging/rt2860/common/cmm_data_2860.c | |||
@@ -363,6 +363,8 @@ int RtmpPCIMgmtKickOut( | |||
363 | ULONG SwIdx = pAd->MgmtRing.TxCpuIdx; | 363 | ULONG SwIdx = pAd->MgmtRing.TxCpuIdx; |
364 | 364 | ||
365 | pTxD = (PTXD_STRUC) pAd->MgmtRing.Cell[SwIdx].AllocVa; | 365 | pTxD = (PTXD_STRUC) pAd->MgmtRing.Cell[SwIdx].AllocVa; |
366 | if (!pTxD) | ||
367 | return 0; | ||
366 | 368 | ||
367 | pAd->MgmtRing.Cell[SwIdx].pNdisPacket = pPacket; | 369 | pAd->MgmtRing.Cell[SwIdx].pNdisPacket = pPacket; |
368 | pAd->MgmtRing.Cell[SwIdx].pNextNdisPacket = NULL; | 370 | pAd->MgmtRing.Cell[SwIdx].pNextNdisPacket = NULL; |
diff --git a/drivers/staging/rt2860/common/cmm_info.c b/drivers/staging/rt2860/common/cmm_info.c index 9d589c240ed0..019cc4474ce8 100644 --- a/drivers/staging/rt2860/common/cmm_info.c +++ b/drivers/staging/rt2860/common/cmm_info.c | |||
@@ -25,6 +25,7 @@ | |||
25 | ************************************************************************* | 25 | ************************************************************************* |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #include <linux/sched.h> | ||
28 | #include "../rt_config.h" | 29 | #include "../rt_config.h" |
29 | 30 | ||
30 | INT Show_SSID_Proc( | 31 | INT Show_SSID_Proc( |
diff --git a/drivers/staging/rt2860/rt_linux.c b/drivers/staging/rt2860/rt_linux.c index b396a9b570e2..ed27b8545a1b 100644 --- a/drivers/staging/rt2860/rt_linux.c +++ b/drivers/staging/rt2860/rt_linux.c | |||
@@ -25,6 +25,7 @@ | |||
25 | ************************************************************************* | 25 | ************************************************************************* |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #include <linux/sched.h> | ||
28 | #include "rt_config.h" | 29 | #include "rt_config.h" |
29 | 30 | ||
30 | ULONG RTDebugLevel = RT_DEBUG_ERROR; | 31 | ULONG RTDebugLevel = RT_DEBUG_ERROR; |
diff --git a/drivers/staging/rt3090/common/cmm_info.c b/drivers/staging/rt3090/common/cmm_info.c index 5be0714666cb..3e51e98b474c 100644 --- a/drivers/staging/rt3090/common/cmm_info.c +++ b/drivers/staging/rt3090/common/cmm_info.c | |||
@@ -34,6 +34,7 @@ | |||
34 | --------- ---------- ---------------------------------------------- | 34 | --------- ---------- ---------------------------------------------- |
35 | */ | 35 | */ |
36 | 36 | ||
37 | #include <linux/sched.h> | ||
37 | #include "../rt_config.h" | 38 | #include "../rt_config.h" |
38 | 39 | ||
39 | 40 | ||
diff --git a/drivers/staging/rt3090/rt_linux.c b/drivers/staging/rt3090/rt_linux.c index d2241ecdf583..9b94aa6eb904 100644 --- a/drivers/staging/rt3090/rt_linux.c +++ b/drivers/staging/rt3090/rt_linux.c | |||
@@ -25,6 +25,7 @@ | |||
25 | ************************************************************************* | 25 | ************************************************************************* |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #include <linux/sched.h> | ||
28 | #include "rt_config.h" | 29 | #include "rt_config.h" |
29 | 30 | ||
30 | ULONG RTDebugLevel = RT_DEBUG_ERROR; | 31 | ULONG RTDebugLevel = RT_DEBUG_ERROR; |
diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c index 87f8a1192762..f890a16096c0 100644 --- a/drivers/staging/sep/sep_driver.c +++ b/drivers/staging/sep/sep_driver.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/mm.h> | 38 | #include <linux/mm.h> |
39 | #include <linux/poll.h> | 39 | #include <linux/poll.h> |
40 | #include <linux/wait.h> | 40 | #include <linux/wait.h> |
41 | #include <linux/sched.h> | ||
41 | #include <linux/pci.h> | 42 | #include <linux/pci.h> |
42 | #include <linux/firmware.h> | 43 | #include <linux/firmware.h> |
43 | #include <asm/ioctl.h> | 44 | #include <asm/ioctl.h> |
diff --git a/drivers/staging/vme/bridges/vme_ca91cx42.c b/drivers/staging/vme/bridges/vme_ca91cx42.c index 3d2a84c45829..e139eaeaa174 100644 --- a/drivers/staging/vme/bridges/vme_ca91cx42.c +++ b/drivers/staging/vme/bridges/vme_ca91cx42.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/poll.h> | 25 | #include <linux/poll.h> |
26 | #include <linux/interrupt.h> | 26 | #include <linux/interrupt.h> |
27 | #include <linux/spinlock.h> | 27 | #include <linux/spinlock.h> |
28 | #include <linux/sched.h> | ||
28 | #include <asm/time.h> | 29 | #include <asm/time.h> |
29 | #include <asm/io.h> | 30 | #include <asm/io.h> |
30 | #include <asm/uaccess.h> | 31 | #include <asm/uaccess.h> |
diff --git a/drivers/staging/vme/bridges/vme_tsi148.c b/drivers/staging/vme/bridges/vme_tsi148.c index 8960fa9ee7aa..00fe0803c21c 100644 --- a/drivers/staging/vme/bridges/vme_tsi148.c +++ b/drivers/staging/vme/bridges/vme_tsi148.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/dma-mapping.h> | 25 | #include <linux/dma-mapping.h> |
26 | #include <linux/interrupt.h> | 26 | #include <linux/interrupt.h> |
27 | #include <linux/spinlock.h> | 27 | #include <linux/spinlock.h> |
28 | #include <linux/sched.h> | ||
28 | #include <asm/time.h> | 29 | #include <asm/time.h> |
29 | #include <asm/io.h> | 30 | #include <asm/io.h> |
30 | #include <asm/uaccess.h> | 31 | #include <asm/uaccess.h> |
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c index f37de283d0ab..167cb2a8ecef 100644 --- a/drivers/usb/gadget/ether.c +++ b/drivers/usb/gadget/ether.c | |||
@@ -61,11 +61,6 @@ | |||
61 | * simpler, Microsoft pushes their own approach: RNDIS. The published | 61 | * simpler, Microsoft pushes their own approach: RNDIS. The published |
62 | * RNDIS specs are ambiguous and appear to be incomplete, and are also | 62 | * RNDIS specs are ambiguous and appear to be incomplete, and are also |
63 | * needlessly complex. They borrow more from CDC ACM than CDC ECM. | 63 | * needlessly complex. They borrow more from CDC ACM than CDC ECM. |
64 | * | ||
65 | * While CDC ECM, CDC Subset, and RNDIS are designed to extend the ethernet | ||
66 | * interface to the target, CDC EEM was designed to use ethernet over the USB | ||
67 | * link between the host and target. CDC EEM is implemented as an alternative | ||
68 | * to those other protocols when that communication model is more appropriate | ||
69 | */ | 64 | */ |
70 | 65 | ||
71 | #define DRIVER_DESC "Ethernet Gadget" | 66 | #define DRIVER_DESC "Ethernet Gadget" |
@@ -157,8 +152,8 @@ static inline bool has_rndis(void) | |||
157 | #define RNDIS_PRODUCT_NUM 0xa4a2 /* Ethernet/RNDIS Gadget */ | 152 | #define RNDIS_PRODUCT_NUM 0xa4a2 /* Ethernet/RNDIS Gadget */ |
158 | 153 | ||
159 | /* For EEM gadgets */ | 154 | /* For EEM gadgets */ |
160 | #define EEM_VENDOR_NUM 0x0525 /* INVALID - NEEDS TO BE ALLOCATED */ | 155 | #define EEM_VENDOR_NUM 0x1d6b /* Linux Foundation */ |
161 | #define EEM_PRODUCT_NUM 0xa4a1 /* INVALID - NEEDS TO BE ALLOCATED */ | 156 | #define EEM_PRODUCT_NUM 0x0102 /* EEM Gadget */ |
162 | 157 | ||
163 | /*-------------------------------------------------------------------------*/ | 158 | /*-------------------------------------------------------------------------*/ |
164 | 159 | ||
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index 3efa59b18044..b25cdea93a1f 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c | |||
@@ -1400,6 +1400,10 @@ iso_stream_schedule ( | |||
1400 | goto fail; | 1400 | goto fail; |
1401 | } | 1401 | } |
1402 | 1402 | ||
1403 | period = urb->interval; | ||
1404 | if (!stream->highspeed) | ||
1405 | period <<= 3; | ||
1406 | |||
1403 | now = ehci_readl(ehci, &ehci->regs->frame_index) % mod; | 1407 | now = ehci_readl(ehci, &ehci->regs->frame_index) % mod; |
1404 | 1408 | ||
1405 | /* when's the last uframe this urb could start? */ | 1409 | /* when's the last uframe this urb could start? */ |
@@ -1417,8 +1421,8 @@ iso_stream_schedule ( | |||
1417 | 1421 | ||
1418 | /* Fell behind (by up to twice the slop amount)? */ | 1422 | /* Fell behind (by up to twice the slop amount)? */ |
1419 | if (start >= max - 2 * 8 * SCHEDULE_SLOP) | 1423 | if (start >= max - 2 * 8 * SCHEDULE_SLOP) |
1420 | start += stream->interval * DIV_ROUND_UP( | 1424 | start += period * DIV_ROUND_UP( |
1421 | max - start, stream->interval) - mod; | 1425 | max - start, period) - mod; |
1422 | 1426 | ||
1423 | /* Tried to schedule too far into the future? */ | 1427 | /* Tried to schedule too far into the future? */ |
1424 | if (unlikely((start + sched->span) >= max)) { | 1428 | if (unlikely((start + sched->span) >= max)) { |
@@ -1441,10 +1445,6 @@ iso_stream_schedule ( | |||
1441 | 1445 | ||
1442 | /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */ | 1446 | /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */ |
1443 | 1447 | ||
1444 | period = urb->interval; | ||
1445 | if (!stream->highspeed) | ||
1446 | period <<= 3; | ||
1447 | |||
1448 | /* find a uframe slot with enough bandwidth */ | 1448 | /* find a uframe slot with enough bandwidth */ |
1449 | for (; start < (stream->next_uframe + period); start++) { | 1449 | for (; start < (stream->next_uframe + period); start++) { |
1450 | int enough_space; | 1450 | int enough_space; |
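The EHCI change computes the schedule period (urb->interval, shifted left by 3 for full/low-speed devices because the schedule runs in microframes) before the "fell behind" catch-up and uses it there instead of stream->interval, which for non-high-speed streams does not appear to be in microframe units. The catch-up itself just rounds the start point forward in whole periods and folds it back into the schedule window:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Advance 'start' to at least 'max' in whole periods, then fold it back
     * into the [0, mod) schedule window - the arithmetic of the patched line.
     * Unsigned wrap-around makes subtracting mod before the fold safe. */
    static unsigned catch_up(unsigned start, unsigned max,
                             unsigned period, unsigned mod)
    {
        start += period * DIV_ROUND_UP(max - start, period) - mod;
        return start;
    }

    int main(void)
    {
        /* 1024-uframe schedule, 64-uframe period, start 100 uframes behind. */
        printf("%u\n", catch_up(900, 1000, 64, 1024)); /* 4, i.e. 1028 mod 1024 */
        return 0;
    }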
diff --git a/drivers/usb/host/whci/asl.c b/drivers/usb/host/whci/asl.c index c632437c7649..562eba108816 100644 --- a/drivers/usb/host/whci/asl.c +++ b/drivers/usb/host/whci/asl.c | |||
@@ -115,6 +115,10 @@ static uint32_t process_qset(struct whc *whc, struct whc_qset *qset) | |||
115 | if (status & QTD_STS_HALTED) { | 115 | if (status & QTD_STS_HALTED) { |
116 | /* Ug, an error. */ | 116 | /* Ug, an error. */ |
117 | process_halted_qtd(whc, qset, td); | 117 | process_halted_qtd(whc, qset, td); |
118 | /* A halted qTD always triggers an update | ||
119 | because the qset was either removed or | ||
120 | reactivated. */ | ||
121 | update |= WHC_UPDATE_UPDATED; | ||
118 | goto done; | 122 | goto done; |
119 | } | 123 | } |
120 | 124 | ||
@@ -305,6 +309,7 @@ int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status) | |||
305 | struct whc_urb *wurb = urb->hcpriv; | 309 | struct whc_urb *wurb = urb->hcpriv; |
306 | struct whc_qset *qset = wurb->qset; | 310 | struct whc_qset *qset = wurb->qset; |
307 | struct whc_std *std, *t; | 311 | struct whc_std *std, *t; |
312 | bool has_qtd = false; | ||
308 | int ret; | 313 | int ret; |
309 | unsigned long flags; | 314 | unsigned long flags; |
310 | 315 | ||
@@ -315,17 +320,21 @@ int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status) | |||
315 | goto out; | 320 | goto out; |
316 | 321 | ||
317 | list_for_each_entry_safe(std, t, &qset->stds, list_node) { | 322 | list_for_each_entry_safe(std, t, &qset->stds, list_node) { |
318 | if (std->urb == urb) | 323 | if (std->urb == urb) { |
324 | if (std->qtd) | ||
325 | has_qtd = true; | ||
319 | qset_free_std(whc, std); | 326 | qset_free_std(whc, std); |
320 | else | 327 | } else |
321 | std->qtd = NULL; /* so this std is re-added when the qset is */ | 328 | std->qtd = NULL; /* so this std is re-added when the qset is */ |
322 | } | 329 | } |
323 | 330 | ||
324 | asl_qset_remove(whc, qset); | 331 | if (has_qtd) { |
325 | wurb->status = status; | 332 | asl_qset_remove(whc, qset); |
326 | wurb->is_async = true; | 333 | wurb->status = status; |
327 | queue_work(whc->workqueue, &wurb->dequeue_work); | 334 | wurb->is_async = true; |
328 | 335 | queue_work(whc->workqueue, &wurb->dequeue_work); | |
336 | } else | ||
337 | qset_remove_urb(whc, qset, urb, status); | ||
329 | out: | 338 | out: |
330 | spin_unlock_irqrestore(&whc->lock, flags); | 339 | spin_unlock_irqrestore(&whc->lock, flags); |
331 | 340 | ||
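Both the asl.c change above and the matching pzl.c change below make urb_dequeue distinguish two cases: if any of the URB's transfer descriptors still owns a hardware qTD, the qset has to be pulled off the hardware list and finished from the workqueue; if not, the URB was never handed to the hardware and can be completed on the spot. A compact userspace analogue of that test over a singly linked std list (hypothetical types, no locking):

#include <stdbool.h>
#include <stddef.h>

struct std_entry {
        struct std_entry *next;
        void *urb;
        void *qtd;              /* non-NULL while the hardware owns it */
};

/* true if any std belonging to this URB is still tied to a hardware qTD */
static bool urb_has_hw_qtd(const struct std_entry *head, const void *urb)
{
        const struct std_entry *std;

        for (std = head; std; std = std->next)
                if (std->urb == urb && std->qtd)
                        return true;
        return false;
}

int main(void)
{
        int urb, qtd;
        struct std_entry b = { NULL, &urb, NULL };
        struct std_entry a = { &b,   &urb, &qtd };

        return urb_has_hw_qtd(&a, &urb) ? 0 : 1;        /* exits 0 */
}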
diff --git a/drivers/usb/host/whci/pzl.c b/drivers/usb/host/whci/pzl.c index a9e05bac6646..0db3fb2dc03a 100644 --- a/drivers/usb/host/whci/pzl.c +++ b/drivers/usb/host/whci/pzl.c | |||
@@ -121,6 +121,10 @@ static enum whc_update pzl_process_qset(struct whc *whc, struct whc_qset *qset) | |||
121 | if (status & QTD_STS_HALTED) { | 121 | if (status & QTD_STS_HALTED) { |
122 | /* Ug, an error. */ | 122 | /* Ug, an error. */ |
123 | process_halted_qtd(whc, qset, td); | 123 | process_halted_qtd(whc, qset, td); |
124 | /* A halted qTD always triggers an update | ||
125 | because the qset was either removed or | ||
126 | reactivated. */ | ||
127 | update |= WHC_UPDATE_UPDATED; | ||
124 | goto done; | 128 | goto done; |
125 | } | 129 | } |
126 | 130 | ||
@@ -333,6 +337,7 @@ int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status) | |||
333 | struct whc_urb *wurb = urb->hcpriv; | 337 | struct whc_urb *wurb = urb->hcpriv; |
334 | struct whc_qset *qset = wurb->qset; | 338 | struct whc_qset *qset = wurb->qset; |
335 | struct whc_std *std, *t; | 339 | struct whc_std *std, *t; |
340 | bool has_qtd = false; | ||
336 | int ret; | 341 | int ret; |
337 | unsigned long flags; | 342 | unsigned long flags; |
338 | 343 | ||
@@ -343,17 +348,22 @@ int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status) | |||
343 | goto out; | 348 | goto out; |
344 | 349 | ||
345 | list_for_each_entry_safe(std, t, &qset->stds, list_node) { | 350 | list_for_each_entry_safe(std, t, &qset->stds, list_node) { |
346 | if (std->urb == urb) | 351 | if (std->urb == urb) { |
352 | if (std->qtd) | ||
353 | has_qtd = true; | ||
347 | qset_free_std(whc, std); | 354 | qset_free_std(whc, std); |
348 | else | 355 | } else |
349 | std->qtd = NULL; /* so this std is re-added when the qset is */ | 356 | std->qtd = NULL; /* so this std is re-added when the qset is */ |
350 | } | 357 | } |
351 | 358 | ||
352 | pzl_qset_remove(whc, qset); | 359 | if (has_qtd) { |
353 | wurb->status = status; | 360 | pzl_qset_remove(whc, qset); |
354 | wurb->is_async = false; | 361 | update_pzl_hw_view(whc); |
355 | queue_work(whc->workqueue, &wurb->dequeue_work); | 362 | wurb->status = status; |
356 | 363 | wurb->is_async = false; | |
364 | queue_work(whc->workqueue, &wurb->dequeue_work); | ||
365 | } else | ||
366 | qset_remove_urb(whc, qset, urb, status); | ||
357 | out: | 367 | out: |
358 | spin_unlock_irqrestore(&whc->lock, flags); | 368 | spin_unlock_irqrestore(&whc->lock, flags); |
359 | 369 | ||
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 43c227027560..65d96b214f95 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -328,6 +328,9 @@ static int option_resume(struct usb_serial *serial); | |||
328 | #define ALCATEL_VENDOR_ID 0x1bbb | 328 | #define ALCATEL_VENDOR_ID 0x1bbb |
329 | #define ALCATEL_PRODUCT_X060S 0x0000 | 329 | #define ALCATEL_PRODUCT_X060S 0x0000 |
330 | 330 | ||
331 | /* Airplus products */ | ||
332 | #define AIRPLUS_VENDOR_ID 0x1011 | ||
333 | #define AIRPLUS_PRODUCT_MCD650 0x3198 | ||
331 | 334 | ||
332 | static struct usb_device_id option_ids[] = { | 335 | static struct usb_device_id option_ids[] = { |
333 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, | 336 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, |
@@ -589,6 +592,7 @@ static struct usb_device_id option_ids[] = { | |||
589 | { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) }, | 592 | { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) }, |
590 | { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, | 593 | { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, |
591 | { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) }, | 594 | { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) }, |
595 | { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, | ||
592 | { } /* Terminating entry */ | 596 | { } /* Terminating entry */ |
593 | }; | 597 | }; |
594 | MODULE_DEVICE_TABLE(usb, option_ids); | 598 | MODULE_DEVICE_TABLE(usb, option_ids); |
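The option.c addition above is the standard two-step for a new modem: a vendor/product #define pair, then one USB_DEVICE() slot ahead of the terminating entry so the driver will bind to it. A stripped-down userspace illustration of such an ID table and the linear match it implies (the structure and helper names are made up for the sketch; only the Airplus IDs come from the patch):

#include <stdio.h>

#define AIRPLUS_VENDOR_ID       0x1011
#define AIRPLUS_PRODUCT_MCD650  0x3198

struct usb_id { unsigned short vendor, product; };

static const struct usb_id option_like_ids[] = {
        { AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650 },
        { 0, 0 }                                /* terminating entry */
};

static int id_match(unsigned short vendor, unsigned short product)
{
        const struct usb_id *id;

        for (id = option_like_ids; id->vendor; id++)
                if (id->vendor == vendor && id->product == product)
                        return 1;
        return 0;
}

int main(void)
{
        printf("%d\n", id_match(0x1011, 0x3198));       /* prints 1 */
        return 0;
}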
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c index 3a4fb023af72..589f6b4404f0 100644 --- a/drivers/usb/storage/transport.c +++ b/drivers/usb/storage/transport.c | |||
@@ -696,7 +696,7 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us) | |||
696 | /* device supports and needs bigger sense buffer */ | 696 | /* device supports and needs bigger sense buffer */ |
697 | if (us->fflags & US_FL_SANE_SENSE) | 697 | if (us->fflags & US_FL_SANE_SENSE) |
698 | sense_size = ~0; | 698 | sense_size = ~0; |
699 | 699 | Retry_Sense: | |
700 | US_DEBUGP("Issuing auto-REQUEST_SENSE\n"); | 700 | US_DEBUGP("Issuing auto-REQUEST_SENSE\n"); |
701 | 701 | ||
702 | scsi_eh_prep_cmnd(srb, &ses, NULL, 0, sense_size); | 702 | scsi_eh_prep_cmnd(srb, &ses, NULL, 0, sense_size); |
@@ -720,6 +720,21 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us) | |||
720 | srb->result = DID_ABORT << 16; | 720 | srb->result = DID_ABORT << 16; |
721 | goto Handle_Errors; | 721 | goto Handle_Errors; |
722 | } | 722 | } |
723 | |||
724 | /* Some devices claim to support larger sense but fail when | ||
725 | * trying to request it. When a transport failure happens | ||
726 | * using US_FL_SANE_SENSE, we always retry with a standard | ||
727 | * (small) sense request. This fixes some USB GSM modems | ||
728 | */ | ||
729 | if (temp_result == USB_STOR_TRANSPORT_FAILED && | ||
730 | (us->fflags & US_FL_SANE_SENSE) && | ||
731 | sense_size != US_SENSE_SIZE) { | ||
732 | US_DEBUGP("-- auto-sense failure, retry small sense\n"); | ||
733 | sense_size = US_SENSE_SIZE; | ||
734 | goto Retry_Sense; | ||
735 | } | ||
736 | |||
737 | /* Other failures */ | ||
723 | if (temp_result != USB_STOR_TRANSPORT_GOOD) { | 738 | if (temp_result != USB_STOR_TRANSPORT_GOOD) { |
724 | US_DEBUGP("-- auto-sense failure\n"); | 739 | US_DEBUGP("-- auto-sense failure\n"); |
725 | 740 | ||
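The transport.c change above adds a one-shot fallback: when a device advertising US_FL_SANE_SENSE fails the large auto-REQUEST_SENSE, the command is retried with the standard 18-byte sense buffer instead of being reported as a hard failure. The retry shape, as a self-contained userspace sketch in which the transfer function is a stand-in that only "succeeds" for the small size:

#include <stdio.h>

#define US_SENSE_SIZE 18

/* stand-in for the real transport call: 0 = good, -1 = transport failed */
static int request_sense(unsigned int size)
{
        return size == US_SENSE_SIZE ? 0 : -1;
}

static int auto_sense(unsigned int sense_size)
{
        int result;

retry:
        result = request_sense(sense_size);
        if (result < 0 && sense_size != US_SENSE_SIZE) {
                /* big sense bounced; fall back to the standard size once */
                sense_size = US_SENSE_SIZE;
                goto retry;
        }
        return result;
}

int main(void)
{
        printf("%d\n", auto_sense(96));         /* prints 0 after one retry */
        return 0;
}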
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c index b2f149fedcc5..4516c36436e6 100644 --- a/drivers/usb/wusbcore/security.c +++ b/drivers/usb/wusbcore/security.c | |||
@@ -200,35 +200,40 @@ int wusb_dev_sec_add(struct wusbhc *wusbhc, | |||
200 | { | 200 | { |
201 | int result, bytes, secd_size; | 201 | int result, bytes, secd_size; |
202 | struct device *dev = &usb_dev->dev; | 202 | struct device *dev = &usb_dev->dev; |
203 | struct usb_security_descriptor secd; | 203 | struct usb_security_descriptor *secd; |
204 | const struct usb_encryption_descriptor *etd, *ccm1_etd = NULL; | 204 | const struct usb_encryption_descriptor *etd, *ccm1_etd = NULL; |
205 | void *secd_buf; | ||
206 | const void *itr, *top; | 205 | const void *itr, *top; |
207 | char buf[64]; | 206 | char buf[64]; |
208 | 207 | ||
208 | secd = kmalloc(sizeof(struct usb_security_descriptor), GFP_KERNEL); | ||
209 | if (secd == NULL) { | ||
210 | result = -ENOMEM; | ||
211 | goto out; | ||
212 | } | ||
213 | |||
209 | result = usb_get_descriptor(usb_dev, USB_DT_SECURITY, | 214 | result = usb_get_descriptor(usb_dev, USB_DT_SECURITY, |
210 | 0, &secd, sizeof(secd)); | 215 | 0, secd, sizeof(struct usb_security_descriptor)); |
211 | if (result < sizeof(secd)) { | 216 | if (result < sizeof(secd)) { |
212 | dev_err(dev, "Can't read security descriptor or " | 217 | dev_err(dev, "Can't read security descriptor or " |
213 | "not enough data: %d\n", result); | 218 | "not enough data: %d\n", result); |
214 | goto error_secd; | 219 | goto out; |
215 | } | 220 | } |
216 | secd_size = le16_to_cpu(secd.wTotalLength); | 221 | secd_size = le16_to_cpu(secd->wTotalLength); |
217 | secd_buf = kmalloc(secd_size, GFP_KERNEL); | 222 | secd = krealloc(secd, secd_size, GFP_KERNEL); |
218 | if (secd_buf == NULL) { | 223 | if (secd == NULL) { |
219 | dev_err(dev, "Can't allocate space for security descriptors\n"); | 224 | dev_err(dev, "Can't allocate space for security descriptors\n"); |
220 | goto error_secd_alloc; | 225 | goto out; |
221 | } | 226 | } |
222 | result = usb_get_descriptor(usb_dev, USB_DT_SECURITY, | 227 | result = usb_get_descriptor(usb_dev, USB_DT_SECURITY, |
223 | 0, secd_buf, secd_size); | 228 | 0, secd, secd_size); |
224 | if (result < secd_size) { | 229 | if (result < secd_size) { |
225 | dev_err(dev, "Can't read security descriptor or " | 230 | dev_err(dev, "Can't read security descriptor or " |
226 | "not enough data: %d\n", result); | 231 | "not enough data: %d\n", result); |
227 | goto error_secd_all; | 232 | goto out; |
228 | } | 233 | } |
229 | bytes = 0; | 234 | bytes = 0; |
230 | itr = secd_buf + sizeof(secd); | 235 | itr = &secd[1]; |
231 | top = secd_buf + result; | 236 | top = (void *)secd + result; |
232 | while (itr < top) { | 237 | while (itr < top) { |
233 | etd = itr; | 238 | etd = itr; |
234 | if (top - itr < sizeof(*etd)) { | 239 | if (top - itr < sizeof(*etd)) { |
@@ -259,24 +264,16 @@ int wusb_dev_sec_add(struct wusbhc *wusbhc, | |||
259 | dev_err(dev, "WUSB device doesn't support CCM1 encryption, " | 264 | dev_err(dev, "WUSB device doesn't support CCM1 encryption, " |
260 | "can't use!\n"); | 265 | "can't use!\n"); |
261 | result = -EINVAL; | 266 | result = -EINVAL; |
262 | goto error_no_ccm1; | 267 | goto out; |
263 | } | 268 | } |
264 | wusb_dev->ccm1_etd = *ccm1_etd; | 269 | wusb_dev->ccm1_etd = *ccm1_etd; |
265 | dev_dbg(dev, "supported encryption: %s; using %s (0x%02x/%02x)\n", | 270 | dev_dbg(dev, "supported encryption: %s; using %s (0x%02x/%02x)\n", |
266 | buf, wusb_et_name(ccm1_etd->bEncryptionType), | 271 | buf, wusb_et_name(ccm1_etd->bEncryptionType), |
267 | ccm1_etd->bEncryptionValue, ccm1_etd->bAuthKeyIndex); | 272 | ccm1_etd->bEncryptionValue, ccm1_etd->bAuthKeyIndex); |
268 | result = 0; | 273 | result = 0; |
269 | kfree(secd_buf); | ||
270 | out: | 274 | out: |
275 | kfree(secd); | ||
271 | return result; | 276 | return result; |
272 | |||
273 | |||
274 | error_no_ccm1: | ||
275 | error_secd_all: | ||
276 | kfree(secd_buf); | ||
277 | error_secd_alloc: | ||
278 | error_secd: | ||
279 | goto out; | ||
280 | } | 277 | } |
281 | 278 | ||
282 | void wusb_dev_sec_rm(struct wusb_dev *wusb_dev) | 279 | void wusb_dev_sec_rm(struct wusb_dev *wusb_dev) |
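The security.c rewrite above drops the on-stack descriptor and the ladder of error labels: the header is kmalloc'd, its wTotalLength is read, the same buffer is grown with krealloc, and every exit funnels through a single kfree at out:. A userspace analogue of that grow-and-reread shape follows; unlike the kernel one-liner it keeps the old pointer across realloc so a failed grow does not leak, and read_desc() is only a stand-in for usb_get_descriptor():

#include <stdlib.h>

struct desc_hdr { unsigned short wTotalLength; };

/* stand-in: pretend the device reports a 64-byte total descriptor */
static int read_desc(void *buf, size_t len)
{
        if (len >= sizeof(struct desc_hdr))
                ((struct desc_hdr *)buf)->wTotalLength = 64;
        return (int)len;
}

static void *fetch_descriptor(void)
{
        struct desc_hdr *d, *bigger;

        d = malloc(sizeof(*d));
        if (!d)
                return NULL;
        if (read_desc(d, sizeof(*d)) < (int)sizeof(*d))
                goto err;

        bigger = realloc(d, d->wTotalLength);   /* grow to the full size */
        if (!bigger)
                goto err;
        d = bigger;

        if (read_desc(d, d->wTotalLength) < (int)d->wTotalLength)
                goto err;
        return d;                               /* caller frees */
err:
        free(d);
        return NULL;
}

int main(void)
{
        void *desc = fetch_descriptor();

        free(desc);
        return desc ? 0 : 1;
}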
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index 69b355ae7f49..361604244271 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c | |||
@@ -27,7 +27,7 @@ | |||
27 | #include "btrfs_inode.h" | 27 | #include "btrfs_inode.h" |
28 | #include "xattr.h" | 28 | #include "xattr.h" |
29 | 29 | ||
30 | #ifdef CONFIG_BTRFS_POSIX_ACL | 30 | #ifdef CONFIG_BTRFS_FS_POSIX_ACL |
31 | 31 | ||
32 | static struct posix_acl *btrfs_get_acl(struct inode *inode, int type) | 32 | static struct posix_acl *btrfs_get_acl(struct inode *inode, int type) |
33 | { | 33 | { |
@@ -313,7 +313,7 @@ struct xattr_handler btrfs_xattr_acl_access_handler = { | |||
313 | .set = btrfs_xattr_acl_access_set, | 313 | .set = btrfs_xattr_acl_access_set, |
314 | }; | 314 | }; |
315 | 315 | ||
316 | #else /* CONFIG_BTRFS_POSIX_ACL */ | 316 | #else /* CONFIG_BTRFS_FS_POSIX_ACL */ |
317 | 317 | ||
318 | int btrfs_acl_chmod(struct inode *inode) | 318 | int btrfs_acl_chmod(struct inode *inode) |
319 | { | 319 | { |
@@ -325,4 +325,4 @@ int btrfs_init_acl(struct inode *inode, struct inode *dir) | |||
325 | return 0; | 325 | return 0; |
326 | } | 326 | } |
327 | 327 | ||
328 | #endif /* CONFIG_BTRFS_POSIX_ACL */ | 328 | #endif /* CONFIG_BTRFS_FS_POSIX_ACL */ |
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index c71abec0ab90..f6783a42f010 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h | |||
@@ -86,6 +86,12 @@ struct btrfs_inode { | |||
86 | * transid of the trans_handle that last modified this inode | 86 | * transid of the trans_handle that last modified this inode |
87 | */ | 87 | */ |
88 | u64 last_trans; | 88 | u64 last_trans; |
89 | |||
90 | /* | ||
91 | * log transid when this inode was last modified | ||
92 | */ | ||
93 | u64 last_sub_trans; | ||
94 | |||
89 | /* | 95 | /* |
90 | * transid that last logged this inode | 96 | * transid that last logged this inode |
91 | */ | 97 | */ |
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 1bb897ecdeeb..444b3e9b92a4 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
@@ -1009,6 +1009,7 @@ struct btrfs_root { | |||
1009 | atomic_t log_writers; | 1009 | atomic_t log_writers; |
1010 | atomic_t log_commit[2]; | 1010 | atomic_t log_commit[2]; |
1011 | unsigned long log_transid; | 1011 | unsigned long log_transid; |
1012 | unsigned long last_log_commit; | ||
1012 | unsigned long log_batch; | 1013 | unsigned long log_batch; |
1013 | pid_t log_start_pid; | 1014 | pid_t log_start_pid; |
1014 | bool log_multiple_pids; | 1015 | bool log_multiple_pids; |
@@ -1152,6 +1153,7 @@ struct btrfs_root { | |||
1152 | #define BTRFS_MOUNT_FLUSHONCOMMIT (1 << 7) | 1153 | #define BTRFS_MOUNT_FLUSHONCOMMIT (1 << 7) |
1153 | #define BTRFS_MOUNT_SSD_SPREAD (1 << 8) | 1154 | #define BTRFS_MOUNT_SSD_SPREAD (1 << 8) |
1154 | #define BTRFS_MOUNT_NOSSD (1 << 9) | 1155 | #define BTRFS_MOUNT_NOSSD (1 << 9) |
1156 | #define BTRFS_MOUNT_DISCARD (1 << 10) | ||
1155 | 1157 | ||
1156 | #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) | 1158 | #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) |
1157 | #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) | 1159 | #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) |
@@ -2373,7 +2375,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options); | |||
2373 | int btrfs_sync_fs(struct super_block *sb, int wait); | 2375 | int btrfs_sync_fs(struct super_block *sb, int wait); |
2374 | 2376 | ||
2375 | /* acl.c */ | 2377 | /* acl.c */ |
2376 | #ifdef CONFIG_BTRFS_POSIX_ACL | 2378 | #ifdef CONFIG_BTRFS_FS_POSIX_ACL |
2377 | int btrfs_check_acl(struct inode *inode, int mask); | 2379 | int btrfs_check_acl(struct inode *inode, int mask); |
2378 | #else | 2380 | #else |
2379 | #define btrfs_check_acl NULL | 2381 | #define btrfs_check_acl NULL |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 100551a66c46..02b6afbd7450 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -917,6 +917,7 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, | |||
917 | atomic_set(&root->log_writers, 0); | 917 | atomic_set(&root->log_writers, 0); |
918 | root->log_batch = 0; | 918 | root->log_batch = 0; |
919 | root->log_transid = 0; | 919 | root->log_transid = 0; |
920 | root->last_log_commit = 0; | ||
920 | extent_io_tree_init(&root->dirty_log_pages, | 921 | extent_io_tree_init(&root->dirty_log_pages, |
921 | fs_info->btree_inode->i_mapping, GFP_NOFS); | 922 | fs_info->btree_inode->i_mapping, GFP_NOFS); |
922 | 923 | ||
@@ -1087,6 +1088,7 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans, | |||
1087 | WARN_ON(root->log_root); | 1088 | WARN_ON(root->log_root); |
1088 | root->log_root = log_root; | 1089 | root->log_root = log_root; |
1089 | root->log_transid = 0; | 1090 | root->log_transid = 0; |
1091 | root->last_log_commit = 0; | ||
1090 | return 0; | 1092 | return 0; |
1091 | } | 1093 | } |
1092 | 1094 | ||
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index d0c4d584efad..e238a0cdac67 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
@@ -1568,23 +1568,23 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans, | |||
1568 | return ret; | 1568 | return ret; |
1569 | } | 1569 | } |
1570 | 1570 | ||
1571 | #ifdef BIO_RW_DISCARD | ||
1572 | static void btrfs_issue_discard(struct block_device *bdev, | 1571 | static void btrfs_issue_discard(struct block_device *bdev, |
1573 | u64 start, u64 len) | 1572 | u64 start, u64 len) |
1574 | { | 1573 | { |
1575 | blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL, | 1574 | blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL, |
1576 | DISCARD_FL_BARRIER); | 1575 | DISCARD_FL_BARRIER); |
1577 | } | 1576 | } |
1578 | #endif | ||
1579 | 1577 | ||
1580 | static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr, | 1578 | static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr, |
1581 | u64 num_bytes) | 1579 | u64 num_bytes) |
1582 | { | 1580 | { |
1583 | #ifdef BIO_RW_DISCARD | ||
1584 | int ret; | 1581 | int ret; |
1585 | u64 map_length = num_bytes; | 1582 | u64 map_length = num_bytes; |
1586 | struct btrfs_multi_bio *multi = NULL; | 1583 | struct btrfs_multi_bio *multi = NULL; |
1587 | 1584 | ||
1585 | if (!btrfs_test_opt(root, DISCARD)) | ||
1586 | return 0; | ||
1587 | |||
1588 | /* Tell the block device(s) that the sectors can be discarded */ | 1588 | /* Tell the block device(s) that the sectors can be discarded */ |
1589 | ret = btrfs_map_block(&root->fs_info->mapping_tree, READ, | 1589 | ret = btrfs_map_block(&root->fs_info->mapping_tree, READ, |
1590 | bytenr, &map_length, &multi, 0); | 1590 | bytenr, &map_length, &multi, 0); |
@@ -1604,9 +1604,6 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr, | |||
1604 | } | 1604 | } |
1605 | 1605 | ||
1606 | return ret; | 1606 | return ret; |
1607 | #else | ||
1608 | return 0; | ||
1609 | #endif | ||
1610 | } | 1607 | } |
1611 | 1608 | ||
1612 | int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, | 1609 | int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, |
@@ -3690,6 +3687,14 @@ static int pin_down_bytes(struct btrfs_trans_handle *trans, | |||
3690 | if (is_data) | 3687 | if (is_data) |
3691 | goto pinit; | 3688 | goto pinit; |
3692 | 3689 | ||
3690 | /* | ||
3691 | * discard is sloooow, and so triggering discards on | ||
3692 | * individual btree blocks isn't a good plan. Just | ||
3693 | * pin everything in discard mode. | ||
3694 | */ | ||
3695 | if (btrfs_test_opt(root, DISCARD)) | ||
3696 | goto pinit; | ||
3697 | |||
3693 | buf = btrfs_find_tree_block(root, bytenr, num_bytes); | 3698 | buf = btrfs_find_tree_block(root, bytenr, num_bytes); |
3694 | if (!buf) | 3699 | if (!buf) |
3695 | goto pinit; | 3700 | goto pinit; |
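The extent-tree.c hunks above turn discard support from a compile-time #ifdef BIO_RW_DISCARD into a runtime decision: btrfs_discard_extent() is always built but bails out unless the new DISCARD mount option is set, and pin_down_bytes() just pins btree blocks outright in discard mode rather than triggering per-block discards. A toy sketch of that option-bit gate (the flag value mirrors the BTRFS_MOUNT_DISCARD bit added in ctree.h; everything else is illustrative):

#include <stdio.h>

#define MOUNT_DISCARD   (1 << 10)       /* same bit as BTRFS_MOUNT_DISCARD */

static unsigned long mount_opt;

static int discard_extent(unsigned long long start, unsigned long long len)
{
        if (!(mount_opt & MOUNT_DISCARD))
                return 0;               /* option off: silently skip */
        printf("discard %llu+%llu\n", start, len);
        return 0;
}

int main(void)
{
        discard_extent(4096, 8192);     /* no output */
        mount_opt |= MOUNT_DISCARD;     /* what "mount -o discard" sets */
        discard_extent(4096, 8192);     /* prints the range */
        return 0;
}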
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 2d623aa0625f..06550affbd27 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c | |||
@@ -1086,8 +1086,10 @@ out_nolock: | |||
1086 | btrfs_end_transaction(trans, root); | 1086 | btrfs_end_transaction(trans, root); |
1087 | else | 1087 | else |
1088 | btrfs_commit_transaction(trans, root); | 1088 | btrfs_commit_transaction(trans, root); |
1089 | } else { | 1089 | } else if (ret != BTRFS_NO_LOG_SYNC) { |
1090 | btrfs_commit_transaction(trans, root); | 1090 | btrfs_commit_transaction(trans, root); |
1091 | } else { | ||
1092 | btrfs_end_transaction(trans, root); | ||
1091 | } | 1093 | } |
1092 | } | 1094 | } |
1093 | if (file->f_flags & O_DIRECT) { | 1095 | if (file->f_flags & O_DIRECT) { |
@@ -1137,6 +1139,13 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync) | |||
1137 | int ret = 0; | 1139 | int ret = 0; |
1138 | struct btrfs_trans_handle *trans; | 1140 | struct btrfs_trans_handle *trans; |
1139 | 1141 | ||
1142 | |||
1143 | /* we wait first, since the writeback may change the inode */ | ||
1144 | root->log_batch++; | ||
1145 | /* the VFS called filemap_fdatawrite for us */ | ||
1146 | btrfs_wait_ordered_range(inode, 0, (u64)-1); | ||
1147 | root->log_batch++; | ||
1148 | |||
1140 | /* | 1149 | /* |
1141 | * check the transaction that last modified this inode | 1150 | * check the transaction that last modified this inode |
1142 | * and see if its already been committed | 1151 | * and see if its already been committed |
@@ -1144,6 +1153,11 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync) | |||
1144 | if (!BTRFS_I(inode)->last_trans) | 1153 | if (!BTRFS_I(inode)->last_trans) |
1145 | goto out; | 1154 | goto out; |
1146 | 1155 | ||
1156 | /* | ||
1157 | * if the last transaction that changed this file was before | ||
1158 | * the current transaction, we can bail out now without any | ||
1159 | * syncing | ||
1160 | */ | ||
1147 | mutex_lock(&root->fs_info->trans_mutex); | 1161 | mutex_lock(&root->fs_info->trans_mutex); |
1148 | if (BTRFS_I(inode)->last_trans <= | 1162 | if (BTRFS_I(inode)->last_trans <= |
1149 | root->fs_info->last_trans_committed) { | 1163 | root->fs_info->last_trans_committed) { |
@@ -1153,13 +1167,6 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync) | |||
1153 | } | 1167 | } |
1154 | mutex_unlock(&root->fs_info->trans_mutex); | 1168 | mutex_unlock(&root->fs_info->trans_mutex); |
1155 | 1169 | ||
1156 | root->log_batch++; | ||
1157 | filemap_fdatawrite(inode->i_mapping); | ||
1158 | btrfs_wait_ordered_range(inode, 0, (u64)-1); | ||
1159 | root->log_batch++; | ||
1160 | |||
1161 | if (datasync && !(inode->i_state & I_DIRTY_PAGES)) | ||
1162 | goto out; | ||
1163 | /* | 1170 | /* |
1164 | * ok we haven't committed the transaction yet, lets do a commit | 1171 | * ok we haven't committed the transaction yet, lets do a commit |
1165 | */ | 1172 | */ |
@@ -1188,14 +1195,18 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync) | |||
1188 | */ | 1195 | */ |
1189 | mutex_unlock(&dentry->d_inode->i_mutex); | 1196 | mutex_unlock(&dentry->d_inode->i_mutex); |
1190 | 1197 | ||
1191 | if (ret > 0) { | 1198 | if (ret != BTRFS_NO_LOG_SYNC) { |
1192 | ret = btrfs_commit_transaction(trans, root); | 1199 | if (ret > 0) { |
1193 | } else { | ||
1194 | ret = btrfs_sync_log(trans, root); | ||
1195 | if (ret == 0) | ||
1196 | ret = btrfs_end_transaction(trans, root); | ||
1197 | else | ||
1198 | ret = btrfs_commit_transaction(trans, root); | 1200 | ret = btrfs_commit_transaction(trans, root); |
1201 | } else { | ||
1202 | ret = btrfs_sync_log(trans, root); | ||
1203 | if (ret == 0) | ||
1204 | ret = btrfs_end_transaction(trans, root); | ||
1205 | else | ||
1206 | ret = btrfs_commit_transaction(trans, root); | ||
1207 | } | ||
1208 | } else { | ||
1209 | ret = btrfs_end_transaction(trans, root); | ||
1199 | } | 1210 | } |
1200 | mutex_lock(&dentry->d_inode->i_mutex); | 1211 | mutex_lock(&dentry->d_inode->i_mutex); |
1201 | out: | 1212 | out: |
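The file.c changes above teach both write-out paths to understand the new BTRFS_NO_LOG_SYNC return: if the tree log already holds everything for this inode, the transaction is simply ended instead of being committed or log-synced. The resulting three-way decision, sketched with trivial stand-ins for the btrfs calls:

#include <stdio.h>

#define NO_LOG_SYNC 256                 /* mirrors BTRFS_NO_LOG_SYNC */

static int sync_log(void)           { return 0; }
static int commit_transaction(void) { puts("full commit");  return 0; }
static int end_transaction(void)    { puts("end only");     return 0; }

/* ret is what the logging step reported for this inode */
static int fsync_tail(int ret)
{
        if (ret == NO_LOG_SYNC)
                return end_transaction();       /* already safely logged */
        if (ret > 0)
                return commit_transaction();    /* logging refused: full commit */
        if (sync_log() == 0)
                return end_transaction();       /* log commit was enough */
        return commit_transaction();            /* log sync failed: fall back */
}

int main(void)
{
        fsync_tail(NO_LOG_SYNC);
        fsync_tail(0);
        return 0;
}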
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 9e138b793dc7..dae12dc7e159 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -3032,12 +3032,22 @@ static int btrfs_truncate_page(struct address_space *mapping, loff_t from) | |||
3032 | 3032 | ||
3033 | if ((offset & (blocksize - 1)) == 0) | 3033 | if ((offset & (blocksize - 1)) == 0) |
3034 | goto out; | 3034 | goto out; |
3035 | ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE); | ||
3036 | if (ret) | ||
3037 | goto out; | ||
3038 | |||
3039 | ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1); | ||
3040 | if (ret) | ||
3041 | goto out; | ||
3035 | 3042 | ||
3036 | ret = -ENOMEM; | 3043 | ret = -ENOMEM; |
3037 | again: | 3044 | again: |
3038 | page = grab_cache_page(mapping, index); | 3045 | page = grab_cache_page(mapping, index); |
3039 | if (!page) | 3046 | if (!page) { |
3047 | btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE); | ||
3048 | btrfs_unreserve_metadata_for_delalloc(root, inode, 1); | ||
3040 | goto out; | 3049 | goto out; |
3050 | } | ||
3041 | 3051 | ||
3042 | page_start = page_offset(page); | 3052 | page_start = page_offset(page); |
3043 | page_end = page_start + PAGE_CACHE_SIZE - 1; | 3053 | page_end = page_start + PAGE_CACHE_SIZE - 1; |
@@ -3070,6 +3080,10 @@ again: | |||
3070 | goto again; | 3080 | goto again; |
3071 | } | 3081 | } |
3072 | 3082 | ||
3083 | clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, | ||
3084 | EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, | ||
3085 | GFP_NOFS); | ||
3086 | |||
3073 | ret = btrfs_set_extent_delalloc(inode, page_start, page_end); | 3087 | ret = btrfs_set_extent_delalloc(inode, page_start, page_end); |
3074 | if (ret) { | 3088 | if (ret) { |
3075 | unlock_extent(io_tree, page_start, page_end, GFP_NOFS); | 3089 | unlock_extent(io_tree, page_start, page_end, GFP_NOFS); |
@@ -3088,6 +3102,9 @@ again: | |||
3088 | unlock_extent(io_tree, page_start, page_end, GFP_NOFS); | 3102 | unlock_extent(io_tree, page_start, page_end, GFP_NOFS); |
3089 | 3103 | ||
3090 | out_unlock: | 3104 | out_unlock: |
3105 | if (ret) | ||
3106 | btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE); | ||
3107 | btrfs_unreserve_metadata_for_delalloc(root, inode, 1); | ||
3091 | unlock_page(page); | 3108 | unlock_page(page); |
3092 | page_cache_release(page); | 3109 | page_cache_release(page); |
3093 | out: | 3110 | out: |
@@ -3111,7 +3128,9 @@ int btrfs_cont_expand(struct inode *inode, loff_t size) | |||
3111 | if (size <= hole_start) | 3128 | if (size <= hole_start) |
3112 | return 0; | 3129 | return 0; |
3113 | 3130 | ||
3114 | btrfs_truncate_page(inode->i_mapping, inode->i_size); | 3131 | err = btrfs_truncate_page(inode->i_mapping, inode->i_size); |
3132 | if (err) | ||
3133 | return err; | ||
3115 | 3134 | ||
3116 | while (1) { | 3135 | while (1) { |
3117 | struct btrfs_ordered_extent *ordered; | 3136 | struct btrfs_ordered_extent *ordered; |
@@ -3480,6 +3499,7 @@ static noinline void init_btrfs_i(struct inode *inode) | |||
3480 | bi->generation = 0; | 3499 | bi->generation = 0; |
3481 | bi->sequence = 0; | 3500 | bi->sequence = 0; |
3482 | bi->last_trans = 0; | 3501 | bi->last_trans = 0; |
3502 | bi->last_sub_trans = 0; | ||
3483 | bi->logged_trans = 0; | 3503 | bi->logged_trans = 0; |
3484 | bi->delalloc_bytes = 0; | 3504 | bi->delalloc_bytes = 0; |
3485 | bi->reserved_bytes = 0; | 3505 | bi->reserved_bytes = 0; |
@@ -4980,7 +5000,9 @@ again: | |||
4980 | set_page_dirty(page); | 5000 | set_page_dirty(page); |
4981 | SetPageUptodate(page); | 5001 | SetPageUptodate(page); |
4982 | 5002 | ||
4983 | BTRFS_I(inode)->last_trans = root->fs_info->generation + 1; | 5003 | BTRFS_I(inode)->last_trans = root->fs_info->generation; |
5004 | BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid; | ||
5005 | |||
4984 | unlock_extent(io_tree, page_start, page_end, GFP_NOFS); | 5006 | unlock_extent(io_tree, page_start, page_end, GFP_NOFS); |
4985 | 5007 | ||
4986 | out_unlock: | 5008 | out_unlock: |
@@ -5005,7 +5027,9 @@ static void btrfs_truncate(struct inode *inode) | |||
5005 | if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) | 5027 | if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) |
5006 | return; | 5028 | return; |
5007 | 5029 | ||
5008 | btrfs_truncate_page(inode->i_mapping, inode->i_size); | 5030 | ret = btrfs_truncate_page(inode->i_mapping, inode->i_size); |
5031 | if (ret) | ||
5032 | return; | ||
5009 | btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1); | 5033 | btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1); |
5010 | 5034 | ||
5011 | trans = btrfs_start_transaction(root, 1); | 5035 | trans = btrfs_start_transaction(root, 1); |
@@ -5100,6 +5124,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) | |||
5100 | if (!ei) | 5124 | if (!ei) |
5101 | return NULL; | 5125 | return NULL; |
5102 | ei->last_trans = 0; | 5126 | ei->last_trans = 0; |
5127 | ei->last_sub_trans = 0; | ||
5103 | ei->logged_trans = 0; | 5128 | ei->logged_trans = 0; |
5104 | ei->outstanding_extents = 0; | 5129 | ei->outstanding_extents = 0; |
5105 | ei->reserved_extents = 0; | 5130 | ei->reserved_extents = 0; |
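The inode.c hunks above make btrfs_truncate_page() reserve data and delalloc metadata space before it dirties the partial page and give the reservations back on the failure paths; btrfs_cont_expand() and btrfs_truncate() now also propagate that error instead of ignoring it. The broad reserve-then-undo shape, reduced to stand-ins (this glosses over exactly which reservation is dropped on which path):

#include <stdio.h>

static int  reserve_space(void)          { puts("reserve"); return 0; }
static void release_space(void)          { puts("release"); }
static int  zero_partial_page(int fail)  { return fail ? -1 : 0; }

static int truncate_page(int fail)
{
        int ret = reserve_space();

        if (ret)
                return ret;
        ret = zero_partial_page(fail);
        if (ret)
                release_space();        /* give the reservation back on error */
        return ret;
}

int main(void)
{
        if (truncate_page(1))           /* failure path: reserve then release */
                puts("propagated error");
        return 0;
}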
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 9de9b2236419..752a5463bf53 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c | |||
@@ -66,7 +66,8 @@ enum { | |||
66 | Opt_degraded, Opt_subvol, Opt_device, Opt_nodatasum, Opt_nodatacow, | 66 | Opt_degraded, Opt_subvol, Opt_device, Opt_nodatasum, Opt_nodatacow, |
67 | Opt_max_extent, Opt_max_inline, Opt_alloc_start, Opt_nobarrier, | 67 | Opt_max_extent, Opt_max_inline, Opt_alloc_start, Opt_nobarrier, |
68 | Opt_ssd, Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, | 68 | Opt_ssd, Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, |
69 | Opt_compress, Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_err, | 69 | Opt_compress, Opt_notreelog, Opt_ratio, Opt_flushoncommit, |
70 | Opt_discard, Opt_err, | ||
70 | }; | 71 | }; |
71 | 72 | ||
72 | static match_table_t tokens = { | 73 | static match_table_t tokens = { |
@@ -88,6 +89,7 @@ static match_table_t tokens = { | |||
88 | {Opt_notreelog, "notreelog"}, | 89 | {Opt_notreelog, "notreelog"}, |
89 | {Opt_flushoncommit, "flushoncommit"}, | 90 | {Opt_flushoncommit, "flushoncommit"}, |
90 | {Opt_ratio, "metadata_ratio=%d"}, | 91 | {Opt_ratio, "metadata_ratio=%d"}, |
92 | {Opt_discard, "discard"}, | ||
91 | {Opt_err, NULL}, | 93 | {Opt_err, NULL}, |
92 | }; | 94 | }; |
93 | 95 | ||
@@ -257,6 +259,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) | |||
257 | info->metadata_ratio); | 259 | info->metadata_ratio); |
258 | } | 260 | } |
259 | break; | 261 | break; |
262 | case Opt_discard: | ||
263 | btrfs_set_opt(info->mount_opt, DISCARD); | ||
264 | break; | ||
260 | default: | 265 | default: |
261 | break; | 266 | break; |
262 | } | 267 | } |
@@ -344,7 +349,7 @@ static int btrfs_fill_super(struct super_block *sb, | |||
344 | sb->s_export_op = &btrfs_export_ops; | 349 | sb->s_export_op = &btrfs_export_ops; |
345 | sb->s_xattr = btrfs_xattr_handlers; | 350 | sb->s_xattr = btrfs_xattr_handlers; |
346 | sb->s_time_gran = 1; | 351 | sb->s_time_gran = 1; |
347 | #ifdef CONFIG_BTRFS_POSIX_ACL | 352 | #ifdef CONFIG_BTRFS_FS_POSIX_ACL |
348 | sb->s_flags |= MS_POSIXACL; | 353 | sb->s_flags |= MS_POSIXACL; |
349 | #endif | 354 | #endif |
350 | 355 | ||
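super.c above wires the new option into the usual three places: an Opt_discard token, a "discard" entry in the match table, and a switch case that sets the DISCARD bit. A toy parser with the same overall shape, using strtok instead of the kernel's match_table_t machinery:

#include <stdio.h>
#include <string.h>

#define MOUNT_DISCARD (1 << 10)

static unsigned long parse_options(char *options)
{
        unsigned long opt = 0;
        char *p;

        for (p = strtok(options, ","); p; p = strtok(NULL, ","))
                if (strcmp(p, "discard") == 0)
                        opt |= MOUNT_DISCARD;
        return opt;
}

int main(void)
{
        char opts[] = "ssd,discard,noatime";

        printf("%#lx\n", parse_options(opts));          /* prints 0x400 */
        return 0;
}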
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 0b8f36d4400a..bca82a4ca8e6 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
@@ -344,10 +344,10 @@ int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans, | |||
344 | /* | 344 | /* |
345 | * when btree blocks are allocated, they have some corresponding bits set for | 345 | * when btree blocks are allocated, they have some corresponding bits set for |
346 | * them in one of two extent_io trees. This is used to make sure all of | 346 | * them in one of two extent_io trees. This is used to make sure all of |
347 | * those extents are on disk for transaction or log commit | 347 | * those extents are sent to disk but does not wait on them |
348 | */ | 348 | */ |
349 | int btrfs_write_and_wait_marked_extents(struct btrfs_root *root, | 349 | int btrfs_write_marked_extents(struct btrfs_root *root, |
350 | struct extent_io_tree *dirty_pages) | 350 | struct extent_io_tree *dirty_pages) |
351 | { | 351 | { |
352 | int ret; | 352 | int ret; |
353 | int err = 0; | 353 | int err = 0; |
@@ -394,6 +394,29 @@ int btrfs_write_and_wait_marked_extents(struct btrfs_root *root, | |||
394 | page_cache_release(page); | 394 | page_cache_release(page); |
395 | } | 395 | } |
396 | } | 396 | } |
397 | if (err) | ||
398 | werr = err; | ||
399 | return werr; | ||
400 | } | ||
401 | |||
402 | /* | ||
403 | * when btree blocks are allocated, they have some corresponding bits set for | ||
404 | * them in one of two extent_io trees. This is used to make sure all of | ||
405 | * those extents are on disk for transaction or log commit. We wait | ||
406 | * on all the pages and clear them from the dirty pages state tree | ||
407 | */ | ||
408 | int btrfs_wait_marked_extents(struct btrfs_root *root, | ||
409 | struct extent_io_tree *dirty_pages) | ||
410 | { | ||
411 | int ret; | ||
412 | int err = 0; | ||
413 | int werr = 0; | ||
414 | struct page *page; | ||
415 | struct inode *btree_inode = root->fs_info->btree_inode; | ||
416 | u64 start = 0; | ||
417 | u64 end; | ||
418 | unsigned long index; | ||
419 | |||
397 | while (1) { | 420 | while (1) { |
398 | ret = find_first_extent_bit(dirty_pages, 0, &start, &end, | 421 | ret = find_first_extent_bit(dirty_pages, 0, &start, &end, |
399 | EXTENT_DIRTY); | 422 | EXTENT_DIRTY); |
@@ -424,6 +447,22 @@ int btrfs_write_and_wait_marked_extents(struct btrfs_root *root, | |||
424 | return werr; | 447 | return werr; |
425 | } | 448 | } |
426 | 449 | ||
450 | /* | ||
451 | * when btree blocks are allocated, they have some corresponding bits set for | ||
452 | * them in one of two extent_io trees. This is used to make sure all of | ||
453 | * those extents are on disk for transaction or log commit | ||
454 | */ | ||
455 | int btrfs_write_and_wait_marked_extents(struct btrfs_root *root, | ||
456 | struct extent_io_tree *dirty_pages) | ||
457 | { | ||
458 | int ret; | ||
459 | int ret2; | ||
460 | |||
461 | ret = btrfs_write_marked_extents(root, dirty_pages); | ||
462 | ret2 = btrfs_wait_marked_extents(root, dirty_pages); | ||
463 | return ret || ret2; | ||
464 | } | ||
465 | |||
427 | int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, | 466 | int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, |
428 | struct btrfs_root *root) | 467 | struct btrfs_root *root) |
429 | { | 468 | { |
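The transaction.c split above lets the tree log start writeback on its dirty extents early and only wait for completion later; the old combined helper is then rebuilt from the two halves. The composition is trivial but worth seeing in isolation (stand-in functions):

static int write_marked(void) { return 0; }   /* start IO on marked extents */
static int wait_marked(void)  { return 0; }   /* wait for it and clear bits  */

static int write_and_wait_marked(void)
{
        int ret  = write_marked();
        int ret2 = wait_marked();

        return ret || ret2;             /* fail if either half failed */
}

int main(void)
{
        return write_and_wait_marked();
}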
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 663c67404918..d4e3e7a6938c 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h | |||
@@ -79,6 +79,7 @@ static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans, | |||
79 | struct inode *inode) | 79 | struct inode *inode) |
80 | { | 80 | { |
81 | BTRFS_I(inode)->last_trans = trans->transaction->transid; | 81 | BTRFS_I(inode)->last_trans = trans->transaction->transid; |
82 | BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid; | ||
82 | } | 83 | } |
83 | 84 | ||
84 | int btrfs_end_transaction(struct btrfs_trans_handle *trans, | 85 | int btrfs_end_transaction(struct btrfs_trans_handle *trans, |
@@ -107,5 +108,9 @@ int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, | |||
107 | struct btrfs_root *root); | 108 | struct btrfs_root *root); |
108 | int btrfs_write_and_wait_marked_extents(struct btrfs_root *root, | 109 | int btrfs_write_and_wait_marked_extents(struct btrfs_root *root, |
109 | struct extent_io_tree *dirty_pages); | 110 | struct extent_io_tree *dirty_pages); |
111 | int btrfs_write_marked_extents(struct btrfs_root *root, | ||
112 | struct extent_io_tree *dirty_pages); | ||
113 | int btrfs_wait_marked_extents(struct btrfs_root *root, | ||
114 | struct extent_io_tree *dirty_pages); | ||
110 | int btrfs_transaction_in_commit(struct btrfs_fs_info *info); | 115 | int btrfs_transaction_in_commit(struct btrfs_fs_info *info); |
111 | #endif | 116 | #endif |
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 4edfdc2acc5f..741666a7676a 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c | |||
@@ -1980,6 +1980,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, | |||
1980 | int ret; | 1980 | int ret; |
1981 | struct btrfs_root *log = root->log_root; | 1981 | struct btrfs_root *log = root->log_root; |
1982 | struct btrfs_root *log_root_tree = root->fs_info->log_root_tree; | 1982 | struct btrfs_root *log_root_tree = root->fs_info->log_root_tree; |
1983 | u64 log_transid = 0; | ||
1983 | 1984 | ||
1984 | mutex_lock(&root->log_mutex); | 1985 | mutex_lock(&root->log_mutex); |
1985 | index1 = root->log_transid % 2; | 1986 | index1 = root->log_transid % 2; |
@@ -1994,12 +1995,13 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, | |||
1994 | if (atomic_read(&root->log_commit[(index1 + 1) % 2])) | 1995 | if (atomic_read(&root->log_commit[(index1 + 1) % 2])) |
1995 | wait_log_commit(trans, root, root->log_transid - 1); | 1996 | wait_log_commit(trans, root, root->log_transid - 1); |
1996 | 1997 | ||
1997 | while (root->log_multiple_pids) { | 1998 | while (1) { |
1998 | unsigned long batch = root->log_batch; | 1999 | unsigned long batch = root->log_batch; |
1999 | mutex_unlock(&root->log_mutex); | 2000 | if (root->log_multiple_pids) { |
2000 | schedule_timeout_uninterruptible(1); | 2001 | mutex_unlock(&root->log_mutex); |
2001 | mutex_lock(&root->log_mutex); | 2002 | schedule_timeout_uninterruptible(1); |
2002 | 2003 | mutex_lock(&root->log_mutex); | |
2004 | } | ||
2003 | wait_for_writer(trans, root); | 2005 | wait_for_writer(trans, root); |
2004 | if (batch == root->log_batch) | 2006 | if (batch == root->log_batch) |
2005 | break; | 2007 | break; |
@@ -2012,12 +2014,16 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, | |||
2012 | goto out; | 2014 | goto out; |
2013 | } | 2015 | } |
2014 | 2016 | ||
2015 | ret = btrfs_write_and_wait_marked_extents(log, &log->dirty_log_pages); | 2017 | /* we start IO on all the marked extents here, but we don't actually |
2018 | * wait for them until later. | ||
2019 | */ | ||
2020 | ret = btrfs_write_marked_extents(log, &log->dirty_log_pages); | ||
2016 | BUG_ON(ret); | 2021 | BUG_ON(ret); |
2017 | 2022 | ||
2018 | btrfs_set_root_node(&log->root_item, log->node); | 2023 | btrfs_set_root_node(&log->root_item, log->node); |
2019 | 2024 | ||
2020 | root->log_batch = 0; | 2025 | root->log_batch = 0; |
2026 | log_transid = root->log_transid; | ||
2021 | root->log_transid++; | 2027 | root->log_transid++; |
2022 | log->log_transid = root->log_transid; | 2028 | log->log_transid = root->log_transid; |
2023 | root->log_start_pid = 0; | 2029 | root->log_start_pid = 0; |
@@ -2046,6 +2052,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, | |||
2046 | 2052 | ||
2047 | index2 = log_root_tree->log_transid % 2; | 2053 | index2 = log_root_tree->log_transid % 2; |
2048 | if (atomic_read(&log_root_tree->log_commit[index2])) { | 2054 | if (atomic_read(&log_root_tree->log_commit[index2])) { |
2055 | btrfs_wait_marked_extents(log, &log->dirty_log_pages); | ||
2049 | wait_log_commit(trans, log_root_tree, | 2056 | wait_log_commit(trans, log_root_tree, |
2050 | log_root_tree->log_transid); | 2057 | log_root_tree->log_transid); |
2051 | mutex_unlock(&log_root_tree->log_mutex); | 2058 | mutex_unlock(&log_root_tree->log_mutex); |
@@ -2065,6 +2072,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, | |||
2065 | * check the full commit flag again | 2072 | * check the full commit flag again |
2066 | */ | 2073 | */ |
2067 | if (root->fs_info->last_trans_log_full_commit == trans->transid) { | 2074 | if (root->fs_info->last_trans_log_full_commit == trans->transid) { |
2075 | btrfs_wait_marked_extents(log, &log->dirty_log_pages); | ||
2068 | mutex_unlock(&log_root_tree->log_mutex); | 2076 | mutex_unlock(&log_root_tree->log_mutex); |
2069 | ret = -EAGAIN; | 2077 | ret = -EAGAIN; |
2070 | goto out_wake_log_root; | 2078 | goto out_wake_log_root; |
@@ -2073,6 +2081,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, | |||
2073 | ret = btrfs_write_and_wait_marked_extents(log_root_tree, | 2081 | ret = btrfs_write_and_wait_marked_extents(log_root_tree, |
2074 | &log_root_tree->dirty_log_pages); | 2082 | &log_root_tree->dirty_log_pages); |
2075 | BUG_ON(ret); | 2083 | BUG_ON(ret); |
2084 | btrfs_wait_marked_extents(log, &log->dirty_log_pages); | ||
2076 | 2085 | ||
2077 | btrfs_set_super_log_root(&root->fs_info->super_for_commit, | 2086 | btrfs_set_super_log_root(&root->fs_info->super_for_commit, |
2078 | log_root_tree->node->start); | 2087 | log_root_tree->node->start); |
@@ -2092,9 +2101,14 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, | |||
2092 | * the running transaction open, so a full commit can't hop | 2101 | * the running transaction open, so a full commit can't hop |
2093 | * in and cause problems either. | 2102 | * in and cause problems either. |
2094 | */ | 2103 | */ |
2095 | write_ctree_super(trans, root->fs_info->tree_root, 2); | 2104 | write_ctree_super(trans, root->fs_info->tree_root, 1); |
2096 | ret = 0; | 2105 | ret = 0; |
2097 | 2106 | ||
2107 | mutex_lock(&root->log_mutex); | ||
2108 | if (root->last_log_commit < log_transid) | ||
2109 | root->last_log_commit = log_transid; | ||
2110 | mutex_unlock(&root->log_mutex); | ||
2111 | |||
2098 | out_wake_log_root: | 2112 | out_wake_log_root: |
2099 | atomic_set(&log_root_tree->log_commit[index2], 0); | 2113 | atomic_set(&log_root_tree->log_commit[index2], 0); |
2100 | smp_mb(); | 2114 | smp_mb(); |
@@ -2862,6 +2876,21 @@ out: | |||
2862 | return ret; | 2876 | return ret; |
2863 | } | 2877 | } |
2864 | 2878 | ||
2879 | static int inode_in_log(struct btrfs_trans_handle *trans, | ||
2880 | struct inode *inode) | ||
2881 | { | ||
2882 | struct btrfs_root *root = BTRFS_I(inode)->root; | ||
2883 | int ret = 0; | ||
2884 | |||
2885 | mutex_lock(&root->log_mutex); | ||
2886 | if (BTRFS_I(inode)->logged_trans == trans->transid && | ||
2887 | BTRFS_I(inode)->last_sub_trans <= root->last_log_commit) | ||
2888 | ret = 1; | ||
2889 | mutex_unlock(&root->log_mutex); | ||
2890 | return ret; | ||
2891 | } | ||
2892 | |||
2893 | |||
2865 | /* | 2894 | /* |
2866 | * helper function around btrfs_log_inode to make sure newly created | 2895 | * helper function around btrfs_log_inode to make sure newly created |
2867 | * parent directories also end up in the log. A minimal inode and backref | 2896 | * parent directories also end up in the log. A minimal inode and backref |
@@ -2901,6 +2930,11 @@ int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, | |||
2901 | if (ret) | 2930 | if (ret) |
2902 | goto end_no_trans; | 2931 | goto end_no_trans; |
2903 | 2932 | ||
2933 | if (inode_in_log(trans, inode)) { | ||
2934 | ret = BTRFS_NO_LOG_SYNC; | ||
2935 | goto end_no_trans; | ||
2936 | } | ||
2937 | |||
2904 | start_log_trans(trans, root); | 2938 | start_log_trans(trans, root); |
2905 | 2939 | ||
2906 | ret = btrfs_log_inode(trans, root, inode, inode_only); | 2940 | ret = btrfs_log_inode(trans, root, inode, inode_only); |
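tree-log.c above gains inode_in_log(): the per-inode counters (logged_trans and the new last_sub_trans) are compared against the root's new last_log_commit, and if the inode has not been touched since the last log commit the caller gets BTRFS_NO_LOG_SYNC back without any log IO. The comparison itself, stripped of locking and with illustrative struct names:

struct log_root  { unsigned long last_log_commit; };
struct log_inode { unsigned long long logged_trans;
                   unsigned long      last_sub_trans; };

static int inode_in_log(const struct log_root *root,
                        const struct log_inode *inode,
                        unsigned long long cur_transid)
{
        return inode->logged_trans == cur_transid &&
               inode->last_sub_trans <= root->last_log_commit;
}

int main(void)
{
        struct log_root  r = { .last_log_commit = 5 };
        struct log_inode i = { .logged_trans = 42, .last_sub_trans = 5 };

        return inode_in_log(&r, &i, 42) ? 0 : 1;        /* exits 0 */
}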
diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h index d09c7609e16b..0776eacb5083 100644 --- a/fs/btrfs/tree-log.h +++ b/fs/btrfs/tree-log.h | |||
@@ -19,6 +19,9 @@ | |||
19 | #ifndef __TREE_LOG_ | 19 | #ifndef __TREE_LOG_ |
20 | #define __TREE_LOG_ | 20 | #define __TREE_LOG_ |
21 | 21 | ||
22 | /* return value for btrfs_log_dentry_safe that means we don't need to log it at all */ | ||
23 | #define BTRFS_NO_LOG_SYNC 256 | ||
24 | |||
22 | int btrfs_sync_log(struct btrfs_trans_handle *trans, | 25 | int btrfs_sync_log(struct btrfs_trans_handle *trans, |
23 | struct btrfs_root *root); | 26 | struct btrfs_root *root); |
24 | int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root); | 27 | int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root); |
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c index b0fc93f95fd0..b6dd5967c48a 100644 --- a/fs/btrfs/xattr.c +++ b/fs/btrfs/xattr.c | |||
@@ -260,7 +260,7 @@ err: | |||
260 | * attributes are handled directly. | 260 | * attributes are handled directly. |
261 | */ | 261 | */ |
262 | struct xattr_handler *btrfs_xattr_handlers[] = { | 262 | struct xattr_handler *btrfs_xattr_handlers[] = { |
263 | #ifdef CONFIG_BTRFS_POSIX_ACL | 263 | #ifdef CONFIG_BTRFS_FS_POSIX_ACL |
264 | &btrfs_xattr_acl_access_handler, | 264 | &btrfs_xattr_acl_access_handler, |
265 | &btrfs_xattr_acl_default_handler, | 265 | &btrfs_xattr_acl_default_handler, |
266 | #endif | 266 | #endif |
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c index 0050fc40e8c9..5fad489ce5bc 100644 --- a/fs/sysfs/dir.c +++ b/fs/sysfs/dir.c | |||
@@ -894,7 +894,8 @@ int sysfs_move_dir(struct kobject *kobj, struct kobject *new_parent_kobj) | |||
894 | 894 | ||
895 | mutex_lock(&sysfs_rename_mutex); | 895 | mutex_lock(&sysfs_rename_mutex); |
896 | BUG_ON(!sd->s_parent); | 896 | BUG_ON(!sd->s_parent); |
897 | new_parent_sd = new_parent_kobj->sd ? new_parent_kobj->sd : &sysfs_root; | 897 | new_parent_sd = (new_parent_kobj && new_parent_kobj->sd) ? |
898 | new_parent_kobj->sd : &sysfs_root; | ||
898 | 899 | ||
899 | error = 0; | 900 | error = 0; |
900 | if (sd->s_parent == new_parent_sd) | 901 | if (sd->s_parent == new_parent_sd) |
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index 561a9c050cef..f5ea4680f15f 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c | |||
@@ -268,7 +268,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd, | |||
268 | struct sysfs_open_dirent *od, *new_od = NULL; | 268 | struct sysfs_open_dirent *od, *new_od = NULL; |
269 | 269 | ||
270 | retry: | 270 | retry: |
271 | spin_lock(&sysfs_open_dirent_lock); | 271 | spin_lock_irq(&sysfs_open_dirent_lock); |
272 | 272 | ||
273 | if (!sd->s_attr.open && new_od) { | 273 | if (!sd->s_attr.open && new_od) { |
274 | sd->s_attr.open = new_od; | 274 | sd->s_attr.open = new_od; |
@@ -281,7 +281,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd, | |||
281 | list_add_tail(&buffer->list, &od->buffers); | 281 | list_add_tail(&buffer->list, &od->buffers); |
282 | } | 282 | } |
283 | 283 | ||
284 | spin_unlock(&sysfs_open_dirent_lock); | 284 | spin_unlock_irq(&sysfs_open_dirent_lock); |
285 | 285 | ||
286 | if (od) { | 286 | if (od) { |
287 | kfree(new_od); | 287 | kfree(new_od); |
@@ -315,8 +315,9 @@ static void sysfs_put_open_dirent(struct sysfs_dirent *sd, | |||
315 | struct sysfs_buffer *buffer) | 315 | struct sysfs_buffer *buffer) |
316 | { | 316 | { |
317 | struct sysfs_open_dirent *od = sd->s_attr.open; | 317 | struct sysfs_open_dirent *od = sd->s_attr.open; |
318 | unsigned long flags; | ||
318 | 319 | ||
319 | spin_lock(&sysfs_open_dirent_lock); | 320 | spin_lock_irqsave(&sysfs_open_dirent_lock, flags); |
320 | 321 | ||
321 | list_del(&buffer->list); | 322 | list_del(&buffer->list); |
322 | if (atomic_dec_and_test(&od->refcnt)) | 323 | if (atomic_dec_and_test(&od->refcnt)) |
@@ -324,7 +325,7 @@ static void sysfs_put_open_dirent(struct sysfs_dirent *sd, | |||
324 | else | 325 | else |
325 | od = NULL; | 326 | od = NULL; |
326 | 327 | ||
327 | spin_unlock(&sysfs_open_dirent_lock); | 328 | spin_unlock_irqrestore(&sysfs_open_dirent_lock, flags); |
328 | 329 | ||
329 | kfree(od); | 330 | kfree(od); |
330 | } | 331 | } |
@@ -456,8 +457,9 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait) | |||
456 | void sysfs_notify_dirent(struct sysfs_dirent *sd) | 457 | void sysfs_notify_dirent(struct sysfs_dirent *sd) |
457 | { | 458 | { |
458 | struct sysfs_open_dirent *od; | 459 | struct sysfs_open_dirent *od; |
460 | unsigned long flags; | ||
459 | 461 | ||
460 | spin_lock(&sysfs_open_dirent_lock); | 462 | spin_lock_irqsave(&sysfs_open_dirent_lock, flags); |
461 | 463 | ||
462 | od = sd->s_attr.open; | 464 | od = sd->s_attr.open; |
463 | if (od) { | 465 | if (od) { |
@@ -465,7 +467,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd) | |||
465 | wake_up_interruptible(&od->poll); | 467 | wake_up_interruptible(&od->poll); |
466 | } | 468 | } |
467 | 469 | ||
468 | spin_unlock(&sysfs_open_dirent_lock); | 470 | spin_unlock_irqrestore(&sysfs_open_dirent_lock, flags); |
469 | } | 471 | } |
470 | EXPORT_SYMBOL_GPL(sysfs_notify_dirent); | 472 | EXPORT_SYMBOL_GPL(sysfs_notify_dirent); |
471 | 473 | ||
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 94958c109761..812a5f3c2abe 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -557,7 +557,7 @@ struct netdev_queue { | |||
557 | * Callback uses when the transmitter has not made any progress | 557 | * Callback uses when the transmitter has not made any progress |
558 | * for dev->watchdog ticks. | 558 | * for dev->watchdog ticks. |
559 | * | 559 | * |
560 | * struct net_device_stats* (*get_stats)(struct net_device *dev); | 560 | * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); |
561 | * Called when a user wants to get the network device usage | 561 | * Called when a user wants to get the network device usage |
562 | * statistics. If not defined, the counters in dev->stats will | 562 | * statistics. If not defined, the counters in dev->stats will |
563 | * be used. | 563 | * be used. |
diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 466859b285e1..c75b960c8ac8 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h | |||
@@ -1669,6 +1669,8 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw); | |||
1669 | * to this function and ieee80211_rx_irqsafe() may not be mixed for a | 1669 | * to this function and ieee80211_rx_irqsafe() may not be mixed for a |
1670 | * single hardware. | 1670 | * single hardware. |
1671 | * | 1671 | * |
1672 | * Note that right now, this function must be called with softirqs disabled. | ||
1673 | * | ||
1672 | * @hw: the hardware this frame came in on | 1674 | * @hw: the hardware this frame came in on |
1673 | * @skb: the buffer to receive, owned by mac80211 after this call | 1675 | * @skb: the buffer to receive, owned by mac80211 after this call |
1674 | */ | 1676 | */ |
diff --git a/include/net/sock.h b/include/net/sock.h index 1621935aad5b..9f96394f694e 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
@@ -226,12 +226,12 @@ struct sock { | |||
226 | #define sk_prot __sk_common.skc_prot | 226 | #define sk_prot __sk_common.skc_prot |
227 | #define sk_net __sk_common.skc_net | 227 | #define sk_net __sk_common.skc_net |
228 | kmemcheck_bitfield_begin(flags); | 228 | kmemcheck_bitfield_begin(flags); |
229 | unsigned char sk_shutdown : 2, | 229 | unsigned int sk_shutdown : 2, |
230 | sk_no_check : 2, | 230 | sk_no_check : 2, |
231 | sk_userlocks : 4; | 231 | sk_userlocks : 4, |
232 | sk_protocol : 8, | ||
233 | sk_type : 16; | ||
232 | kmemcheck_bitfield_end(flags); | 234 | kmemcheck_bitfield_end(flags); |
233 | unsigned char sk_protocol; | ||
234 | unsigned short sk_type; | ||
235 | int sk_rcvbuf; | 235 | int sk_rcvbuf; |
236 | socket_lock_t sk_lock; | 236 | socket_lock_t sk_lock; |
237 | /* | 237 | /* |
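The sock.h change above pulls sk_protocol and sk_type into the kmemcheck-tracked bitfield, presumably so kmemcheck's annotation covers those bits as well as the existing flags. The declared widths (2 + 2 + 4 + 8 + 16) fit one 32-bit unit; a tiny check of how they pack (the exact size is implementation-defined, typically 4 bytes):

#include <stdio.h>

struct sk_flags {
        unsigned int shutdown  : 2,
                     no_check  : 2,
                     userlocks : 4,
                     protocol  : 8,
                     type      : 16;
};

int main(void)
{
        printf("%zu\n", sizeof(struct sk_flags));       /* typically 4 */
        return 0;
}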
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index ccefe574dcf7..47cdd7e76f2b 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -647,7 +647,7 @@ EXPORT_SYMBOL(schedule_delayed_work); | |||
647 | */ | 647 | */ |
648 | void flush_delayed_work(struct delayed_work *dwork) | 648 | void flush_delayed_work(struct delayed_work *dwork) |
649 | { | 649 | { |
650 | if (del_timer(&dwork->timer)) { | 650 | if (del_timer_sync(&dwork->timer)) { |
651 | struct cpu_workqueue_struct *cwq; | 651 | struct cpu_workqueue_struct *cwq; |
652 | cwq = wq_per_cpu(keventd_wq, get_cpu()); | 652 | cwq = wq_per_cpu(keventd_wq, get_cpu()); |
653 | __queue_work(cwq, &dwork->work); | 653 | __queue_work(cwq, &dwork->work); |
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 624c3c9b3c2b..e320afea07fc 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
@@ -644,6 +644,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, | |||
644 | /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */ | 644 | /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */ |
645 | if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept && | 645 | if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept && |
646 | TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) { | 646 | TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) { |
647 | inet_csk(sk)->icsk_accept_queue.rskq_defer_accept--; | ||
647 | inet_rsk(req)->acked = 1; | 648 | inet_rsk(req)->acked = 1; |
648 | return NULL; | 649 | return NULL; |
649 | } | 650 | } |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 6ec6a8a4a224..d0d436d6216c 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -841,6 +841,42 @@ out: | |||
841 | return ret; | 841 | return ret; |
842 | } | 842 | } |
843 | 843 | ||
844 | |||
845 | /** | ||
846 | * first_packet_length - return length of first packet in receive queue | ||
847 | * @sk: socket | ||
848 | * | ||
849 | * Drops all bad checksum frames, until a valid one is found. | ||
850 | * Returns the length of found skb, or 0 if none is found. | ||
851 | */ | ||
852 | static unsigned int first_packet_length(struct sock *sk) | ||
853 | { | ||
854 | struct sk_buff_head list_kill, *rcvq = &sk->sk_receive_queue; | ||
855 | struct sk_buff *skb; | ||
856 | unsigned int res; | ||
857 | |||
858 | __skb_queue_head_init(&list_kill); | ||
859 | |||
860 | spin_lock_bh(&rcvq->lock); | ||
861 | while ((skb = skb_peek(rcvq)) != NULL && | ||
862 | udp_lib_checksum_complete(skb)) { | ||
863 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, | ||
864 | IS_UDPLITE(sk)); | ||
865 | __skb_unlink(skb, rcvq); | ||
866 | __skb_queue_tail(&list_kill, skb); | ||
867 | } | ||
868 | res = skb ? skb->len : 0; | ||
869 | spin_unlock_bh(&rcvq->lock); | ||
870 | |||
871 | if (!skb_queue_empty(&list_kill)) { | ||
872 | lock_sock(sk); | ||
873 | __skb_queue_purge(&list_kill); | ||
874 | sk_mem_reclaim_partial(sk); | ||
875 | release_sock(sk); | ||
876 | } | ||
877 | return res; | ||
878 | } | ||
879 | |||
844 | /* | 880 | /* |
845 | * IOCTL requests applicable to the UDP protocol | 881 | * IOCTL requests applicable to the UDP protocol |
846 | */ | 882 | */ |
@@ -857,21 +893,16 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg) | |||
857 | 893 | ||
858 | case SIOCINQ: | 894 | case SIOCINQ: |
859 | { | 895 | { |
860 | struct sk_buff *skb; | 896 | unsigned int amount = first_packet_length(sk); |
861 | unsigned long amount; | ||
862 | 897 | ||
863 | amount = 0; | 898 | if (amount) |
864 | spin_lock_bh(&sk->sk_receive_queue.lock); | ||
865 | skb = skb_peek(&sk->sk_receive_queue); | ||
866 | if (skb != NULL) { | ||
867 | /* | 899 | /* |
868 | * We will only return the amount | 900 | * We will only return the amount |
869 | * of this packet since that is all | 901 | * of this packet since that is all |
870 | * that will be read. | 902 | * that will be read. |
871 | */ | 903 | */ |
872 | amount = skb->len - sizeof(struct udphdr); | 904 | amount -= sizeof(struct udphdr); |
873 | } | 905 | |
874 | spin_unlock_bh(&sk->sk_receive_queue.lock); | ||
875 | return put_user(amount, (int __user *)arg); | 906 | return put_user(amount, (int __user *)arg); |
876 | } | 907 | } |
877 | 908 | ||
@@ -1540,29 +1571,11 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait) | |||
1540 | { | 1571 | { |
1541 | unsigned int mask = datagram_poll(file, sock, wait); | 1572 | unsigned int mask = datagram_poll(file, sock, wait); |
1542 | struct sock *sk = sock->sk; | 1573 | struct sock *sk = sock->sk; |
1543 | int is_lite = IS_UDPLITE(sk); | ||
1544 | 1574 | ||
1545 | /* Check for false positives due to checksum errors */ | 1575 | /* Check for false positives due to checksum errors */ |
1546 | if ((mask & POLLRDNORM) && | 1576 | if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) && |
1547 | !(file->f_flags & O_NONBLOCK) && | 1577 | !(sk->sk_shutdown & RCV_SHUTDOWN) && !first_packet_length(sk)) |
1548 | !(sk->sk_shutdown & RCV_SHUTDOWN)) { | 1578 | mask &= ~(POLLIN | POLLRDNORM); |
1549 | struct sk_buff_head *rcvq = &sk->sk_receive_queue; | ||
1550 | struct sk_buff *skb; | ||
1551 | |||
1552 | spin_lock_bh(&rcvq->lock); | ||
1553 | while ((skb = skb_peek(rcvq)) != NULL && | ||
1554 | udp_lib_checksum_complete(skb)) { | ||
1555 | UDP_INC_STATS_BH(sock_net(sk), | ||
1556 | UDP_MIB_INERRORS, is_lite); | ||
1557 | __skb_unlink(skb, rcvq); | ||
1558 | kfree_skb(skb); | ||
1559 | } | ||
1560 | spin_unlock_bh(&rcvq->lock); | ||
1561 | |||
1562 | /* nothing to see, move along */ | ||
1563 | if (skb == NULL) | ||
1564 | mask &= ~(POLLIN | POLLRDNORM); | ||
1565 | } | ||
1566 | 1579 | ||
1567 | return mask; | 1580 | return mask; |
1568 | 1581 | ||
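The udp.c refactor above moves the "skip bad-checksum frames, report the first usable length" walk into first_packet_length() so SIOCINQ and udp_poll() share one helper instead of open-coding two slightly different loops. A userspace analogue over a plain array (the real helper also unlinks and frees the bad frames, which is omitted here):

#include <stdio.h>

struct pkt { unsigned int len; int bad_csum; };

static unsigned int first_packet_length(const struct pkt *q, unsigned int n)
{
        unsigned int i;

        for (i = 0; i < n; i++)
                if (!q[i].bad_csum)
                        return q[i].len;        /* first valid packet */
        return 0;                               /* nothing readable */
}

int main(void)
{
        struct pkt q[] = { { 100, 1 }, { 60, 0 } };

        printf("%u\n", first_packet_length(q, 2));      /* prints 60 */
        return 0;
}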
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 920ec8792f4b..6eaf69823439 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c | |||
@@ -544,7 +544,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata) | |||
544 | "%pM\n", bss->cbss.bssid, ifibss->bssid); | 544 | "%pM\n", bss->cbss.bssid, ifibss->bssid); |
545 | #endif /* CONFIG_MAC80211_IBSS_DEBUG */ | 545 | #endif /* CONFIG_MAC80211_IBSS_DEBUG */ |
546 | 546 | ||
547 | if (bss && memcmp(ifibss->bssid, bss->cbss.bssid, ETH_ALEN)) { | 547 | if (bss && !memcmp(ifibss->bssid, bss->cbss.bssid, ETH_ALEN)) { |
548 | printk(KERN_DEBUG "%s: Selected IBSS BSSID %pM" | 548 | printk(KERN_DEBUG "%s: Selected IBSS BSSID %pM" |
549 | " based on configured SSID\n", | 549 | " based on configured SSID\n", |
550 | sdata->dev->name, bss->cbss.bssid); | 550 | sdata->dev->name, bss->cbss.bssid); |
@@ -829,7 +829,7 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local) | |||
829 | if (!sdata->u.ibss.ssid_len) | 829 | if (!sdata->u.ibss.ssid_len) |
830 | continue; | 830 | continue; |
831 | sdata->u.ibss.last_scan_completed = jiffies; | 831 | sdata->u.ibss.last_scan_completed = jiffies; |
832 | ieee80211_sta_find_ibss(sdata); | 832 | mod_timer(&sdata->u.ibss.timer, 0); |
833 | } | 833 | } |
834 | mutex_unlock(&local->iflist_mtx); | 834 | mutex_unlock(&local->iflist_mtx); |
835 | } | 835 | } |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index c01588f9d453..7170bf4565a8 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -2164,11 +2164,17 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, | |||
2164 | 2164 | ||
2165 | skb = rx.skb; | 2165 | skb = rx.skb; |
2166 | 2166 | ||
2167 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { | 2167 | if (rx.sdata && ieee80211_is_data(hdr->frame_control)) { |
2168 | rx.flags |= IEEE80211_RX_RA_MATCH; | ||
2169 | prepares = prepare_for_handlers(rx.sdata, &rx, hdr); | ||
2170 | if (prepares) | ||
2171 | prev = rx.sdata; | ||
2172 | } else list_for_each_entry_rcu(sdata, &local->interfaces, list) { | ||
2168 | if (!netif_running(sdata->dev)) | 2173 | if (!netif_running(sdata->dev)) |
2169 | continue; | 2174 | continue; |
2170 | 2175 | ||
2171 | if (sdata->vif.type == NL80211_IFTYPE_MONITOR) | 2176 | if (sdata->vif.type == NL80211_IFTYPE_MONITOR || |
2177 | sdata->vif.type == NL80211_IFTYPE_AP_VLAN) | ||
2172 | continue; | 2178 | continue; |
2173 | 2179 | ||
2174 | rx.flags |= IEEE80211_RX_RA_MATCH; | 2180 | rx.flags |= IEEE80211_RX_RA_MATCH; |
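
For data frames the receiving interface is already recorded in rx.sdata, so the new fast path hands the frame straight to it; the fallback loop still walks local->interfaces under RCU and now also skips AP_VLAN interfaces, whose traffic is delivered through the owning AP interface. A generic sketch of that RCU-protected iterate-and-filter pattern (the element type and type constants are illustrative):

    #include <linux/rculist.h>
    #include <linux/rcupdate.h>
    #include <linux/types.h>

    struct my_iface {
        struct list_head list;
        int type;
        bool running;
    };

    /* Illustrative interface-type values. */
    #define MY_TYPE_MONITOR 1
    #define MY_TYPE_AP_VLAN 2

    static void deliver_one(struct my_iface *iface)
    {
        /* ... per-interface RX handling ... */
        (void)iface;
    }

    static void deliver_to_interfaces(struct list_head *interfaces)
    {
        struct my_iface *iface;

        rcu_read_lock();
        list_for_each_entry_rcu(iface, interfaces, list) {
            if (!iface->running)
                continue;
            /* Monitor and AP_VLAN interfaces are fed elsewhere, so the
             * generic delivery loop skips them, as the hunk does. */
            if (iface->type == MY_TYPE_MONITOR ||
                iface->type == MY_TYPE_AP_VLAN)
                continue;
            deliver_one(iface);
        }
        rcu_read_unlock();
    }
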
@@ -2447,6 +2453,8 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
2447 | struct ieee80211_supported_band *sband; | 2453 | struct ieee80211_supported_band *sband; |
2448 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); | 2454 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); |
2449 | 2455 | ||
2456 | WARN_ON_ONCE(softirq_count() == 0); | ||
2457 | |||
2450 | if (WARN_ON(status->band < 0 || | 2458 | if (WARN_ON(status->band < 0 || |
2451 | status->band >= IEEE80211_NUM_BANDS)) | 2459 | status->band >= IEEE80211_NUM_BANDS)) |
2452 | goto drop; | 2460 | goto drop; |
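
The added WARN_ON_ONCE() spells out a calling convention: ieee80211_rx() expects softirq_count() to be non-zero, i.e. to run inside a softirq handler or a local_bh_disable() section. A hedged sketch of how a caller in process context could satisfy that using only the core BH helpers:

    #include <linux/interrupt.h>
    #include <net/mac80211.h>

    /* Sketch: hand a received frame to mac80211 from process context by
     * disabling bottom halves around the call, which makes softirq_count()
     * non-zero and keeps the new WARN_ON_ONCE() quiet. */
    static void my_deliver_frame(struct ieee80211_hw *hw, struct sk_buff *skb)
    {
        local_bh_disable();
        ieee80211_rx(hw, skb);
        local_bh_enable();
    }
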
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index eec001491e66..594f2318c3d8 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -361,6 +361,7 @@ int sta_info_insert(struct sta_info *sta) | |||
361 | u.ap); | 361 | u.ap); |
362 | 362 | ||
363 | drv_sta_notify(local, &sdata->vif, STA_NOTIFY_ADD, &sta->sta); | 363 | drv_sta_notify(local, &sdata->vif, STA_NOTIFY_ADD, &sta->sta); |
364 | sdata = sta->sdata; | ||
364 | } | 365 | } |
365 | 366 | ||
366 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | 367 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
@@ -496,6 +497,7 @@ static void __sta_info_unlink(struct sta_info **sta) | |||
496 | 497 | ||
497 | drv_sta_notify(local, &sdata->vif, STA_NOTIFY_REMOVE, | 498 | drv_sta_notify(local, &sdata->vif, STA_NOTIFY_REMOVE, |
498 | &(*sta)->sta); | 499 | &(*sta)->sta); |
500 | sdata = (*sta)->sdata; | ||
499 | } | 501 | } |
500 | 502 | ||
501 | if (ieee80211_vif_is_mesh(&sdata->vif)) { | 503 | if (ieee80211_vif_is_mesh(&sdata->vif)) { |
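
Both sta_info hunks reload sdata after the driver notification: for AP_VLAN stations the surrounding (unshown) code temporarily points sdata at the owning AP interface, recovered from the bss pointer with container_of(), so without the reload the lines that follow would keep operating on the wrong interface. A generic container_of() refresher with made-up types:

    #include <linux/kernel.h>

    /* Made-up types standing in for the sdata/bss relationship. */
    struct bss_conf {
        int beacon_int;
    };

    struct iface {
        int type;
        struct bss_conf ap;    /* embedded member, like u.ap in sdata */
    };

    static struct iface *iface_from_bss(struct bss_conf *bss)
    {
        /* Recover the containing structure from a pointer to one of its
         * members -- the trick used to find the AP interface that owns
         * an AP_VLAN station's bss pointer. */
        return container_of(bss, struct iface, ap);
    }
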
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index fd4028296613..db4bda681ec9 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -1704,7 +1704,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1704 | if (!is_multicast_ether_addr(hdr.addr1)) { | 1704 | if (!is_multicast_ether_addr(hdr.addr1)) { |
1705 | rcu_read_lock(); | 1705 | rcu_read_lock(); |
1706 | sta = sta_info_get(local, hdr.addr1); | 1706 | sta = sta_info_get(local, hdr.addr1); |
1707 | if (sta) | 1707 | /* XXX: in the future, use sdata to look up the sta */ |
1708 | if (sta && sta->sdata == sdata) | ||
1708 | sta_flags = get_sta_flags(sta); | 1709 | sta_flags = get_sta_flags(sta); |
1709 | rcu_read_unlock(); | 1710 | rcu_read_unlock(); |
1710 | } | 1711 | } |
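
sta_info_get() searches a table that is shared by every virtual interface on the device and is keyed only by MAC address, so a hit may belong to another interface; the added sta->sdata == sdata test makes sure the flags are taken only from a station owned by the transmitting interface (the XXX comment hints that a per-sdata lookup is the longer-term plan). A small sketch of that lookup-then-check-ownership pattern, all names illustrative:

    #include <linux/rcupdate.h>
    #include <linux/types.h>

    struct my_vif;

    struct my_station {
        struct my_vif *owner;    /* interface this entry belongs to */
        u32 flags;
    };

    /* Assumed to exist for the sketch: a device-wide lookup by address. */
    struct my_station *my_station_lookup(const u8 *addr);

    static u32 my_tx_sta_flags(struct my_vif *vif, const u8 *addr)
    {
        struct my_station *sta;
        u32 flags = 0;

        rcu_read_lock();
        sta = my_station_lookup(addr);
        /* The table is shared by all interfaces on the device, so check
         * ownership before trusting anything stored in the entry. */
        if (sta && sta->owner == vif)
            flags = sta->flags;
        rcu_read_unlock();

        return flags;
    }
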
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index dd6564321369..aeb65b3d2295 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -339,7 +339,7 @@ void ieee80211_add_pending_skb(struct ieee80211_local *local, | |||
339 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 339 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
340 | 340 | ||
341 | if (WARN_ON(!info->control.vif)) { | 341 | if (WARN_ON(!info->control.vif)) { |
342 | kfree(skb); | 342 | kfree_skb(skb); |
343 | return; | 343 | return; |
344 | } | 344 | } |
345 | 345 | ||
@@ -367,7 +367,7 @@ int ieee80211_add_pending_skbs(struct ieee80211_local *local, | |||
367 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 367 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
368 | 368 | ||
369 | if (WARN_ON(!info->control.vif)) { | 369 | if (WARN_ON(!info->control.vif)) { |
370 | kfree(skb); | 370 | kfree_skb(skb); |
371 | continue; | 371 | continue; |
372 | } | 372 | } |
373 | 373 | ||
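
Both util.c hunks replace kfree() with kfree_skb(): an sk_buff is reference-counted and carries its own destructor and shared data area, so it has to be released through the skb API rather than handed straight back to the slab allocator. A minimal allocate/free pairing for reference; the size and GFP flags are arbitrary:

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/skbuff.h>

    static int my_build_and_discard(void)
    {
        struct sk_buff *skb = alloc_skb(128, GFP_KERNEL);  /* arbitrary size */

        if (!skb)
            return -ENOMEM;

        /* ... fill the skb, then decide not to transmit it ... */

        /* kfree_skb() drops a reference and runs the skb destructor;
         * kfree(skb) would bypass both and corrupt skb accounting. */
        kfree_skb(skb);
        return 0;
    }
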
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 96c0ed115e2a..6b0359a500e6 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c | |||
@@ -34,7 +34,7 @@ static struct tcf_hashinfo pedit_hash_info = { | |||
34 | }; | 34 | }; |
35 | 35 | ||
36 | static const struct nla_policy pedit_policy[TCA_PEDIT_MAX + 1] = { | 36 | static const struct nla_policy pedit_policy[TCA_PEDIT_MAX + 1] = { |
37 | [TCA_PEDIT_PARMS] = { .len = sizeof(struct tcf_pedit) }, | 37 | [TCA_PEDIT_PARMS] = { .len = sizeof(struct tc_pedit) }, |
38 | }; | 38 | }; |
39 | 39 | ||
40 | static int tcf_pedit_init(struct nlattr *nla, struct nlattr *est, | 40 | static int tcf_pedit_init(struct nlattr *nla, struct nlattr *est, |
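
The policy entry has to describe what userspace actually puts on the wire, struct tc_pedit from the netlink ABI, not struct tcf_pedit, the kernel's larger internal action state, which would make the length check demand more bytes than a well-formed request contains. A hedged sketch of how such a policy is typically declared and applied; the attribute enum and payload struct are made up:

    #include <linux/errno.h>
    #include <net/netlink.h>

    /* Illustrative ABI struct and attribute numbering. */
    struct my_parm {
        __u32 index;
        __u32 flags;
    };

    enum {
        MY_ATTR_UNSPEC,
        MY_ATTR_PARMS,
        __MY_ATTR_MAX,
    };
    #define MY_ATTR_MAX (__MY_ATTR_MAX - 1)

    static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
        /* .len is the minimum payload size accepted for the attribute,
         * so it must describe the userspace-visible struct. */
        [MY_ATTR_PARMS] = { .len = sizeof(struct my_parm) },
    };

    static int my_parse(struct nlattr *nla)
    {
        struct nlattr *tb[MY_ATTR_MAX + 1];
        int err;

        err = nla_parse_nested(tb, MY_ATTR_MAX, nla, my_policy);
        if (err < 0)
            return err;
        if (!tb[MY_ATTR_PARMS])
            return -EINVAL;
        /* nla_data(tb[MY_ATTR_PARMS]) now points at a struct my_parm. */
        return 0;
    }
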
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 6a536949cdc0..7cf6c0fbc7a6 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
@@ -350,7 +350,7 @@ static int tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp, | |||
350 | tcm = NLMSG_DATA(nlh); | 350 | tcm = NLMSG_DATA(nlh); |
351 | tcm->tcm_family = AF_UNSPEC; | 351 | tcm->tcm_family = AF_UNSPEC; |
352 | tcm->tcm__pad1 = 0; | 352 | tcm->tcm__pad1 = 0; |
353 | tcm->tcm__pad1 = 0; | 353 | tcm->tcm__pad2 = 0; |
354 | tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex; | 354 | tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex; |
355 | tcm->tcm_parent = tp->classid; | 355 | tcm->tcm_parent = tp->classid; |
356 | tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol); | 356 | tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol); |
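
Because of the duplicated assignment, tcm__pad2 was never written before the message was copied to userspace, leaking stale kernel memory through netlink. Clearing each padding field explicitly, as the hunk does, fixes it; an alternative sketch is to zero the whole header before filling it in (field names per the rtnetlink ABI, helper name illustrative):

    #include <linux/rtnetlink.h>
    #include <linux/socket.h>
    #include <linux/string.h>
    #include <linux/types.h>

    /* Illustrative helper: fill a tcmsg with no uninitialized bytes. */
    static void my_fill_tcmsg(struct tcmsg *tcm, int ifindex, u32 parent, u32 info)
    {
        memset(tcm, 0, sizeof(*tcm));    /* covers tcm__pad1 and tcm__pad2 */
        tcm->tcm_family = AF_UNSPEC;
        tcm->tcm_ifindex = ifindex;
        tcm->tcm_parent = parent;
        tcm->tcm_info = info;
    }
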
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index eddab097435c..ca3c92a0a14f 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -4029,7 +4029,7 @@ static int nl80211_wiphy_netns(struct sk_buff *skb, struct genl_info *info) | |||
4029 | rdev = cfg80211_get_dev_from_info(info); | 4029 | rdev = cfg80211_get_dev_from_info(info); |
4030 | if (IS_ERR(rdev)) { | 4030 | if (IS_ERR(rdev)) { |
4031 | err = PTR_ERR(rdev); | 4031 | err = PTR_ERR(rdev); |
4032 | goto out; | 4032 | goto out_rtnl; |
4033 | } | 4033 | } |
4034 | 4034 | ||
4035 | net = get_net_ns_by_pid(pid); | 4035 | net = get_net_ns_by_pid(pid); |
@@ -4049,6 +4049,7 @@ static int nl80211_wiphy_netns(struct sk_buff *skb, struct genl_info *info) | |||
4049 | put_net(net); | 4049 | put_net(net); |
4050 | out: | 4050 | out: |
4051 | cfg80211_unlock_rdev(rdev); | 4051 | cfg80211_unlock_rdev(rdev); |
4052 | out_rtnl: | ||
4052 | rtnl_unlock(); | 4053 | rtnl_unlock(); |
4053 | return err; | 4054 | return err; |
4054 | } | 4055 | } |
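
When cfg80211_get_dev_from_info() fails it returns an error pointer, not a locked device, so unwinding through the old out: label would have passed that error pointer to cfg80211_unlock_rdev(); the new out_rtnl label skips the unlock and only drops the RTNL. The general shape of that layered-unwind pattern, with placeholder helpers:

    #include <linux/err.h>
    #include <linux/errno.h>

    /* Placeholder acquire/release helpers for the sketch. */
    int take_outer_lock(void);
    void drop_outer_lock(void);
    void *acquire_device(void);
    void release_device(void *dev);
    int do_work(void *dev);
    int do_more_work(void *dev);

    static int my_op(void)
    {
        void *dev;
        int err;

        err = take_outer_lock();
        if (err)
            return err;

        dev = acquire_device();
        if (IS_ERR(dev)) {
            err = PTR_ERR(dev);
            /* dev is an error pointer, not a live device: skip the
             * release step and unwind only the outer lock. */
            goto out_outer;
        }

        err = do_work(dev);
        if (err)
            goto out_release;

        err = do_more_work(dev);

    out_release:
        release_device(dev);
    out_outer:
        drop_outer_lock();
        return err;
    }
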