82 files changed, 1408 insertions, 1128 deletions
diff --git a/arch/arm/configs/ep80219_defconfig b/arch/arm/configs/ep80219_defconfig index fbe312e757cb..3c73b707c2f3 100644 --- a/arch/arm/configs/ep80219_defconfig +++ b/arch/arm/configs/ep80219_defconfig | |||
| @@ -522,6 +522,7 @@ CONFIG_E100=y | |||
| 522 | # CONFIG_DL2K is not set | 522 | # CONFIG_DL2K is not set |
| 523 | CONFIG_E1000=y | 523 | CONFIG_E1000=y |
| 524 | CONFIG_E1000_NAPI=y | 524 | CONFIG_E1000_NAPI=y |
| 525 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set | ||
| 525 | # CONFIG_NS83820 is not set | 526 | # CONFIG_NS83820 is not set |
| 526 | # CONFIG_HAMACHI is not set | 527 | # CONFIG_HAMACHI is not set |
| 527 | # CONFIG_YELLOWFIN is not set | 528 | # CONFIG_YELLOWFIN is not set |
diff --git a/arch/arm/configs/iq31244_defconfig b/arch/arm/configs/iq31244_defconfig index c07628ceaf0c..32467160a6df 100644 --- a/arch/arm/configs/iq31244_defconfig +++ b/arch/arm/configs/iq31244_defconfig | |||
| @@ -493,6 +493,7 @@ CONFIG_NETDEVICES=y | |||
| 493 | # CONFIG_DL2K is not set | 493 | # CONFIG_DL2K is not set |
| 494 | CONFIG_E1000=y | 494 | CONFIG_E1000=y |
| 495 | CONFIG_E1000_NAPI=y | 495 | CONFIG_E1000_NAPI=y |
| 496 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set | ||
| 496 | # CONFIG_NS83820 is not set | 497 | # CONFIG_NS83820 is not set |
| 497 | # CONFIG_HAMACHI is not set | 498 | # CONFIG_HAMACHI is not set |
| 498 | # CONFIG_YELLOWFIN is not set | 499 | # CONFIG_YELLOWFIN is not set |
diff --git a/arch/arm/configs/iq80321_defconfig b/arch/arm/configs/iq80321_defconfig index 18fa1615fdfd..b000da753c41 100644 --- a/arch/arm/configs/iq80321_defconfig +++ b/arch/arm/configs/iq80321_defconfig | |||
| @@ -415,6 +415,7 @@ CONFIG_NETDEVICES=y | |||
| 415 | # CONFIG_DL2K is not set | 415 | # CONFIG_DL2K is not set |
| 416 | CONFIG_E1000=y | 416 | CONFIG_E1000=y |
| 417 | CONFIG_E1000_NAPI=y | 417 | CONFIG_E1000_NAPI=y |
| 418 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set | ||
| 418 | # CONFIG_NS83820 is not set | 419 | # CONFIG_NS83820 is not set |
| 419 | # CONFIG_HAMACHI is not set | 420 | # CONFIG_HAMACHI is not set |
| 420 | # CONFIG_YELLOWFIN is not set | 421 | # CONFIG_YELLOWFIN is not set |
diff --git a/arch/arm/configs/iq80331_defconfig b/arch/arm/configs/iq80331_defconfig index f50035de1fff..46c79e1efe07 100644 --- a/arch/arm/configs/iq80331_defconfig +++ b/arch/arm/configs/iq80331_defconfig | |||
| @@ -496,6 +496,7 @@ CONFIG_NETDEVICES=y | |||
| 496 | # CONFIG_DL2K is not set | 496 | # CONFIG_DL2K is not set |
| 497 | CONFIG_E1000=y | 497 | CONFIG_E1000=y |
| 498 | CONFIG_E1000_NAPI=y | 498 | CONFIG_E1000_NAPI=y |
| 499 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set | ||
| 499 | # CONFIG_NS83820 is not set | 500 | # CONFIG_NS83820 is not set |
| 500 | # CONFIG_HAMACHI is not set | 501 | # CONFIG_HAMACHI is not set |
| 501 | # CONFIG_YELLOWFIN is not set | 502 | # CONFIG_YELLOWFIN is not set |
diff --git a/arch/arm/configs/iq80332_defconfig b/arch/arm/configs/iq80332_defconfig index 18b3f372ed68..11959b705d82 100644 --- a/arch/arm/configs/iq80332_defconfig +++ b/arch/arm/configs/iq80332_defconfig | |||
| @@ -496,6 +496,7 @@ CONFIG_NETDEVICES=y | |||
| 496 | # CONFIG_DL2K is not set | 496 | # CONFIG_DL2K is not set |
| 497 | CONFIG_E1000=y | 497 | CONFIG_E1000=y |
| 498 | CONFIG_E1000_NAPI=y | 498 | CONFIG_E1000_NAPI=y |
| 499 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set | ||
| 499 | # CONFIG_NS83820 is not set | 500 | # CONFIG_NS83820 is not set |
| 500 | # CONFIG_HAMACHI is not set | 501 | # CONFIG_HAMACHI is not set |
| 501 | # CONFIG_YELLOWFIN is not set | 502 | # CONFIG_YELLOWFIN is not set |
diff --git a/arch/i386/defconfig b/arch/i386/defconfig index 6a431b926019..3cbe6e9cb9fc 100644 --- a/arch/i386/defconfig +++ b/arch/i386/defconfig | |||
| @@ -644,6 +644,8 @@ CONFIG_8139TOO_PIO=y | |||
| 644 | # CONFIG_ACENIC is not set | 644 | # CONFIG_ACENIC is not set |
| 645 | # CONFIG_DL2K is not set | 645 | # CONFIG_DL2K is not set |
| 646 | # CONFIG_E1000 is not set | 646 | # CONFIG_E1000 is not set |
| 647 | # CONFIG_E1000_NAPI is not set | ||
| 648 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set | ||
| 647 | # CONFIG_NS83820 is not set | 649 | # CONFIG_NS83820 is not set |
| 648 | # CONFIG_HAMACHI is not set | 650 | # CONFIG_HAMACHI is not set |
| 649 | # CONFIG_YELLOWFIN is not set | 651 | # CONFIG_YELLOWFIN is not set |
diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig index 1d07d8072ec2..991c07b57c24 100644 --- a/arch/ia64/configs/gensparse_defconfig +++ b/arch/ia64/configs/gensparse_defconfig | |||
| @@ -557,6 +557,7 @@ CONFIG_E100=m | |||
| 557 | # CONFIG_DL2K is not set | 557 | # CONFIG_DL2K is not set |
| 558 | CONFIG_E1000=y | 558 | CONFIG_E1000=y |
| 559 | # CONFIG_E1000_NAPI is not set | 559 | # CONFIG_E1000_NAPI is not set |
| 560 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set | ||
| 560 | # CONFIG_NS83820 is not set | 561 | # CONFIG_NS83820 is not set |
| 561 | # CONFIG_HAMACHI is not set | 562 | # CONFIG_HAMACHI is not set |
| 562 | # CONFIG_YELLOWFIN is not set | 563 | # CONFIG_YELLOWFIN is not set |
diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig index b1e8f09e9fd5..6859119bc9dd 100644 --- a/arch/ia64/configs/tiger_defconfig +++ b/arch/ia64/configs/tiger_defconfig | |||
| @@ -565,6 +565,7 @@ CONFIG_E100=m | |||
| 565 | # CONFIG_DL2K is not set | 565 | # CONFIG_DL2K is not set |
| 566 | CONFIG_E1000=y | 566 | CONFIG_E1000=y |
| 567 | # CONFIG_E1000_NAPI is not set | 567 | # CONFIG_E1000_NAPI is not set |
| 568 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set | ||
| 568 | # CONFIG_NS83820 is not set | 569 | # CONFIG_NS83820 is not set |
| 569 | # CONFIG_HAMACHI is not set | 570 | # CONFIG_HAMACHI is not set |
| 570 | # CONFIG_YELLOWFIN is not set | 571 | # CONFIG_YELLOWFIN is not set |
diff --git a/arch/ia64/configs/zx1_defconfig b/arch/ia64/configs/zx1_defconfig index 0856ca67dd50..53899dc8eb53 100644 --- a/arch/ia64/configs/zx1_defconfig +++ b/arch/ia64/configs/zx1_defconfig | |||
| @@ -548,6 +548,7 @@ CONFIG_E100=y | |||
| 548 | # CONFIG_DL2K is not set | 548 | # CONFIG_DL2K is not set |
| 549 | CONFIG_E1000=y | 549 | CONFIG_E1000=y |
| 550 | # CONFIG_E1000_NAPI is not set | 550 | # CONFIG_E1000_NAPI is not set |
| 551 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set | ||
| 551 | # CONFIG_NS83820 is not set | 552 | # CONFIG_NS83820 is not set |
| 552 | # CONFIG_HAMACHI is not set | 553 | # CONFIG_HAMACHI is not set |
| 553 | # CONFIG_YELLOWFIN is not set | 554 | # CONFIG_YELLOWFIN is not set |
diff --git a/arch/ia64/defconfig b/arch/ia64/defconfig index 275a26c6e5aa..dcbc78a4cfa4 100644 --- a/arch/ia64/defconfig +++ b/arch/ia64/defconfig | |||
| @@ -565,6 +565,7 @@ CONFIG_E100=m | |||
| 565 | # CONFIG_DL2K is not set | 565 | # CONFIG_DL2K is not set |
| 566 | CONFIG_E1000=y | 566 | CONFIG_E1000=y |
| 567 | # CONFIG_E1000_NAPI is not set | 567 | # CONFIG_E1000_NAPI is not set |
| 568 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set | ||
| 568 | # CONFIG_NS83820 is not set | 569 | # CONFIG_NS83820 is not set |
| 569 | # CONFIG_HAMACHI is not set | 570 | # CONFIG_HAMACHI is not set |
| 570 | # CONFIG_YELLOWFIN is not set | 571 | # CONFIG_YELLOWFIN is not set |
diff --git a/arch/parisc/configs/a500_defconfig b/arch/parisc/configs/a500_defconfig index 955ef5084f3e..959ad3c4e372 100644 --- a/arch/parisc/configs/a500_defconfig +++ b/arch/parisc/configs/a500_defconfig | |||
| @@ -602,6 +602,7 @@ CONFIG_ACENIC_OMIT_TIGON_I=y | |||
| 602 | # CONFIG_DL2K is not set | 602 | # CONFIG_DL2K is not set |
| 603 | CONFIG_E1000=m | 603 | CONFIG_E1000=m |
| 604 | CONFIG_E1000_NAPI=y | 604 | CONFIG_E1000_NAPI=y |
| 605 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set | ||
| 605 | # CONFIG_NS83820 is not set | 606 | # CONFIG_NS83820 is not set |
| 606 | # CONFIG_HAMACHI is not set | 607 | # CONFIG_HAMACHI is not set |
| 607 | # CONFIG_YELLOWFIN is not set | 608 | # CONFIG_YELLOWFIN is not set |
diff --git a/arch/parisc/configs/c3000_defconfig b/arch/parisc/configs/c3000_defconfig index 9d86b6b1ebd1..0b1c8c1fa8a3 100644 --- a/arch/parisc/configs/c3000_defconfig +++ b/arch/parisc/configs/c3000_defconfig | |||
| @@ -626,6 +626,7 @@ CONFIG_ACENIC=m | |||
| 626 | # CONFIG_DL2K is not set | 626 | # CONFIG_DL2K is not set |
| 627 | CONFIG_E1000=m | 627 | CONFIG_E1000=m |
| 628 | # CONFIG_E1000_NAPI is not set | 628 | # CONFIG_E1000_NAPI is not set |
| 629 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set | ||
| 629 | # CONFIG_NS83820 is not set | 630 | # CONFIG_NS83820 is not set |
| 630 | # CONFIG_HAMACHI is not set | 631 | # CONFIG_HAMACHI is not set |
| 631 | # CONFIG_YELLOWFIN is not set | 632 | # CONFIG_YELLOWFIN is not set |
diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig index b657f7e44762..063b84f2cbea 100644 --- a/arch/powerpc/configs/cell_defconfig +++ b/arch/powerpc/configs/cell_defconfig | |||
| @@ -533,6 +533,7 @@ CONFIG_MII=y | |||
| 533 | # CONFIG_DL2K is not set | 533 | # CONFIG_DL2K is not set |
| 534 | CONFIG_E1000=m | 534 | CONFIG_E1000=m |
| 535 | # CONFIG_E1000_NAPI is not set | 535 | # CONFIG_E1000_NAPI is not set |
| 536 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set | ||
| 536 | # CONFIG_NS83820 is not set | 537 | # CONFIG_NS83820 is not set |
| 537 | # CONFIG_HAMACHI is not set | 538 | # CONFIG_HAMACHI is not set |
| 538 | # CONFIG_YELLOWFIN is not set | 539 | # CONFIG_YELLOWFIN is not set |
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig index 3c22ccb18519..d6fed3f56580 100644 --- a/arch/powerpc/configs/g5_defconfig +++ b/arch/powerpc/configs/g5_defconfig | |||
| @@ -675,6 +675,7 @@ CONFIG_ACENIC_OMIT_TIGON_I=y | |||
| 675 | # CONFIG_DL2K is not set | 675 | # CONFIG_DL2K is not set |
| 676 | CONFIG_E1000=y | 676 | CONFIG_E1000=y |
| 677 | # CONFIG_E1000_NAPI is not set | 677 | # CONFIG_E1000_NAPI is not set |
| 678 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set | ||
| 678 | # CONFIG_NS83820 is not set | 679 | # CONFIG_NS83820 is not set |
| 679 | # CONFIG_HAMACHI is not set | 680 | # CONFIG_HAMACHI is not set |
| 680 | # CONFIG_YELLOWFIN is not set | 681 | # CONFIG_YELLOWFIN is not set |
diff --git a/arch/powerpc/configs/iseries_defconfig b/arch/powerpc/configs/iseries_defconfig index 751a622fb7a7..c775027947f9 100644 --- a/arch/powerpc/configs/iseries_defconfig +++ b/arch/powerpc/configs/iseries_defconfig | |||
| @@ -567,6 +567,7 @@ CONFIG_ACENIC=m | |||
| 567 | # CONFIG_DL2K is not set | 567 | # CONFIG_DL2K is not set |
| 568 | CONFIG_E1000=m | 568 | CONFIG_E1000=m |
| 569 | # CONFIG_E1000_NAPI is not set | 569 | # CONFIG_E1000_NAPI is not set |
| 570 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set | ||
| 570 | # CONFIG_NS83820 is not set | 571 | # CONFIG_NS83820 is not set |
| 571 | # CONFIG_HAMACHI is not set | 572 | # CONFIG_HAMACHI is not set |
| 572 | # CONFIG_YELLOWFIN is not set | 573 | # CONFIG_YELLOWFIN is not set |
diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig index 07b6d3d23360..68194c03f6d1 100644 --- a/arch/powerpc/configs/maple_defconfig +++ b/arch/powerpc/configs/maple_defconfig | |||
| @@ -454,6 +454,7 @@ CONFIG_AMD8111_ETH=y | |||
| 454 | # CONFIG_DL2K is not set | 454 | # CONFIG_DL2K is not set |
| 455 | CONFIG_E1000=y | 455 | CONFIG_E1000=y |
| 456 | # CONFIG_E1000_NAPI is not set | 456 | # CONFIG_E1000_NAPI is not set |
| 457 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set | ||
| 457 | # CONFIG_NS83820 is not set | 458 | # CONFIG_NS83820 is not set |
| 458 | # CONFIG_HAMACHI is not set | 459 | # CONFIG_HAMACHI is not set |
| 459 | # CONFIG_YELLOWFIN is not set | 460 | # CONFIG_YELLOWFIN is not set |
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index 0b2b55a79c3c..6f6c6bed1aa5 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig | |||
| @@ -724,6 +724,7 @@ CONFIG_ACENIC_OMIT_TIGON_I=y | |||
| 724 | # CONFIG_DL2K is not set | 724 | # CONFIG_DL2K is not set |
| 725 | CONFIG_E1000=y | 725 | CONFIG_E1000=y |
| 726 | # CONFIG_E1000_NAPI is not set | 726 | # CONFIG_E1000_NAPI is not set |
| 727 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set | ||
| 727 | # CONFIG_NS83820 is not set | 728 | # CONFIG_NS83820 is not set |
| 728 | # CONFIG_HAMACHI is not set | 729 | # CONFIG_HAMACHI is not set |
| 729 | # CONFIG_YELLOWFIN is not set | 730 | # CONFIG_YELLOWFIN is not set |
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index a50ce0fa9243..aa9893a1f6e8 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig | |||
| @@ -671,6 +671,7 @@ CONFIG_ACENIC_OMIT_TIGON_I=y | |||
| 671 | # CONFIG_DL2K is not set | 671 | # CONFIG_DL2K is not set |
| 672 | CONFIG_E1000=y | 672 | CONFIG_E1000=y |
| 673 | # CONFIG_E1000_NAPI is not set | 673 | # CONFIG_E1000_NAPI is not set |
| 674 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set | ||
| 674 | # CONFIG_NS83820 is not set | 675 | # CONFIG_NS83820 is not set |
| 675 | # CONFIG_HAMACHI is not set | 676 | # CONFIG_HAMACHI is not set |
| 676 | # CONFIG_YELLOWFIN is not set | 677 | # CONFIG_YELLOWFIN is not set |
diff --git a/arch/ppc/configs/bamboo_defconfig b/arch/ppc/configs/bamboo_defconfig index 0ba4e70d50b6..41fd3938fa5c 100644 --- a/arch/ppc/configs/bamboo_defconfig +++ b/arch/ppc/configs/bamboo_defconfig | |||
| @@ -499,6 +499,7 @@ CONFIG_NATSEMI=y | |||
| 499 | # CONFIG_DL2K is not set | 499 | # CONFIG_DL2K is not set |
| 500 | CONFIG_E1000=y | 500 | CONFIG_E1000=y |
| 501 | # CONFIG_E1000_NAPI is not set | 501 | # CONFIG_E1000_NAPI is not set |
| 502 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set | ||
| 502 | # CONFIG_NS83820 is not set | 503 | # CONFIG_NS83820 is not set |
| 503 | # CONFIG_HAMACHI is not set | 504 | # CONFIG_HAMACHI is not set |
| 504 | # CONFIG_YELLOWFIN is not set | 505 | # CONFIG_YELLOWFIN is not set |
diff --git a/arch/ppc/configs/katana_defconfig b/arch/ppc/configs/katana_defconfig index 0f3bb9af9c22..7311fe6b42de 100644 --- a/arch/ppc/configs/katana_defconfig +++ b/arch/ppc/configs/katana_defconfig | |||
| @@ -488,6 +488,7 @@ CONFIG_E100=y | |||
| 488 | # CONFIG_DL2K is not set | 488 | # CONFIG_DL2K is not set |
| 489 | CONFIG_E1000=y | 489 | CONFIG_E1000=y |
| 490 | # CONFIG_E1000_NAPI is not set | 490 | # CONFIG_E1000_NAPI is not set |
| 491 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set | ||
| 491 | # CONFIG_NS83820 is not set | 492 | # CONFIG_NS83820 is not set |
| 492 | # CONFIG_HAMACHI is not set | 493 | # CONFIG_HAMACHI is not set |
| 493 | # CONFIG_YELLOWFIN is not set | 494 | # CONFIG_YELLOWFIN is not set |
diff --git a/arch/ppc/configs/mpc834x_sys_defconfig b/arch/ppc/configs/mpc834x_sys_defconfig index 673dc64ebcb1..b96a6d6dad0e 100644 --- a/arch/ppc/configs/mpc834x_sys_defconfig +++ b/arch/ppc/configs/mpc834x_sys_defconfig | |||
| @@ -402,6 +402,7 @@ CONFIG_E100=y | |||
| 402 | # CONFIG_DL2K is not set | 402 | # CONFIG_DL2K is not set |
| 403 | CONFIG_E1000=y | 403 | CONFIG_E1000=y |
| 404 | # CONFIG_E1000_NAPI is not set | 404 | # CONFIG_E1000_NAPI is not set |
| 405 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set | ||
| 405 | # CONFIG_NS83820 is not set | 406 | # CONFIG_NS83820 is not set |
| 406 | # CONFIG_HAMACHI is not set | 407 | # CONFIG_HAMACHI is not set |
| 407 | # CONFIG_YELLOWFIN is not set | 408 | # CONFIG_YELLOWFIN is not set |
diff --git a/arch/ppc/configs/power3_defconfig b/arch/ppc/configs/power3_defconfig index 93da595a4738..a1ef929bca59 100644 --- a/arch/ppc/configs/power3_defconfig +++ b/arch/ppc/configs/power3_defconfig | |||
| @@ -442,6 +442,7 @@ CONFIG_E100=y | |||
| 442 | # CONFIG_DL2K is not set | 442 | # CONFIG_DL2K is not set |
| 443 | CONFIG_E1000=y | 443 | CONFIG_E1000=y |
| 444 | # CONFIG_E1000_NAPI is not set | 444 | # CONFIG_E1000_NAPI is not set |
| 445 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set | ||
| 445 | # CONFIG_NS83820 is not set | 446 | # CONFIG_NS83820 is not set |
| 446 | # CONFIG_HAMACHI is not set | 447 | # CONFIG_HAMACHI is not set |
| 447 | # CONFIG_YELLOWFIN is not set | 448 | # CONFIG_YELLOWFIN is not set |
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c index 489bf68d5f05..77840c804786 100644 --- a/arch/sparc/mm/iommu.c +++ b/arch/sparc/mm/iommu.c | |||
| @@ -295,8 +295,7 @@ static void iommu_release_one(u32 busa, int npages, struct sbus_bus *sbus) | |||
| 295 | int ioptex; | 295 | int ioptex; |
| 296 | int i; | 296 | int i; |
| 297 | 297 | ||
| 298 | if (busa < iommu->start) | 298 | BUG_ON(busa < iommu->start); |
| 299 | BUG(); | ||
| 300 | ioptex = (busa - iommu->start) >> PAGE_SHIFT; | 299 | ioptex = (busa - iommu->start) >> PAGE_SHIFT; |
| 301 | for (i = 0; i < npages; i++) { | 300 | for (i = 0; i < npages; i++) { |
| 302 | iopte_val(iommu->page_table[ioptex + i]) = 0; | 301 | iopte_val(iommu->page_table[ioptex + i]) = 0; |
| @@ -340,9 +339,9 @@ static int iommu_map_dma_area(dma_addr_t *pba, unsigned long va, | |||
| 340 | iopte_t *first; | 339 | iopte_t *first; |
| 341 | int ioptex; | 340 | int ioptex; |
| 342 | 341 | ||
| 343 | if ((va & ~PAGE_MASK) != 0) BUG(); | 342 | BUG_ON((va & ~PAGE_MASK) != 0); |
| 344 | if ((addr & ~PAGE_MASK) != 0) BUG(); | 343 | BUG_ON((addr & ~PAGE_MASK) != 0); |
| 345 | if ((len & ~PAGE_MASK) != 0) BUG(); | 344 | BUG_ON((len & ~PAGE_MASK) != 0); |
| 346 | 345 | ||
| 347 | /* page color = physical address */ | 346 | /* page color = physical address */ |
| 348 | ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT, | 347 | ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT, |
| @@ -405,8 +404,8 @@ static void iommu_unmap_dma_area(unsigned long busa, int len) | |||
| 405 | unsigned long end; | 404 | unsigned long end; |
| 406 | int ioptex = (busa - iommu->start) >> PAGE_SHIFT; | 405 | int ioptex = (busa - iommu->start) >> PAGE_SHIFT; |
| 407 | 406 | ||
| 408 | if ((busa & ~PAGE_MASK) != 0) BUG(); | 407 | BUG_ON((busa & ~PAGE_MASK) != 0); |
| 409 | if ((len & ~PAGE_MASK) != 0) BUG(); | 408 | BUG_ON((len & ~PAGE_MASK) != 0); |
| 410 | 409 | ||
| 411 | iopte += ioptex; | 410 | iopte += ioptex; |
| 412 | end = busa + len; | 411 | end = busa + len; |
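The three iommu.c hunks above are a mechanical cleanup: each open-coded test followed by BUG() collapses into a single BUG_ON() assertion with identical behaviour. The shape of the transform, taken from iommu_map_dma_area() above, is:

    /* before: open-coded check */
    if ((len & ~PAGE_MASK) != 0) BUG();

    /* after: the equivalent single assertion */
    BUG_ON((len & ~PAGE_MASK) != 0);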
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig index a3fb3376ffa0..92b2fb6aaa46 100644 --- a/arch/sparc64/defconfig +++ b/arch/sparc64/defconfig | |||
| @@ -529,6 +529,7 @@ CONFIG_NET_PCI=y | |||
| 529 | # CONFIG_DL2K is not set | 529 | # CONFIG_DL2K is not set |
| 530 | CONFIG_E1000=m | 530 | CONFIG_E1000=m |
| 531 | CONFIG_E1000_NAPI=y | 531 | CONFIG_E1000_NAPI=y |
| 532 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set | ||
| 532 | # CONFIG_MYRI_SBUS is not set | 533 | # CONFIG_MYRI_SBUS is not set |
| 533 | # CONFIG_NS83820 is not set | 534 | # CONFIG_NS83820 is not set |
| 534 | # CONFIG_HAMACHI is not set | 535 | # CONFIG_HAMACHI is not set |
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c index 459c8fbe02b4..a22930d62adf 100644 --- a/arch/sparc64/kernel/time.c +++ b/arch/sparc64/kernel/time.c | |||
| @@ -280,9 +280,9 @@ static struct sparc64_tick_ops stick_operations __read_mostly = { | |||
| 280 | * Since STICK is constantly updating, we have to access it carefully. | 280 | * Since STICK is constantly updating, we have to access it carefully. |
| 281 | * | 281 | * |
| 282 | * The sequence we use to read is: | 282 | * The sequence we use to read is: |
| 283 | * 1) read low | 283 | * 1) read high |
| 284 | * 2) read high | 284 | * 2) read low |
| 285 | * 3) read low again, if it rolled over increment high by 1 | 285 | * 3) read high again; if it rolled over, re-read both low and high. |
| 286 | * | 286 | * |
| 287 | * Writing STICK safely is also tricky: | 287 | * Writing STICK safely is also tricky: |
| 288 | * 1) write low to zero | 288 | * 1) write low to zero |
| @@ -295,18 +295,18 @@ static struct sparc64_tick_ops stick_operations __read_mostly = { | |||
| 295 | static unsigned long __hbird_read_stick(void) | 295 | static unsigned long __hbird_read_stick(void) |
| 296 | { | 296 | { |
| 297 | unsigned long ret, tmp1, tmp2, tmp3; | 297 | unsigned long ret, tmp1, tmp2, tmp3; |
| 298 | unsigned long addr = HBIRD_STICK_ADDR; | 298 | unsigned long addr = HBIRD_STICK_ADDR+8; |
| 299 | 299 | ||
| 300 | __asm__ __volatile__("ldxa [%1] %5, %2\n\t" | 300 | __asm__ __volatile__("ldxa [%1] %5, %2\n" |
| 301 | "add %1, 0x8, %1\n\t" | 301 | "1:\n\t" |
| 302 | "ldxa [%1] %5, %3\n\t" | ||
| 303 | "sub %1, 0x8, %1\n\t" | 302 | "sub %1, 0x8, %1\n\t" |
| 303 | "ldxa [%1] %5, %3\n\t" | ||
| 304 | "add %1, 0x8, %1\n\t" | ||
| 304 | "ldxa [%1] %5, %4\n\t" | 305 | "ldxa [%1] %5, %4\n\t" |
| 305 | "cmp %4, %2\n\t" | 306 | "cmp %4, %2\n\t" |
| 306 | "blu,a,pn %%xcc, 1f\n\t" | 307 | "bne,a,pn %%xcc, 1b\n\t" |
| 307 | " add %3, 1, %3\n" | 308 | " mov %4, %2\n\t" |
| 308 | "1:\n\t" | 309 | "sllx %4, 32, %4\n\t" |
| 309 | "sllx %3, 32, %3\n\t" | ||
| 310 | "or %3, %4, %0\n\t" | 310 | "or %3, %4, %0\n\t" |
| 311 | : "=&r" (ret), "=&r" (addr), | 311 | : "=&r" (ret), "=&r" (addr), |
| 312 | "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3) | 312 | "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3) |
diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig index 5231fe83ea4b..09a3eb743315 100644 --- a/arch/x86_64/defconfig +++ b/arch/x86_64/defconfig | |||
| @@ -646,6 +646,7 @@ CONFIG_8139TOO=y | |||
| 646 | # CONFIG_DL2K is not set | 646 | # CONFIG_DL2K is not set |
| 647 | CONFIG_E1000=y | 647 | CONFIG_E1000=y |
| 648 | # CONFIG_E1000_NAPI is not set | 648 | # CONFIG_E1000_NAPI is not set |
| 649 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set | ||
| 649 | # CONFIG_NS83820 is not set | 650 | # CONFIG_NS83820 is not set |
| 650 | # CONFIG_HAMACHI is not set | 651 | # CONFIG_HAMACHI is not set |
| 651 | # CONFIG_YELLOWFIN is not set | 652 | # CONFIG_YELLOWFIN is not set |
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 1421941487c4..0c69918671ca 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
| @@ -1914,6 +1914,15 @@ config E1000_NAPI | |||
| 1914 | 1914 | ||
| 1915 | If in doubt, say N. | 1915 | If in doubt, say N. |
| 1916 | 1916 | ||
| 1917 | config E1000_DISABLE_PACKET_SPLIT | ||
| 1918 | bool "Disable Packet Split for PCI express adapters" | ||
| 1919 | depends on E1000 | ||
| 1920 | help | ||
| 1921 | Say Y here if you want to use the legacy receive path for PCI express | ||
| 1922 | hardware. |||
| 1923 | |||
| 1924 | If in doubt, say N. | ||
| 1925 | |||
| 1917 | source "drivers/net/ixp2000/Kconfig" | 1926 | source "drivers/net/ixp2000/Kconfig" |
| 1918 | 1927 | ||
| 1919 | config MYRI_SBUS | 1928 | config MYRI_SBUS |
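For context (not part of the patch): a bool Kconfig entry like the one added above surfaces in the driver as a CONFIG_E1000_DISABLE_PACKET_SPLIT preprocessor symbol, so the legacy receive path can be selected at build time along these purely illustrative lines:

    /* Hypothetical illustration only -- not taken from the e1000 sources. */
    #ifdef CONFIG_E1000_DISABLE_PACKET_SPLIT
    #define USE_PACKET_SPLIT 0      /* build with the legacy receive path only */
    #else
    #define USE_PACKET_SPLIT 1      /* allow the packet-split path on PCI Express hardware */
    #endif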
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c index 1f7ca453bb4a..dde631f8f685 100644 --- a/drivers/net/cassini.c +++ b/drivers/net/cassini.c | |||
| @@ -1925,8 +1925,8 @@ static void cas_tx(struct net_device *dev, struct cas *cp, | |||
| 1925 | u64 compwb = le64_to_cpu(cp->init_block->tx_compwb); | 1925 | u64 compwb = le64_to_cpu(cp->init_block->tx_compwb); |
| 1926 | #endif | 1926 | #endif |
| 1927 | if (netif_msg_intr(cp)) | 1927 | if (netif_msg_intr(cp)) |
| 1928 | printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %lx\n", | 1928 | printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %llx\n", |
| 1929 | cp->dev->name, status, compwb); | 1929 | cp->dev->name, status, (unsigned long long)compwb); |
| 1930 | /* process all the rings */ | 1930 | /* process all the rings */ |
| 1931 | for (ring = 0; ring < N_TX_RINGS; ring++) { | 1931 | for (ring = 0; ring < N_TX_RINGS; ring++) { |
| 1932 | #ifdef USE_TX_COMPWB | 1932 | #ifdef USE_TX_COMPWB |
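The cassini change is the usual printk portability fix: tx_compwb is a u64, which the kernel defines as either unsigned long or unsigned long long depending on the architecture, so the value is printed with %llx and an explicit cast instead of %lx:

    /* as in the hunk above: the explicit cast makes %llx correct on every architecture */
    printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %llx\n",
           cp->dev->name, status, (unsigned long long)compwb);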
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c index d252297e4db0..5cedc81786e3 100644 --- a/drivers/net/e1000/e1000_ethtool.c +++ b/drivers/net/e1000/e1000_ethtool.c | |||
| @@ -121,7 +121,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | |||
| 121 | struct e1000_adapter *adapter = netdev_priv(netdev); | 121 | struct e1000_adapter *adapter = netdev_priv(netdev); |
| 122 | struct e1000_hw *hw = &adapter->hw; | 122 | struct e1000_hw *hw = &adapter->hw; |
| 123 | 123 | ||
| 124 | if(hw->media_type == e1000_media_type_copper) { | 124 | if (hw->media_type == e1000_media_type_copper) { |
| 125 | 125 | ||
| 126 | ecmd->supported = (SUPPORTED_10baseT_Half | | 126 | ecmd->supported = (SUPPORTED_10baseT_Half | |
| 127 | SUPPORTED_10baseT_Full | | 127 | SUPPORTED_10baseT_Full | |
| @@ -133,7 +133,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | |||
| 133 | 133 | ||
| 134 | ecmd->advertising = ADVERTISED_TP; | 134 | ecmd->advertising = ADVERTISED_TP; |
| 135 | 135 | ||
| 136 | if(hw->autoneg == 1) { | 136 | if (hw->autoneg == 1) { |
| 137 | ecmd->advertising |= ADVERTISED_Autoneg; | 137 | ecmd->advertising |= ADVERTISED_Autoneg; |
| 138 | 138 | ||
| 139 | /* the e1000 autoneg seems to match ethtool nicely */ | 139 | /* the e1000 autoneg seems to match ethtool nicely */ |
| @@ -144,7 +144,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | |||
| 144 | ecmd->port = PORT_TP; | 144 | ecmd->port = PORT_TP; |
| 145 | ecmd->phy_address = hw->phy_addr; | 145 | ecmd->phy_address = hw->phy_addr; |
| 146 | 146 | ||
| 147 | if(hw->mac_type == e1000_82543) | 147 | if (hw->mac_type == e1000_82543) |
| 148 | ecmd->transceiver = XCVR_EXTERNAL; | 148 | ecmd->transceiver = XCVR_EXTERNAL; |
| 149 | else | 149 | else |
| 150 | ecmd->transceiver = XCVR_INTERNAL; | 150 | ecmd->transceiver = XCVR_INTERNAL; |
| @@ -160,13 +160,13 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | |||
| 160 | 160 | ||
| 161 | ecmd->port = PORT_FIBRE; | 161 | ecmd->port = PORT_FIBRE; |
| 162 | 162 | ||
| 163 | if(hw->mac_type >= e1000_82545) | 163 | if (hw->mac_type >= e1000_82545) |
| 164 | ecmd->transceiver = XCVR_INTERNAL; | 164 | ecmd->transceiver = XCVR_INTERNAL; |
| 165 | else | 165 | else |
| 166 | ecmd->transceiver = XCVR_EXTERNAL; | 166 | ecmd->transceiver = XCVR_EXTERNAL; |
| 167 | } | 167 | } |
| 168 | 168 | ||
| 169 | if(netif_carrier_ok(adapter->netdev)) { | 169 | if (netif_carrier_ok(adapter->netdev)) { |
| 170 | 170 | ||
| 171 | e1000_get_speed_and_duplex(hw, &adapter->link_speed, | 171 | e1000_get_speed_and_duplex(hw, &adapter->link_speed, |
| 172 | &adapter->link_duplex); | 172 | &adapter->link_duplex); |
| @@ -175,7 +175,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | |||
| 175 | /* unfortunatly FULL_DUPLEX != DUPLEX_FULL | 175 | /* unfortunatly FULL_DUPLEX != DUPLEX_FULL |
| 176 | * and HALF_DUPLEX != DUPLEX_HALF */ | 176 | * and HALF_DUPLEX != DUPLEX_HALF */ |
| 177 | 177 | ||
| 178 | if(adapter->link_duplex == FULL_DUPLEX) | 178 | if (adapter->link_duplex == FULL_DUPLEX) |
| 179 | ecmd->duplex = DUPLEX_FULL; | 179 | ecmd->duplex = DUPLEX_FULL; |
| 180 | else | 180 | else |
| 181 | ecmd->duplex = DUPLEX_HALF; | 181 | ecmd->duplex = DUPLEX_HALF; |
| @@ -205,11 +205,11 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | |||
| 205 | 205 | ||
| 206 | if (ecmd->autoneg == AUTONEG_ENABLE) { | 206 | if (ecmd->autoneg == AUTONEG_ENABLE) { |
| 207 | hw->autoneg = 1; | 207 | hw->autoneg = 1; |
| 208 | if(hw->media_type == e1000_media_type_fiber) | 208 | if (hw->media_type == e1000_media_type_fiber) |
| 209 | hw->autoneg_advertised = ADVERTISED_1000baseT_Full | | 209 | hw->autoneg_advertised = ADVERTISED_1000baseT_Full | |
| 210 | ADVERTISED_FIBRE | | 210 | ADVERTISED_FIBRE | |
| 211 | ADVERTISED_Autoneg; | 211 | ADVERTISED_Autoneg; |
| 212 | else | 212 | else |
| 213 | hw->autoneg_advertised = ADVERTISED_10baseT_Half | | 213 | hw->autoneg_advertised = ADVERTISED_10baseT_Half | |
| 214 | ADVERTISED_10baseT_Full | | 214 | ADVERTISED_10baseT_Full | |
| 215 | ADVERTISED_100baseT_Half | | 215 | ADVERTISED_100baseT_Half | |
| @@ -219,12 +219,12 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | |||
| 219 | ADVERTISED_TP; | 219 | ADVERTISED_TP; |
| 220 | ecmd->advertising = hw->autoneg_advertised; | 220 | ecmd->advertising = hw->autoneg_advertised; |
| 221 | } else | 221 | } else |
| 222 | if(e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) | 222 | if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) |
| 223 | return -EINVAL; | 223 | return -EINVAL; |
| 224 | 224 | ||
| 225 | /* reset the link */ | 225 | /* reset the link */ |
| 226 | 226 | ||
| 227 | if(netif_running(adapter->netdev)) { | 227 | if (netif_running(adapter->netdev)) { |
| 228 | e1000_down(adapter); | 228 | e1000_down(adapter); |
| 229 | e1000_reset(adapter); | 229 | e1000_reset(adapter); |
| 230 | e1000_up(adapter); | 230 | e1000_up(adapter); |
| @@ -241,14 +241,14 @@ e1000_get_pauseparam(struct net_device *netdev, | |||
| 241 | struct e1000_adapter *adapter = netdev_priv(netdev); | 241 | struct e1000_adapter *adapter = netdev_priv(netdev); |
| 242 | struct e1000_hw *hw = &adapter->hw; | 242 | struct e1000_hw *hw = &adapter->hw; |
| 243 | 243 | ||
| 244 | pause->autoneg = | 244 | pause->autoneg = |
| 245 | (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); | 245 | (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); |
| 246 | 246 | ||
| 247 | if(hw->fc == e1000_fc_rx_pause) | 247 | if (hw->fc == e1000_fc_rx_pause) |
| 248 | pause->rx_pause = 1; | 248 | pause->rx_pause = 1; |
| 249 | else if(hw->fc == e1000_fc_tx_pause) | 249 | else if (hw->fc == e1000_fc_tx_pause) |
| 250 | pause->tx_pause = 1; | 250 | pause->tx_pause = 1; |
| 251 | else if(hw->fc == e1000_fc_full) { | 251 | else if (hw->fc == e1000_fc_full) { |
| 252 | pause->rx_pause = 1; | 252 | pause->rx_pause = 1; |
| 253 | pause->tx_pause = 1; | 253 | pause->tx_pause = 1; |
| 254 | } | 254 | } |
| @@ -260,31 +260,30 @@ e1000_set_pauseparam(struct net_device *netdev, | |||
| 260 | { | 260 | { |
| 261 | struct e1000_adapter *adapter = netdev_priv(netdev); | 261 | struct e1000_adapter *adapter = netdev_priv(netdev); |
| 262 | struct e1000_hw *hw = &adapter->hw; | 262 | struct e1000_hw *hw = &adapter->hw; |
| 263 | 263 | ||
| 264 | adapter->fc_autoneg = pause->autoneg; | 264 | adapter->fc_autoneg = pause->autoneg; |
| 265 | 265 | ||
| 266 | if(pause->rx_pause && pause->tx_pause) | 266 | if (pause->rx_pause && pause->tx_pause) |
| 267 | hw->fc = e1000_fc_full; | 267 | hw->fc = e1000_fc_full; |
| 268 | else if(pause->rx_pause && !pause->tx_pause) | 268 | else if (pause->rx_pause && !pause->tx_pause) |
| 269 | hw->fc = e1000_fc_rx_pause; | 269 | hw->fc = e1000_fc_rx_pause; |
| 270 | else if(!pause->rx_pause && pause->tx_pause) | 270 | else if (!pause->rx_pause && pause->tx_pause) |
| 271 | hw->fc = e1000_fc_tx_pause; | 271 | hw->fc = e1000_fc_tx_pause; |
| 272 | else if(!pause->rx_pause && !pause->tx_pause) | 272 | else if (!pause->rx_pause && !pause->tx_pause) |
| 273 | hw->fc = e1000_fc_none; | 273 | hw->fc = e1000_fc_none; |
| 274 | 274 | ||
| 275 | hw->original_fc = hw->fc; | 275 | hw->original_fc = hw->fc; |
| 276 | 276 | ||
| 277 | if(adapter->fc_autoneg == AUTONEG_ENABLE) { | 277 | if (adapter->fc_autoneg == AUTONEG_ENABLE) { |
| 278 | if(netif_running(adapter->netdev)) { | 278 | if (netif_running(adapter->netdev)) { |
| 279 | e1000_down(adapter); | 279 | e1000_down(adapter); |
| 280 | e1000_up(adapter); | 280 | e1000_up(adapter); |
| 281 | } else | 281 | } else |
| 282 | e1000_reset(adapter); | 282 | e1000_reset(adapter); |
| 283 | } | 283 | } else |
| 284 | else | ||
| 285 | return ((hw->media_type == e1000_media_type_fiber) ? | 284 | return ((hw->media_type == e1000_media_type_fiber) ? |
| 286 | e1000_setup_link(hw) : e1000_force_mac_fc(hw)); | 285 | e1000_setup_link(hw) : e1000_force_mac_fc(hw)); |
| 287 | 286 | ||
| 288 | return 0; | 287 | return 0; |
| 289 | } | 288 | } |
| 290 | 289 | ||
| @@ -301,14 +300,14 @@ e1000_set_rx_csum(struct net_device *netdev, uint32_t data) | |||
| 301 | struct e1000_adapter *adapter = netdev_priv(netdev); | 300 | struct e1000_adapter *adapter = netdev_priv(netdev); |
| 302 | adapter->rx_csum = data; | 301 | adapter->rx_csum = data; |
| 303 | 302 | ||
| 304 | if(netif_running(netdev)) { | 303 | if (netif_running(netdev)) { |
| 305 | e1000_down(adapter); | 304 | e1000_down(adapter); |
| 306 | e1000_up(adapter); | 305 | e1000_up(adapter); |
| 307 | } else | 306 | } else |
| 308 | e1000_reset(adapter); | 307 | e1000_reset(adapter); |
| 309 | return 0; | 308 | return 0; |
| 310 | } | 309 | } |
| 311 | 310 | ||
| 312 | static uint32_t | 311 | static uint32_t |
| 313 | e1000_get_tx_csum(struct net_device *netdev) | 312 | e1000_get_tx_csum(struct net_device *netdev) |
| 314 | { | 313 | { |
| @@ -320,7 +319,7 @@ e1000_set_tx_csum(struct net_device *netdev, uint32_t data) | |||
| 320 | { | 319 | { |
| 321 | struct e1000_adapter *adapter = netdev_priv(netdev); | 320 | struct e1000_adapter *adapter = netdev_priv(netdev); |
| 322 | 321 | ||
| 323 | if(adapter->hw.mac_type < e1000_82543) { | 322 | if (adapter->hw.mac_type < e1000_82543) { |
| 324 | if (!data) | 323 | if (!data) |
| 325 | return -EINVAL; | 324 | return -EINVAL; |
| 326 | return 0; | 325 | return 0; |
| @@ -339,8 +338,8 @@ static int | |||
| 339 | e1000_set_tso(struct net_device *netdev, uint32_t data) | 338 | e1000_set_tso(struct net_device *netdev, uint32_t data) |
| 340 | { | 339 | { |
| 341 | struct e1000_adapter *adapter = netdev_priv(netdev); | 340 | struct e1000_adapter *adapter = netdev_priv(netdev); |
| 342 | if((adapter->hw.mac_type < e1000_82544) || | 341 | if ((adapter->hw.mac_type < e1000_82544) || |
| 343 | (adapter->hw.mac_type == e1000_82547)) | 342 | (adapter->hw.mac_type == e1000_82547)) |
| 344 | return data ? -EINVAL : 0; | 343 | return data ? -EINVAL : 0; |
| 345 | 344 | ||
| 346 | if (data) | 345 | if (data) |
| @@ -348,7 +347,7 @@ e1000_set_tso(struct net_device *netdev, uint32_t data) | |||
| 348 | else | 347 | else |
| 349 | netdev->features &= ~NETIF_F_TSO; | 348 | netdev->features &= ~NETIF_F_TSO; |
| 350 | return 0; | 349 | return 0; |
| 351 | } | 350 | } |
| 352 | #endif /* NETIF_F_TSO */ | 351 | #endif /* NETIF_F_TSO */ |
| 353 | 352 | ||
| 354 | static uint32_t | 353 | static uint32_t |
| @@ -365,7 +364,7 @@ e1000_set_msglevel(struct net_device *netdev, uint32_t data) | |||
| 365 | adapter->msg_enable = data; | 364 | adapter->msg_enable = data; |
| 366 | } | 365 | } |
| 367 | 366 | ||
| 368 | static int | 367 | static int |
| 369 | e1000_get_regs_len(struct net_device *netdev) | 368 | e1000_get_regs_len(struct net_device *netdev) |
| 370 | { | 369 | { |
| 371 | #define E1000_REGS_LEN 32 | 370 | #define E1000_REGS_LEN 32 |
| @@ -401,7 +400,7 @@ e1000_get_regs(struct net_device *netdev, | |||
| 401 | regs_buff[11] = E1000_READ_REG(hw, TIDV); | 400 | regs_buff[11] = E1000_READ_REG(hw, TIDV); |
| 402 | 401 | ||
| 403 | regs_buff[12] = adapter->hw.phy_type; /* PHY type (IGP=1, M88=0) */ | 402 | regs_buff[12] = adapter->hw.phy_type; /* PHY type (IGP=1, M88=0) */ |
| 404 | if(hw->phy_type == e1000_phy_igp) { | 403 | if (hw->phy_type == e1000_phy_igp) { |
| 405 | e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, | 404 | e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, |
| 406 | IGP01E1000_PHY_AGC_A); | 405 | IGP01E1000_PHY_AGC_A); |
| 407 | e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A & | 406 | e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A & |
| @@ -455,7 +454,7 @@ e1000_get_regs(struct net_device *netdev, | |||
| 455 | e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); | 454 | e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); |
| 456 | regs_buff[24] = (uint32_t)phy_data; /* phy local receiver status */ | 455 | regs_buff[24] = (uint32_t)phy_data; /* phy local receiver status */ |
| 457 | regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ | 456 | regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ |
| 458 | if(hw->mac_type >= e1000_82540 && | 457 | if (hw->mac_type >= e1000_82540 && |
| 459 | hw->media_type == e1000_media_type_copper) { | 458 | hw->media_type == e1000_media_type_copper) { |
| 460 | regs_buff[26] = E1000_READ_REG(hw, MANC); | 459 | regs_buff[26] = E1000_READ_REG(hw, MANC); |
| 461 | } | 460 | } |
| @@ -479,7 +478,7 @@ e1000_get_eeprom(struct net_device *netdev, | |||
| 479 | int ret_val = 0; | 478 | int ret_val = 0; |
| 480 | uint16_t i; | 479 | uint16_t i; |
| 481 | 480 | ||
| 482 | if(eeprom->len == 0) | 481 | if (eeprom->len == 0) |
| 483 | return -EINVAL; | 482 | return -EINVAL; |
| 484 | 483 | ||
| 485 | eeprom->magic = hw->vendor_id | (hw->device_id << 16); | 484 | eeprom->magic = hw->vendor_id | (hw->device_id << 16); |
| @@ -489,16 +488,16 @@ e1000_get_eeprom(struct net_device *netdev, | |||
| 489 | 488 | ||
| 490 | eeprom_buff = kmalloc(sizeof(uint16_t) * | 489 | eeprom_buff = kmalloc(sizeof(uint16_t) * |
| 491 | (last_word - first_word + 1), GFP_KERNEL); | 490 | (last_word - first_word + 1), GFP_KERNEL); |
| 492 | if(!eeprom_buff) | 491 | if (!eeprom_buff) |
| 493 | return -ENOMEM; | 492 | return -ENOMEM; |
| 494 | 493 | ||
| 495 | if(hw->eeprom.type == e1000_eeprom_spi) | 494 | if (hw->eeprom.type == e1000_eeprom_spi) |
| 496 | ret_val = e1000_read_eeprom(hw, first_word, | 495 | ret_val = e1000_read_eeprom(hw, first_word, |
| 497 | last_word - first_word + 1, | 496 | last_word - first_word + 1, |
| 498 | eeprom_buff); | 497 | eeprom_buff); |
| 499 | else { | 498 | else { |
| 500 | for (i = 0; i < last_word - first_word + 1; i++) | 499 | for (i = 0; i < last_word - first_word + 1; i++) |
| 501 | if((ret_val = e1000_read_eeprom(hw, first_word + i, 1, | 500 | if ((ret_val = e1000_read_eeprom(hw, first_word + i, 1, |
| 502 | &eeprom_buff[i]))) | 501 | &eeprom_buff[i]))) |
| 503 | break; | 502 | break; |
| 504 | } | 503 | } |
| @@ -525,10 +524,10 @@ e1000_set_eeprom(struct net_device *netdev, | |||
| 525 | int max_len, first_word, last_word, ret_val = 0; | 524 | int max_len, first_word, last_word, ret_val = 0; |
| 526 | uint16_t i; | 525 | uint16_t i; |
| 527 | 526 | ||
| 528 | if(eeprom->len == 0) | 527 | if (eeprom->len == 0) |
| 529 | return -EOPNOTSUPP; | 528 | return -EOPNOTSUPP; |
| 530 | 529 | ||
| 531 | if(eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) | 530 | if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) |
| 532 | return -EFAULT; | 531 | return -EFAULT; |
| 533 | 532 | ||
| 534 | max_len = hw->eeprom.word_size * 2; | 533 | max_len = hw->eeprom.word_size * 2; |
| @@ -536,19 +535,19 @@ e1000_set_eeprom(struct net_device *netdev, | |||
| 536 | first_word = eeprom->offset >> 1; | 535 | first_word = eeprom->offset >> 1; |
| 537 | last_word = (eeprom->offset + eeprom->len - 1) >> 1; | 536 | last_word = (eeprom->offset + eeprom->len - 1) >> 1; |
| 538 | eeprom_buff = kmalloc(max_len, GFP_KERNEL); | 537 | eeprom_buff = kmalloc(max_len, GFP_KERNEL); |
| 539 | if(!eeprom_buff) | 538 | if (!eeprom_buff) |
| 540 | return -ENOMEM; | 539 | return -ENOMEM; |
| 541 | 540 | ||
| 542 | ptr = (void *)eeprom_buff; | 541 | ptr = (void *)eeprom_buff; |
| 543 | 542 | ||
| 544 | if(eeprom->offset & 1) { | 543 | if (eeprom->offset & 1) { |
| 545 | /* need read/modify/write of first changed EEPROM word */ | 544 | /* need read/modify/write of first changed EEPROM word */ |
| 546 | /* only the second byte of the word is being modified */ | 545 | /* only the second byte of the word is being modified */ |
| 547 | ret_val = e1000_read_eeprom(hw, first_word, 1, | 546 | ret_val = e1000_read_eeprom(hw, first_word, 1, |
| 548 | &eeprom_buff[0]); | 547 | &eeprom_buff[0]); |
| 549 | ptr++; | 548 | ptr++; |
| 550 | } | 549 | } |
| 551 | if(((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { | 550 | if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { |
| 552 | /* need read/modify/write of last changed EEPROM word */ | 551 | /* need read/modify/write of last changed EEPROM word */ |
| 553 | /* only the first byte of the word is being modified */ | 552 | /* only the first byte of the word is being modified */ |
| 554 | ret_val = e1000_read_eeprom(hw, last_word, 1, | 553 | ret_val = e1000_read_eeprom(hw, last_word, 1, |
| @@ -567,9 +566,9 @@ e1000_set_eeprom(struct net_device *netdev, | |||
| 567 | ret_val = e1000_write_eeprom(hw, first_word, | 566 | ret_val = e1000_write_eeprom(hw, first_word, |
| 568 | last_word - first_word + 1, eeprom_buff); | 567 | last_word - first_word + 1, eeprom_buff); |
| 569 | 568 | ||
| 570 | /* Update the checksum over the first part of the EEPROM if needed | 569 | /* Update the checksum over the first part of the EEPROM if needed |
| 571 | * and flush shadow RAM for 82573 conrollers */ | 570 | * and flush shadow RAM for 82573 conrollers */ |
| 572 | if((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) || | 571 | if ((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) || |
| 573 | (hw->mac_type == e1000_82573))) | 572 | (hw->mac_type == e1000_82573))) |
| 574 | e1000_update_eeprom_checksum(hw); | 573 | e1000_update_eeprom_checksum(hw); |
| 575 | 574 | ||
| @@ -633,7 +632,7 @@ e1000_get_ringparam(struct net_device *netdev, | |||
| 633 | ring->rx_jumbo_pending = 0; | 632 | ring->rx_jumbo_pending = 0; |
| 634 | } | 633 | } |
| 635 | 634 | ||
| 636 | static int | 635 | static int |
| 637 | e1000_set_ringparam(struct net_device *netdev, | 636 | e1000_set_ringparam(struct net_device *netdev, |
| 638 | struct ethtool_ringparam *ring) | 637 | struct ethtool_ringparam *ring) |
| 639 | { | 638 | { |
| @@ -670,25 +669,25 @@ e1000_set_ringparam(struct net_device *netdev, | |||
| 670 | txdr = adapter->tx_ring; | 669 | txdr = adapter->tx_ring; |
| 671 | rxdr = adapter->rx_ring; | 670 | rxdr = adapter->rx_ring; |
| 672 | 671 | ||
| 673 | if((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) | 672 | if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) |
| 674 | return -EINVAL; | 673 | return -EINVAL; |
| 675 | 674 | ||
| 676 | rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD); | 675 | rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD); |
| 677 | rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ? | 676 | rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ? |
| 678 | E1000_MAX_RXD : E1000_MAX_82544_RXD)); | 677 | E1000_MAX_RXD : E1000_MAX_82544_RXD)); |
| 679 | E1000_ROUNDUP(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE); | 678 | E1000_ROUNDUP(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE); |
| 680 | 679 | ||
| 681 | txdr->count = max(ring->tx_pending,(uint32_t)E1000_MIN_TXD); | 680 | txdr->count = max(ring->tx_pending,(uint32_t)E1000_MIN_TXD); |
| 682 | txdr->count = min(txdr->count,(uint32_t)(mac_type < e1000_82544 ? | 681 | txdr->count = min(txdr->count,(uint32_t)(mac_type < e1000_82544 ? |
| 683 | E1000_MAX_TXD : E1000_MAX_82544_TXD)); | 682 | E1000_MAX_TXD : E1000_MAX_82544_TXD)); |
| 684 | E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); | 683 | E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); |
| 685 | 684 | ||
| 686 | for (i = 0; i < adapter->num_tx_queues; i++) | 685 | for (i = 0; i < adapter->num_tx_queues; i++) |
| 687 | txdr[i].count = txdr->count; | 686 | txdr[i].count = txdr->count; |
| 688 | for (i = 0; i < adapter->num_rx_queues; i++) | 687 | for (i = 0; i < adapter->num_rx_queues; i++) |
| 689 | rxdr[i].count = rxdr->count; | 688 | rxdr[i].count = rxdr->count; |
| 690 | 689 | ||
| 691 | if(netif_running(adapter->netdev)) { | 690 | if (netif_running(adapter->netdev)) { |
| 692 | /* Try to get new resources before deleting old */ | 691 | /* Try to get new resources before deleting old */ |
| 693 | if ((err = e1000_setup_all_rx_resources(adapter))) | 692 | if ((err = e1000_setup_all_rx_resources(adapter))) |
| 694 | goto err_setup_rx; | 693 | goto err_setup_rx; |
| @@ -708,7 +707,7 @@ e1000_set_ringparam(struct net_device *netdev, | |||
| 708 | kfree(rx_old); | 707 | kfree(rx_old); |
| 709 | adapter->rx_ring = rx_new; | 708 | adapter->rx_ring = rx_new; |
| 710 | adapter->tx_ring = tx_new; | 709 | adapter->tx_ring = tx_new; |
| 711 | if((err = e1000_up(adapter))) | 710 | if ((err = e1000_up(adapter))) |
| 712 | return err; | 711 | return err; |
| 713 | } | 712 | } |
| 714 | 713 | ||
| @@ -727,10 +726,10 @@ err_setup_rx: | |||
| 727 | uint32_t pat, value; \ | 726 | uint32_t pat, value; \ |
| 728 | uint32_t test[] = \ | 727 | uint32_t test[] = \ |
| 729 | {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \ | 728 | {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \ |
| 730 | for(pat = 0; pat < sizeof(test)/sizeof(test[0]); pat++) { \ | 729 | for (pat = 0; pat < sizeof(test)/sizeof(test[0]); pat++) { \ |
| 731 | E1000_WRITE_REG(&adapter->hw, R, (test[pat] & W)); \ | 730 | E1000_WRITE_REG(&adapter->hw, R, (test[pat] & W)); \ |
| 732 | value = E1000_READ_REG(&adapter->hw, R); \ | 731 | value = E1000_READ_REG(&adapter->hw, R); \ |
| 733 | if(value != (test[pat] & W & M)) { \ | 732 | if (value != (test[pat] & W & M)) { \ |
| 734 | DPRINTK(DRV, ERR, "pattern test reg %04X failed: got " \ | 733 | DPRINTK(DRV, ERR, "pattern test reg %04X failed: got " \ |
| 735 | "0x%08X expected 0x%08X\n", \ | 734 | "0x%08X expected 0x%08X\n", \ |
| 736 | E1000_##R, value, (test[pat] & W & M)); \ | 735 | E1000_##R, value, (test[pat] & W & M)); \ |
| @@ -746,7 +745,7 @@ err_setup_rx: | |||
| 746 | uint32_t value; \ | 745 | uint32_t value; \ |
| 747 | E1000_WRITE_REG(&adapter->hw, R, W & M); \ | 746 | E1000_WRITE_REG(&adapter->hw, R, W & M); \ |
| 748 | value = E1000_READ_REG(&adapter->hw, R); \ | 747 | value = E1000_READ_REG(&adapter->hw, R); \ |
| 749 | if((W & M) != (value & M)) { \ | 748 | if ((W & M) != (value & M)) { \ |
| 750 | DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\ | 749 | DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\ |
| 751 | "expected 0x%08X\n", E1000_##R, (value & M), (W & M)); \ | 750 | "expected 0x%08X\n", E1000_##R, (value & M), (W & M)); \ |
| 752 | *data = (adapter->hw.mac_type < e1000_82543) ? \ | 751 | *data = (adapter->hw.mac_type < e1000_82543) ? \ |
| @@ -782,7 +781,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data) | |||
| 782 | value = (E1000_READ_REG(&adapter->hw, STATUS) & toggle); | 781 | value = (E1000_READ_REG(&adapter->hw, STATUS) & toggle); |
| 783 | E1000_WRITE_REG(&adapter->hw, STATUS, toggle); | 782 | E1000_WRITE_REG(&adapter->hw, STATUS, toggle); |
| 784 | after = E1000_READ_REG(&adapter->hw, STATUS) & toggle; | 783 | after = E1000_READ_REG(&adapter->hw, STATUS) & toggle; |
| 785 | if(value != after) { | 784 | if (value != after) { |
| 786 | DPRINTK(DRV, ERR, "failed STATUS register test got: " | 785 | DPRINTK(DRV, ERR, "failed STATUS register test got: " |
| 787 | "0x%08X expected: 0x%08X\n", after, value); | 786 | "0x%08X expected: 0x%08X\n", after, value); |
| 788 | *data = 1; | 787 | *data = 1; |
| @@ -810,7 +809,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data) | |||
| 810 | REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0x003FFFFB); | 809 | REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0x003FFFFB); |
| 811 | REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); | 810 | REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); |
| 812 | 811 | ||
| 813 | if(adapter->hw.mac_type >= e1000_82543) { | 812 | if (adapter->hw.mac_type >= e1000_82543) { |
| 814 | 813 | ||
| 815 | REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0xFFFFFFFF); | 814 | REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0xFFFFFFFF); |
| 816 | REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); | 815 | REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); |
| @@ -818,7 +817,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data) | |||
| 818 | REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); | 817 | REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); |
| 819 | REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF); | 818 | REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF); |
| 820 | 819 | ||
| 821 | for(i = 0; i < E1000_RAR_ENTRIES; i++) { | 820 | for (i = 0; i < E1000_RAR_ENTRIES; i++) { |
| 822 | REG_PATTERN_TEST(RA + ((i << 1) << 2), 0xFFFFFFFF, | 821 | REG_PATTERN_TEST(RA + ((i << 1) << 2), 0xFFFFFFFF, |
| 823 | 0xFFFFFFFF); | 822 | 0xFFFFFFFF); |
| 824 | REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF, | 823 | REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF, |
| @@ -834,7 +833,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data) | |||
| 834 | 833 | ||
| 835 | } | 834 | } |
| 836 | 835 | ||
| 837 | for(i = 0; i < E1000_MC_TBL_SIZE; i++) | 836 | for (i = 0; i < E1000_MC_TBL_SIZE; i++) |
| 838 | REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF); | 837 | REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF); |
| 839 | 838 | ||
| 840 | *data = 0; | 839 | *data = 0; |
| @@ -850,8 +849,8 @@ e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data) | |||
| 850 | 849 | ||
| 851 | *data = 0; | 850 | *data = 0; |
| 852 | /* Read and add up the contents of the EEPROM */ | 851 | /* Read and add up the contents of the EEPROM */ |
| 853 | for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { | 852 | for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { |
| 854 | if((e1000_read_eeprom(&adapter->hw, i, 1, &temp)) < 0) { | 853 | if ((e1000_read_eeprom(&adapter->hw, i, 1, &temp)) < 0) { |
| 855 | *data = 1; | 854 | *data = 1; |
| 856 | break; | 855 | break; |
| 857 | } | 856 | } |
| @@ -859,7 +858,7 @@ e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data) | |||
| 859 | } | 858 | } |
| 860 | 859 | ||
| 861 | /* If Checksum is not Correct return error else test passed */ | 860 | /* If Checksum is not Correct return error else test passed */ |
| 862 | if((checksum != (uint16_t) EEPROM_SUM) && !(*data)) | 861 | if ((checksum != (uint16_t) EEPROM_SUM) && !(*data)) |
| 863 | *data = 2; | 862 | *data = 2; |
| 864 | 863 | ||
| 865 | return *data; | 864 | return *data; |
| @@ -888,9 +887,9 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data) | |||
| 888 | *data = 0; | 887 | *data = 0; |
| 889 | 888 | ||
| 890 | /* Hook up test interrupt handler just for this test */ | 889 | /* Hook up test interrupt handler just for this test */ |
| 891 | if(!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) { | 890 | if (!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) { |
| 892 | shared_int = FALSE; | 891 | shared_int = FALSE; |
| 893 | } else if(request_irq(irq, &e1000_test_intr, SA_SHIRQ, | 892 | } else if (request_irq(irq, &e1000_test_intr, SA_SHIRQ, |
| 894 | netdev->name, netdev)){ | 893 | netdev->name, netdev)){ |
| 895 | *data = 1; | 894 | *data = 1; |
| 896 | return -1; | 895 | return -1; |
| @@ -901,12 +900,12 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data) | |||
| 901 | msec_delay(10); | 900 | msec_delay(10); |
| 902 | 901 | ||
| 903 | /* Test each interrupt */ | 902 | /* Test each interrupt */ |
| 904 | for(; i < 10; i++) { | 903 | for (; i < 10; i++) { |
| 905 | 904 | ||
| 906 | /* Interrupt to test */ | 905 | /* Interrupt to test */ |
| 907 | mask = 1 << i; | 906 | mask = 1 << i; |
| 908 | 907 | ||
| 909 | if(!shared_int) { | 908 | if (!shared_int) { |
| 910 | /* Disable the interrupt to be reported in | 909 | /* Disable the interrupt to be reported in |
| 911 | * the cause register and then force the same | 910 | * the cause register and then force the same |
| 912 | * interrupt and see if one gets posted. If | 911 | * interrupt and see if one gets posted. If |
| @@ -917,8 +916,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data) | |||
| 917 | E1000_WRITE_REG(&adapter->hw, IMC, mask); | 916 | E1000_WRITE_REG(&adapter->hw, IMC, mask); |
| 918 | E1000_WRITE_REG(&adapter->hw, ICS, mask); | 917 | E1000_WRITE_REG(&adapter->hw, ICS, mask); |
| 919 | msec_delay(10); | 918 | msec_delay(10); |
| 920 | 919 | ||
| 921 | if(adapter->test_icr & mask) { | 920 | if (adapter->test_icr & mask) { |
| 922 | *data = 3; | 921 | *data = 3; |
| 923 | break; | 922 | break; |
| 924 | } | 923 | } |
| @@ -935,12 +934,12 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data) | |||
| 935 | E1000_WRITE_REG(&adapter->hw, ICS, mask); | 934 | E1000_WRITE_REG(&adapter->hw, ICS, mask); |
| 936 | msec_delay(10); | 935 | msec_delay(10); |
| 937 | 936 | ||
| 938 | if(!(adapter->test_icr & mask)) { | 937 | if (!(adapter->test_icr & mask)) { |
| 939 | *data = 4; | 938 | *data = 4; |
| 940 | break; | 939 | break; |
| 941 | } | 940 | } |
| 942 | 941 | ||
| 943 | if(!shared_int) { | 942 | if (!shared_int) { |
| 944 | /* Disable the other interrupts to be reported in | 943 | /* Disable the other interrupts to be reported in |
| 945 | * the cause register and then force the other | 944 | * the cause register and then force the other |
| 946 | * interrupts and see if any get posted. If | 945 | * interrupts and see if any get posted. If |
| @@ -952,7 +951,7 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data) | |||
| 952 | E1000_WRITE_REG(&adapter->hw, ICS, ~mask & 0x00007FFF); | 951 | E1000_WRITE_REG(&adapter->hw, ICS, ~mask & 0x00007FFF); |
| 953 | msec_delay(10); | 952 | msec_delay(10); |
| 954 | 953 | ||
| 955 | if(adapter->test_icr) { | 954 | if (adapter->test_icr) { |
| 956 | *data = 5; | 955 | *data = 5; |
| 957 | break; | 956 | break; |
| 958 | } | 957 | } |
| @@ -977,24 +976,24 @@ e1000_free_desc_rings(struct e1000_adapter *adapter) | |||
| 977 | struct pci_dev *pdev = adapter->pdev; | 976 | struct pci_dev *pdev = adapter->pdev; |
| 978 | int i; | 977 | int i; |
| 979 | 978 | ||
| 980 | if(txdr->desc && txdr->buffer_info) { | 979 | if (txdr->desc && txdr->buffer_info) { |
| 981 | for(i = 0; i < txdr->count; i++) { | 980 | for (i = 0; i < txdr->count; i++) { |
| 982 | if(txdr->buffer_info[i].dma) | 981 | if (txdr->buffer_info[i].dma) |
| 983 | pci_unmap_single(pdev, txdr->buffer_info[i].dma, | 982 | pci_unmap_single(pdev, txdr->buffer_info[i].dma, |
| 984 | txdr->buffer_info[i].length, | 983 | txdr->buffer_info[i].length, |
| 985 | PCI_DMA_TODEVICE); | 984 | PCI_DMA_TODEVICE); |
| 986 | if(txdr->buffer_info[i].skb) | 985 | if (txdr->buffer_info[i].skb) |
| 987 | dev_kfree_skb(txdr->buffer_info[i].skb); | 986 | dev_kfree_skb(txdr->buffer_info[i].skb); |
| 988 | } | 987 | } |
| 989 | } | 988 | } |
| 990 | 989 | ||
| 991 | if(rxdr->desc && rxdr->buffer_info) { | 990 | if (rxdr->desc && rxdr->buffer_info) { |
| 992 | for(i = 0; i < rxdr->count; i++) { | 991 | for (i = 0; i < rxdr->count; i++) { |
| 993 | if(rxdr->buffer_info[i].dma) | 992 | if (rxdr->buffer_info[i].dma) |
| 994 | pci_unmap_single(pdev, rxdr->buffer_info[i].dma, | 993 | pci_unmap_single(pdev, rxdr->buffer_info[i].dma, |
| 995 | rxdr->buffer_info[i].length, | 994 | rxdr->buffer_info[i].length, |
| 996 | PCI_DMA_FROMDEVICE); | 995 | PCI_DMA_FROMDEVICE); |
| 997 | if(rxdr->buffer_info[i].skb) | 996 | if (rxdr->buffer_info[i].skb) |
| 998 | dev_kfree_skb(rxdr->buffer_info[i].skb); | 997 | dev_kfree_skb(rxdr->buffer_info[i].skb); |
| 999 | } | 998 | } |
| 1000 | } | 999 | } |
| @@ -1027,11 +1026,11 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
| 1027 | 1026 | ||
| 1028 | /* Setup Tx descriptor ring and Tx buffers */ | 1027 | /* Setup Tx descriptor ring and Tx buffers */ |
| 1029 | 1028 | ||
| 1030 | if(!txdr->count) | 1029 | if (!txdr->count) |
| 1031 | txdr->count = E1000_DEFAULT_TXD; | 1030 | txdr->count = E1000_DEFAULT_TXD; |
| 1032 | 1031 | ||
| 1033 | size = txdr->count * sizeof(struct e1000_buffer); | 1032 | size = txdr->count * sizeof(struct e1000_buffer); |
| 1034 | if(!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) { | 1033 | if (!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) { |
| 1035 | ret_val = 1; | 1034 | ret_val = 1; |
| 1036 | goto err_nomem; | 1035 | goto err_nomem; |
| 1037 | } | 1036 | } |
| @@ -1039,7 +1038,7 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
| 1039 | 1038 | ||
| 1040 | txdr->size = txdr->count * sizeof(struct e1000_tx_desc); | 1039 | txdr->size = txdr->count * sizeof(struct e1000_tx_desc); |
| 1041 | E1000_ROUNDUP(txdr->size, 4096); | 1040 | E1000_ROUNDUP(txdr->size, 4096); |
| 1042 | if(!(txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma))) { | 1041 | if (!(txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma))) { |
| 1043 | ret_val = 2; | 1042 | ret_val = 2; |
| 1044 | goto err_nomem; | 1043 | goto err_nomem; |
| 1045 | } | 1044 | } |
| @@ -1058,12 +1057,12 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
| 1058 | E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | | 1057 | E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | |
| 1059 | E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT); | 1058 | E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT); |
| 1060 | 1059 | ||
| 1061 | for(i = 0; i < txdr->count; i++) { | 1060 | for (i = 0; i < txdr->count; i++) { |
| 1062 | struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i); | 1061 | struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i); |
| 1063 | struct sk_buff *skb; | 1062 | struct sk_buff *skb; |
| 1064 | unsigned int size = 1024; | 1063 | unsigned int size = 1024; |
| 1065 | 1064 | ||
| 1066 | if(!(skb = alloc_skb(size, GFP_KERNEL))) { | 1065 | if (!(skb = alloc_skb(size, GFP_KERNEL))) { |
| 1067 | ret_val = 3; | 1066 | ret_val = 3; |
| 1068 | goto err_nomem; | 1067 | goto err_nomem; |
| 1069 | } | 1068 | } |
| @@ -1083,18 +1082,18 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
| 1083 | 1082 | ||
| 1084 | /* Setup Rx descriptor ring and Rx buffers */ | 1083 | /* Setup Rx descriptor ring and Rx buffers */ |
| 1085 | 1084 | ||
| 1086 | if(!rxdr->count) | 1085 | if (!rxdr->count) |
| 1087 | rxdr->count = E1000_DEFAULT_RXD; | 1086 | rxdr->count = E1000_DEFAULT_RXD; |
| 1088 | 1087 | ||
| 1089 | size = rxdr->count * sizeof(struct e1000_buffer); | 1088 | size = rxdr->count * sizeof(struct e1000_buffer); |
| 1090 | if(!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) { | 1089 | if (!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) { |
| 1091 | ret_val = 4; | 1090 | ret_val = 4; |
| 1092 | goto err_nomem; | 1091 | goto err_nomem; |
| 1093 | } | 1092 | } |
| 1094 | memset(rxdr->buffer_info, 0, size); | 1093 | memset(rxdr->buffer_info, 0, size); |
| 1095 | 1094 | ||
| 1096 | rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); | 1095 | rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); |
| 1097 | if(!(rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma))) { | 1096 | if (!(rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma))) { |
| 1098 | ret_val = 5; | 1097 | ret_val = 5; |
| 1099 | goto err_nomem; | 1098 | goto err_nomem; |
| 1100 | } | 1099 | } |
| @@ -1114,11 +1113,11 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
| 1114 | (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT); | 1113 | (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT); |
| 1115 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl); | 1114 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl); |
| 1116 | 1115 | ||
| 1117 | for(i = 0; i < rxdr->count; i++) { | 1116 | for (i = 0; i < rxdr->count; i++) { |
| 1118 | struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i); | 1117 | struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i); |
| 1119 | struct sk_buff *skb; | 1118 | struct sk_buff *skb; |
| 1120 | 1119 | ||
| 1121 | if(!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, | 1120 | if (!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, |
| 1122 | GFP_KERNEL))) { | 1121 | GFP_KERNEL))) { |
| 1123 | ret_val = 6; | 1122 | ret_val = 6; |
| 1124 | goto err_nomem; | 1123 | goto err_nomem; |
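The setup path above allocates the software bookkeeping array with kmalloc() and the hardware descriptor array with pci_alloc_consistent(), after rounding the descriptor size up to a 4 KB multiple via E1000_ROUNDUP. A sketch of that allocation step under the same assumptions; demo_ring is hypothetical and the open-coded round-up stands in for the driver's macro.

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>

struct demo_ring {
	void *desc;		/* DMA-coherent descriptor array */
	dma_addr_t dma;		/* bus address of the descriptor array */
	unsigned int size;	/* descriptor array size in bytes */
	unsigned int count;	/* number of descriptors */
	void *buffer_info;	/* per-descriptor software state */
};

/* desc_len: sizeof one hardware descriptor; info_len: sizeof one
 * bookkeeping entry. Returns 0 on success or -ENOMEM, leaving nothing
 * half-allocated. */
static int demo_setup_ring(struct pci_dev *pdev, struct demo_ring *ring,
			   size_t desc_len, size_t info_len)
{
	ring->buffer_info = kmalloc(ring->count * info_len, GFP_KERNEL);
	if (!ring->buffer_info)
		return -ENOMEM;
	memset(ring->buffer_info, 0, ring->count * info_len);

	ring->size = ring->count * desc_len;
	ring->size = (ring->size + 4095) & ~4095;	/* round up to 4 KB */

	ring->desc = pci_alloc_consistent(pdev, ring->size, &ring->dma);
	if (!ring->desc) {
		kfree(ring->buffer_info);
		ring->buffer_info = NULL;
		return -ENOMEM;
	}
	memset(ring->desc, 0, ring->size);
	return 0;
}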
| @@ -1227,15 +1226,15 @@ e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter) | |||
| 1227 | 1226 | ||
| 1228 | /* Check Phy Configuration */ | 1227 | /* Check Phy Configuration */ |
| 1229 | e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg); | 1228 | e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg); |
| 1230 | if(phy_reg != 0x4100) | 1229 | if (phy_reg != 0x4100) |
| 1231 | return 9; | 1230 | return 9; |
| 1232 | 1231 | ||
| 1233 | e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); | 1232 | e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); |
| 1234 | if(phy_reg != 0x0070) | 1233 | if (phy_reg != 0x0070) |
| 1235 | return 10; | 1234 | return 10; |
| 1236 | 1235 | ||
| 1237 | e1000_read_phy_reg(&adapter->hw, 29, &phy_reg); | 1236 | e1000_read_phy_reg(&adapter->hw, 29, &phy_reg); |
| 1238 | if(phy_reg != 0x001A) | 1237 | if (phy_reg != 0x001A) |
| 1239 | return 11; | 1238 | return 11; |
| 1240 | 1239 | ||
| 1241 | return 0; | 1240 | return 0; |
| @@ -1249,7 +1248,7 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter) | |||
| 1249 | 1248 | ||
| 1250 | adapter->hw.autoneg = FALSE; | 1249 | adapter->hw.autoneg = FALSE; |
| 1251 | 1250 | ||
| 1252 | if(adapter->hw.phy_type == e1000_phy_m88) { | 1251 | if (adapter->hw.phy_type == e1000_phy_m88) { |
| 1253 | /* Auto-MDI/MDIX Off */ | 1252 | /* Auto-MDI/MDIX Off */ |
| 1254 | e1000_write_phy_reg(&adapter->hw, | 1253 | e1000_write_phy_reg(&adapter->hw, |
| 1255 | M88E1000_PHY_SPEC_CTRL, 0x0808); | 1254 | M88E1000_PHY_SPEC_CTRL, 0x0808); |
| @@ -1269,14 +1268,14 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter) | |||
| 1269 | E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ | 1268 | E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ |
| 1270 | E1000_CTRL_FD); /* Force Duplex to FULL */ | 1269 | E1000_CTRL_FD); /* Force Duplex to FULL */ |
| 1271 | 1270 | ||
| 1272 | if(adapter->hw.media_type == e1000_media_type_copper && | 1271 | if (adapter->hw.media_type == e1000_media_type_copper && |
| 1273 | adapter->hw.phy_type == e1000_phy_m88) { | 1272 | adapter->hw.phy_type == e1000_phy_m88) { |
| 1274 | ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ | 1273 | ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ |
| 1275 | } else { | 1274 | } else { |
| 1276 | /* Set the ILOS bit on the fiber NIC if a half | 1275 | /* Set the ILOS bit on the fiber NIC if a half |
| 1277 | * duplex link is detected. */ | 1276 | * duplex link is detected. */ |
| 1278 | stat_reg = E1000_READ_REG(&adapter->hw, STATUS); | 1277 | stat_reg = E1000_READ_REG(&adapter->hw, STATUS); |
| 1279 | if((stat_reg & E1000_STATUS_FD) == 0) | 1278 | if ((stat_reg & E1000_STATUS_FD) == 0) |
| 1280 | ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); | 1279 | ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); |
| 1281 | } | 1280 | } |
| 1282 | 1281 | ||
| @@ -1285,7 +1284,7 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter) | |||
| 1285 | /* Disable the receiver on the PHY so when a cable is plugged in, the | 1284 | /* Disable the receiver on the PHY so when a cable is plugged in, the |
| 1286 | * PHY does not begin to autoneg when a cable is reconnected to the NIC. | 1285 | * PHY does not begin to autoneg when a cable is reconnected to the NIC. |
| 1287 | */ | 1286 | */ |
| 1288 | if(adapter->hw.phy_type == e1000_phy_m88) | 1287 | if (adapter->hw.phy_type == e1000_phy_m88) |
| 1289 | e1000_phy_disable_receiver(adapter); | 1288 | e1000_phy_disable_receiver(adapter); |
| 1290 | 1289 | ||
| 1291 | udelay(500); | 1290 | udelay(500); |
| @@ -1301,14 +1300,14 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter) | |||
| 1301 | 1300 | ||
| 1302 | switch (adapter->hw.mac_type) { | 1301 | switch (adapter->hw.mac_type) { |
| 1303 | case e1000_82543: | 1302 | case e1000_82543: |
| 1304 | if(adapter->hw.media_type == e1000_media_type_copper) { | 1303 | if (adapter->hw.media_type == e1000_media_type_copper) { |
| 1305 | /* Attempt to setup Loopback mode on Non-integrated PHY. | 1304 | /* Attempt to setup Loopback mode on Non-integrated PHY. |
| 1306 | * Some PHY registers get corrupted at random, so | 1305 | * Some PHY registers get corrupted at random, so |
| 1307 | * attempt this 10 times. | 1306 | * attempt this 10 times. |
| 1308 | */ | 1307 | */ |
| 1309 | while(e1000_nonintegrated_phy_loopback(adapter) && | 1308 | while (e1000_nonintegrated_phy_loopback(adapter) && |
| 1310 | count++ < 10); | 1309 | count++ < 10); |
| 1311 | if(count < 11) | 1310 | if (count < 11) |
| 1312 | return 0; | 1311 | return 0; |
| 1313 | } | 1312 | } |
| 1314 | break; | 1313 | break; |
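On the 82543 copper path above, the non-integrated PHY loopback setup is retried up to ten times because, per the comment, the PHY registers occasionally read back corrupted. A generic sketch of that bounded-retry idiom; try_once() is a stand-in for e1000_nonintegrated_phy_loopback() and the original's post-increment counting is simplified.

#include <linux/errno.h>

/* try_once() returns 0 on success, non-zero on failure, like the PHY
 * loopback helper above. Retry it a bounded number of times. */
static int demo_retry(int (*try_once)(void *ctx), void *ctx, int max_tries)
{
	int count = 0;

	while (try_once(ctx) && ++count < max_tries)
		;	/* a failed attempt is simply repeated */

	return count < max_tries ? 0 : -EIO;
}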
| @@ -1430,8 +1429,8 @@ static int | |||
| 1430 | e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) | 1429 | e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) |
| 1431 | { | 1430 | { |
| 1432 | frame_size &= ~1; | 1431 | frame_size &= ~1; |
| 1433 | if(*(skb->data + 3) == 0xFF) { | 1432 | if (*(skb->data + 3) == 0xFF) { |
| 1434 | if((*(skb->data + frame_size / 2 + 10) == 0xBE) && | 1433 | if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && |
| 1435 | (*(skb->data + frame_size / 2 + 12) == 0xAF)) { | 1434 | (*(skb->data + frame_size / 2 + 12) == 0xAF)) { |
| 1436 | return 0; | 1435 | return 0; |
| 1437 | } | 1436 | } |
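e1000_check_lbtest_frame() above accepts a frame only if byte 3 is 0xFF and the bytes at frame_size/2 + 10 and + 12 carry the 0xBE / 0xAF markers. A sketch of a builder that produces such a frame; the fill pattern is inferred from the checker, not copied from the driver's actual e1000_create_lbtest_frame(), and the caller is assumed to have already reserved frame_size bytes in the skb.

#include <linux/skbuff.h>
#include <linux/string.h>

/* Fill skb->data so the checker above reports a match. */
static void demo_create_lbtest_frame(struct sk_buff *skb,
				     unsigned int frame_size)
{
	memset(skb->data, 0xFF, frame_size);		/* byte 3 ends up 0xFF */
	frame_size &= ~1;
	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
	skb->data[frame_size / 2 + 10] = 0xBE;		/* markers the checker */
	skb->data[frame_size / 2 + 12] = 0xAF;		/* looks for           */
}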
| @@ -1450,53 +1449,53 @@ e1000_run_loopback_test(struct e1000_adapter *adapter) | |||
| 1450 | 1449 | ||
| 1451 | E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1); | 1450 | E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1); |
| 1452 | 1451 | ||
| 1453 | /* Calculate the loop count based on the largest descriptor ring | 1452 | /* Calculate the loop count based on the largest descriptor ring |
| 1454 | * The idea is to wrap the largest ring a number of times using 64 | 1453 | * The idea is to wrap the largest ring a number of times using 64 |
| 1455 | * send/receive pairs during each loop | 1454 | * send/receive pairs during each loop |
| 1456 | */ | 1455 | */ |
| 1457 | 1456 | ||
| 1458 | if(rxdr->count <= txdr->count) | 1457 | if (rxdr->count <= txdr->count) |
| 1459 | lc = ((txdr->count / 64) * 2) + 1; | 1458 | lc = ((txdr->count / 64) * 2) + 1; |
| 1460 | else | 1459 | else |
| 1461 | lc = ((rxdr->count / 64) * 2) + 1; | 1460 | lc = ((rxdr->count / 64) * 2) + 1; |
| 1462 | 1461 | ||
| 1463 | k = l = 0; | 1462 | k = l = 0; |
| 1464 | for(j = 0; j <= lc; j++) { /* loop count loop */ | 1463 | for (j = 0; j <= lc; j++) { /* loop count loop */ |
| 1465 | for(i = 0; i < 64; i++) { /* send the packets */ | 1464 | for (i = 0; i < 64; i++) { /* send the packets */ |
| 1466 | e1000_create_lbtest_frame(txdr->buffer_info[i].skb, | 1465 | e1000_create_lbtest_frame(txdr->buffer_info[i].skb, |
| 1467 | 1024); | 1466 | 1024); |
| 1468 | pci_dma_sync_single_for_device(pdev, | 1467 | pci_dma_sync_single_for_device(pdev, |
| 1469 | txdr->buffer_info[k].dma, | 1468 | txdr->buffer_info[k].dma, |
| 1470 | txdr->buffer_info[k].length, | 1469 | txdr->buffer_info[k].length, |
| 1471 | PCI_DMA_TODEVICE); | 1470 | PCI_DMA_TODEVICE); |
| 1472 | if(unlikely(++k == txdr->count)) k = 0; | 1471 | if (unlikely(++k == txdr->count)) k = 0; |
| 1473 | } | 1472 | } |
| 1474 | E1000_WRITE_REG(&adapter->hw, TDT, k); | 1473 | E1000_WRITE_REG(&adapter->hw, TDT, k); |
| 1475 | msec_delay(200); | 1474 | msec_delay(200); |
| 1476 | time = jiffies; /* set the start time for the receive */ | 1475 | time = jiffies; /* set the start time for the receive */ |
| 1477 | good_cnt = 0; | 1476 | good_cnt = 0; |
| 1478 | do { /* receive the sent packets */ | 1477 | do { /* receive the sent packets */ |
| 1479 | pci_dma_sync_single_for_cpu(pdev, | 1478 | pci_dma_sync_single_for_cpu(pdev, |
| 1480 | rxdr->buffer_info[l].dma, | 1479 | rxdr->buffer_info[l].dma, |
| 1481 | rxdr->buffer_info[l].length, | 1480 | rxdr->buffer_info[l].length, |
| 1482 | PCI_DMA_FROMDEVICE); | 1481 | PCI_DMA_FROMDEVICE); |
| 1483 | 1482 | ||
| 1484 | ret_val = e1000_check_lbtest_frame( | 1483 | ret_val = e1000_check_lbtest_frame( |
| 1485 | rxdr->buffer_info[l].skb, | 1484 | rxdr->buffer_info[l].skb, |
| 1486 | 1024); | 1485 | 1024); |
| 1487 | if(!ret_val) | 1486 | if (!ret_val) |
| 1488 | good_cnt++; | 1487 | good_cnt++; |
| 1489 | if(unlikely(++l == rxdr->count)) l = 0; | 1488 | if (unlikely(++l == rxdr->count)) l = 0; |
| 1490 | /* time + 20 msecs (200 msecs on 2.4) is more than | 1489 | /* time + 20 msecs (200 msecs on 2.4) is more than |
| 1491 | * enough time to complete the receives; if it's | 1490 | * enough time to complete the receives; if it's |
| 1492 | * exceeded, break and error off | 1491 | * exceeded, break and error off |
| 1493 | */ | 1492 | */ |
| 1494 | } while (good_cnt < 64 && jiffies < (time + 20)); | 1493 | } while (good_cnt < 64 && jiffies < (time + 20)); |
| 1495 | if(good_cnt != 64) { | 1494 | if (good_cnt != 64) { |
| 1496 | ret_val = 13; /* ret_val is the same as mis-compare */ | 1495 | ret_val = 13; /* ret_val is the same as mis-compare */ |
| 1497 | break; | 1496 | break; |
| 1498 | } | 1497 | } |
| 1499 | if(jiffies >= (time + 2)) { | 1498 | if (jiffies >= (time + 2)) { |
| 1500 | ret_val = 14; /* error code for time out error */ | 1499 | ret_val = 14; /* error code for time out error */ |
| 1501 | break; | 1500 | break; |
| 1502 | } | 1501 | } |
| @@ -1549,17 +1548,17 @@ e1000_link_test(struct e1000_adapter *adapter, uint64_t *data) | |||
| 1549 | *data = 1; | 1548 | *data = 1; |
| 1550 | } else { | 1549 | } else { |
| 1551 | e1000_check_for_link(&adapter->hw); | 1550 | e1000_check_for_link(&adapter->hw); |
| 1552 | if(adapter->hw.autoneg) /* if auto_neg is set wait for it */ | 1551 | if (adapter->hw.autoneg) /* if auto_neg is set wait for it */ |
| 1553 | msec_delay(4000); | 1552 | msec_delay(4000); |
| 1554 | 1553 | ||
| 1555 | if(!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) { | 1554 | if (!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) { |
| 1556 | *data = 1; | 1555 | *data = 1; |
| 1557 | } | 1556 | } |
| 1558 | } | 1557 | } |
| 1559 | return *data; | 1558 | return *data; |
| 1560 | } | 1559 | } |
| 1561 | 1560 | ||
| 1562 | static int | 1561 | static int |
| 1563 | e1000_diag_test_count(struct net_device *netdev) | 1562 | e1000_diag_test_count(struct net_device *netdev) |
| 1564 | { | 1563 | { |
| 1565 | return E1000_TEST_LEN; | 1564 | return E1000_TEST_LEN; |
| @@ -1572,7 +1571,7 @@ e1000_diag_test(struct net_device *netdev, | |||
| 1572 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1571 | struct e1000_adapter *adapter = netdev_priv(netdev); |
| 1573 | boolean_t if_running = netif_running(netdev); | 1572 | boolean_t if_running = netif_running(netdev); |
| 1574 | 1573 | ||
| 1575 | if(eth_test->flags == ETH_TEST_FL_OFFLINE) { | 1574 | if (eth_test->flags == ETH_TEST_FL_OFFLINE) { |
| 1576 | /* Offline tests */ | 1575 | /* Offline tests */ |
| 1577 | 1576 | ||
| 1578 | /* save speed, duplex, autoneg settings */ | 1577 | /* save speed, duplex, autoneg settings */ |
| @@ -1582,27 +1581,27 @@ e1000_diag_test(struct net_device *netdev, | |||
| 1582 | 1581 | ||
| 1583 | /* Link test performed before hardware reset so autoneg doesn't | 1582 | /* Link test performed before hardware reset so autoneg doesn't |
| 1584 | * interfere with test result */ | 1583 | * interfere with test result */ |
| 1585 | if(e1000_link_test(adapter, &data[4])) | 1584 | if (e1000_link_test(adapter, &data[4])) |
| 1586 | eth_test->flags |= ETH_TEST_FL_FAILED; | 1585 | eth_test->flags |= ETH_TEST_FL_FAILED; |
| 1587 | 1586 | ||
| 1588 | if(if_running) | 1587 | if (if_running) |
| 1589 | e1000_down(adapter); | 1588 | e1000_down(adapter); |
| 1590 | else | 1589 | else |
| 1591 | e1000_reset(adapter); | 1590 | e1000_reset(adapter); |
| 1592 | 1591 | ||
| 1593 | if(e1000_reg_test(adapter, &data[0])) | 1592 | if (e1000_reg_test(adapter, &data[0])) |
| 1594 | eth_test->flags |= ETH_TEST_FL_FAILED; | 1593 | eth_test->flags |= ETH_TEST_FL_FAILED; |
| 1595 | 1594 | ||
| 1596 | e1000_reset(adapter); | 1595 | e1000_reset(adapter); |
| 1597 | if(e1000_eeprom_test(adapter, &data[1])) | 1596 | if (e1000_eeprom_test(adapter, &data[1])) |
| 1598 | eth_test->flags |= ETH_TEST_FL_FAILED; | 1597 | eth_test->flags |= ETH_TEST_FL_FAILED; |
| 1599 | 1598 | ||
| 1600 | e1000_reset(adapter); | 1599 | e1000_reset(adapter); |
| 1601 | if(e1000_intr_test(adapter, &data[2])) | 1600 | if (e1000_intr_test(adapter, &data[2])) |
| 1602 | eth_test->flags |= ETH_TEST_FL_FAILED; | 1601 | eth_test->flags |= ETH_TEST_FL_FAILED; |
| 1603 | 1602 | ||
| 1604 | e1000_reset(adapter); | 1603 | e1000_reset(adapter); |
| 1605 | if(e1000_loopback_test(adapter, &data[3])) | 1604 | if (e1000_loopback_test(adapter, &data[3])) |
| 1606 | eth_test->flags |= ETH_TEST_FL_FAILED; | 1605 | eth_test->flags |= ETH_TEST_FL_FAILED; |
| 1607 | 1606 | ||
| 1608 | /* restore speed, duplex, autoneg settings */ | 1607 | /* restore speed, duplex, autoneg settings */ |
| @@ -1611,11 +1610,11 @@ e1000_diag_test(struct net_device *netdev, | |||
| 1611 | adapter->hw.autoneg = autoneg; | 1610 | adapter->hw.autoneg = autoneg; |
| 1612 | 1611 | ||
| 1613 | e1000_reset(adapter); | 1612 | e1000_reset(adapter); |
| 1614 | if(if_running) | 1613 | if (if_running) |
| 1615 | e1000_up(adapter); | 1614 | e1000_up(adapter); |
| 1616 | } else { | 1615 | } else { |
| 1617 | /* Online tests */ | 1616 | /* Online tests */ |
| 1618 | if(e1000_link_test(adapter, &data[4])) | 1617 | if (e1000_link_test(adapter, &data[4])) |
| 1619 | eth_test->flags |= ETH_TEST_FL_FAILED; | 1618 | eth_test->flags |= ETH_TEST_FL_FAILED; |
| 1620 | 1619 | ||
| 1621 | /* Offline tests aren't run; pass by default */ | 1620 | /* Offline tests aren't run; pass by default */ |
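In the offline branch above, the driver runs the link, register, EEPROM, interrupt and loopback tests with a hardware reset between them, folding any failure into ETH_TEST_FL_FAILED; user space reaches this path through ethtool's offline self-test. A sketch of that accumulate-failures pattern; the callback table is hypothetical, since the real code calls each e1000_*_test() directly.

#include <linux/types.h>
#include <linux/ethtool.h>

struct demo_selftest {
	int (*run)(void *ctx, uint64_t *result);	/* non-zero on failure */
};

/* Run each test in order, recording per-test results in data[] and marking
 * the overall run failed if any single test fails. */
static void demo_run_offline_tests(struct demo_selftest *tests, int ntests,
				   void *ctx, struct ethtool_test *eth_test,
				   uint64_t *data)
{
	int i;

	for (i = 0; i < ntests; i++) {
		if (tests[i].run(ctx, &data[i]))
			eth_test->flags |= ETH_TEST_FL_FAILED;
		/* the driver resets the hardware between tests here */
	}
}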
| @@ -1633,7 +1632,7 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |||
| 1633 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1632 | struct e1000_adapter *adapter = netdev_priv(netdev); |
| 1634 | struct e1000_hw *hw = &adapter->hw; | 1633 | struct e1000_hw *hw = &adapter->hw; |
| 1635 | 1634 | ||
| 1636 | switch(adapter->hw.device_id) { | 1635 | switch (adapter->hw.device_id) { |
| 1637 | case E1000_DEV_ID_82542: | 1636 | case E1000_DEV_ID_82542: |
| 1638 | case E1000_DEV_ID_82543GC_FIBER: | 1637 | case E1000_DEV_ID_82543GC_FIBER: |
| 1639 | case E1000_DEV_ID_82543GC_COPPER: | 1638 | case E1000_DEV_ID_82543GC_COPPER: |
| @@ -1649,7 +1648,7 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |||
| 1649 | case E1000_DEV_ID_82546GB_FIBER: | 1648 | case E1000_DEV_ID_82546GB_FIBER: |
| 1650 | case E1000_DEV_ID_82571EB_FIBER: | 1649 | case E1000_DEV_ID_82571EB_FIBER: |
| 1651 | /* Wake events only supported on port A for dual fiber */ | 1650 | /* Wake events only supported on port A for dual fiber */ |
| 1652 | if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) { | 1651 | if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) { |
| 1653 | wol->supported = 0; | 1652 | wol->supported = 0; |
| 1654 | wol->wolopts = 0; | 1653 | wol->wolopts = 0; |
| 1655 | return; | 1654 | return; |
| @@ -1661,13 +1660,13 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |||
| 1661 | WAKE_BCAST | WAKE_MAGIC; | 1660 | WAKE_BCAST | WAKE_MAGIC; |
| 1662 | 1661 | ||
| 1663 | wol->wolopts = 0; | 1662 | wol->wolopts = 0; |
| 1664 | if(adapter->wol & E1000_WUFC_EX) | 1663 | if (adapter->wol & E1000_WUFC_EX) |
| 1665 | wol->wolopts |= WAKE_UCAST; | 1664 | wol->wolopts |= WAKE_UCAST; |
| 1666 | if(adapter->wol & E1000_WUFC_MC) | 1665 | if (adapter->wol & E1000_WUFC_MC) |
| 1667 | wol->wolopts |= WAKE_MCAST; | 1666 | wol->wolopts |= WAKE_MCAST; |
| 1668 | if(adapter->wol & E1000_WUFC_BC) | 1667 | if (adapter->wol & E1000_WUFC_BC) |
| 1669 | wol->wolopts |= WAKE_BCAST; | 1668 | wol->wolopts |= WAKE_BCAST; |
| 1670 | if(adapter->wol & E1000_WUFC_MAG) | 1669 | if (adapter->wol & E1000_WUFC_MAG) |
| 1671 | wol->wolopts |= WAKE_MAGIC; | 1670 | wol->wolopts |= WAKE_MAGIC; |
| 1672 | return; | 1671 | return; |
| 1673 | } | 1672 | } |
| @@ -1679,7 +1678,7 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |||
| 1679 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1678 | struct e1000_adapter *adapter = netdev_priv(netdev); |
| 1680 | struct e1000_hw *hw = &adapter->hw; | 1679 | struct e1000_hw *hw = &adapter->hw; |
| 1681 | 1680 | ||
| 1682 | switch(adapter->hw.device_id) { | 1681 | switch (adapter->hw.device_id) { |
| 1683 | case E1000_DEV_ID_82542: | 1682 | case E1000_DEV_ID_82542: |
| 1684 | case E1000_DEV_ID_82543GC_FIBER: | 1683 | case E1000_DEV_ID_82543GC_FIBER: |
| 1685 | case E1000_DEV_ID_82543GC_COPPER: | 1684 | case E1000_DEV_ID_82543GC_COPPER: |
| @@ -1693,23 +1692,23 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |||
| 1693 | case E1000_DEV_ID_82546GB_FIBER: | 1692 | case E1000_DEV_ID_82546GB_FIBER: |
| 1694 | case E1000_DEV_ID_82571EB_FIBER: | 1693 | case E1000_DEV_ID_82571EB_FIBER: |
| 1695 | /* Wake events only supported on port A for dual fiber */ | 1694 | /* Wake events only supported on port A for dual fiber */ |
| 1696 | if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) | 1695 | if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) |
| 1697 | return wol->wolopts ? -EOPNOTSUPP : 0; | 1696 | return wol->wolopts ? -EOPNOTSUPP : 0; |
| 1698 | /* Fall Through */ | 1697 | /* Fall Through */ |
| 1699 | 1698 | ||
| 1700 | default: | 1699 | default: |
| 1701 | if(wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) | 1700 | if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) |
| 1702 | return -EOPNOTSUPP; | 1701 | return -EOPNOTSUPP; |
| 1703 | 1702 | ||
| 1704 | adapter->wol = 0; | 1703 | adapter->wol = 0; |
| 1705 | 1704 | ||
| 1706 | if(wol->wolopts & WAKE_UCAST) | 1705 | if (wol->wolopts & WAKE_UCAST) |
| 1707 | adapter->wol |= E1000_WUFC_EX; | 1706 | adapter->wol |= E1000_WUFC_EX; |
| 1708 | if(wol->wolopts & WAKE_MCAST) | 1707 | if (wol->wolopts & WAKE_MCAST) |
| 1709 | adapter->wol |= E1000_WUFC_MC; | 1708 | adapter->wol |= E1000_WUFC_MC; |
| 1710 | if(wol->wolopts & WAKE_BCAST) | 1709 | if (wol->wolopts & WAKE_BCAST) |
| 1711 | adapter->wol |= E1000_WUFC_BC; | 1710 | adapter->wol |= E1000_WUFC_BC; |
| 1712 | if(wol->wolopts & WAKE_MAGIC) | 1711 | if (wol->wolopts & WAKE_MAGIC) |
| 1713 | adapter->wol |= E1000_WUFC_MAG; | 1712 | adapter->wol |= E1000_WUFC_MAG; |
| 1714 | } | 1713 | } |
| 1715 | 1714 | ||
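e1000_get_wol() and e1000_set_wol() above translate between ethtool's WAKE_* option bits and the controller's wake-up filter (WUFC) bits. A table-driven sketch of the same mapping; the E1000_WUFC_* values come from the driver's own e1000_hw.h, as used in the hunk.

#include <linux/kernel.h>
#include <linux/ethtool.h>
#include "e1000_hw.h"		/* driver header providing E1000_WUFC_* */

static const struct {
	uint32_t wake;		/* ethtool WAKE_* bit */
	uint32_t wufc;		/* hardware wake-up filter bit */
} demo_wol_map[] = {
	{ WAKE_UCAST, E1000_WUFC_EX },
	{ WAKE_MCAST, E1000_WUFC_MC },
	{ WAKE_BCAST, E1000_WUFC_BC },
	{ WAKE_MAGIC, E1000_WUFC_MAG },
};

/* Equivalent of the if-chain in e1000_get_wol(): report which WAKE_*
 * options correspond to the currently programmed WUFC bits. */
static uint32_t demo_wufc_to_wolopts(uint32_t wufc)
{
	uint32_t wolopts = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(demo_wol_map); i++)
		if (wufc & demo_wol_map[i].wufc)
			wolopts |= demo_wol_map[i].wake;
	return wolopts;
}

The set direction walks the same table the other way, OR-ing in the wufc member for each WAKE_* bit the user requested, as the if-chain in e1000_set_wol() does.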
| @@ -1727,7 +1726,7 @@ e1000_led_blink_callback(unsigned long data) | |||
| 1727 | { | 1726 | { |
| 1728 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; | 1727 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; |
| 1729 | 1728 | ||
| 1730 | if(test_and_change_bit(E1000_LED_ON, &adapter->led_status)) | 1729 | if (test_and_change_bit(E1000_LED_ON, &adapter->led_status)) |
| 1731 | e1000_led_off(&adapter->hw); | 1730 | e1000_led_off(&adapter->hw); |
| 1732 | else | 1731 | else |
| 1733 | e1000_led_on(&adapter->hw); | 1732 | e1000_led_on(&adapter->hw); |
| @@ -1740,11 +1739,11 @@ e1000_phys_id(struct net_device *netdev, uint32_t data) | |||
| 1740 | { | 1739 | { |
| 1741 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1740 | struct e1000_adapter *adapter = netdev_priv(netdev); |
| 1742 | 1741 | ||
| 1743 | if(!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ)) | 1742 | if (!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ)) |
| 1744 | data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ); | 1743 | data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ); |
| 1745 | 1744 | ||
| 1746 | if(adapter->hw.mac_type < e1000_82571) { | 1745 | if (adapter->hw.mac_type < e1000_82571) { |
| 1747 | if(!adapter->blink_timer.function) { | 1746 | if (!adapter->blink_timer.function) { |
| 1748 | init_timer(&adapter->blink_timer); | 1747 | init_timer(&adapter->blink_timer); |
| 1749 | adapter->blink_timer.function = e1000_led_blink_callback; | 1748 | adapter->blink_timer.function = e1000_led_blink_callback; |
| 1750 | adapter->blink_timer.data = (unsigned long) adapter; | 1749 | adapter->blink_timer.data = (unsigned long) adapter; |
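e1000_phys_id() above blinks the LED by arming a kernel timer whose callback flips an on/off bit with test_and_change_bit() and drives the LED accordingly. A sketch of that blink idiom with the same 2.6-era timer API (init_timer/mod_timer, callback taking an unsigned long); the printk() calls stand in for e1000_led_on()/e1000_led_off(), and the blink interval is an assumption.

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>

#define DEMO_LED_ON		0		/* bit index, like E1000_LED_ON */
#define DEMO_BLINK_INTERVAL	(HZ / 4)	/* assumed blink period */

struct demo_blinker {
	struct timer_list timer;
	unsigned long led_status;
};

/* Timer callback: toggle the recorded LED state and re-arm the timer. */
static void demo_blink_callback(unsigned long data)
{
	struct demo_blinker *b = (struct demo_blinker *)data;

	if (test_and_change_bit(DEMO_LED_ON, &b->led_status))
		printk(KERN_DEBUG "demo: led off\n");
	else
		printk(KERN_DEBUG "demo: led on\n");

	mod_timer(&b->timer, jiffies + DEMO_BLINK_INTERVAL);
}

static void demo_start_blink(struct demo_blinker *b)
{
	init_timer(&b->timer);
	b->timer.function = demo_blink_callback;
	b->timer.data = (unsigned long)b;
	mod_timer(&b->timer, jiffies + DEMO_BLINK_INTERVAL);
}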
| @@ -1782,21 +1781,21 @@ static int | |||
| 1782 | e1000_nway_reset(struct net_device *netdev) | 1781 | e1000_nway_reset(struct net_device *netdev) |
| 1783 | { | 1782 | { |
| 1784 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1783 | struct e1000_adapter *adapter = netdev_priv(netdev); |
| 1785 | if(netif_running(netdev)) { | 1784 | if (netif_running(netdev)) { |
| 1786 | e1000_down(adapter); | 1785 | e1000_down(adapter); |
| 1787 | e1000_up(adapter); | 1786 | e1000_up(adapter); |
| 1788 | } | 1787 | } |
| 1789 | return 0; | 1788 | return 0; |
| 1790 | } | 1789 | } |
| 1791 | 1790 | ||
| 1792 | static int | 1791 | static int |
| 1793 | e1000_get_stats_count(struct net_device *netdev) | 1792 | e1000_get_stats_count(struct net_device *netdev) |
| 1794 | { | 1793 | { |
| 1795 | return E1000_STATS_LEN; | 1794 | return E1000_STATS_LEN; |
| 1796 | } | 1795 | } |
| 1797 | 1796 | ||
| 1798 | static void | 1797 | static void |
| 1799 | e1000_get_ethtool_stats(struct net_device *netdev, | 1798 | e1000_get_ethtool_stats(struct net_device *netdev, |
| 1800 | struct ethtool_stats *stats, uint64_t *data) | 1799 | struct ethtool_stats *stats, uint64_t *data) |
| 1801 | { | 1800 | { |
| 1802 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1801 | struct e1000_adapter *adapter = netdev_priv(netdev); |
| @@ -1830,7 +1829,7 @@ e1000_get_ethtool_stats(struct net_device *netdev, | |||
| 1830 | /* BUG_ON(i != E1000_STATS_LEN); */ | 1829 | /* BUG_ON(i != E1000_STATS_LEN); */ |
| 1831 | } | 1830 | } |
| 1832 | 1831 | ||
| 1833 | static void | 1832 | static void |
| 1834 | e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data) | 1833 | e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data) |
| 1835 | { | 1834 | { |
| 1836 | #ifdef CONFIG_E1000_MQ | 1835 | #ifdef CONFIG_E1000_MQ |
| @@ -1839,9 +1838,9 @@ e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data) | |||
| 1839 | uint8_t *p = data; | 1838 | uint8_t *p = data; |
| 1840 | int i; | 1839 | int i; |
| 1841 | 1840 | ||
| 1842 | switch(stringset) { | 1841 | switch (stringset) { |
| 1843 | case ETH_SS_TEST: | 1842 | case ETH_SS_TEST: |
| 1844 | memcpy(data, *e1000_gstrings_test, | 1843 | memcpy(data, *e1000_gstrings_test, |
| 1845 | E1000_TEST_LEN*ETH_GSTRING_LEN); | 1844 | E1000_TEST_LEN*ETH_GSTRING_LEN); |
| 1846 | break; | 1845 | break; |
| 1847 | case ETH_SS_STATS: | 1846 | case ETH_SS_STATS: |
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c index 2437d362ff63..beeec0fbbeac 100644 --- a/drivers/net/e1000/e1000_hw.c +++ b/drivers/net/e1000/e1000_hw.c | |||
| @@ -1600,10 +1600,10 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw) | |||
| 1600 | if(ret_val) | 1600 | if(ret_val) |
| 1601 | return ret_val; | 1601 | return ret_val; |
| 1602 | 1602 | ||
| 1603 | /* Read the MII 1000Base-T Control Register (Address 9). */ | 1603 | /* Read the MII 1000Base-T Control Register (Address 9). */ |
| 1604 | ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); | 1604 | ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); |
| 1605 | if(ret_val) | 1605 | if(ret_val) |
| 1606 | return ret_val; | 1606 | return ret_val; |
| 1607 | 1607 | ||
| 1608 | /* Need to parse both autoneg_advertised and fc and set up | 1608 | /* Need to parse both autoneg_advertised and fc and set up |
| 1609 | * the appropriate PHY registers. First we will parse for | 1609 | * the appropriate PHY registers. First we will parse for |
| @@ -3916,7 +3916,7 @@ e1000_read_eeprom(struct e1000_hw *hw, | |||
| 3916 | } | 3916 | } |
| 3917 | } | 3917 | } |
| 3918 | 3918 | ||
| 3919 | if(eeprom->use_eerd == TRUE) { | 3919 | if (eeprom->use_eerd == TRUE) { |
| 3920 | ret_val = e1000_read_eeprom_eerd(hw, offset, words, data); | 3920 | ret_val = e1000_read_eeprom_eerd(hw, offset, words, data); |
| 3921 | if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) || | 3921 | if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) || |
| 3922 | (hw->mac_type != e1000_82573)) | 3922 | (hw->mac_type != e1000_82573)) |
| @@ -4423,7 +4423,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw) | |||
| 4423 | return -E1000_ERR_EEPROM; | 4423 | return -E1000_ERR_EEPROM; |
| 4424 | } | 4424 | } |
| 4425 | 4425 | ||
| 4426 | /* If STM opcode located in bits 15:8 of flop, reset firmware */ | 4426 | /* If STM opcode located in bits 15:8 of flop, reset firmware */ |
| 4427 | if ((flop & 0xFF00) == E1000_STM_OPCODE) { | 4427 | if ((flop & 0xFF00) == E1000_STM_OPCODE) { |
| 4428 | E1000_WRITE_REG(hw, HICR, E1000_HICR_FW_RESET); | 4428 | E1000_WRITE_REG(hw, HICR, E1000_HICR_FW_RESET); |
| 4429 | } | 4429 | } |
| @@ -4431,7 +4431,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw) | |||
| 4431 | /* Perform the flash update */ | 4431 | /* Perform the flash update */ |
| 4432 | E1000_WRITE_REG(hw, EECD, eecd | E1000_EECD_FLUPD); | 4432 | E1000_WRITE_REG(hw, EECD, eecd | E1000_EECD_FLUPD); |
| 4433 | 4433 | ||
| 4434 | for (i=0; i < attempts; i++) { | 4434 | for (i=0; i < attempts; i++) { |
| 4435 | eecd = E1000_READ_REG(hw, EECD); | 4435 | eecd = E1000_READ_REG(hw, EECD); |
| 4436 | if ((eecd & E1000_EECD_FLUPD) == 0) { | 4436 | if ((eecd & E1000_EECD_FLUPD) == 0) { |
| 4437 | break; | 4437 | break; |
| @@ -4504,6 +4504,7 @@ e1000_read_mac_addr(struct e1000_hw * hw) | |||
| 4504 | hw->perm_mac_addr[i] = (uint8_t) (eeprom_data & 0x00FF); | 4504 | hw->perm_mac_addr[i] = (uint8_t) (eeprom_data & 0x00FF); |
| 4505 | hw->perm_mac_addr[i+1] = (uint8_t) (eeprom_data >> 8); | 4505 | hw->perm_mac_addr[i+1] = (uint8_t) (eeprom_data >> 8); |
| 4506 | } | 4506 | } |
| 4507 | |||
| 4507 | switch (hw->mac_type) { | 4508 | switch (hw->mac_type) { |
| 4508 | default: | 4509 | default: |
| 4509 | break; | 4510 | break; |
| @@ -6840,7 +6841,8 @@ int32_t | |||
| 6840 | e1000_check_phy_reset_block(struct e1000_hw *hw) | 6841 | e1000_check_phy_reset_block(struct e1000_hw *hw) |
| 6841 | { | 6842 | { |
| 6842 | uint32_t manc = 0; | 6843 | uint32_t manc = 0; |
| 6843 | if(hw->mac_type > e1000_82547_rev_2) | 6844 | |
| 6845 | if (hw->mac_type > e1000_82547_rev_2) | ||
| 6844 | manc = E1000_READ_REG(hw, MANC); | 6846 | manc = E1000_READ_REG(hw, MANC); |
| 6845 | return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? | 6847 | return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? |
| 6846 | E1000_BLK_PHY_RESET : E1000_SUCCESS; | 6848 | E1000_BLK_PHY_RESET : E1000_SUCCESS; |
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h index 0b8f6f2b774b..f1219dd9dbac 100644 --- a/drivers/net/e1000/e1000_hw.h +++ b/drivers/net/e1000/e1000_hw.h | |||
| @@ -377,6 +377,7 @@ int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask); | |||
| 377 | void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask); | 377 | void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask); |
| 378 | 378 | ||
| 379 | /* Filters (multicast, vlan, receive) */ | 379 | /* Filters (multicast, vlan, receive) */ |
| 380 | void e1000_mc_addr_list_update(struct e1000_hw *hw, uint8_t * mc_addr_list, uint32_t mc_addr_count, uint32_t pad, uint32_t rar_used_count); | ||
| 380 | uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr); | 381 | uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr); |
| 381 | void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value); | 382 | void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value); |
| 382 | void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index); | 383 | void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index); |
| @@ -401,7 +402,9 @@ void e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value); | |||
| 401 | void e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value); | 402 | void e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value); |
| 402 | /* Port I/O is only supported on 82544 and newer */ | 403 | /* Port I/O is only supported on 82544 and newer */ |
| 403 | uint32_t e1000_io_read(struct e1000_hw *hw, unsigned long port); | 404 | uint32_t e1000_io_read(struct e1000_hw *hw, unsigned long port); |
| 405 | uint32_t e1000_read_reg_io(struct e1000_hw *hw, uint32_t offset); | ||
| 404 | void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value); | 406 | void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value); |
| 407 | void e1000_enable_pciex_master(struct e1000_hw *hw); | ||
| 405 | int32_t e1000_disable_pciex_master(struct e1000_hw *hw); | 408 | int32_t e1000_disable_pciex_master(struct e1000_hw *hw); |
| 406 | int32_t e1000_get_software_semaphore(struct e1000_hw *hw); | 409 | int32_t e1000_get_software_semaphore(struct e1000_hw *hw); |
| 407 | void e1000_release_software_semaphore(struct e1000_hw *hw); | 410 | void e1000_release_software_semaphore(struct e1000_hw *hw); |
| @@ -899,14 +902,14 @@ struct e1000_ffvt_entry { | |||
| 899 | #define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */ | 902 | #define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */ |
| 900 | #define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */ | 903 | #define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */ |
| 901 | #define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ | 904 | #define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ |
| 902 | #define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */ | 905 | #define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */ |
| 903 | #define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */ | 906 | #define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */ |
| 904 | #define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */ | 907 | #define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */ |
| 905 | #define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */ | 908 | #define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */ |
| 906 | #define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */ | 909 | #define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */ |
| 907 | #define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */ | 910 | #define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */ |
| 908 | #define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */ | 911 | #define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */ |
| 909 | #define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */ | 912 | #define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */ |
| 910 | #define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ | 913 | #define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ |
| 911 | #define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ | 914 | #define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ |
| 912 | #define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ | 915 | #define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ |
| @@ -1761,7 +1764,6 @@ struct e1000_hw { | |||
| 1761 | #define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ | 1764 | #define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ |
| 1762 | #define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc. | 1765 | #define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc. |
| 1763 | still to be processed. */ | 1766 | still to be processed. */ |
| 1764 | |||
| 1765 | /* Transmit Configuration Word */ | 1767 | /* Transmit Configuration Word */ |
| 1766 | #define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ | 1768 | #define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ |
| 1767 | #define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */ | 1769 | #define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */ |
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index d0a5d1656c5f..31e332935e5a 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
| @@ -29,11 +29,71 @@ | |||
| 29 | #include "e1000.h" | 29 | #include "e1000.h" |
| 30 | 30 | ||
| 31 | /* Change Log | 31 | /* Change Log |
| 32 | * 6.0.58 4/20/05 | 32 | * 6.3.9 12/16/2005 |
| 33 | * o Accepted ethtool cleanup patch from Stephen Hemminger | 33 | * o incorporate fix for recycled skbs from IBM LTC |
| 34 | * 6.0.44+ 2/15/05 | 34 | * 6.3.7 11/18/2005 |
| 35 | * o applied Anton's patch to resolve tx hang in hardware | 35 | * o Honor eeprom setting for enabling/disabling Wake On Lan |
| 36 | * o Applied Andrew Mortons patch - e1000 stops working after resume | 36 | * 6.3.5 11/17/2005 |
| 37 | * o Fix memory leak in rx ring handling for PCI Express adapters | ||
| 38 | * 6.3.4 11/8/05 | ||
| 39 | * o Patch from Jesper Juhl to remove redundant NULL checks for kfree | ||
| 40 | * 6.3.2 9/20/05 | ||
| 41 | * o Render logic that sets/resets DRV_LOAD as inline functions to | ||
| 42 | * avoid code replication. If f/w is AMT then set DRV_LOAD only when | ||
| 43 | * network interface is open. | ||
| 44 | * o Handle DRV_LOAD set/reset in cases where AMT uses VLANs. | ||
| 45 | * o Adjust PBA partitioning for Jumbo frames using MTU size and not | ||
| 46 | * rx_buffer_len | ||
| 47 | * 6.3.1 9/19/05 | ||
| 48 | * o Use adapter->tx_timeout_factor in Tx Hung Detect logic | ||
| 49 | (e1000_clean_tx_irq) | ||
| 50 | * o Support for 8086:10B5 device (Quad Port) | ||
| 51 | * 6.2.14 9/15/05 | ||
| 52 | * o In AMT enabled configurations, set/reset DRV_LOAD bit on interface | ||
| 53 | * open/close | ||
| 54 | * 6.2.13 9/14/05 | ||
| 55 | * o Invoke e1000_check_mng_mode only for 8257x controllers since it | ||
| 56 | * accesses the FWSM that is not supported in other controllers | ||
| 57 | * 6.2.12 9/9/05 | ||
| 58 | * o Add support for device id E1000_DEV_ID_82546GB_QUAD_COPPER | ||
| 59 | * o set RCTL:SECRC only for controllers newer than 82543. | ||
| 60 | * o When the n/w interface comes down reset DRV_LOAD bit to notify f/w. | ||
| 61 | * This code was moved from e1000_remove to e1000_close | ||
| 62 | * 6.2.10 9/6/05 | ||
| 63 | * o Fix off-by-one error in updating RDT in e1000_alloc_rx_buffers[_ps]. | ||
| 64 | * o Enable fc by default on 82573 controllers (do not read eeprom) | ||
| 65 | * o Fix rx_errors statistic not to include missed_packet_count | ||
| 66 | * o Fix rx_dropped statistic not to include missed_packet_count | ||
| 67 | (Padraig Brady) | ||
| 68 | * 6.2.9 8/30/05 | ||
| 69 | * o Remove call to update statistics from the controller ib e1000_get_stats | ||
| 70 | * 6.2.8 8/30/05 | ||
| 71 | * o Improved algorithm for rx buffer allocation/rdt update | ||
| 72 | * o Flow control watermarks relative to rx PBA size | ||
| 73 | * o Simplified 'Tx Hung' detect logic | ||
| 74 | * 6.2.7 8/17/05 | ||
| 75 | * o Report rx buffer allocation failures and tx timeout counts in stats | ||
| 76 | * 6.2.6 8/16/05 | ||
| 77 | * o Implement workaround for controller erratum -- linear non-tso packet | ||
| 78 | * following a TSO gets written back prematurely | ||
| 79 | * 6.2.5 8/15/05 | ||
| 80 | * o Set netdev->tx_queue_len based on link speed/duplex settings. | ||
| 81 | * o Fix net_stats.rx_fifo_errors <p@draigBrady.com> | ||
| 82 | * o Do not power off PHY if SoL/IDER session is active | ||
| 83 | * 6.2.4 8/10/05 | ||
| 84 | * o Fix loopback test setup/cleanup for 82571/3 controllers | ||
| 85 | * o Fix parsing of outgoing packets (e1000_transfer_dhcp_info) to treat | ||
| 86 | * all packets as raw | ||
| 87 | * o Prevent operations that will cause the PHY to be reset if SoL/IDER | ||
| 88 | * sessions are active and log a message | ||
| 89 | * 6.2.2 7/21/05 | ||
| 90 | * o used fixed size descriptors for all MTU sizes, reduces memory load | ||
| 91 | * 6.1.2 4/13/05 | ||
| 92 | * o Fixed ethtool diagnostics | ||
| 93 | * o Enabled flow control to take default eeprom settings | ||
| 94 | * o Added stats_lock around e1000_read_phy_reg commands to avoid concurrent | ||
| 95 | * calls, one from mii_ioctl and other from within update_stats while | ||
| 96 | * processing MIIREG ioctl. | ||
| 37 | */ | 97 | */ |
| 38 | 98 | ||
| 39 | char e1000_driver_name[] = "e1000"; | 99 | char e1000_driver_name[] = "e1000"; |
| @@ -295,7 +355,7 @@ e1000_irq_disable(struct e1000_adapter *adapter) | |||
| 295 | static inline void | 355 | static inline void |
| 296 | e1000_irq_enable(struct e1000_adapter *adapter) | 356 | e1000_irq_enable(struct e1000_adapter *adapter) |
| 297 | { | 357 | { |
| 298 | if(likely(atomic_dec_and_test(&adapter->irq_sem))) { | 358 | if (likely(atomic_dec_and_test(&adapter->irq_sem))) { |
| 299 | E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK); | 359 | E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK); |
| 300 | E1000_WRITE_FLUSH(&adapter->hw); | 360 | E1000_WRITE_FLUSH(&adapter->hw); |
| 301 | } | 361 | } |
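e1000_irq_enable() above only unmasks interrupts when the irq_sem counter drops back to zero, so nested disable/enable pairs balance out and a stray enable cannot unmask too early. A sketch of that counting idiom; the mask/unmask callbacks stand in for the IMC/IMS register writes.

#include <asm/atomic.h>

struct demo_irq_gate {
	atomic_t irq_sem;	/* > 0 while any caller wants interrupts off */
};

static void demo_irq_disable(struct demo_irq_gate *g, void (*mask)(void))
{
	atomic_inc(&g->irq_sem);
	mask();
}

/* Only the enable that brings the counter back to zero really unmasks,
 * mirroring the atomic_dec_and_test() test in e1000_irq_enable(). */
static void demo_irq_enable(struct demo_irq_gate *g, void (*unmask)(void))
{
	if (atomic_dec_and_test(&g->irq_sem))
		unmask();
}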
| @@ -307,17 +367,17 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter) | |||
| 307 | struct net_device *netdev = adapter->netdev; | 367 | struct net_device *netdev = adapter->netdev; |
| 308 | uint16_t vid = adapter->hw.mng_cookie.vlan_id; | 368 | uint16_t vid = adapter->hw.mng_cookie.vlan_id; |
| 309 | uint16_t old_vid = adapter->mng_vlan_id; | 369 | uint16_t old_vid = adapter->mng_vlan_id; |
| 310 | if(adapter->vlgrp) { | 370 | if (adapter->vlgrp) { |
| 311 | if(!adapter->vlgrp->vlan_devices[vid]) { | 371 | if (!adapter->vlgrp->vlan_devices[vid]) { |
| 312 | if(adapter->hw.mng_cookie.status & | 372 | if (adapter->hw.mng_cookie.status & |
| 313 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { | 373 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { |
| 314 | e1000_vlan_rx_add_vid(netdev, vid); | 374 | e1000_vlan_rx_add_vid(netdev, vid); |
| 315 | adapter->mng_vlan_id = vid; | 375 | adapter->mng_vlan_id = vid; |
| 316 | } else | 376 | } else |
| 317 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; | 377 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; |
| 318 | 378 | ||
| 319 | if((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) && | 379 | if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) && |
| 320 | (vid != old_vid) && | 380 | (vid != old_vid) && |
| 321 | !adapter->vlgrp->vlan_devices[old_vid]) | 381 | !adapter->vlgrp->vlan_devices[old_vid]) |
| 322 | e1000_vlan_rx_kill_vid(netdev, old_vid); | 382 | e1000_vlan_rx_kill_vid(netdev, old_vid); |
| 323 | } | 383 | } |
| @@ -401,10 +461,10 @@ e1000_up(struct e1000_adapter *adapter) | |||
| 401 | /* hardware has been reset, we need to reload some things */ | 461 | /* hardware has been reset, we need to reload some things */ |
| 402 | 462 | ||
| 403 | /* Reset the PHY if it was previously powered down */ | 463 | /* Reset the PHY if it was previously powered down */ |
| 404 | if(adapter->hw.media_type == e1000_media_type_copper) { | 464 | if (adapter->hw.media_type == e1000_media_type_copper) { |
| 405 | uint16_t mii_reg; | 465 | uint16_t mii_reg; |
| 406 | e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); | 466 | e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); |
| 407 | if(mii_reg & MII_CR_POWER_DOWN) | 467 | if (mii_reg & MII_CR_POWER_DOWN) |
| 408 | e1000_phy_reset(&adapter->hw); | 468 | e1000_phy_reset(&adapter->hw); |
| 409 | } | 469 | } |
| 410 | 470 | ||
| @@ -425,16 +485,16 @@ e1000_up(struct e1000_adapter *adapter) | |||
| 425 | } | 485 | } |
| 426 | 486 | ||
| 427 | #ifdef CONFIG_PCI_MSI | 487 | #ifdef CONFIG_PCI_MSI |
| 428 | if(adapter->hw.mac_type > e1000_82547_rev_2) { | 488 | if (adapter->hw.mac_type > e1000_82547_rev_2) { |
| 429 | adapter->have_msi = TRUE; | 489 | adapter->have_msi = TRUE; |
| 430 | if((err = pci_enable_msi(adapter->pdev))) { | 490 | if ((err = pci_enable_msi(adapter->pdev))) { |
| 431 | DPRINTK(PROBE, ERR, | 491 | DPRINTK(PROBE, ERR, |
| 432 | "Unable to allocate MSI interrupt Error: %d\n", err); | 492 | "Unable to allocate MSI interrupt Error: %d\n", err); |
| 433 | adapter->have_msi = FALSE; | 493 | adapter->have_msi = FALSE; |
| 434 | } | 494 | } |
| 435 | } | 495 | } |
| 436 | #endif | 496 | #endif |
| 437 | if((err = request_irq(adapter->pdev->irq, &e1000_intr, | 497 | if ((err = request_irq(adapter->pdev->irq, &e1000_intr, |
| 438 | SA_SHIRQ | SA_SAMPLE_RANDOM, | 498 | SA_SHIRQ | SA_SAMPLE_RANDOM, |
| 439 | netdev->name, netdev))) { | 499 | netdev->name, netdev))) { |
| 440 | DPRINTK(PROBE, ERR, | 500 | DPRINTK(PROBE, ERR, |
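The e1000_up() hunk above tries MSI first (only on MAC types newer than the 82547) and falls back to the legacy line interrupt if pci_enable_msi() fails, then installs a shared handler. A sketch of that fallback with the same 2.6-era flags and handler signature; the function and names are illustrative.

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>

/* Enable MSI if possible, then hook the (possibly shared) interrupt line.
 * Returns the request_irq() result; *have_msi records which mode is used. */
static int demo_hook_irq(struct pci_dev *pdev,
			 irqreturn_t (*handler)(int, void *, struct pt_regs *),
			 const char *name, void *dev_id, int *have_msi)
{
	*have_msi = (pci_enable_msi(pdev) == 0);
	if (!*have_msi)
		printk(KERN_ERR "%s: MSI unavailable, using legacy interrupt\n",
		       name);

	return request_irq(pdev->irq, handler,
			   SA_SHIRQ | SA_SAMPLE_RANDOM, name, dev_id);
}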
| @@ -471,7 +531,7 @@ e1000_down(struct e1000_adapter *adapter) | |||
| 471 | #endif | 531 | #endif |
| 472 | free_irq(adapter->pdev->irq, netdev); | 532 | free_irq(adapter->pdev->irq, netdev); |
| 473 | #ifdef CONFIG_PCI_MSI | 533 | #ifdef CONFIG_PCI_MSI |
| 474 | if(adapter->hw.mac_type > e1000_82547_rev_2 && | 534 | if (adapter->hw.mac_type > e1000_82547_rev_2 && |
| 475 | adapter->have_msi == TRUE) | 535 | adapter->have_msi == TRUE) |
| 476 | pci_disable_msi(adapter->pdev); | 536 | pci_disable_msi(adapter->pdev); |
| 477 | #endif | 537 | #endif |
| @@ -537,12 +597,12 @@ e1000_reset(struct e1000_adapter *adapter) | |||
| 537 | break; | 597 | break; |
| 538 | } | 598 | } |
| 539 | 599 | ||
| 540 | if((adapter->hw.mac_type != e1000_82573) && | 600 | if ((adapter->hw.mac_type != e1000_82573) && |
| 541 | (adapter->netdev->mtu > E1000_RXBUFFER_8192)) | 601 | (adapter->netdev->mtu > E1000_RXBUFFER_8192)) |
| 542 | pba -= 8; /* allocate more FIFO for Tx */ | 602 | pba -= 8; /* allocate more FIFO for Tx */ |
| 543 | 603 | ||
| 544 | 604 | ||
| 545 | if(adapter->hw.mac_type == e1000_82547) { | 605 | if (adapter->hw.mac_type == e1000_82547) { |
| 546 | adapter->tx_fifo_head = 0; | 606 | adapter->tx_fifo_head = 0; |
| 547 | adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; | 607 | adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; |
| 548 | adapter->tx_fifo_size = | 608 | adapter->tx_fifo_size = |
| @@ -565,9 +625,9 @@ e1000_reset(struct e1000_adapter *adapter) | |||
| 565 | 625 | ||
| 566 | /* Allow time for pending master requests to run */ | 626 | /* Allow time for pending master requests to run */ |
| 567 | e1000_reset_hw(&adapter->hw); | 627 | e1000_reset_hw(&adapter->hw); |
| 568 | if(adapter->hw.mac_type >= e1000_82544) | 628 | if (adapter->hw.mac_type >= e1000_82544) |
| 569 | E1000_WRITE_REG(&adapter->hw, WUC, 0); | 629 | E1000_WRITE_REG(&adapter->hw, WUC, 0); |
| 570 | if(e1000_init_hw(&adapter->hw)) | 630 | if (e1000_init_hw(&adapter->hw)) |
| 571 | DPRINTK(PROBE, ERR, "Hardware Error\n"); | 631 | DPRINTK(PROBE, ERR, "Hardware Error\n"); |
| 572 | e1000_update_mng_vlan(adapter); | 632 | e1000_update_mng_vlan(adapter); |
| 573 | /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ | 633 | /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ |
| @@ -606,26 +666,26 @@ e1000_probe(struct pci_dev *pdev, | |||
| 606 | int i, err, pci_using_dac; | 666 | int i, err, pci_using_dac; |
| 607 | uint16_t eeprom_data; | 667 | uint16_t eeprom_data; |
| 608 | uint16_t eeprom_apme_mask = E1000_EEPROM_APME; | 668 | uint16_t eeprom_apme_mask = E1000_EEPROM_APME; |
| 609 | if((err = pci_enable_device(pdev))) | 669 | if ((err = pci_enable_device(pdev))) |
| 610 | return err; | 670 | return err; |
| 611 | 671 | ||
| 612 | if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) { | 672 | if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) { |
| 613 | pci_using_dac = 1; | 673 | pci_using_dac = 1; |
| 614 | } else { | 674 | } else { |
| 615 | if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) { | 675 | if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) { |
| 616 | E1000_ERR("No usable DMA configuration, aborting\n"); | 676 | E1000_ERR("No usable DMA configuration, aborting\n"); |
| 617 | return err; | 677 | return err; |
| 618 | } | 678 | } |
| 619 | pci_using_dac = 0; | 679 | pci_using_dac = 0; |
| 620 | } | 680 | } |
| 621 | 681 | ||
| 622 | if((err = pci_request_regions(pdev, e1000_driver_name))) | 682 | if ((err = pci_request_regions(pdev, e1000_driver_name))) |
| 623 | return err; | 683 | return err; |
| 624 | 684 | ||
| 625 | pci_set_master(pdev); | 685 | pci_set_master(pdev); |
| 626 | 686 | ||
| 627 | netdev = alloc_etherdev(sizeof(struct e1000_adapter)); | 687 | netdev = alloc_etherdev(sizeof(struct e1000_adapter)); |
| 628 | if(!netdev) { | 688 | if (!netdev) { |
| 629 | err = -ENOMEM; | 689 | err = -ENOMEM; |
| 630 | goto err_alloc_etherdev; | 690 | goto err_alloc_etherdev; |
| 631 | } | 691 | } |
| @@ -644,15 +704,15 @@ e1000_probe(struct pci_dev *pdev, | |||
| 644 | mmio_len = pci_resource_len(pdev, BAR_0); | 704 | mmio_len = pci_resource_len(pdev, BAR_0); |
| 645 | 705 | ||
| 646 | adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); | 706 | adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); |
| 647 | if(!adapter->hw.hw_addr) { | 707 | if (!adapter->hw.hw_addr) { |
| 648 | err = -EIO; | 708 | err = -EIO; |
| 649 | goto err_ioremap; | 709 | goto err_ioremap; |
| 650 | } | 710 | } |
| 651 | 711 | ||
| 652 | for(i = BAR_1; i <= BAR_5; i++) { | 712 | for (i = BAR_1; i <= BAR_5; i++) { |
| 653 | if(pci_resource_len(pdev, i) == 0) | 713 | if (pci_resource_len(pdev, i) == 0) |
| 654 | continue; | 714 | continue; |
| 655 | if(pci_resource_flags(pdev, i) & IORESOURCE_IO) { | 715 | if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { |
| 656 | adapter->hw.io_base = pci_resource_start(pdev, i); | 716 | adapter->hw.io_base = pci_resource_start(pdev, i); |
| 657 | break; | 717 | break; |
| 658 | } | 718 | } |
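After mapping the memory BAR, the loop above walks the remaining BARs and picks the first one flagged IORESOURCE_IO as the port-I/O base. A sketch of that scan; the 1..5 range mirrors the driver's BAR_1..BAR_5 constants.

#include <linux/pci.h>
#include <linux/ioport.h>

/* Return the start of the first I/O-port BAR after BAR 0, or 0 if none. */
static unsigned long demo_find_io_bar(struct pci_dev *pdev)
{
	int i;

	for (i = 1; i <= 5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
		if (pci_resource_flags(pdev, i) & IORESOURCE_IO)
			return pci_resource_start(pdev, i);
	}
	return 0;
}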
| @@ -689,13 +749,13 @@ e1000_probe(struct pci_dev *pdev, | |||
| 689 | 749 | ||
| 690 | /* setup the private structure */ | 750 | /* setup the private structure */ |
| 691 | 751 | ||
| 692 | if((err = e1000_sw_init(adapter))) | 752 | if ((err = e1000_sw_init(adapter))) |
| 693 | goto err_sw_init; | 753 | goto err_sw_init; |
| 694 | 754 | ||
| 695 | if((err = e1000_check_phy_reset_block(&adapter->hw))) | 755 | if ((err = e1000_check_phy_reset_block(&adapter->hw))) |
| 696 | DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); | 756 | DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); |
| 697 | 757 | ||
| 698 | if(adapter->hw.mac_type >= e1000_82543) { | 758 | if (adapter->hw.mac_type >= e1000_82543) { |
| 699 | netdev->features = NETIF_F_SG | | 759 | netdev->features = NETIF_F_SG | |
| 700 | NETIF_F_HW_CSUM | | 760 | NETIF_F_HW_CSUM | |
| 701 | NETIF_F_HW_VLAN_TX | | 761 | NETIF_F_HW_VLAN_TX | |
| @@ -704,16 +764,16 @@ e1000_probe(struct pci_dev *pdev, | |||
| 704 | } | 764 | } |
| 705 | 765 | ||
| 706 | #ifdef NETIF_F_TSO | 766 | #ifdef NETIF_F_TSO |
| 707 | if((adapter->hw.mac_type >= e1000_82544) && | 767 | if ((adapter->hw.mac_type >= e1000_82544) && |
| 708 | (adapter->hw.mac_type != e1000_82547)) | 768 | (adapter->hw.mac_type != e1000_82547)) |
| 709 | netdev->features |= NETIF_F_TSO; | 769 | netdev->features |= NETIF_F_TSO; |
| 710 | 770 | ||
| 711 | #ifdef NETIF_F_TSO_IPV6 | 771 | #ifdef NETIF_F_TSO_IPV6 |
| 712 | if(adapter->hw.mac_type > e1000_82547_rev_2) | 772 | if (adapter->hw.mac_type > e1000_82547_rev_2) |
| 713 | netdev->features |= NETIF_F_TSO_IPV6; | 773 | netdev->features |= NETIF_F_TSO_IPV6; |
| 714 | #endif | 774 | #endif |
| 715 | #endif | 775 | #endif |
| 716 | if(pci_using_dac) | 776 | if (pci_using_dac) |
| 717 | netdev->features |= NETIF_F_HIGHDMA; | 777 | netdev->features |= NETIF_F_HIGHDMA; |
| 718 | 778 | ||
| 719 | /* hard_start_xmit is safe against parallel locking */ | 779 | /* hard_start_xmit is safe against parallel locking */ |
| @@ -721,14 +781,14 @@ e1000_probe(struct pci_dev *pdev, | |||
| 721 | 781 | ||
| 722 | adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw); | 782 | adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw); |
| 723 | 783 | ||
| 724 | /* before reading the EEPROM, reset the controller to | 784 | /* before reading the EEPROM, reset the controller to |
| 725 | * put the device in a known good starting state */ | 785 | * put the device in a known good starting state */ |
| 726 | 786 | ||
| 727 | e1000_reset_hw(&adapter->hw); | 787 | e1000_reset_hw(&adapter->hw); |
| 728 | 788 | ||
| 729 | /* make sure the EEPROM is good */ | 789 | /* make sure the EEPROM is good */ |
| 730 | 790 | ||
| 731 | if(e1000_validate_eeprom_checksum(&adapter->hw) < 0) { | 791 | if (e1000_validate_eeprom_checksum(&adapter->hw) < 0) { |
| 732 | DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n"); | 792 | DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n"); |
| 733 | err = -EIO; | 793 | err = -EIO; |
| 734 | goto err_eeprom; | 794 | goto err_eeprom; |
| @@ -736,12 +796,12 @@ e1000_probe(struct pci_dev *pdev, | |||
| 736 | 796 | ||
| 737 | /* copy the MAC address out of the EEPROM */ | 797 | /* copy the MAC address out of the EEPROM */ |
| 738 | 798 | ||
| 739 | if(e1000_read_mac_addr(&adapter->hw)) | 799 | if (e1000_read_mac_addr(&adapter->hw)) |
| 740 | DPRINTK(PROBE, ERR, "EEPROM Read Error\n"); | 800 | DPRINTK(PROBE, ERR, "EEPROM Read Error\n"); |
| 741 | memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); | 801 | memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); |
| 742 | memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len); | 802 | memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len); |
| 743 | 803 | ||
| 744 | if(!is_valid_ether_addr(netdev->perm_addr)) { | 804 | if (!is_valid_ether_addr(netdev->perm_addr)) { |
| 745 | DPRINTK(PROBE, ERR, "Invalid MAC Address\n"); | 805 | DPRINTK(PROBE, ERR, "Invalid MAC Address\n"); |
| 746 | err = -EIO; | 806 | err = -EIO; |
| 747 | goto err_eeprom; | 807 | goto err_eeprom; |
| @@ -781,7 +841,7 @@ e1000_probe(struct pci_dev *pdev, | |||
| 781 | * enable the ACPI Magic Packet filter | 841 | * enable the ACPI Magic Packet filter |
| 782 | */ | 842 | */ |
| 783 | 843 | ||
| 784 | switch(adapter->hw.mac_type) { | 844 | switch (adapter->hw.mac_type) { |
| 785 | case e1000_82542_rev2_0: | 845 | case e1000_82542_rev2_0: |
| 786 | case e1000_82542_rev2_1: | 846 | case e1000_82542_rev2_1: |
| 787 | case e1000_82543: | 847 | case e1000_82543: |
| @@ -794,7 +854,7 @@ e1000_probe(struct pci_dev *pdev, | |||
| 794 | case e1000_82546: | 854 | case e1000_82546: |
| 795 | case e1000_82546_rev_3: | 855 | case e1000_82546_rev_3: |
| 796 | case e1000_82571: | 856 | case e1000_82571: |
| 797 | if(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){ | 857 | if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){ |
| 798 | e1000_read_eeprom(&adapter->hw, | 858 | e1000_read_eeprom(&adapter->hw, |
| 799 | EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); | 859 | EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); |
| 800 | break; | 860 | break; |
| @@ -805,7 +865,7 @@ e1000_probe(struct pci_dev *pdev, | |||
| 805 | EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); | 865 | EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); |
| 806 | break; | 866 | break; |
| 807 | } | 867 | } |
| 808 | if(eeprom_data & eeprom_apme_mask) | 868 | if (eeprom_data & eeprom_apme_mask) |
| 809 | adapter->wol |= E1000_WUFC_MAG; | 869 | adapter->wol |= E1000_WUFC_MAG; |
| 810 | 870 | ||
| 811 | /* print bus type/speed/width info */ | 871 | /* print bus type/speed/width info */ |
| @@ -840,7 +900,7 @@ e1000_probe(struct pci_dev *pdev, | |||
| 840 | e1000_get_hw_control(adapter); | 900 | e1000_get_hw_control(adapter); |
| 841 | 901 | ||
| 842 | strcpy(netdev->name, "eth%d"); | 902 | strcpy(netdev->name, "eth%d"); |
| 843 | if((err = register_netdev(netdev))) | 903 | if ((err = register_netdev(netdev))) |
| 844 | goto err_register; | 904 | goto err_register; |
| 845 | 905 | ||
| 846 | DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n"); | 906 | DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n"); |
| @@ -881,10 +941,10 @@ e1000_remove(struct pci_dev *pdev) | |||
| 881 | 941 | ||
| 882 | flush_scheduled_work(); | 942 | flush_scheduled_work(); |
| 883 | 943 | ||
| 884 | if(adapter->hw.mac_type >= e1000_82540 && | 944 | if (adapter->hw.mac_type >= e1000_82540 && |
| 885 | adapter->hw.media_type == e1000_media_type_copper) { | 945 | adapter->hw.media_type == e1000_media_type_copper) { |
| 886 | manc = E1000_READ_REG(&adapter->hw, MANC); | 946 | manc = E1000_READ_REG(&adapter->hw, MANC); |
| 887 | if(manc & E1000_MANC_SMBUS_EN) { | 947 | if (manc & E1000_MANC_SMBUS_EN) { |
| 888 | manc |= E1000_MANC_ARP_EN; | 948 | manc |= E1000_MANC_ARP_EN; |
| 889 | E1000_WRITE_REG(&adapter->hw, MANC, manc); | 949 | E1000_WRITE_REG(&adapter->hw, MANC, manc); |
| 890 | } | 950 | } |
| @@ -900,7 +960,7 @@ e1000_remove(struct pci_dev *pdev) | |||
| 900 | __dev_put(&adapter->polling_netdev[i]); | 960 | __dev_put(&adapter->polling_netdev[i]); |
| 901 | #endif | 961 | #endif |
| 902 | 962 | ||
| 903 | if(!e1000_check_phy_reset_block(&adapter->hw)) | 963 | if (!e1000_check_phy_reset_block(&adapter->hw)) |
| 904 | e1000_phy_hw_reset(&adapter->hw); | 964 | e1000_phy_hw_reset(&adapter->hw); |
| 905 | 965 | ||
| 906 | kfree(adapter->tx_ring); | 966 | kfree(adapter->tx_ring); |
| @@ -959,19 +1019,19 @@ e1000_sw_init(struct e1000_adapter *adapter) | |||
| 959 | 1019 | ||
| 960 | /* identify the MAC */ | 1020 | /* identify the MAC */ |
| 961 | 1021 | ||
| 962 | if(e1000_set_mac_type(hw)) { | 1022 | if (e1000_set_mac_type(hw)) { |
| 963 | DPRINTK(PROBE, ERR, "Unknown MAC Type\n"); | 1023 | DPRINTK(PROBE, ERR, "Unknown MAC Type\n"); |
| 964 | return -EIO; | 1024 | return -EIO; |
| 965 | } | 1025 | } |
| 966 | 1026 | ||
| 967 | /* initialize eeprom parameters */ | 1027 | /* initialize eeprom parameters */ |
| 968 | 1028 | ||
| 969 | if(e1000_init_eeprom_params(hw)) { | 1029 | if (e1000_init_eeprom_params(hw)) { |
| 970 | E1000_ERR("EEPROM initialization failed\n"); | 1030 | E1000_ERR("EEPROM initialization failed\n"); |
| 971 | return -EIO; | 1031 | return -EIO; |
| 972 | } | 1032 | } |
| 973 | 1033 | ||
| 974 | switch(hw->mac_type) { | 1034 | switch (hw->mac_type) { |
| 975 | default: | 1035 | default: |
| 976 | break; | 1036 | break; |
| 977 | case e1000_82541: | 1037 | case e1000_82541: |
| @@ -990,7 +1050,7 @@ e1000_sw_init(struct e1000_adapter *adapter) | |||
| 990 | 1050 | ||
| 991 | /* Copper options */ | 1051 | /* Copper options */ |
| 992 | 1052 | ||
| 993 | if(hw->media_type == e1000_media_type_copper) { | 1053 | if (hw->media_type == e1000_media_type_copper) { |
| 994 | hw->mdix = AUTO_ALL_MODES; | 1054 | hw->mdix = AUTO_ALL_MODES; |
| 995 | hw->disable_polarity_correction = FALSE; | 1055 | hw->disable_polarity_correction = FALSE; |
| 996 | hw->master_slave = E1000_MASTER_SLAVE; | 1056 | hw->master_slave = E1000_MASTER_SLAVE; |
| @@ -1166,10 +1226,10 @@ e1000_open(struct net_device *netdev) | |||
| 1166 | if ((err = e1000_setup_all_rx_resources(adapter))) | 1226 | if ((err = e1000_setup_all_rx_resources(adapter))) |
| 1167 | goto err_setup_rx; | 1227 | goto err_setup_rx; |
| 1168 | 1228 | ||
| 1169 | if((err = e1000_up(adapter))) | 1229 | if ((err = e1000_up(adapter))) |
| 1170 | goto err_up; | 1230 | goto err_up; |
| 1171 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; | 1231 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; |
| 1172 | if((adapter->hw.mng_cookie.status & | 1232 | if ((adapter->hw.mng_cookie.status & |
| 1173 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { | 1233 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { |
| 1174 | e1000_update_mng_vlan(adapter); | 1234 | e1000_update_mng_vlan(adapter); |
| 1175 | } | 1235 | } |
| @@ -1214,7 +1274,7 @@ e1000_close(struct net_device *netdev) | |||
| 1214 | e1000_free_all_tx_resources(adapter); | 1274 | e1000_free_all_tx_resources(adapter); |
| 1215 | e1000_free_all_rx_resources(adapter); | 1275 | e1000_free_all_rx_resources(adapter); |
| 1216 | 1276 | ||
| 1217 | if((adapter->hw.mng_cookie.status & | 1277 | if ((adapter->hw.mng_cookie.status & |
| 1218 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { | 1278 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { |
| 1219 | e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); | 1279 | e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); |
| 1220 | } | 1280 | } |
| @@ -1269,7 +1329,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter, | |||
| 1269 | size = sizeof(struct e1000_buffer) * txdr->count; | 1329 | size = sizeof(struct e1000_buffer) * txdr->count; |
| 1270 | 1330 | ||
| 1271 | txdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus)); | 1331 | txdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus)); |
| 1272 | if(!txdr->buffer_info) { | 1332 | if (!txdr->buffer_info) { |
| 1273 | DPRINTK(PROBE, ERR, | 1333 | DPRINTK(PROBE, ERR, |
| 1274 | "Unable to allocate memory for the transmit descriptor ring\n"); | 1334 | "Unable to allocate memory for the transmit descriptor ring\n"); |
| 1275 | return -ENOMEM; | 1335 | return -ENOMEM; |
| @@ -1282,7 +1342,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter, | |||
| 1282 | E1000_ROUNDUP(txdr->size, 4096); | 1342 | E1000_ROUNDUP(txdr->size, 4096); |
| 1283 | 1343 | ||
| 1284 | txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); | 1344 | txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); |
| 1285 | if(!txdr->desc) { | 1345 | if (!txdr->desc) { |
| 1286 | setup_tx_desc_die: | 1346 | setup_tx_desc_die: |
| 1287 | vfree(txdr->buffer_info); | 1347 | vfree(txdr->buffer_info); |
| 1288 | DPRINTK(PROBE, ERR, | 1348 | DPRINTK(PROBE, ERR, |
| @@ -1298,8 +1358,8 @@ setup_tx_desc_die: | |||
| 1298 | "at %p\n", txdr->size, txdr->desc); | 1358 | "at %p\n", txdr->size, txdr->desc); |
| 1299 | /* Try again, without freeing the previous */ | 1359 | /* Try again, without freeing the previous */ |
| 1300 | txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); | 1360 | txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); |
| 1301 | if(!txdr->desc) { | ||
| 1302 | /* Failed allocation, critical failure */ | 1361 | /* Failed allocation, critical failure */ |
| 1362 | if (!txdr->desc) { | ||
| 1303 | pci_free_consistent(pdev, txdr->size, olddesc, olddma); | 1363 | pci_free_consistent(pdev, txdr->size, olddesc, olddma); |
| 1304 | goto setup_tx_desc_die; | 1364 | goto setup_tx_desc_die; |
| 1305 | } | 1365 | } |
| @@ -1499,7 +1559,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter, | |||
| 1499 | 1559 | ||
| 1500 | size = sizeof(struct e1000_ps_page) * rxdr->count; | 1560 | size = sizeof(struct e1000_ps_page) * rxdr->count; |
| 1501 | rxdr->ps_page = kmalloc(size, GFP_KERNEL); | 1561 | rxdr->ps_page = kmalloc(size, GFP_KERNEL); |
| 1502 | if(!rxdr->ps_page) { | 1562 | if (!rxdr->ps_page) { |
| 1503 | vfree(rxdr->buffer_info); | 1563 | vfree(rxdr->buffer_info); |
| 1504 | DPRINTK(PROBE, ERR, | 1564 | DPRINTK(PROBE, ERR, |
| 1505 | "Unable to allocate memory for the receive descriptor ring\n"); | 1565 | "Unable to allocate memory for the receive descriptor ring\n"); |
| @@ -1509,7 +1569,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter, | |||
| 1509 | 1569 | ||
| 1510 | size = sizeof(struct e1000_ps_page_dma) * rxdr->count; | 1570 | size = sizeof(struct e1000_ps_page_dma) * rxdr->count; |
| 1511 | rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL); | 1571 | rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL); |
| 1512 | if(!rxdr->ps_page_dma) { | 1572 | if (!rxdr->ps_page_dma) { |
| 1513 | vfree(rxdr->buffer_info); | 1573 | vfree(rxdr->buffer_info); |
| 1514 | kfree(rxdr->ps_page); | 1574 | kfree(rxdr->ps_page); |
| 1515 | DPRINTK(PROBE, ERR, | 1575 | DPRINTK(PROBE, ERR, |
| @@ -1518,7 +1578,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter, | |||
| 1518 | } | 1578 | } |
| 1519 | memset(rxdr->ps_page_dma, 0, size); | 1579 | memset(rxdr->ps_page_dma, 0, size); |
| 1520 | 1580 | ||
| 1521 | if(adapter->hw.mac_type <= e1000_82547_rev_2) | 1581 | if (adapter->hw.mac_type <= e1000_82547_rev_2) |
| 1522 | desc_len = sizeof(struct e1000_rx_desc); | 1582 | desc_len = sizeof(struct e1000_rx_desc); |
| 1523 | else | 1583 | else |
| 1524 | desc_len = sizeof(union e1000_rx_desc_packet_split); | 1584 | desc_len = sizeof(union e1000_rx_desc_packet_split); |
| @@ -1621,7 +1681,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter) | |||
| 1621 | { | 1681 | { |
| 1622 | uint32_t rctl, rfctl; | 1682 | uint32_t rctl, rfctl; |
| 1623 | uint32_t psrctl = 0; | 1683 | uint32_t psrctl = 0; |
| 1624 | #ifdef CONFIG_E1000_PACKET_SPLIT | 1684 | #ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT |
| 1625 | uint32_t pages = 0; | 1685 | uint32_t pages = 0; |
| 1626 | #endif | 1686 | #endif |
| 1627 | 1687 | ||
| @@ -1647,32 +1707,17 @@ e1000_setup_rctl(struct e1000_adapter *adapter) | |||
| 1647 | rctl |= E1000_RCTL_LPE; | 1707 | rctl |= E1000_RCTL_LPE; |
| 1648 | 1708 | ||
| 1649 | /* Setup buffer sizes */ | 1709 | /* Setup buffer sizes */ |
| 1650 | if(adapter->hw.mac_type >= e1000_82571) { | 1710 | if (adapter->hw.mac_type >= e1000_82571) { |
| 1651 | /* We can now specify buffers in 1K increments. | 1711 | /* We can now specify buffers in 1K increments. |
| 1652 | * BSIZE and BSEX are ignored in this case. */ | 1712 | * BSIZE and BSEX are ignored in this case. */ |
| 1653 | rctl |= adapter->rx_buffer_len << 0x11; | 1713 | rctl |= adapter->rx_buffer_len << 0x11; |
| 1654 | } else { | 1714 | } else { |
| 1655 | rctl &= ~E1000_RCTL_SZ_4096; | 1715 | rctl &= ~E1000_RCTL_SZ_4096; |
| 1656 | rctl |= E1000_RCTL_BSEX; | 1716 | rctl &= ~E1000_RCTL_BSEX; |
| 1657 | switch (adapter->rx_buffer_len) { | 1717 | rctl |= E1000_RCTL_SZ_2048; |
| 1658 | case E1000_RXBUFFER_2048: | ||
| 1659 | default: | ||
| 1660 | rctl |= E1000_RCTL_SZ_2048; | ||
| 1661 | rctl &= ~E1000_RCTL_BSEX; | ||
| 1662 | break; | ||
| 1663 | case E1000_RXBUFFER_4096: | ||
| 1664 | rctl |= E1000_RCTL_SZ_4096; | ||
| 1665 | break; | ||
| 1666 | case E1000_RXBUFFER_8192: | ||
| 1667 | rctl |= E1000_RCTL_SZ_8192; | ||
| 1668 | break; | ||
| 1669 | case E1000_RXBUFFER_16384: | ||
| 1670 | rctl |= E1000_RCTL_SZ_16384; | ||
| 1671 | break; | ||
| 1672 | } | ||
| 1673 | } | 1718 | } |
| 1674 | 1719 | ||
| 1675 | #ifdef CONFIG_E1000_PACKET_SPLIT | 1720 | #ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT |
| 1676 | /* 82571 and greater support packet-split where the protocol | 1721 | /* 82571 and greater support packet-split where the protocol |
| 1677 | * header is placed in skb->data and the packet data is | 1722 | * header is placed in skb->data and the packet data is |
| 1678 | * placed in pages hanging off of skb_shinfo(skb)->nr_frags. | 1723 | * placed in pages hanging off of skb_shinfo(skb)->nr_frags. |
| @@ -1696,7 +1741,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter) | |||
| 1696 | E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl); | 1741 | E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl); |
| 1697 | 1742 | ||
| 1698 | rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC; | 1743 | rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC; |
| 1699 | 1744 | ||
| 1700 | psrctl |= adapter->rx_ps_bsize0 >> | 1745 | psrctl |= adapter->rx_ps_bsize0 >> |
| 1701 | E1000_PSRCTL_BSIZE0_SHIFT; | 1746 | E1000_PSRCTL_BSIZE0_SHIFT; |
| 1702 | 1747 | ||
| @@ -1758,7 +1803,7 @@ e1000_configure_rx(struct e1000_adapter *adapter) | |||
| 1758 | 1803 | ||
| 1759 | if (hw->mac_type >= e1000_82540) { | 1804 | if (hw->mac_type >= e1000_82540) { |
| 1760 | E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay); | 1805 | E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay); |
| 1761 | if(adapter->itr > 1) | 1806 | if (adapter->itr > 1) |
| 1762 | E1000_WRITE_REG(hw, ITR, | 1807 | E1000_WRITE_REG(hw, ITR, |
| 1763 | 1000000000 / (adapter->itr * 256)); | 1808 | 1000000000 / (adapter->itr * 256)); |
| 1764 | } | 1809 | } |
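For reference on the write above: the ITR register is programmed as an interval rather than in interrupts per second, which is what the `1000000000 / (adapter->itr * 256)` conversion does. Taking the commonly documented 256 ns granularity as an assumption, a target of 8000 interrupts per second becomes 1000000000 / (8000 * 256) ≈ 488, i.e. at most one interrupt roughly every 125 µs.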
| @@ -1847,13 +1892,13 @@ e1000_configure_rx(struct e1000_adapter *adapter) | |||
| 1847 | /* Enable 82543 Receive Checksum Offload for TCP and UDP */ | 1892 | /* Enable 82543 Receive Checksum Offload for TCP and UDP */ |
| 1848 | if (hw->mac_type >= e1000_82543) { | 1893 | if (hw->mac_type >= e1000_82543) { |
| 1849 | rxcsum = E1000_READ_REG(hw, RXCSUM); | 1894 | rxcsum = E1000_READ_REG(hw, RXCSUM); |
| 1850 | if(adapter->rx_csum == TRUE) { | 1895 | if (adapter->rx_csum == TRUE) { |
| 1851 | rxcsum |= E1000_RXCSUM_TUOFL; | 1896 | rxcsum |= E1000_RXCSUM_TUOFL; |
| 1852 | 1897 | ||
| 1853 | /* Enable 82571 IPv4 payload checksum for UDP fragments | 1898 | /* Enable 82571 IPv4 payload checksum for UDP fragments |
| 1854 | * Must be used in conjunction with packet-split. */ | 1899 | * Must be used in conjunction with packet-split. */ |
| 1855 | if ((hw->mac_type >= e1000_82571) && | 1900 | if ((hw->mac_type >= e1000_82571) && |
| 1856 | (adapter->rx_ps_pages)) { | 1901 | (adapter->rx_ps_pages)) { |
| 1857 | rxcsum |= E1000_RXCSUM_IPPCSE; | 1902 | rxcsum |= E1000_RXCSUM_IPPCSE; |
| 1858 | } | 1903 | } |
| 1859 | } else { | 1904 | } else { |
| @@ -1915,7 +1960,7 @@ static inline void | |||
| 1915 | e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, | 1960 | e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, |
| 1916 | struct e1000_buffer *buffer_info) | 1961 | struct e1000_buffer *buffer_info) |
| 1917 | { | 1962 | { |
| 1918 | if(buffer_info->dma) { | 1963 | if (buffer_info->dma) { |
| 1919 | pci_unmap_page(adapter->pdev, | 1964 | pci_unmap_page(adapter->pdev, |
| 1920 | buffer_info->dma, | 1965 | buffer_info->dma, |
| 1921 | buffer_info->length, | 1966 | buffer_info->length, |
| @@ -1942,7 +1987,7 @@ e1000_clean_tx_ring(struct e1000_adapter *adapter, | |||
| 1942 | 1987 | ||
| 1943 | /* Free all the Tx ring sk_buffs */ | 1988 | /* Free all the Tx ring sk_buffs */ |
| 1944 | 1989 | ||
| 1945 | for(i = 0; i < tx_ring->count; i++) { | 1990 | for (i = 0; i < tx_ring->count; i++) { |
| 1946 | buffer_info = &tx_ring->buffer_info[i]; | 1991 | buffer_info = &tx_ring->buffer_info[i]; |
| 1947 | e1000_unmap_and_free_tx_resource(adapter, buffer_info); | 1992 | e1000_unmap_and_free_tx_resource(adapter, buffer_info); |
| 1948 | } | 1993 | } |
| @@ -2038,10 +2083,9 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter, | |||
| 2038 | unsigned int i, j; | 2083 | unsigned int i, j; |
| 2039 | 2084 | ||
| 2040 | /* Free all the Rx ring sk_buffs */ | 2085 | /* Free all the Rx ring sk_buffs */ |
| 2041 | 2086 | for (i = 0; i < rx_ring->count; i++) { | |
| 2042 | for(i = 0; i < rx_ring->count; i++) { | ||
| 2043 | buffer_info = &rx_ring->buffer_info[i]; | 2087 | buffer_info = &rx_ring->buffer_info[i]; |
| 2044 | if(buffer_info->skb) { | 2088 | if (buffer_info->skb) { |
| 2045 | pci_unmap_single(pdev, | 2089 | pci_unmap_single(pdev, |
| 2046 | buffer_info->dma, | 2090 | buffer_info->dma, |
| 2047 | buffer_info->length, | 2091 | buffer_info->length, |
| @@ -2122,7 +2166,7 @@ e1000_enter_82542_rst(struct e1000_adapter *adapter) | |||
| 2122 | E1000_WRITE_FLUSH(&adapter->hw); | 2166 | E1000_WRITE_FLUSH(&adapter->hw); |
| 2123 | mdelay(5); | 2167 | mdelay(5); |
| 2124 | 2168 | ||
| 2125 | if(netif_running(netdev)) | 2169 | if (netif_running(netdev)) |
| 2126 | e1000_clean_all_rx_rings(adapter); | 2170 | e1000_clean_all_rx_rings(adapter); |
| 2127 | } | 2171 | } |
| 2128 | 2172 | ||
| @@ -2138,13 +2182,13 @@ e1000_leave_82542_rst(struct e1000_adapter *adapter) | |||
| 2138 | E1000_WRITE_FLUSH(&adapter->hw); | 2182 | E1000_WRITE_FLUSH(&adapter->hw); |
| 2139 | mdelay(5); | 2183 | mdelay(5); |
| 2140 | 2184 | ||
| 2141 | if(adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE) | 2185 | if (adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE) |
| 2142 | e1000_pci_set_mwi(&adapter->hw); | 2186 | e1000_pci_set_mwi(&adapter->hw); |
| 2143 | 2187 | ||
| 2144 | if(netif_running(netdev)) { | 2188 | if (netif_running(netdev)) { |
| 2145 | e1000_configure_rx(adapter); | ||
| 2146 | /* No need to loop, because 82542 supports only 1 queue */ | 2189 | /* No need to loop, because 82542 supports only 1 queue */ |
| 2147 | struct e1000_rx_ring *ring = &adapter->rx_ring[0]; | 2190 | struct e1000_rx_ring *ring = &adapter->rx_ring[0]; |
| 2191 | e1000_configure_rx(adapter); | ||
| 2148 | adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); | 2192 | adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); |
| 2149 | } | 2193 | } |
| 2150 | } | 2194 | } |
| @@ -2163,12 +2207,12 @@ e1000_set_mac(struct net_device *netdev, void *p) | |||
| 2163 | struct e1000_adapter *adapter = netdev_priv(netdev); | 2207 | struct e1000_adapter *adapter = netdev_priv(netdev); |
| 2164 | struct sockaddr *addr = p; | 2208 | struct sockaddr *addr = p; |
| 2165 | 2209 | ||
| 2166 | if(!is_valid_ether_addr(addr->sa_data)) | 2210 | if (!is_valid_ether_addr(addr->sa_data)) |
| 2167 | return -EADDRNOTAVAIL; | 2211 | return -EADDRNOTAVAIL; |
| 2168 | 2212 | ||
| 2169 | /* 82542 2.0 needs to be in reset to write receive address registers */ | 2213 | /* 82542 2.0 needs to be in reset to write receive address registers */ |
| 2170 | 2214 | ||
| 2171 | if(adapter->hw.mac_type == e1000_82542_rev2_0) | 2215 | if (adapter->hw.mac_type == e1000_82542_rev2_0) |
| 2172 | e1000_enter_82542_rst(adapter); | 2216 | e1000_enter_82542_rst(adapter); |
| 2173 | 2217 | ||
| 2174 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | 2218 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); |
| @@ -2182,17 +2226,17 @@ e1000_set_mac(struct net_device *netdev, void *p) | |||
| 2182 | /* activate the work around */ | 2226 | /* activate the work around */ |
| 2183 | adapter->hw.laa_is_present = 1; | 2227 | adapter->hw.laa_is_present = 1; |
| 2184 | 2228 | ||
| 2185 | /* Hold a copy of the LAA in RAR[14] This is done so that | 2229 | /* Hold a copy of the LAA in RAR[14] This is done so that |
| 2186 | * between the time RAR[0] gets clobbered and the time it | 2230 | * between the time RAR[0] gets clobbered and the time it |
| 2187 | * gets fixed (in e1000_watchdog), the actual LAA is in one | 2231 | * gets fixed (in e1000_watchdog), the actual LAA is in one |
| 2188 | * of the RARs and no incoming packets directed to this port | 2232 | * of the RARs and no incoming packets directed to this port |
| 2189 | * are dropped. Eventually the LAA will be in RAR[0] and | 2233 | * are dropped. Eventually the LAA will be in RAR[0] and |
| 2190 | * RAR[14] */ | 2234 | * RAR[14] */ |
| 2191 | e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, | 2235 | e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, |
| 2192 | E1000_RAR_ENTRIES - 1); | 2236 | E1000_RAR_ENTRIES - 1); |
| 2193 | } | 2237 | } |
| 2194 | 2238 | ||
| 2195 | if(adapter->hw.mac_type == e1000_82542_rev2_0) | 2239 | if (adapter->hw.mac_type == e1000_82542_rev2_0) |
| 2196 | e1000_leave_82542_rst(adapter); | 2240 | e1000_leave_82542_rst(adapter); |
| 2197 | 2241 | ||
| 2198 | return 0; | 2242 | return 0; |
| @@ -2226,9 +2270,9 @@ e1000_set_multi(struct net_device *netdev) | |||
| 2226 | 2270 | ||
| 2227 | rctl = E1000_READ_REG(hw, RCTL); | 2271 | rctl = E1000_READ_REG(hw, RCTL); |
| 2228 | 2272 | ||
| 2229 | if(netdev->flags & IFF_PROMISC) { | 2273 | if (netdev->flags & IFF_PROMISC) { |
| 2230 | rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); | 2274 | rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); |
| 2231 | } else if(netdev->flags & IFF_ALLMULTI) { | 2275 | } else if (netdev->flags & IFF_ALLMULTI) { |
| 2232 | rctl |= E1000_RCTL_MPE; | 2276 | rctl |= E1000_RCTL_MPE; |
| 2233 | rctl &= ~E1000_RCTL_UPE; | 2277 | rctl &= ~E1000_RCTL_UPE; |
| 2234 | } else { | 2278 | } else { |
| @@ -2239,7 +2283,7 @@ e1000_set_multi(struct net_device *netdev) | |||
| 2239 | 2283 | ||
| 2240 | /* 82542 2.0 needs to be in reset to write receive address registers */ | 2284 | /* 82542 2.0 needs to be in reset to write receive address registers */ |
| 2241 | 2285 | ||
| 2242 | if(hw->mac_type == e1000_82542_rev2_0) | 2286 | if (hw->mac_type == e1000_82542_rev2_0) |
| 2243 | e1000_enter_82542_rst(adapter); | 2287 | e1000_enter_82542_rst(adapter); |
| 2244 | 2288 | ||
| 2245 | /* load the first 14 multicast addresses into the exact filters 1-14 | 2289 | /* load the first 14 multicast addresses into the exact filters 1-14 |
| @@ -2249,7 +2293,7 @@ e1000_set_multi(struct net_device *netdev) | |||
| 2249 | */ | 2293 | */ |
| 2250 | mc_ptr = netdev->mc_list; | 2294 | mc_ptr = netdev->mc_list; |
| 2251 | 2295 | ||
| 2252 | for(i = 1; i < rar_entries; i++) { | 2296 | for (i = 1; i < rar_entries; i++) { |
| 2253 | if (mc_ptr) { | 2297 | if (mc_ptr) { |
| 2254 | e1000_rar_set(hw, mc_ptr->dmi_addr, i); | 2298 | e1000_rar_set(hw, mc_ptr->dmi_addr, i); |
| 2255 | mc_ptr = mc_ptr->next; | 2299 | mc_ptr = mc_ptr->next; |
| @@ -2261,17 +2305,17 @@ e1000_set_multi(struct net_device *netdev) | |||
| 2261 | 2305 | ||
| 2262 | /* clear the old settings from the multicast hash table */ | 2306 | /* clear the old settings from the multicast hash table */ |
| 2263 | 2307 | ||
| 2264 | for(i = 0; i < E1000_NUM_MTA_REGISTERS; i++) | 2308 | for (i = 0; i < E1000_NUM_MTA_REGISTERS; i++) |
| 2265 | E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); | 2309 | E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); |
| 2266 | 2310 | ||
| 2267 | /* load any remaining addresses into the hash table */ | 2311 | /* load any remaining addresses into the hash table */ |
| 2268 | 2312 | ||
| 2269 | for(; mc_ptr; mc_ptr = mc_ptr->next) { | 2313 | for (; mc_ptr; mc_ptr = mc_ptr->next) { |
| 2270 | hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr); | 2314 | hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr); |
| 2271 | e1000_mta_set(hw, hash_value); | 2315 | e1000_mta_set(hw, hash_value); |
| 2272 | } | 2316 | } |
| 2273 | 2317 | ||
| 2274 | if(hw->mac_type == e1000_82542_rev2_0) | 2318 | if (hw->mac_type == e1000_82542_rev2_0) |
| 2275 | e1000_leave_82542_rst(adapter); | 2319 | e1000_leave_82542_rst(adapter); |
| 2276 | } | 2320 | } |
| 2277 | 2321 | ||
| @@ -2297,8 +2341,8 @@ e1000_82547_tx_fifo_stall(unsigned long data) | |||
| 2297 | struct net_device *netdev = adapter->netdev; | 2341 | struct net_device *netdev = adapter->netdev; |
| 2298 | uint32_t tctl; | 2342 | uint32_t tctl; |
| 2299 | 2343 | ||
| 2300 | if(atomic_read(&adapter->tx_fifo_stall)) { | 2344 | if (atomic_read(&adapter->tx_fifo_stall)) { |
| 2301 | if((E1000_READ_REG(&adapter->hw, TDT) == | 2345 | if ((E1000_READ_REG(&adapter->hw, TDT) == |
| 2302 | E1000_READ_REG(&adapter->hw, TDH)) && | 2346 | E1000_READ_REG(&adapter->hw, TDH)) && |
| 2303 | (E1000_READ_REG(&adapter->hw, TDFT) == | 2347 | (E1000_READ_REG(&adapter->hw, TDFT) == |
| 2304 | E1000_READ_REG(&adapter->hw, TDFH)) && | 2348 | E1000_READ_REG(&adapter->hw, TDFH)) && |
| @@ -2350,18 +2394,18 @@ e1000_watchdog_task(struct e1000_adapter *adapter) | |||
| 2350 | e1000_check_for_link(&adapter->hw); | 2394 | e1000_check_for_link(&adapter->hw); |
| 2351 | if (adapter->hw.mac_type == e1000_82573) { | 2395 | if (adapter->hw.mac_type == e1000_82573) { |
| 2352 | e1000_enable_tx_pkt_filtering(&adapter->hw); | 2396 | e1000_enable_tx_pkt_filtering(&adapter->hw); |
| 2353 | if(adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id) | 2397 | if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id) |
| 2354 | e1000_update_mng_vlan(adapter); | 2398 | e1000_update_mng_vlan(adapter); |
| 2355 | } | 2399 | } |
| 2356 | 2400 | ||
| 2357 | if((adapter->hw.media_type == e1000_media_type_internal_serdes) && | 2401 | if ((adapter->hw.media_type == e1000_media_type_internal_serdes) && |
| 2358 | !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE)) | 2402 | !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE)) |
| 2359 | link = !adapter->hw.serdes_link_down; | 2403 | link = !adapter->hw.serdes_link_down; |
| 2360 | else | 2404 | else |
| 2361 | link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU; | 2405 | link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU; |
| 2362 | 2406 | ||
| 2363 | if(link) { | 2407 | if (link) { |
| 2364 | if(!netif_carrier_ok(netdev)) { | 2408 | if (!netif_carrier_ok(netdev)) { |
| 2365 | e1000_get_speed_and_duplex(&adapter->hw, | 2409 | e1000_get_speed_and_duplex(&adapter->hw, |
| 2366 | &adapter->link_speed, | 2410 | &adapter->link_speed, |
| 2367 | &adapter->link_duplex); | 2411 | &adapter->link_duplex); |
| @@ -2392,7 +2436,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter) | |||
| 2392 | adapter->smartspeed = 0; | 2436 | adapter->smartspeed = 0; |
| 2393 | } | 2437 | } |
| 2394 | } else { | 2438 | } else { |
| 2395 | if(netif_carrier_ok(netdev)) { | 2439 | if (netif_carrier_ok(netdev)) { |
| 2396 | adapter->link_speed = 0; | 2440 | adapter->link_speed = 0; |
| 2397 | adapter->link_duplex = 0; | 2441 | adapter->link_duplex = 0; |
| 2398 | DPRINTK(LINK, INFO, "NIC Link is Down\n"); | 2442 | DPRINTK(LINK, INFO, "NIC Link is Down\n"); |
| @@ -2432,12 +2476,12 @@ e1000_watchdog_task(struct e1000_adapter *adapter) | |||
| 2432 | } | 2476 | } |
| 2433 | 2477 | ||
| 2434 | /* Dynamic mode for Interrupt Throttle Rate (ITR) */ | 2478 | /* Dynamic mode for Interrupt Throttle Rate (ITR) */ |
| 2435 | if(adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) { | 2479 | if (adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) { |
| 2436 | /* Symmetric Tx/Rx gets a reduced ITR=2000; Total | 2480 | /* Symmetric Tx/Rx gets a reduced ITR=2000; Total |
| 2437 | * asymmetrical Tx or Rx gets ITR=8000; everyone | 2481 | * asymmetrical Tx or Rx gets ITR=8000; everyone |
| 2438 | * else is between 2000-8000. */ | 2482 | * else is between 2000-8000. */ |
| 2439 | uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000; | 2483 | uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000; |
| 2440 | uint32_t dif = (adapter->gotcl > adapter->gorcl ? | 2484 | uint32_t dif = (adapter->gotcl > adapter->gorcl ? |
| 2441 | adapter->gotcl - adapter->gorcl : | 2485 | adapter->gotcl - adapter->gorcl : |
| 2442 | adapter->gorcl - adapter->gotcl) / 10000; | 2486 | adapter->gorcl - adapter->gotcl) / 10000; |
| 2443 | uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; | 2487 | uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; |
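A quick worked example of the heuristic above, with invented traffic counters: `gotcl = 60000` and `gorcl = 20000` give `goc = 8` and `dif = 4`, so `itr = 4 * 6000 / 8 + 2000 = 5000`. Perfectly symmetric traffic gives `dif = 0` and therefore 2000, while completely one-sided traffic gives `dif = goc` and therefore 8000, matching the comment.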
| @@ -2450,7 +2494,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter) | |||
| 2450 | /* Force detection of hung controller every watchdog period */ | 2494 | /* Force detection of hung controller every watchdog period */ |
| 2451 | adapter->detect_tx_hung = TRUE; | 2495 | adapter->detect_tx_hung = TRUE; |
| 2452 | 2496 | ||
| 2453 | /* With 82571 controllers, LAA may be overwritten due to controller | 2497 | /* With 82571 controllers, LAA may be overwritten due to controller |
| 2454 | * reset from the other port. Set the appropriate LAA in RAR[0] */ | 2498 | * reset from the other port. Set the appropriate LAA in RAR[0] */ |
| 2455 | if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present) | 2499 | if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present) |
| 2456 | e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0); | 2500 | e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0); |
| @@ -2479,7 +2523,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
| 2479 | uint8_t ipcss, ipcso, tucss, tucso, hdr_len; | 2523 | uint8_t ipcss, ipcso, tucss, tucso, hdr_len; |
| 2480 | int err; | 2524 | int err; |
| 2481 | 2525 | ||
| 2482 | if(skb_shinfo(skb)->tso_size) { | 2526 | if (skb_shinfo(skb)->tso_size) { |
| 2483 | if (skb_header_cloned(skb)) { | 2527 | if (skb_header_cloned(skb)) { |
| 2484 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | 2528 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); |
| 2485 | if (err) | 2529 | if (err) |
| @@ -2488,7 +2532,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
| 2488 | 2532 | ||
| 2489 | hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); | 2533 | hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); |
| 2490 | mss = skb_shinfo(skb)->tso_size; | 2534 | mss = skb_shinfo(skb)->tso_size; |
| 2491 | if(skb->protocol == ntohs(ETH_P_IP)) { | 2535 | if (skb->protocol == ntohs(ETH_P_IP)) { |
| 2492 | skb->nh.iph->tot_len = 0; | 2536 | skb->nh.iph->tot_len = 0; |
| 2493 | skb->nh.iph->check = 0; | 2537 | skb->nh.iph->check = 0; |
| 2494 | skb->h.th->check = | 2538 | skb->h.th->check = |
| @@ -2500,7 +2544,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
| 2500 | cmd_length = E1000_TXD_CMD_IP; | 2544 | cmd_length = E1000_TXD_CMD_IP; |
| 2501 | ipcse = skb->h.raw - skb->data - 1; | 2545 | ipcse = skb->h.raw - skb->data - 1; |
| 2502 | #ifdef NETIF_F_TSO_IPV6 | 2546 | #ifdef NETIF_F_TSO_IPV6 |
| 2503 | } else if(skb->protocol == ntohs(ETH_P_IPV6)) { | 2547 | } else if (skb->protocol == ntohs(ETH_P_IPV6)) { |
| 2504 | skb->nh.ipv6h->payload_len = 0; | 2548 | skb->nh.ipv6h->payload_len = 0; |
| 2505 | skb->h.th->check = | 2549 | skb->h.th->check = |
| 2506 | ~csum_ipv6_magic(&skb->nh.ipv6h->saddr, | 2550 | ~csum_ipv6_magic(&skb->nh.ipv6h->saddr, |
| @@ -2555,7 +2599,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
| 2555 | unsigned int i; | 2599 | unsigned int i; |
| 2556 | uint8_t css; | 2600 | uint8_t css; |
| 2557 | 2601 | ||
| 2558 | if(likely(skb->ip_summed == CHECKSUM_HW)) { | 2602 | if (likely(skb->ip_summed == CHECKSUM_HW)) { |
| 2559 | css = skb->h.raw - skb->data; | 2603 | css = skb->h.raw - skb->data; |
| 2560 | 2604 | ||
| 2561 | i = tx_ring->next_to_use; | 2605 | i = tx_ring->next_to_use; |
| @@ -2595,7 +2639,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
| 2595 | 2639 | ||
| 2596 | i = tx_ring->next_to_use; | 2640 | i = tx_ring->next_to_use; |
| 2597 | 2641 | ||
| 2598 | while(len) { | 2642 | while (len) { |
| 2599 | buffer_info = &tx_ring->buffer_info[i]; | 2643 | buffer_info = &tx_ring->buffer_info[i]; |
| 2600 | size = min(len, max_per_txd); | 2644 | size = min(len, max_per_txd); |
| 2601 | #ifdef NETIF_F_TSO | 2645 | #ifdef NETIF_F_TSO |
| @@ -2611,7 +2655,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
| 2611 | 2655 | ||
| 2612 | /* Workaround for premature desc write-backs | 2656 | /* Workaround for premature desc write-backs |
| 2613 | * in TSO mode. Append 4-byte sentinel desc */ | 2657 | * in TSO mode. Append 4-byte sentinel desc */ |
| 2614 | if(unlikely(mss && !nr_frags && size == len && size > 8)) | 2658 | if (unlikely(mss && !nr_frags && size == len && size > 8)) |
| 2615 | size -= 4; | 2659 | size -= 4; |
| 2616 | #endif | 2660 | #endif |
| 2617 | /* work-around for errata 10 and it applies | 2661 | /* work-around for errata 10 and it applies |
| @@ -2619,13 +2663,13 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
| 2619 | * The fix is to make sure that the first descriptor of a | 2663 | * The fix is to make sure that the first descriptor of a |
| 2620 | * packet is smaller than 2048 - 16 - 16 (or 2016) bytes | 2664 | * packet is smaller than 2048 - 16 - 16 (or 2016) bytes |
| 2621 | */ | 2665 | */ |
| 2622 | if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && | 2666 | if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && |
| 2623 | (size > 2015) && count == 0)) | 2667 | (size > 2015) && count == 0)) |
| 2624 | size = 2015; | 2668 | size = 2015; |
| 2625 | 2669 | ||
| 2626 | /* Workaround for potential 82544 hang in PCI-X. Avoid | 2670 | /* Workaround for potential 82544 hang in PCI-X. Avoid |
| 2627 | * terminating buffers within evenly-aligned dwords. */ | 2671 | * terminating buffers within evenly-aligned dwords. */ |
| 2628 | if(unlikely(adapter->pcix_82544 && | 2672 | if (unlikely(adapter->pcix_82544 && |
| 2629 | !((unsigned long)(skb->data + offset + size - 1) & 4) && | 2673 | !((unsigned long)(skb->data + offset + size - 1) & 4) && |
| 2630 | size > 4)) | 2674 | size > 4)) |
| 2631 | size -= 4; | 2675 | size -= 4; |
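Two notes on the workarounds above. The errata-10 cap tests `size > 2015`, so the first descriptor ends up at most 2015 bytes, strictly below the 2016-byte limit quoted in the comment. The 82544 check looks at bit 2 of the address of the buffer's last byte: when that bit is clear the buffer would terminate inside an evenly-aligned dword, so 4 bytes are shaved off and carried into the next descriptor; for example, a buffer that would end at address 0x1003 (where `0x1003 & 4 == 0`) is shortened to end at 0x0fff, whose bit 2 is set.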
| @@ -2641,29 +2685,29 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
| 2641 | len -= size; | 2685 | len -= size; |
| 2642 | offset += size; | 2686 | offset += size; |
| 2643 | count++; | 2687 | count++; |
| 2644 | if(unlikely(++i == tx_ring->count)) i = 0; | 2688 | if (unlikely(++i == tx_ring->count)) i = 0; |
| 2645 | } | 2689 | } |
| 2646 | 2690 | ||
| 2647 | for(f = 0; f < nr_frags; f++) { | 2691 | for (f = 0; f < nr_frags; f++) { |
| 2648 | struct skb_frag_struct *frag; | 2692 | struct skb_frag_struct *frag; |
| 2649 | 2693 | ||
| 2650 | frag = &skb_shinfo(skb)->frags[f]; | 2694 | frag = &skb_shinfo(skb)->frags[f]; |
| 2651 | len = frag->size; | 2695 | len = frag->size; |
| 2652 | offset = frag->page_offset; | 2696 | offset = frag->page_offset; |
| 2653 | 2697 | ||
| 2654 | while(len) { | 2698 | while (len) { |
| 2655 | buffer_info = &tx_ring->buffer_info[i]; | 2699 | buffer_info = &tx_ring->buffer_info[i]; |
| 2656 | size = min(len, max_per_txd); | 2700 | size = min(len, max_per_txd); |
| 2657 | #ifdef NETIF_F_TSO | 2701 | #ifdef NETIF_F_TSO |
| 2658 | /* Workaround for premature desc write-backs | 2702 | /* Workaround for premature desc write-backs |
| 2659 | * in TSO mode. Append 4-byte sentinel desc */ | 2703 | * in TSO mode. Append 4-byte sentinel desc */ |
| 2660 | if(unlikely(mss && f == (nr_frags-1) && size == len && size > 8)) | 2704 | if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8)) |
| 2661 | size -= 4; | 2705 | size -= 4; |
| 2662 | #endif | 2706 | #endif |
| 2663 | /* Workaround for potential 82544 hang in PCI-X. | 2707 | /* Workaround for potential 82544 hang in PCI-X. |
| 2664 | * Avoid terminating buffers within evenly-aligned | 2708 | * Avoid terminating buffers within evenly-aligned |
| 2665 | * dwords. */ | 2709 | * dwords. */ |
| 2666 | if(unlikely(adapter->pcix_82544 && | 2710 | if (unlikely(adapter->pcix_82544 && |
| 2667 | !((unsigned long)(frag->page+offset+size-1) & 4) && | 2711 | !((unsigned long)(frag->page+offset+size-1) & 4) && |
| 2668 | size > 4)) | 2712 | size > 4)) |
| 2669 | size -= 4; | 2713 | size -= 4; |
| @@ -2680,7 +2724,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
| 2680 | len -= size; | 2724 | len -= size; |
| 2681 | offset += size; | 2725 | offset += size; |
| 2682 | count++; | 2726 | count++; |
| 2683 | if(unlikely(++i == tx_ring->count)) i = 0; | 2727 | if (unlikely(++i == tx_ring->count)) i = 0; |
| 2684 | } | 2728 | } |
| 2685 | } | 2729 | } |
| 2686 | 2730 | ||
| @@ -2700,35 +2744,35 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
| 2700 | uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; | 2744 | uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; |
| 2701 | unsigned int i; | 2745 | unsigned int i; |
| 2702 | 2746 | ||
| 2703 | if(likely(tx_flags & E1000_TX_FLAGS_TSO)) { | 2747 | if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { |
| 2704 | txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | | 2748 | txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | |
| 2705 | E1000_TXD_CMD_TSE; | 2749 | E1000_TXD_CMD_TSE; |
| 2706 | txd_upper |= E1000_TXD_POPTS_TXSM << 8; | 2750 | txd_upper |= E1000_TXD_POPTS_TXSM << 8; |
| 2707 | 2751 | ||
| 2708 | if(likely(tx_flags & E1000_TX_FLAGS_IPV4)) | 2752 | if (likely(tx_flags & E1000_TX_FLAGS_IPV4)) |
| 2709 | txd_upper |= E1000_TXD_POPTS_IXSM << 8; | 2753 | txd_upper |= E1000_TXD_POPTS_IXSM << 8; |
| 2710 | } | 2754 | } |
| 2711 | 2755 | ||
| 2712 | if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) { | 2756 | if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) { |
| 2713 | txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; | 2757 | txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; |
| 2714 | txd_upper |= E1000_TXD_POPTS_TXSM << 8; | 2758 | txd_upper |= E1000_TXD_POPTS_TXSM << 8; |
| 2715 | } | 2759 | } |
| 2716 | 2760 | ||
| 2717 | if(unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { | 2761 | if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { |
| 2718 | txd_lower |= E1000_TXD_CMD_VLE; | 2762 | txd_lower |= E1000_TXD_CMD_VLE; |
| 2719 | txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); | 2763 | txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); |
| 2720 | } | 2764 | } |
| 2721 | 2765 | ||
| 2722 | i = tx_ring->next_to_use; | 2766 | i = tx_ring->next_to_use; |
| 2723 | 2767 | ||
| 2724 | while(count--) { | 2768 | while (count--) { |
| 2725 | buffer_info = &tx_ring->buffer_info[i]; | 2769 | buffer_info = &tx_ring->buffer_info[i]; |
| 2726 | tx_desc = E1000_TX_DESC(*tx_ring, i); | 2770 | tx_desc = E1000_TX_DESC(*tx_ring, i); |
| 2727 | tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); | 2771 | tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); |
| 2728 | tx_desc->lower.data = | 2772 | tx_desc->lower.data = |
| 2729 | cpu_to_le32(txd_lower | buffer_info->length); | 2773 | cpu_to_le32(txd_lower | buffer_info->length); |
| 2730 | tx_desc->upper.data = cpu_to_le32(txd_upper); | 2774 | tx_desc->upper.data = cpu_to_le32(txd_upper); |
| 2731 | if(unlikely(++i == tx_ring->count)) i = 0; | 2775 | if (unlikely(++i == tx_ring->count)) i = 0; |
| 2732 | } | 2776 | } |
| 2733 | 2777 | ||
| 2734 | tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); | 2778 | tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); |
| @@ -2763,20 +2807,20 @@ e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb) | |||
| 2763 | 2807 | ||
| 2764 | E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR); | 2808 | E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR); |
| 2765 | 2809 | ||
| 2766 | if(adapter->link_duplex != HALF_DUPLEX) | 2810 | if (adapter->link_duplex != HALF_DUPLEX) |
| 2767 | goto no_fifo_stall_required; | 2811 | goto no_fifo_stall_required; |
| 2768 | 2812 | ||
| 2769 | if(atomic_read(&adapter->tx_fifo_stall)) | 2813 | if (atomic_read(&adapter->tx_fifo_stall)) |
| 2770 | return 1; | 2814 | return 1; |
| 2771 | 2815 | ||
| 2772 | if(skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { | 2816 | if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { |
| 2773 | atomic_set(&adapter->tx_fifo_stall, 1); | 2817 | atomic_set(&adapter->tx_fifo_stall, 1); |
| 2774 | return 1; | 2818 | return 1; |
| 2775 | } | 2819 | } |
| 2776 | 2820 | ||
| 2777 | no_fifo_stall_required: | 2821 | no_fifo_stall_required: |
| 2778 | adapter->tx_fifo_head += skb_fifo_len; | 2822 | adapter->tx_fifo_head += skb_fifo_len; |
| 2779 | if(adapter->tx_fifo_head >= adapter->tx_fifo_size) | 2823 | if (adapter->tx_fifo_head >= adapter->tx_fifo_size) |
| 2780 | adapter->tx_fifo_head -= adapter->tx_fifo_size; | 2824 | adapter->tx_fifo_head -= adapter->tx_fifo_size; |
| 2781 | return 0; | 2825 | return 0; |
| 2782 | } | 2826 | } |
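The function above treats the 82547 Tx FIFO as a ring: the frame length is rounded up to the FIFO's header granularity, a stall is signalled when the rounded length would not leave enough room before the wrap point, and otherwise the head offset advances modulo the FIFO size. The sketch below models only that bookkeeping; the constants are invented and `fifo_space` is computed in the obvious way because its derivation is outside this hunk.

```c
#include <stdbool.h>
#include <stdint.h>

#define FIFO_HDR 16u   /* rounding granularity, stand-in for E1000_FIFO_HDR */
#define FIFO_PAD 64u   /* stand-in for E1000_82547_PAD_LEN */

struct tx_fifo {
	uint32_t head;   /* running offset of the next free byte */
	uint32_t size;   /* total FIFO size in bytes */
};

/* Returns true when the caller should stall until the FIFO drains. */
static bool tx_fifo_admit(struct tx_fifo *f, uint32_t skb_len)
{
	uint32_t fifo_len = (skb_len + FIFO_HDR - 1) / FIFO_HDR * FIFO_HDR;
	uint32_t fifo_space = f->size - f->head;

	if (fifo_len >= FIFO_PAD + fifo_space)
		return true;            /* frame would run into the wrap point */

	f->head += fifo_len;
	if (f->head >= f->size)
		f->head -= f->size;     /* wrap, exactly as in the hunk above */
	return false;
}
```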
| @@ -2787,27 +2831,27 @@ e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb) | |||
| 2787 | { | 2831 | { |
| 2788 | struct e1000_hw *hw = &adapter->hw; | 2832 | struct e1000_hw *hw = &adapter->hw; |
| 2789 | uint16_t length, offset; | 2833 | uint16_t length, offset; |
| 2790 | if(vlan_tx_tag_present(skb)) { | 2834 | if (vlan_tx_tag_present(skb)) { |
| 2791 | if(!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && | 2835 | if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && |
| 2792 | ( adapter->hw.mng_cookie.status & | 2836 | ( adapter->hw.mng_cookie.status & |
| 2793 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) ) | 2837 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) ) |
| 2794 | return 0; | 2838 | return 0; |
| 2795 | } | 2839 | } |
| 2796 | if ((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) { | 2840 | if ((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) { |
| 2797 | struct ethhdr *eth = (struct ethhdr *) skb->data; | 2841 | struct ethhdr *eth = (struct ethhdr *) skb->data; |
| 2798 | if((htons(ETH_P_IP) == eth->h_proto)) { | 2842 | if ((htons(ETH_P_IP) == eth->h_proto)) { |
| 2799 | const struct iphdr *ip = | 2843 | const struct iphdr *ip = |
| 2800 | (struct iphdr *)((uint8_t *)skb->data+14); | 2844 | (struct iphdr *)((uint8_t *)skb->data+14); |
| 2801 | if(IPPROTO_UDP == ip->protocol) { | 2845 | if (IPPROTO_UDP == ip->protocol) { |
| 2802 | struct udphdr *udp = | 2846 | struct udphdr *udp = |
| 2803 | (struct udphdr *)((uint8_t *)ip + | 2847 | (struct udphdr *)((uint8_t *)ip + |
| 2804 | (ip->ihl << 2)); | 2848 | (ip->ihl << 2)); |
| 2805 | if(ntohs(udp->dest) == 67) { | 2849 | if (ntohs(udp->dest) == 67) { |
| 2806 | offset = (uint8_t *)udp + 8 - skb->data; | 2850 | offset = (uint8_t *)udp + 8 - skb->data; |
| 2807 | length = skb->len - offset; | 2851 | length = skb->len - offset; |
| 2808 | 2852 | ||
| 2809 | return e1000_mng_write_dhcp_info(hw, | 2853 | return e1000_mng_write_dhcp_info(hw, |
| 2810 | (uint8_t *)udp + 8, | 2854 | (uint8_t *)udp + 8, |
| 2811 | length); | 2855 | length); |
| 2812 | } | 2856 | } |
| 2813 | } | 2857 | } |
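The offset arithmetic above, spelled out for the common case: the IP header is read 14 bytes into the frame (the Ethernet header), the UDP header sits `ip->ihl << 2` bytes after that (20 bytes when there are no IP options), and the payload handed to `e1000_mng_write_dhcp_info()` starts another 8 bytes later, past the UDP header. For an option-less IPv4 packet this gives `offset = 14 + 20 + 8 = 42` and `length = skb->len - 42`; the destination-port-67 test restricts the copy to client-to-server DHCP/BOOTP traffic.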
| @@ -2830,7 +2874,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
| 2830 | unsigned int nr_frags = 0; | 2874 | unsigned int nr_frags = 0; |
| 2831 | unsigned int mss = 0; | 2875 | unsigned int mss = 0; |
| 2832 | int count = 0; | 2876 | int count = 0; |
| 2833 | int tso; | 2877 | int tso; |
| 2834 | unsigned int f; | 2878 | unsigned int f; |
| 2835 | len -= skb->data_len; | 2879 | len -= skb->data_len; |
| 2836 | 2880 | ||
| @@ -2853,7 +2897,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
| 2853 | * 4 = ceil(buffer len/mss). To make sure we don't | 2897 | * 4 = ceil(buffer len/mss). To make sure we don't |
| 2854 | * overrun the FIFO, adjust the max buffer len if mss | 2898 | * overrun the FIFO, adjust the max buffer len if mss |
| 2855 | * drops. */ | 2899 | * drops. */ |
| 2856 | if(mss) { | 2900 | if (mss) { |
| 2857 | uint8_t hdr_len; | 2901 | uint8_t hdr_len; |
| 2858 | max_per_txd = min(mss << 2, max_per_txd); | 2902 | max_per_txd = min(mss << 2, max_per_txd); |
| 2859 | max_txd_pwr = fls(max_per_txd) - 1; | 2903 | max_txd_pwr = fls(max_per_txd) - 1; |
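To make the comment above concrete, with an invented MSS of 500 bytes: `mss << 2 = 2000`, so `max_per_txd` is capped at 2000 and no descriptor carries more than about four segments of data, and `fls(2000) - 1 = 10` rounds that cap down to the power of two (2^10 = 1024 here) used when counting descriptors per buffer. How `TXD_USE_COUNT()` consumes that power is not visible in this hunk, so the final chunk size is an interpretation.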
| @@ -2876,12 +2920,12 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
| 2876 | } | 2920 | } |
| 2877 | } | 2921 | } |
| 2878 | 2922 | ||
| 2879 | if((mss) || (skb->ip_summed == CHECKSUM_HW)) | ||
| 2880 | /* reserve a descriptor for the offload context */ | 2923 | /* reserve a descriptor for the offload context */ |
| 2924 | if ((mss) || (skb->ip_summed == CHECKSUM_HW)) | ||
| 2881 | count++; | 2925 | count++; |
| 2882 | count++; | 2926 | count++; |
| 2883 | #else | 2927 | #else |
| 2884 | if(skb->ip_summed == CHECKSUM_HW) | 2928 | if (skb->ip_summed == CHECKSUM_HW) |
| 2885 | count++; | 2929 | count++; |
| 2886 | #endif | 2930 | #endif |
| 2887 | 2931 | ||
| @@ -2894,24 +2938,24 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
| 2894 | 2938 | ||
| 2895 | count += TXD_USE_COUNT(len, max_txd_pwr); | 2939 | count += TXD_USE_COUNT(len, max_txd_pwr); |
| 2896 | 2940 | ||
| 2897 | if(adapter->pcix_82544) | 2941 | if (adapter->pcix_82544) |
| 2898 | count++; | 2942 | count++; |
| 2899 | 2943 | ||
| 2900 | /* work-around for errata 10 and it applies to all controllers | 2944 | /* work-around for errata 10 and it applies to all controllers |
| 2901 | * in PCI-X mode, so add one more descriptor to the count | 2945 | * in PCI-X mode, so add one more descriptor to the count |
| 2902 | */ | 2946 | */ |
| 2903 | if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && | 2947 | if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && |
| 2904 | (len > 2015))) | 2948 | (len > 2015))) |
| 2905 | count++; | 2949 | count++; |
| 2906 | 2950 | ||
| 2907 | nr_frags = skb_shinfo(skb)->nr_frags; | 2951 | nr_frags = skb_shinfo(skb)->nr_frags; |
| 2908 | for(f = 0; f < nr_frags; f++) | 2952 | for (f = 0; f < nr_frags; f++) |
| 2909 | count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size, | 2953 | count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size, |
| 2910 | max_txd_pwr); | 2954 | max_txd_pwr); |
| 2911 | if(adapter->pcix_82544) | 2955 | if (adapter->pcix_82544) |
| 2912 | count += nr_frags; | 2956 | count += nr_frags; |
| 2913 | 2957 | ||
| 2914 | if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) ) | 2958 | if (adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) ) |
| 2915 | e1000_transfer_dhcp_info(adapter, skb); | 2959 | e1000_transfer_dhcp_info(adapter, skb); |
| 2916 | 2960 | ||
| 2917 | local_irq_save(flags); | 2961 | local_irq_save(flags); |
| @@ -2929,8 +2973,8 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
| 2929 | return NETDEV_TX_BUSY; | 2973 | return NETDEV_TX_BUSY; |
| 2930 | } | 2974 | } |
| 2931 | 2975 | ||
| 2932 | if(unlikely(adapter->hw.mac_type == e1000_82547)) { | 2976 | if (unlikely(adapter->hw.mac_type == e1000_82547)) { |
| 2933 | if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) { | 2977 | if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) { |
| 2934 | netif_stop_queue(netdev); | 2978 | netif_stop_queue(netdev); |
| 2935 | mod_timer(&adapter->tx_fifo_stall_timer, jiffies); | 2979 | mod_timer(&adapter->tx_fifo_stall_timer, jiffies); |
| 2936 | spin_unlock_irqrestore(&tx_ring->tx_lock, flags); | 2980 | spin_unlock_irqrestore(&tx_ring->tx_lock, flags); |
| @@ -2938,13 +2982,13 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
| 2938 | } | 2982 | } |
| 2939 | } | 2983 | } |
| 2940 | 2984 | ||
| 2941 | if(unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) { | 2985 | if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) { |
| 2942 | tx_flags |= E1000_TX_FLAGS_VLAN; | 2986 | tx_flags |= E1000_TX_FLAGS_VLAN; |
| 2943 | tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); | 2987 | tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); |
| 2944 | } | 2988 | } |
| 2945 | 2989 | ||
| 2946 | first = tx_ring->next_to_use; | 2990 | first = tx_ring->next_to_use; |
| 2947 | 2991 | ||
| 2948 | tso = e1000_tso(adapter, tx_ring, skb); | 2992 | tso = e1000_tso(adapter, tx_ring, skb); |
| 2949 | if (tso < 0) { | 2993 | if (tso < 0) { |
| 2950 | dev_kfree_skb_any(skb); | 2994 | dev_kfree_skb_any(skb); |
| @@ -3033,9 +3077,9 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
| 3033 | struct e1000_adapter *adapter = netdev_priv(netdev); | 3077 | struct e1000_adapter *adapter = netdev_priv(netdev); |
| 3034 | int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; | 3078 | int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; |
| 3035 | 3079 | ||
| 3036 | if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || | 3080 | if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || |
| 3037 | (max_frame > MAX_JUMBO_FRAME_SIZE)) { | 3081 | (max_frame > MAX_JUMBO_FRAME_SIZE)) { |
| 3038 | DPRINTK(PROBE, ERR, "Invalid MTU setting\n"); | 3082 | DPRINTK(PROBE, ERR, "Invalid MTU setting\n"); |
| 3039 | return -EINVAL; | 3083 | return -EINVAL; |
| 3040 | } | 3084 | } |
| 3041 | 3085 | ||
| @@ -3083,7 +3127,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
| 3083 | 3127 | ||
| 3084 | netdev->mtu = new_mtu; | 3128 | netdev->mtu = new_mtu; |
| 3085 | 3129 | ||
| 3086 | if(netif_running(netdev)) { | 3130 | if (netif_running(netdev)) { |
| 3087 | e1000_down(adapter); | 3131 | e1000_down(adapter); |
| 3088 | e1000_up(adapter); | 3132 | e1000_up(adapter); |
| 3089 | } | 3133 | } |
| @@ -3170,7 +3214,7 @@ e1000_update_stats(struct e1000_adapter *adapter) | |||
| 3170 | hw->collision_delta = E1000_READ_REG(hw, COLC); | 3214 | hw->collision_delta = E1000_READ_REG(hw, COLC); |
| 3171 | adapter->stats.colc += hw->collision_delta; | 3215 | adapter->stats.colc += hw->collision_delta; |
| 3172 | 3216 | ||
| 3173 | if(hw->mac_type >= e1000_82543) { | 3217 | if (hw->mac_type >= e1000_82543) { |
| 3174 | adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC); | 3218 | adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC); |
| 3175 | adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC); | 3219 | adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC); |
| 3176 | adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS); | 3220 | adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS); |
| @@ -3178,7 +3222,7 @@ e1000_update_stats(struct e1000_adapter *adapter) | |||
| 3178 | adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC); | 3222 | adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC); |
| 3179 | adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC); | 3223 | adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC); |
| 3180 | } | 3224 | } |
| 3181 | if(hw->mac_type > e1000_82547_rev_2) { | 3225 | if (hw->mac_type > e1000_82547_rev_2) { |
| 3182 | adapter->stats.iac += E1000_READ_REG(hw, IAC); | 3226 | adapter->stats.iac += E1000_READ_REG(hw, IAC); |
| 3183 | adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC); | 3227 | adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC); |
| 3184 | adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC); | 3228 | adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC); |
| @@ -3222,14 +3266,14 @@ e1000_update_stats(struct e1000_adapter *adapter) | |||
| 3222 | 3266 | ||
| 3223 | /* Phy Stats */ | 3267 | /* Phy Stats */ |
| 3224 | 3268 | ||
| 3225 | if(hw->media_type == e1000_media_type_copper) { | 3269 | if (hw->media_type == e1000_media_type_copper) { |
| 3226 | if((adapter->link_speed == SPEED_1000) && | 3270 | if ((adapter->link_speed == SPEED_1000) && |
| 3227 | (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { | 3271 | (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { |
| 3228 | phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; | 3272 | phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; |
| 3229 | adapter->phy_stats.idle_errors += phy_tmp; | 3273 | adapter->phy_stats.idle_errors += phy_tmp; |
| 3230 | } | 3274 | } |
| 3231 | 3275 | ||
| 3232 | if((hw->mac_type <= e1000_82546) && | 3276 | if ((hw->mac_type <= e1000_82546) && |
| 3233 | (hw->phy_type == e1000_phy_m88) && | 3277 | (hw->phy_type == e1000_phy_m88) && |
| 3234 | !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) | 3278 | !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) |
| 3235 | adapter->phy_stats.receive_errors += phy_tmp; | 3279 | adapter->phy_stats.receive_errors += phy_tmp; |
| @@ -3294,7 +3338,7 @@ e1000_intr(int irq, void *data, struct pt_regs *regs) | |||
| 3294 | return IRQ_NONE; /* Not our interrupt */ | 3338 | return IRQ_NONE; /* Not our interrupt */ |
| 3295 | } | 3339 | } |
| 3296 | 3340 | ||
| 3297 | if(unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { | 3341 | if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { |
| 3298 | hw->get_link_status = 1; | 3342 | hw->get_link_status = 1; |
| 3299 | mod_timer(&adapter->watchdog_timer, jiffies); | 3343 | mod_timer(&adapter->watchdog_timer, jiffies); |
| 3300 | } | 3344 | } |
| @@ -3326,26 +3370,26 @@ e1000_intr(int irq, void *data, struct pt_regs *regs) | |||
| 3326 | 3370 | ||
| 3327 | #else /* if !CONFIG_E1000_NAPI */ | 3371 | #else /* if !CONFIG_E1000_NAPI */ |
| 3328 | /* Writing IMC and IMS is needed for 82547. | 3372 | /* Writing IMC and IMS is needed for 82547. |
| 3329 | Due to Hub Link bus being occupied, an interrupt | 3373 | * Due to Hub Link bus being occupied, an interrupt |
| 3330 | de-assertion message is not able to be sent. | 3374 | * de-assertion message is not able to be sent. |
| 3331 | When an interrupt assertion message is generated later, | 3375 | * When an interrupt assertion message is generated later, |
| 3332 | two messages are re-ordered and sent out. | 3376 | * two messages are re-ordered and sent out. |
| 3333 | That causes APIC to think 82547 is in de-assertion | 3377 | * That causes APIC to think 82547 is in de-assertion |
| 3334 | state, while 82547 is in assertion state, resulting | 3378 | * state, while 82547 is in assertion state, resulting |
| 3335 | in dead lock. Writing IMC forces 82547 into | 3379 | * in dead lock. Writing IMC forces 82547 into |
| 3336 | de-assertion state. | 3380 | * de-assertion state. |
| 3337 | */ | 3381 | */ |
| 3338 | if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2){ | 3382 | if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) { |
| 3339 | atomic_inc(&adapter->irq_sem); | 3383 | atomic_inc(&adapter->irq_sem); |
| 3340 | E1000_WRITE_REG(hw, IMC, ~0); | 3384 | E1000_WRITE_REG(hw, IMC, ~0); |
| 3341 | } | 3385 | } |
| 3342 | 3386 | ||
| 3343 | for(i = 0; i < E1000_MAX_INTR; i++) | 3387 | for (i = 0; i < E1000_MAX_INTR; i++) |
| 3344 | if(unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) & | 3388 | if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) & |
| 3345 | !e1000_clean_tx_irq(adapter, adapter->tx_ring))) | 3389 | !e1000_clean_tx_irq(adapter, adapter->tx_ring))) |
| 3346 | break; | 3390 | break; |
| 3347 | 3391 | ||
| 3348 | if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) | 3392 | if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) |
| 3349 | e1000_irq_enable(adapter); | 3393 | e1000_irq_enable(adapter); |
| 3350 | 3394 | ||
| 3351 | #endif /* CONFIG_E1000_NAPI */ | 3395 | #endif /* CONFIG_E1000_NAPI */ |
| @@ -3397,9 +3441,9 @@ e1000_clean(struct net_device *poll_dev, int *budget) | |||
| 3397 | 3441 | ||
| 3398 | *budget -= work_done; | 3442 | *budget -= work_done; |
| 3399 | poll_dev->quota -= work_done; | 3443 | poll_dev->quota -= work_done; |
| 3400 | 3444 | ||
| 3401 | /* If no Tx and not enough Rx work done, exit the polling mode */ | 3445 | /* If no Tx and not enough Rx work done, exit the polling mode */ |
| 3402 | if((!tx_cleaned && (work_done == 0)) || | 3446 | if ((!tx_cleaned && (work_done == 0)) || |
| 3403 | !netif_running(adapter->netdev)) { | 3447 | !netif_running(adapter->netdev)) { |
| 3404 | quit_polling: | 3448 | quit_polling: |
| 3405 | netif_rx_complete(poll_dev); | 3449 | netif_rx_complete(poll_dev); |
| @@ -3431,7 +3475,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
| 3431 | eop_desc = E1000_TX_DESC(*tx_ring, eop); | 3475 | eop_desc = E1000_TX_DESC(*tx_ring, eop); |
| 3432 | 3476 | ||
| 3433 | while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) { | 3477 | while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) { |
| 3434 | for(cleaned = FALSE; !cleaned; ) { | 3478 | for (cleaned = FALSE; !cleaned; ) { |
| 3435 | tx_desc = E1000_TX_DESC(*tx_ring, i); | 3479 | tx_desc = E1000_TX_DESC(*tx_ring, i); |
| 3436 | buffer_info = &tx_ring->buffer_info[i]; | 3480 | buffer_info = &tx_ring->buffer_info[i]; |
| 3437 | cleaned = (i == eop); | 3481 | cleaned = (i == eop); |
| @@ -3442,7 +3486,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
| 3442 | e1000_unmap_and_free_tx_resource(adapter, buffer_info); | 3486 | e1000_unmap_and_free_tx_resource(adapter, buffer_info); |
| 3443 | memset(tx_desc, 0, sizeof(struct e1000_tx_desc)); | 3487 | memset(tx_desc, 0, sizeof(struct e1000_tx_desc)); |
| 3444 | 3488 | ||
| 3445 | if(unlikely(++i == tx_ring->count)) i = 0; | 3489 | if (unlikely(++i == tx_ring->count)) i = 0; |
| 3446 | } | 3490 | } |
| 3447 | 3491 | ||
| 3448 | #ifdef CONFIG_E1000_MQ | 3492 | #ifdef CONFIG_E1000_MQ |
| @@ -3457,7 +3501,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
| 3457 | 3501 | ||
| 3458 | spin_lock(&tx_ring->tx_lock); | 3502 | spin_lock(&tx_ring->tx_lock); |
| 3459 | 3503 | ||
| 3460 | if(unlikely(cleaned && netif_queue_stopped(netdev) && | 3504 | if (unlikely(cleaned && netif_queue_stopped(netdev) && |
| 3461 | netif_carrier_ok(netdev))) | 3505 | netif_carrier_ok(netdev))) |
| 3462 | netif_wake_queue(netdev); | 3506 | netif_wake_queue(netdev); |
| 3463 | 3507 | ||
| @@ -3519,21 +3563,21 @@ e1000_rx_checksum(struct e1000_adapter *adapter, | |||
| 3519 | skb->ip_summed = CHECKSUM_NONE; | 3563 | skb->ip_summed = CHECKSUM_NONE; |
| 3520 | 3564 | ||
| 3521 | /* 82543 or newer only */ | 3565 | /* 82543 or newer only */ |
| 3522 | if(unlikely(adapter->hw.mac_type < e1000_82543)) return; | 3566 | if (unlikely(adapter->hw.mac_type < e1000_82543)) return; |
| 3523 | /* Ignore Checksum bit is set */ | 3567 | /* Ignore Checksum bit is set */ |
| 3524 | if(unlikely(status & E1000_RXD_STAT_IXSM)) return; | 3568 | if (unlikely(status & E1000_RXD_STAT_IXSM)) return; |
| 3525 | /* TCP/UDP checksum error bit is set */ | 3569 | /* TCP/UDP checksum error bit is set */ |
| 3526 | if(unlikely(errors & E1000_RXD_ERR_TCPE)) { | 3570 | if (unlikely(errors & E1000_RXD_ERR_TCPE)) { |
| 3527 | /* let the stack verify checksum errors */ | 3571 | /* let the stack verify checksum errors */ |
| 3528 | adapter->hw_csum_err++; | 3572 | adapter->hw_csum_err++; |
| 3529 | return; | 3573 | return; |
| 3530 | } | 3574 | } |
| 3531 | /* TCP/UDP Checksum has not been calculated */ | 3575 | /* TCP/UDP Checksum has not been calculated */ |
| 3532 | if(adapter->hw.mac_type <= e1000_82547_rev_2) { | 3576 | if (adapter->hw.mac_type <= e1000_82547_rev_2) { |
| 3533 | if(!(status & E1000_RXD_STAT_TCPCS)) | 3577 | if (!(status & E1000_RXD_STAT_TCPCS)) |
| 3534 | return; | 3578 | return; |
| 3535 | } else { | 3579 | } else { |
| 3536 | if(!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) | 3580 | if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) |
| 3537 | return; | 3581 | return; |
| 3538 | } | 3582 | } |
| 3539 | /* It must be a TCP or UDP packet with a valid checksum */ | 3583 | /* It must be a TCP or UDP packet with a valid checksum */ |
| @@ -3569,9 +3613,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
| 3569 | { | 3613 | { |
| 3570 | struct net_device *netdev = adapter->netdev; | 3614 | struct net_device *netdev = adapter->netdev; |
| 3571 | struct pci_dev *pdev = adapter->pdev; | 3615 | struct pci_dev *pdev = adapter->pdev; |
| 3572 | struct e1000_rx_desc *rx_desc; | 3616 | struct e1000_rx_desc *rx_desc, *next_rxd; |
| 3573 | struct e1000_buffer *buffer_info; | 3617 | struct e1000_buffer *buffer_info, *next_buffer; |
| 3574 | struct sk_buff *skb; | ||
| 3575 | unsigned long flags; | 3618 | unsigned long flags; |
| 3576 | uint32_t length; | 3619 | uint32_t length; |
| 3577 | uint8_t last_byte; | 3620 | uint8_t last_byte; |
| @@ -3581,16 +3624,25 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
| 3581 | 3624 | ||
| 3582 | i = rx_ring->next_to_clean; | 3625 | i = rx_ring->next_to_clean; |
| 3583 | rx_desc = E1000_RX_DESC(*rx_ring, i); | 3626 | rx_desc = E1000_RX_DESC(*rx_ring, i); |
| 3627 | buffer_info = &rx_ring->buffer_info[i]; | ||
| 3584 | 3628 | ||
| 3585 | while(rx_desc->status & E1000_RXD_STAT_DD) { | 3629 | while (rx_desc->status & E1000_RXD_STAT_DD) { |
| 3586 | buffer_info = &rx_ring->buffer_info[i]; | 3630 | struct sk_buff *skb, *next_skb; |
| 3587 | u8 status; | 3631 | u8 status; |
| 3588 | #ifdef CONFIG_E1000_NAPI | 3632 | #ifdef CONFIG_E1000_NAPI |
| 3589 | if(*work_done >= work_to_do) | 3633 | if (*work_done >= work_to_do) |
| 3590 | break; | 3634 | break; |
| 3591 | (*work_done)++; | 3635 | (*work_done)++; |
| 3592 | #endif | 3636 | #endif |
| 3593 | status = rx_desc->status; | 3637 | status = rx_desc->status; |
| 3638 | skb = buffer_info->skb; | ||
| 3639 | buffer_info->skb = NULL; | ||
| 3640 | |||
| 3641 | if (++i == rx_ring->count) i = 0; | ||
| 3642 | next_rxd = E1000_RX_DESC(*rx_ring, i); | ||
| 3643 | next_buffer = &rx_ring->buffer_info[i]; | ||
| 3644 | next_skb = next_buffer->skb; | ||
| 3645 | |||
| 3594 | cleaned = TRUE; | 3646 | cleaned = TRUE; |
| 3595 | cleaned_count++; | 3647 | cleaned_count++; |
| 3596 | pci_unmap_single(pdev, | 3648 | pci_unmap_single(pdev, |
| @@ -3598,20 +3650,50 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
| 3598 | buffer_info->length, | 3650 | buffer_info->length, |
| 3599 | PCI_DMA_FROMDEVICE); | 3651 | PCI_DMA_FROMDEVICE); |
| 3600 | 3652 | ||
| 3601 | skb = buffer_info->skb; | ||
| 3602 | length = le16_to_cpu(rx_desc->length); | 3653 | length = le16_to_cpu(rx_desc->length); |
| 3603 | 3654 | ||
| 3604 | if(unlikely(!(rx_desc->status & E1000_RXD_STAT_EOP))) { | 3655 | skb_put(skb, length); |
| 3605 | /* All receives must fit into a single buffer */ | 3656 | |
| 3606 | E1000_DBG("%s: Receive packet consumed multiple" | 3657 | if (!(status & E1000_RXD_STAT_EOP)) { |
| 3607 | " buffers\n", netdev->name); | 3658 | if (!rx_ring->rx_skb_top) { |
| 3608 | dev_kfree_skb_irq(skb); | 3659 | rx_ring->rx_skb_top = skb; |
| 3660 | rx_ring->rx_skb_top->len = length; | ||
| 3661 | rx_ring->rx_skb_prev = skb; | ||
| 3662 | } else { | ||
| 3663 | if (skb_shinfo(rx_ring->rx_skb_top)->frag_list) { | ||
| 3664 | rx_ring->rx_skb_prev->next = skb; | ||
| 3665 | skb->prev = rx_ring->rx_skb_prev; | ||
| 3666 | } else { | ||
| 3667 | skb_shinfo(rx_ring->rx_skb_top)->frag_list = skb; | ||
| 3668 | } | ||
| 3669 | rx_ring->rx_skb_prev = skb; | ||
| 3670 | rx_ring->rx_skb_top->data_len += length; | ||
| 3671 | } | ||
| 3609 | goto next_desc; | 3672 | goto next_desc; |
| 3673 | } else { | ||
| 3674 | if (rx_ring->rx_skb_top) { | ||
| 3675 | if (skb_shinfo(rx_ring->rx_skb_top) | ||
| 3676 | ->frag_list) { | ||
| 3677 | rx_ring->rx_skb_prev->next = skb; | ||
| 3678 | skb->prev = rx_ring->rx_skb_prev; | ||
| 3679 | } else | ||
| 3680 | skb_shinfo(rx_ring->rx_skb_top) | ||
| 3681 | ->frag_list = skb; | ||
| 3682 | |||
| 3683 | rx_ring->rx_skb_top->data_len += length; | ||
| 3684 | rx_ring->rx_skb_top->len += | ||
| 3685 | rx_ring->rx_skb_top->data_len; | ||
| 3686 | |||
| 3687 | skb = rx_ring->rx_skb_top; | ||
| 3688 | multi_descriptor = TRUE; | ||
| 3689 | rx_ring->rx_skb_top = NULL; | ||
| 3690 | rx_ring->rx_skb_prev = NULL; | ||
| 3691 | } | ||
| 3610 | } | 3692 | } |
| 3611 | 3693 | ||
| 3612 | if(unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { | 3694 | if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { |
| 3613 | last_byte = *(skb->data + length - 1); | 3695 | last_byte = *(skb->data + length - 1); |
| 3614 | if(TBI_ACCEPT(&adapter->hw, rx_desc->status, | 3696 | if (TBI_ACCEPT(&adapter->hw, status, |
| 3615 | rx_desc->errors, length, last_byte)) { | 3697 | rx_desc->errors, length, last_byte)) { |
| 3616 | spin_lock_irqsave(&adapter->stats_lock, flags); | 3698 | spin_lock_irqsave(&adapter->stats_lock, flags); |
| 3617 | e1000_tbi_adjust_stats(&adapter->hw, | 3699 | e1000_tbi_adjust_stats(&adapter->hw, |
| @@ -3656,9 +3738,10 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
| 3656 | (uint32_t)(status) | | 3738 | (uint32_t)(status) | |
| 3657 | ((uint32_t)(rx_desc->errors) << 24), | 3739 | ((uint32_t)(rx_desc->errors) << 24), |
| 3658 | rx_desc->csum, skb); | 3740 | rx_desc->csum, skb); |
| 3741 | |||
| 3659 | skb->protocol = eth_type_trans(skb, netdev); | 3742 | skb->protocol = eth_type_trans(skb, netdev); |
| 3660 | #ifdef CONFIG_E1000_NAPI | 3743 | #ifdef CONFIG_E1000_NAPI |
| 3661 | if(unlikely(adapter->vlgrp && | 3744 | if (unlikely(adapter->vlgrp && |
| 3662 | (status & E1000_RXD_STAT_VP))) { | 3745 | (status & E1000_RXD_STAT_VP))) { |
| 3663 | vlan_hwaccel_receive_skb(skb, adapter->vlgrp, | 3746 | vlan_hwaccel_receive_skb(skb, adapter->vlgrp, |
| 3664 | le16_to_cpu(rx_desc->special) & | 3747 | le16_to_cpu(rx_desc->special) & |
| @@ -3667,8 +3750,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
| 3667 | netif_receive_skb(skb); | 3750 | netif_receive_skb(skb); |
| 3668 | } | 3751 | } |
| 3669 | #else /* CONFIG_E1000_NAPI */ | 3752 | #else /* CONFIG_E1000_NAPI */ |
| 3670 | if(unlikely(adapter->vlgrp && | 3753 | if (unlikely(adapter->vlgrp && |
| 3671 | (rx_desc->status & E1000_RXD_STAT_VP))) { | 3754 | (status & E1000_RXD_STAT_VP))) { |
| 3672 | vlan_hwaccel_rx(skb, adapter->vlgrp, | 3755 | vlan_hwaccel_rx(skb, adapter->vlgrp, |
| 3673 | le16_to_cpu(rx_desc->special) & | 3756 | le16_to_cpu(rx_desc->special) & |
| 3674 | E1000_RXD_SPC_VLAN_MASK); | 3757 | E1000_RXD_SPC_VLAN_MASK); |
| @@ -3691,6 +3774,8 @@ next_desc: | |||
| 3691 | cleaned_count = 0; | 3774 | cleaned_count = 0; |
| 3692 | } | 3775 | } |
| 3693 | 3776 | ||
| 3777 | rx_desc = next_rxd; | ||
| 3778 | buffer_info = next_buffer; | ||
| 3694 | } | 3779 | } |
| 3695 | rx_ring->next_to_clean = i; | 3780 | rx_ring->next_to_clean = i; |
| 3696 | 3781 | ||
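
The legacy receive path used to drop any frame that consumed more than one buffer; the hunk above instead parks non-EOP buffers on rx_ring->rx_skb_top (the second buffer goes on the frag_list, later ones are linked through ->next via rx_skb_prev) and fixes up len/data_len when the EOP buffer finally arrives. The sketch below models that accumulation with an ordinary singly linked list instead of sk_buffs; every type and name in it is invented for illustration.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct frag {
    struct frag *next;
    size_t len;
    char data[256];
};

struct rx_assembly {            /* plays the role of rx_skb_top/rx_skb_prev */
    struct frag *head;
    struct frag *tail;
    size_t total_len;
};

/* Append one buffer's worth of data; returns the finished chain on EOP. */
static struct frag *rx_add(struct rx_assembly *ra, const char *buf,
                           size_t len, bool eop)
{
    struct frag *f;

    if (len > 256)
        return NULL;
    f = calloc(1, sizeof(*f));
    if (!f)
        return NULL;
    memcpy(f->data, buf, len);
    f->len = len;

    if (!ra->head)
        ra->head = f;           /* first buffer of the frame */
    else
        ra->tail->next = f;     /* chain later buffers       */
    ra->tail = f;
    ra->total_len += len;

    if (!eop)
        return NULL;            /* keep accumulating */

    struct frag *done = ra->head;
    printf("frame complete, %zu bytes\n", ra->total_len);
    memset(ra, 0, sizeof(*ra)); /* like clearing rx_skb_top/rx_skb_prev */
    return done;
}

int main(void)
{
    struct rx_assembly ra = { 0 };
    struct frag *pkt, *f;

    rx_add(&ra, "part1-", 6, false);
    rx_add(&ra, "part2-", 6, false);
    pkt = rx_add(&ra, "end", 3, true);

    while (pkt) {               /* walk and free the finished chain */
        f = pkt;
        pkt = pkt->next;
        free(f);
    }
    return 0;
}
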
| @@ -3716,13 +3801,13 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
| 3716 | struct e1000_rx_ring *rx_ring) | 3801 | struct e1000_rx_ring *rx_ring) |
| 3717 | #endif | 3802 | #endif |
| 3718 | { | 3803 | { |
| 3719 | union e1000_rx_desc_packet_split *rx_desc; | 3804 | union e1000_rx_desc_packet_split *rx_desc, *next_rxd; |
| 3720 | struct net_device *netdev = adapter->netdev; | 3805 | struct net_device *netdev = adapter->netdev; |
| 3721 | struct pci_dev *pdev = adapter->pdev; | 3806 | struct pci_dev *pdev = adapter->pdev; |
| 3722 | struct e1000_buffer *buffer_info; | 3807 | struct e1000_buffer *buffer_info, *next_buffer; |
| 3723 | struct e1000_ps_page *ps_page; | 3808 | struct e1000_ps_page *ps_page; |
| 3724 | struct e1000_ps_page_dma *ps_page_dma; | 3809 | struct e1000_ps_page_dma *ps_page_dma; |
| 3725 | struct sk_buff *skb; | 3810 | struct sk_buff *skb, *next_skb; |
| 3726 | unsigned int i, j; | 3811 | unsigned int i, j; |
| 3727 | uint32_t length, staterr; | 3812 | uint32_t length, staterr; |
| 3728 | int cleaned_count = 0; | 3813 | int cleaned_count = 0; |
| @@ -3731,39 +3816,44 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
| 3731 | i = rx_ring->next_to_clean; | 3816 | i = rx_ring->next_to_clean; |
| 3732 | rx_desc = E1000_RX_DESC_PS(*rx_ring, i); | 3817 | rx_desc = E1000_RX_DESC_PS(*rx_ring, i); |
| 3733 | staterr = le32_to_cpu(rx_desc->wb.middle.status_error); | 3818 | staterr = le32_to_cpu(rx_desc->wb.middle.status_error); |
| 3819 | buffer_info = &rx_ring->buffer_info[i]; | ||
| 3734 | 3820 | ||
| 3735 | while(staterr & E1000_RXD_STAT_DD) { | 3821 | while (staterr & E1000_RXD_STAT_DD) { |
| 3736 | buffer_info = &rx_ring->buffer_info[i]; | ||
| 3737 | ps_page = &rx_ring->ps_page[i]; | 3822 | ps_page = &rx_ring->ps_page[i]; |
| 3738 | ps_page_dma = &rx_ring->ps_page_dma[i]; | 3823 | ps_page_dma = &rx_ring->ps_page_dma[i]; |
| 3739 | #ifdef CONFIG_E1000_NAPI | 3824 | #ifdef CONFIG_E1000_NAPI |
| 3740 | if(unlikely(*work_done >= work_to_do)) | 3825 | if (unlikely(*work_done >= work_to_do)) |
| 3741 | break; | 3826 | break; |
| 3742 | (*work_done)++; | 3827 | (*work_done)++; |
| 3743 | #endif | 3828 | #endif |
| 3829 | skb = buffer_info->skb; | ||
| 3830 | |||
| 3831 | if (++i == rx_ring->count) i = 0; | ||
| 3832 | next_rxd = E1000_RX_DESC_PS(*rx_ring, i); | ||
| 3833 | next_buffer = &rx_ring->buffer_info[i]; | ||
| 3834 | next_skb = next_buffer->skb; | ||
| 3835 | |||
| 3744 | cleaned = TRUE; | 3836 | cleaned = TRUE; |
| 3745 | cleaned_count++; | 3837 | cleaned_count++; |
| 3746 | pci_unmap_single(pdev, buffer_info->dma, | 3838 | pci_unmap_single(pdev, buffer_info->dma, |
| 3747 | buffer_info->length, | 3839 | buffer_info->length, |
| 3748 | PCI_DMA_FROMDEVICE); | 3840 | PCI_DMA_FROMDEVICE); |
| 3749 | 3841 | ||
| 3750 | skb = buffer_info->skb; | 3842 | if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) { |
| 3751 | |||
| 3752 | if(unlikely(!(staterr & E1000_RXD_STAT_EOP))) { | ||
| 3753 | E1000_DBG("%s: Packet Split buffers didn't pick up" | 3843 | E1000_DBG("%s: Packet Split buffers didn't pick up" |
| 3754 | " the full packet\n", netdev->name); | 3844 | " the full packet\n", netdev->name); |
| 3755 | dev_kfree_skb_irq(skb); | 3845 | dev_kfree_skb_irq(skb); |
| 3756 | goto next_desc; | 3846 | goto next_desc; |
| 3757 | } | 3847 | } |
| 3758 | 3848 | ||
| 3759 | if(unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { | 3849 | if (unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { |
| 3760 | dev_kfree_skb_irq(skb); | 3850 | dev_kfree_skb_irq(skb); |
| 3761 | goto next_desc; | 3851 | goto next_desc; |
| 3762 | } | 3852 | } |
| 3763 | 3853 | ||
| 3764 | length = le16_to_cpu(rx_desc->wb.middle.length0); | 3854 | length = le16_to_cpu(rx_desc->wb.middle.length0); |
| 3765 | 3855 | ||
| 3766 | if(unlikely(!length)) { | 3856 | if (unlikely(!length)) { |
| 3767 | E1000_DBG("%s: Last part of the packet spanning" | 3857 | E1000_DBG("%s: Last part of the packet spanning" |
| 3768 | " multiple descriptors\n", netdev->name); | 3858 | " multiple descriptors\n", netdev->name); |
| 3769 | dev_kfree_skb_irq(skb); | 3859 | dev_kfree_skb_irq(skb); |
| @@ -3773,8 +3863,8 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
| 3773 | /* Good Receive */ | 3863 | /* Good Receive */ |
| 3774 | skb_put(skb, length); | 3864 | skb_put(skb, length); |
| 3775 | 3865 | ||
| 3776 | for(j = 0; j < adapter->rx_ps_pages; j++) { | 3866 | for (j = 0; j < adapter->rx_ps_pages; j++) { |
| 3777 | if(!(length = le16_to_cpu(rx_desc->wb.upper.length[j]))) | 3867 | if (!(length = le16_to_cpu(rx_desc->wb.upper.length[j]))) |
| 3778 | break; | 3868 | break; |
| 3779 | 3869 | ||
| 3780 | pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j], | 3870 | pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j], |
| @@ -3794,15 +3884,11 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
| 3794 | rx_desc->wb.lower.hi_dword.csum_ip.csum, skb); | 3884 | rx_desc->wb.lower.hi_dword.csum_ip.csum, skb); |
| 3795 | skb->protocol = eth_type_trans(skb, netdev); | 3885 | skb->protocol = eth_type_trans(skb, netdev); |
| 3796 | 3886 | ||
| 3797 | if(likely(rx_desc->wb.upper.header_status & | 3887 | if (likely(rx_desc->wb.upper.header_status & |
| 3798 | E1000_RXDPS_HDRSTAT_HDRSP)) { | 3888 | E1000_RXDPS_HDRSTAT_HDRSP)) |
| 3799 | adapter->rx_hdr_split++; | 3889 | adapter->rx_hdr_split++; |
| 3800 | #ifdef HAVE_RX_ZERO_COPY | ||
| 3801 | skb_shinfo(skb)->zero_copy = TRUE; | ||
| 3802 | #endif | ||
| 3803 | } | ||
| 3804 | #ifdef CONFIG_E1000_NAPI | 3890 | #ifdef CONFIG_E1000_NAPI |
| 3805 | if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) { | 3891 | if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) { |
| 3806 | vlan_hwaccel_receive_skb(skb, adapter->vlgrp, | 3892 | vlan_hwaccel_receive_skb(skb, adapter->vlgrp, |
| 3807 | le16_to_cpu(rx_desc->wb.middle.vlan) & | 3893 | le16_to_cpu(rx_desc->wb.middle.vlan) & |
| 3808 | E1000_RXD_SPC_VLAN_MASK); | 3894 | E1000_RXD_SPC_VLAN_MASK); |
| @@ -3810,7 +3896,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
| 3810 | netif_receive_skb(skb); | 3896 | netif_receive_skb(skb); |
| 3811 | } | 3897 | } |
| 3812 | #else /* CONFIG_E1000_NAPI */ | 3898 | #else /* CONFIG_E1000_NAPI */ |
| 3813 | if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) { | 3899 | if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) { |
| 3814 | vlan_hwaccel_rx(skb, adapter->vlgrp, | 3900 | vlan_hwaccel_rx(skb, adapter->vlgrp, |
| 3815 | le16_to_cpu(rx_desc->wb.middle.vlan) & | 3901 | le16_to_cpu(rx_desc->wb.middle.vlan) & |
| 3816 | E1000_RXD_SPC_VLAN_MASK); | 3902 | E1000_RXD_SPC_VLAN_MASK); |
| @@ -3834,6 +3920,9 @@ next_desc: | |||
| 3834 | cleaned_count = 0; | 3920 | cleaned_count = 0; |
| 3835 | } | 3921 | } |
| 3836 | 3922 | ||
| 3923 | rx_desc = next_rxd; | ||
| 3924 | buffer_info = next_buffer; | ||
| 3925 | |||
| 3837 | staterr = le32_to_cpu(rx_desc->wb.middle.status_error); | 3926 | staterr = le32_to_cpu(rx_desc->wb.middle.status_error); |
| 3838 | } | 3927 | } |
| 3839 | rx_ring->next_to_clean = i; | 3928 | rx_ring->next_to_clean = i; |
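
Both cleanup loops now hoist buffer_info out of the loop body and capture next_rxd/next_buffer/next_skb as soon as the current entry is claimed, then advance through those cached pointers at the bottom of the loop instead of recomputing everything from i. A stripped-down model of that loop shape over a plain array ring (no DMA, no NAPI budget; field names are hypothetical):

#include <stdio.h>

#define RING_SIZE 8

struct desc { int status; int data; };

/* Walk all completed descriptors, caching the successor up front. */
static void clean_ring(struct desc *ring, unsigned int *next_to_clean)
{
    unsigned int i = *next_to_clean;
    struct desc *rxd = &ring[i];

    while (rxd->status) {            /* "descriptor done" stand-in */
        if (++i == RING_SIZE)        /* wrap exactly like the driver */
            i = 0;
        struct desc *next_rxd = &ring[i];

        printf("consumed data %d\n", rxd->data);
        rxd->status = 0;             /* hand the slot back */

        rxd = next_rxd;              /* advance using the cached pointer */
    }
    *next_to_clean = i;
}

int main(void)
{
    struct desc ring[RING_SIZE] = { {1, 10}, {1, 11}, {1, 12} };
    unsigned int ntc = 0;

    clean_ring(ring, &ntc);
    printf("next_to_clean = %u\n", ntc);    /* 3 */
    return 0;
}
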
| @@ -3875,7 +3964,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter, | |||
| 3875 | } | 3964 | } |
| 3876 | 3965 | ||
| 3877 | 3966 | ||
| 3878 | if(unlikely(!skb)) { | 3967 | if (unlikely(!skb)) { |
| 3879 | /* Better luck next round */ | 3968 | /* Better luck next round */ |
| 3880 | adapter->alloc_rx_buff_failed++; | 3969 | adapter->alloc_rx_buff_failed++; |
| 3881 | break; | 3970 | break; |
| @@ -3940,20 +4029,23 @@ map_skb: | |||
| 3940 | rx_desc = E1000_RX_DESC(*rx_ring, i); | 4029 | rx_desc = E1000_RX_DESC(*rx_ring, i); |
| 3941 | rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); | 4030 | rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); |
| 3942 | 4031 | ||
| 3943 | if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) { | 4032 | if (unlikely(++i == rx_ring->count)) |
| 3944 | /* Force memory writes to complete before letting h/w | 4033 | i = 0; |
| 3945 | * know there are new descriptors to fetch. (Only | ||
| 3946 | * applicable for weak-ordered memory model archs, | ||
| 3947 | * such as IA-64). */ | ||
| 3948 | wmb(); | ||
| 3949 | writel(i, adapter->hw.hw_addr + rx_ring->rdt); | ||
| 3950 | } | ||
| 3951 | |||
| 3952 | if(unlikely(++i == rx_ring->count)) i = 0; | ||
| 3953 | buffer_info = &rx_ring->buffer_info[i]; | 4034 | buffer_info = &rx_ring->buffer_info[i]; |
| 3954 | } | 4035 | } |
| 3955 | 4036 | ||
| 3956 | rx_ring->next_to_use = i; | 4037 | if (likely(rx_ring->next_to_use != i)) { |
| 4038 | rx_ring->next_to_use = i; | ||
| 4039 | if (unlikely(i-- == 0)) | ||
| 4040 | i = (rx_ring->count - 1); | ||
| 4041 | |||
| 4042 | /* Force memory writes to complete before letting h/w | ||
| 4043 | * know there are new descriptors to fetch. (Only | ||
| 4044 | * applicable for weak-ordered memory model archs, | ||
| 4045 | * such as IA-64). */ | ||
| 4046 | wmb(); | ||
| 4047 | writel(i, adapter->hw.hw_addr + rx_ring->rdt); | ||
| 4048 | } | ||
| 3957 | } | 4049 | } |
| 3958 | 4050 | ||
| 3959 | /** | 4051 | /** |
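
e1000_alloc_rx_buffers() used to hit the RDT tail register every E1000_RX_BUFFER_WRITE descriptors inside the loop; after the hunk above it writes the tail once per call, only if anything was actually allocated, backs the index up by one (with wraparound) so the write names the last descriptor that was filled in, and keeps the wmb() so descriptor contents are globally visible before hardware sees the new tail. A host-side model of that final step with the register write and barrier mocked out; all names are hypothetical.

#include <stdint.h>
#include <stdio.h>

#define RING_COUNT 256u

static uint32_t fake_rdt;                 /* stands in for the RDT register */

static void wmb_stub(void) { /* a real memory barrier would go here */ }
static void writel_stub(uint32_t v, uint32_t *reg) { *reg = v; }

/* Publish newly filled rx descriptors to "hardware" once per replenish pass. */
static void publish_tail(unsigned int *next_to_use, unsigned int i)
{
    if (*next_to_use == i)                /* nothing was allocated */
        return;

    *next_to_use = i;
    if (i-- == 0)                         /* back up to the last filled slot */
        i = RING_COUNT - 1;

    wmb_stub();                           /* order descriptor writes vs. tail */
    writel_stub(i, &fake_rdt);
}

int main(void)
{
    unsigned int next_to_use = 10;

    publish_tail(&next_to_use, 0);        /* allocation wrapped around the ring */
    printf("RDT = %u, next_to_use = %u\n", fake_rdt, next_to_use);  /* 255, 0 */
    return 0;
}
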
| @@ -3983,13 +4075,15 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
| 3983 | while (cleaned_count--) { | 4075 | while (cleaned_count--) { |
| 3984 | rx_desc = E1000_RX_DESC_PS(*rx_ring, i); | 4076 | rx_desc = E1000_RX_DESC_PS(*rx_ring, i); |
| 3985 | 4077 | ||
| 3986 | for(j = 0; j < PS_PAGE_BUFFERS; j++) { | 4078 | for (j = 0; j < PS_PAGE_BUFFERS; j++) { |
| 3987 | if (j < adapter->rx_ps_pages) { | 4079 | if (j < adapter->rx_ps_pages) { |
| 3988 | if (likely(!ps_page->ps_page[j])) { | 4080 | if (likely(!ps_page->ps_page[j])) { |
| 3989 | ps_page->ps_page[j] = | 4081 | ps_page->ps_page[j] = |
| 3990 | alloc_page(GFP_ATOMIC); | 4082 | alloc_page(GFP_ATOMIC); |
| 3991 | if (unlikely(!ps_page->ps_page[j])) | 4083 | if (unlikely(!ps_page->ps_page[j])) { |
| 4084 | adapter->alloc_rx_buff_failed++; | ||
| 3992 | goto no_buffers; | 4085 | goto no_buffers; |
| 4086 | } | ||
| 3993 | ps_page_dma->ps_page_dma[j] = | 4087 | ps_page_dma->ps_page_dma[j] = |
| 3994 | pci_map_page(pdev, | 4088 | pci_map_page(pdev, |
| 3995 | ps_page->ps_page[j], | 4089 | ps_page->ps_page[j], |
| @@ -3997,7 +4091,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
| 3997 | PCI_DMA_FROMDEVICE); | 4091 | PCI_DMA_FROMDEVICE); |
| 3998 | } | 4092 | } |
| 3999 | /* Refresh the desc even if buffer_addrs didn't | 4093 | /* Refresh the desc even if buffer_addrs didn't |
| 4000 | * change because each write-back erases | 4094 | * change because each write-back erases |
| 4001 | * this info. | 4095 | * this info. |
| 4002 | */ | 4096 | */ |
| 4003 | rx_desc->read.buffer_addr[j+1] = | 4097 | rx_desc->read.buffer_addr[j+1] = |
| @@ -4008,8 +4102,10 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
| 4008 | 4102 | ||
| 4009 | skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN); | 4103 | skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN); |
| 4010 | 4104 | ||
| 4011 | if(unlikely(!skb)) | 4105 | if (unlikely(!skb)) { |
| 4106 | adapter->alloc_rx_buff_failed++; | ||
| 4012 | break; | 4107 | break; |
| 4108 | } | ||
| 4013 | 4109 | ||
| 4014 | /* Make buffer alignment 2 beyond a 16 byte boundary | 4110 | /* Make buffer alignment 2 beyond a 16 byte boundary |
| 4015 | * this will result in a 16 byte aligned IP header after | 4111 | * this will result in a 16 byte aligned IP header after |
| @@ -4027,27 +4123,28 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
| 4027 | 4123 | ||
| 4028 | rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); | 4124 | rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); |
| 4029 | 4125 | ||
| 4030 | if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) { | 4126 | if (unlikely(++i == rx_ring->count)) i = 0; |
| 4031 | /* Force memory writes to complete before letting h/w | ||
| 4032 | * know there are new descriptors to fetch. (Only | ||
| 4033 | * applicable for weak-ordered memory model archs, | ||
| 4034 | * such as IA-64). */ | ||
| 4035 | wmb(); | ||
| 4036 | /* Hardware increments by 16 bytes, but packet split | ||
| 4037 | * descriptors are 32 bytes...so we increment tail | ||
| 4038 | * twice as much. | ||
| 4039 | */ | ||
| 4040 | writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt); | ||
| 4041 | } | ||
| 4042 | |||
| 4043 | if(unlikely(++i == rx_ring->count)) i = 0; | ||
| 4044 | buffer_info = &rx_ring->buffer_info[i]; | 4127 | buffer_info = &rx_ring->buffer_info[i]; |
| 4045 | ps_page = &rx_ring->ps_page[i]; | 4128 | ps_page = &rx_ring->ps_page[i]; |
| 4046 | ps_page_dma = &rx_ring->ps_page_dma[i]; | 4129 | ps_page_dma = &rx_ring->ps_page_dma[i]; |
| 4047 | } | 4130 | } |
| 4048 | 4131 | ||
| 4049 | no_buffers: | 4132 | no_buffers: |
| 4050 | rx_ring->next_to_use = i; | 4133 | if (likely(rx_ring->next_to_use != i)) { |
| 4134 | rx_ring->next_to_use = i; | ||
| 4135 | if (unlikely(i-- == 0)) i = (rx_ring->count - 1); | ||
| 4136 | |||
| 4137 | /* Force memory writes to complete before letting h/w | ||
| 4138 | * know there are new descriptors to fetch. (Only | ||
| 4139 | * applicable for weak-ordered memory model archs, | ||
| 4140 | * such as IA-64). */ | ||
| 4141 | wmb(); | ||
| 4142 | /* Hardware increments by 16 bytes, but packet split | ||
| 4143 | * descriptors are 32 bytes...so we increment tail | ||
| 4144 | * twice as much. | ||
| 4145 | */ | ||
| 4146 | writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt); | ||
| 4147 | } | ||
| 4051 | } | 4148 | } |
| 4052 | 4149 | ||
| 4053 | /** | 4150 | /** |
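
The packet-split allocator gets the same write-the-tail-once treatment and keeps the quirk the old in-loop code already had: RDT advances in 16-byte units while packet-split descriptors are 32 bytes, so the index is shifted left by one before being written. A trivial illustration of that scaling (names invented):

#include <assert.h>
#include <stdio.h>

/* Tail values are in 16-byte hardware units; a 32-byte packet-split
 * descriptor therefore needs its index doubled before it hits RDT. */
static unsigned int tail_value(unsigned int index, unsigned int desc_bytes)
{
    assert(desc_bytes % 16 == 0);
    return index * (desc_bytes / 16);
}

int main(void)
{
    printf("legacy ring: %u\n", tail_value(42, 16));   /* 42            */
    printf("ps ring:     %u\n", tail_value(42, 32));   /* 84 == 42 << 1 */
    return 0;
}
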
| @@ -4061,24 +4158,24 @@ e1000_smartspeed(struct e1000_adapter *adapter) | |||
| 4061 | uint16_t phy_status; | 4158 | uint16_t phy_status; |
| 4062 | uint16_t phy_ctrl; | 4159 | uint16_t phy_ctrl; |
| 4063 | 4160 | ||
| 4064 | if((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg || | 4161 | if ((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg || |
| 4065 | !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL)) | 4162 | !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL)) |
| 4066 | return; | 4163 | return; |
| 4067 | 4164 | ||
| 4068 | if(adapter->smartspeed == 0) { | 4165 | if (adapter->smartspeed == 0) { |
| 4069 | /* If Master/Slave config fault is asserted twice, | 4166 | /* If Master/Slave config fault is asserted twice, |
| 4070 | * we assume back-to-back */ | 4167 | * we assume back-to-back */ |
| 4071 | e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status); | 4168 | e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status); |
| 4072 | if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; | 4169 | if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; |
| 4073 | e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status); | 4170 | e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status); |
| 4074 | if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; | 4171 | if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; |
| 4075 | e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl); | 4172 | e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl); |
| 4076 | if(phy_ctrl & CR_1000T_MS_ENABLE) { | 4173 | if (phy_ctrl & CR_1000T_MS_ENABLE) { |
| 4077 | phy_ctrl &= ~CR_1000T_MS_ENABLE; | 4174 | phy_ctrl &= ~CR_1000T_MS_ENABLE; |
| 4078 | e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, | 4175 | e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, |
| 4079 | phy_ctrl); | 4176 | phy_ctrl); |
| 4080 | adapter->smartspeed++; | 4177 | adapter->smartspeed++; |
| 4081 | if(!e1000_phy_setup_autoneg(&adapter->hw) && | 4178 | if (!e1000_phy_setup_autoneg(&adapter->hw) && |
| 4082 | !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, | 4179 | !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, |
| 4083 | &phy_ctrl)) { | 4180 | &phy_ctrl)) { |
| 4084 | phy_ctrl |= (MII_CR_AUTO_NEG_EN | | 4181 | phy_ctrl |= (MII_CR_AUTO_NEG_EN | |
| @@ -4088,12 +4185,12 @@ e1000_smartspeed(struct e1000_adapter *adapter) | |||
| 4088 | } | 4185 | } |
| 4089 | } | 4186 | } |
| 4090 | return; | 4187 | return; |
| 4091 | } else if(adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { | 4188 | } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { |
| 4092 | /* If still no link, perhaps using 2/3 pair cable */ | 4189 | /* If still no link, perhaps using 2/3 pair cable */ |
| 4093 | e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl); | 4190 | e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl); |
| 4094 | phy_ctrl |= CR_1000T_MS_ENABLE; | 4191 | phy_ctrl |= CR_1000T_MS_ENABLE; |
| 4095 | e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl); | 4192 | e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl); |
| 4096 | if(!e1000_phy_setup_autoneg(&adapter->hw) && | 4193 | if (!e1000_phy_setup_autoneg(&adapter->hw) && |
| 4097 | !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) { | 4194 | !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) { |
| 4098 | phy_ctrl |= (MII_CR_AUTO_NEG_EN | | 4195 | phy_ctrl |= (MII_CR_AUTO_NEG_EN | |
| 4099 | MII_CR_RESTART_AUTO_NEG); | 4196 | MII_CR_RESTART_AUTO_NEG); |
| @@ -4101,7 +4198,7 @@ e1000_smartspeed(struct e1000_adapter *adapter) | |||
| 4101 | } | 4198 | } |
| 4102 | } | 4199 | } |
| 4103 | /* Restart process after E1000_SMARTSPEED_MAX iterations */ | 4200 | /* Restart process after E1000_SMARTSPEED_MAX iterations */ |
| 4104 | if(adapter->smartspeed++ == E1000_SMARTSPEED_MAX) | 4201 | if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX) |
| 4105 | adapter->smartspeed = 0; | 4202 | adapter->smartspeed = 0; |
| 4106 | } | 4203 | } |
| 4107 | 4204 | ||
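
For reference, e1000_smartspeed() is a polled workaround driven by a per-adapter counter: on an IGP copper PHY advertising 1000/Full it clears the master/slave enable bit after back-to-back config faults, re-enables it around E1000_SMARTSPEED_DOWNSHIFT passes if the link still has not come up, and restarts the whole cycle after E1000_SMARTSPEED_MAX passes. A heavily compressed model of that counter-driven state machine, with the PHY accesses stubbed out and every constant and field name made up:

#include <stdbool.h>
#include <stdio.h>

#define SMARTSPEED_DOWNSHIFT 3    /* pass on which the downshift is undone   */
#define SMARTSPEED_MAX       15   /* pass on which the counter is restarted  */

struct phy_stub {
    bool ms_config_fault;         /* SR_1000T_MS_CONFIG_FAULT stand-in */
    bool ms_enable;               /* CR_1000T_MS_ENABLE stand-in       */
    bool link_up;
};

/* One watchdog pass of the workaround; returns the updated counter. */
static int smartspeed_pass(struct phy_stub *phy, int smartspeed)
{
    if (phy->link_up)
        return 0;

    if (smartspeed == 0) {
        if (!phy->ms_config_fault)
            return 0;             /* no fault seen, nothing to do        */
        phy->ms_enable = false;   /* force slave, restart autonegotiation */
        return 1;
    }

    if (smartspeed == SMARTSPEED_DOWNSHIFT)
        phy->ms_enable = true;    /* still no link: undo the downshift    */

    if (++smartspeed == SMARTSPEED_MAX)
        smartspeed = 0;           /* give up and restart the whole cycle  */
    return smartspeed;
}

int main(void)
{
    struct phy_stub phy = { .ms_config_fault = true };
    int ss = 0;

    for (int pass = 0; pass < 20; pass++)
        ss = smartspeed_pass(&phy, ss);
    printf("counter after 20 passes: %d\n", ss);
    return 0;
}
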
| @@ -4142,7 +4239,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
| 4142 | uint16_t spddplx; | 4239 | uint16_t spddplx; |
| 4143 | unsigned long flags; | 4240 | unsigned long flags; |
| 4144 | 4241 | ||
| 4145 | if(adapter->hw.media_type != e1000_media_type_copper) | 4242 | if (adapter->hw.media_type != e1000_media_type_copper) |
| 4146 | return -EOPNOTSUPP; | 4243 | return -EOPNOTSUPP; |
| 4147 | 4244 | ||
| 4148 | switch (cmd) { | 4245 | switch (cmd) { |
| @@ -4150,10 +4247,10 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
| 4150 | data->phy_id = adapter->hw.phy_addr; | 4247 | data->phy_id = adapter->hw.phy_addr; |
| 4151 | break; | 4248 | break; |
| 4152 | case SIOCGMIIREG: | 4249 | case SIOCGMIIREG: |
| 4153 | if(!capable(CAP_NET_ADMIN)) | 4250 | if (!capable(CAP_NET_ADMIN)) |
| 4154 | return -EPERM; | 4251 | return -EPERM; |
| 4155 | spin_lock_irqsave(&adapter->stats_lock, flags); | 4252 | spin_lock_irqsave(&adapter->stats_lock, flags); |
| 4156 | if(e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, | 4253 | if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, |
| 4157 | &data->val_out)) { | 4254 | &data->val_out)) { |
| 4158 | spin_unlock_irqrestore(&adapter->stats_lock, flags); | 4255 | spin_unlock_irqrestore(&adapter->stats_lock, flags); |
| 4159 | return -EIO; | 4256 | return -EIO; |
| @@ -4161,23 +4258,23 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
| 4161 | spin_unlock_irqrestore(&adapter->stats_lock, flags); | 4258 | spin_unlock_irqrestore(&adapter->stats_lock, flags); |
| 4162 | break; | 4259 | break; |
| 4163 | case SIOCSMIIREG: | 4260 | case SIOCSMIIREG: |
| 4164 | if(!capable(CAP_NET_ADMIN)) | 4261 | if (!capable(CAP_NET_ADMIN)) |
| 4165 | return -EPERM; | 4262 | return -EPERM; |
| 4166 | if(data->reg_num & ~(0x1F)) | 4263 | if (data->reg_num & ~(0x1F)) |
| 4167 | return -EFAULT; | 4264 | return -EFAULT; |
| 4168 | mii_reg = data->val_in; | 4265 | mii_reg = data->val_in; |
| 4169 | spin_lock_irqsave(&adapter->stats_lock, flags); | 4266 | spin_lock_irqsave(&adapter->stats_lock, flags); |
| 4170 | if(e1000_write_phy_reg(&adapter->hw, data->reg_num, | 4267 | if (e1000_write_phy_reg(&adapter->hw, data->reg_num, |
| 4171 | mii_reg)) { | 4268 | mii_reg)) { |
| 4172 | spin_unlock_irqrestore(&adapter->stats_lock, flags); | 4269 | spin_unlock_irqrestore(&adapter->stats_lock, flags); |
| 4173 | return -EIO; | 4270 | return -EIO; |
| 4174 | } | 4271 | } |
| 4175 | if(adapter->hw.phy_type == e1000_phy_m88) { | 4272 | if (adapter->hw.phy_type == e1000_phy_m88) { |
| 4176 | switch (data->reg_num) { | 4273 | switch (data->reg_num) { |
| 4177 | case PHY_CTRL: | 4274 | case PHY_CTRL: |
| 4178 | if(mii_reg & MII_CR_POWER_DOWN) | 4275 | if (mii_reg & MII_CR_POWER_DOWN) |
| 4179 | break; | 4276 | break; |
| 4180 | if(mii_reg & MII_CR_AUTO_NEG_EN) { | 4277 | if (mii_reg & MII_CR_AUTO_NEG_EN) { |
| 4181 | adapter->hw.autoneg = 1; | 4278 | adapter->hw.autoneg = 1; |
| 4182 | adapter->hw.autoneg_advertised = 0x2F; | 4279 | adapter->hw.autoneg_advertised = 0x2F; |
| 4183 | } else { | 4280 | } else { |
| @@ -4192,14 +4289,14 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
| 4192 | HALF_DUPLEX; | 4289 | HALF_DUPLEX; |
| 4193 | retval = e1000_set_spd_dplx(adapter, | 4290 | retval = e1000_set_spd_dplx(adapter, |
| 4194 | spddplx); | 4291 | spddplx); |
| 4195 | if(retval) { | 4292 | if (retval) { |
| 4196 | spin_unlock_irqrestore( | 4293 | spin_unlock_irqrestore( |
| 4197 | &adapter->stats_lock, | 4294 | &adapter->stats_lock, |
| 4198 | flags); | 4295 | flags); |
| 4199 | return retval; | 4296 | return retval; |
| 4200 | } | 4297 | } |
| 4201 | } | 4298 | } |
| 4202 | if(netif_running(adapter->netdev)) { | 4299 | if (netif_running(adapter->netdev)) { |
| 4203 | e1000_down(adapter); | 4300 | e1000_down(adapter); |
| 4204 | e1000_up(adapter); | 4301 | e1000_up(adapter); |
| 4205 | } else | 4302 | } else |
| @@ -4207,7 +4304,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
| 4207 | break; | 4304 | break; |
| 4208 | case M88E1000_PHY_SPEC_CTRL: | 4305 | case M88E1000_PHY_SPEC_CTRL: |
| 4209 | case M88E1000_EXT_PHY_SPEC_CTRL: | 4306 | case M88E1000_EXT_PHY_SPEC_CTRL: |
| 4210 | if(e1000_phy_reset(&adapter->hw)) { | 4307 | if (e1000_phy_reset(&adapter->hw)) { |
| 4211 | spin_unlock_irqrestore( | 4308 | spin_unlock_irqrestore( |
| 4212 | &adapter->stats_lock, flags); | 4309 | &adapter->stats_lock, flags); |
| 4213 | return -EIO; | 4310 | return -EIO; |
| @@ -4217,9 +4314,9 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
| 4217 | } else { | 4314 | } else { |
| 4218 | switch (data->reg_num) { | 4315 | switch (data->reg_num) { |
| 4219 | case PHY_CTRL: | 4316 | case PHY_CTRL: |
| 4220 | if(mii_reg & MII_CR_POWER_DOWN) | 4317 | if (mii_reg & MII_CR_POWER_DOWN) |
| 4221 | break; | 4318 | break; |
| 4222 | if(netif_running(adapter->netdev)) { | 4319 | if (netif_running(adapter->netdev)) { |
| 4223 | e1000_down(adapter); | 4320 | e1000_down(adapter); |
| 4224 | e1000_up(adapter); | 4321 | e1000_up(adapter); |
| 4225 | } else | 4322 | } else |
| @@ -4241,7 +4338,7 @@ e1000_pci_set_mwi(struct e1000_hw *hw) | |||
| 4241 | struct e1000_adapter *adapter = hw->back; | 4338 | struct e1000_adapter *adapter = hw->back; |
| 4242 | int ret_val = pci_set_mwi(adapter->pdev); | 4339 | int ret_val = pci_set_mwi(adapter->pdev); |
| 4243 | 4340 | ||
| 4244 | if(ret_val) | 4341 | if (ret_val) |
| 4245 | DPRINTK(PROBE, ERR, "Error in setting MWI\n"); | 4342 | DPRINTK(PROBE, ERR, "Error in setting MWI\n"); |
| 4246 | } | 4343 | } |
| 4247 | 4344 | ||
| @@ -4290,7 +4387,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) | |||
| 4290 | e1000_irq_disable(adapter); | 4387 | e1000_irq_disable(adapter); |
| 4291 | adapter->vlgrp = grp; | 4388 | adapter->vlgrp = grp; |
| 4292 | 4389 | ||
| 4293 | if(grp) { | 4390 | if (grp) { |
| 4294 | /* enable VLAN tag insert/strip */ | 4391 | /* enable VLAN tag insert/strip */ |
| 4295 | ctrl = E1000_READ_REG(&adapter->hw, CTRL); | 4392 | ctrl = E1000_READ_REG(&adapter->hw, CTRL); |
| 4296 | ctrl |= E1000_CTRL_VME; | 4393 | ctrl |= E1000_CTRL_VME; |
| @@ -4312,7 +4409,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) | |||
| 4312 | rctl = E1000_READ_REG(&adapter->hw, RCTL); | 4409 | rctl = E1000_READ_REG(&adapter->hw, RCTL); |
| 4313 | rctl &= ~E1000_RCTL_VFE; | 4410 | rctl &= ~E1000_RCTL_VFE; |
| 4314 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl); | 4411 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl); |
| 4315 | if(adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) { | 4412 | if (adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) { |
| 4316 | e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); | 4413 | e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); |
| 4317 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; | 4414 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; |
| 4318 | } | 4415 | } |
| @@ -4326,9 +4423,10 @@ e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid) | |||
| 4326 | { | 4423 | { |
| 4327 | struct e1000_adapter *adapter = netdev_priv(netdev); | 4424 | struct e1000_adapter *adapter = netdev_priv(netdev); |
| 4328 | uint32_t vfta, index; | 4425 | uint32_t vfta, index; |
| 4329 | if((adapter->hw.mng_cookie.status & | 4426 | |
| 4330 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && | 4427 | if ((adapter->hw.mng_cookie.status & |
| 4331 | (vid == adapter->mng_vlan_id)) | 4428 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && |
| 4429 | (vid == adapter->mng_vlan_id)) | ||
| 4332 | return; | 4430 | return; |
| 4333 | /* add VID to filter table */ | 4431 | /* add VID to filter table */ |
| 4334 | index = (vid >> 5) & 0x7F; | 4432 | index = (vid >> 5) & 0x7F; |
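
The VLAN filter table update just above packs the 4096 possible VIDs into 128 32-bit VFTA entries: VID bits 11:5 pick the entry and bits 4:0 pick the bit inside it (kill_vid clears the same bit). A stand-alone sketch of that indexing against an in-memory table; the real driver goes through E1000_VFTA register accessors, which are reduced to an array here.

#include <stdint.h>
#include <stdio.h>

#define VFTA_ENTRIES 128                  /* 128 x 32 bits = 4096 VIDs */

static uint32_t vfta[VFTA_ENTRIES];       /* stands in for the VFTA registers */

static void vfta_set(uint16_t vid, int enable)
{
    uint32_t index = (vid >> 5) & 0x7F;   /* which 32-bit entry        */
    uint32_t bit   = 1u << (vid & 0x1F);  /* which bit inside it       */

    if (enable)
        vfta[index] |= bit;
    else
        vfta[index] &= ~bit;
}

int main(void)
{
    vfta_set(100, 1);                     /* 100 >> 5 == 3, bit 4      */
    printf("add:  vfta[3] = 0x%08x\n", (unsigned)vfta[3]);
    vfta_set(100, 0);
    printf("kill: vfta[3] = 0x%08x\n", (unsigned)vfta[3]);
    return 0;
}
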
| @@ -4345,13 +4443,13 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid) | |||
| 4345 | 4443 | ||
| 4346 | e1000_irq_disable(adapter); | 4444 | e1000_irq_disable(adapter); |
| 4347 | 4445 | ||
| 4348 | if(adapter->vlgrp) | 4446 | if (adapter->vlgrp) |
| 4349 | adapter->vlgrp->vlan_devices[vid] = NULL; | 4447 | adapter->vlgrp->vlan_devices[vid] = NULL; |
| 4350 | 4448 | ||
| 4351 | e1000_irq_enable(adapter); | 4449 | e1000_irq_enable(adapter); |
| 4352 | 4450 | ||
| 4353 | if((adapter->hw.mng_cookie.status & | 4451 | if ((adapter->hw.mng_cookie.status & |
| 4354 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && | 4452 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && |
| 4355 | (vid == adapter->mng_vlan_id)) { | 4453 | (vid == adapter->mng_vlan_id)) { |
| 4356 | /* release control to f/w */ | 4454 | /* release control to f/w */ |
| 4357 | e1000_release_hw_control(adapter); | 4455 | e1000_release_hw_control(adapter); |
| @@ -4370,10 +4468,10 @@ e1000_restore_vlan(struct e1000_adapter *adapter) | |||
| 4370 | { | 4468 | { |
| 4371 | e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp); | 4469 | e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp); |
| 4372 | 4470 | ||
| 4373 | if(adapter->vlgrp) { | 4471 | if (adapter->vlgrp) { |
| 4374 | uint16_t vid; | 4472 | uint16_t vid; |
| 4375 | for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { | 4473 | for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { |
| 4376 | if(!adapter->vlgrp->vlan_devices[vid]) | 4474 | if (!adapter->vlgrp->vlan_devices[vid]) |
| 4377 | continue; | 4475 | continue; |
| 4378 | e1000_vlan_rx_add_vid(adapter->netdev, vid); | 4476 | e1000_vlan_rx_add_vid(adapter->netdev, vid); |
| 4379 | } | 4477 | } |
| @@ -4386,13 +4484,13 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx) | |||
| 4386 | adapter->hw.autoneg = 0; | 4484 | adapter->hw.autoneg = 0; |
| 4387 | 4485 | ||
| 4388 | /* Fiber NICs only allow 1000 Mbps full duplex */ | 4486 |
| 4389 | if((adapter->hw.media_type == e1000_media_type_fiber) && | 4487 | if ((adapter->hw.media_type == e1000_media_type_fiber) && |
| 4390 | spddplx != (SPEED_1000 + DUPLEX_FULL)) { | 4488 | spddplx != (SPEED_1000 + DUPLEX_FULL)) { |
| 4391 | DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n"); | 4489 | DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n"); |
| 4392 | return -EINVAL; | 4490 | return -EINVAL; |
| 4393 | } | 4491 | } |
| 4394 | 4492 | ||
| 4395 | switch(spddplx) { | 4493 | switch (spddplx) { |
| 4396 | case SPEED_10 + DUPLEX_HALF: | 4494 | case SPEED_10 + DUPLEX_HALF: |
| 4397 | adapter->hw.forced_speed_duplex = e1000_10_half; | 4495 | adapter->hw.forced_speed_duplex = e1000_10_half; |
| 4398 | break; | 4496 | break; |
| @@ -4418,6 +4516,54 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx) | |||
| 4418 | } | 4516 | } |
| 4419 | 4517 | ||
| 4420 | #ifdef CONFIG_PM | 4518 | #ifdef CONFIG_PM |
| 4519 | /* these functions save and restore 16 or 64 dwords (64-256 bytes) of config | ||
| 4520 | * space versus the 64 bytes that pci_[save|restore]_state handle | ||
| 4521 | */ | ||
| 4522 | #define PCIE_CONFIG_SPACE_LEN 256 | ||
| 4523 | #define PCI_CONFIG_SPACE_LEN 64 | ||
| 4524 | static int | ||
| 4525 | e1000_pci_save_state(struct e1000_adapter *adapter) | ||
| 4526 | { | ||
| 4527 | struct pci_dev *dev = adapter->pdev; | ||
| 4528 | int size; | ||
| 4529 | int i; | ||
| 4530 | if (adapter->hw.mac_type >= e1000_82571) | ||
| 4531 | size = PCIE_CONFIG_SPACE_LEN; | ||
| 4532 | else | ||
| 4533 | size = PCI_CONFIG_SPACE_LEN; | ||
| 4534 | |||
| 4535 | WARN_ON(adapter->config_space != NULL); | ||
| 4536 | |||
| 4537 | adapter->config_space = kmalloc(size, GFP_KERNEL); | ||
| 4538 | if (!adapter->config_space) { | ||
| 4539 | DPRINTK(PROBE, ERR, "unable to allocate %d bytes\n", size); | ||
| 4540 | return -ENOMEM; | ||
| 4541 | } | ||
| 4542 | for (i = 0; i < (size / 4); i++) | ||
| 4543 | pci_read_config_dword(dev, i * 4, &adapter->config_space[i]); | ||
| 4544 | return 0; | ||
| 4545 | } | ||
| 4546 | |||
| 4547 | static void | ||
| 4548 | e1000_pci_restore_state(struct e1000_adapter *adapter) | ||
| 4549 | { | ||
| 4550 | struct pci_dev *dev = adapter->pdev; | ||
| 4551 | int size; | ||
| 4552 | int i; | ||
| 4553 | if (adapter->config_space == NULL) | ||
| 4554 | return; | ||
| 4555 | if (adapter->hw.mac_type >= e1000_82571) | ||
| 4556 | size = PCIE_CONFIG_SPACE_LEN; | ||
| 4557 | else | ||
| 4558 | size = PCI_CONFIG_SPACE_LEN; | ||
| 4559 | for (i = 0; i < (size / 4); i++) | ||
| 4560 | pci_write_config_dword(dev, i * 4, adapter->config_space[i]); | ||
| 4561 | kfree(adapter->config_space); | ||
| 4562 | adapter->config_space = NULL; | ||
| 4563 | return; | ||
| 4564 | } | ||
| 4565 | #endif /* CONFIG_PM */ | ||
| 4566 | |||
| 4421 | static int | 4567 | static int |
| 4422 | e1000_suspend(struct pci_dev *pdev, pm_message_t state) | 4568 | e1000_suspend(struct pci_dev *pdev, pm_message_t state) |
| 4423 | { | 4569 | { |
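
e1000_pci_save_state()/e1000_pci_restore_state() are added because, as their comment notes, pci_save_state() of this era only preserved the first 64 bytes of config space while the PCI Express parts (82571 and later) carry 256 bytes worth. A user-space sketch of the same dword-by-dword copy against an in-memory stand-in for config space; the driver itself uses pci_read_config_dword()/pci_write_config_dword() and kmalloc().

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PCIE_CONFIG_SPACE_LEN 256
#define PCI_CONFIG_SPACE_LEN   64

static uint8_t fake_cfg[PCIE_CONFIG_SPACE_LEN];   /* pretend device */

static void cfg_read_dword(int off, uint32_t *val)  { memcpy(val, fake_cfg + off, 4); }
static void cfg_write_dword(int off, uint32_t val)  { memcpy(fake_cfg + off, &val, 4); }

static uint32_t *save_config(int is_pcie, int *size_out)
{
    int size = is_pcie ? PCIE_CONFIG_SPACE_LEN : PCI_CONFIG_SPACE_LEN;
    uint32_t *space = malloc(size);

    if (!space)
        return NULL;
    for (int i = 0; i < size / 4; i++)
        cfg_read_dword(i * 4, &space[i]);
    *size_out = size;
    return space;
}

static void restore_config(uint32_t *space, int size)
{
    if (!space)
        return;
    for (int i = 0; i < size / 4; i++)
        cfg_write_dword(i * 4, space[i]);
    free(space);
}

int main(void)
{
    int size;

    fake_cfg[0x40] = 0xab;                 /* lives beyond the first 64 bytes */
    uint32_t *saved = save_config(1, &size);
    memset(fake_cfg, 0, sizeof(fake_cfg)); /* "lost across the power change"  */
    restore_config(saved, size);
    printf("restored byte 0x40: 0x%02x\n", fake_cfg[0x40]);
    return 0;
}
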
| @@ -4429,25 +4575,33 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
| 4429 | 4575 | ||
| 4430 | netif_device_detach(netdev); | 4576 | netif_device_detach(netdev); |
| 4431 | 4577 | ||
| 4432 | if(netif_running(netdev)) | 4578 | if (netif_running(netdev)) |
| 4433 | e1000_down(adapter); | 4579 | e1000_down(adapter); |
| 4434 | 4580 | ||
| 4581 | #ifdef CONFIG_PM | ||
| 4582 | /* implement our own version of pci_save_state(pdev) because pci | ||
| 4583 | * express adapters have larger 256 byte config spaces */ | ||
| 4584 | retval = e1000_pci_save_state(adapter); | ||
| 4585 | if (retval) | ||
| 4586 | return retval; | ||
| 4587 | #endif | ||
| 4588 | |||
| 4435 | status = E1000_READ_REG(&adapter->hw, STATUS); | 4589 | status = E1000_READ_REG(&adapter->hw, STATUS); |
| 4436 | if(status & E1000_STATUS_LU) | 4590 | if (status & E1000_STATUS_LU) |
| 4437 | wufc &= ~E1000_WUFC_LNKC; | 4591 | wufc &= ~E1000_WUFC_LNKC; |
| 4438 | 4592 | ||
| 4439 | if(wufc) { | 4593 | if (wufc) { |
| 4440 | e1000_setup_rctl(adapter); | 4594 | e1000_setup_rctl(adapter); |
| 4441 | e1000_set_multi(netdev); | 4595 | e1000_set_multi(netdev); |
| 4442 | 4596 | ||
| 4443 | /* turn on all-multi mode if wake on multicast is enabled */ | 4597 | /* turn on all-multi mode if wake on multicast is enabled */ |
| 4444 | if(adapter->wol & E1000_WUFC_MC) { | 4598 | if (adapter->wol & E1000_WUFC_MC) { |
| 4445 | rctl = E1000_READ_REG(&adapter->hw, RCTL); | 4599 | rctl = E1000_READ_REG(&adapter->hw, RCTL); |
| 4446 | rctl |= E1000_RCTL_MPE; | 4600 | rctl |= E1000_RCTL_MPE; |
| 4447 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl); | 4601 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl); |
| 4448 | } | 4602 | } |
| 4449 | 4603 | ||
| 4450 | if(adapter->hw.mac_type >= e1000_82540) { | 4604 | if (adapter->hw.mac_type >= e1000_82540) { |
| 4451 | ctrl = E1000_READ_REG(&adapter->hw, CTRL); | 4605 | ctrl = E1000_READ_REG(&adapter->hw, CTRL); |
| 4452 | /* advertise wake from D3Cold */ | 4606 | /* advertise wake from D3Cold */ |
| 4453 | #define E1000_CTRL_ADVD3WUC 0x00100000 | 4607 | #define E1000_CTRL_ADVD3WUC 0x00100000 |
| @@ -4458,7 +4612,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
| 4458 | E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); | 4612 | E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); |
| 4459 | } | 4613 | } |
| 4460 | 4614 | ||
| 4461 | if(adapter->hw.media_type == e1000_media_type_fiber || | 4615 | if (adapter->hw.media_type == e1000_media_type_fiber || |
| 4462 | adapter->hw.media_type == e1000_media_type_internal_serdes) { | 4616 | adapter->hw.media_type == e1000_media_type_internal_serdes) { |
| 4463 | /* keep the laser running in D3 */ | 4617 | /* keep the laser running in D3 */ |
| 4464 | ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); | 4618 | ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); |
| @@ -4488,12 +4642,10 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
| 4488 | DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n"); | 4642 | DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n"); |
| 4489 | } | 4643 | } |
| 4490 | 4644 | ||
| 4491 | pci_save_state(pdev); | 4645 | if (adapter->hw.mac_type >= e1000_82540 && |
| 4492 | |||
| 4493 | if(adapter->hw.mac_type >= e1000_82540 && | ||
| 4494 | adapter->hw.media_type == e1000_media_type_copper) { | 4646 | adapter->hw.media_type == e1000_media_type_copper) { |
| 4495 | manc = E1000_READ_REG(&adapter->hw, MANC); | 4647 | manc = E1000_READ_REG(&adapter->hw, MANC); |
| 4496 | if(manc & E1000_MANC_SMBUS_EN) { | 4648 | if (manc & E1000_MANC_SMBUS_EN) { |
| 4497 | manc |= E1000_MANC_ARP_EN; | 4649 | manc |= E1000_MANC_ARP_EN; |
| 4498 | E1000_WRITE_REG(&adapter->hw, MANC, manc); | 4650 | E1000_WRITE_REG(&adapter->hw, MANC, manc); |
| 4499 | retval = pci_enable_wake(pdev, PCI_D3hot, 1); | 4651 | retval = pci_enable_wake(pdev, PCI_D3hot, 1); |
| @@ -4518,6 +4670,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
| 4518 | return 0; | 4670 | return 0; |
| 4519 | } | 4671 | } |
| 4520 | 4672 | ||
| 4673 | #ifdef CONFIG_PM | ||
| 4521 | static int | 4674 | static int |
| 4522 | e1000_resume(struct pci_dev *pdev) | 4675 | e1000_resume(struct pci_dev *pdev) |
| 4523 | { | 4676 | { |
| @@ -4529,6 +4682,7 @@ e1000_resume(struct pci_dev *pdev) | |||
| 4529 | retval = pci_set_power_state(pdev, PCI_D0); | 4682 | retval = pci_set_power_state(pdev, PCI_D0); |
| 4530 | if (retval) | 4683 | if (retval) |
| 4531 | DPRINTK(PROBE, ERR, "Error in setting power state\n"); | 4684 | DPRINTK(PROBE, ERR, "Error in setting power state\n"); |
| 4685 | e1000_pci_restore_state(adapter); | ||
| 4532 | ret_val = pci_enable_device(pdev); | 4686 | ret_val = pci_enable_device(pdev); |
| 4533 | pci_set_master(pdev); | 4687 | pci_set_master(pdev); |
| 4534 | 4688 | ||
| @@ -4542,12 +4696,12 @@ e1000_resume(struct pci_dev *pdev) | |||
| 4542 | e1000_reset(adapter); | 4696 | e1000_reset(adapter); |
| 4543 | E1000_WRITE_REG(&adapter->hw, WUS, ~0); | 4697 | E1000_WRITE_REG(&adapter->hw, WUS, ~0); |
| 4544 | 4698 | ||
| 4545 | if(netif_running(netdev)) | 4699 | if (netif_running(netdev)) |
| 4546 | e1000_up(adapter); | 4700 | e1000_up(adapter); |
| 4547 | 4701 | ||
| 4548 | netif_device_attach(netdev); | 4702 | netif_device_attach(netdev); |
| 4549 | 4703 | ||
| 4550 | if(adapter->hw.mac_type >= e1000_82540 && | 4704 | if (adapter->hw.mac_type >= e1000_82540 && |
| 4551 | adapter->hw.media_type == e1000_media_type_copper) { | 4705 | adapter->hw.media_type == e1000_media_type_copper) { |
| 4552 | manc = E1000_READ_REG(&adapter->hw, MANC); | 4706 | manc = E1000_READ_REG(&adapter->hw, MANC); |
| 4553 | manc &= ~(E1000_MANC_ARP_EN); | 4707 | manc &= ~(E1000_MANC_ARP_EN); |
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h index aac64de61437..9790db974dc1 100644 --- a/drivers/net/e1000/e1000_osdep.h +++ b/drivers/net/e1000/e1000_osdep.h | |||
| @@ -47,7 +47,7 @@ | |||
| 47 | BUG(); \ | 47 | BUG(); \ |
| 48 | } else { \ | 48 | } else { \ |
| 49 | msleep(x); \ | 49 | msleep(x); \ |
| 50 | } } while(0) | 50 | } } while (0) |
| 51 | 51 | ||
| 52 | /* Some workarounds require millisecond delays and are run during interrupt | 52 | /* Some workarounds require millisecond delays and are run during interrupt |
| 53 | * context. Most notably, when establishing link, the phy may need tweaking | 53 | * context. Most notably, when establishing link, the phy may need tweaking |
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c index 0a7918c62557..3768d83cd577 100644 --- a/drivers/net/e1000/e1000_param.c +++ b/drivers/net/e1000/e1000_param.c | |||
| @@ -227,7 +227,7 @@ static int __devinit | |||
| 227 | e1000_validate_option(int *value, struct e1000_option *opt, | 227 | e1000_validate_option(int *value, struct e1000_option *opt, |
| 228 | struct e1000_adapter *adapter) | 228 | struct e1000_adapter *adapter) |
| 229 | { | 229 | { |
| 230 | if(*value == OPTION_UNSET) { | 230 | if (*value == OPTION_UNSET) { |
| 231 | *value = opt->def; | 231 | *value = opt->def; |
| 232 | return 0; | 232 | return 0; |
| 233 | } | 233 | } |
| @@ -244,7 +244,7 @@ e1000_validate_option(int *value, struct e1000_option *opt, | |||
| 244 | } | 244 | } |
| 245 | break; | 245 | break; |
| 246 | case range_option: | 246 | case range_option: |
| 247 | if(*value >= opt->arg.r.min && *value <= opt->arg.r.max) { | 247 | if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { |
| 248 | DPRINTK(PROBE, INFO, | 248 | DPRINTK(PROBE, INFO, |
| 249 | "%s set to %i\n", opt->name, *value); | 249 | "%s set to %i\n", opt->name, *value); |
| 250 | return 0; | 250 | return 0; |
| @@ -254,10 +254,10 @@ e1000_validate_option(int *value, struct e1000_option *opt, | |||
| 254 | int i; | 254 | int i; |
| 255 | struct e1000_opt_list *ent; | 255 | struct e1000_opt_list *ent; |
| 256 | 256 | ||
| 257 | for(i = 0; i < opt->arg.l.nr; i++) { | 257 | for (i = 0; i < opt->arg.l.nr; i++) { |
| 258 | ent = &opt->arg.l.p[i]; | 258 | ent = &opt->arg.l.p[i]; |
| 259 | if(*value == ent->i) { | 259 | if (*value == ent->i) { |
| 260 | if(ent->str[0] != '\0') | 260 | if (ent->str[0] != '\0') |
| 261 | DPRINTK(PROBE, INFO, "%s\n", ent->str); | 261 | DPRINTK(PROBE, INFO, "%s\n", ent->str); |
| 262 | return 0; | 262 | return 0; |
| 263 | } | 263 | } |
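
e1000_validate_option() treats OPTION_UNSET as "take the default" and otherwise checks the value either against a numeric range or against an explicit list of allowed entries, falling back to the default on a miss. A minimal stand-alone version of that validator; the option descriptor layout and the limits used below are made up for the example.

#include <stdio.h>

#define OPTION_UNSET -1

enum opt_type { range_option, list_option };

struct opt {
    enum opt_type type;
    const char *name;
    int def;
    int min, max;                    /* for range_option */
    const int *list;                 /* for list_option  */
    int nr;
};

static int validate_option(int *value, const struct opt *o)
{
    if (*value == OPTION_UNSET) {
        *value = o->def;
        return 0;
    }
    switch (o->type) {
    case range_option:
        if (*value >= o->min && *value <= o->max)
            return 0;
        break;
    case list_option:
        for (int i = 0; i < o->nr; i++)
            if (*value == o->list[i])
                return 0;
        break;
    }
    printf("invalid %s = %d, using default %d\n", o->name, *value, o->def);
    *value = o->def;
    return -1;
}

int main(void)
{
    struct opt txd = { range_option, "TxDescriptors", 256, 80, 4096, NULL, 0 };
    int v = 100000;

    validate_option(&v, &txd);       /* out of range, falls back to 256 */
    printf("TxDescriptors = %d\n", v);
    return 0;
}
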
| @@ -291,7 +291,7 @@ void __devinit | |||
| 291 | e1000_check_options(struct e1000_adapter *adapter) | 291 | e1000_check_options(struct e1000_adapter *adapter) |
| 292 | { | 292 | { |
| 293 | int bd = adapter->bd_number; | 293 | int bd = adapter->bd_number; |
| 294 | if(bd >= E1000_MAX_NIC) { | 294 | if (bd >= E1000_MAX_NIC) { |
| 295 | DPRINTK(PROBE, NOTICE, | 295 | DPRINTK(PROBE, NOTICE, |
| 296 | "Warning: no configuration for board #%i\n", bd); | 296 | "Warning: no configuration for board #%i\n", bd); |
| 297 | DPRINTK(PROBE, NOTICE, "Using defaults for all values\n"); | 297 | DPRINTK(PROBE, NOTICE, "Using defaults for all values\n"); |
| @@ -315,7 +315,7 @@ e1000_check_options(struct e1000_adapter *adapter) | |||
| 315 | if (num_TxDescriptors > bd) { | 315 | if (num_TxDescriptors > bd) { |
| 316 | tx_ring->count = TxDescriptors[bd]; | 316 | tx_ring->count = TxDescriptors[bd]; |
| 317 | e1000_validate_option(&tx_ring->count, &opt, adapter); | 317 | e1000_validate_option(&tx_ring->count, &opt, adapter); |
| 318 | E1000_ROUNDUP(tx_ring->count, | 318 | E1000_ROUNDUP(tx_ring->count, |
| 319 | REQ_TX_DESCRIPTOR_MULTIPLE); | 319 | REQ_TX_DESCRIPTOR_MULTIPLE); |
| 320 | } else { | 320 | } else { |
| 321 | tx_ring->count = opt.def; | 321 | tx_ring->count = opt.def; |
| @@ -341,7 +341,7 @@ e1000_check_options(struct e1000_adapter *adapter) | |||
| 341 | if (num_RxDescriptors > bd) { | 341 | if (num_RxDescriptors > bd) { |
| 342 | rx_ring->count = RxDescriptors[bd]; | 342 | rx_ring->count = RxDescriptors[bd]; |
| 343 | e1000_validate_option(&rx_ring->count, &opt, adapter); | 343 | e1000_validate_option(&rx_ring->count, &opt, adapter); |
| 344 | E1000_ROUNDUP(rx_ring->count, | 344 | E1000_ROUNDUP(rx_ring->count, |
| 345 | REQ_RX_DESCRIPTOR_MULTIPLE); | 345 | REQ_RX_DESCRIPTOR_MULTIPLE); |
| 346 | } else { | 346 | } else { |
| 347 | rx_ring->count = opt.def; | 347 | rx_ring->count = opt.def; |
| @@ -403,7 +403,7 @@ e1000_check_options(struct e1000_adapter *adapter) | |||
| 403 | 403 | ||
| 404 | if (num_TxIntDelay > bd) { | 404 | if (num_TxIntDelay > bd) { |
| 405 | adapter->tx_int_delay = TxIntDelay[bd]; | 405 | adapter->tx_int_delay = TxIntDelay[bd]; |
| 406 | e1000_validate_option(&adapter->tx_int_delay, &opt, | 406 | e1000_validate_option(&adapter->tx_int_delay, &opt, |
| 407 | adapter); | 407 | adapter); |
| 408 | } else { | 408 | } else { |
| 409 | adapter->tx_int_delay = opt.def; | 409 | adapter->tx_int_delay = opt.def; |
| @@ -421,7 +421,7 @@ e1000_check_options(struct e1000_adapter *adapter) | |||
| 421 | 421 | ||
| 422 | if (num_TxAbsIntDelay > bd) { | 422 | if (num_TxAbsIntDelay > bd) { |
| 423 | adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; | 423 | adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; |
| 424 | e1000_validate_option(&adapter->tx_abs_int_delay, &opt, | 424 | e1000_validate_option(&adapter->tx_abs_int_delay, &opt, |
| 425 | adapter); | 425 | adapter); |
| 426 | } else { | 426 | } else { |
| 427 | adapter->tx_abs_int_delay = opt.def; | 427 | adapter->tx_abs_int_delay = opt.def; |
| @@ -439,7 +439,7 @@ e1000_check_options(struct e1000_adapter *adapter) | |||
| 439 | 439 | ||
| 440 | if (num_RxIntDelay > bd) { | 440 | if (num_RxIntDelay > bd) { |
| 441 | adapter->rx_int_delay = RxIntDelay[bd]; | 441 | adapter->rx_int_delay = RxIntDelay[bd]; |
| 442 | e1000_validate_option(&adapter->rx_int_delay, &opt, | 442 | e1000_validate_option(&adapter->rx_int_delay, &opt, |
| 443 | adapter); | 443 | adapter); |
| 444 | } else { | 444 | } else { |
| 445 | adapter->rx_int_delay = opt.def; | 445 | adapter->rx_int_delay = opt.def; |
| @@ -457,7 +457,7 @@ e1000_check_options(struct e1000_adapter *adapter) | |||
| 457 | 457 | ||
| 458 | if (num_RxAbsIntDelay > bd) { | 458 | if (num_RxAbsIntDelay > bd) { |
| 459 | adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; | 459 | adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; |
| 460 | e1000_validate_option(&adapter->rx_abs_int_delay, &opt, | 460 | e1000_validate_option(&adapter->rx_abs_int_delay, &opt, |
| 461 | adapter); | 461 | adapter); |
| 462 | } else { | 462 | } else { |
| 463 | adapter->rx_abs_int_delay = opt.def; | 463 | adapter->rx_abs_int_delay = opt.def; |
| @@ -475,17 +475,17 @@ e1000_check_options(struct e1000_adapter *adapter) | |||
| 475 | 475 | ||
| 476 | if (num_InterruptThrottleRate > bd) { | 476 | if (num_InterruptThrottleRate > bd) { |
| 477 | adapter->itr = InterruptThrottleRate[bd]; | 477 | adapter->itr = InterruptThrottleRate[bd]; |
| 478 | switch(adapter->itr) { | 478 | switch (adapter->itr) { |
| 479 | case 0: | 479 | case 0: |
| 480 | DPRINTK(PROBE, INFO, "%s turned off\n", | 480 | DPRINTK(PROBE, INFO, "%s turned off\n", |
| 481 | opt.name); | 481 | opt.name); |
| 482 | break; | 482 | break; |
| 483 | case 1: | 483 | case 1: |
| 484 | DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", | 484 | DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", |
| 485 | opt.name); | 485 | opt.name); |
| 486 | break; | 486 | break; |
| 487 | default: | 487 | default: |
| 488 | e1000_validate_option(&adapter->itr, &opt, | 488 | e1000_validate_option(&adapter->itr, &opt, |
| 489 | adapter); | 489 | adapter); |
| 490 | break; | 490 | break; |
| 491 | } | 491 | } |
| @@ -494,7 +494,7 @@ e1000_check_options(struct e1000_adapter *adapter) | |||
| 494 | } | 494 | } |
| 495 | } | 495 | } |
| 496 | 496 | ||
| 497 | switch(adapter->hw.media_type) { | 497 | switch (adapter->hw.media_type) { |
| 498 | case e1000_media_type_fiber: | 498 | case e1000_media_type_fiber: |
| 499 | case e1000_media_type_internal_serdes: | 499 | case e1000_media_type_internal_serdes: |
| 500 | e1000_check_fiber_options(adapter); | 500 | e1000_check_fiber_options(adapter); |
| @@ -518,17 +518,17 @@ static void __devinit | |||
| 518 | e1000_check_fiber_options(struct e1000_adapter *adapter) | 518 | e1000_check_fiber_options(struct e1000_adapter *adapter) |
| 519 | { | 519 | { |
| 520 | int bd = adapter->bd_number; | 520 | int bd = adapter->bd_number; |
| 521 | if(num_Speed > bd) { | 521 | if (num_Speed > bd) { |
| 522 | DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, " | 522 | DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, " |
| 523 | "parameter ignored\n"); | 523 | "parameter ignored\n"); |
| 524 | } | 524 | } |
| 525 | 525 | ||
| 526 | if(num_Duplex > bd) { | 526 | if (num_Duplex > bd) { |
| 527 | DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, " | 527 | DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, " |
| 528 | "parameter ignored\n"); | 528 | "parameter ignored\n"); |
| 529 | } | 529 | } |
| 530 | 530 | ||
| 531 | if((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) { | 531 | if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) { |
| 532 | DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is " | 532 | DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is " |
| 533 | "not valid for fiber adapters, " | 533 | "not valid for fiber adapters, " |
| 534 | "parameter ignored\n"); | 534 | "parameter ignored\n"); |
| @@ -598,7 +598,7 @@ e1000_check_copper_options(struct e1000_adapter *adapter) | |||
| 598 | } | 598 | } |
| 599 | } | 599 | } |
| 600 | 600 | ||
| 601 | if((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) { | 601 | if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) { |
| 602 | DPRINTK(PROBE, INFO, | 602 | DPRINTK(PROBE, INFO, |
| 603 | "AutoNeg specified along with Speed or Duplex, " | 603 | "AutoNeg specified along with Speed or Duplex, " |
| 604 | "parameter ignored\n"); | 604 | "parameter ignored\n"); |
| @@ -659,7 +659,7 @@ e1000_check_copper_options(struct e1000_adapter *adapter) | |||
| 659 | switch (speed + dplx) { | 659 | switch (speed + dplx) { |
| 660 | case 0: | 660 | case 0: |
| 661 | adapter->hw.autoneg = adapter->fc_autoneg = 1; | 661 | adapter->hw.autoneg = adapter->fc_autoneg = 1; |
| 662 | if((num_Speed > bd) && (speed != 0 || dplx != 0)) | 662 | if ((num_Speed > bd) && (speed != 0 || dplx != 0)) |
| 663 | DPRINTK(PROBE, INFO, | 663 | DPRINTK(PROBE, INFO, |
| 664 | "Speed and duplex autonegotiation enabled\n"); | 664 | "Speed and duplex autonegotiation enabled\n"); |
| 665 | break; | 665 | break; |
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index eb86b059809b..f2d1dafde087 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
| @@ -69,8 +69,8 @@ | |||
| 69 | 69 | ||
| 70 | #define DRV_MODULE_NAME "tg3" | 70 | #define DRV_MODULE_NAME "tg3" |
| 71 | #define PFX DRV_MODULE_NAME ": " | 71 | #define PFX DRV_MODULE_NAME ": " |
| 72 | #define DRV_MODULE_VERSION "3.47" | 72 | #define DRV_MODULE_VERSION "3.48" |
| 73 | #define DRV_MODULE_RELDATE "Dec 28, 2005" | 73 | #define DRV_MODULE_RELDATE "Jan 16, 2006" |
| 74 | 74 | ||
| 75 | #define TG3_DEF_MAC_MODE 0 | 75 | #define TG3_DEF_MAC_MODE 0 |
| 76 | #define TG3_DEF_RX_MODE 0 | 76 | #define TG3_DEF_RX_MODE 0 |
| @@ -1325,10 +1325,12 @@ static int tg3_set_power_state(struct tg3 *tp, int state) | |||
| 1325 | val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); | 1325 | val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); |
| 1326 | tw32(0x7d00, val); | 1326 | tw32(0x7d00, val); |
| 1327 | if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { | 1327 | if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { |
| 1328 | tg3_nvram_lock(tp); | 1328 | int err; |
| 1329 | |||
| 1330 | err = tg3_nvram_lock(tp); | ||
| 1329 | tg3_halt_cpu(tp, RX_CPU_BASE); | 1331 | tg3_halt_cpu(tp, RX_CPU_BASE); |
| 1330 | tw32_f(NVRAM_SWARB, SWARB_REQ_CLR0); | 1332 | if (!err) |
| 1331 | tg3_nvram_unlock(tp); | 1333 | tg3_nvram_unlock(tp); |
| 1332 | } | 1334 | } |
| 1333 | } | 1335 | } |
| 1334 | 1336 | ||
| @@ -4193,14 +4195,19 @@ static int tg3_nvram_lock(struct tg3 *tp) | |||
| 4193 | if (tp->tg3_flags & TG3_FLAG_NVRAM) { | 4195 | if (tp->tg3_flags & TG3_FLAG_NVRAM) { |
| 4194 | int i; | 4196 | int i; |
| 4195 | 4197 | ||
| 4196 | tw32(NVRAM_SWARB, SWARB_REQ_SET1); | 4198 | if (tp->nvram_lock_cnt == 0) { |
| 4197 | for (i = 0; i < 8000; i++) { | 4199 | tw32(NVRAM_SWARB, SWARB_REQ_SET1); |
| 4198 | if (tr32(NVRAM_SWARB) & SWARB_GNT1) | 4200 | for (i = 0; i < 8000; i++) { |
| 4199 | break; | 4201 | if (tr32(NVRAM_SWARB) & SWARB_GNT1) |
| 4200 | udelay(20); | 4202 | break; |
| 4203 | udelay(20); | ||
| 4204 | } | ||
| 4205 | if (i == 8000) { | ||
| 4206 | tw32(NVRAM_SWARB, SWARB_REQ_CLR1); | ||
| 4207 | return -ENODEV; | ||
| 4208 | } | ||
| 4201 | } | 4209 | } |
| 4202 | if (i == 8000) | 4210 | tp->nvram_lock_cnt++; |
| 4203 | return -ENODEV; | ||
| 4204 | } | 4211 | } |
| 4205 | return 0; | 4212 | return 0; |
| 4206 | } | 4213 | } |
| @@ -4208,8 +4215,12 @@ static int tg3_nvram_lock(struct tg3 *tp) | |||
| 4208 | /* tp->lock is held. */ | 4215 | /* tp->lock is held. */ |
| 4209 | static void tg3_nvram_unlock(struct tg3 *tp) | 4216 | static void tg3_nvram_unlock(struct tg3 *tp) |
| 4210 | { | 4217 | { |
| 4211 | if (tp->tg3_flags & TG3_FLAG_NVRAM) | 4218 | if (tp->tg3_flags & TG3_FLAG_NVRAM) { |
| 4212 | tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1); | 4219 | if (tp->nvram_lock_cnt > 0) |
| 4220 | tp->nvram_lock_cnt--; | ||
| 4221 | if (tp->nvram_lock_cnt == 0) | ||
| 4222 | tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1); | ||
| 4223 | } | ||
| 4213 | } | 4224 | } |
| 4214 | 4225 | ||
| 4215 | /* tp->lock is held. */ | 4226 | /* tp->lock is held. */ |
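
The tg3 changes turn the NVRAM software arbitration into a counted lock: only the first tg3_nvram_lock() call asserts SWARB_REQ_SET1 and it can now fail with -ENODEV if the grant never shows up, only the last tg3_nvram_unlock() clears the request, and tg3_chip_reset() below zeroes the count outright because the reset drops the hardware arbitration anyway. A user-space sketch of that counting scheme, with the register poll reduced to a flag; the stand-in field names are invented.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct nvram_arb {
    bool hw_grant_works;      /* pretend SWARB_GNT1 appears when requested */
    bool hw_req_set;          /* SWARB_REQ_SET1 currently asserted         */
    int  lock_cnt;            /* tp->nvram_lock_cnt equivalent             */
};

static int nvram_lock(struct nvram_arb *a)
{
    if (a->lock_cnt == 0) {
        a->hw_req_set = true;
        if (!a->hw_grant_works) {       /* poll timed out */
            a->hw_req_set = false;      /* SWARB_REQ_CLR1 */
            return -ENODEV;
        }
    }
    a->lock_cnt++;
    return 0;
}

static void nvram_unlock(struct nvram_arb *a)
{
    if (a->lock_cnt > 0)
        a->lock_cnt--;
    if (a->lock_cnt == 0)
        a->hw_req_set = false;          /* SWARB_REQ_CLR1 */
}

int main(void)
{
    struct nvram_arb a = { .hw_grant_works = true };

    if (nvram_lock(&a) == 0) {          /* callers now check the result      */
        nvram_lock(&a);                 /* nested user: no second hw request */
        nvram_unlock(&a);
        nvram_unlock(&a);               /* last unlock releases arbitration  */
    }
    printf("req asserted: %d, count: %d\n", a.hw_req_set, a.lock_cnt);
    return 0;
}
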
| @@ -4320,8 +4331,13 @@ static int tg3_chip_reset(struct tg3 *tp) | |||
| 4320 | void (*write_op)(struct tg3 *, u32, u32); | 4331 | void (*write_op)(struct tg3 *, u32, u32); |
| 4321 | int i; | 4332 | int i; |
| 4322 | 4333 | ||
| 4323 | if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) | 4334 | if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) { |
| 4324 | tg3_nvram_lock(tp); | 4335 | tg3_nvram_lock(tp); |
| 4336 | /* No matching tg3_nvram_unlock() after this because | ||
| 4337 | * chip reset below will undo the nvram lock. | ||
| 4338 | */ | ||
| 4339 | tp->nvram_lock_cnt = 0; | ||
| 4340 | } | ||
| 4325 | 4341 | ||
| 4326 | /* | 4342 | /* |
| 4327 | * We must avoid the readl() that normally takes place. | 4343 | * We must avoid the readl() that normally takes place. |
| @@ -4717,6 +4733,10 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset) | |||
| 4717 | (offset == RX_CPU_BASE ? "RX" : "TX")); | 4733 | (offset == RX_CPU_BASE ? "RX" : "TX")); |
| 4718 | return -ENODEV; | 4734 | return -ENODEV; |
| 4719 | } | 4735 | } |
| 4736 | |||
| 4737 | /* Clear firmware's nvram arbitration. */ | ||
| 4738 | if (tp->tg3_flags & TG3_FLAG_NVRAM) | ||
| 4739 | tw32(NVRAM_SWARB, SWARB_REQ_CLR0); | ||
| 4720 | return 0; | 4740 | return 0; |
| 4721 | } | 4741 | } |
| 4722 | 4742 | ||
| @@ -4736,7 +4756,7 @@ struct fw_info { | |||
| 4736 | static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base, | 4756 | static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base, |
| 4737 | int cpu_scratch_size, struct fw_info *info) | 4757 | int cpu_scratch_size, struct fw_info *info) |
| 4738 | { | 4758 | { |
| 4739 | int err, i; | 4759 | int err, lock_err, i; |
| 4740 | void (*write_op)(struct tg3 *, u32, u32); | 4760 | void (*write_op)(struct tg3 *, u32, u32); |
| 4741 | 4761 | ||
| 4742 | if (cpu_base == TX_CPU_BASE && | 4762 | if (cpu_base == TX_CPU_BASE && |
| @@ -4755,9 +4775,10 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b | |||
| 4755 | /* It is possible that bootcode is still loading at this point. | 4775 | /* It is possible that bootcode is still loading at this point. |
| 4756 | * Get the nvram lock first before halting the cpu. | 4776 | * Get the nvram lock first before halting the cpu. |
| 4757 | */ | 4777 | */ |
| 4758 | tg3_nvram_lock(tp); | 4778 | lock_err = tg3_nvram_lock(tp); |
| 4759 | err = tg3_halt_cpu(tp, cpu_base); | 4779 | err = tg3_halt_cpu(tp, cpu_base); |
| 4760 | tg3_nvram_unlock(tp); | 4780 | if (!lock_err) |
| 4781 | tg3_nvram_unlock(tp); | ||
| 4761 | if (err) | 4782 | if (err) |
| 4762 | goto out; | 4783 | goto out; |
| 4763 | 4784 | ||
| @@ -8182,7 +8203,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, | |||
| 8182 | data[1] = 1; | 8203 | data[1] = 1; |
| 8183 | } | 8204 | } |
| 8184 | if (etest->flags & ETH_TEST_FL_OFFLINE) { | 8205 | if (etest->flags & ETH_TEST_FL_OFFLINE) { |
| 8185 | int irq_sync = 0; | 8206 | int err, irq_sync = 0; |
| 8186 | 8207 | ||
| 8187 | if (netif_running(dev)) { | 8208 | if (netif_running(dev)) { |
| 8188 | tg3_netif_stop(tp); | 8209 | tg3_netif_stop(tp); |
| @@ -8192,11 +8213,12 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, | |||
| 8192 | tg3_full_lock(tp, irq_sync); | 8213 | tg3_full_lock(tp, irq_sync); |
| 8193 | 8214 | ||
| 8194 | tg3_halt(tp, RESET_KIND_SUSPEND, 1); | 8215 | tg3_halt(tp, RESET_KIND_SUSPEND, 1); |
| 8195 | tg3_nvram_lock(tp); | 8216 | err = tg3_nvram_lock(tp); |
| 8196 | tg3_halt_cpu(tp, RX_CPU_BASE); | 8217 | tg3_halt_cpu(tp, RX_CPU_BASE); |
| 8197 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 8218 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) |
| 8198 | tg3_halt_cpu(tp, TX_CPU_BASE); | 8219 | tg3_halt_cpu(tp, TX_CPU_BASE); |
| 8199 | tg3_nvram_unlock(tp); | 8220 | if (!err) |
| 8221 | tg3_nvram_unlock(tp); | ||
| 8200 | 8222 | ||
| 8201 | if (tg3_test_registers(tp) != 0) { | 8223 | if (tg3_test_registers(tp) != 0) { |
| 8202 | etest->flags |= ETH_TEST_FL_FAILED; | 8224 | etest->flags |= ETH_TEST_FL_FAILED; |
| @@ -8588,7 +8610,11 @@ static void __devinit tg3_nvram_init(struct tg3 *tp) | |||
| 8588 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { | 8610 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { |
| 8589 | tp->tg3_flags |= TG3_FLAG_NVRAM; | 8611 | tp->tg3_flags |= TG3_FLAG_NVRAM; |
| 8590 | 8612 | ||
| 8591 | tg3_nvram_lock(tp); | 8613 | if (tg3_nvram_lock(tp)) { |
| 8614 | printk(KERN_WARNING PFX "%s: Cannot get nvram lock, " | ||
| 8615 | "tg3_nvram_init failed.\n", tp->dev->name); | ||
| 8616 | return; | ||
| 8617 | } | ||
| 8592 | tg3_enable_nvram_access(tp); | 8618 | tg3_enable_nvram_access(tp); |
| 8593 | 8619 | ||
| 8594 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) | 8620 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) |
| @@ -8686,7 +8712,9 @@ static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) | |||
| 8686 | if (offset > NVRAM_ADDR_MSK) | 8712 | if (offset > NVRAM_ADDR_MSK) |
| 8687 | return -EINVAL; | 8713 | return -EINVAL; |
| 8688 | 8714 | ||
| 8689 | tg3_nvram_lock(tp); | 8715 | ret = tg3_nvram_lock(tp); |
| 8716 | if (ret) | ||
| 8717 | return ret; | ||
| 8690 | 8718 | ||
| 8691 | tg3_enable_nvram_access(tp); | 8719 | tg3_enable_nvram_access(tp); |
| 8692 | 8720 | ||
| @@ -8785,10 +8813,6 @@ static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len, | |||
| 8785 | 8813 | ||
| 8786 | offset = offset + (pagesize - page_off); | 8814 | offset = offset + (pagesize - page_off); |
| 8787 | 8815 | ||
| 8788 | /* Nvram lock released by tg3_nvram_read() above, | ||
| 8789 | * so need to get it again. | ||
| 8790 | */ | ||
| 8791 | tg3_nvram_lock(tp); | ||
| 8792 | tg3_enable_nvram_access(tp); | 8816 | tg3_enable_nvram_access(tp); |
| 8793 | 8817 | ||
| 8794 | /* | 8818 | /* |
| @@ -8925,7 +8949,9 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) | |||
| 8925 | else { | 8949 | else { |
| 8926 | u32 grc_mode; | 8950 | u32 grc_mode; |
| 8927 | 8951 | ||
| 8928 | tg3_nvram_lock(tp); | 8952 | ret = tg3_nvram_lock(tp); |
| 8953 | if (ret) | ||
| 8954 | return ret; | ||
| 8929 | 8955 | ||
| 8930 | tg3_enable_nvram_access(tp); | 8956 | tg3_enable_nvram_access(tp); |
| 8931 | if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && | 8957 | if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && |
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h index 890e1635996b..e8243305f0e8 100644 --- a/drivers/net/tg3.h +++ b/drivers/net/tg3.h | |||
| @@ -2275,6 +2275,7 @@ struct tg3 { | |||
| 2275 | dma_addr_t stats_mapping; | 2275 | dma_addr_t stats_mapping; |
| 2276 | struct work_struct reset_task; | 2276 | struct work_struct reset_task; |
| 2277 | 2277 | ||
| 2278 | int nvram_lock_cnt; | ||
| 2278 | u32 nvram_size; | 2279 | u32 nvram_size; |
| 2279 | u32 nvram_pagesize; | 2280 | u32 nvram_pagesize; |
| 2280 | u32 nvram_jedecnum; | 2281 | u32 nvram_jedecnum; |
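
The tg3 hunks above turn tg3_nvram_lock()/tg3_nvram_unlock() into a counted lock (via the new nvram_lock_cnt field), so nested callers no longer release the hardware arbitration out from under each other, and lock failures propagate so callers can skip the matching unlock. A minimal userspace sketch of that pattern; grab_hw_semaphore() and release_hw_semaphore() are hypothetical stand-ins for the NVRAM_SWARB register writes:

#include <stdio.h>

/* Hypothetical stand-ins for the NVRAM_SWARB register accesses. */
static void grab_hw_semaphore(void)    { puts("SWARB_REQ_SET1"); }
static void release_hw_semaphore(void) { puts("SWARB_REQ_CLR1"); }

struct dev { int lock_cnt; };

/* Acquire: only the outermost caller touches the hardware.
 * The real code also polls for the grant and returns -ENODEV on timeout. */
static int nvram_lock(struct dev *d)
{
	if (d->lock_cnt == 0)
		grab_hw_semaphore();
	d->lock_cnt++;
	return 0;
}

/* Release: drop the hardware semaphore only when the count hits zero. */
static void nvram_unlock(struct dev *d)
{
	if (d->lock_cnt > 0)
		d->lock_cnt--;
	if (d->lock_cnt == 0)
		release_hw_semaphore();
}

int main(void)
{
	struct dev d = { 0 };

	nvram_lock(&d);   /* outer caller grabs the semaphore */
	nvram_lock(&d);   /* nested caller just bumps the count */
	nvram_unlock(&d); /* no hardware write yet */
	nvram_unlock(&d); /* count reaches zero, semaphore released */
	return 0;
}

The chip-reset path above zeroes the count directly because the reset itself clears the hardware lock, so no unlock is issued for that acquisition.
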
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 605f0df0bfba..dda6099903c1 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
| @@ -1142,6 +1142,9 @@ static void __devinit quirk_intel_ide_combined(struct pci_dev *pdev) | |||
| 1142 | case 0x27c4: | 1142 | case 0x27c4: |
| 1143 | ich = 7; | 1143 | ich = 7; |
| 1144 | break; | 1144 | break; |
| 1145 | case 0x2828: /* ICH8M */ | ||
| 1146 | ich = 8; | ||
| 1147 | break; | ||
| 1145 | default: | 1148 | default: |
| 1146 | /* we do not handle this PCI device */ | 1149 | /* we do not handle this PCI device */ |
| 1147 | return; | 1150 | return; |
| @@ -1161,7 +1164,7 @@ static void __devinit quirk_intel_ide_combined(struct pci_dev *pdev) | |||
| 1161 | else | 1164 | else |
| 1162 | return; /* not in combined mode */ | 1165 | return; /* not in combined mode */ |
| 1163 | } else { | 1166 | } else { |
| 1164 | WARN_ON((ich != 6) && (ich != 7)); | 1167 | WARN_ON((ich != 6) && (ich != 7) && (ich != 8)); |
| 1165 | tmp &= 0x3; /* interesting bits 1:0 */ | 1168 | tmp &= 0x3; /* interesting bits 1:0 */ |
| 1166 | if (tmp & (1 << 0)) | 1169 | if (tmp & (1 << 0)) |
| 1167 | comb = (1 << 2); /* PATA port 0, SATA port 1 */ | 1170 | comb = (1 << 2); /* PATA port 0, SATA port 1 */ |
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c index d113290b5fc0..19bd346951dd 100644 --- a/drivers/scsi/ahci.c +++ b/drivers/scsi/ahci.c | |||
| @@ -276,6 +276,16 @@ static const struct pci_device_id ahci_pci_tbl[] = { | |||
| 276 | board_ahci }, /* ESB2 */ | 276 | board_ahci }, /* ESB2 */ |
| 277 | { PCI_VENDOR_ID_INTEL, 0x27c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 277 | { PCI_VENDOR_ID_INTEL, 0x27c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
| 278 | board_ahci }, /* ICH7-M DH */ | 278 | board_ahci }, /* ICH7-M DH */ |
| 279 | { PCI_VENDOR_ID_INTEL, 0x2821, PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
| 280 | board_ahci }, /* ICH8 */ | ||
| 281 | { PCI_VENDOR_ID_INTEL, 0x2822, PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
| 282 | board_ahci }, /* ICH8 */ | ||
| 283 | { PCI_VENDOR_ID_INTEL, 0x2824, PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
| 284 | board_ahci }, /* ICH8 */ | ||
| 285 | { PCI_VENDOR_ID_INTEL, 0x2829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
| 286 | board_ahci }, /* ICH8M */ | ||
| 287 | { PCI_VENDOR_ID_INTEL, 0x282a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
| 288 | board_ahci }, /* ICH8M */ | ||
| 279 | { } /* terminate list */ | 289 | { } /* terminate list */ |
| 280 | }; | 290 | }; |
| 281 | 291 | ||
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c index 557788ec4eec..fc3ca051ceed 100644 --- a/drivers/scsi/ata_piix.c +++ b/drivers/scsi/ata_piix.c | |||
| @@ -157,6 +157,9 @@ static const struct pci_device_id piix_pci_tbl[] = { | |||
| 157 | { 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, | 157 | { 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, |
| 158 | { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, | 158 | { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, |
| 159 | { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, | 159 | { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, |
| 160 | { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, | ||
| 161 | { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, | ||
| 162 | { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, | ||
| 160 | 163 | ||
| 161 | { } /* terminate list */ | 164 | { } /* terminate list */ |
| 162 | }; | 165 | }; |
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c index 99bae8369ab2..46c4cdbaee86 100644 --- a/drivers/scsi/libata-core.c +++ b/drivers/scsi/libata-core.c | |||
| @@ -611,6 +611,10 @@ int ata_rwcmd_protocol(struct ata_queued_cmd *qc) | |||
| 611 | if (dev->flags & ATA_DFLAG_PIO) { | 611 | if (dev->flags & ATA_DFLAG_PIO) { |
| 612 | tf->protocol = ATA_PROT_PIO; | 612 | tf->protocol = ATA_PROT_PIO; |
| 613 | index = dev->multi_count ? 0 : 8; | 613 | index = dev->multi_count ? 0 : 8; |
| 614 | } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) { | ||
| 615 | /* Unable to use DMA due to host limitation */ | ||
| 616 | tf->protocol = ATA_PROT_PIO; | ||
| 617 | index = dev->multi_count ? 0 : 4; | ||
| 614 | } else { | 618 | } else { |
| 615 | tf->protocol = ATA_PROT_DMA; | 619 | tf->protocol = ATA_PROT_DMA; |
| 616 | index = 16; | 620 | index = 16; |
| @@ -1051,18 +1055,22 @@ static unsigned int ata_pio_modes(const struct ata_device *adev) | |||
| 1051 | { | 1055 | { |
| 1052 | u16 modes; | 1056 | u16 modes; |
| 1053 | 1057 | ||
| 1054 | /* Usual case. Word 53 indicates word 88 is valid */ | 1058 | /* Usual case. Word 53 indicates word 64 is valid */ |
| 1055 | if (adev->id[ATA_ID_FIELD_VALID] & (1 << 2)) { | 1059 | if (adev->id[ATA_ID_FIELD_VALID] & (1 << 1)) { |
| 1056 | modes = adev->id[ATA_ID_PIO_MODES] & 0x03; | 1060 | modes = adev->id[ATA_ID_PIO_MODES] & 0x03; |
| 1057 | modes <<= 3; | 1061 | modes <<= 3; |
| 1058 | modes |= 0x7; | 1062 | modes |= 0x7; |
| 1059 | return modes; | 1063 | return modes; |
| 1060 | } | 1064 | } |
| 1061 | 1065 | ||
| 1062 | /* If word 88 isn't valid then Word 51 holds the PIO timing number | 1066 | /* If word 64 isn't valid then Word 51 high byte holds the PIO timing |
| 1063 | for the maximum. Turn it into a mask and return it */ | 1067 | number for the maximum. Turn it into a mask and return it */ |
| 1064 | modes = (2 << (adev->id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ; | 1068 | modes = (2 << ((adev->id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF)) - 1 ; |
| 1065 | return modes; | 1069 | return modes; |
| 1070 | /* But wait.. there's more. Design your standards by committee and | ||
| 1071 | you too can get a free iordy field to process. However it's the | ||
| 1072 | speeds not the modes that are supported... Note drivers using the | ||
| 1073 | timing API will get this right anyway */ | ||
| 1066 | } | 1074 | } |
| 1067 | 1075 | ||
| 1068 | struct ata_exec_internal_arg { | 1076 | struct ata_exec_internal_arg { |
| @@ -1165,6 +1173,39 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev, | |||
| 1165 | } | 1173 | } |
| 1166 | 1174 | ||
| 1167 | /** | 1175 | /** |
| 1176 | * ata_pio_need_iordy - check if iordy needed | ||
| 1177 | * @adev: ATA device | ||
| 1178 | * | ||
| 1179 | * Check if the current speed of the device requires IORDY. Used | ||
| 1180 | * by various controllers for chip configuration. | ||
| 1181 | */ | ||
| 1182 | |||
| 1183 | unsigned int ata_pio_need_iordy(const struct ata_device *adev) | ||
| 1184 | { | ||
| 1185 | int pio; | ||
| 1186 | int speed = adev->pio_mode - XFER_PIO_0; | ||
| 1187 | |||
| 1188 | if (speed < 2) | ||
| 1189 | return 0; | ||
| 1190 | if (speed > 2) | ||
| 1191 | return 1; | ||
| 1192 | |||
| 1193 | /* If we have no drive specific rule, then PIO 2 is non IORDY */ | ||
| 1194 | |||
| 1195 | if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */ | ||
| 1196 | pio = adev->id[ATA_ID_EIDE_PIO]; | ||
| 1197 | /* Is the speed faster than the drive allows non IORDY ? */ | ||
| 1198 | if (pio) { | ||
| 1199 | /* This is cycle times not frequency - watch the logic! */ | ||
| 1200 | if (pio > 240) /* PIO2 is 240nS per cycle */ | ||
| 1201 | return 1; | ||
| 1202 | return 0; | ||
| 1203 | } | ||
| 1204 | } | ||
| 1205 | return 0; | ||
| 1206 | } | ||
| 1207 | |||
| 1208 | /** | ||
| 1168 | * ata_dev_identify - obtain IDENTIFY x DEVICE page | 1209 | * ata_dev_identify - obtain IDENTIFY x DEVICE page |
| 1169 | * @ap: port on which device we wish to probe resides | 1210 | * @ap: port on which device we wish to probe resides |
| 1170 | * @device: device bus address, starting at zero | 1211 | * @device: device bus address, starting at zero |
| @@ -1415,7 +1456,7 @@ void ata_dev_config(struct ata_port *ap, unsigned int i) | |||
| 1415 | ap->udma_mask &= ATA_UDMA5; | 1456 | ap->udma_mask &= ATA_UDMA5; |
| 1416 | ap->host->max_sectors = ATA_MAX_SECTORS; | 1457 | ap->host->max_sectors = ATA_MAX_SECTORS; |
| 1417 | ap->host->hostt->max_sectors = ATA_MAX_SECTORS; | 1458 | ap->host->hostt->max_sectors = ATA_MAX_SECTORS; |
| 1418 | ap->device->flags |= ATA_DFLAG_LOCK_SECTORS; | 1459 | ap->device[i].flags |= ATA_DFLAG_LOCK_SECTORS; |
| 1419 | } | 1460 | } |
| 1420 | 1461 | ||
| 1421 | if (ap->ops->dev_config) | 1462 | if (ap->ops->dev_config) |
| @@ -3056,10 +3097,21 @@ static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf, | |||
| 3056 | static void ata_data_xfer(struct ata_port *ap, unsigned char *buf, | 3097 | static void ata_data_xfer(struct ata_port *ap, unsigned char *buf, |
| 3057 | unsigned int buflen, int do_write) | 3098 | unsigned int buflen, int do_write) |
| 3058 | { | 3099 | { |
| 3059 | if (ap->flags & ATA_FLAG_MMIO) | 3100 | /* Make the crap hardware pay the costs not the good stuff */ |
| 3060 | ata_mmio_data_xfer(ap, buf, buflen, do_write); | 3101 | if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) { |
| 3061 | else | 3102 | unsigned long flags; |
| 3062 | ata_pio_data_xfer(ap, buf, buflen, do_write); | 3103 | local_irq_save(flags); |
| 3104 | if (ap->flags & ATA_FLAG_MMIO) | ||
| 3105 | ata_mmio_data_xfer(ap, buf, buflen, do_write); | ||
| 3106 | else | ||
| 3107 | ata_pio_data_xfer(ap, buf, buflen, do_write); | ||
| 3108 | local_irq_restore(flags); | ||
| 3109 | } else { | ||
| 3110 | if (ap->flags & ATA_FLAG_MMIO) | ||
| 3111 | ata_mmio_data_xfer(ap, buf, buflen, do_write); | ||
| 3112 | else | ||
| 3113 | ata_pio_data_xfer(ap, buf, buflen, do_write); | ||
| 3114 | } | ||
| 3063 | } | 3115 | } |
| 3064 | 3116 | ||
| 3065 | /** | 3117 | /** |
| @@ -5122,6 +5174,7 @@ EXPORT_SYMBOL_GPL(ata_dev_id_string); | |||
| 5122 | EXPORT_SYMBOL_GPL(ata_dev_config); | 5174 | EXPORT_SYMBOL_GPL(ata_dev_config); |
| 5123 | EXPORT_SYMBOL_GPL(ata_scsi_simulate); | 5175 | EXPORT_SYMBOL_GPL(ata_scsi_simulate); |
| 5124 | 5176 | ||
| 5177 | EXPORT_SYMBOL_GPL(ata_pio_need_iordy); | ||
| 5125 | EXPORT_SYMBOL_GPL(ata_timing_compute); | 5178 | EXPORT_SYMBOL_GPL(ata_timing_compute); |
| 5126 | EXPORT_SYMBOL_GPL(ata_timing_merge); | 5179 | EXPORT_SYMBOL_GPL(ata_timing_merge); |
| 5127 | 5180 | ||
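
The newly exported ata_pio_need_iordy() gives controller drivers one place to ask whether a PIO mode needs IORDY flow control. A standalone userspace model of the same decision (simplified: it takes the drive's minimum non-IORDY cycle time directly and skips the ATA_ID_FIELD_VALID check shown above); pio_need_iordy() is a hypothetical name for this sketch, not the exported symbol:

#include <stdio.h>

/*
 * pio_mode is 0..4; eide_pio_ns is the drive's minimum PIO cycle time
 * without flow control (IDENTIFY word 67), or 0 if not reported.
 */
static int pio_need_iordy(int pio_mode, unsigned int eide_pio_ns)
{
	if (pio_mode < 2)   /* PIO0/1 never use IORDY */
		return 0;
	if (pio_mode > 2)   /* PIO3 and PIO4 always do */
		return 1;
	/* PIO2 runs at 240 ns/cycle: IORDY is needed only if that is
	 * faster than the drive can manage without flow control. */
	if (eide_pio_ns > 240)
		return 1;
	return 0;
}

int main(void)
{
	printf("PIO1: %d\n", pio_need_iordy(1, 0));               /* 0 */
	printf("PIO2, 383ns drive: %d\n", pio_need_iordy(2, 383)); /* 1 */
	printf("PIO4: %d\n", pio_need_iordy(4, 120));              /* 1 */
	return 0;
}
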
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c index 3d1ea09a06a1..b0b0a69b3563 100644 --- a/drivers/scsi/sata_promise.c +++ b/drivers/scsi/sata_promise.c | |||
| @@ -66,6 +66,7 @@ enum { | |||
| 66 | board_2037x = 0, /* FastTrak S150 TX2plus */ | 66 | board_2037x = 0, /* FastTrak S150 TX2plus */ |
| 67 | board_20319 = 1, /* FastTrak S150 TX4 */ | 67 | board_20319 = 1, /* FastTrak S150 TX4 */ |
| 68 | board_20619 = 2, /* FastTrak TX4000 */ | 68 | board_20619 = 2, /* FastTrak TX4000 */ |
| 69 | board_20771 = 3, /* FastTrak TX2300 */ | ||
| 69 | 70 | ||
| 70 | PDC_HAS_PATA = (1 << 1), /* PDC20375 has PATA */ | 71 | PDC_HAS_PATA = (1 << 1), /* PDC20375 has PATA */ |
| 71 | 72 | ||
| @@ -190,6 +191,16 @@ static const struct ata_port_info pdc_port_info[] = { | |||
| 190 | .udma_mask = 0x7f, /* udma0-6 ; FIXME */ | 191 | .udma_mask = 0x7f, /* udma0-6 ; FIXME */ |
| 191 | .port_ops = &pdc_pata_ops, | 192 | .port_ops = &pdc_pata_ops, |
| 192 | }, | 193 | }, |
| 194 | |||
| 195 | /* board_20771 */ | ||
| 196 | { | ||
| 197 | .sht = &pdc_ata_sht, | ||
| 198 | .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA, | ||
| 199 | .pio_mask = 0x1f, /* pio0-4 */ | ||
| 200 | .mwdma_mask = 0x07, /* mwdma0-2 */ | ||
| 201 | .udma_mask = 0x7f, /* udma0-6 ; FIXME */ | ||
| 202 | .port_ops = &pdc_sata_ops, | ||
| 203 | }, | ||
| 193 | }; | 204 | }; |
| 194 | 205 | ||
| 195 | static const struct pci_device_id pdc_ata_pci_tbl[] = { | 206 | static const struct pci_device_id pdc_ata_pci_tbl[] = { |
| @@ -226,6 +237,8 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = { | |||
| 226 | { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 237 | { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
| 227 | board_20619 }, | 238 | board_20619 }, |
| 228 | 239 | ||
| 240 | { PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
| 241 | board_20771 }, | ||
| 229 | { } /* terminate list */ | 242 | { } /* terminate list */ |
| 230 | }; | 243 | }; |
| 231 | 244 | ||
| @@ -706,6 +719,9 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e | |||
| 706 | case board_2037x: | 719 | case board_2037x: |
| 707 | probe_ent->n_ports = 2; | 720 | probe_ent->n_ports = 2; |
| 708 | break; | 721 | break; |
| 722 | case board_20771: | ||
| 723 | probe_ent->n_ports = 2; | ||
| 724 | break; | ||
| 709 | case board_20619: | 725 | case board_20619: |
| 710 | probe_ent->n_ports = 4; | 726 | probe_ent->n_ports = 4; |
| 711 | 727 | ||
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c index 668373590aa4..d8472563fde8 100644 --- a/drivers/scsi/sata_svw.c +++ b/drivers/scsi/sata_svw.c | |||
| @@ -470,6 +470,7 @@ static const struct pci_device_id k2_sata_pci_tbl[] = { | |||
| 470 | { 0x1166, 0x0241, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, | 470 | { 0x1166, 0x0241, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, |
| 471 | { 0x1166, 0x0242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, | 471 | { 0x1166, 0x0242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, |
| 472 | { 0x1166, 0x024a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, | 472 | { 0x1166, 0x024a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, |
| 473 | { 0x1166, 0x024b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, | ||
| 473 | { } | 474 | { } |
| 474 | }; | 475 | }; |
| 475 | 476 | ||
diff --git a/drivers/video/sbuslib.c b/drivers/video/sbuslib.c index 55e6e2d60d3a..a4d7cc51ce0b 100644 --- a/drivers/video/sbuslib.c +++ b/drivers/video/sbuslib.c | |||
| @@ -199,8 +199,7 @@ struct fbcmap32 { | |||
| 199 | #define FBIOPUTCMAP32 _IOW('F', 3, struct fbcmap32) | 199 | #define FBIOPUTCMAP32 _IOW('F', 3, struct fbcmap32) |
| 200 | #define FBIOGETCMAP32 _IOW('F', 4, struct fbcmap32) | 200 | #define FBIOGETCMAP32 _IOW('F', 4, struct fbcmap32) |
| 201 | 201 | ||
| 202 | static int fbiogetputcmap(struct file *file, struct fb_info *info, | 202 | static int fbiogetputcmap(struct fb_info *info, unsigned int cmd, unsigned long arg) |
| 203 | unsigned int cmd, unsigned long arg) | ||
| 204 | { | 203 | { |
| 205 | struct fbcmap32 __user *argp = (void __user *)arg; | 204 | struct fbcmap32 __user *argp = (void __user *)arg; |
| 206 | struct fbcmap __user *p = compat_alloc_user_space(sizeof(*p)); | 205 | struct fbcmap __user *p = compat_alloc_user_space(sizeof(*p)); |
| @@ -236,8 +235,7 @@ struct fbcursor32 { | |||
| 236 | #define FBIOSCURSOR32 _IOW('F', 24, struct fbcursor32) | 235 | #define FBIOSCURSOR32 _IOW('F', 24, struct fbcursor32) |
| 237 | #define FBIOGCURSOR32 _IOW('F', 25, struct fbcursor32) | 236 | #define FBIOGCURSOR32 _IOW('F', 25, struct fbcursor32) |
| 238 | 237 | ||
| 239 | static int fbiogscursor(struct file *file, struct fb_info *info, | 238 | static int fbiogscursor(struct fb_info *info, unsigned long arg) |
| 240 | unsigned long arg) | ||
| 241 | { | 239 | { |
| 242 | struct fbcursor __user *p = compat_alloc_user_space(sizeof(*p)); | 240 | struct fbcursor __user *p = compat_alloc_user_space(sizeof(*p)); |
| 243 | struct fbcursor32 __user *argp = (void __user *)arg; | 241 | struct fbcursor32 __user *argp = (void __user *)arg; |
| @@ -263,8 +261,7 @@ static int fbiogscursor(struct file *file, struct fb_info *info, | |||
| 263 | return info->fbops->fb_ioctl(info, FBIOSCURSOR, (unsigned long)p); | 261 | return info->fbops->fb_ioctl(info, FBIOSCURSOR, (unsigned long)p); |
| 264 | } | 262 | } |
| 265 | 263 | ||
| 266 | long sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd, | 264 | int sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) |
| 267 | unsigned long arg) | ||
| 268 | { | 265 | { |
| 269 | switch (cmd) { | 266 | switch (cmd) { |
| 270 | case FBIOGTYPE: | 267 | case FBIOGTYPE: |
diff --git a/drivers/video/sbuslib.h b/drivers/video/sbuslib.h index f753939013ed..492828c3fe8f 100644 --- a/drivers/video/sbuslib.h +++ b/drivers/video/sbuslib.h | |||
| @@ -20,7 +20,7 @@ extern int sbusfb_mmap_helper(struct sbus_mmap_map *map, | |||
| 20 | int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg, | 20 | int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg, |
| 21 | struct fb_info *info, | 21 | struct fb_info *info, |
| 22 | int type, int fb_depth, unsigned long fb_size); | 22 | int type, int fb_depth, unsigned long fb_size); |
| 23 | long sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd, | 23 | int sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd, |
| 24 | unsigned long arg); | 24 | unsigned long arg); |
| 25 | 25 | ||
| 26 | #endif /* _SBUSLIB_H */ | 26 | #endif /* _SBUSLIB_H */ |
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index d1db8c17a74e..120626789406 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c | |||
| @@ -336,24 +336,47 @@ static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh) | |||
| 336 | } | 336 | } |
| 337 | 337 | ||
| 338 | /* | 338 | /* |
| 339 | * Submit all of the bios for all of the ioends we have saved up, | 339 | * Submit all of the bios for all of the ioends we have saved up, covering the |
| 340 | * covering the initial writepage page and also any probed pages. | 340 | * initial writepage page and also any probed pages. |
| 341 | * | ||
| 342 | * Because we may have multiple ioends spanning a page, we need to start | ||
| 343 | * writeback on all the buffers before we submit them for I/O. If we mark the | ||
| 344 | * buffers as we got, then we can end up with a page that only has buffers | ||
| 345 | * marked async write and I/O complete on can occur before we mark the other | ||
| 346 | * buffers async write. | ||
| 347 | * | ||
| 348 | * The end result of this is that we trip a bug in end_page_writeback() because | ||
| 349 | * we call it twice for the one page as the code in end_buffer_async_write() | ||
| 350 | * assumes that all buffers on the page are started at the same time. | ||
| 351 | * | ||
| 352 | * The fix is two passes across the ioend list - one to start writeback on the | ||
| 353 | * bufferheads, and then a second one to submit them for I/O. | ||
| 341 | */ | 354 | */ |
| 342 | STATIC void | 355 | STATIC void |
| 343 | xfs_submit_ioend( | 356 | xfs_submit_ioend( |
| 344 | xfs_ioend_t *ioend) | 357 | xfs_ioend_t *ioend) |
| 345 | { | 358 | { |
| 359 | xfs_ioend_t *head = ioend; | ||
| 346 | xfs_ioend_t *next; | 360 | xfs_ioend_t *next; |
| 347 | struct buffer_head *bh; | 361 | struct buffer_head *bh; |
| 348 | struct bio *bio; | 362 | struct bio *bio; |
| 349 | sector_t lastblock = 0; | 363 | sector_t lastblock = 0; |
| 350 | 364 | ||
| 365 | /* Pass 1 - start writeback */ | ||
| 366 | do { | ||
| 367 | next = ioend->io_list; | ||
| 368 | for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) { | ||
| 369 | xfs_start_buffer_writeback(bh); | ||
| 370 | } | ||
| 371 | } while ((ioend = next) != NULL); | ||
| 372 | |||
| 373 | /* Pass 2 - submit I/O */ | ||
| 374 | ioend = head; | ||
| 351 | do { | 375 | do { |
| 352 | next = ioend->io_list; | 376 | next = ioend->io_list; |
| 353 | bio = NULL; | 377 | bio = NULL; |
| 354 | 378 | ||
| 355 | for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) { | 379 | for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) { |
| 356 | xfs_start_buffer_writeback(bh); | ||
| 357 | 380 | ||
| 358 | if (!bio) { | 381 | if (!bio) { |
| 359 | retry: | 382 | retry: |
diff --git a/include/asm-powerpc/lppaca.h b/include/asm-powerpc/lppaca.h index cd9f11f1ef14..4dc514aabfe7 100644 --- a/include/asm-powerpc/lppaca.h +++ b/include/asm-powerpc/lppaca.h | |||
| @@ -31,7 +31,7 @@ | |||
| 31 | 31 | ||
| 32 | /* The Hypervisor barfs if the lppaca crosses a page boundary. A 1k | 32 | /* The Hypervisor barfs if the lppaca crosses a page boundary. A 1k |
| 33 | * alignment is sufficient to prevent this */ | 33 | * alignment is sufficient to prevent this */ |
| 34 | struct __attribute__((__aligned__(0x400))) lppaca { | 34 | struct lppaca { |
| 35 | //============================================================================= | 35 | //============================================================================= |
| 36 | // CACHE_LINE_1 0x0000 - 0x007F Contains read-only data | 36 | // CACHE_LINE_1 0x0000 - 0x007F Contains read-only data |
| 37 | // NOTE: The xDynXyz fields are fields that will be dynamically changed by | 37 | // NOTE: The xDynXyz fields are fields that will be dynamically changed by |
| @@ -129,7 +129,7 @@ struct __attribute__((__aligned__(0x400))) lppaca { | |||
| 129 | // CACHE_LINE_4-5 0x0100 - 0x01FF Contains PMC interrupt data | 129 | // CACHE_LINE_4-5 0x0100 - 0x01FF Contains PMC interrupt data |
| 130 | //============================================================================= | 130 | //============================================================================= |
| 131 | u8 pmc_save_area[256]; // PMC interrupt Area x00-xFF | 131 | u8 pmc_save_area[256]; // PMC interrupt Area x00-xFF |
| 132 | }; | 132 | } __attribute__((__aligned__(0x400))); |
| 133 | 133 | ||
| 134 | extern struct lppaca lppaca[]; | 134 | extern struct lppaca lppaca[]; |
| 135 | 135 | ||
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 323924edb26a..a5363324cf95 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
| @@ -228,6 +228,7 @@ extern void dump_stack(void); | |||
| 228 | ntohs((addr).s6_addr16[6]), \ | 228 | ntohs((addr).s6_addr16[6]), \ |
| 229 | ntohs((addr).s6_addr16[7]) | 229 | ntohs((addr).s6_addr16[7]) |
| 230 | #define NIP6_FMT "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x" | 230 | #define NIP6_FMT "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x" |
| 231 | #define NIP6_SEQFMT "%04x%04x%04x%04x%04x%04x%04x%04x" | ||
| 231 | 232 | ||
| 232 | #if defined(__LITTLE_ENDIAN) | 233 | #if defined(__LITTLE_ENDIAN) |
| 233 | #define HIPQUAD(addr) \ | 234 | #define HIPQUAD(addr) \ |
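
NIP6_SEQFMT prints the same eight 16-bit groups as NIP6_FMT but without the colon separators, which is why the /proc hunks further down shrink their column widths from 39 to 32 characters. A quick userspace check of the two widths, using only the macros shown above:

#include <stdio.h>

#define NIP6_FMT    "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x"
#define NIP6_SEQFMT "%04x%04x%04x%04x%04x%04x%04x%04x"

int main(void)
{
	/* fe80::1 split into eight 16-bit groups */
	unsigned int a[8] = { 0xfe80, 0, 0, 0, 0, 0, 0, 1 };
	char buf[64];

	printf("%d chars\n", snprintf(buf, sizeof(buf), NIP6_FMT,
		a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7]));   /* 39 */
	printf("%d chars\n", snprintf(buf, sizeof(buf), NIP6_SEQFMT,
		a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7]));   /* 32 */
	return 0;
}
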
diff --git a/include/linux/libata.h b/include/linux/libata.h index a43c95f8f968..9e5db2949c58 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
| @@ -126,16 +126,19 @@ enum { | |||
| 126 | 126 | ||
| 127 | ATA_FLAG_SUSPENDED = (1 << 12), /* port is suspended */ | 127 | ATA_FLAG_SUSPENDED = (1 << 12), /* port is suspended */ |
| 128 | 128 | ||
| 129 | ATA_FLAG_PIO_LBA48 = (1 << 13), /* Host DMA engine is LBA28 only */ | ||
| 130 | ATA_FLAG_IRQ_MASK = (1 << 14), /* Mask IRQ in PIO xfers */ | ||
| 131 | |||
| 129 | ATA_QCFLAG_ACTIVE = (1 << 1), /* cmd not yet ack'd to scsi layer */ | 132 | ATA_QCFLAG_ACTIVE = (1 << 1), /* cmd not yet ack'd to scsi layer */ |
| 130 | ATA_QCFLAG_SG = (1 << 3), /* have s/g table? */ | 133 | ATA_QCFLAG_SG = (1 << 3), /* have s/g table? */ |
| 131 | ATA_QCFLAG_SINGLE = (1 << 4), /* no s/g, just a single buffer */ | 134 | ATA_QCFLAG_SINGLE = (1 << 4), /* no s/g, just a single buffer */ |
| 132 | ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE, | 135 | ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE, |
| 133 | 136 | ||
| 134 | /* various lengths of time */ | 137 | /* various lengths of time */ |
| 135 | ATA_TMOUT_EDD = 5 * HZ, /* hueristic */ | 138 | ATA_TMOUT_EDD = 5 * HZ, /* heuristic */ |
| 136 | ATA_TMOUT_PIO = 30 * HZ, | 139 | ATA_TMOUT_PIO = 30 * HZ, |
| 137 | ATA_TMOUT_BOOT = 30 * HZ, /* hueristic */ | 140 | ATA_TMOUT_BOOT = 30 * HZ, /* heuristic */ |
| 138 | ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* hueristic */ | 141 | ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* heuristic */ |
| 139 | ATA_TMOUT_CDB = 30 * HZ, | 142 | ATA_TMOUT_CDB = 30 * HZ, |
| 140 | ATA_TMOUT_CDB_QUICK = 5 * HZ, | 143 | ATA_TMOUT_CDB_QUICK = 5 * HZ, |
| 141 | ATA_TMOUT_INTERNAL = 30 * HZ, | 144 | ATA_TMOUT_INTERNAL = 30 * HZ, |
| @@ -499,6 +502,8 @@ extern int ata_scsi_slave_config(struct scsi_device *sdev); | |||
| 499 | /* | 502 | /* |
| 500 | * Timing helpers | 503 | * Timing helpers |
| 501 | */ | 504 | */ |
| 505 | |||
| 506 | extern unsigned int ata_pio_need_iordy(const struct ata_device *); | ||
| 502 | extern int ata_timing_compute(struct ata_device *, unsigned short, | 507 | extern int ata_timing_compute(struct ata_device *, unsigned short, |
| 503 | struct ata_timing *, int, int); | 508 | struct ata_timing *, int, int); |
| 504 | extern void ata_timing_merge(const struct ata_timing *, | 509 | extern void ata_timing_merge(const struct ata_timing *, |
diff --git a/include/linux/netfilter_ipv6/ip6t_ah.h b/include/linux/netfilter_ipv6/ip6t_ah.h index c4f0793a0a98..8531879eb464 100644 --- a/include/linux/netfilter_ipv6/ip6t_ah.h +++ b/include/linux/netfilter_ipv6/ip6t_ah.h | |||
| @@ -18,13 +18,4 @@ struct ip6t_ah | |||
| 18 | #define IP6T_AH_INV_LEN 0x02 /* Invert the sense of length. */ | 18 | #define IP6T_AH_INV_LEN 0x02 /* Invert the sense of length. */ |
| 19 | #define IP6T_AH_INV_MASK 0x03 /* All possible flags. */ | 19 | #define IP6T_AH_INV_MASK 0x03 /* All possible flags. */ |
| 20 | 20 | ||
| 21 | #define MASK_HOPOPTS 128 | ||
| 22 | #define MASK_DSTOPTS 64 | ||
| 23 | #define MASK_ROUTING 32 | ||
| 24 | #define MASK_FRAGMENT 16 | ||
| 25 | #define MASK_AH 8 | ||
| 26 | #define MASK_ESP 4 | ||
| 27 | #define MASK_NONE 2 | ||
| 28 | #define MASK_PROTO 1 | ||
| 29 | |||
| 30 | #endif /*_IP6T_AH_H*/ | 21 | #endif /*_IP6T_AH_H*/ |
diff --git a/include/linux/netfilter_ipv6/ip6t_esp.h b/include/linux/netfilter_ipv6/ip6t_esp.h index 01142b98a231..a91b6abc8079 100644 --- a/include/linux/netfilter_ipv6/ip6t_esp.h +++ b/include/linux/netfilter_ipv6/ip6t_esp.h | |||
| @@ -7,15 +7,6 @@ struct ip6t_esp | |||
| 7 | u_int8_t invflags; /* Inverse flags */ | 7 | u_int8_t invflags; /* Inverse flags */ |
| 8 | }; | 8 | }; |
| 9 | 9 | ||
| 10 | #define MASK_HOPOPTS 128 | ||
| 11 | #define MASK_DSTOPTS 64 | ||
| 12 | #define MASK_ROUTING 32 | ||
| 13 | #define MASK_FRAGMENT 16 | ||
| 14 | #define MASK_AH 8 | ||
| 15 | #define MASK_ESP 4 | ||
| 16 | #define MASK_NONE 2 | ||
| 17 | #define MASK_PROTO 1 | ||
| 18 | |||
| 19 | /* Values for "invflags" field in struct ip6t_esp. */ | 10 | /* Values for "invflags" field in struct ip6t_esp. */ |
| 20 | #define IP6T_ESP_INV_SPI 0x01 /* Invert the sense of spi. */ | 11 | #define IP6T_ESP_INV_SPI 0x01 /* Invert the sense of spi. */ |
| 21 | #define IP6T_ESP_INV_MASK 0x01 /* All possible flags. */ | 12 | #define IP6T_ESP_INV_MASK 0x01 /* All possible flags. */ |
diff --git a/include/linux/netfilter_ipv6/ip6t_frag.h b/include/linux/netfilter_ipv6/ip6t_frag.h index 449a57eca7dd..66070a0d6dfc 100644 --- a/include/linux/netfilter_ipv6/ip6t_frag.h +++ b/include/linux/netfilter_ipv6/ip6t_frag.h | |||
| @@ -21,13 +21,4 @@ struct ip6t_frag | |||
| 21 | #define IP6T_FRAG_INV_LEN 0x02 /* Invert the sense of length. */ | 21 | #define IP6T_FRAG_INV_LEN 0x02 /* Invert the sense of length. */ |
| 22 | #define IP6T_FRAG_INV_MASK 0x03 /* All possible flags. */ | 22 | #define IP6T_FRAG_INV_MASK 0x03 /* All possible flags. */ |
| 23 | 23 | ||
| 24 | #define MASK_HOPOPTS 128 | ||
| 25 | #define MASK_DSTOPTS 64 | ||
| 26 | #define MASK_ROUTING 32 | ||
| 27 | #define MASK_FRAGMENT 16 | ||
| 28 | #define MASK_AH 8 | ||
| 29 | #define MASK_ESP 4 | ||
| 30 | #define MASK_NONE 2 | ||
| 31 | #define MASK_PROTO 1 | ||
| 32 | |||
| 33 | #endif /*_IP6T_FRAG_H*/ | 24 | #endif /*_IP6T_FRAG_H*/ |
diff --git a/include/linux/netfilter_ipv6/ip6t_opts.h b/include/linux/netfilter_ipv6/ip6t_opts.h index e259b6275bd2..a07e36380ae8 100644 --- a/include/linux/netfilter_ipv6/ip6t_opts.h +++ b/include/linux/netfilter_ipv6/ip6t_opts.h | |||
| @@ -20,13 +20,4 @@ struct ip6t_opts | |||
| 20 | #define IP6T_OPTS_INV_LEN 0x01 /* Invert the sense of length. */ | 20 | #define IP6T_OPTS_INV_LEN 0x01 /* Invert the sense of length. */ |
| 21 | #define IP6T_OPTS_INV_MASK 0x01 /* All possible flags. */ | 21 | #define IP6T_OPTS_INV_MASK 0x01 /* All possible flags. */ |
| 22 | 22 | ||
| 23 | #define MASK_HOPOPTS 128 | ||
| 24 | #define MASK_DSTOPTS 64 | ||
| 25 | #define MASK_ROUTING 32 | ||
| 26 | #define MASK_FRAGMENT 16 | ||
| 27 | #define MASK_AH 8 | ||
| 28 | #define MASK_ESP 4 | ||
| 29 | #define MASK_NONE 2 | ||
| 30 | #define MASK_PROTO 1 | ||
| 31 | |||
| 32 | #endif /*_IP6T_OPTS_H*/ | 23 | #endif /*_IP6T_OPTS_H*/ |
diff --git a/include/linux/netfilter_ipv6/ip6t_rt.h b/include/linux/netfilter_ipv6/ip6t_rt.h index f1070fbf2757..52156023e8db 100644 --- a/include/linux/netfilter_ipv6/ip6t_rt.h +++ b/include/linux/netfilter_ipv6/ip6t_rt.h | |||
| @@ -30,13 +30,4 @@ struct ip6t_rt | |||
| 30 | #define IP6T_RT_INV_LEN 0x04 /* Invert the sense of length. */ | 30 | #define IP6T_RT_INV_LEN 0x04 /* Invert the sense of length. */ |
| 31 | #define IP6T_RT_INV_MASK 0x07 /* All possible flags. */ | 31 | #define IP6T_RT_INV_MASK 0x07 /* All possible flags. */ |
| 32 | 32 | ||
| 33 | #define MASK_HOPOPTS 128 | ||
| 34 | #define MASK_DSTOPTS 64 | ||
| 35 | #define MASK_ROUTING 32 | ||
| 36 | #define MASK_FRAGMENT 16 | ||
| 37 | #define MASK_AH 8 | ||
| 38 | #define MASK_ESP 4 | ||
| 39 | #define MASK_NONE 2 | ||
| 40 | #define MASK_PROTO 1 | ||
| 41 | |||
| 42 | #endif /*_IP6T_RT_H*/ | 33 | #endif /*_IP6T_RT_H*/ |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index e5fd66c5650b..ad7cc22bd424 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
| @@ -926,7 +926,7 @@ static inline int skb_tailroom(const struct sk_buff *skb) | |||
| 926 | * Increase the headroom of an empty &sk_buff by reducing the tail | 926 | * Increase the headroom of an empty &sk_buff by reducing the tail |
| 927 | * room. This is only allowed for an empty buffer. | 927 | * room. This is only allowed for an empty buffer. |
| 928 | */ | 928 | */ |
| 929 | static inline void skb_reserve(struct sk_buff *skb, unsigned int len) | 929 | static inline void skb_reserve(struct sk_buff *skb, int len) |
| 930 | { | 930 | { |
| 931 | skb->data += len; | 931 | skb->data += len; |
| 932 | skb->tail += len; | 932 | skb->tail += len; |
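
skb_reserve() only adjusts two pointers, so the signedness of len is visible to callers: with a plain int, a negative value moves data and tail back toward the head, whereas an unsigned len would convert it into a huge positive offset. A tiny model of that pointer arithmetic; struct buf and buf_reserve() are hypothetical names for this sketch:

#include <stdio.h>

struct buf {
	unsigned char *data;
	unsigned char *tail;
};

/* Mirrors the two-line body of skb_reserve() shown above. */
static void buf_reserve(struct buf *b, int len)
{
	b->data += len;
	b->tail += len;
}

int main(void)
{
	unsigned char storage[128];
	struct buf b = { storage, storage };

	buf_reserve(&b, 16);   /* leave 16 bytes of headroom */
	printf("headroom now %ld\n", (long)(b.data - storage));

	buf_reserve(&b, -8);   /* signed len: give 8 bytes back */
	printf("headroom now %ld\n", (long)(b.data - storage));
	return 0;
}
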
diff --git a/net/bridge/netfilter/ebt_ip.c b/net/bridge/netfilter/ebt_ip.c index f158fe67dd60..dc5d0b2427cf 100644 --- a/net/bridge/netfilter/ebt_ip.c +++ b/net/bridge/netfilter/ebt_ip.c | |||
| @@ -92,7 +92,9 @@ static int ebt_ip_check(const char *tablename, unsigned int hookmask, | |||
| 92 | if (info->invflags & EBT_IP_PROTO) | 92 | if (info->invflags & EBT_IP_PROTO) |
| 93 | return -EINVAL; | 93 | return -EINVAL; |
| 94 | if (info->protocol != IPPROTO_TCP && | 94 | if (info->protocol != IPPROTO_TCP && |
| 95 | info->protocol != IPPROTO_UDP) | 95 | info->protocol != IPPROTO_UDP && |
| 96 | info->protocol != IPPROTO_SCTP && | ||
| 97 | info->protocol != IPPROTO_DCCP) | ||
| 96 | return -EINVAL; | 98 | return -EINVAL; |
| 97 | } | 99 | } |
| 98 | if (info->bitmask & EBT_IP_DPORT && info->dport[0] > info->dport[1]) | 100 | if (info->bitmask & EBT_IP_DPORT && info->dport[0] > info->dport[1]) |
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c index a29c1232c420..0128fbbe2328 100644 --- a/net/bridge/netfilter/ebt_log.c +++ b/net/bridge/netfilter/ebt_log.c | |||
| @@ -95,7 +95,9 @@ ebt_log_packet(unsigned int pf, unsigned int hooknum, | |||
| 95 | "tos=0x%02X, IP proto=%d", NIPQUAD(ih->saddr), | 95 | "tos=0x%02X, IP proto=%d", NIPQUAD(ih->saddr), |
| 96 | NIPQUAD(ih->daddr), ih->tos, ih->protocol); | 96 | NIPQUAD(ih->daddr), ih->tos, ih->protocol); |
| 97 | if (ih->protocol == IPPROTO_TCP || | 97 | if (ih->protocol == IPPROTO_TCP || |
| 98 | ih->protocol == IPPROTO_UDP) { | 98 | ih->protocol == IPPROTO_UDP || |
| 99 | ih->protocol == IPPROTO_SCTP || | ||
| 100 | ih->protocol == IPPROTO_DCCP) { | ||
| 99 | struct tcpudphdr _ports, *pptr; | 101 | struct tcpudphdr _ports, *pptr; |
| 100 | 102 | ||
| 101 | pptr = skb_header_pointer(skb, ih->ihl*4, | 103 | pptr = skb_header_pointer(skb, ih->ihl*4, |
diff --git a/net/core/filter.c b/net/core/filter.c index a52665f75224..9540946a48f3 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
| @@ -74,7 +74,6 @@ static inline void *load_pointer(struct sk_buff *skb, int k, | |||
| 74 | * filtering, filter is the array of filter instructions, and | 74 | * filtering, filter is the array of filter instructions, and |
| 75 | * len is the number of filter blocks in the array. | 75 | * len is the number of filter blocks in the array. |
| 76 | */ | 76 | */ |
| 77 | |||
| 78 | unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen) | 77 | unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen) |
| 79 | { | 78 | { |
| 80 | struct sock_filter *fentry; /* We walk down these */ | 79 | struct sock_filter *fentry; /* We walk down these */ |
| @@ -175,7 +174,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int | |||
| 175 | continue; | 174 | continue; |
| 176 | case BPF_LD|BPF_W|BPF_ABS: | 175 | case BPF_LD|BPF_W|BPF_ABS: |
| 177 | k = fentry->k; | 176 | k = fentry->k; |
| 178 | load_w: | 177 | load_w: |
| 179 | ptr = load_pointer(skb, k, 4, &tmp); | 178 | ptr = load_pointer(skb, k, 4, &tmp); |
| 180 | if (ptr != NULL) { | 179 | if (ptr != NULL) { |
| 181 | A = ntohl(*(u32 *)ptr); | 180 | A = ntohl(*(u32 *)ptr); |
| @@ -184,7 +183,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int | |||
| 184 | break; | 183 | break; |
| 185 | case BPF_LD|BPF_H|BPF_ABS: | 184 | case BPF_LD|BPF_H|BPF_ABS: |
| 186 | k = fentry->k; | 185 | k = fentry->k; |
| 187 | load_h: | 186 | load_h: |
| 188 | ptr = load_pointer(skb, k, 2, &tmp); | 187 | ptr = load_pointer(skb, k, 2, &tmp); |
| 189 | if (ptr != NULL) { | 188 | if (ptr != NULL) { |
| 190 | A = ntohs(*(u16 *)ptr); | 189 | A = ntohs(*(u16 *)ptr); |
| @@ -374,7 +373,7 @@ int sk_chk_filter(struct sock_filter *filter, int flen) | |||
| 374 | case BPF_JMP|BPF_JSET|BPF_K: | 373 | case BPF_JMP|BPF_JSET|BPF_K: |
| 375 | case BPF_JMP|BPF_JSET|BPF_X: | 374 | case BPF_JMP|BPF_JSET|BPF_X: |
| 376 | /* for conditionals both must be safe */ | 375 | /* for conditionals both must be safe */ |
| 377 | if (pc + ftest->jt + 1 >= flen || | 376 | if (pc + ftest->jt + 1 >= flen || |
| 378 | pc + ftest->jf + 1 >= flen) | 377 | pc + ftest->jf + 1 >= flen) |
| 379 | return -EINVAL; | 378 | return -EINVAL; |
| 380 | break; | 379 | break; |
| @@ -384,7 +383,7 @@ int sk_chk_filter(struct sock_filter *filter, int flen) | |||
| 384 | } | 383 | } |
| 385 | } | 384 | } |
| 386 | 385 | ||
| 387 | return (BPF_CLASS(filter[flen - 1].code) == BPF_RET) ? 0 : -EINVAL; | 386 | return (BPF_CLASS(filter[flen - 1].code) == BPF_RET) ? 0 : -EINVAL; |
| 388 | } | 387 | } |
| 389 | 388 | ||
| 390 | /** | 389 | /** |
| @@ -404,8 +403,8 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) | |||
| 404 | int err; | 403 | int err; |
| 405 | 404 | ||
| 406 | /* Make sure new filter is there and in the right amounts. */ | 405 | /* Make sure new filter is there and in the right amounts. */ |
| 407 | if (fprog->filter == NULL) | 406 | if (fprog->filter == NULL) |
| 408 | return -EINVAL; | 407 | return -EINVAL; |
| 409 | 408 | ||
| 410 | fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL); | 409 | fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL); |
| 411 | if (!fp) | 410 | if (!fp) |
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 281a632fa6a6..ea51f8d02eb8 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
| @@ -703,7 +703,7 @@ int netpoll_setup(struct netpoll *np) | |||
| 703 | } | 703 | } |
| 704 | } | 704 | } |
| 705 | 705 | ||
| 706 | if (!memcmp(np->local_mac, "\0\0\0\0\0\0", 6) && ndev->dev_addr) | 706 | if (is_zero_ether_addr(np->local_mac) && ndev->dev_addr) |
| 707 | memcpy(np->local_mac, ndev->dev_addr, 6); | 707 | memcpy(np->local_mac, ndev->dev_addr, 6); |
| 708 | 708 | ||
| 709 | if (!np->local_ip) { | 709 | if (!np->local_ip) { |
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 39063122fbb7..3827f881f429 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
| @@ -139,6 +139,7 @@ | |||
| 139 | #include <linux/proc_fs.h> | 139 | #include <linux/proc_fs.h> |
| 140 | #include <linux/seq_file.h> | 140 | #include <linux/seq_file.h> |
| 141 | #include <linux/wait.h> | 141 | #include <linux/wait.h> |
| 142 | #include <linux/etherdevice.h> | ||
| 142 | #include <net/checksum.h> | 143 | #include <net/checksum.h> |
| 143 | #include <net/ipv6.h> | 144 | #include <net/ipv6.h> |
| 144 | #include <net/addrconf.h> | 145 | #include <net/addrconf.h> |
| @@ -281,8 +282,8 @@ struct pktgen_dev { | |||
| 281 | __u32 src_mac_count; /* How many MACs to iterate through */ | 282 | __u32 src_mac_count; /* How many MACs to iterate through */ |
| 282 | __u32 dst_mac_count; /* How many MACs to iterate through */ | 283 | __u32 dst_mac_count; /* How many MACs to iterate through */ |
| 283 | 284 | ||
| 284 | unsigned char dst_mac[6]; | 285 | unsigned char dst_mac[ETH_ALEN]; |
| 285 | unsigned char src_mac[6]; | 286 | unsigned char src_mac[ETH_ALEN]; |
| 286 | 287 | ||
| 287 | __u32 cur_dst_mac_offset; | 288 | __u32 cur_dst_mac_offset; |
| 288 | __u32 cur_src_mac_offset; | 289 | __u32 cur_src_mac_offset; |
| @@ -594,16 +595,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v) | |||
| 594 | 595 | ||
| 595 | seq_puts(seq, " src_mac: "); | 596 | seq_puts(seq, " src_mac: "); |
| 596 | 597 | ||
| 597 | if ((pkt_dev->src_mac[0] == 0) && | 598 | if (is_zero_ether_addr(pkt_dev->src_mac)) |
| 598 | (pkt_dev->src_mac[1] == 0) && | ||
| 599 | (pkt_dev->src_mac[2] == 0) && | ||
| 600 | (pkt_dev->src_mac[3] == 0) && | ||
| 601 | (pkt_dev->src_mac[4] == 0) && | ||
| 602 | (pkt_dev->src_mac[5] == 0)) | ||
| 603 | |||
| 604 | for (i = 0; i < 6; i++) | 599 | for (i = 0; i < 6; i++) |
| 605 | seq_printf(seq, "%02X%s", pkt_dev->odev->dev_addr[i], i == 5 ? " " : ":"); | 600 | seq_printf(seq, "%02X%s", pkt_dev->odev->dev_addr[i], i == 5 ? " " : ":"); |
| 606 | |||
| 607 | else | 601 | else |
| 608 | for (i = 0; i < 6; i++) | 602 | for (i = 0; i < 6; i++) |
| 609 | seq_printf(seq, "%02X%s", pkt_dev->src_mac[i], i == 5 ? " " : ":"); | 603 | seq_printf(seq, "%02X%s", pkt_dev->src_mac[i], i == 5 ? " " : ":"); |
| @@ -1189,9 +1183,9 @@ static ssize_t pktgen_if_write(struct file *file, const char __user *user_buffer | |||
| 1189 | } | 1183 | } |
| 1190 | if (!strcmp(name, "dst_mac")) { | 1184 | if (!strcmp(name, "dst_mac")) { |
| 1191 | char *v = valstr; | 1185 | char *v = valstr; |
| 1192 | unsigned char old_dmac[6]; | 1186 | unsigned char old_dmac[ETH_ALEN]; |
| 1193 | unsigned char *m = pkt_dev->dst_mac; | 1187 | unsigned char *m = pkt_dev->dst_mac; |
| 1194 | memcpy(old_dmac, pkt_dev->dst_mac, 6); | 1188 | memcpy(old_dmac, pkt_dev->dst_mac, ETH_ALEN); |
| 1195 | 1189 | ||
| 1196 | len = strn_len(&user_buffer[i], sizeof(valstr) - 1); | 1190 | len = strn_len(&user_buffer[i], sizeof(valstr) - 1); |
| 1197 | if (len < 0) { return len; } | 1191 | if (len < 0) { return len; } |
| @@ -1220,8 +1214,8 @@ static ssize_t pktgen_if_write(struct file *file, const char __user *user_buffer | |||
| 1220 | } | 1214 | } |
| 1221 | 1215 | ||
| 1222 | /* Set up Dest MAC */ | 1216 | /* Set up Dest MAC */ |
| 1223 | if (memcmp(old_dmac, pkt_dev->dst_mac, 6) != 0) | 1217 | if (compare_ether_addr(old_dmac, pkt_dev->dst_mac)) |
| 1224 | memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, 6); | 1218 | memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, ETH_ALEN); |
| 1225 | 1219 | ||
| 1226 | sprintf(pg_result, "OK: dstmac"); | 1220 | sprintf(pg_result, "OK: dstmac"); |
| 1227 | return count; | 1221 | return count; |
| @@ -1560,17 +1554,11 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev) | |||
| 1560 | 1554 | ||
| 1561 | /* Default to the interface's mac if not explicitly set. */ | 1555 | /* Default to the interface's mac if not explicitly set. */ |
| 1562 | 1556 | ||
| 1563 | if ((pkt_dev->src_mac[0] == 0) && | 1557 | if (is_zero_ether_addr(pkt_dev->src_mac)) |
| 1564 | (pkt_dev->src_mac[1] == 0) && | 1558 | memcpy(&(pkt_dev->hh[6]), pkt_dev->odev->dev_addr, ETH_ALEN); |
| 1565 | (pkt_dev->src_mac[2] == 0) && | ||
| 1566 | (pkt_dev->src_mac[3] == 0) && | ||
| 1567 | (pkt_dev->src_mac[4] == 0) && | ||
| 1568 | (pkt_dev->src_mac[5] == 0)) { | ||
| 1569 | 1559 | ||
| 1570 | memcpy(&(pkt_dev->hh[6]), pkt_dev->odev->dev_addr, 6); | ||
| 1571 | } | ||
| 1572 | /* Set up Dest MAC */ | 1560 | /* Set up Dest MAC */ |
| 1573 | memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, 6); | 1561 | memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, ETH_ALEN); |
| 1574 | 1562 | ||
| 1575 | /* Set up pkt size */ | 1563 | /* Set up pkt size */ |
| 1576 | pkt_dev->cur_pkt_size = pkt_dev->min_pkt_size; | 1564 | pkt_dev->cur_pkt_size = pkt_dev->min_pkt_size; |
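
The pktgen cleanups above replace open-coded six-byte comparisons with the helpers from <linux/etherdevice.h>. Simplified userspace equivalents of those two helpers, for illustration only (the kernel versions are the authoritative ones; compare_ether_addr() returns 0 when the addresses match):

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Simplified stand-ins for the <linux/etherdevice.h> helpers. */
static int is_zero_ether_addr(const unsigned char *a)
{
	return !(a[0] | a[1] | a[2] | a[3] | a[4] | a[5]);
}

static int compare_ether_addr(const unsigned char *a, const unsigned char *b)
{
	return memcmp(a, b, ETH_ALEN) != 0; /* 0 means "equal" */
}

int main(void)
{
	unsigned char zero[ETH_ALEN] = { 0 };
	unsigned char mac[ETH_ALEN]  = { 0x00, 0x0e, 0x0c, 0x01, 0x02, 0x03 };

	printf("%d %d\n", is_zero_ether_addr(zero), is_zero_ether_addr(mac)); /* 1 0 */
	printf("%d\n", compare_ether_addr(zero, mac));                        /* nonzero */
	return 0;
}
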
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c index ce9cb77c5c29..2c77dafbd091 100644 --- a/net/dccp/ackvec.c +++ b/net/dccp/ackvec.c | |||
| @@ -144,7 +144,7 @@ static inline int dccp_ackvec_set_buf_head_state(struct dccp_ackvec *av, | |||
| 144 | const unsigned char state) | 144 | const unsigned char state) |
| 145 | { | 145 | { |
| 146 | unsigned int gap; | 146 | unsigned int gap; |
| 147 | signed long new_head; | 147 | long new_head; |
| 148 | 148 | ||
| 149 | if (av->dccpav_vec_len + packets > av->dccpav_buf_len) | 149 | if (av->dccpav_vec_len + packets > av->dccpav_buf_len) |
| 150 | return -ENOBUFS; | 150 | return -ENOBUFS; |
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile index bcefe64b9317..e5c5b3202f02 100644 --- a/net/ipv4/netfilter/Makefile +++ b/net/ipv4/netfilter/Makefile | |||
| @@ -46,7 +46,6 @@ obj-$(CONFIG_IP_NF_NAT) += iptable_nat.o | |||
| 46 | obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o | 46 | obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o |
| 47 | 47 | ||
| 48 | # matches | 48 | # matches |
| 49 | obj-$(CONFIG_IP_NF_MATCH_HELPER) += ipt_helper.o | ||
| 50 | obj-$(CONFIG_IP_NF_MATCH_HASHLIMIT) += ipt_hashlimit.o | 49 | obj-$(CONFIG_IP_NF_MATCH_HASHLIMIT) += ipt_hashlimit.o |
| 51 | obj-$(CONFIG_IP_NF_MATCH_IPRANGE) += ipt_iprange.o | 50 | obj-$(CONFIG_IP_NF_MATCH_IPRANGE) += ipt_iprange.o |
| 52 | obj-$(CONFIG_IP_NF_MATCH_MULTIPORT) += ipt_multiport.o | 51 | obj-$(CONFIG_IP_NF_MATCH_MULTIPORT) += ipt_multiport.o |
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_gre.c b/net/ipv4/netfilter/ip_conntrack_proto_gre.c index c777abf16cb7..56794797d55b 100644 --- a/net/ipv4/netfilter/ip_conntrack_proto_gre.c +++ b/net/ipv4/netfilter/ip_conntrack_proto_gre.c | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | #include <linux/in.h> | 32 | #include <linux/in.h> |
| 33 | #include <linux/list.h> | 33 | #include <linux/list.h> |
| 34 | #include <linux/seq_file.h> | 34 | #include <linux/seq_file.h> |
| 35 | #include <linux/interrupt.h> | ||
| 35 | 36 | ||
| 36 | static DEFINE_RWLOCK(ip_ct_gre_lock); | 37 | static DEFINE_RWLOCK(ip_ct_gre_lock); |
| 37 | #define ASSERT_READ_LOCK(x) | 38 | #define ASSERT_READ_LOCK(x) |
diff --git a/net/ipv4/netfilter/ipt_policy.c b/net/ipv4/netfilter/ipt_policy.c index 709debcc69c9..18ca8258a1c5 100644 --- a/net/ipv4/netfilter/ipt_policy.c +++ b/net/ipv4/netfilter/ipt_policy.c | |||
| @@ -95,7 +95,10 @@ match_policy_out(const struct sk_buff *skb, const struct ipt_policy_info *info) | |||
| 95 | static int match(const struct sk_buff *skb, | 95 | static int match(const struct sk_buff *skb, |
| 96 | const struct net_device *in, | 96 | const struct net_device *in, |
| 97 | const struct net_device *out, | 97 | const struct net_device *out, |
| 98 | const void *matchinfo, int offset, int *hotdrop) | 98 | const void *matchinfo, |
| 99 | int offset, | ||
| 100 | unsigned int protoff, | ||
| 101 | int *hotdrop) | ||
| 99 | { | 102 | { |
| 100 | const struct ipt_policy_info *info = matchinfo; | 103 | const struct ipt_policy_info *info = matchinfo; |
| 101 | int ret; | 104 | int ret; |
| @@ -113,7 +116,7 @@ static int match(const struct sk_buff *skb, | |||
| 113 | return ret; | 116 | return ret; |
| 114 | } | 117 | } |
| 115 | 118 | ||
| 116 | static int checkentry(const char *tablename, const struct ipt_ip *ip, | 119 | static int checkentry(const char *tablename, const void *ip_void, |
| 117 | void *matchinfo, unsigned int matchsize, | 120 | void *matchinfo, unsigned int matchsize, |
| 118 | unsigned int hook_mask) | 121 | unsigned int hook_mask) |
| 119 | { | 122 | { |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index f701a136a6ae..f2e82afc15b3 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
| @@ -240,9 +240,8 @@ static unsigned rt_hash_mask; | |||
| 240 | static int rt_hash_log; | 240 | static int rt_hash_log; |
| 241 | static unsigned int rt_hash_rnd; | 241 | static unsigned int rt_hash_rnd; |
| 242 | 242 | ||
| 243 | static struct rt_cache_stat *rt_cache_stat; | 243 | static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); |
| 244 | #define RT_CACHE_STAT_INC(field) \ | 244 | #define RT_CACHE_STAT_INC(field) (__get_cpu_var(rt_cache_stat).field++) |
| 245 | (per_cpu_ptr(rt_cache_stat, raw_smp_processor_id())->field++) | ||
| 246 | 245 | ||
| 247 | static int rt_intern_hash(unsigned hash, struct rtable *rth, | 246 | static int rt_intern_hash(unsigned hash, struct rtable *rth, |
| 248 | struct rtable **res); | 247 | struct rtable **res); |
| @@ -401,7 +400,7 @@ static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos) | |||
| 401 | if (!cpu_possible(cpu)) | 400 | if (!cpu_possible(cpu)) |
| 402 | continue; | 401 | continue; |
| 403 | *pos = cpu+1; | 402 | *pos = cpu+1; |
| 404 | return per_cpu_ptr(rt_cache_stat, cpu); | 403 | return &per_cpu(rt_cache_stat, cpu); |
| 405 | } | 404 | } |
| 406 | return NULL; | 405 | return NULL; |
| 407 | } | 406 | } |
| @@ -414,7 +413,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
| 414 | if (!cpu_possible(cpu)) | 413 | if (!cpu_possible(cpu)) |
| 415 | continue; | 414 | continue; |
| 416 | *pos = cpu+1; | 415 | *pos = cpu+1; |
| 417 | return per_cpu_ptr(rt_cache_stat, cpu); | 416 | return &per_cpu(rt_cache_stat, cpu); |
| 418 | } | 417 | } |
| 419 | return NULL; | 418 | return NULL; |
| 420 | 419 | ||
| @@ -3160,10 +3159,6 @@ int __init ip_rt_init(void) | |||
| 3160 | ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1); | 3159 | ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1); |
| 3161 | ip_rt_max_size = (rt_hash_mask + 1) * 16; | 3160 | ip_rt_max_size = (rt_hash_mask + 1) * 16; |
| 3162 | 3161 | ||
| 3163 | rt_cache_stat = alloc_percpu(struct rt_cache_stat); | ||
| 3164 | if (!rt_cache_stat) | ||
| 3165 | return -ENOMEM; | ||
| 3166 | |||
| 3167 | devinet_init(); | 3162 | devinet_init(); |
| 3168 | ip_fib_init(); | 3163 | ip_fib_init(); |
| 3169 | 3164 | ||
| @@ -3191,7 +3186,6 @@ int __init ip_rt_init(void) | |||
| 3191 | if (!proc_net_fops_create("rt_cache", S_IRUGO, &rt_cache_seq_fops) || | 3186 | if (!proc_net_fops_create("rt_cache", S_IRUGO, &rt_cache_seq_fops) || |
| 3192 | !(rtstat_pde = create_proc_entry("rt_cache", S_IRUGO, | 3187 | !(rtstat_pde = create_proc_entry("rt_cache", S_IRUGO, |
| 3193 | proc_net_stat))) { | 3188 | proc_net_stat))) { |
| 3194 | free_percpu(rt_cache_stat); | ||
| 3195 | return -ENOMEM; | 3189 | return -ENOMEM; |
| 3196 | } | 3190 | } |
| 3197 | rtstat_pde->proc_fops = &rt_cpu_seq_fops; | 3191 | rtstat_pde->proc_fops = &rt_cpu_seq_fops; |
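
The route.c hunks switch the cache statistics from a runtime alloc_percpu() buffer to a statically defined per-CPU variable, which removes the allocation-failure path from ip_rt_init(). As a rough userspace analogy only, the trade-off is a fixed array sized at compile time versus one obtained from the allocator; NR_CPUS and struct cache_stat below are stand-ins for this sketch:

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4 /* stand-in for the kernel's compile-time CPU limit */

struct cache_stat { unsigned long hits; };

/* "DEFINE_PER_CPU" style: storage exists as soon as the object is linked. */
static struct cache_stat static_stats[NR_CPUS];

int main(void)
{
	/* "alloc_percpu" style: must be allocated and checked at init time. */
	struct cache_stat *dyn_stats = calloc(NR_CPUS, sizeof(*dyn_stats));
	if (!dyn_stats)
		return 1; /* the error path the patch removes */

	static_stats[0].hits++; /* no init-time failure possible */
	dyn_stats[0].hits++;

	printf("%lu %lu\n", static_stats[0].hits, dyn_stats[0].hits);
	free(dyn_stats);
	return 0;
}
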
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index dfb4f145a139..d328d5986143 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
| @@ -2644,7 +2644,7 @@ static int if6_seq_show(struct seq_file *seq, void *v) | |||
| 2644 | { | 2644 | { |
| 2645 | struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v; | 2645 | struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v; |
| 2646 | seq_printf(seq, | 2646 | seq_printf(seq, |
| 2647 | NIP6_FMT " %02x %02x %02x %02x %8s\n", | 2647 | NIP6_SEQFMT " %02x %02x %02x %02x %8s\n", |
| 2648 | NIP6(ifp->addr), | 2648 | NIP6(ifp->addr), |
| 2649 | ifp->idev->dev->ifindex, | 2649 | ifp->idev->dev->ifindex, |
| 2650 | ifp->prefix_len, | 2650 | ifp->prefix_len, |
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c index 72bd08af2dfb..840a33d33296 100644 --- a/net/ipv6/anycast.c +++ b/net/ipv6/anycast.c | |||
| @@ -532,7 +532,7 @@ static int ac6_seq_show(struct seq_file *seq, void *v) | |||
| 532 | struct ac6_iter_state *state = ac6_seq_private(seq); | 532 | struct ac6_iter_state *state = ac6_seq_private(seq); |
| 533 | 533 | ||
| 534 | seq_printf(seq, | 534 | seq_printf(seq, |
| 535 | "%-4d %-15s " NIP6_FMT " %5d\n", | 535 | "%-4d %-15s " NIP6_SEQFMT " %5d\n", |
| 536 | state->dev->ifindex, state->dev->name, | 536 | state->dev->ifindex, state->dev->name, |
| 537 | NIP6(im->aca_addr), | 537 | NIP6(im->aca_addr), |
| 538 | im->aca_users); | 538 | im->aca_users); |
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index 4183c8dac7f6..69cbe8a66d02 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c | |||
| @@ -629,7 +629,7 @@ static void ip6fl_fl_seq_show(struct seq_file *seq, struct ip6_flowlabel *fl) | |||
| 629 | { | 629 | { |
| 630 | while(fl) { | 630 | while(fl) { |
| 631 | seq_printf(seq, | 631 | seq_printf(seq, |
| 632 | "%05X %-1d %-6d %-6d %-6ld %-8ld " NIP6_FMT " %-4d\n", | 632 | "%05X %-1d %-6d %-6d %-6ld %-8ld " NIP6_SEQFMT " %-4d\n", |
| 633 | (unsigned)ntohl(fl->label), | 633 | (unsigned)ntohl(fl->label), |
| 634 | fl->share, | 634 | fl->share, |
| 635 | (unsigned)fl->owner, | 635 | (unsigned)fl->owner, |
| @@ -645,7 +645,7 @@ static void ip6fl_fl_seq_show(struct seq_file *seq, struct ip6_flowlabel *fl) | |||
| 645 | static int ip6fl_seq_show(struct seq_file *seq, void *v) | 645 | static int ip6fl_seq_show(struct seq_file *seq, void *v) |
| 646 | { | 646 | { |
| 647 | if (v == SEQ_START_TOKEN) | 647 | if (v == SEQ_START_TOKEN) |
| 648 | seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-39s %s\n", | 648 | seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n", |
| 649 | "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt"); | 649 | "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt"); |
| 650 | else | 650 | else |
| 651 | ip6fl_fl_seq_show(seq, v); | 651 | ip6fl_fl_seq_show(seq, v); |
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 0e03eabfb9da..6c05c7978bef 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c | |||
| @@ -2373,7 +2373,7 @@ static int igmp6_mc_seq_show(struct seq_file *seq, void *v) | |||
| 2373 | struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq); | 2373 | struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq); |
| 2374 | 2374 | ||
| 2375 | seq_printf(seq, | 2375 | seq_printf(seq, |
| 2376 | "%-4d %-15s " NIP6_FMT " %5d %08X %ld\n", | 2376 | "%-4d %-15s " NIP6_SEQFMT " %5d %08X %ld\n", |
| 2377 | state->dev->ifindex, state->dev->name, | 2377 | state->dev->ifindex, state->dev->name, |
| 2378 | NIP6(im->mca_addr), | 2378 | NIP6(im->mca_addr), |
| 2379 | im->mca_users, im->mca_flags, | 2379 | im->mca_users, im->mca_flags, |
| @@ -2542,12 +2542,12 @@ static int igmp6_mcf_seq_show(struct seq_file *seq, void *v) | |||
| 2542 | if (v == SEQ_START_TOKEN) { | 2542 | if (v == SEQ_START_TOKEN) { |
| 2543 | seq_printf(seq, | 2543 | seq_printf(seq, |
| 2544 | "%3s %6s " | 2544 | "%3s %6s " |
| 2545 | "%39s %39s %6s %6s\n", "Idx", | 2545 | "%32s %32s %6s %6s\n", "Idx", |
| 2546 | "Device", "Multicast Address", | 2546 | "Device", "Multicast Address", |
| 2547 | "Source Address", "INC", "EXC"); | 2547 | "Source Address", "INC", "EXC"); |
| 2548 | } else { | 2548 | } else { |
| 2549 | seq_printf(seq, | 2549 | seq_printf(seq, |
| 2550 | "%3d %6.6s " NIP6_FMT " " NIP6_FMT " %6lu %6lu\n", | 2550 | "%3d %6.6s " NIP6_SEQFMT " " NIP6_SEQFMT " %6lu %6lu\n", |
| 2551 | state->dev->ifindex, state->dev->name, | 2551 | state->dev->ifindex, state->dev->name, |
| 2552 | NIP6(state->im->mca_addr), | 2552 | NIP6(state->im->mca_addr), |
| 2553 | NIP6(psf->sf_addr), | 2553 | NIP6(psf->sf_addr), |
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile index 663b4749820d..db6073c94163 100644 --- a/net/ipv6/netfilter/Makefile +++ b/net/ipv6/netfilter/Makefile | |||
| @@ -4,7 +4,6 @@ | |||
| 4 | 4 | ||
| 5 | # Link order matters here. | 5 | # Link order matters here. |
| 6 | obj-$(CONFIG_IP6_NF_IPTABLES) += ip6_tables.o | 6 | obj-$(CONFIG_IP6_NF_IPTABLES) += ip6_tables.o |
| 7 | obj-$(CONFIG_IP6_NF_MATCH_LENGTH) += ip6t_length.o | ||
| 8 | obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o | 7 | obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o |
| 9 | obj-$(CONFIG_IP6_NF_MATCH_OPTS) += ip6t_hbh.o ip6t_dst.o | 8 | obj-$(CONFIG_IP6_NF_MATCH_OPTS) += ip6t_hbh.o ip6t_dst.o |
| 10 | obj-$(CONFIG_IP6_NF_MATCH_IPV6HEADER) += ip6t_ipv6header.o | 9 | obj-$(CONFIG_IP6_NF_MATCH_IPV6HEADER) += ip6t_ipv6header.o |
diff --git a/net/ipv6/netfilter/ip6t_dst.c b/net/ipv6/netfilter/ip6t_dst.c index 80fe82669ce2..b4c153a53500 100644 --- a/net/ipv6/netfilter/ip6t_dst.c +++ b/net/ipv6/netfilter/ip6t_dst.c | |||
| @@ -36,19 +36,19 @@ MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>"); | |||
| 36 | #endif | 36 | #endif |
| 37 | 37 | ||
| 38 | /* | 38 | /* |
| 39 | * (Type & 0xC0) >> 6 | 39 | * (Type & 0xC0) >> 6 |
| 40 | * 0 -> ignorable | 40 | * 0 -> ignorable |
| 41 | * 1 -> must drop the packet | 41 | * 1 -> must drop the packet |
| 42 | * 2 -> send ICMP PARM PROB regardless and drop packet | 42 | * 2 -> send ICMP PARM PROB regardless and drop packet |
| 43 | * 3 -> Send ICMP if not a multicast address and drop packet | 43 | * 3 -> Send ICMP if not a multicast address and drop packet |
| 44 | * (Type & 0x20) >> 5 | 44 | * (Type & 0x20) >> 5 |
| 45 | * 0 -> invariant | 45 | * 0 -> invariant |
| 46 | * 1 -> can change the routing | 46 | * 1 -> can change the routing |
| 47 | * (Type & 0x1F) Type | 47 | * (Type & 0x1F) Type |
| 48 | * 0 -> Pad1 (only 1 byte!) | 48 | * 0 -> Pad1 (only 1 byte!) |
| 49 | * 1 -> PadN LENGTH info (total length = length + 2) | 49 | * 1 -> PadN LENGTH info (total length = length + 2) |
| 50 | * C0 | 2 -> JUMBO 4 x x x x ( xxxx > 64k ) | 50 | * C0 | 2 -> JUMBO 4 x x x x ( xxxx > 64k ) |
| 51 | * 5 -> RTALERT 2 x x | 51 | * 5 -> RTALERT 2 x x |
| 52 | */ | 52 | */ |
| 53 | 53 | ||
| 54 | static int | 54 | static int |
| @@ -60,16 +60,16 @@ match(const struct sk_buff *skb, | |||
| 60 | unsigned int protoff, | 60 | unsigned int protoff, |
| 61 | int *hotdrop) | 61 | int *hotdrop) |
| 62 | { | 62 | { |
| 63 | struct ipv6_opt_hdr _optsh, *oh; | 63 | struct ipv6_opt_hdr _optsh, *oh; |
| 64 | const struct ip6t_opts *optinfo = matchinfo; | 64 | const struct ip6t_opts *optinfo = matchinfo; |
| 65 | unsigned int temp; | 65 | unsigned int temp; |
| 66 | unsigned int ptr; | 66 | unsigned int ptr; |
| 67 | unsigned int hdrlen = 0; | 67 | unsigned int hdrlen = 0; |
| 68 | unsigned int ret = 0; | 68 | unsigned int ret = 0; |
| 69 | u8 _opttype, *tp = NULL; | 69 | u8 _opttype, *tp = NULL; |
| 70 | u8 _optlen, *lp = NULL; | 70 | u8 _optlen, *lp = NULL; |
| 71 | unsigned int optlen; | 71 | unsigned int optlen; |
| 72 | 72 | ||
| 73 | #if HOPBYHOP | 73 | #if HOPBYHOP |
| 74 | if (ipv6_find_hdr(skb, &ptr, NEXTHDR_HOP, NULL) < 0) | 74 | if (ipv6_find_hdr(skb, &ptr, NEXTHDR_HOP, NULL) < 0) |
| 75 | #else | 75 | #else |
| @@ -77,42 +77,41 @@ match(const struct sk_buff *skb, | |||
| 77 | #endif | 77 | #endif |
| 78 | return 0; | 78 | return 0; |
| 79 | 79 | ||
| 80 | oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh); | 80 | oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh); |
| 81 | if (oh == NULL){ | 81 | if (oh == NULL) { |
| 82 | *hotdrop = 1; | 82 | *hotdrop = 1; |
| 83 | return 0; | 83 | return 0; |
| 84 | } | 84 | } |
| 85 | 85 | ||
| 86 | hdrlen = ipv6_optlen(oh); | 86 | hdrlen = ipv6_optlen(oh); |
| 87 | if (skb->len - ptr < hdrlen){ | 87 | if (skb->len - ptr < hdrlen) { |
| 88 | /* Packet smaller than its length field */ | 88 | /* Packet smaller than its length field */ |
| 89 | return 0; | 89 | return 0; |
| 90 | } | 90 | } |
| 91 | 91 | ||
| 92 | DEBUGP("IPv6 OPTS LEN %u %u ", hdrlen, oh->hdrlen); | 92 | DEBUGP("IPv6 OPTS LEN %u %u ", hdrlen, oh->hdrlen); |
| 93 | 93 | ||
| 94 | DEBUGP("len %02X %04X %02X ", | 94 | DEBUGP("len %02X %04X %02X ", |
| 95 | optinfo->hdrlen, hdrlen, | 95 | optinfo->hdrlen, hdrlen, |
| 96 | (!(optinfo->flags & IP6T_OPTS_LEN) || | 96 | (!(optinfo->flags & IP6T_OPTS_LEN) || |
| 97 | ((optinfo->hdrlen == hdrlen) ^ | 97 | ((optinfo->hdrlen == hdrlen) ^ |
| 98 | !!(optinfo->invflags & IP6T_OPTS_INV_LEN)))); | 98 | !!(optinfo->invflags & IP6T_OPTS_INV_LEN)))); |
| 99 | 99 | ||
| 100 | ret = (oh != NULL) | 100 | ret = (oh != NULL) && |
| 101 | && | 101 | (!(optinfo->flags & IP6T_OPTS_LEN) || |
| 102 | (!(optinfo->flags & IP6T_OPTS_LEN) || | 102 | ((optinfo->hdrlen == hdrlen) ^ |
| 103 | ((optinfo->hdrlen == hdrlen) ^ | 103 | !!(optinfo->invflags & IP6T_OPTS_INV_LEN))); |
| 104 | !!(optinfo->invflags & IP6T_OPTS_INV_LEN))); | 104 | |
| 105 | 105 | ptr += 2; | |
| 106 | ptr += 2; | 106 | hdrlen -= 2; |
| 107 | hdrlen -= 2; | 107 | if (!(optinfo->flags & IP6T_OPTS_OPTS)) { |
| 108 | if ( !(optinfo->flags & IP6T_OPTS_OPTS) ){ | 108 | return ret; |
| 109 | return ret; | ||
| 110 | } else if (optinfo->flags & IP6T_OPTS_NSTRICT) { | 109 | } else if (optinfo->flags & IP6T_OPTS_NSTRICT) { |
| 111 | DEBUGP("Not strict - not implemented"); | 110 | DEBUGP("Not strict - not implemented"); |
| 112 | } else { | 111 | } else { |
| 113 | DEBUGP("Strict "); | 112 | DEBUGP("Strict "); |
| 114 | DEBUGP("#%d ",optinfo->optsnr); | 113 | DEBUGP("#%d ", optinfo->optsnr); |
| 115 | for(temp=0; temp<optinfo->optsnr; temp++){ | 114 | for (temp = 0; temp < optinfo->optsnr; temp++) { |
| 116 | /* type field exists ? */ | 115 | /* type field exists ? */ |
| 117 | if (hdrlen < 1) | 116 | if (hdrlen < 1) |
| 118 | break; | 117 | break; |
| @@ -122,10 +121,10 @@ match(const struct sk_buff *skb, | |||
| 122 | break; | 121 | break; |
| 123 | 122 | ||
| 124 | /* Type check */ | 123 | /* Type check */ |
| 125 | if (*tp != (optinfo->opts[temp] & 0xFF00)>>8){ | 124 | if (*tp != (optinfo->opts[temp] & 0xFF00) >> 8) { |
| 126 | DEBUGP("Tbad %02X %02X\n", | 125 | DEBUGP("Tbad %02X %02X\n", |
| 127 | *tp, | 126 | *tp, |
| 128 | (optinfo->opts[temp] & 0xFF00)>>8); | 127 | (optinfo->opts[temp] & 0xFF00) >> 8); |
| 129 | return 0; | 128 | return 0; |
| 130 | } else { | 129 | } else { |
| 131 | DEBUGP("Tok "); | 130 | DEBUGP("Tok "); |
| @@ -169,7 +168,8 @@ match(const struct sk_buff *skb, | |||
| 169 | } | 168 | } |
| 170 | if (temp == optinfo->optsnr) | 169 | if (temp == optinfo->optsnr) |
| 171 | return ret; | 170 | return ret; |
| 172 | else return 0; | 171 | else |
| 172 | return 0; | ||
| 173 | } | 173 | } |
| 174 | 174 | ||
| 175 | return 0; | 175 | return 0; |
| @@ -178,25 +178,24 @@ match(const struct sk_buff *skb, | |||
| 178 | /* Called when user tries to insert an entry of this type. */ | 178 | /* Called when user tries to insert an entry of this type. */ |
| 179 | static int | 179 | static int |
| 180 | checkentry(const char *tablename, | 180 | checkentry(const char *tablename, |
| 181 | const void *info, | 181 | const void *info, |
| 182 | void *matchinfo, | 182 | void *matchinfo, |
| 183 | unsigned int matchinfosize, | 183 | unsigned int matchinfosize, |
| 184 | unsigned int hook_mask) | 184 | unsigned int hook_mask) |
| 185 | { | 185 | { |
| 186 | const struct ip6t_opts *optsinfo = matchinfo; | 186 | const struct ip6t_opts *optsinfo = matchinfo; |
| 187 | 187 | ||
| 188 | if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_opts))) { | 188 | if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_opts))) { |
| 189 | DEBUGP("ip6t_opts: matchsize %u != %u\n", | 189 | DEBUGP("ip6t_opts: matchsize %u != %u\n", |
| 190 | matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_opts))); | 190 | matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_opts))); |
| 191 | return 0; | 191 | return 0; |
| 192 | } | 192 | } |
| 193 | if (optsinfo->invflags & ~IP6T_OPTS_INV_MASK) { | 193 | if (optsinfo->invflags & ~IP6T_OPTS_INV_MASK) { |
| 194 | DEBUGP("ip6t_opts: unknown flags %X\n", | 194 | DEBUGP("ip6t_opts: unknown flags %X\n", optsinfo->invflags); |
| 195 | optsinfo->invflags); | 195 | return 0; |
| 196 | return 0; | 196 | } |
| 197 | } | 197 | |
| 198 | 198 | return 1; | |
| 199 | return 1; | ||
| 200 | } | 199 | } |
| 201 | 200 | ||
| 202 | static struct ip6t_match opts_match = { | 201 | static struct ip6t_match opts_match = { |
| @@ -212,12 +211,12 @@ static struct ip6t_match opts_match = { | |||
| 212 | 211 | ||
| 213 | static int __init init(void) | 212 | static int __init init(void) |
| 214 | { | 213 | { |
| 215 | return ip6t_register_match(&opts_match); | 214 | return ip6t_register_match(&opts_match); |
| 216 | } | 215 | } |
| 217 | 216 | ||
| 218 | static void __exit cleanup(void) | 217 | static void __exit cleanup(void) |
| 219 | { | 218 | { |
| 220 | ip6t_unregister_match(&opts_match); | 219 | ip6t_unregister_match(&opts_match); |
| 221 | } | 220 | } |
| 222 | 221 | ||
| 223 | module_init(init); | 222 | module_init(init); |
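
The bit layout documented in the comment near the top of ip6t_dst.c ((Type & 0xC0) >> 6 and friends) is easier to follow as code. A minimal sketch of that decoding; the helper names are invented for illustration.

/* Decoding an IPv6 option Type octet per the comment above; helper names
 * are illustrative only. */
static inline unsigned int opt_unrec_action(unsigned char type)
{
	return (type & 0xC0) >> 6;	/* 0 skip, 1 drop, 2 drop + ICMP, 3 drop + ICMP if not multicast */
}

static inline unsigned int opt_may_change(unsigned char type)
{
	return (type & 0x20) >> 5;	/* 1: option data may change en route */
}

static inline unsigned int opt_number(unsigned char type)
{
	return type & 0x1F;		/* 0 = Pad1, 1 = PadN, ... */
}
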
diff --git a/net/ipv6/netfilter/ip6t_eui64.c b/net/ipv6/netfilter/ip6t_eui64.c index ddf5f571909c..27396ac0b9ed 100644 --- a/net/ipv6/netfilter/ip6t_eui64.c +++ b/net/ipv6/netfilter/ip6t_eui64.c | |||
| @@ -27,45 +27,45 @@ match(const struct sk_buff *skb, | |||
| 27 | unsigned int protoff, | 27 | unsigned int protoff, |
| 28 | int *hotdrop) | 28 | int *hotdrop) |
| 29 | { | 29 | { |
| 30 | unsigned char eui64[8]; | ||
| 31 | int i = 0; | ||
| 30 | 32 | ||
| 31 | unsigned char eui64[8]; | 33 | if (!(skb->mac.raw >= skb->head && |
| 32 | int i=0; | 34 | (skb->mac.raw + ETH_HLEN) <= skb->data) && |
| 33 | 35 | offset != 0) { | |
| 34 | if ( !(skb->mac.raw >= skb->head | 36 | *hotdrop = 1; |
| 35 | && (skb->mac.raw + ETH_HLEN) <= skb->data) | 37 | return 0; |
| 36 | && offset != 0) { | 38 | } |
| 37 | *hotdrop = 1; | 39 | |
| 38 | return 0; | 40 | memset(eui64, 0, sizeof(eui64)); |
| 39 | } | 41 | |
| 40 | 42 | if (eth_hdr(skb)->h_proto == ntohs(ETH_P_IPV6)) { | |
| 41 | memset(eui64, 0, sizeof(eui64)); | 43 | if (skb->nh.ipv6h->version == 0x6) { |
| 42 | 44 | memcpy(eui64, eth_hdr(skb)->h_source, 3); | |
| 43 | if (eth_hdr(skb)->h_proto == ntohs(ETH_P_IPV6)) { | 45 | memcpy(eui64 + 5, eth_hdr(skb)->h_source + 3, 3); |
| 44 | if (skb->nh.ipv6h->version == 0x6) { | 46 | eui64[3] = 0xff; |
| 45 | memcpy(eui64, eth_hdr(skb)->h_source, 3); | 47 | eui64[4] = 0xfe; |
| 46 | memcpy(eui64 + 5, eth_hdr(skb)->h_source + 3, 3); | 48 | eui64[0] |= 0x02; |
| 47 | eui64[3]=0xff; | 49 | |
| 48 | eui64[4]=0xfe; | 50 | i = 0; |
| 49 | eui64[0] |= 0x02; | 51 | while ((skb->nh.ipv6h->saddr.s6_addr[8+i] == eui64[i]) |
| 50 | 52 | && (i < 8)) | |
| 51 | i=0; | 53 | i++; |
| 52 | while ((skb->nh.ipv6h->saddr.s6_addr[8+i] == | 54 | |
| 53 | eui64[i]) && (i<8)) i++; | 55 | if (i == 8) |
| 54 | 56 | return 1; | |
| 55 | if ( i == 8 ) | 57 | } |
| 56 | return 1; | 58 | } |
| 57 | } | 59 | |
| 58 | } | 60 | return 0; |
| 59 | |||
| 60 | return 0; | ||
| 61 | } | 61 | } |
| 62 | 62 | ||
| 63 | static int | 63 | static int |
| 64 | ip6t_eui64_checkentry(const char *tablename, | 64 | ip6t_eui64_checkentry(const char *tablename, |
| 65 | const void *ip, | 65 | const void *ip, |
| 66 | void *matchinfo, | 66 | void *matchinfo, |
| 67 | unsigned int matchsize, | 67 | unsigned int matchsize, |
| 68 | unsigned int hook_mask) | 68 | unsigned int hook_mask) |
| 69 | { | 69 | { |
| 70 | if (hook_mask | 70 | if (hook_mask |
| 71 | & ~((1 << NF_IP6_PRE_ROUTING) | (1 << NF_IP6_LOCAL_IN) | | 71 | & ~((1 << NF_IP6_PRE_ROUTING) | (1 << NF_IP6_LOCAL_IN) | |
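
For reference, the match above rebuilds a modified EUI-64 interface identifier from the Ethernet source address and compares it with bytes 8..15 of the IPv6 source address. A standalone sketch of that construction, mirroring the steps in the kernel code:

#include <string.h>

/* Build the EUI-64 exactly as the match does: split the 48-bit MAC around
 * an inserted ff:fe and set the universal/local bit (0x02) of byte 0. */
static void mac_to_eui64(const unsigned char mac[6], unsigned char eui64[8])
{
	memcpy(eui64, mac, 3);
	eui64[3] = 0xff;
	eui64[4] = 0xfe;
	memcpy(eui64 + 5, mac + 3, 3);
	eui64[0] |= 0x02;
}
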
diff --git a/net/ipv6/netfilter/ip6t_frag.c b/net/ipv6/netfilter/ip6t_frag.c index a9964b946ed5..4c14125a0e26 100644 --- a/net/ipv6/netfilter/ip6t_frag.c +++ b/net/ipv6/netfilter/ip6t_frag.c | |||
| @@ -31,12 +31,12 @@ MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>"); | |||
| 31 | static inline int | 31 | static inline int |
| 32 | id_match(u_int32_t min, u_int32_t max, u_int32_t id, int invert) | 32 | id_match(u_int32_t min, u_int32_t max, u_int32_t id, int invert) |
| 33 | { | 33 | { |
| 34 | int r=0; | 34 | int r = 0; |
| 35 | DEBUGP("frag id_match:%c 0x%x <= 0x%x <= 0x%x",invert? '!':' ', | 35 | DEBUGP("frag id_match:%c 0x%x <= 0x%x <= 0x%x", invert ? '!' : ' ', |
| 36 | min,id,max); | 36 | min, id, max); |
| 37 | r=(id >= min && id <= max) ^ invert; | 37 | r = (id >= min && id <= max) ^ invert; |
| 38 | DEBUGP(" result %s\n",r? "PASS" : "FAILED"); | 38 | DEBUGP(" result %s\n", r ? "PASS" : "FAILED"); |
| 39 | return r; | 39 | return r; |
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | static int | 42 | static int |
| @@ -48,92 +48,91 @@ match(const struct sk_buff *skb, | |||
| 48 | unsigned int protoff, | 48 | unsigned int protoff, |
| 49 | int *hotdrop) | 49 | int *hotdrop) |
| 50 | { | 50 | { |
| 51 | struct frag_hdr _frag, *fh; | 51 | struct frag_hdr _frag, *fh; |
| 52 | const struct ip6t_frag *fraginfo = matchinfo; | 52 | const struct ip6t_frag *fraginfo = matchinfo; |
| 53 | unsigned int ptr; | 53 | unsigned int ptr; |
| 54 | 54 | ||
| 55 | if (ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL) < 0) | 55 | if (ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL) < 0) |
| 56 | return 0; | 56 | return 0; |
| 57 | 57 | ||
| 58 | fh = skb_header_pointer(skb, ptr, sizeof(_frag), &_frag); | 58 | fh = skb_header_pointer(skb, ptr, sizeof(_frag), &_frag); |
| 59 | if (fh == NULL){ | 59 | if (fh == NULL) { |
| 60 | *hotdrop = 1; | 60 | *hotdrop = 1; |
| 61 | return 0; | 61 | return 0; |
| 62 | } | 62 | } |
| 63 | 63 | ||
| 64 | DEBUGP("INFO %04X ", fh->frag_off); | 64 | DEBUGP("INFO %04X ", fh->frag_off); |
| 65 | DEBUGP("OFFSET %04X ", ntohs(fh->frag_off) & ~0x7); | 65 | DEBUGP("OFFSET %04X ", ntohs(fh->frag_off) & ~0x7); |
| 66 | DEBUGP("RES %02X %04X", fh->reserved, ntohs(fh->frag_off) & 0x6); | 66 | DEBUGP("RES %02X %04X", fh->reserved, ntohs(fh->frag_off) & 0x6); |
| 67 | DEBUGP("MF %04X ", fh->frag_off & htons(IP6_MF)); | 67 | DEBUGP("MF %04X ", fh->frag_off & htons(IP6_MF)); |
| 68 | DEBUGP("ID %u %08X\n", ntohl(fh->identification), | 68 | DEBUGP("ID %u %08X\n", ntohl(fh->identification), |
| 69 | ntohl(fh->identification)); | 69 | ntohl(fh->identification)); |
| 70 | 70 | ||
| 71 | DEBUGP("IPv6 FRAG id %02X ", | 71 | DEBUGP("IPv6 FRAG id %02X ", |
| 72 | (id_match(fraginfo->ids[0], fraginfo->ids[1], | 72 | (id_match(fraginfo->ids[0], fraginfo->ids[1], |
| 73 | ntohl(fh->identification), | 73 | ntohl(fh->identification), |
| 74 | !!(fraginfo->invflags & IP6T_FRAG_INV_IDS)))); | 74 | !!(fraginfo->invflags & IP6T_FRAG_INV_IDS)))); |
| 75 | DEBUGP("res %02X %02X%04X %02X ", | 75 | DEBUGP("res %02X %02X%04X %02X ", |
| 76 | (fraginfo->flags & IP6T_FRAG_RES), fh->reserved, | 76 | (fraginfo->flags & IP6T_FRAG_RES), fh->reserved, |
| 77 | ntohs(fh->frag_off) & 0x6, | 77 | ntohs(fh->frag_off) & 0x6, |
| 78 | !((fraginfo->flags & IP6T_FRAG_RES) | 78 | !((fraginfo->flags & IP6T_FRAG_RES) |
| 79 | && (fh->reserved || (ntohs(fh->frag_off) & 0x06)))); | 79 | && (fh->reserved || (ntohs(fh->frag_off) & 0x06)))); |
| 80 | DEBUGP("first %02X %02X %02X ", | 80 | DEBUGP("first %02X %02X %02X ", |
| 81 | (fraginfo->flags & IP6T_FRAG_FST), | 81 | (fraginfo->flags & IP6T_FRAG_FST), |
| 82 | ntohs(fh->frag_off) & ~0x7, | 82 | ntohs(fh->frag_off) & ~0x7, |
| 83 | !((fraginfo->flags & IP6T_FRAG_FST) | 83 | !((fraginfo->flags & IP6T_FRAG_FST) |
| 84 | && (ntohs(fh->frag_off) & ~0x7))); | 84 | && (ntohs(fh->frag_off) & ~0x7))); |
| 85 | DEBUGP("mf %02X %02X %02X ", | 85 | DEBUGP("mf %02X %02X %02X ", |
| 86 | (fraginfo->flags & IP6T_FRAG_MF), | 86 | (fraginfo->flags & IP6T_FRAG_MF), |
| 87 | ntohs(fh->frag_off) & IP6_MF, | 87 | ntohs(fh->frag_off) & IP6_MF, |
| 88 | !((fraginfo->flags & IP6T_FRAG_MF) | 88 | !((fraginfo->flags & IP6T_FRAG_MF) |
| 89 | && !((ntohs(fh->frag_off) & IP6_MF)))); | 89 | && !((ntohs(fh->frag_off) & IP6_MF)))); |
| 90 | DEBUGP("last %02X %02X %02X\n", | 90 | DEBUGP("last %02X %02X %02X\n", |
| 91 | (fraginfo->flags & IP6T_FRAG_NMF), | 91 | (fraginfo->flags & IP6T_FRAG_NMF), |
| 92 | ntohs(fh->frag_off) & IP6_MF, | 92 | ntohs(fh->frag_off) & IP6_MF, |
| 93 | !((fraginfo->flags & IP6T_FRAG_NMF) | 93 | !((fraginfo->flags & IP6T_FRAG_NMF) |
| 94 | && (ntohs(fh->frag_off) & IP6_MF))); | 94 | && (ntohs(fh->frag_off) & IP6_MF))); |
| 95 | 95 | ||
| 96 | return (fh != NULL) | 96 | return (fh != NULL) |
| 97 | && | 97 | && |
| 98 | (id_match(fraginfo->ids[0], fraginfo->ids[1], | 98 | (id_match(fraginfo->ids[0], fraginfo->ids[1], |
| 99 | ntohl(fh->identification), | 99 | ntohl(fh->identification), |
| 100 | !!(fraginfo->invflags & IP6T_FRAG_INV_IDS))) | 100 | !!(fraginfo->invflags & IP6T_FRAG_INV_IDS))) |
| 101 | && | 101 | && |
| 102 | !((fraginfo->flags & IP6T_FRAG_RES) | 102 | !((fraginfo->flags & IP6T_FRAG_RES) |
| 103 | && (fh->reserved || (ntohs(fh->frag_off) & 0x6))) | 103 | && (fh->reserved || (ntohs(fh->frag_off) & 0x6))) |
| 104 | && | 104 | && |
| 105 | !((fraginfo->flags & IP6T_FRAG_FST) | 105 | !((fraginfo->flags & IP6T_FRAG_FST) |
| 106 | && (ntohs(fh->frag_off) & ~0x7)) | 106 | && (ntohs(fh->frag_off) & ~0x7)) |
| 107 | && | 107 | && |
| 108 | !((fraginfo->flags & IP6T_FRAG_MF) | 108 | !((fraginfo->flags & IP6T_FRAG_MF) |
| 109 | && !(ntohs(fh->frag_off) & IP6_MF)) | 109 | && !(ntohs(fh->frag_off) & IP6_MF)) |
| 110 | && | 110 | && |
| 111 | !((fraginfo->flags & IP6T_FRAG_NMF) | 111 | !((fraginfo->flags & IP6T_FRAG_NMF) |
| 112 | && (ntohs(fh->frag_off) & IP6_MF)); | 112 | && (ntohs(fh->frag_off) & IP6_MF)); |
| 113 | } | 113 | } |
| 114 | 114 | ||
| 115 | /* Called when user tries to insert an entry of this type. */ | 115 | /* Called when user tries to insert an entry of this type. */ |
| 116 | static int | 116 | static int |
| 117 | checkentry(const char *tablename, | 117 | checkentry(const char *tablename, |
| 118 | const void *ip, | 118 | const void *ip, |
| 119 | void *matchinfo, | 119 | void *matchinfo, |
| 120 | unsigned int matchinfosize, | 120 | unsigned int matchinfosize, |
| 121 | unsigned int hook_mask) | 121 | unsigned int hook_mask) |
| 122 | { | 122 | { |
| 123 | const struct ip6t_frag *fraginfo = matchinfo; | 123 | const struct ip6t_frag *fraginfo = matchinfo; |
| 124 | 124 | ||
| 125 | if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_frag))) { | 125 | if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_frag))) { |
| 126 | DEBUGP("ip6t_frag: matchsize %u != %u\n", | 126 | DEBUGP("ip6t_frag: matchsize %u != %u\n", |
| 127 | matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_frag))); | 127 | matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_frag))); |
| 128 | return 0; | 128 | return 0; |
| 129 | } | 129 | } |
| 130 | if (fraginfo->invflags & ~IP6T_FRAG_INV_MASK) { | 130 | if (fraginfo->invflags & ~IP6T_FRAG_INV_MASK) { |
| 131 | DEBUGP("ip6t_frag: unknown flags %X\n", | 131 | DEBUGP("ip6t_frag: unknown flags %X\n", fraginfo->invflags); |
| 132 | fraginfo->invflags); | 132 | return 0; |
| 133 | return 0; | 133 | } |
| 134 | } | 134 | |
| 135 | 135 | return 1; | |
| 136 | return 1; | ||
| 137 | } | 136 | } |
| 138 | 137 | ||
| 139 | static struct ip6t_match frag_match = { | 138 | static struct ip6t_match frag_match = { |
| @@ -145,12 +144,12 @@ static struct ip6t_match frag_match = { | |||
| 145 | 144 | ||
| 146 | static int __init init(void) | 145 | static int __init init(void) |
| 147 | { | 146 | { |
| 148 | return ip6t_register_match(&frag_match); | 147 | return ip6t_register_match(&frag_match); |
| 149 | } | 148 | } |
| 150 | 149 | ||
| 151 | static void __exit cleanup(void) | 150 | static void __exit cleanup(void) |
| 152 | { | 151 | { |
| 153 | ip6t_unregister_match(&frag_match); | 152 | ip6t_unregister_match(&frag_match); |
| 154 | } | 153 | } |
| 155 | 154 | ||
| 156 | module_init(init); | 155 | module_init(init); |
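
A note on the frag_off arithmetic used throughout the match above: frag_off is a big-endian 16-bit field whose upper 13 bits hold the fragment offset in 8-byte units, bits 1-2 are reserved, and bit 0 is the more-fragments flag. A sketch of the extraction, assuming kernel headers for struct frag_hdr, ntohs/ntohl and IP6_MF:

/* Sketch only; fh points at a struct frag_hdr as in the match above. */
static void frag_hdr_fields(const struct frag_hdr *fh)
{
	unsigned int byte_off = ntohs(fh->frag_off) & ~0x7;	/* 13-bit offset in 8-byte units, i.e. the byte offset */
	unsigned int reserved = ntohs(fh->frag_off) & 0x6;	/* reserved bits, normally zero */
	unsigned int more     = ntohs(fh->frag_off) & IP6_MF;	/* more fragments follow */
	unsigned int id       = ntohl(fh->identification);	/* fragment identification */

	printk(KERN_DEBUG "off=%u res=%u mf=%u id=%u\n", byte_off, reserved, more, id);
}
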
diff --git a/net/ipv6/netfilter/ip6t_hbh.c b/net/ipv6/netfilter/ip6t_hbh.c index ed8ded18bbd4..37a8474a7e0c 100644 --- a/net/ipv6/netfilter/ip6t_hbh.c +++ b/net/ipv6/netfilter/ip6t_hbh.c | |||
| @@ -36,19 +36,19 @@ MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>"); | |||
| 36 | #endif | 36 | #endif |
| 37 | 37 | ||
| 38 | /* | 38 | /* |
| 39 | * (Type & 0xC0) >> 6 | 39 | * (Type & 0xC0) >> 6 |
| 40 | * 0 -> ignorable | 40 | * 0 -> ignorable |
| 41 | * 1 -> must drop the packet | 41 | * 1 -> must drop the packet |
| 42 | * 2 -> send ICMP PARM PROB regardless and drop packet | 42 | * 2 -> send ICMP PARM PROB regardless and drop packet |
| 43 | * 3 -> Send ICMP if not a multicast address and drop packet | 43 | * 3 -> Send ICMP if not a multicast address and drop packet |
| 44 | * (Type & 0x20) >> 5 | 44 | * (Type & 0x20) >> 5 |
| 45 | * 0 -> invariant | 45 | * 0 -> invariant |
| 46 | * 1 -> can change the routing | 46 | * 1 -> can change the routing |
| 47 | * (Type & 0x1F) Type | 47 | * (Type & 0x1F) Type |
| 48 | * 0 -> Pad1 (only 1 byte!) | 48 | * 0 -> Pad1 (only 1 byte!) |
| 49 | * 1 -> PadN LENGTH info (total length = length + 2) | 49 | * 1 -> PadN LENGTH info (total length = length + 2) |
| 50 | * C0 | 2 -> JUMBO 4 x x x x ( xxxx > 64k ) | 50 | * C0 | 2 -> JUMBO 4 x x x x ( xxxx > 64k ) |
| 51 | * 5 -> RTALERT 2 x x | 51 | * 5 -> RTALERT 2 x x |
| 52 | */ | 52 | */ |
| 53 | 53 | ||
| 54 | static int | 54 | static int |
| @@ -60,16 +60,16 @@ match(const struct sk_buff *skb, | |||
| 60 | unsigned int protoff, | 60 | unsigned int protoff, |
| 61 | int *hotdrop) | 61 | int *hotdrop) |
| 62 | { | 62 | { |
| 63 | struct ipv6_opt_hdr _optsh, *oh; | 63 | struct ipv6_opt_hdr _optsh, *oh; |
| 64 | const struct ip6t_opts *optinfo = matchinfo; | 64 | const struct ip6t_opts *optinfo = matchinfo; |
| 65 | unsigned int temp; | 65 | unsigned int temp; |
| 66 | unsigned int ptr; | 66 | unsigned int ptr; |
| 67 | unsigned int hdrlen = 0; | 67 | unsigned int hdrlen = 0; |
| 68 | unsigned int ret = 0; | 68 | unsigned int ret = 0; |
| 69 | u8 _opttype, *tp = NULL; | 69 | u8 _opttype, *tp = NULL; |
| 70 | u8 _optlen, *lp = NULL; | 70 | u8 _optlen, *lp = NULL; |
| 71 | unsigned int optlen; | 71 | unsigned int optlen; |
| 72 | 72 | ||
| 73 | #if HOPBYHOP | 73 | #if HOPBYHOP |
| 74 | if (ipv6_find_hdr(skb, &ptr, NEXTHDR_HOP, NULL) < 0) | 74 | if (ipv6_find_hdr(skb, &ptr, NEXTHDR_HOP, NULL) < 0) |
| 75 | #else | 75 | #else |
| @@ -77,42 +77,41 @@ match(const struct sk_buff *skb, | |||
| 77 | #endif | 77 | #endif |
| 78 | return 0; | 78 | return 0; |
| 79 | 79 | ||
| 80 | oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh); | 80 | oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh); |
| 81 | if (oh == NULL){ | 81 | if (oh == NULL) { |
| 82 | *hotdrop = 1; | 82 | *hotdrop = 1; |
| 83 | return 0; | 83 | return 0; |
| 84 | } | 84 | } |
| 85 | 85 | ||
| 86 | hdrlen = ipv6_optlen(oh); | 86 | hdrlen = ipv6_optlen(oh); |
| 87 | if (skb->len - ptr < hdrlen){ | 87 | if (skb->len - ptr < hdrlen) { |
| 88 | /* Packet smaller than it's length field */ | 88 | /* Packet smaller than it's length field */ |
| 89 | return 0; | 89 | return 0; |
| 90 | } | 90 | } |
| 91 | 91 | ||
| 92 | DEBUGP("IPv6 OPTS LEN %u %u ", hdrlen, oh->hdrlen); | 92 | DEBUGP("IPv6 OPTS LEN %u %u ", hdrlen, oh->hdrlen); |
| 93 | 93 | ||
| 94 | DEBUGP("len %02X %04X %02X ", | 94 | DEBUGP("len %02X %04X %02X ", |
| 95 | optinfo->hdrlen, hdrlen, | 95 | optinfo->hdrlen, hdrlen, |
| 96 | (!(optinfo->flags & IP6T_OPTS_LEN) || | 96 | (!(optinfo->flags & IP6T_OPTS_LEN) || |
| 97 | ((optinfo->hdrlen == hdrlen) ^ | 97 | ((optinfo->hdrlen == hdrlen) ^ |
| 98 | !!(optinfo->invflags & IP6T_OPTS_INV_LEN)))); | 98 | !!(optinfo->invflags & IP6T_OPTS_INV_LEN)))); |
| 99 | 99 | ||
| 100 | ret = (oh != NULL) | 100 | ret = (oh != NULL) && |
| 101 | && | 101 | (!(optinfo->flags & IP6T_OPTS_LEN) || |
| 102 | (!(optinfo->flags & IP6T_OPTS_LEN) || | 102 | ((optinfo->hdrlen == hdrlen) ^ |
| 103 | ((optinfo->hdrlen == hdrlen) ^ | 103 | !!(optinfo->invflags & IP6T_OPTS_INV_LEN))); |
| 104 | !!(optinfo->invflags & IP6T_OPTS_INV_LEN))); | 104 | |
| 105 | 105 | ptr += 2; | |
| 106 | ptr += 2; | 106 | hdrlen -= 2; |
| 107 | hdrlen -= 2; | 107 | if (!(optinfo->flags & IP6T_OPTS_OPTS)) { |
| 108 | if ( !(optinfo->flags & IP6T_OPTS_OPTS) ){ | 108 | return ret; |
| 109 | return ret; | ||
| 110 | } else if (optinfo->flags & IP6T_OPTS_NSTRICT) { | 109 | } else if (optinfo->flags & IP6T_OPTS_NSTRICT) { |
| 111 | DEBUGP("Not strict - not implemented"); | 110 | DEBUGP("Not strict - not implemented"); |
| 112 | } else { | 111 | } else { |
| 113 | DEBUGP("Strict "); | 112 | DEBUGP("Strict "); |
| 114 | DEBUGP("#%d ",optinfo->optsnr); | 113 | DEBUGP("#%d ", optinfo->optsnr); |
| 115 | for(temp=0; temp<optinfo->optsnr; temp++){ | 114 | for (temp = 0; temp < optinfo->optsnr; temp++) { |
| 116 | /* type field exists ? */ | 115 | /* type field exists ? */ |
| 117 | if (hdrlen < 1) | 116 | if (hdrlen < 1) |
| 118 | break; | 117 | break; |
| @@ -122,10 +121,10 @@ match(const struct sk_buff *skb, | |||
| 122 | break; | 121 | break; |
| 123 | 122 | ||
| 124 | /* Type check */ | 123 | /* Type check */ |
| 125 | if (*tp != (optinfo->opts[temp] & 0xFF00)>>8){ | 124 | if (*tp != (optinfo->opts[temp] & 0xFF00) >> 8) { |
| 126 | DEBUGP("Tbad %02X %02X\n", | 125 | DEBUGP("Tbad %02X %02X\n", |
| 127 | *tp, | 126 | *tp, |
| 128 | (optinfo->opts[temp] & 0xFF00)>>8); | 127 | (optinfo->opts[temp] & 0xFF00) >> 8); |
| 129 | return 0; | 128 | return 0; |
| 130 | } else { | 129 | } else { |
| 131 | DEBUGP("Tok "); | 130 | DEBUGP("Tok "); |
| @@ -169,7 +168,8 @@ match(const struct sk_buff *skb, | |||
| 169 | } | 168 | } |
| 170 | if (temp == optinfo->optsnr) | 169 | if (temp == optinfo->optsnr) |
| 171 | return ret; | 170 | return ret; |
| 172 | else return 0; | 171 | else |
| 172 | return 0; | ||
| 173 | } | 173 | } |
| 174 | 174 | ||
| 175 | return 0; | 175 | return 0; |
| @@ -178,25 +178,24 @@ match(const struct sk_buff *skb, | |||
| 178 | /* Called when user tries to insert an entry of this type. */ | 178 | /* Called when user tries to insert an entry of this type. */ |
| 179 | static int | 179 | static int |
| 180 | checkentry(const char *tablename, | 180 | checkentry(const char *tablename, |
| 181 | const void *entry, | 181 | const void *entry, |
| 182 | void *matchinfo, | 182 | void *matchinfo, |
| 183 | unsigned int matchinfosize, | 183 | unsigned int matchinfosize, |
| 184 | unsigned int hook_mask) | 184 | unsigned int hook_mask) |
| 185 | { | 185 | { |
| 186 | const struct ip6t_opts *optsinfo = matchinfo; | 186 | const struct ip6t_opts *optsinfo = matchinfo; |
| 187 | 187 | ||
| 188 | if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_opts))) { | 188 | if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_opts))) { |
| 189 | DEBUGP("ip6t_opts: matchsize %u != %u\n", | 189 | DEBUGP("ip6t_opts: matchsize %u != %u\n", |
| 190 | matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_opts))); | 190 | matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_opts))); |
| 191 | return 0; | 191 | return 0; |
| 192 | } | 192 | } |
| 193 | if (optsinfo->invflags & ~IP6T_OPTS_INV_MASK) { | 193 | if (optsinfo->invflags & ~IP6T_OPTS_INV_MASK) { |
| 194 | DEBUGP("ip6t_opts: unknown flags %X\n", | 194 | DEBUGP("ip6t_opts: unknown flags %X\n", optsinfo->invflags); |
| 195 | optsinfo->invflags); | 195 | return 0; |
| 196 | return 0; | 196 | } |
| 197 | } | 197 | |
| 198 | 198 | return 1; | |
| 199 | return 1; | ||
| 200 | } | 199 | } |
| 201 | 200 | ||
| 202 | static struct ip6t_match opts_match = { | 201 | static struct ip6t_match opts_match = { |
| @@ -212,12 +211,12 @@ static struct ip6t_match opts_match = { | |||
| 212 | 211 | ||
| 213 | static int __init init(void) | 212 | static int __init init(void) |
| 214 | { | 213 | { |
| 215 | return ip6t_register_match(&opts_match); | 214 | return ip6t_register_match(&opts_match); |
| 216 | } | 215 | } |
| 217 | 216 | ||
| 218 | static void __exit cleanup(void) | 217 | static void __exit cleanup(void) |
| 219 | { | 218 | { |
| 220 | ip6t_unregister_match(&opts_match); | 219 | ip6t_unregister_match(&opts_match); |
| 221 | } | 220 | } |
| 222 | 221 | ||
| 223 | module_init(init); | 222 | module_init(init); |
diff --git a/net/ipv6/netfilter/ip6t_ipv6header.c b/net/ipv6/netfilter/ip6t_ipv6header.c index fda1ceaf5a29..83ad6b272f7e 100644 --- a/net/ipv6/netfilter/ip6t_ipv6header.c +++ b/net/ipv6/netfilter/ip6t_ipv6header.c | |||
| @@ -50,20 +50,20 @@ ipv6header_match(const struct sk_buff *skb, | |||
| 50 | len = skb->len - ptr; | 50 | len = skb->len - ptr; |
| 51 | temp = 0; | 51 | temp = 0; |
| 52 | 52 | ||
| 53 | while (ip6t_ext_hdr(nexthdr)) { | 53 | while (ip6t_ext_hdr(nexthdr)) { |
| 54 | struct ipv6_opt_hdr _hdr, *hp; | 54 | struct ipv6_opt_hdr _hdr, *hp; |
| 55 | int hdrlen; | 55 | int hdrlen; |
| 56 | 56 | ||
| 57 | /* Is there enough space for the next ext header? */ | 57 | /* Is there enough space for the next ext header? */ |
| 58 | if (len < (int)sizeof(struct ipv6_opt_hdr)) | 58 | if (len < (int)sizeof(struct ipv6_opt_hdr)) |
| 59 | return 0; | 59 | return 0; |
| 60 | /* No more exthdr -> evaluate */ | 60 | /* No more exthdr -> evaluate */ |
| 61 | if (nexthdr == NEXTHDR_NONE) { | 61 | if (nexthdr == NEXTHDR_NONE) { |
| 62 | temp |= MASK_NONE; | 62 | temp |= MASK_NONE; |
| 63 | break; | 63 | break; |
| 64 | } | 64 | } |
| 65 | /* ESP -> evaluate */ | 65 | /* ESP -> evaluate */ |
| 66 | if (nexthdr == NEXTHDR_ESP) { | 66 | if (nexthdr == NEXTHDR_ESP) { |
| 67 | temp |= MASK_ESP; | 67 | temp |= MASK_ESP; |
| 68 | break; | 68 | break; |
| 69 | } | 69 | } |
| @@ -72,43 +72,43 @@ ipv6header_match(const struct sk_buff *skb, | |||
| 72 | BUG_ON(hp == NULL); | 72 | BUG_ON(hp == NULL); |
| 73 | 73 | ||
| 74 | /* Calculate the header length */ | 74 | /* Calculate the header length */ |
| 75 | if (nexthdr == NEXTHDR_FRAGMENT) { | 75 | if (nexthdr == NEXTHDR_FRAGMENT) { |
| 76 | hdrlen = 8; | 76 | hdrlen = 8; |
| 77 | } else if (nexthdr == NEXTHDR_AUTH) | 77 | } else if (nexthdr == NEXTHDR_AUTH) |
| 78 | hdrlen = (hp->hdrlen+2)<<2; | 78 | hdrlen = (hp->hdrlen + 2) << 2; |
| 79 | else | 79 | else |
| 80 | hdrlen = ipv6_optlen(hp); | 80 | hdrlen = ipv6_optlen(hp); |
| 81 | 81 | ||
| 82 | /* set the flag */ | 82 | /* set the flag */ |
| 83 | switch (nexthdr){ | 83 | switch (nexthdr) { |
| 84 | case NEXTHDR_HOP: | 84 | case NEXTHDR_HOP: |
| 85 | temp |= MASK_HOPOPTS; | 85 | temp |= MASK_HOPOPTS; |
| 86 | break; | 86 | break; |
| 87 | case NEXTHDR_ROUTING: | 87 | case NEXTHDR_ROUTING: |
| 88 | temp |= MASK_ROUTING; | 88 | temp |= MASK_ROUTING; |
| 89 | break; | 89 | break; |
| 90 | case NEXTHDR_FRAGMENT: | 90 | case NEXTHDR_FRAGMENT: |
| 91 | temp |= MASK_FRAGMENT; | 91 | temp |= MASK_FRAGMENT; |
| 92 | break; | 92 | break; |
| 93 | case NEXTHDR_AUTH: | 93 | case NEXTHDR_AUTH: |
| 94 | temp |= MASK_AH; | 94 | temp |= MASK_AH; |
| 95 | break; | 95 | break; |
| 96 | case NEXTHDR_DEST: | 96 | case NEXTHDR_DEST: |
| 97 | temp |= MASK_DSTOPTS; | 97 | temp |= MASK_DSTOPTS; |
| 98 | break; | 98 | break; |
| 99 | default: | 99 | default: |
| 100 | return 0; | 100 | return 0; |
| 101 | break; | 101 | break; |
| 102 | } | 102 | } |
| 103 | 103 | ||
| 104 | nexthdr = hp->nexthdr; | 104 | nexthdr = hp->nexthdr; |
| 105 | len -= hdrlen; | 105 | len -= hdrlen; |
| 106 | ptr += hdrlen; | 106 | ptr += hdrlen; |
| 107 | if (ptr > skb->len) | 107 | if (ptr > skb->len) |
| 108 | break; | 108 | break; |
| 109 | } | 109 | } |
| 110 | 110 | ||
| 111 | if ( (nexthdr != NEXTHDR_NONE ) && (nexthdr != NEXTHDR_ESP) ) | 111 | if ((nexthdr != NEXTHDR_NONE) && (nexthdr != NEXTHDR_ESP)) |
| 112 | temp |= MASK_PROTO; | 112 | temp |= MASK_PROTO; |
| 113 | 113 | ||
| 114 | if (info->modeflag) | 114 | if (info->modeflag) |
| @@ -137,8 +137,8 @@ ipv6header_checkentry(const char *tablename, | |||
| 137 | return 0; | 137 | return 0; |
| 138 | 138 | ||
| 139 | /* invflags is 0 or 0xff in hard mode */ | 139 | /* invflags is 0 or 0xff in hard mode */ |
| 140 | if ((!info->modeflag) && info->invflags != 0x00 | 140 | if ((!info->modeflag) && info->invflags != 0x00 && |
| 141 | && info->invflags != 0xFF) | 141 | info->invflags != 0xFF) |
| 142 | return 0; | 142 | return 0; |
| 143 | 143 | ||
| 144 | return 1; | 144 | return 1; |
| @@ -152,7 +152,7 @@ static struct ip6t_match ip6t_ipv6header_match = { | |||
| 152 | .me = THIS_MODULE, | 152 | .me = THIS_MODULE, |
| 153 | }; | 153 | }; |
| 154 | 154 | ||
| 155 | static int __init ipv6header_init(void) | 155 | static int __init ipv6header_init(void) |
| 156 | { | 156 | { |
| 157 | return ip6t_register_match(&ip6t_ipv6header_match); | 157 | return ip6t_register_match(&ip6t_ipv6header_match); |
| 158 | } | 158 | } |
| @@ -164,4 +164,3 @@ static void __exit ipv6header_exit(void) | |||
| 164 | 164 | ||
| 165 | module_init(ipv6header_init); | 165 | module_init(ipv6header_init); |
| 166 | module_exit(ipv6header_exit); | 166 | module_exit(ipv6header_exit); |
| 167 | |||
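
The walker above applies three different length rules because extension headers do not encode their size uniformly. A compact restatement, assuming the usual kernel definition ipv6_optlen(p) == (((p)->hdrlen + 1) << 3):

/* Length in bytes of the extension header at hp, as computed above. */
static int ext_hdr_len(unsigned char nexthdr, const struct ipv6_opt_hdr *hp)
{
	if (nexthdr == NEXTHDR_FRAGMENT)
		return 8;			/* fragment header: fixed size */
	if (nexthdr == NEXTHDR_AUTH)
		return (hp->hdrlen + 2) << 2;	/* AH: length counted in 32-bit words */
	return ipv6_optlen(hp);			/* others: counted in 8-byte units */
}
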
diff --git a/net/ipv6/netfilter/ip6t_owner.c b/net/ipv6/netfilter/ip6t_owner.c index 5409b375b512..8c8a4c7ec934 100644 --- a/net/ipv6/netfilter/ip6t_owner.c +++ b/net/ipv6/netfilter/ip6t_owner.c | |||
| @@ -36,14 +36,14 @@ match(const struct sk_buff *skb, | |||
| 36 | if (!skb->sk || !skb->sk->sk_socket || !skb->sk->sk_socket->file) | 36 | if (!skb->sk || !skb->sk->sk_socket || !skb->sk->sk_socket->file) |
| 37 | return 0; | 37 | return 0; |
| 38 | 38 | ||
| 39 | if(info->match & IP6T_OWNER_UID) { | 39 | if (info->match & IP6T_OWNER_UID) { |
| 40 | if((skb->sk->sk_socket->file->f_uid != info->uid) ^ | 40 | if ((skb->sk->sk_socket->file->f_uid != info->uid) ^ |
| 41 | !!(info->invert & IP6T_OWNER_UID)) | 41 | !!(info->invert & IP6T_OWNER_UID)) |
| 42 | return 0; | 42 | return 0; |
| 43 | } | 43 | } |
| 44 | 44 | ||
| 45 | if(info->match & IP6T_OWNER_GID) { | 45 | if (info->match & IP6T_OWNER_GID) { |
| 46 | if((skb->sk->sk_socket->file->f_gid != info->gid) ^ | 46 | if ((skb->sk->sk_socket->file->f_gid != info->gid) ^ |
| 47 | !!(info->invert & IP6T_OWNER_GID)) | 47 | !!(info->invert & IP6T_OWNER_GID)) |
| 48 | return 0; | 48 | return 0; |
| 49 | } | 49 | } |
| @@ -53,23 +53,23 @@ match(const struct sk_buff *skb, | |||
| 53 | 53 | ||
| 54 | static int | 54 | static int |
| 55 | checkentry(const char *tablename, | 55 | checkentry(const char *tablename, |
| 56 | const void *ip, | 56 | const void *ip, |
| 57 | void *matchinfo, | 57 | void *matchinfo, |
| 58 | unsigned int matchsize, | 58 | unsigned int matchsize, |
| 59 | unsigned int hook_mask) | 59 | unsigned int hook_mask) |
| 60 | { | 60 | { |
| 61 | const struct ip6t_owner_info *info = matchinfo; | 61 | const struct ip6t_owner_info *info = matchinfo; |
| 62 | 62 | ||
| 63 | if (hook_mask | 63 | if (hook_mask |
| 64 | & ~((1 << NF_IP6_LOCAL_OUT) | (1 << NF_IP6_POST_ROUTING))) { | 64 | & ~((1 << NF_IP6_LOCAL_OUT) | (1 << NF_IP6_POST_ROUTING))) { |
| 65 | printk("ip6t_owner: only valid for LOCAL_OUT or POST_ROUTING.\n"); | 65 | printk("ip6t_owner: only valid for LOCAL_OUT or POST_ROUTING.\n"); |
| 66 | return 0; | 66 | return 0; |
| 67 | } | 67 | } |
| 68 | 68 | ||
| 69 | if (matchsize != IP6T_ALIGN(sizeof(struct ip6t_owner_info))) | 69 | if (matchsize != IP6T_ALIGN(sizeof(struct ip6t_owner_info))) |
| 70 | return 0; | 70 | return 0; |
| 71 | 71 | ||
| 72 | if (info->match & (IP6T_OWNER_PID|IP6T_OWNER_SID)) { | 72 | if (info->match & (IP6T_OWNER_PID | IP6T_OWNER_SID)) { |
| 73 | printk("ipt_owner: pid and sid matching " | 73 | printk("ipt_owner: pid and sid matching " |
| 74 | "not supported anymore\n"); | 74 | "not supported anymore\n"); |
| 75 | return 0; | 75 | return 0; |
diff --git a/net/ipv6/netfilter/ip6t_policy.c b/net/ipv6/netfilter/ip6t_policy.c index 13fedad48c1d..afe1cc4c18a5 100644 --- a/net/ipv6/netfilter/ip6t_policy.c +++ b/net/ipv6/netfilter/ip6t_policy.c | |||
| @@ -118,7 +118,7 @@ static int match(const struct sk_buff *skb, | |||
| 118 | return ret; | 118 | return ret; |
| 119 | } | 119 | } |
| 120 | 120 | ||
| 121 | static int checkentry(const char *tablename, const struct ip6t_ip6 *ip, | 121 | static int checkentry(const char *tablename, const void *ip_void, |
| 122 | void *matchinfo, unsigned int matchsize, | 122 | void *matchinfo, unsigned int matchsize, |
| 123 | unsigned int hook_mask) | 123 | unsigned int hook_mask) |
| 124 | { | 124 | { |
diff --git a/net/ipv6/netfilter/ip6t_rt.c b/net/ipv6/netfilter/ip6t_rt.c index 8465b4375855..8f82476dc89e 100644 --- a/net/ipv6/netfilter/ip6t_rt.c +++ b/net/ipv6/netfilter/ip6t_rt.c | |||
| @@ -33,12 +33,12 @@ MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>"); | |||
| 33 | static inline int | 33 | static inline int |
| 34 | segsleft_match(u_int32_t min, u_int32_t max, u_int32_t id, int invert) | 34 | segsleft_match(u_int32_t min, u_int32_t max, u_int32_t id, int invert) |
| 35 | { | 35 | { |
| 36 | int r=0; | 36 | int r = 0; |
| 37 | DEBUGP("rt segsleft_match:%c 0x%x <= 0x%x <= 0x%x",invert? '!':' ', | 37 | DEBUGP("rt segsleft_match:%c 0x%x <= 0x%x <= 0x%x", |
| 38 | min,id,max); | 38 | invert ? '!' : ' ', min, id, max); |
| 39 | r=(id >= min && id <= max) ^ invert; | 39 | r = (id >= min && id <= max) ^ invert; |
| 40 | DEBUGP(" result %s\n",r? "PASS" : "FAILED"); | 40 | DEBUGP(" result %s\n", r ? "PASS" : "FAILED"); |
| 41 | return r; | 41 | return r; |
| 42 | } | 42 | } |
| 43 | 43 | ||
| 44 | static int | 44 | static int |
| @@ -50,87 +50,93 @@ match(const struct sk_buff *skb, | |||
| 50 | unsigned int protoff, | 50 | unsigned int protoff, |
| 51 | int *hotdrop) | 51 | int *hotdrop) |
| 52 | { | 52 | { |
| 53 | struct ipv6_rt_hdr _route, *rh; | 53 | struct ipv6_rt_hdr _route, *rh; |
| 54 | const struct ip6t_rt *rtinfo = matchinfo; | 54 | const struct ip6t_rt *rtinfo = matchinfo; |
| 55 | unsigned int temp; | 55 | unsigned int temp; |
| 56 | unsigned int ptr; | 56 | unsigned int ptr; |
| 57 | unsigned int hdrlen = 0; | 57 | unsigned int hdrlen = 0; |
| 58 | unsigned int ret = 0; | 58 | unsigned int ret = 0; |
| 59 | struct in6_addr *ap, _addr; | 59 | struct in6_addr *ap, _addr; |
| 60 | 60 | ||
| 61 | if (ipv6_find_hdr(skb, &ptr, NEXTHDR_ROUTING, NULL) < 0) | 61 | if (ipv6_find_hdr(skb, &ptr, NEXTHDR_ROUTING, NULL) < 0) |
| 62 | return 0; | 62 | return 0; |
| 63 | 63 | ||
| 64 | rh = skb_header_pointer(skb, ptr, sizeof(_route), &_route); | 64 | rh = skb_header_pointer(skb, ptr, sizeof(_route), &_route); |
| 65 | if (rh == NULL){ | 65 | if (rh == NULL) { |
| 66 | *hotdrop = 1; | 66 | *hotdrop = 1; |
| 67 | return 0; | 67 | return 0; |
| 68 | } | 68 | } |
| 69 | 69 | ||
| 70 | hdrlen = ipv6_optlen(rh); | 70 | hdrlen = ipv6_optlen(rh); |
| 71 | if (skb->len - ptr < hdrlen){ | 71 | if (skb->len - ptr < hdrlen) { |
| 72 | /* Packet smaller than its length field */ | 72 | /* Packet smaller than its length field */ |
| 73 | return 0; | 73 | return 0; |
| 74 | } | 74 | } |
| 75 | 75 | ||
| 76 | DEBUGP("IPv6 RT LEN %u %u ", hdrlen, rh->hdrlen); | 76 | DEBUGP("IPv6 RT LEN %u %u ", hdrlen, rh->hdrlen); |
| 77 | DEBUGP("TYPE %04X ", rh->type); | 77 | DEBUGP("TYPE %04X ", rh->type); |
| 78 | DEBUGP("SGS_LEFT %u %02X\n", rh->segments_left, rh->segments_left); | 78 | DEBUGP("SGS_LEFT %u %02X\n", rh->segments_left, rh->segments_left); |
| 79 | 79 | ||
| 80 | DEBUGP("IPv6 RT segsleft %02X ", | 80 | DEBUGP("IPv6 RT segsleft %02X ", |
| 81 | (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1], | 81 | (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1], |
| 82 | rh->segments_left, | 82 | rh->segments_left, |
| 83 | !!(rtinfo->invflags & IP6T_RT_INV_SGS)))); | 83 | !!(rtinfo->invflags & IP6T_RT_INV_SGS)))); |
| 84 | DEBUGP("type %02X %02X %02X ", | 84 | DEBUGP("type %02X %02X %02X ", |
| 85 | rtinfo->rt_type, rh->type, | 85 | rtinfo->rt_type, rh->type, |
| 86 | (!(rtinfo->flags & IP6T_RT_TYP) || | 86 | (!(rtinfo->flags & IP6T_RT_TYP) || |
| 87 | ((rtinfo->rt_type == rh->type) ^ | 87 | ((rtinfo->rt_type == rh->type) ^ |
| 88 | !!(rtinfo->invflags & IP6T_RT_INV_TYP)))); | 88 | !!(rtinfo->invflags & IP6T_RT_INV_TYP)))); |
| 89 | DEBUGP("len %02X %04X %02X ", | 89 | DEBUGP("len %02X %04X %02X ", |
| 90 | rtinfo->hdrlen, hdrlen, | 90 | rtinfo->hdrlen, hdrlen, |
| 91 | (!(rtinfo->flags & IP6T_RT_LEN) || | 91 | (!(rtinfo->flags & IP6T_RT_LEN) || |
| 92 | ((rtinfo->hdrlen == hdrlen) ^ | 92 | ((rtinfo->hdrlen == hdrlen) ^ |
| 93 | !!(rtinfo->invflags & IP6T_RT_INV_LEN)))); | 93 | !!(rtinfo->invflags & IP6T_RT_INV_LEN)))); |
| 94 | DEBUGP("res %02X %02X %02X ", | 94 | DEBUGP("res %02X %02X %02X ", |
| 95 | (rtinfo->flags & IP6T_RT_RES), ((struct rt0_hdr *)rh)->reserved, | 95 | (rtinfo->flags & IP6T_RT_RES), |
| 96 | !((rtinfo->flags & IP6T_RT_RES) && (((struct rt0_hdr *)rh)->reserved))); | 96 | ((struct rt0_hdr *)rh)->reserved, |
| 97 | 97 | !((rtinfo->flags & IP6T_RT_RES) && | |
| 98 | ret = (rh != NULL) | 98 | (((struct rt0_hdr *)rh)->reserved))); |
| 99 | && | 99 | |
| 100 | (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1], | 100 | ret = (rh != NULL) |
| 101 | rh->segments_left, | 101 | && |
| 102 | !!(rtinfo->invflags & IP6T_RT_INV_SGS))) | 102 | (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1], |
| 103 | && | 103 | rh->segments_left, |
| 104 | (!(rtinfo->flags & IP6T_RT_LEN) || | 104 | !!(rtinfo->invflags & IP6T_RT_INV_SGS))) |
| 105 | ((rtinfo->hdrlen == hdrlen) ^ | 105 | && |
| 106 | !!(rtinfo->invflags & IP6T_RT_INV_LEN))) | 106 | (!(rtinfo->flags & IP6T_RT_LEN) || |
| 107 | && | 107 | ((rtinfo->hdrlen == hdrlen) ^ |
| 108 | (!(rtinfo->flags & IP6T_RT_TYP) || | 108 | !!(rtinfo->invflags & IP6T_RT_INV_LEN))) |
| 109 | ((rtinfo->rt_type == rh->type) ^ | 109 | && |
| 110 | !!(rtinfo->invflags & IP6T_RT_INV_TYP))); | 110 | (!(rtinfo->flags & IP6T_RT_TYP) || |
| 111 | ((rtinfo->rt_type == rh->type) ^ | ||
| 112 | !!(rtinfo->invflags & IP6T_RT_INV_TYP))); | ||
| 111 | 113 | ||
| 112 | if (ret && (rtinfo->flags & IP6T_RT_RES)) { | 114 | if (ret && (rtinfo->flags & IP6T_RT_RES)) { |
| 113 | u_int32_t *rp, _reserved; | 115 | u_int32_t *rp, _reserved; |
| 114 | rp = skb_header_pointer(skb, | 116 | rp = skb_header_pointer(skb, |
| 115 | ptr + offsetof(struct rt0_hdr, reserved), | 117 | ptr + offsetof(struct rt0_hdr, |
| 116 | sizeof(_reserved), &_reserved); | 118 | reserved), |
| 119 | sizeof(_reserved), | ||
| 120 | &_reserved); | ||
| 117 | 121 | ||
| 118 | ret = (*rp == 0); | 122 | ret = (*rp == 0); |
| 119 | } | 123 | } |
| 120 | 124 | ||
| 121 | DEBUGP("#%d ",rtinfo->addrnr); | 125 | DEBUGP("#%d ", rtinfo->addrnr); |
| 122 | if ( !(rtinfo->flags & IP6T_RT_FST) ){ | 126 | if (!(rtinfo->flags & IP6T_RT_FST)) { |
| 123 | return ret; | 127 | return ret; |
| 124 | } else if (rtinfo->flags & IP6T_RT_FST_NSTRICT) { | 128 | } else if (rtinfo->flags & IP6T_RT_FST_NSTRICT) { |
| 125 | DEBUGP("Not strict "); | 129 | DEBUGP("Not strict "); |
| 126 | if ( rtinfo->addrnr > (unsigned int)((hdrlen-8)/16) ){ | 130 | if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) { |
| 127 | DEBUGP("There isn't enough space\n"); | 131 | DEBUGP("There isn't enough space\n"); |
| 128 | return 0; | 132 | return 0; |
| 129 | } else { | 133 | } else { |
| 130 | unsigned int i = 0; | 134 | unsigned int i = 0; |
| 131 | 135 | ||
| 132 | DEBUGP("#%d ",rtinfo->addrnr); | 136 | DEBUGP("#%d ", rtinfo->addrnr); |
| 133 | for(temp=0; temp<(unsigned int)((hdrlen-8)/16); temp++){ | 137 | for (temp = 0; |
| 138 | temp < (unsigned int)((hdrlen - 8) / 16); | ||
| 139 | temp++) { | ||
| 134 | ap = skb_header_pointer(skb, | 140 | ap = skb_header_pointer(skb, |
| 135 | ptr | 141 | ptr |
| 136 | + sizeof(struct rt0_hdr) | 142 | + sizeof(struct rt0_hdr) |
| @@ -141,24 +147,26 @@ match(const struct sk_buff *skb, | |||
| 141 | BUG_ON(ap == NULL); | 147 | BUG_ON(ap == NULL); |
| 142 | 148 | ||
| 143 | if (ipv6_addr_equal(ap, &rtinfo->addrs[i])) { | 149 | if (ipv6_addr_equal(ap, &rtinfo->addrs[i])) { |
| 144 | DEBUGP("i=%d temp=%d;\n",i,temp); | 150 | DEBUGP("i=%d temp=%d;\n", i, temp); |
| 145 | i++; | 151 | i++; |
| 146 | } | 152 | } |
| 147 | if (i==rtinfo->addrnr) break; | 153 | if (i == rtinfo->addrnr) |
| 154 | break; | ||
| 148 | } | 155 | } |
| 149 | DEBUGP("i=%d #%d\n", i, rtinfo->addrnr); | 156 | DEBUGP("i=%d #%d\n", i, rtinfo->addrnr); |
| 150 | if (i == rtinfo->addrnr) | 157 | if (i == rtinfo->addrnr) |
| 151 | return ret; | 158 | return ret; |
| 152 | else return 0; | 159 | else |
| 160 | return 0; | ||
| 153 | } | 161 | } |
| 154 | } else { | 162 | } else { |
| 155 | DEBUGP("Strict "); | 163 | DEBUGP("Strict "); |
| 156 | if ( rtinfo->addrnr > (unsigned int)((hdrlen-8)/16) ){ | 164 | if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) { |
| 157 | DEBUGP("There isn't enough space\n"); | 165 | DEBUGP("There isn't enough space\n"); |
| 158 | return 0; | 166 | return 0; |
| 159 | } else { | 167 | } else { |
| 160 | DEBUGP("#%d ",rtinfo->addrnr); | 168 | DEBUGP("#%d ", rtinfo->addrnr); |
| 161 | for(temp=0; temp<rtinfo->addrnr; temp++){ | 169 | for (temp = 0; temp < rtinfo->addrnr; temp++) { |
| 162 | ap = skb_header_pointer(skb, | 170 | ap = skb_header_pointer(skb, |
| 163 | ptr | 171 | ptr |
| 164 | + sizeof(struct rt0_hdr) | 172 | + sizeof(struct rt0_hdr) |
| @@ -171,9 +179,11 @@ match(const struct sk_buff *skb, | |||
| 171 | break; | 179 | break; |
| 172 | } | 180 | } |
| 173 | DEBUGP("temp=%d #%d\n", temp, rtinfo->addrnr); | 181 | DEBUGP("temp=%d #%d\n", temp, rtinfo->addrnr); |
| 174 | if ((temp == rtinfo->addrnr) && (temp == (unsigned int)((hdrlen-8)/16))) | 182 | if ((temp == rtinfo->addrnr) && |
| 183 | (temp == (unsigned int)((hdrlen - 8) / 16))) | ||
| 175 | return ret; | 184 | return ret; |
| 176 | else return 0; | 185 | else |
| 186 | return 0; | ||
| 177 | } | 187 | } |
| 178 | } | 188 | } |
| 179 | 189 | ||
| @@ -183,32 +193,31 @@ match(const struct sk_buff *skb, | |||
| 183 | /* Called when user tries to insert an entry of this type. */ | 193 | /* Called when user tries to insert an entry of this type. */ |
| 184 | static int | 194 | static int |
| 185 | checkentry(const char *tablename, | 195 | checkentry(const char *tablename, |
| 186 | const void *entry, | 196 | const void *entry, |
| 187 | void *matchinfo, | 197 | void *matchinfo, |
| 188 | unsigned int matchinfosize, | 198 | unsigned int matchinfosize, |
| 189 | unsigned int hook_mask) | 199 | unsigned int hook_mask) |
| 190 | { | 200 | { |
| 191 | const struct ip6t_rt *rtinfo = matchinfo; | 201 | const struct ip6t_rt *rtinfo = matchinfo; |
| 192 | 202 | ||
| 193 | if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_rt))) { | 203 | if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_rt))) { |
| 194 | DEBUGP("ip6t_rt: matchsize %u != %u\n", | 204 | DEBUGP("ip6t_rt: matchsize %u != %u\n", |
| 195 | matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_rt))); | 205 | matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_rt))); |
| 196 | return 0; | 206 | return 0; |
| 197 | } | 207 | } |
| 198 | if (rtinfo->invflags & ~IP6T_RT_INV_MASK) { | 208 | if (rtinfo->invflags & ~IP6T_RT_INV_MASK) { |
| 199 | DEBUGP("ip6t_rt: unknown flags %X\n", | 209 | DEBUGP("ip6t_rt: unknown flags %X\n", rtinfo->invflags); |
| 200 | rtinfo->invflags); | 210 | return 0; |
| 201 | return 0; | 211 | } |
| 202 | } | 212 | if ((rtinfo->flags & (IP6T_RT_RES | IP6T_RT_FST_MASK)) && |
| 203 | if ( (rtinfo->flags & (IP6T_RT_RES|IP6T_RT_FST_MASK)) && | 213 | (!(rtinfo->flags & IP6T_RT_TYP) || |
| 204 | (!(rtinfo->flags & IP6T_RT_TYP) || | 214 | (rtinfo->rt_type != 0) || |
| 205 | (rtinfo->rt_type != 0) || | 215 | (rtinfo->invflags & IP6T_RT_INV_TYP))) { |
| 206 | (rtinfo->invflags & IP6T_RT_INV_TYP)) ) { | 216 | DEBUGP("`--rt-type 0' required before `--rt-0-*'"); |
| 207 | DEBUGP("`--rt-type 0' required before `--rt-0-*'"); | 217 | return 0; |
| 208 | return 0; | 218 | } |
| 209 | } | 219 | |
| 210 | 220 | return 1; | |
| 211 | return 1; | ||
| 212 | } | 221 | } |
| 213 | 222 | ||
| 214 | static struct ip6t_match rt_match = { | 223 | static struct ip6t_match rt_match = { |
| @@ -220,12 +229,12 @@ static struct ip6t_match rt_match = { | |||
| 220 | 229 | ||
| 221 | static int __init init(void) | 230 | static int __init init(void) |
| 222 | { | 231 | { |
| 223 | return ip6t_register_match(&rt_match); | 232 | return ip6t_register_match(&rt_match); |
| 224 | } | 233 | } |
| 225 | 234 | ||
| 226 | static void __exit cleanup(void) | 235 | static void __exit cleanup(void) |
| 227 | { | 236 | { |
| 228 | ip6t_unregister_match(&rt_match); | 237 | ip6t_unregister_match(&rt_match); |
| 229 | } | 238 | } |
| 230 | 239 | ||
| 231 | module_init(init); | 240 | module_init(init); |
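
The repeated (hdrlen - 8) / 16 in the match above falls out of the type 0 routing header layout: an 8-byte fixed part followed by a list of 16-byte IPv6 addresses, so the expression is simply the number of addresses carried. The layout the struct rt0_hdr casts rely on looks roughly like this (a sketch from memory, not a verbatim copy of the kernel header):

/* Type 0 routing header: 8 fixed bytes, then in6_addr entries, hence
 * address count = (header length in bytes - 8) / 16. */
struct rt0_hdr_sketch {
	struct ipv6_rt_hdr	rt_hdr;		/* nexthdr, hdrlen, type, segments_left */
	__u32			reserved;
	struct in6_addr		addr[0];	/* the segment list */
};
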
diff --git a/net/rxrpc/krxtimod.c b/net/rxrpc/krxtimod.c index 3ac81cdd1211..3e7466900bd4 100644 --- a/net/rxrpc/krxtimod.c +++ b/net/rxrpc/krxtimod.c | |||
| @@ -81,7 +81,7 @@ static int krxtimod(void *arg) | |||
| 81 | 81 | ||
| 82 | for (;;) { | 82 | for (;;) { |
| 83 | unsigned long jif; | 83 | unsigned long jif; |
| 84 | signed long timeout; | 84 | long timeout; |
| 85 | 85 | ||
| 86 | /* deal with the server being asked to die */ | 86 | /* deal with the server being asked to die */ |
| 87 | if (krxtimod_die) { | 87 | if (krxtimod_die) { |
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c index 3b5ecd8e2401..29975d99d864 100644 --- a/net/rxrpc/proc.c +++ b/net/rxrpc/proc.c | |||
| @@ -361,7 +361,7 @@ static void rxrpc_proc_peers_stop(struct seq_file *p, void *v) | |||
| 361 | static int rxrpc_proc_peers_show(struct seq_file *m, void *v) | 361 | static int rxrpc_proc_peers_show(struct seq_file *m, void *v) |
| 362 | { | 362 | { |
| 363 | struct rxrpc_peer *peer = list_entry(v, struct rxrpc_peer, proc_link); | 363 | struct rxrpc_peer *peer = list_entry(v, struct rxrpc_peer, proc_link); |
| 364 | signed long timeout; | 364 | long timeout; |
| 365 | 365 | ||
| 366 | /* display header on line 1 */ | 366 | /* display header on line 1 */ |
| 367 | if (v == SEQ_START_TOKEN) { | 367 | if (v == SEQ_START_TOKEN) { |
| @@ -373,8 +373,8 @@ static int rxrpc_proc_peers_show(struct seq_file *m, void *v) | |||
| 373 | /* display one peer per line on subsequent lines */ | 373 | /* display one peer per line on subsequent lines */ |
| 374 | timeout = 0; | 374 | timeout = 0; |
| 375 | if (!list_empty(&peer->timeout.link)) | 375 | if (!list_empty(&peer->timeout.link)) |
| 376 | timeout = (signed long) peer->timeout.timo_jif - | 376 | timeout = (long) peer->timeout.timo_jif - |
| 377 | (signed long) jiffies; | 377 | (long) jiffies; |
| 378 | 378 | ||
| 379 | seq_printf(m, "%5hu %08x %5d %5d %8ld %5Zu %7lu\n", | 379 | seq_printf(m, "%5hu %08x %5d %5d %8ld %5Zu %7lu\n", |
| 380 | peer->trans->port, | 380 | peer->trans->port, |
| @@ -468,7 +468,7 @@ static void rxrpc_proc_conns_stop(struct seq_file *p, void *v) | |||
| 468 | static int rxrpc_proc_conns_show(struct seq_file *m, void *v) | 468 | static int rxrpc_proc_conns_show(struct seq_file *m, void *v) |
| 469 | { | 469 | { |
| 470 | struct rxrpc_connection *conn; | 470 | struct rxrpc_connection *conn; |
| 471 | signed long timeout; | 471 | long timeout; |
| 472 | 472 | ||
| 473 | conn = list_entry(v, struct rxrpc_connection, proc_link); | 473 | conn = list_entry(v, struct rxrpc_connection, proc_link); |
| 474 | 474 | ||
| @@ -484,8 +484,8 @@ static int rxrpc_proc_conns_show(struct seq_file *m, void *v) | |||
| 484 | /* display one conn per line on subsequent lines */ | 484 | /* display one conn per line on subsequent lines */ |
| 485 | timeout = 0; | 485 | timeout = 0; |
| 486 | if (!list_empty(&conn->timeout.link)) | 486 | if (!list_empty(&conn->timeout.link)) |
| 487 | timeout = (signed long) conn->timeout.timo_jif - | 487 | timeout = (long) conn->timeout.timo_jif - |
| 488 | (signed long) jiffies; | 488 | (long) jiffies; |
| 489 | 489 | ||
| 490 | seq_printf(m, | 490 | seq_printf(m, |
| 491 | "%5hu %08x %5hu %04hx %08x %-3.3s %08x %08x %5Zu %8ld\n", | 491 | "%5hu %08x %5hu %04hx %08x %-3.3s %08x %08x %5Zu %8ld\n", |
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 5b3a3e48ed92..1641db33a994 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c | |||
| @@ -228,14 +228,13 @@ static int prio_tune(struct Qdisc *sch, struct rtattr *opt) | |||
| 228 | } | 228 | } |
| 229 | sch_tree_unlock(sch); | 229 | sch_tree_unlock(sch); |
| 230 | 230 | ||
| 231 | for (i=0; i<=TC_PRIO_MAX; i++) { | 231 | for (i=0; i<q->bands; i++) { |
| 232 | int band = q->prio2band[i]; | 232 | if (q->queues[i] == &noop_qdisc) { |
| 233 | if (q->queues[band] == &noop_qdisc) { | ||
| 234 | struct Qdisc *child; | 233 | struct Qdisc *child; |
| 235 | child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops); | 234 | child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops); |
| 236 | if (child) { | 235 | if (child) { |
| 237 | sch_tree_lock(sch); | 236 | sch_tree_lock(sch); |
| 238 | child = xchg(&q->queues[band], child); | 237 | child = xchg(&q->queues[i], child); |
| 239 | 238 | ||
| 240 | if (child != &noop_qdisc) | 239 | if (child != &noop_qdisc) |
| 241 | qdisc_destroy(child); | 240 | qdisc_destroy(child); |
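
The prio_tune() fix above changes which index space the loop walks. prio2band[] maps packet priorities 0..TC_PRIO_MAX onto band indices, so the old loop only ever visited bands that some priority happened to map to; a configured band with no mapping was left on &noop_qdisc, which drops whatever is queued to it. Iterating over 0..q->bands-1 guarantees every band gets a real default child. A toy userspace illustration with made-up map values:

#include <stdio.h>

#define TC_PRIO_MAX 15

int main(void)
{
	/* A priority map that never selects band 2 (values are illustrative). */
	int prio2band[TC_PRIO_MAX + 1] = { 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
	int bands = 3, seen[3] = { 0, 0, 0 }, i;

	for (i = 0; i <= TC_PRIO_MAX; i++)	/* old-style walk via the map */
		seen[prio2band[i]] = 1;
	printf("via prio2band: %d %d %d\n", seen[0], seen[1], seen[2]);

	for (i = 0; i < bands; i++)		/* new-style walk over the bands */
		seen[i] = 1;
	printf("via bands:     %d %d %d\n", seen[0], seen[1], seen[2]);
	return 0;
}
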
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 8734bb7280e3..86d8da0cbd02 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c | |||
| @@ -144,6 +144,8 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) | |||
| 144 | if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && | 144 | if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && |
| 145 | (iph->protocol == IPPROTO_TCP || | 145 | (iph->protocol == IPPROTO_TCP || |
| 146 | iph->protocol == IPPROTO_UDP || | 146 | iph->protocol == IPPROTO_UDP || |
| 147 | iph->protocol == IPPROTO_SCTP || | ||
| 148 | iph->protocol == IPPROTO_DCCP || | ||
| 147 | iph->protocol == IPPROTO_ESP)) | 149 | iph->protocol == IPPROTO_ESP)) |
| 148 | h2 ^= *(((u32*)iph) + iph->ihl); | 150 | h2 ^= *(((u32*)iph) + iph->ihl); |
| 149 | break; | 151 | break; |
| @@ -155,6 +157,8 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) | |||
| 155 | h2 = iph->saddr.s6_addr32[3]^iph->nexthdr; | 157 | h2 = iph->saddr.s6_addr32[3]^iph->nexthdr; |
| 156 | if (iph->nexthdr == IPPROTO_TCP || | 158 | if (iph->nexthdr == IPPROTO_TCP || |
| 157 | iph->nexthdr == IPPROTO_UDP || | 159 | iph->nexthdr == IPPROTO_UDP || |
| 160 | iph->nexthdr == IPPROTO_SCTP || | ||
| 161 | iph->nexthdr == IPPROTO_DCCP || | ||
| 158 | iph->nexthdr == IPPROTO_ESP) | 162 | iph->nexthdr == IPPROTO_ESP) |
| 159 | h2 ^= *(u32*)&iph[1]; | 163 | h2 ^= *(u32*)&iph[1]; |
| 160 | break; | 164 | break; |
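
The new IPPROTO_SCTP and IPPROTO_DCCP cases work for the same reason the existing TCP and UDP ones do: all four protocols begin their header with 16-bit source and destination ports, so the 32 bits immediately after the network header are a usable per-flow value (for ESP it is the SPI that gets mixed in). A sketch of what the IPv4 branch above actually dereferences, assuming kernel types:

/* iph is the struct iphdr * from sfq_hash(); ihl counts 32-bit words, so
 * this reads the first 4 bytes of the transport header: the port pair for
 * TCP, UDP, SCTP and DCCP, the SPI for ESP.  Sketch only. */
static inline u32 first_transport_word(const struct iphdr *iph)
{
	return *(((const u32 *)iph) + iph->ihl);
}
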
diff --git a/sound/sparc/cs4231.c b/sound/sparc/cs4231.c index e9086e95a31f..fd6543998788 100644 --- a/sound/sparc/cs4231.c +++ b/sound/sparc/cs4231.c | |||
| @@ -69,13 +69,14 @@ struct sbus_dma_info { | |||
| 69 | }; | 69 | }; |
| 70 | #endif | 70 | #endif |
| 71 | 71 | ||
| 72 | struct snd_cs4231; | ||
| 72 | struct cs4231_dma_control { | 73 | struct cs4231_dma_control { |
| 73 | void (*prepare)(struct cs4231_dma_control *dma_cont, int dir); | 74 | void (*prepare)(struct cs4231_dma_control *dma_cont, int dir); |
| 74 | void (*enable)(struct cs4231_dma_control *dma_cont, int on); | 75 | void (*enable)(struct cs4231_dma_control *dma_cont, int on); |
| 75 | int (*request)(struct cs4231_dma_control *dma_cont, dma_addr_t bus_addr, size_t len); | 76 | int (*request)(struct cs4231_dma_control *dma_cont, dma_addr_t bus_addr, size_t len); |
| 76 | unsigned int (*address)(struct cs4231_dma_control *dma_cont); | 77 | unsigned int (*address)(struct cs4231_dma_control *dma_cont); |
| 77 | void (*reset)(struct snd_cs4231 *chip); | 78 | void (*reset)(struct snd_cs4231 *chip); |
| 78 | void (*preallocate)(struct snd_cs4231 *chip, struct snd_snd_pcm *pcm); | 79 | void (*preallocate)(struct snd_cs4231 *chip, struct snd_pcm *pcm); |
| 79 | #ifdef EBUS_SUPPORT | 80 | #ifdef EBUS_SUPPORT |
| 80 | struct ebus_dma_info ebus_info; | 81 | struct ebus_dma_info ebus_info; |
| 81 | #endif | 82 | #endif |
