author    Jeff Garzik <jgarzik@pobox.com>    2006-02-20 05:59:59 -0500
committer Jeff Garzik <jgarzik@pobox.com>    2006-02-20 05:59:59 -0500
commit    c5580a7ecb859c6821dd761c95fa150ec7695ae1 (patch)
tree      18ca034a53c22e12912e55f5858d1b4a235cedd7
parent    b04a92e1601eb6df3a3b6599e7fb7ee021eef2cb (diff)
parent    d33a73c81241e3d9ab8da2d0558429bdd5b4ef9a (diff)
Merge branch 'forcedeth'
-rw-r--r--  Documentation/cpu-hotplug.txt         |  16
-rw-r--r--  Makefile                              |   2
-rw-r--r--  arch/s390/Kconfig                     |   4
-rw-r--r--  arch/s390/kernel/compat_linux.c       |   4
-rw-r--r--  arch/s390/kernel/compat_wrapper.S     |   6
-rw-r--r--  arch/s390/kernel/process.c            |   4
-rw-r--r--  arch/s390/kernel/setup.c              |   2
-rw-r--r--  arch/s390/kernel/smp.c                |  69
-rw-r--r--  arch/s390/kernel/syscalls.S           |   2
-rw-r--r--  arch/sh/Kconfig                       |   2
-rw-r--r--  arch/x86_64/kernel/mpparse.c          |   4
-rw-r--r--  drivers/acpi/resources/rscalc.c       |   6
-rw-r--r--  drivers/char/tpm/tpm_infineon.c       |  48
-rw-r--r--  drivers/macintosh/windfarm_smu_sat.c  |   1
-rw-r--r--  drivers/net/forcedeth.c               | 593
-rw-r--r--  drivers/s390/cio/device.c             |   2
-rw-r--r--  drivers/s390/cio/device_pgid.c        |   2
-rw-r--r--  drivers/s390/cio/device_status.c      |   1
-rw-r--r--  drivers/scsi/libata-core.c            |   7
-rw-r--r--  drivers/scsi/sata_mv.c                |   1
-rw-r--r--  drivers/scsi/sata_vsc.c               |  30
-rw-r--r--  fs/compat.c                           |   6
-rw-r--r--  fs/ext2/xattr.c                       |   6
-rw-r--r--  fs/fuse/dev.c                         |   6
-rw-r--r--  fs/fuse/file.c                        |  11
-rw-r--r--  fs/select.c                           |   6
-rw-r--r--  include/asm-powerpc/pgalloc.h         |   2
-rw-r--r--  include/asm-s390/smp.h                |   2
-rw-r--r--  kernel/power/swsusp.c                 |   4
-rw-r--r--  kernel/sched.c                        |  13
-rw-r--r--  mm/mempolicy.c                        |   2
-rw-r--r--  mm/page_alloc.c                       |  22
32 files changed, 753 insertions(+), 133 deletions(-)
diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt
index e05278087ffa..e71bc6cbbc5e 100644
--- a/Documentation/cpu-hotplug.txt
+++ b/Documentation/cpu-hotplug.txt
@@ -11,6 +11,8 @@
 	Joel Schopp <jschopp@austin.ibm.com>
 ia64/x86_64:
 	Ashok Raj <ashok.raj@intel.com>
+s390:
+	Heiko Carstens <heiko.carstens@de.ibm.com>
 
 Authors: Ashok Raj <ashok.raj@intel.com>
 Lots of feedback: Nathan Lynch <nathanl@austin.ibm.com>,
@@ -44,11 +46,9 @@ maxcpus=n Restrict boot time cpus to n. Say if you have 4 cpus, using
 		maxcpus=2 will only boot 2. You can choose to bring the
 		other cpus later online, read FAQ's for more info.
 
-additional_cpus*=n	Use this to limit hotpluggable cpus. This option sets
-			cpu_possible_map = cpu_present_map + additional_cpus
-
-(*) Option valid only for following architectures
-- x86_64, ia64
+additional_cpus=n	[x86_64, s390 only] use this to limit hotpluggable cpus.
+			This option sets
+			cpu_possible_map = cpu_present_map + additional_cpus
 
 ia64 and x86_64 use the number of disabled local apics in ACPI tables MADT
 to determine the number of potentially hot-pluggable cpus. The implementation
@@ -58,6 +58,12 @@ mark such hot-pluggable cpus as disabled entries, one could use this
 parameter "additional_cpus=x" to represent those cpus in the cpu_possible_map.
 
 
+possible_cpus=n		[s390 only] use this to set hotpluggable cpus.
+			This option sets possible_cpus bits in
+			cpu_possible_map. Thus keeping the numbers of bits set
+			constant even if the machine gets rebooted.
+			This option overrides additional_cpus.
+
 CPU maps and such
 -----------------
 [More on cpumaps and primitive to manipulate, please check
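
The documented relationship is cpu_possible_map = cpu_present_map + additional_cpus, with possible_cpus taking precedence on s390. A rough standalone sketch of that sizing logic (not kernel code; the function and parameter names are illustrative, mirroring smp_setup_cpu_possible_map() in the s390 diff further below):

	/* illustrative only: how the two boot parameters interact */
	unsigned int possible_map_bits(unsigned int present_cpus,
				       unsigned int additional_cpus,
				       unsigned int possible_cpus,
				       unsigned int nr_cpus)
	{
		unsigned int bits = present_cpus + additional_cpus;

		if (possible_cpus)	/* possible_cpus= overrides */
			bits = possible_cpus;
		return bits < nr_cpus ? bits : nr_cpus;	/* clamp to NR_CPUS */
	}
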
diff --git a/Makefile b/Makefile
index 48d569d46ae1..77a448c8e776 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 16
-EXTRAVERSION =-rc3
+EXTRAVERSION =-rc4
 NAME=Sliding Snow Leopard
 
 # *DOCUMENTATION*
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index b66602ad7b33..b7ca5bf9acfc 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -80,6 +80,10 @@ config HOTPLUG_CPU
 	  can be controlled through /sys/devices/system/cpu/cpu#.
 	  Say N if you want to disable CPU hotplug.
 
+config DEFAULT_MIGRATION_COST
+	int
+	default "1000000"
+
 config MATHEMU
 	bool "IEEE FPU emulation"
 	depends on MARCH_G5
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 2d021626c1a6..cc058dc3bc8b 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -905,8 +905,8 @@ asmlinkage long sys32_fstat64(unsigned long fd, struct stat64_emu31 __user * sta
 	return ret;
 }
 
-asmlinkage long sys32_fstatat(unsigned int dfd, char __user *filename,
+asmlinkage long sys32_fstatat64(unsigned int dfd, char __user *filename,
 		struct stat64_emu31 __user* statbuf, int flag)
 {
 	struct kstat stat;
 	int error = -EINVAL;
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index dd2d6c3e8df8..615964cca15f 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1523,13 +1523,13 @@ compat_sys_futimesat_wrapper:
 	llgtr	%r4,%r4			# struct timeval *
 	jg	compat_sys_futimesat
 
-	.globl sys32_fstatat_wrapper
-sys32_fstatat_wrapper:
+	.globl sys32_fstatat64_wrapper
+sys32_fstatat64_wrapper:
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# char *
 	llgtr	%r4,%r4			# struct stat64 *
 	lgfr	%r5,%r5			# int
-	jg	sys32_fstatat
+	jg	sys32_fstatat64
 
 	.globl sys_unlinkat_wrapper
 sys_unlinkat_wrapper:
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 008c74526fd3..da6fbae8df91 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -128,8 +128,10 @@ void default_idle(void)
 	__ctl_set_bit(8, 15);
 
 #ifdef CONFIG_HOTPLUG_CPU
-	if (cpu_is_offline(cpu))
+	if (cpu_is_offline(cpu)) {
+		preempt_enable_no_resched();
 		cpu_die();
+	}
 #endif
 
 	local_mcck_disable();
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index de8784267473..24f62f16c0e5 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -600,6 +600,7 @@ setup_arch(char **cmdline_p)
 	init_mm.brk = (unsigned long) &_end;
 
 	parse_cmdline_early(cmdline_p);
+	parse_early_param();
 
 	setup_memory();
 	setup_resources();
@@ -607,6 +608,7 @@ setup_arch(char **cmdline_p)
 
 	cpu_init();
 	__cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr;
+	smp_setup_cpu_possible_map();
 
 	/*
 	 * Create kernel page tables and switch to virtual addressing.
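
The parse_early_param() call added above is what makes early_param() handlers run during s390's setup_arch(). A minimal sketch of such a handler, following the pattern of the ones added in smp.c below (the parameter name my_param is illustrative only):

	static unsigned int __initdata my_param;

	static int __init setup_my_param(char *s)
	{
		/* invoked by parse_early_param() when "my_param=" appears
		 * on the kernel command line; return 0 on success */
		my_param = simple_strtoul(s, NULL, 0);
		return 0;
	}
	early_param("my_param", setup_my_param);
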
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 0d1ad5dbe2b1..7dbe00c76c6b 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1,8 +1,7 @@
 /*
  *  arch/s390/kernel/smp.c
  *
- *  S390 version
- *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *  Copyright (C) IBM Corp. 1999,2006
  *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
  *               Heiko Carstens (heiko.carstens@de.ibm.com)
@@ -41,8 +40,6 @@
 #include <asm/cpcmd.h>
 #include <asm/tlbflush.h>
 
-/* prototypes */
-
 extern volatile int __cpu_logical_map[];
 
 /*
@@ -51,13 +48,11 @@ extern volatile int __cpu_logical_map[];
 
 struct _lowcore *lowcore_ptr[NR_CPUS];
 
-cpumask_t cpu_online_map;
-cpumask_t cpu_possible_map = CPU_MASK_ALL;
+cpumask_t cpu_online_map = CPU_MASK_NONE;
+cpumask_t cpu_possible_map = CPU_MASK_NONE;
 
 static struct task_struct *current_set[NR_CPUS];
 
-EXPORT_SYMBOL(cpu_online_map);
-
 /*
  * Reboot, halt and power_off routines for SMP.
  */
@@ -490,10 +485,10 @@ void smp_ctl_clear_bit(int cr, int bit) {
  * Lets check how many CPUs we have.
  */
 
-void
-__init smp_check_cpus(unsigned int max_cpus)
+static unsigned int
+__init smp_count_cpus(void)
 {
-	int cpu, num_cpus;
+	unsigned int cpu, num_cpus;
 	__u16 boot_cpu_addr;
 
 	/*
@@ -503,19 +498,20 @@ __init smp_check_cpus(unsigned int max_cpus)
 	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
 	current_thread_info()->cpu = 0;
 	num_cpus = 1;
-	for (cpu = 0; cpu <= 65535 && num_cpus < max_cpus; cpu++) {
+	for (cpu = 0; cpu <= 65535; cpu++) {
 		if ((__u16) cpu == boot_cpu_addr)
 			continue;
-		__cpu_logical_map[num_cpus] = (__u16) cpu;
-		if (signal_processor(num_cpus, sigp_sense) ==
+		__cpu_logical_map[1] = (__u16) cpu;
+		if (signal_processor(1, sigp_sense) ==
 		    sigp_not_operational)
 			continue;
-		cpu_set(num_cpus, cpu_present_map);
 		num_cpus++;
 	}
 
 	printk("Detected %d CPU's\n",(int) num_cpus);
 	printk("Boot cpu address %2X\n", boot_cpu_addr);
+
+	return num_cpus;
 }
 
 /*
@@ -676,6 +672,44 @@ __cpu_up(unsigned int cpu)
 	return 0;
 }
 
+static unsigned int __initdata additional_cpus;
+static unsigned int __initdata possible_cpus;
+
+void __init smp_setup_cpu_possible_map(void)
+{
+	unsigned int phy_cpus, pos_cpus, cpu;
+
+	phy_cpus = smp_count_cpus();
+	pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);
+
+	if (possible_cpus)
+		pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS);
+
+	for (cpu = 0; cpu < pos_cpus; cpu++)
+		cpu_set(cpu, cpu_possible_map);
+
+	phy_cpus = min(phy_cpus, pos_cpus);
+
+	for (cpu = 0; cpu < phy_cpus; cpu++)
+		cpu_set(cpu, cpu_present_map);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+static int __init setup_additional_cpus(char *s)
+{
+	additional_cpus = simple_strtoul(s, NULL, 0);
+	return 0;
+}
+early_param("additional_cpus", setup_additional_cpus);
+
+static int __init setup_possible_cpus(char *s)
+{
+	possible_cpus = simple_strtoul(s, NULL, 0);
+	return 0;
+}
+early_param("possible_cpus", setup_possible_cpus);
+
 int
 __cpu_disable(void)
 {
@@ -744,6 +778,8 @@ cpu_die(void)
 	for(;;);
 }
 
+#endif /* CONFIG_HOTPLUG_CPU */
+
 /*
  * Cycle through the processors and setup structures.
  */
@@ -757,7 +793,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	/* request the 0x1201 emergency signal external interrupt */
 	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
 		panic("Couldn't request external interrupt 0x1201");
-	smp_check_cpus(max_cpus);
 	memset(lowcore_ptr,0,sizeof(lowcore_ptr));
 	/*
 	 * Initialize prefix pages and stacks for all possible cpus
@@ -806,7 +841,6 @@ void __devinit smp_prepare_boot_cpu(void)
 	BUG_ON(smp_processor_id() != 0);
 
 	cpu_set(0, cpu_online_map);
-	cpu_set(0, cpu_present_map);
 	S390_lowcore.percpu_offset = __per_cpu_offset[0];
 	current_set[0] = current;
 }
@@ -845,6 +879,7 @@ static int __init topology_init(void)
 
 subsys_initcall(topology_init);
 
+EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);
 EXPORT_SYMBOL(lowcore_ptr);
 EXPORT_SYMBOL(smp_ctl_set_bit);
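
With the rework above, cpu_possible_map is fixed once at boot from smp_count_cpus() plus the boot parameters, while cpu_present_map covers only physically detected cpus. A sketch of how such masks are typically consumed (for_each_cpu_mask() is the 2.6.16-era iterator; the two helpers in the loop bodies are hypothetical):

	unsigned int cpu;

	for_each_cpu_mask(cpu, cpu_possible_map)  /* may ever exist */
		prepare_percpu_data(cpu);         /* hypothetical helper */
	for_each_cpu_mask(cpu, cpu_present_map)   /* detected right now */
		register_detected_cpu(cpu);       /* hypothetical helper */
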
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 84921fe8d266..7c88d85c3597 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -301,7 +301,7 @@ SYSCALL(sys_mkdirat,sys_mkdirat,sys_mkdirat_wrapper)
 SYSCALL(sys_mknodat,sys_mknodat,sys_mknodat_wrapper)	/* 290 */
 SYSCALL(sys_fchownat,sys_fchownat,sys_fchownat_wrapper)
 SYSCALL(sys_futimesat,sys_futimesat,compat_sys_futimesat_wrapper)
-SYSCALL(sys_fstatat64,sys_newfstatat,sys32_fstatat_wrapper)
+SYSCALL(sys_fstatat64,sys_newfstatat,sys32_fstatat64_wrapper)
 SYSCALL(sys_unlinkat,sys_unlinkat,sys_unlinkat_wrapper)
 SYSCALL(sys_renameat,sys_renameat,sys_renameat_wrapper)	/* 295 */
 SYSCALL(sys_linkat,sys_linkat,sys_linkat_wrapper)
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 504d56f8ca7f..e73621d03a28 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -446,7 +446,7 @@ endmenu
 
 config ISA_DMA_API
 	bool
-	depends on MPC1211
+	depends on SH_MPC1211
 	default y
 
 menu "Kernel features"
diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index dc49bfb6db0a..9013a90b5c2e 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -288,9 +288,9 @@ static int __init smp_read_mpc(struct mp_config_table *mpc)
 
 	memcpy(str,mpc->mpc_productid,12);
 	str[12]=0;
-	printk(KERN_INFO "Product ID: %s ",str);
+	printk("Product ID: %s ",str);
 
-	printk(KERN_INFO "APIC at: 0x%X\n",mpc->mpc_lapic);
+	printk("APIC at: 0x%X\n",mpc->mpc_lapic);
 
 	/* save the local APIC address, it might be non-default */
 	if (!acpi_lapic)
diff --git a/drivers/acpi/resources/rscalc.c b/drivers/acpi/resources/rscalc.c
index 7d6481d9fbec..4038dbfa63a0 100644
--- a/drivers/acpi/resources/rscalc.c
+++ b/drivers/acpi/resources/rscalc.c
@@ -391,8 +391,7 @@ acpi_rs_get_list_length(u8 * aml_buffer,
 			 * Ensure a 32-bit boundary for the structure
 			 */
 			extra_struct_bytes =
-			    ACPI_ROUND_UP_to_32_bITS(resource_length) -
-			    resource_length;
+			    ACPI_ROUND_UP_to_32_bITS(resource_length);
 			break;
 
 		case ACPI_RESOURCE_NAME_END_TAG:
@@ -408,8 +407,7 @@ acpi_rs_get_list_length(u8 * aml_buffer,
 			 * Add vendor data and ensure a 32-bit boundary for the structure
 			 */
 			extra_struct_bytes =
-			    ACPI_ROUND_UP_to_32_bITS(resource_length) -
-			    resource_length;
+			    ACPI_ROUND_UP_to_32_bITS(resource_length);
 			break;
 
 		case ACPI_RESOURCE_NAME_ADDRESS32:
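
Both hunks above drop the "- resource_length" term, so extra_struct_bytes becomes the whole rounded-up length rather than just the padding. Assuming ACPI_ROUND_UP_to_32_bITS() rounds up to the next 4-byte multiple (an assumption, not confirmed by this diff), the arithmetic looks like this sketch:

	#define ROUND_UP_TO_32BITS(len)	(((len) + 3) & ~3)  /* assumed semantics */

	/* old: pad bytes only,  e.g. len = 6 -> 8 - 6 = 2 */
	/* new: padded total,    e.g. len = 6 -> 8         */
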
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index ec7590951af5..24095f6ee6da 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -33,6 +33,7 @@
 static int TPM_INF_DATA;
 static int TPM_INF_ADDR;
 static int TPM_INF_BASE;
+static int TPM_INF_ADDR_LEN;
 static int TPM_INF_PORT_LEN;
 
 /* TPM header definitions */
@@ -195,6 +196,7 @@ static int tpm_inf_recv(struct tpm_chip *chip, u8 * buf, size_t count)
 	int i;
 	int ret;
 	u32 size = 0;
+	number_of_wtx = 0;
 
 recv_begin:
 	/* start receiving header */
@@ -378,24 +380,35 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
 	if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) &&
 	    !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) {
 		TPM_INF_ADDR = pnp_port_start(dev, 0);
+		TPM_INF_ADDR_LEN = pnp_port_len(dev, 0);
 		TPM_INF_DATA = (TPM_INF_ADDR + 1);
 		TPM_INF_BASE = pnp_port_start(dev, 1);
 		TPM_INF_PORT_LEN = pnp_port_len(dev, 1);
-		if (!TPM_INF_PORT_LEN)
-			return -EINVAL;
+		if ((TPM_INF_PORT_LEN < 4) || (TPM_INF_ADDR_LEN < 2)) {
+			rc = -EINVAL;
+			goto err_last;
+		}
 		dev_info(&dev->dev, "Found %s with ID %s\n",
 			 dev->name, dev_id->id);
-		if (!((TPM_INF_BASE >> 8) & 0xff))
-			return -EINVAL;
+		if (!((TPM_INF_BASE >> 8) & 0xff)) {
+			rc = -EINVAL;
+			goto err_last;
+		}
 		/* publish my base address and request region */
 		tpm_inf.base = TPM_INF_BASE;
 		if (request_region
 		    (tpm_inf.base, TPM_INF_PORT_LEN, "tpm_infineon0") == NULL) {
-			release_region(tpm_inf.base, TPM_INF_PORT_LEN);
-			return -EINVAL;
+			rc = -EINVAL;
+			goto err_last;
+		}
+		if (request_region(TPM_INF_ADDR, TPM_INF_ADDR_LEN,
+				"tpm_infineon0") == NULL) {
+			rc = -EINVAL;
+			goto err_last;
 		}
 	} else {
-		return -EINVAL;
+		rc = -EINVAL;
+		goto err_last;
 	}
 
 	/* query chip for its vendor, its version number a.s.o. */
@@ -443,8 +456,8 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
 			dev_err(&dev->dev,
 				"Could not set IO-ports to 0x%lx\n",
 				tpm_inf.base);
-			release_region(tpm_inf.base, TPM_INF_PORT_LEN);
-			return -EIO;
+			rc = -EIO;
+			goto err_release_region;
 		}
 
 		/* activate register */
@@ -471,14 +484,21 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
 
 		rc = tpm_register_hardware(&dev->dev, &tpm_inf);
 		if (rc < 0) {
-			release_region(tpm_inf.base, TPM_INF_PORT_LEN);
-			return -ENODEV;
+			rc = -ENODEV;
+			goto err_release_region;
 		}
 		return 0;
 	} else {
-		dev_info(&dev->dev, "No Infineon TPM found!\n");
-		return -ENODEV;
+		rc = -ENODEV;
+		goto err_release_region;
 	}
+
+err_release_region:
+	release_region(tpm_inf.base, TPM_INF_PORT_LEN);
+	release_region(TPM_INF_ADDR, TPM_INF_ADDR_LEN);
+
+err_last:
+	return rc;
 }
 
 static __devexit void tpm_inf_pnp_remove(struct pnp_dev *dev)
@@ -518,5 +538,5 @@ module_exit(cleanup_inf);
 
 MODULE_AUTHOR("Marcel Selhorst <selhorst@crypto.rub.de>");
 MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2");
-MODULE_VERSION("1.6");
+MODULE_VERSION("1.7");
 MODULE_LICENSE("GPL");
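
The probe rewrite above replaces scattered return statements with the usual kernel goto-unwind idiom: a single exit path releases whatever was acquired, in reverse order. A generic skeleton of the pattern (illustrative, not driver code; do_chip_setup() is hypothetical):

	static int example_probe(void)
	{
		int rc;

		if (request_region(0x4e, 2, "example") == NULL) {
			rc = -EINVAL;
			goto err_last;		/* nothing acquired yet */
		}
		if (do_chip_setup() < 0) {	/* hypothetical setup step */
			rc = -EIO;
			goto err_release;	/* undo the request_region() */
		}
		return 0;

	err_release:
		release_region(0x4e, 2);
	err_last:
		return rc;
	}
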
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index 3a32c59494f2..24e51d5e97fc 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -151,6 +151,7 @@ struct smu_sdbp_header *smu_sat_get_sdb_partition(unsigned int sat_id, int id,
 	kfree(buf);
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(smu_sat_get_sdb_partition);
 
 /* refresh the cache */
 static int wf_sat_read_cache(struct wf_sat *sat)
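
The forcedeth diff below adds MSI-X support; its set_msix_vector_map() helper packs one 4-bit vector number per interrupt bit into the NvRegMSIXMap0/1 registers. A standalone sketch of that nibble packing for the first eight interrupt bits, extracted from the diff with the hardware write omitted (u32 as in the driver):

	static u32 msix_map0_value(u32 vector, u32 irqmask)
	{
		u32 msixmap = 0;
		int i;

		for (i = 0; i < 8; i++) {
			if ((irqmask >> i) & 0x1)
				msixmap |= vector << (i << 2);	/* nibble i */
		}
		return msixmap;	/* OR this into NvRegMSIXMap0 */
	}
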
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 3682ec61e8a8..e7fc28b07e5a 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -102,6 +102,9 @@
  * 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
  * 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
  * 0.49: 10 Dec 2005: Fix tso for large buffers.
+ * 0.50: 20 Jan 2006: Add 8021pq tagging support.
+ * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
+ * 0.52: 20 Jan 2006: Add MSI/MSIX support.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -113,7 +116,7 @@
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
-#define FORCEDETH_VERSION		"0.49"
+#define FORCEDETH_VERSION		"0.52"
 #define DRV_NAME			"forcedeth"
 
 #include <linux/module.h>
@@ -153,6 +156,9 @@
 #define DEV_HAS_LARGEDESC	0x0004	/* device supports jumbo frames and needs packet format 2 */
 #define DEV_HAS_HIGH_DMA	0x0008	/* device supports 64bit dma */
 #define DEV_HAS_CHECKSUM	0x0010	/* device supports tx and rx checksum offloads */
+#define DEV_HAS_VLAN		0x0020	/* device supports vlan tagging and striping */
+#define DEV_HAS_MSI		0x0040	/* device supports MSI */
+#define DEV_HAS_MSI_X		0x0080	/* device supports MSI-X */
 
 enum {
 	NvRegIrqStatus = 0x000,
@@ -166,14 +172,17 @@ enum {
 #define NVREG_IRQ_TX_OK			0x0010
 #define NVREG_IRQ_TIMER			0x0020
 #define NVREG_IRQ_LINK			0x0040
-#define NVREG_IRQ_TX_ERROR		0x0080
-#define NVREG_IRQ_TX1			0x0100
+#define NVREG_IRQ_RX_FORCED		0x0080
+#define NVREG_IRQ_TX_FORCED		0x0100
 #define NVREG_IRQMASK_THROUGHPUT	0x00df
 #define NVREG_IRQMASK_CPU		0x0040
+#define NVREG_IRQ_TX_ALL		(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
+#define NVREG_IRQ_RX_ALL		(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
+#define NVREG_IRQ_OTHER			(NVREG_IRQ_TIMER|NVREG_IRQ_LINK)
 
 #define NVREG_IRQ_UNKNOWN	(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
-					NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX_ERROR| \
-					NVREG_IRQ_TX1))
+					NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
+					NVREG_IRQ_TX_FORCED))
 
 	NvRegUnknownSetupReg6 = 0x008,
 #define NVREG_UNKSETUP6_VAL		3
@@ -185,6 +194,10 @@ enum {
 	NvRegPollingInterval = 0x00c,
 #define NVREG_POLL_DEFAULT_THROUGHPUT	970
 #define NVREG_POLL_DEFAULT_CPU	13
+	NvRegMSIMap0 = 0x020,
+	NvRegMSIMap1 = 0x024,
+	NvRegMSIIrqMask = 0x030,
+#define NVREG_MSI_VECTOR_0_ENABLED 0x01
 	NvRegMisc1 = 0x080,
 #define NVREG_MISC1_HD			0x02
 #define NVREG_MISC1_FORCE		0x3b0f3c
@@ -254,6 +267,10 @@ enum {
 #define NVREG_TXRXCTL_DESC_1	0
 #define NVREG_TXRXCTL_DESC_2	0x02100
 #define NVREG_TXRXCTL_DESC_3	0x02200
+#define NVREG_TXRXCTL_VLANSTRIP	0x00040
+#define NVREG_TXRXCTL_VLANINS	0x00080
+	NvRegTxRingPhysAddrHigh = 0x148,
+	NvRegRxRingPhysAddrHigh = 0x14C,
 	NvRegMIIStatus = 0x180,
 #define NVREG_MIISTAT_ERROR		0x0001
 #define NVREG_MIISTAT_LINKCHANGE	0x0008
@@ -303,6 +320,11 @@ enum {
 #define NVREG_POWERSTATE_D1		0x0001
 #define NVREG_POWERSTATE_D2		0x0002
 #define NVREG_POWERSTATE_D3		0x0003
+	NvRegVlanControl = 0x300,
+#define NVREG_VLANCONTROL_ENABLE	0x2000
+	NvRegMSIXMap0 = 0x3e0,
+	NvRegMSIXMap1 = 0x3e4,
+	NvRegMSIXIrqStatus = 0x3f0,
 };
 
 /* Big endian: should work, but is untested */
@@ -314,7 +336,7 @@ struct ring_desc {
 struct ring_desc_ex {
 	u32 PacketBufferHigh;
 	u32 PacketBufferLow;
-	u32 Reserved;
+	u32 TxVlan;
 	u32 FlagLen;
 };
 
@@ -355,6 +377,8 @@ typedef union _ring_type {
 #define NV_TX2_CHECKSUM_L3	(1<<27)
 #define NV_TX2_CHECKSUM_L4	(1<<26)
 
+#define NV_TX3_VLAN_TAG_PRESENT	(1<<18)
+
 #define NV_RX_DESCRIPTORVALID	(1<<16)
 #define NV_RX_MISSEDFRAME	(1<<17)
 #define NV_RX_SUBSTRACT1	(1<<18)
@@ -385,6 +409,9 @@ typedef union _ring_type {
 #define NV_RX2_ERROR		(1<<30)
 #define NV_RX2_AVAIL		(1<<31)
 
+#define NV_RX3_VLAN_TAG_PRESENT	(1<<16)
+#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)
+
 /* Miscelaneous hardware related defines: */
 #define NV_PCI_REGSZ		0x270
 
@@ -475,6 +502,18 @@ typedef union _ring_type {
 #define LPA_1000FULL	0x0800
 #define LPA_1000HALF	0x0400
 
+/* MSI/MSI-X defines */
+#define NV_MSI_X_MAX_VECTORS	8
+#define NV_MSI_X_VECTORS_MASK	0x000f
+#define NV_MSI_CAPABLE		0x0010
+#define NV_MSI_X_CAPABLE	0x0020
+#define NV_MSI_ENABLED		0x0040
+#define NV_MSI_X_ENABLED	0x0080
+
+#define NV_MSI_X_VECTOR_ALL	0x0
+#define NV_MSI_X_VECTOR_RX	0x0
+#define NV_MSI_X_VECTOR_TX	0x1
+#define NV_MSI_X_VECTOR_OTHER	0x2
 
 /*
  * SMP locking:
@@ -511,6 +550,7 @@ struct fe_priv {
 	u32 irqmask;
 	u32 desc_ver;
 	u32 txrxctl_bits;
+	u32 vlanctl_bits;
 
 	void __iomem *base;
 
@@ -525,6 +565,7 @@
 	unsigned int pkt_limit;
 	struct timer_list oom_kick;
 	struct timer_list nic_poll;
+	u32 nic_poll_irq;
 
 	/* media detection workaround.
 	 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
@@ -540,6 +581,13 @@
 	dma_addr_t tx_dma[TX_RING];
 	unsigned int tx_dma_len[TX_RING];
 	u32 tx_flags;
+
+	/* vlan fields */
+	struct vlan_group *vlangrp;
+
+	/* msi/msi-x fields */
+	u32 msi_flags;
+	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
 };
 
 /*
@@ -567,6 +615,16 @@ static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
  */
 static int poll_interval = -1;
 
+/*
+ * Disable MSI interrupts
+ */
+static int disable_msi = 0;
+
+/*
+ * Disable MSIX interrupts
+ */
+static int disable_msix = 0;
+
 static inline struct fe_priv *get_nvpriv(struct net_device *dev)
 {
 	return netdev_priv(dev);
@@ -612,6 +670,33 @@ static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
 	return 0;
 }
 
+#define NV_SETUP_RX_RING 0x01
+#define NV_SETUP_TX_RING 0x02
+
+static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+
+	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+		if (rxtx_flags & NV_SETUP_RX_RING) {
+			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
+		}
+		if (rxtx_flags & NV_SETUP_TX_RING) {
+			writel((u32) cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+		}
+	} else {
+		if (rxtx_flags & NV_SETUP_RX_RING) {
+			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
+			writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh);
+		}
+		if (rxtx_flags & NV_SETUP_TX_RING) {
+			writel((u32) cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
+			writel((u32) (cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
+		}
+	}
+}
+
 #define MII_READ	(-1)
 /* mii_rw: read/write a register on the PHY.
  *
@@ -903,14 +988,27 @@ static void nv_do_rx_refill(unsigned long data)
 	struct net_device *dev = (struct net_device *) data;
 	struct fe_priv *np = netdev_priv(dev);
 
-	disable_irq(dev->irq);
+
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		disable_irq(dev->irq);
+	} else {
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+	}
 	if (nv_alloc_rx(dev)) {
 		spin_lock(&np->lock);
 		if (!np->in_shutdown)
 			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
 		spin_unlock(&np->lock);
 	}
-	enable_irq(dev->irq);
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		enable_irq(dev->irq);
+	} else {
+		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+	}
 }
 
 static void nv_init_rx(struct net_device *dev)
@@ -965,7 +1063,7 @@ static int nv_release_txskb(struct net_device *dev, unsigned int skbnr)
 	}
 
 	if (np->tx_skbuff[skbnr]) {
-		dev_kfree_skb_irq(np->tx_skbuff[skbnr]);
+		dev_kfree_skb_any(np->tx_skbuff[skbnr]);
 		np->tx_skbuff[skbnr] = NULL;
 		return 1;
 	} else {
@@ -1031,6 +1129,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	u32 bcnt;
 	u32 size = skb->len-skb->data_len;
 	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
+	u32 tx_flags_vlan = 0;
 
 	/* add fragments to entries count */
 	for (i = 0; i < fragments; i++) {
@@ -1111,10 +1210,16 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 #endif
 	tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
 
+	/* vlan tag */
+	if (np->vlangrp && vlan_tx_tag_present(skb)) {
+		tx_flags_vlan = NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb);
+	}
+
 	/* set tx flags */
 	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
 		np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
 	} else {
+		np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan);
 		np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
 	}
 
@@ -1209,9 +1314,14 @@ static void nv_tx_timeout(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
+	u32 status;
+
+	if (np->msi_flags & NV_MSI_X_ENABLED)
+		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
+	else
+		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
 
-	printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name,
-	       readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK);
+	printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);
 
 	{
 		int i;
@@ -1273,10 +1383,7 @@ static void nv_tx_timeout(struct net_device *dev)
 		printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
 		nv_drain_tx(dev);
 		np->next_tx = np->nic_tx = 0;
-		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-			writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
-		else
-			writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
+		setup_hw_rings(dev, NV_SETUP_TX_RING);
 		netif_wake_queue(dev);
 	}
 
@@ -1342,6 +1449,8 @@ static void nv_rx_process(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u32 Flags;
+	u32 vlanflags = 0;
+
 
 	for (;;) {
 		struct sk_buff *skb;
@@ -1357,6 +1466,7 @@ static void nv_rx_process(struct net_device *dev)
 		} else {
 			Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen);
 			len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
+			vlanflags = le32_to_cpu(np->rx_ring.ex[i].PacketBufferLow);
 		}
 
 		dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
@@ -1474,7 +1584,11 @@ static void nv_rx_process(struct net_device *dev)
 		skb->protocol = eth_type_trans(skb, dev);
 		dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n",
 					dev->name, np->cur_rx, len, skb->protocol);
-		netif_rx(skb);
+		if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) {
+			vlan_hwaccel_rx(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK);
+		} else {
+			netif_rx(skb);
+		}
 		dev->last_rx = jiffies;
 		np->stats.rx_packets++;
 		np->stats.rx_bytes += len;
@@ -1523,7 +1637,15 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 	 * guessed, there is probably a simpler approach.
 	 * Changing the MTU is a rare event, it shouldn't matter.
 	 */
-	disable_irq(dev->irq);
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		disable_irq(dev->irq);
+	} else {
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+	}
 	spin_lock_bh(&dev->xmit_lock);
 	spin_lock(&np->lock);
 	/* stop engines */
@@ -1544,11 +1666,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 		}
 		/* reinit nic view of the rx queue */
 		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
-		writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
-		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-			writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
-		else
-			writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
+		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
 		writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
 			base + NvRegRingSizes);
 		pci_push(base);
@@ -1560,7 +1678,15 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 		nv_start_tx(dev);
 		spin_unlock(&np->lock);
 		spin_unlock_bh(&dev->xmit_lock);
-		enable_irq(dev->irq);
+		if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+		    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+		     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+			enable_irq(dev->irq);
+		} else {
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+		}
 	}
 	return 0;
 }
@@ -1866,8 +1992,13 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
 	dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
 
 	for (i=0; ; i++) {
-		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
-		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+		if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
+			events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
+			writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+		} else {
+			events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
+			writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
+		}
 		pci_push(base);
 		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
 		if (!(events & np->irqmask))
@@ -1907,11 +2038,16 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
 		if (i > max_interrupt_work) {
 			spin_lock(&np->lock);
 			/* disable interrupts on the nic */
-			writel(0, base + NvRegIrqMask);
+			if (!(np->msi_flags & NV_MSI_X_ENABLED))
+				writel(0, base + NvRegIrqMask);
+			else
+				writel(np->irqmask, base + NvRegIrqMask);
 			pci_push(base);
 
-			if (!np->in_shutdown)
+			if (!np->in_shutdown) {
+				np->nic_poll_irq = np->irqmask;
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
 			spin_unlock(&np->lock);
 			break;
@@ -1923,22 +2059,212 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
 	return IRQ_RETVAL(i);
 }
 
+static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
+{
+	struct net_device *dev = (struct net_device *) data;
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+	u32 events;
+	int i;
+
+	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
+
+	for (i=0; ; i++) {
+		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
+		writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
+		pci_push(base);
+		dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
+		if (!(events & np->irqmask))
+			break;
+
+		spin_lock(&np->lock);
+		nv_tx_done(dev);
+		spin_unlock(&np->lock);
+
+		if (events & (NVREG_IRQ_TX_ERR)) {
+			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
+						dev->name, events);
+		}
+		if (i > max_interrupt_work) {
+			spin_lock(&np->lock);
+			/* disable interrupts on the nic */
+			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
+			pci_push(base);
+
+			if (!np->in_shutdown) {
+				np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
+				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+			}
+			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
+			spin_unlock(&np->lock);
+			break;
+		}
+
+	}
+	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);
+
+	return IRQ_RETVAL(i);
+}
+
+static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
+{
+	struct net_device *dev = (struct net_device *) data;
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+	u32 events;
+	int i;
+
+	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
+
+	for (i=0; ; i++) {
+		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
+		writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
+		pci_push(base);
+		dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
+		if (!(events & np->irqmask))
+			break;
+
+		nv_rx_process(dev);
+		if (nv_alloc_rx(dev)) {
+			spin_lock(&np->lock);
+			if (!np->in_shutdown)
+				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+			spin_unlock(&np->lock);
+		}
+
+		if (i > max_interrupt_work) {
+			spin_lock(&np->lock);
+			/* disable interrupts on the nic */
+			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
+			pci_push(base);
+
+			if (!np->in_shutdown) {
+				np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
+				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+			}
+			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
+			spin_unlock(&np->lock);
+			break;
+		}
+
+	}
+	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
+
+	return IRQ_RETVAL(i);
+}
+
+static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
+{
+	struct net_device *dev = (struct net_device *) data;
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+	u32 events;
+	int i;
+
+	dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
+
+	for (i=0; ; i++) {
+		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
+		writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
+		pci_push(base);
+		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
+		if (!(events & np->irqmask))
+			break;
+
+		if (events & NVREG_IRQ_LINK) {
+			spin_lock(&np->lock);
+			nv_link_irq(dev);
+			spin_unlock(&np->lock);
+		}
+		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
+			spin_lock(&np->lock);
+			nv_linkchange(dev);
+			spin_unlock(&np->lock);
+			np->link_timeout = jiffies + LINK_TIMEOUT;
+		}
+		if (events & (NVREG_IRQ_UNKNOWN)) {
+			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
+						dev->name, events);
+		}
+		if (i > max_interrupt_work) {
+			spin_lock(&np->lock);
+			/* disable interrupts on the nic */
+			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
+			pci_push(base);
+
+			if (!np->in_shutdown) {
+				np->nic_poll_irq |= NVREG_IRQ_OTHER;
+				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+			}
+			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
+			spin_unlock(&np->lock);
+			break;
+		}
+
+	}
+	dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);
+
+	return IRQ_RETVAL(i);
+}
+
 static void nv_do_nic_poll(unsigned long data)
 {
 	struct net_device *dev = (struct net_device *) data;
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
+	u32 mask = 0;
 
-	disable_irq(dev->irq);
-	/* FIXME: Do we need synchronize_irq(dev->irq) here? */
 	/*
+	 * First disable irq(s) and then
 	 * reenable interrupts on the nic, we have to do this before calling
 	 * nv_nic_irq because that may decide to do otherwise
 	 */
-	writel(np->irqmask, base + NvRegIrqMask);
+
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		disable_irq(dev->irq);
+		mask = np->irqmask;
+	} else {
+		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
+			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+			mask |= NVREG_IRQ_RX_ALL;
+		}
+		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
+			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+			mask |= NVREG_IRQ_TX_ALL;
+		}
+		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
+			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+			mask |= NVREG_IRQ_OTHER;
+		}
+	}
+	np->nic_poll_irq = 0;
+
+	/* FIXME: Do we need synchronize_irq(dev->irq) here? */
+
+	writel(mask, base + NvRegIrqMask);
 	pci_push(base);
-	nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
-	enable_irq(dev->irq);
+
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
+		enable_irq(dev->irq);
+	} else {
+		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
+			nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL);
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+		}
+		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
+			nv_nic_irq_tx((int) 0, (void *) data, (struct pt_regs *) NULL);
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+		}
+		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
+			nv_nic_irq_other((int) 0, (void *) data, (struct pt_regs *) NULL);
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+		}
+	}
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2217,11 +2543,66 @@ static struct ethtool_ops ops = {
 	.get_perm_addr = ethtool_op_get_perm_addr,
 };
 
+static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+
+	spin_lock_irq(&np->lock);
+
+	/* save vlan group */
+	np->vlangrp = grp;
+
+	if (grp) {
+		/* enable vlan on MAC */
+		np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
+	} else {
+		/* disable vlan on MAC */
+		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
+		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
+	}
+
+	writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+
+	spin_unlock_irq(&np->lock);
+};
+
+static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+	/* nothing to do */
+};
+
+static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
+{
+	u8 __iomem *base = get_hwbase(dev);
+	int i;
+	u32 msixmap = 0;
+
+	/* Each interrupt bit can be mapped to a MSIX vector (4 bits).
+	 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
+	 * the remaining 8 interrupts.
+	 */
+	for (i = 0; i < 8; i++) {
+		if ((irqmask >> i) & 0x1) {
+			msixmap |= vector << (i << 2);
+		}
+	}
+	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
+
+	msixmap = 0;
+	for (i = 0; i < 8; i++) {
+		if ((irqmask >> (i + 8)) & 0x1) {
+			msixmap |= vector << (i << 2);
+		}
+	}
+	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
+}
+
 static int nv_open(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
-	int ret, oom, i;
+	int ret = 1;
+	int oom, i;
 
 	dprintk(KERN_DEBUG "nv_open: begin\n");
 
@@ -2253,11 +2634,7 @@
 	nv_copy_mac_to_hw(dev);
 
 	/* 4) give hw rings */
-	writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-		writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
-	else
-		writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
+	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
 	writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
 		base + NvRegRingSizes);
 
@@ -2265,6 +2642,7 @@ static int nv_open(struct net_device *dev)
 	writel(np->linkspeed, base + NvRegLinkSpeed);
 	writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3);
 	writel(np->txrxctl_bits, base + NvRegTxRxControl);
+	writel(np->vlanctl_bits, base + NvRegVlanControl);
 	pci_push(base);
 	writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
 	reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
@@ -2315,9 +2693,77 @@ static int nv_open(struct net_device *dev)
2315 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 2693 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
2316 pci_push(base); 2694 pci_push(base);
2317 2695
2318 ret = request_irq(dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev); 2696 if (np->msi_flags & NV_MSI_X_CAPABLE) {
2319 if (ret) 2697 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
2320 goto out_drain; 2698 np->msi_x_entry[i].entry = i;
2699 }
2700 if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
2701 np->msi_flags |= NV_MSI_X_ENABLED;
2702 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
2703 /* Request irq for rx handling */
2704 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
2705 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
2706 pci_disable_msix(np->pci_dev);
2707 np->msi_flags &= ~NV_MSI_X_ENABLED;
2708 goto out_drain;
2709 }
2710 /* Request irq for tx handling */
2711 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
2712 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
2713 pci_disable_msix(np->pci_dev);
2714 np->msi_flags &= ~NV_MSI_X_ENABLED;
2715 goto out_drain;
2716 }
2717 /* Request irq for link and timer handling */
2718 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
2719 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
2720 pci_disable_msix(np->pci_dev);
2721 np->msi_flags &= ~NV_MSI_X_ENABLED;
2722 goto out_drain;
2723 }
2724
2725 /* map interrupts to their respective vector */
2726 writel(0, base + NvRegMSIXMap0);
2727 writel(0, base + NvRegMSIXMap1);
2728 set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
2729 set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
2730 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
2731 } else {
2732 /* Request irq for all interrupts */
2733 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
2734 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
2735 pci_disable_msix(np->pci_dev);
2736 np->msi_flags &= ~NV_MSI_X_ENABLED;
2737 goto out_drain;
2738 }
2739
2740 /* map interrupts to vector 0 */
2741 writel(0, base + NvRegMSIXMap0);
2742 writel(0, base + NvRegMSIXMap1);
2743 }
2744 }
2745 }
2746 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
2747 if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
2748 np->msi_flags |= NV_MSI_ENABLED;
2749 if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
2750 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
2751 pci_disable_msi(np->pci_dev);
2752 np->msi_flags &= ~NV_MSI_ENABLED;
2753 goto out_drain;
2754 }
2755
2756 /* map interrupts to vector 0 */
2757 writel(0, base + NvRegMSIMap0);
2758 writel(0, base + NvRegMSIMap1);
2759 /* enable msi vector 0 */
2760 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
2761 }
2762 }
2763 if (ret != 0) {
2764 if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
2765 goto out_drain;
2766 }
2321 2767
2322 /* ask for interrupts */ 2768 /* ask for interrupts */
2323 writel(np->irqmask, base + NvRegIrqMask); 2769 writel(np->irqmask, base + NvRegIrqMask);
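
The hunk above replaces the driver's single request_irq() call with a three-level fallback: try MSI-X with one vector each for rx, tx and link/timer events (unwinding completely if any request_irq() fails), fall back to plain MSI, and finally to the shared INTx line. A minimal userspace sketch of that control flow follows; the pci_enable_msix()/pci_enable_msi()/request_irq() calls are stubs and the flag values are illustrative assumptions, not the driver's.

    #include <stdio.h>

    /* Illustrative capability/state bits standing in for np->msi_flags. */
    #define MSI_X_CAPABLE 0x20
    #define MSI_CAPABLE   0x10
    #define MSI_X_ENABLED 0x80
    #define MSI_ENABLED   0x40

    /* Stubs for the PCI/IRQ APIs; MSI-X allocation is made to fail here. */
    static int enable_msix(void) { return -1; }
    static int enable_msi(void)  { return 0; }
    static int request(const char *name) { printf("request_irq: %s\n", name); return 0; }

    static int setup_interrupts(unsigned *flags)
    {
        int ret = -1;

        if (*flags & MSI_X_CAPABLE) {
            ret = enable_msix();
            if (ret == 0) {
                *flags |= MSI_X_ENABLED;
                /* one vector per event class; unwind fully on failure */
                if (request("rx") || request("tx") || request("other")) {
                    *flags &= ~MSI_X_ENABLED;
                    return -1;        /* the driver's goto out_drain */
                }
                return 0;
            }
        }
        if (ret != 0 && (*flags & MSI_CAPABLE)) {
            ret = enable_msi();
            if (ret == 0) {
                *flags |= MSI_ENABLED;
                if (request("msi")) {
                    *flags &= ~MSI_ENABLED;
                    return -1;
                }
                return 0;
            }
        }
        /* last resort: the classic shared INTx interrupt */
        return ret != 0 ? request("intx") : 0;
    }

    int main(void)
    {
        unsigned flags = MSI_X_CAPABLE | MSI_CAPABLE;
        return setup_interrupts(&flags);
    }
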
@@ -2364,6 +2810,7 @@ static int nv_close(struct net_device *dev)
2364{ 2810{
2365 struct fe_priv *np = netdev_priv(dev); 2811 struct fe_priv *np = netdev_priv(dev);
2366 u8 __iomem *base; 2812 u8 __iomem *base;
2813 int i;
2367 2814
2368 spin_lock_irq(&np->lock); 2815 spin_lock_irq(&np->lock);
2369 np->in_shutdown = 1; 2816 np->in_shutdown = 1;
@@ -2381,13 +2828,31 @@ static int nv_close(struct net_device *dev)
2381 2828
2382 /* disable interrupts on the nic or we will lock up */ 2829 /* disable interrupts on the nic or we will lock up */
2383 base = get_hwbase(dev); 2830 base = get_hwbase(dev);
2384 writel(0, base + NvRegIrqMask); 2831 if (np->msi_flags & NV_MSI_X_ENABLED) {
2832 writel(np->irqmask, base + NvRegIrqMask);
2833 } else {
2834 if (np->msi_flags & NV_MSI_ENABLED)
2835 writel(0, base + NvRegMSIIrqMask);
2836 writel(0, base + NvRegIrqMask);
2837 }
2385 pci_push(base); 2838 pci_push(base);
2386 dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name); 2839 dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
2387 2840
2388 spin_unlock_irq(&np->lock); 2841 spin_unlock_irq(&np->lock);
2389 2842
2390 free_irq(dev->irq, dev); 2843 if (np->msi_flags & NV_MSI_X_ENABLED) {
2844 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
2845 free_irq(np->msi_x_entry[i].vector, dev);
2846 }
2847 pci_disable_msix(np->pci_dev);
2848 np->msi_flags &= ~NV_MSI_X_ENABLED;
2849 } else {
2850 free_irq(np->pci_dev->irq, dev);
2851 if (np->msi_flags & NV_MSI_ENABLED) {
2852 pci_disable_msi(np->pci_dev);
2853 np->msi_flags &= ~NV_MSI_ENABLED;
2854 }
2855 }
2391 2856
2392 drain_ring(dev); 2857 drain_ring(dev);
2393 2858
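
nv_close() mirrors that setup in reverse: with MSI-X active, each vector is freed individually before pci_disable_msix() runs and the enabled bit is cleared, so a later nv_open() starts from a clean slate. A reduced sketch of that teardown order, with stubbed calls and the same illustrative flag bits as above:

    #include <stdio.h>

    #define MSI_X_ENABLED 0x80
    #define VECTORS_MASK  0x0f   /* models NV_MSI_X_VECTORS_MASK */

    static void free_vector(int v) { printf("free_irq(vector %d)\n", v); }
    static void disable_msix(void) { printf("pci_disable_msix()\n"); }

    static void teardown(unsigned *flags)
    {
        if (*flags & MSI_X_ENABLED) {
            for (int i = 0; i < (int)(*flags & VECTORS_MASK); i++)
                free_vector(i);          /* every vector, before disabling */
            disable_msix();
            *flags &= ~MSI_X_ENABLED;
        }
    }

    int main(void)
    {
        unsigned flags = MSI_X_ENABLED | 0x03;  /* three vectors (throughput mode) */
        teardown(&flags);
        return 0;
    }
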
@@ -2471,7 +2936,14 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
2471 printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n", 2936 printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
2472 pci_name(pci_dev)); 2937 pci_name(pci_dev));
2473 } else { 2938 } else {
2474 dev->features |= NETIF_F_HIGHDMA; 2939 if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
2940 printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
2941 pci_name(pci_dev));
2942 goto out_relreg;
2943 } else {
2944 dev->features |= NETIF_F_HIGHDMA;
2945 printk(KERN_INFO "forcedeth: using HIGHDMA\n");
2946 }
2475 } 2947 }
2476 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; 2948 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
2477 } else if (id->driver_data & DEV_HAS_LARGEDESC) { 2949 } else if (id->driver_data & DEV_HAS_LARGEDESC) {
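
The probe hunk above tightens the 64-bit DMA path: setting the streaming mask alone is no longer enough, the consistent (coherent) mask must also be accepted before NETIF_F_HIGHDMA is advertised, and the probe is aborted if it is not. The decision tree, reduced to stubs (the second call is made to fail to show the bail-out):

    #include <stdio.h>
    #include <stdint.h>

    #define DMA_39BIT_MASK 0x0000007fffffffffULL   /* the mask used above */

    /* stand-ins for pci_set_dma_mask()/pci_set_consistent_dma_mask() */
    static int set_dma_mask(uint64_t mask)            { (void)mask; return 0; }
    static int set_consistent_dma_mask(uint64_t mask) { (void)mask; return -5; }

    int main(void)
    {
        if (set_dma_mask(DMA_39BIT_MASK) != 0) {
            printf("64-bit DMA failed, using 32-bit addressing\n");
        } else if (set_consistent_dma_mask(DMA_39BIT_MASK) != 0) {
            printf("64-bit DMA (consistent) failed, aborting probe\n");
            return 1;                  /* the driver's goto out_relreg */
        } else {
            printf("using HIGHDMA\n");
        }
        return 0;
    }
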
@@ -2496,6 +2968,22 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
2496#endif 2968#endif
2497 } 2969 }
2498 2970
2971 np->vlanctl_bits = 0;
2972 if (id->driver_data & DEV_HAS_VLAN) {
2973 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
2974 dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
2975 dev->vlan_rx_register = nv_vlan_rx_register;
2976 dev->vlan_rx_kill_vid = nv_vlan_rx_kill_vid;
2977 }
2978
2979 np->msi_flags = 0;
2980 if ((id->driver_data & DEV_HAS_MSI) && !disable_msi) {
2981 np->msi_flags |= NV_MSI_CAPABLE;
2982 }
2983 if ((id->driver_data & DEV_HAS_MSI_X) && !disable_msix) {
2984 np->msi_flags |= NV_MSI_X_CAPABLE;
2985 }
2986
2499 err = -ENOMEM; 2987 err = -ENOMEM;
2500 np->base = ioremap(addr, NV_PCI_REGSZ); 2988 np->base = ioremap(addr, NV_PCI_REGSZ);
2501 if (!np->base) 2989 if (!np->base)
@@ -2578,10 +3066,15 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
2578 } else { 3066 } else {
2579 np->tx_flags = NV_TX2_VALID; 3067 np->tx_flags = NV_TX2_VALID;
2580 } 3068 }
2581 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) 3069 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
2582 np->irqmask = NVREG_IRQMASK_THROUGHPUT; 3070 np->irqmask = NVREG_IRQMASK_THROUGHPUT;
2583 else 3071 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
3072 np->msi_flags |= 0x0003;
3073 } else {
2584 np->irqmask = NVREG_IRQMASK_CPU; 3074 np->irqmask = NVREG_IRQMASK_CPU;
3075 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
3076 np->msi_flags |= 0x0001;
3077 }
2585 3078
2586 if (id->driver_data & DEV_NEED_TIMERIRQ) 3079 if (id->driver_data & DEV_NEED_TIMERIRQ)
2587 np->irqmask |= NVREG_IRQ_TIMER; 3080 np->irqmask |= NVREG_IRQ_TIMER;
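
Note how the low bits of msi_flags double as the MSI-X vector count: throughput mode asks for three vectors (rx, tx, other), CPU mode for a single shared one, and nv_open() later recovers the count through NV_MSI_X_VECTORS_MASK. A two-line check of that encoding (mask value assumed, as before):

    #include <stdio.h>

    #define VECTORS_MASK 0x000f   /* models NV_MSI_X_VECTORS_MASK */

    int main(void)
    {
        unsigned throughput = 0x0003;  /* rx + tx + other */
        unsigned cpu_mode   = 0x0001;  /* one shared vector */

        printf("throughput mode: %u vectors\n", throughput & VECTORS_MASK);
        printf("cpu mode: %u vector\n", cpu_mode & VECTORS_MASK);
        return 0;
    }
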
@@ -2737,11 +3230,11 @@ static struct pci_device_id pci_tbl[] = {
2737 }, 3230 },
2738 { /* MCP55 Ethernet Controller */ 3231 { /* MCP55 Ethernet Controller */
2739 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14), 3232 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
2740 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, 3233 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X,
2741 }, 3234 },
2742 { /* MCP55 Ethernet Controller */ 3235 { /* MCP55 Ethernet Controller */
2743 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15), 3236 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
2744 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, 3237 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X,
2745 }, 3238 },
2746 {0,}, 3239 {0,},
2747}; 3240};
@@ -2771,6 +3264,10 @@ module_param(optimization_mode, int, 0);
2771MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer."); 3264MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
2772module_param(poll_interval, int, 0); 3265module_param(poll_interval, int, 0);
2773MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535."); 3266MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
3267module_param(disable_msi, int, 0);
3268MODULE_PARM_DESC(disable_msi, "Disable MSI interrupts by setting to 1.");
3269module_param(disable_msix, int, 0);
3270MODULE_PARM_DESC(disable_msix, "Disable MSIX interrupts by setting to 1.");
2774 3271
2775MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>"); 3272MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
2776MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver"); 3273MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 062fb100d94c..afc4e88551ad 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -359,7 +359,7 @@ ccw_device_set_online(struct ccw_device *cdev)
359 else 359 else
360 pr_debug("ccw_device_offline returned %d, device %s\n", 360 pr_debug("ccw_device_offline returned %d, device %s\n",
361 ret, cdev->dev.bus_id); 361 ret, cdev->dev.bus_id);
362 return (ret = 0) ? -ENODEV : ret; 362 return (ret == 0) ? -ENODEV : ret;
363} 363}
364 364
365static ssize_t 365static ssize_t
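
The one-character fix above replaces an assignment with the comparison that was intended: "(ret = 0)" stores 0 into ret and then tests that 0, so the function returned 0 for every input and could never report -ENODEV. A compact demonstration of the difference:

    #include <stdio.h>

    #define ENODEV 19

    static int buggy(int ret) { return (ret = 0) ? -ENODEV : ret; }
    static int fixed(int ret) { return (ret == 0) ? -ENODEV : ret; }

    int main(void)
    {
        /* the buggy form collapses everything to 0 */
        printf("buggy(0)=%d buggy(-5)=%d\n", buggy(0), buggy(-5));
        /* the fixed form maps 0 to -ENODEV and passes errors through */
        printf("fixed(0)=%d fixed(-5)=%d\n", fixed(0), fixed(-5));
        return 0;
    }
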
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index d2a5b04d7cba..85b1020a1fcc 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -405,7 +405,7 @@ __ccw_device_disband_start(struct ccw_device *cdev)
405 cdev->private->iretry = 5; 405 cdev->private->iretry = 5;
406 cdev->private->imask >>= 1; 406 cdev->private->imask >>= 1;
407 } 407 }
408 ccw_device_verify_done(cdev, (sch->lpm != 0) ? 0 : -ENODEV); 408 ccw_device_disband_done(cdev, (sch->lpm != 0) ? 0 : -ENODEV);
409} 409}
410 410
411/* 411/*
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index dad4dd9887c9..6c762b43f921 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -317,7 +317,6 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
317 /* 317 /*
318 * We have ending status but no sense information. Do a basic sense. 318 * We have ending status but no sense information. Do a basic sense.
319 */ 319 */
320 sch = to_subchannel(cdev->dev.parent);
321 sch->sense_ccw.cmd_code = CCW_CMD_BASIC_SENSE; 320 sch->sense_ccw.cmd_code = CCW_CMD_BASIC_SENSE;
322 sch->sense_ccw.cda = (__u32) __pa(cdev->private->irb.ecw); 321 sch->sense_ccw.cda = (__u32) __pa(cdev->private->irb.ecw);
323 sch->sense_ccw.count = SENSE_MAX_COUNT; 322 sch->sense_ccw.count = SENSE_MAX_COUNT;
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 46c4cdbaee86..7ddd5a69352a 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -614,7 +614,7 @@ int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
614 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) { 614 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
615 /* Unable to use DMA due to host limitation */ 615 /* Unable to use DMA due to host limitation */
616 tf->protocol = ATA_PROT_PIO; 616 tf->protocol = ATA_PROT_PIO;
617 index = dev->multi_count ? 0 : 4; 617 index = dev->multi_count ? 0 : 8;
618 } else { 618 } else {
619 tf->protocol = ATA_PROT_DMA; 619 tf->protocol = ATA_PROT_DMA;
620 index = 16; 620 index = 16;
@@ -3357,11 +3357,12 @@ static void ata_pio_error(struct ata_port *ap)
3357{ 3357{
3358 struct ata_queued_cmd *qc; 3358 struct ata_queued_cmd *qc;
3359 3359
3360 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
3361
3362 qc = ata_qc_from_tag(ap, ap->active_tag); 3360 qc = ata_qc_from_tag(ap, ap->active_tag);
3363 assert(qc != NULL); 3361 assert(qc != NULL);
3364 3362
3363 if (qc->tf.command != ATA_CMD_PACKET)
3364 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
3365
3365 /* make sure qc->err_mask is available to 3366 /* make sure qc->err_mask is available to
3366 * know what's wrong and recover 3367 * know what's wrong and recover
3367 */ 3368 */
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index 6fddf17a3b70..2770005324b4 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -997,6 +997,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
997 case ATA_CMD_READ_EXT: 997 case ATA_CMD_READ_EXT:
998 case ATA_CMD_WRITE: 998 case ATA_CMD_WRITE:
999 case ATA_CMD_WRITE_EXT: 999 case ATA_CMD_WRITE_EXT:
1000 case ATA_CMD_WRITE_FUA_EXT:
1000 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0); 1001 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1001 break; 1002 break;
1002#ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */ 1003#ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index 2e2c3b7acb0c..e484e8db6810 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -81,6 +81,19 @@
81/* Port stride */ 81/* Port stride */
82#define VSC_SATA_PORT_OFFSET 0x200 82#define VSC_SATA_PORT_OFFSET 0x200
83 83
84/* Error interrupt status bit offsets */
85#define VSC_SATA_INT_ERROR_E_OFFSET 2
86#define VSC_SATA_INT_ERROR_P_OFFSET 4
87#define VSC_SATA_INT_ERROR_T_OFFSET 5
88#define VSC_SATA_INT_ERROR_M_OFFSET 1
89#define is_vsc_sata_int_err(port_idx, int_status) \
90 (int_status & ((1 << (VSC_SATA_INT_ERROR_E_OFFSET + (8 * port_idx))) | \
91 (1 << (VSC_SATA_INT_ERROR_P_OFFSET + (8 * port_idx))) | \
92 (1 << (VSC_SATA_INT_ERROR_T_OFFSET + (8 * port_idx))) | \
93 (1 << (VSC_SATA_INT_ERROR_M_OFFSET + (8 * port_idx))) \
94 )\
95 )
96
84 97
85static u32 vsc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg) 98static u32 vsc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
86{ 99{
@@ -201,13 +214,28 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance,
201 struct ata_port *ap; 214 struct ata_port *ap;
202 215
203 ap = host_set->ports[i]; 216 ap = host_set->ports[i];
217
218 if (is_vsc_sata_int_err(i, int_status)) {
219 u32 err_status;
220 printk(KERN_DEBUG "%s: ignoring interrupt(s)\n", __FUNCTION__);
221 err_status = ap ? vsc_sata_scr_read(ap, SCR_ERROR) : 0;
222 vsc_sata_scr_write(ap, SCR_ERROR, err_status);
223 handled++;
224 }
225
204 if (ap && !(ap->flags & 226 if (ap && !(ap->flags &
205 (ATA_FLAG_PORT_DISABLED|ATA_FLAG_NOINTR))) { 227 (ATA_FLAG_PORT_DISABLED|ATA_FLAG_NOINTR))) {
206 struct ata_queued_cmd *qc; 228 struct ata_queued_cmd *qc;
207 229
208 qc = ata_qc_from_tag(ap, ap->active_tag); 230 qc = ata_qc_from_tag(ap, ap->active_tag);
209 if (qc && (!(qc->tf.ctl & ATA_NIEN))) 231 if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
210 handled += ata_host_intr(ap, qc); 232 handled += ata_host_intr(ap, qc);
233 } else {
234 printk(KERN_DEBUG "%s: ignoring interrupt(s)\n", __FUNCTION__);
235 ata_chk_status(ap);
236 handled++;
237 }
238
211 } 239 }
212 } 240 }
213 } 241 }
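
The is_vsc_sata_int_err() macro introduced above packs four error bits per port into the shared 32-bit interrupt status, with each port's bits shifted up by eight. A quick standalone check of the masks it generates:

    #include <stdio.h>
    #include <stdint.h>

    /* same bit offsets as the macro above */
    #define E_OFF 2
    #define P_OFF 4
    #define T_OFF 5
    #define M_OFF 1

    static uint32_t err_mask(int port)
    {
        return (1u << (E_OFF + 8 * port)) | (1u << (P_OFF + 8 * port)) |
               (1u << (T_OFF + 8 * port)) | (1u << (M_OFF + 8 * port));
    }

    int main(void)
    {
        for (int port = 0; port < 4; port++)
            printf("port %d error mask: 0x%08x\n", port, err_mask(port));
        return 0;   /* 0x00000036, 0x00003600, 0x00360000, 0x36000000 */
    }
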
diff --git a/fs/compat.c b/fs/compat.c
index a2ba78bdf7f7..5333c7d7427f 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1757,7 +1757,7 @@ asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
1757 goto sticky; 1757 goto sticky;
1758 rtv.tv_usec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)); 1758 rtv.tv_usec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ));
1759 rtv.tv_sec = timeout; 1759 rtv.tv_sec = timeout;
1760 if (compat_timeval_compare(&rtv, &tv) < 0) 1760 if (compat_timeval_compare(&rtv, &tv) >= 0)
1761 rtv = tv; 1761 rtv = tv;
1762 if (copy_to_user(tvp, &rtv, sizeof(rtv))) { 1762 if (copy_to_user(tvp, &rtv, sizeof(rtv))) {
1763sticky: 1763sticky:
@@ -1834,7 +1834,7 @@ asmlinkage long compat_sys_pselect7(int n, compat_ulong_t __user *inp,
1834 rts.tv_sec++; 1834 rts.tv_sec++;
1835 rts.tv_nsec -= NSEC_PER_SEC; 1835 rts.tv_nsec -= NSEC_PER_SEC;
1836 } 1836 }
1837 if (compat_timespec_compare(&rts, &ts) < 0) 1837 if (compat_timespec_compare(&rts, &ts) >= 0)
1838 rts = ts; 1838 rts = ts;
1839 copy_to_user(tsp, &rts, sizeof(rts)); 1839 copy_to_user(tsp, &rts, sizeof(rts));
1840 } 1840 }
@@ -1934,7 +1934,7 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
1934 rts.tv_nsec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)) * 1934 rts.tv_nsec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)) *
1935 1000; 1935 1000;
1936 rts.tv_sec = timeout; 1936 rts.tv_sec = timeout;
1937 if (compat_timespec_compare(&rts, &ts) < 0) 1937 if (compat_timespec_compare(&rts, &ts) >= 0)
1938 rts = ts; 1938 rts = ts;
1939 if (copy_to_user(tsp, &rts, sizeof(rts))) { 1939 if (copy_to_user(tsp, &rts, sizeof(rts))) {
1940sticky: 1940sticky:
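
All three comparisons flipped in fs/compat.c (and their native counterparts in fs/select.c below) implement the same clamp: the remaining timeout copied back to userspace must never exceed what the caller passed in. With "< 0" the kernel kept the larger value whenever the jiffies round-trip overshot; with ">= 0" it reports the original timeout instead. Stripped to plain integers:

    #include <stdio.h>

    int main(void)
    {
        int original  = 100;   /* timeout the caller passed in */
        int remaining = 103;   /* jiffies conversion rounded up */

        if (remaining >= original)   /* the fixed comparison */
            remaining = original;

        printf("reported remaining: %d\n", remaining);  /* clamped to 100 */
        return 0;
    }
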
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index a2ca3107d475..86ae8e93adb9 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -792,18 +792,20 @@ ext2_xattr_delete_inode(struct inode *inode)
792 ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1); 792 ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1);
793 get_bh(bh); 793 get_bh(bh);
794 bforget(bh); 794 bforget(bh);
795 unlock_buffer(bh);
795 } else { 796 } else {
796 HDR(bh)->h_refcount = cpu_to_le32( 797 HDR(bh)->h_refcount = cpu_to_le32(
797 le32_to_cpu(HDR(bh)->h_refcount) - 1); 798 le32_to_cpu(HDR(bh)->h_refcount) - 1);
798 if (ce) 799 if (ce)
799 mb_cache_entry_release(ce); 800 mb_cache_entry_release(ce);
801 ea_bdebug(bh, "refcount now=%d",
802 le32_to_cpu(HDR(bh)->h_refcount));
803 unlock_buffer(bh);
800 mark_buffer_dirty(bh); 804 mark_buffer_dirty(bh);
801 if (IS_SYNC(inode)) 805 if (IS_SYNC(inode))
802 sync_dirty_buffer(bh); 806 sync_dirty_buffer(bh);
803 DQUOT_FREE_BLOCK(inode, 1); 807 DQUOT_FREE_BLOCK(inode, 1);
804 } 808 }
805 ea_bdebug(bh, "refcount now=%d", le32_to_cpu(HDR(bh)->h_refcount) - 1);
806 unlock_buffer(bh);
807 EXT2_I(inode)->i_file_acl = 0; 809 EXT2_I(inode)->i_file_acl = 0;
808 810
809cleanup: 811cleanup:
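
The ext2 change moves both the debug message and unlock_buffer() inside the two branches: the refcount is read while the buffer lock is still held and the header is known to be valid, instead of being recomputed (off by one) after the block may already have been forgotten. The pattern, reduced to a mutex-and-counter model:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
    static int h_refcount = 2;

    static void xattr_delete(void)
    {
        pthread_mutex_lock(&buf_lock);
        if (h_refcount == 1) {
            h_refcount = 0;                   /* last user: block is freed */
            pthread_mutex_unlock(&buf_lock);
            printf("block freed\n");          /* do not touch the header now */
            return;
        }
        h_refcount--;
        printf("refcount now=%d\n", h_refcount); /* read under the lock */
        pthread_mutex_unlock(&buf_lock);
    }

    int main(void)                               /* build with -pthread */
    {
        xattr_delete();
        xattr_delete();
        return 0;
    }
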
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index f556a0d5c0d3..0c9a2ee54c91 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -66,6 +66,12 @@ static void restore_sigs(sigset_t *oldset)
66 sigprocmask(SIG_SETMASK, oldset, NULL); 66 sigprocmask(SIG_SETMASK, oldset, NULL);
67} 67}
68 68
69/*
70 * Reset request, so that it can be reused
71 *
72 * The caller must be _very_ careful to make sure, that it is holding
73 * the only reference to req
74 */
69void fuse_reset_request(struct fuse_req *req) 75void fuse_reset_request(struct fuse_req *req)
70{ 76{
71 int preallocated = req->preallocated; 77 int preallocated = req->preallocated;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 296351615b00..6f05379b0a0d 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -116,9 +116,14 @@ int fuse_open_common(struct inode *inode, struct file *file, int isdir)
116/* Special case for failed iget in CREATE */ 116/* Special case for failed iget in CREATE */
117static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req) 117static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
118{ 118{
119 u64 nodeid = req->in.h.nodeid; 119 /* If called from end_io_requests(), req has more than one
120 fuse_reset_request(req); 120 reference and fuse_reset_request() cannot work */
121 fuse_send_forget(fc, req, nodeid, 1); 121 if (fc->connected) {
122 u64 nodeid = req->in.h.nodeid;
123 fuse_reset_request(req);
124 fuse_send_forget(fc, req, nodeid, 1);
125 } else
126 fuse_put_request(fc, req);
122} 127}
123 128
124void fuse_send_release(struct fuse_conn *fc, struct fuse_file *ff, 129void fuse_send_release(struct fuse_conn *fc, struct fuse_file *ff,
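
The fuse fix hinges on reference counts: fuse_reset_request() is only legal while the caller holds the sole reference, which is guaranteed when the connection is still up; during end_io_requests() a second reference exists, so the request is merely released. The rule as a small refcount sketch:

    #include <stdio.h>

    struct req { int count; };

    static void put_request(struct req *r)
    {
        if (--r->count == 0)
            printf("request freed\n");
    }

    static void reset_request(struct req *r)
    {
        printf("request recycled (count=%d)\n", r->count); /* must be 1 */
    }

    static void release_end(struct req *r, int connected)
    {
        if (connected)
            reset_request(r);   /* sole reference: reuse it for FORGET */
        else
            put_request(r);     /* end_io_requests() holds another ref */
    }

    int main(void)
    {
        struct req live = { 1 }, dying = { 2 };

        release_end(&live, 1);
        release_end(&dying, 0);  /* drops to 1; the other holder frees it */
        return 0;
    }
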
diff --git a/fs/select.c b/fs/select.c
index 6ce68a9c8976..1815a57d2255 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -404,7 +404,7 @@ asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp,
404 goto sticky; 404 goto sticky;
405 rtv.tv_usec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)); 405 rtv.tv_usec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ));
406 rtv.tv_sec = timeout; 406 rtv.tv_sec = timeout;
407 if (timeval_compare(&rtv, &tv) < 0) 407 if (timeval_compare(&rtv, &tv) >= 0)
408 rtv = tv; 408 rtv = tv;
409 if (copy_to_user(tvp, &rtv, sizeof(rtv))) { 409 if (copy_to_user(tvp, &rtv, sizeof(rtv))) {
410sticky: 410sticky:
@@ -471,7 +471,7 @@ asmlinkage long sys_pselect7(int n, fd_set __user *inp, fd_set __user *outp,
471 rts.tv_nsec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)) * 471 rts.tv_nsec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)) *
472 1000; 472 1000;
473 rts.tv_sec = timeout; 473 rts.tv_sec = timeout;
474 if (timespec_compare(&rts, &ts) < 0) 474 if (timespec_compare(&rts, &ts) >= 0)
475 rts = ts; 475 rts = ts;
476 if (copy_to_user(tsp, &rts, sizeof(rts))) { 476 if (copy_to_user(tsp, &rts, sizeof(rts))) {
477sticky: 477sticky:
@@ -775,7 +775,7 @@ asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
775 rts.tv_nsec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)) * 775 rts.tv_nsec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)) *
776 1000; 776 1000;
777 rts.tv_sec = timeout; 777 rts.tv_sec = timeout;
778 if (timespec_compare(&rts, &ts) < 0) 778 if (timespec_compare(&rts, &ts) >= 0)
779 rts = ts; 779 rts = ts;
780 if (copy_to_user(tsp, &rts, sizeof(rts))) { 780 if (copy_to_user(tsp, &rts, sizeof(rts))) {
781 sticky: 781 sticky:
diff --git a/include/asm-powerpc/pgalloc.h b/include/asm-powerpc/pgalloc.h
index 9f5b052784a5..a00ee002cd11 100644
--- a/include/asm-powerpc/pgalloc.h
+++ b/include/asm-powerpc/pgalloc.h
@@ -146,7 +146,7 @@ extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
146 pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \ 146 pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
147 PMD_CACHE_NUM, PMD_TABLE_SIZE-1)) 147 PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
148#ifndef CONFIG_PPC_64K_PAGES 148#ifndef CONFIG_PPC_64K_PAGES
149#define __pud_free_tlb(tlb, pmd) \ 149#define __pud_free_tlb(tlb, pud) \
150 pgtable_free_tlb(tlb, pgtable_free_cache(pud, \ 150 pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
151 PUD_CACHE_NUM, PUD_TABLE_SIZE-1)) 151 PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
152#endif /* CONFIG_PPC_64K_PAGES */ 152#endif /* CONFIG_PPC_64K_PAGES */
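
The pgalloc.h fix is a macro-parameter typo: the parameter was declared "pmd" while the body used "pud", so the argument was never substituted and the macro silently bound whatever variable named pud existed at the call site. It happened to work only because every caller passed a variable literally named pud. The hazard in miniature:

    #include <stdio.h>

    #define free_buggy(tlb, pmd) printf("freeing %d\n", pud)  /* caller's pud! */
    #define free_fixed(tlb, pud) printf("freeing %d\n", pud)

    int main(void)
    {
        int pud = 7, other = 9;

        free_buggy(0, other);   /* prints 7: the argument is ignored */
        free_fixed(0, other);   /* prints 9, as intended */
        return 0;
    }
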
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
index 9c6e9c300eb9..444dae5912e6 100644
--- a/include/asm-s390/smp.h
+++ b/include/asm-s390/smp.h
@@ -31,6 +31,7 @@ typedef struct
31 __u16 cpu; 31 __u16 cpu;
32} sigp_info; 32} sigp_info;
33 33
34extern void smp_setup_cpu_possible_map(void);
34extern int smp_call_function_on(void (*func) (void *info), void *info, 35extern int smp_call_function_on(void (*func) (void *info), void *info,
35 int nonatomic, int wait, int cpu); 36 int nonatomic, int wait, int cpu);
36#define NO_PROC_ID 0xFF /* No processor magic marker */ 37#define NO_PROC_ID 0xFF /* No processor magic marker */
@@ -104,6 +105,7 @@ smp_call_function_on(void (*func) (void *info), void *info,
104#define smp_cpu_not_running(cpu) 1 105#define smp_cpu_not_running(cpu) 1
105#define smp_get_cpu(cpu) ({ 0; }) 106#define smp_get_cpu(cpu) ({ 0; })
106#define smp_put_cpu(cpu) ({ 0; }) 107#define smp_put_cpu(cpu) ({ 0; })
108#define smp_setup_cpu_possible_map()
107#endif 109#endif
108 110
109#endif 111#endif
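
The smp.h hunk pairs the new SMP prototype with an empty macro for uniprocessor builds, so generic setup code can call smp_setup_cpu_possible_map() unconditionally and have it compile away. The stub pattern, reduced:

    #include <stdio.h>

    #ifdef CONFIG_SMP
    extern void smp_setup_cpu_possible_map(void);
    #else
    #define smp_setup_cpu_possible_map()   /* expands to nothing on UP */
    #endif

    int main(void)
    {
        smp_setup_cpu_possible_map();   /* a bare ';' when !CONFIG_SMP */
        printf("boot continues\n");
        return 0;
    }
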
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 4e90905f0e87..2d9d08f72f76 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -153,13 +153,11 @@ static int swsusp_swap_check(void) /* This is called before saving image */
153{ 153{
154 int i; 154 int i;
155 155
156 if (!swsusp_resume_device)
157 return -ENODEV;
158 spin_lock(&swap_lock); 156 spin_lock(&swap_lock);
159 for (i = 0; i < MAX_SWAPFILES; i++) { 157 for (i = 0; i < MAX_SWAPFILES; i++) {
160 if (!(swap_info[i].flags & SWP_WRITEOK)) 158 if (!(swap_info[i].flags & SWP_WRITEOK))
161 continue; 159 continue;
162 if (is_resume_device(swap_info + i)) { 160 if (!swsusp_resume_device || is_resume_device(swap_info + i)) {
163 spin_unlock(&swap_lock); 161 spin_unlock(&swap_lock);
164 root_swap = i; 162 root_swap = i;
165 return 0; 163 return 0;
diff --git a/kernel/sched.c b/kernel/sched.c
index 66d957227de9..12d291bf3379 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5058,7 +5058,18 @@ static void init_sched_build_groups(struct sched_group groups[], cpumask_t span,
5058#define MAX_DOMAIN_DISTANCE 32 5058#define MAX_DOMAIN_DISTANCE 32
5059 5059
5060static unsigned long long migration_cost[MAX_DOMAIN_DISTANCE] = 5060static unsigned long long migration_cost[MAX_DOMAIN_DISTANCE] =
5061 { [ 0 ... MAX_DOMAIN_DISTANCE-1 ] = -1LL }; 5061 { [ 0 ... MAX_DOMAIN_DISTANCE-1 ] =
5062/*
5063 * Architectures may override the migration cost and thus avoid
5064 * boot-time calibration. Unit is nanoseconds. Mostly useful for
5065 * virtualized hardware:
5066 */
5067#ifdef CONFIG_DEFAULT_MIGRATION_COST
5068 CONFIG_DEFAULT_MIGRATION_COST
5069#else
5070 -1LL
5071#endif
5072};
5062 5073
5063/* 5074/*
5064 * Allow override of migration cost - in units of microseconds. 5075 * Allow override of migration cost - in units of microseconds.
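
The sched.c change keeps the whole-array seeding but makes the value configurable: GCC's range-designator extension "[0 ... N-1] =" fills every slot, with -1 still meaning "calibrate at boot" and an architecture-supplied CONFIG_DEFAULT_MIGRATION_COST overriding it. The initializer form in isolation (GNU C):

    #include <stdio.h>

    #define MAX_DOMAIN_DISTANCE 8

    static long long migration_cost[MAX_DOMAIN_DISTANCE] = {
        [0 ... MAX_DOMAIN_DISTANCE - 1] =
    #ifdef CONFIG_DEFAULT_MIGRATION_COST
        CONFIG_DEFAULT_MIGRATION_COST
    #else
        -1LL   /* -1 = measure at boot */
    #endif
    };

    int main(void)
    {
        printf("cost[0]=%lld cost[%d]=%lld\n", migration_cost[0],
               MAX_DOMAIN_DISTANCE - 1, migration_cost[MAX_DOMAIN_DISTANCE - 1]);
        return 0;
    }
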
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 323fdcf128c4..bedfa4f09c80 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -808,6 +808,8 @@ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
808 nodes_clear(*nodes); 808 nodes_clear(*nodes);
809 if (maxnode == 0 || !nmask) 809 if (maxnode == 0 || !nmask)
810 return 0; 810 return 0;
811 if (maxnode > PAGE_SIZE)
812 return -EINVAL;
811 813
812 nlongs = BITS_TO_LONGS(maxnode); 814 nlongs = BITS_TO_LONGS(maxnode);
813 if ((maxnode % BITS_PER_LONG) == 0) 815 if ((maxnode % BITS_PER_LONG) == 0)
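
The mempolicy check bounds a user-controlled bit count before it becomes a copy length; without it a huge maxnode inflates BITS_TO_LONGS() and the copy_from_user() that follows. Simplified:

    #include <stdio.h>

    #define BITS_PER_LONG    (8 * sizeof(unsigned long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
    #define PAGE_SIZE        4096UL

    static int get_nodes(unsigned long maxnode)
    {
        if (maxnode > PAGE_SIZE)      /* the added sanity check */
            return -22;               /* -EINVAL */
        printf("copying %lu longs\n", (unsigned long)BITS_TO_LONGS(maxnode));
        return 0;
    }

    int main(void)
    {
        get_nodes(64);                              /* one long on 64-bit */
        printf("huge request -> %d\n", get_nodes(1UL << 40));
        return 0;
    }
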
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 62c122528587..208812b25597 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1541,29 +1541,29 @@ static int __initdata node_load[MAX_NUMNODES];
1541 */ 1541 */
1542static int __init find_next_best_node(int node, nodemask_t *used_node_mask) 1542static int __init find_next_best_node(int node, nodemask_t *used_node_mask)
1543{ 1543{
1544 int i, n, val; 1544 int n, val;
1545 int min_val = INT_MAX; 1545 int min_val = INT_MAX;
1546 int best_node = -1; 1546 int best_node = -1;
1547 1547
1548 for_each_online_node(i) { 1548 /* Use the local node if we haven't already */
1549 cpumask_t tmp; 1549 if (!node_isset(node, *used_node_mask)) {
1550 node_set(node, *used_node_mask);
1551 return node;
1552 }
1550 1553
1551 /* Start from local node */ 1554 for_each_online_node(n) {
1552 n = (node+i) % num_online_nodes(); 1555 cpumask_t tmp;
1553 1556
1554 /* Don't want a node to appear more than once */ 1557 /* Don't want a node to appear more than once */
1555 if (node_isset(n, *used_node_mask)) 1558 if (node_isset(n, *used_node_mask))
1556 continue; 1559 continue;
1557 1560
1558 /* Use the local node if we haven't already */
1559 if (!node_isset(node, *used_node_mask)) {
1560 best_node = node;
1561 break;
1562 }
1563
1564 /* Use the distance array to find the distance */ 1561 /* Use the distance array to find the distance */
1565 val = node_distance(node, n); 1562 val = node_distance(node, n);
1566 1563
1564 /* Penalize nodes under us ("prefer the next node") */
1565 val += (n < node);
1566
1567 /* Give preference to headless and unused nodes */ 1567 /* Give preference to headless and unused nodes */
1568 tmp = node_to_cpumask(n); 1568 tmp = node_to_cpumask(n);
1569 if (!cpus_empty(tmp)) 1569 if (!cpus_empty(tmp))