aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/cpu-freq/user-guide.txt5
-rw-r--r--Documentation/infiniband/ipoib.txt2
-rw-r--r--MAINTAINERS6
-rw-r--r--arch/i386/kernel/cpu/cpufreq/Kconfig3
-rw-r--r--arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c3
-rw-r--r--arch/i386/kernel/cpu/cpufreq/longhaul.c221
-rw-r--r--arch/ia64/hp/sim/simscsi.c4
-rw-r--r--arch/ia64/kernel/efi.c6
-rw-r--r--arch/ia64/kernel/head.S2
-rw-r--r--arch/ia64/kernel/ia64_ksyms.c2
-rw-r--r--arch/ia64/kernel/pal.S18
-rw-r--r--arch/ia64/kernel/palinfo.c34
-rw-r--r--arch/ia64/lib/Makefile2
-rw-r--r--arch/ia64/mm/contig.c16
-rw-r--r--arch/ia64/mm/discontig.c68
-rw-r--r--arch/ia64/mm/init.c55
-rw-r--r--arch/ia64/mm/ioremap.c6
-rw-r--r--arch/ia64/sn/kernel/xpc_main.c2
-rw-r--r--arch/ia64/sn/pci/tioce_provider.c6
-rw-r--r--drivers/acpi/dock.c13
-rw-r--r--drivers/char/snsc.c7
-rw-r--r--drivers/cpufreq/cpufreq.c75
-rw-r--r--drivers/infiniband/core/cm.c4
-rw-r--r--drivers/infiniband/core/uverbs.h2
-rw-r--r--drivers/infiniband/core/uverbs_main.c8
-rw-r--r--drivers/infiniband/hw/mthca/mthca_allocator.c15
-rw-r--r--drivers/infiniband/ulp/ipoib/Kconfig3
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c19
-rw-r--r--drivers/net/myri10ge/myri10ge.c24
-rw-r--r--drivers/net/phy/phy.c8
-rw-r--r--drivers/net/s2io.c386
-rw-r--r--drivers/net/s2io.h10
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c4
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.h10
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c16
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c7
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c3
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c2
-rw-r--r--drivers/pci/pcie/portdrv_pci.c38
-rw-r--r--drivers/pci/quirks.c7
-rw-r--r--drivers/pci/search.c2
-rw-r--r--drivers/pnp/interface.c12
-rw-r--r--drivers/scsi/ahci.c10
-rw-r--r--include/asm-ia64/meminit.h7
-rw-r--r--include/asm-ia64/pal.h7
-rw-r--r--include/asm-ia64/sn/xpc.h4
-rw-r--r--include/asm-ia64/system.h2
-rw-r--r--include/linux/kobject.h2
-rw-r--r--include/linux/pci_ids.h1
-rw-r--r--lib/kobject_uevent.c4
50 files changed, 622 insertions, 551 deletions
diff --git a/Documentation/cpu-freq/user-guide.txt b/Documentation/cpu-freq/user-guide.txt
index 7fedc00c3d30..555c8cf3650a 100644
--- a/Documentation/cpu-freq/user-guide.txt
+++ b/Documentation/cpu-freq/user-guide.txt
@@ -153,10 +153,13 @@ scaling_governor, and by "echoing" the name of another
153 that some governors won't load - they only 153 that some governors won't load - they only
154 work on some specific architectures or 154 work on some specific architectures or
155 processors. 155 processors.
156scaling_min_freq and 156scaling_min_freq and
157scaling_max_freq show the current "policy limits" (in 157scaling_max_freq show the current "policy limits" (in
158 kHz). By echoing new values into these 158 kHz). By echoing new values into these
159 files, you can change these limits. 159 files, you can change these limits.
160 NOTE: when setting a policy you need to
161 first set scaling_max_freq, then
162 scaling_min_freq.
160 163
161 164
162If you have selected the "userspace" governor which allows you to 165If you have selected the "userspace" governor which allows you to
diff --git a/Documentation/infiniband/ipoib.txt b/Documentation/infiniband/ipoib.txt
index 187035560d7f..864ff3283780 100644
--- a/Documentation/infiniband/ipoib.txt
+++ b/Documentation/infiniband/ipoib.txt
@@ -51,8 +51,6 @@ Debugging Information
51 51
52References 52References
53 53
54 IETF IP over InfiniBand (ipoib) Working Group
55 http://ietf.org/html.charters/ipoib-charter.html
56 Transmission of IP over InfiniBand (IPoIB) (RFC 4391) 54 Transmission of IP over InfiniBand (IPoIB) (RFC 4391)
57 http://ietf.org/rfc/rfc4391.txt 55 http://ietf.org/rfc/rfc4391.txt
58 IP over InfiniBand (IPoIB) Architecture (RFC 4392) 56 IP over InfiniBand (IPoIB) Architecture (RFC 4392)
diff --git a/MAINTAINERS b/MAINTAINERS
index 32aa30d1504a..bac3f706f75e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -214,6 +214,12 @@ W: http://acpi.sourceforge.net/
214T: git kernel.org:/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6.git 214T: git kernel.org:/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6.git
215S: Maintained 215S: Maintained
216 216
217ACPI PCI HOTPLUG DRIVER
218P: Kristen Carlson Accardi
219M: kristen.c.accardi@intel.com
220L: pcihpd-discuss@lists.sourceforge.net
221S: Maintained
222
217AD1816 SOUND DRIVER 223AD1816 SOUND DRIVER
218P: Thorsten Knabe 224P: Thorsten Knabe
219M: Thorsten Knabe <linux@thorsten-knabe.de> 225M: Thorsten Knabe <linux@thorsten-knabe.de>
diff --git a/arch/i386/kernel/cpu/cpufreq/Kconfig b/arch/i386/kernel/cpu/cpufreq/Kconfig
index e44a4c6a4fe5..ccc1edff5c97 100644
--- a/arch/i386/kernel/cpu/cpufreq/Kconfig
+++ b/arch/i386/kernel/cpu/cpufreq/Kconfig
@@ -96,6 +96,7 @@ config X86_POWERNOW_K8_ACPI
96 96
97config X86_GX_SUSPMOD 97config X86_GX_SUSPMOD
98 tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation" 98 tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation"
99 depends on PCI
99 help 100 help
100 This add the CPUFreq driver for NatSemi Geode processors which 101 This add the CPUFreq driver for NatSemi Geode processors which
101 support suspend modulation. 102 support suspend modulation.
@@ -202,7 +203,7 @@ config X86_LONGRUN
202config X86_LONGHAUL 203config X86_LONGHAUL
203 tristate "VIA Cyrix III Longhaul" 204 tristate "VIA Cyrix III Longhaul"
204 select CPU_FREQ_TABLE 205 select CPU_FREQ_TABLE
205 depends on BROKEN 206 depends on ACPI_PROCESSOR
206 help 207 help
207 This adds the CPUFreq driver for VIA Samuel/CyrixIII, 208 This adds the CPUFreq driver for VIA Samuel/CyrixIII,
208 VIA Cyrix Samuel/C3, VIA Cyrix Ezra and VIA Cyrix Ezra-T 209 VIA Cyrix Samuel/C3, VIA Cyrix Ezra and VIA Cyrix Ezra-T
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 567b39bea07e..efb41e81351c 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -384,8 +384,7 @@ static int acpi_cpufreq_early_init_acpi(void)
384 } 384 }
385 385
386 /* Do initialization in ACPI core */ 386 /* Do initialization in ACPI core */
387 acpi_processor_preregister_performance(acpi_perf_data); 387 return acpi_processor_preregister_performance(acpi_perf_data);
388 return 0;
389} 388}
390 389
391static int 390static int
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.c b/arch/i386/kernel/cpu/cpufreq/longhaul.c
index 146f607e9c44..4f2c3aeef724 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.c
@@ -29,11 +29,13 @@
29#include <linux/cpufreq.h> 29#include <linux/cpufreq.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/string.h> 31#include <linux/string.h>
32#include <linux/pci.h>
33 32
34#include <asm/msr.h> 33#include <asm/msr.h>
35#include <asm/timex.h> 34#include <asm/timex.h>
36#include <asm/io.h> 35#include <asm/io.h>
36#include <asm/acpi.h>
37#include <linux/acpi.h>
38#include <acpi/processor.h>
37 39
38#include "longhaul.h" 40#include "longhaul.h"
39 41
@@ -56,6 +58,8 @@ static int minvid, maxvid;
56static unsigned int minmult, maxmult; 58static unsigned int minmult, maxmult;
57static int can_scale_voltage; 59static int can_scale_voltage;
58static int vrmrev; 60static int vrmrev;
61static struct acpi_processor *pr = NULL;
62static struct acpi_processor_cx *cx = NULL;
59 63
60/* Module parameters */ 64/* Module parameters */
61static int dont_scale_voltage; 65static int dont_scale_voltage;
@@ -118,84 +122,65 @@ static int longhaul_get_cpu_mult(void)
118 return eblcr_table[invalue]; 122 return eblcr_table[invalue];
119} 123}
120 124
125/* For processor with BCR2 MSR */
121 126
122static void do_powersaver(union msr_longhaul *longhaul, 127static void do_longhaul1(int cx_address, unsigned int clock_ratio_index)
123 unsigned int clock_ratio_index)
124{ 128{
125 struct pci_dev *dev; 129 union msr_bcr2 bcr2;
126 unsigned long flags; 130 u32 t;
127 unsigned int tmp_mask;
128 int version;
129 int i;
130 u16 pci_cmd;
131 u16 cmd_state[64];
132 131
133 switch (cpu_model) { 132 rdmsrl(MSR_VIA_BCR2, bcr2.val);
134 case CPU_EZRA_T: 133 /* Enable software clock multiplier */
135 version = 3; 134 bcr2.bits.ESOFTBF = 1;
136 break; 135 bcr2.bits.CLOCKMUL = clock_ratio_index;
137 case CPU_NEHEMIAH:
138 version = 0xf;
139 break;
140 default:
141 return;
142 }
143 136
144 rdmsrl(MSR_VIA_LONGHAUL, longhaul->val); 137 /* Sync to timer tick */
145 longhaul->bits.SoftBusRatio = clock_ratio_index & 0xf; 138 safe_halt();
146 longhaul->bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4; 139 ACPI_FLUSH_CPU_CACHE();
147 longhaul->bits.EnableSoftBusRatio = 1; 140 /* Change frequency on next halt or sleep */
148 longhaul->bits.RevisionKey = 0; 141 wrmsrl(MSR_VIA_BCR2, bcr2.val);
142 /* Invoke C3 */
143 inb(cx_address);
144 /* Dummy op - must do something useless after P_LVL3 read */
145 t = inl(acpi_fadt.xpm_tmr_blk.address);
146
147 /* Disable software clock multiplier */
148 local_irq_disable();
149 rdmsrl(MSR_VIA_BCR2, bcr2.val);
150 bcr2.bits.ESOFTBF = 0;
151 wrmsrl(MSR_VIA_BCR2, bcr2.val);
152}
149 153
150 preempt_disable(); 154/* For processor with Longhaul MSR */
151 local_irq_save(flags);
152 155
153 /* 156static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
154 * get current pci bus master state for all devices 157{
155 * and clear bus master bit 158 union msr_longhaul longhaul;
156 */ 159 u32 t;
157 dev = NULL;
158 i = 0;
159 do {
160 dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
161 if (dev != NULL) {
162 pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
163 cmd_state[i++] = pci_cmd;
164 pci_cmd &= ~PCI_COMMAND_MASTER;
165 pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
166 }
167 } while (dev != NULL);
168 160
169 tmp_mask=inb(0x21); /* works on C3. save mask. */ 161 rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
170 outb(0xFE,0x21); /* TMR0 only */ 162 longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
171 outb(0xFF,0x80); /* delay */ 163 longhaul.bits.SoftBusRatio = clock_ratio_index & 0xf;
164 longhaul.bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4;
165 longhaul.bits.EnableSoftBusRatio = 1;
172 166
167 /* Sync to timer tick */
173 safe_halt(); 168 safe_halt();
174 wrmsrl(MSR_VIA_LONGHAUL, longhaul->val); 169 ACPI_FLUSH_CPU_CACHE();
175 halt(); 170 /* Change frequency on next halt or sleep */
176 171 wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
172 /* Invoke C3 */
173 inb(cx_address);
174 /* Dummy op - must do something useless after P_LVL3 read */
175 t = inl(acpi_fadt.xpm_tmr_blk.address);
176
177 /* Disable bus ratio bit */
177 local_irq_disable(); 178 local_irq_disable();
178 179 longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
179 outb(tmp_mask,0x21); /* restore mask */ 180 longhaul.bits.EnableSoftBusRatio = 0;
180 181 longhaul.bits.EnableSoftBSEL = 0;
181 /* restore pci bus master state for all devices */ 182 longhaul.bits.EnableSoftVID = 0;
182 dev = NULL; 183 wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
183 i = 0;
184 do {
185 dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
186 if (dev != NULL) {
187 pci_cmd = cmd_state[i++];
188 pci_write_config_byte(dev, PCI_COMMAND, pci_cmd);
189 }
190 } while (dev != NULL);
191 local_irq_restore(flags);
192 preempt_enable();
193
194 /* disable bus ratio bit */
195 rdmsrl(MSR_VIA_LONGHAUL, longhaul->val);
196 longhaul->bits.EnableSoftBusRatio = 0;
197 longhaul->bits.RevisionKey = version;
198 wrmsrl(MSR_VIA_LONGHAUL, longhaul->val);
199} 184}
200 185
201/** 186/**
@@ -209,9 +194,9 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
209{ 194{
210 int speed, mult; 195 int speed, mult;
211 struct cpufreq_freqs freqs; 196 struct cpufreq_freqs freqs;
212 union msr_longhaul longhaul;
213 union msr_bcr2 bcr2;
214 static unsigned int old_ratio=-1; 197 static unsigned int old_ratio=-1;
198 unsigned long flags;
199 unsigned int pic1_mask, pic2_mask;
215 200
216 if (old_ratio == clock_ratio_index) 201 if (old_ratio == clock_ratio_index)
217 return; 202 return;
@@ -234,6 +219,20 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
234 dprintk ("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", 219 dprintk ("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
235 fsb, mult/10, mult%10, print_speed(speed/1000)); 220 fsb, mult/10, mult%10, print_speed(speed/1000));
236 221
222 preempt_disable();
223 local_irq_save(flags);
224
225 pic2_mask = inb(0xA1);
226 pic1_mask = inb(0x21); /* works on C3. save mask. */
227 outb(0xFF,0xA1); /* Overkill */
228 outb(0xFE,0x21); /* TMR0 only */
229
230 /* Disable bus master arbitration */
231 if (pr->flags.bm_check) {
232 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1,
233 ACPI_MTX_DO_NOT_LOCK);
234 }
235
237 switch (longhaul_version) { 236 switch (longhaul_version) {
238 237
239 /* 238 /*
@@ -245,20 +244,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
245 */ 244 */
246 case TYPE_LONGHAUL_V1: 245 case TYPE_LONGHAUL_V1:
247 case TYPE_LONGHAUL_V2: 246 case TYPE_LONGHAUL_V2:
248 rdmsrl (MSR_VIA_BCR2, bcr2.val); 247 do_longhaul1(cx->address, clock_ratio_index);
249 /* Enable software clock multiplier */
250 bcr2.bits.ESOFTBF = 1;
251 bcr2.bits.CLOCKMUL = clock_ratio_index;
252 local_irq_disable();
253 wrmsrl (MSR_VIA_BCR2, bcr2.val);
254 safe_halt();
255
256 /* Disable software clock multiplier */
257 rdmsrl (MSR_VIA_BCR2, bcr2.val);
258 bcr2.bits.ESOFTBF = 0;
259 local_irq_disable();
260 wrmsrl (MSR_VIA_BCR2, bcr2.val);
261 local_irq_enable();
262 break; 248 break;
263 249
264 /* 250 /*
@@ -273,10 +259,22 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
273 * to work in practice. 259 * to work in practice.
274 */ 260 */
275 case TYPE_POWERSAVER: 261 case TYPE_POWERSAVER:
276 do_powersaver(&longhaul, clock_ratio_index); 262 do_powersaver(cx->address, clock_ratio_index);
277 break; 263 break;
278 } 264 }
279 265
266 /* Enable bus master arbitration */
267 if (pr->flags.bm_check) {
268 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0,
269 ACPI_MTX_DO_NOT_LOCK);
270 }
271
272 outb(pic2_mask,0xA1); /* restore mask */
273 outb(pic1_mask,0x21);
274
275 local_irq_restore(flags);
276 preempt_enable();
277
280 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 278 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
281} 279}
282 280
@@ -324,9 +322,11 @@ static int guess_fsb(void)
324static int __init longhaul_get_ranges(void) 322static int __init longhaul_get_ranges(void)
325{ 323{
326 unsigned long invalue; 324 unsigned long invalue;
327 unsigned int multipliers[32]= { 325 unsigned int ezra_t_multipliers[32]= {
328 50,30,40,100,55,35,45,95,90,70,80,60,120,75,85,65, 326 90, 30, 40, 100, 55, 35, 45, 95,
329 -1,110,120,-1,135,115,125,105,130,150,160,140,-1,155,-1,145 }; 327 50, 70, 80, 60, 120, 75, 85, 65,
328 -1, 110, 120, -1, 135, 115, 125, 105,
329 130, 150, 160, 140, -1, 155, -1, 145 };
330 unsigned int j, k = 0; 330 unsigned int j, k = 0;
331 union msr_longhaul longhaul; 331 union msr_longhaul longhaul;
332 unsigned long lo, hi; 332 unsigned long lo, hi;
@@ -355,13 +355,13 @@ static int __init longhaul_get_ranges(void)
355 invalue = longhaul.bits.MaxMHzBR; 355 invalue = longhaul.bits.MaxMHzBR;
356 if (longhaul.bits.MaxMHzBR4) 356 if (longhaul.bits.MaxMHzBR4)
357 invalue += 16; 357 invalue += 16;
358 maxmult=multipliers[invalue]; 358 maxmult=ezra_t_multipliers[invalue];
359 359
360 invalue = longhaul.bits.MinMHzBR; 360 invalue = longhaul.bits.MinMHzBR;
361 if (longhaul.bits.MinMHzBR4 == 1) 361 if (longhaul.bits.MinMHzBR4 == 1)
362 minmult = 30; 362 minmult = 30;
363 else 363 else
364 minmult = multipliers[invalue]; 364 minmult = ezra_t_multipliers[invalue];
365 fsb = eblcr_fsb_table_v2[longhaul.bits.MaxMHzFSB]; 365 fsb = eblcr_fsb_table_v2[longhaul.bits.MaxMHzFSB];
366 break; 366 break;
367 } 367 }
@@ -527,6 +527,18 @@ static unsigned int longhaul_get(unsigned int cpu)
527 return calc_speed(longhaul_get_cpu_mult()); 527 return calc_speed(longhaul_get_cpu_mult());
528} 528}
529 529
530static acpi_status longhaul_walk_callback(acpi_handle obj_handle,
531 u32 nesting_level,
532 void *context, void **return_value)
533{
534 struct acpi_device *d;
535
536 if ( acpi_bus_get_device(obj_handle, &d) ) {
537 return 0;
538 }
539 *return_value = (void *)acpi_driver_data(d);
540 return 1;
541}
530 542
531static int __init longhaul_cpu_init(struct cpufreq_policy *policy) 543static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
532{ 544{
@@ -534,6 +546,15 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
534 char *cpuname=NULL; 546 char *cpuname=NULL;
535 int ret; 547 int ret;
536 548
549 /* Check ACPI support for C3 state */
550 acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
551 &longhaul_walk_callback, NULL, (void *)&pr);
552 if (pr == NULL) goto err_acpi;
553
554 cx = &pr->power.states[ACPI_STATE_C3];
555 if (cx->address == 0 || cx->latency > 1000) goto err_acpi;
556
557 /* Now check what we have on this motherboard */
537 switch (c->x86_model) { 558 switch (c->x86_model) {
538 case 6: 559 case 6:
539 cpu_model = CPU_SAMUEL; 560 cpu_model = CPU_SAMUEL;
@@ -634,6 +655,10 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
634 cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu); 655 cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu);
635 656
636 return 0; 657 return 0;
658
659err_acpi:
660 printk(KERN_ERR PFX "No ACPI support for CPU frequency changes.\n");
661 return -ENODEV;
637} 662}
638 663
639static int __devexit longhaul_cpu_exit(struct cpufreq_policy *policy) 664static int __devexit longhaul_cpu_exit(struct cpufreq_policy *policy)
@@ -666,6 +691,18 @@ static int __init longhaul_init(void)
666 if (c->x86_vendor != X86_VENDOR_CENTAUR || c->x86 != 6) 691 if (c->x86_vendor != X86_VENDOR_CENTAUR || c->x86 != 6)
667 return -ENODEV; 692 return -ENODEV;
668 693
694#ifdef CONFIG_SMP
695 if (num_online_cpus() > 1) {
696 return -ENODEV;
697 printk(KERN_ERR PFX "More than 1 CPU detected, longhaul disabled.\n");
698 }
699#endif
700#ifdef CONFIG_X86_IO_APIC
701 if (cpu_has_apic) {
702 printk(KERN_ERR PFX "APIC detected. Longhaul is currently broken in this configuration.\n");
703 return -ENODEV;
704 }
705#endif
669 switch (c->x86_model) { 706 switch (c->x86_model) {
670 case 6 ... 9: 707 case 6 ... 9:
671 return cpufreq_register_driver(&longhaul_driver); 708 return cpufreq_register_driver(&longhaul_driver);
@@ -699,6 +736,6 @@ MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>");
699MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors."); 736MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors.");
700MODULE_LICENSE ("GPL"); 737MODULE_LICENSE ("GPL");
701 738
702module_init(longhaul_init); 739late_initcall(longhaul_init);
703module_exit(longhaul_exit); 740module_exit(longhaul_exit);
704 741
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index a3fe97531134..8a4f0d0d17a3 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -151,7 +151,7 @@ static void
151simscsi_sg_readwrite (struct scsi_cmnd *sc, int mode, unsigned long offset) 151simscsi_sg_readwrite (struct scsi_cmnd *sc, int mode, unsigned long offset)
152{ 152{
153 int list_len = sc->use_sg; 153 int list_len = sc->use_sg;
154 struct scatterlist *sl = (struct scatterlist *)sc->buffer; 154 struct scatterlist *sl = (struct scatterlist *)sc->request_buffer;
155 struct disk_stat stat; 155 struct disk_stat stat;
156 struct disk_req req; 156 struct disk_req req;
157 157
@@ -244,7 +244,7 @@ static void simscsi_fillresult(struct scsi_cmnd *sc, char *buf, unsigned len)
244 244
245 if (scatterlen == 0) 245 if (scatterlen == 0)
246 memcpy(sc->request_buffer, buf, len); 246 memcpy(sc->request_buffer, buf, len);
247 else for (slp = (struct scatterlist *)sc->buffer; scatterlen-- > 0 && len > 0; slp++) { 247 else for (slp = (struct scatterlist *)sc->request_buffer; scatterlen-- > 0 && len > 0; slp++) {
248 unsigned thislen = min(len, slp->length); 248 unsigned thislen = min(len, slp->length);
249 249
250 memcpy(page_address(slp->page) + slp->offset, buf, thislen); 250 memcpy(page_address(slp->page) + slp->offset, buf, thislen);
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index e4bfa9dafbce..bb8770a177b5 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -632,7 +632,7 @@ kern_memory_descriptor (unsigned long phys_addr)
632 if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT)) 632 if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT))
633 return md; 633 return md;
634 } 634 }
635 return 0; 635 return NULL;
636} 636}
637 637
638static efi_memory_desc_t * 638static efi_memory_desc_t *
@@ -652,7 +652,7 @@ efi_memory_descriptor (unsigned long phys_addr)
652 if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT)) 652 if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
653 return md; 653 return md;
654 } 654 }
655 return 0; 655 return NULL;
656} 656}
657 657
658u32 658u32
@@ -923,7 +923,7 @@ find_memmap_space (void)
923void 923void
924efi_memmap_init(unsigned long *s, unsigned long *e) 924efi_memmap_init(unsigned long *s, unsigned long *e)
925{ 925{
926 struct kern_memdesc *k, *prev = 0; 926 struct kern_memdesc *k, *prev = NULL;
927 u64 contig_low=0, contig_high=0; 927 u64 contig_low=0, contig_high=0;
928 u64 as, ae, lim; 928 u64 as, ae, lim;
929 void *efi_map_start, *efi_map_end, *p, *q; 929 void *efi_map_start, *efi_map_end, *p, *q;
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 561b8f1d3bc7..29236f0c62b5 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -853,7 +853,6 @@ END(__ia64_init_fpu)
853 */ 853 */
854GLOBAL_ENTRY(ia64_switch_mode_phys) 854GLOBAL_ENTRY(ia64_switch_mode_phys)
855 { 855 {
856 alloc r2=ar.pfs,0,0,0,0
857 rsm psr.i | psr.ic // disable interrupts and interrupt collection 856 rsm psr.i | psr.ic // disable interrupts and interrupt collection
858 mov r15=ip 857 mov r15=ip
859 } 858 }
@@ -902,7 +901,6 @@ END(ia64_switch_mode_phys)
902 */ 901 */
903GLOBAL_ENTRY(ia64_switch_mode_virt) 902GLOBAL_ENTRY(ia64_switch_mode_virt)
904 { 903 {
905 alloc r2=ar.pfs,0,0,0,0
906 rsm psr.i | psr.ic // disable interrupts and interrupt collection 904 rsm psr.i | psr.ic // disable interrupts and interrupt collection
907 mov r15=ip 905 mov r15=ip
908 } 906 }
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index b7cf651ceb14..3ead20fb6f4b 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -62,7 +62,7 @@ EXPORT_SYMBOL(__udivdi3);
62EXPORT_SYMBOL(__moddi3); 62EXPORT_SYMBOL(__moddi3);
63EXPORT_SYMBOL(__umoddi3); 63EXPORT_SYMBOL(__umoddi3);
64 64
65#if defined(CONFIG_MD_RAID5) || defined(CONFIG_MD_RAID5_MODULE) 65#if defined(CONFIG_MD_RAID456) || defined(CONFIG_MD_RAID456_MODULE)
66extern void xor_ia64_2(void); 66extern void xor_ia64_2(void);
67extern void xor_ia64_3(void); 67extern void xor_ia64_3(void);
68extern void xor_ia64_4(void); 68extern void xor_ia64_4(void);
diff --git a/arch/ia64/kernel/pal.S b/arch/ia64/kernel/pal.S
index 5018c7f2e7a8..ebaf1e685f5e 100644
--- a/arch/ia64/kernel/pal.S
+++ b/arch/ia64/kernel/pal.S
@@ -217,12 +217,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
217 .body 217 .body
218 ;; 218 ;;
219 ld8 loc2 = [loc2] // loc2 <- entry point 219 ld8 loc2 = [loc2] // loc2 <- entry point
220 mov out0 = in0 // first argument 220 mov loc3 = psr // save psr
221 mov out1 = in1 // copy arg2
222 mov out2 = in2 // copy arg3
223 mov out3 = in3 // copy arg3
224 ;;
225 mov loc3 = psr // save psr
226 ;; 221 ;;
227 mov loc4=ar.rsc // save RSE configuration 222 mov loc4=ar.rsc // save RSE configuration
228 dep.z loc2=loc2,0,61 // convert pal entry point to physical 223 dep.z loc2=loc2,0,61 // convert pal entry point to physical
@@ -236,18 +231,23 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
236 ;; 231 ;;
237 andcm r16=loc3,r16 // removes bits to clear from psr 232 andcm r16=loc3,r16 // removes bits to clear from psr
238 br.call.sptk.many rp=ia64_switch_mode_phys 233 br.call.sptk.many rp=ia64_switch_mode_phys
239.ret6: 234
235 mov out0 = in0 // first argument
236 mov out1 = in1 // copy arg2
237 mov out2 = in2 // copy arg3
238 mov out3 = in3 // copy arg3
240 mov loc5 = r19 239 mov loc5 = r19
241 mov loc6 = r20 240 mov loc6 = r20
241
242 br.call.sptk.many rp=b7 // now make the call 242 br.call.sptk.many rp=b7 // now make the call
243.ret7: 243
244 mov ar.rsc=0 // put RSE in enforced lazy, LE mode 244 mov ar.rsc=0 // put RSE in enforced lazy, LE mode
245 mov r16=loc3 // r16= original psr 245 mov r16=loc3 // r16= original psr
246 mov r19=loc5 246 mov r19=loc5
247 mov r20=loc6 247 mov r20=loc6
248 br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode 248 br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
249 249
250.ret8: mov psr.l = loc3 // restore init PSR 250 mov psr.l = loc3 // restore init PSR
251 mov ar.pfs = loc1 251 mov ar.pfs = loc1
252 mov rp = loc0 252 mov rp = loc0
253 ;; 253 ;;
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index ab5b52413e91..0b546e2b36ac 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -566,29 +566,23 @@ version_info(char *page)
566 pal_version_u_t min_ver, cur_ver; 566 pal_version_u_t min_ver, cur_ver;
567 char *p = page; 567 char *p = page;
568 568
569 /* The PAL_VERSION call is advertised as being able to support 569 if (ia64_pal_version(&min_ver, &cur_ver) != 0)
570 * both physical and virtual mode calls. This seems to be a documentation 570 return 0;
571 * bug rather than firmware bug. In fact, it does only support physical mode.
572 * So now the code reflects this fact and the pal_version() has been updated
573 * accordingly.
574 */
575 if (ia64_pal_version(&min_ver, &cur_ver) != 0) return 0;
576 571
577 p += sprintf(p, 572 p += sprintf(p,
578 "PAL_vendor : 0x%02x (min=0x%02x)\n" 573 "PAL_vendor : 0x%02x (min=0x%02x)\n"
579 "PAL_A : %x.%x.%x (min=%x.%x.%x)\n" 574 "PAL_A : %02x.%02x (min=%02x.%02x)\n"
580 "PAL_B : %x.%x.%x (min=%x.%x.%x)\n", 575 "PAL_B : %02x.%02x (min=%02x.%02x)\n",
581 cur_ver.pal_version_s.pv_pal_vendor, min_ver.pal_version_s.pv_pal_vendor, 576 cur_ver.pal_version_s.pv_pal_vendor,
582 577 min_ver.pal_version_s.pv_pal_vendor,
583 cur_ver.pal_version_s.pv_pal_a_model>>4, 578 cur_ver.pal_version_s.pv_pal_a_model,
584 cur_ver.pal_version_s.pv_pal_a_model&0xf, cur_ver.pal_version_s.pv_pal_a_rev, 579 cur_ver.pal_version_s.pv_pal_a_rev,
585 min_ver.pal_version_s.pv_pal_a_model>>4, 580 min_ver.pal_version_s.pv_pal_a_model,
586 min_ver.pal_version_s.pv_pal_a_model&0xf, min_ver.pal_version_s.pv_pal_a_rev, 581 min_ver.pal_version_s.pv_pal_a_rev,
587 582 cur_ver.pal_version_s.pv_pal_b_model,
588 cur_ver.pal_version_s.pv_pal_b_model>>4, 583 cur_ver.pal_version_s.pv_pal_b_rev,
589 cur_ver.pal_version_s.pv_pal_b_model&0xf, cur_ver.pal_version_s.pv_pal_b_rev, 584 min_ver.pal_version_s.pv_pal_b_model,
590 min_ver.pal_version_s.pv_pal_b_model>>4, 585 min_ver.pal_version_s.pv_pal_b_rev);
591 min_ver.pal_version_s.pv_pal_b_model&0xf, min_ver.pal_version_s.pv_pal_b_rev);
592 return p - page; 586 return p - page;
593} 587}
594 588
diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile
index d8536a2c22a9..38fa6e49e791 100644
--- a/arch/ia64/lib/Makefile
+++ b/arch/ia64/lib/Makefile
@@ -14,7 +14,7 @@ lib-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
14lib-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o 14lib-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o
15lib-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o 15lib-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o
16lib-$(CONFIG_PERFMON) += carta_random.o 16lib-$(CONFIG_PERFMON) += carta_random.o
17lib-$(CONFIG_MD_RAID5) += xor.o 17lib-$(CONFIG_MD_RAID456) += xor.o
18 18
19AFLAGS___divdi3.o = 19AFLAGS___divdi3.o =
20AFLAGS___udivdi3.o = -DUNSIGNED 20AFLAGS___udivdi3.o = -DUNSIGNED
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 2a88cdd6d924..e004143ba86b 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -27,6 +27,7 @@
27 27
28#ifdef CONFIG_VIRTUAL_MEM_MAP 28#ifdef CONFIG_VIRTUAL_MEM_MAP
29static unsigned long num_dma_physpages; 29static unsigned long num_dma_physpages;
30static unsigned long max_gap;
30#endif 31#endif
31 32
32/** 33/**
@@ -45,9 +46,15 @@ show_mem (void)
45 46
46 printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); 47 printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
47 i = max_mapnr; 48 i = max_mapnr;
48 while (i-- > 0) { 49 for (i = 0; i < max_mapnr; i++) {
49 if (!pfn_valid(i)) 50 if (!pfn_valid(i)) {
51#ifdef CONFIG_VIRTUAL_MEM_MAP
52 if (max_gap < LARGE_GAP)
53 continue;
54 i = vmemmap_find_next_valid_pfn(0, i) - 1;
55#endif
50 continue; 56 continue;
57 }
51 total++; 58 total++;
52 if (PageReserved(mem_map+i)) 59 if (PageReserved(mem_map+i))
53 reserved++; 60 reserved++;
@@ -234,7 +241,6 @@ paging_init (void)
234 unsigned long zones_size[MAX_NR_ZONES]; 241 unsigned long zones_size[MAX_NR_ZONES];
235#ifdef CONFIG_VIRTUAL_MEM_MAP 242#ifdef CONFIG_VIRTUAL_MEM_MAP
236 unsigned long zholes_size[MAX_NR_ZONES]; 243 unsigned long zholes_size[MAX_NR_ZONES];
237 unsigned long max_gap;
238#endif 244#endif
239 245
240 /* initialize mem_map[] */ 246 /* initialize mem_map[] */
@@ -266,7 +272,6 @@ paging_init (void)
266 } 272 }
267 } 273 }
268 274
269 max_gap = 0;
270 efi_memmap_walk(find_largest_hole, (u64 *)&max_gap); 275 efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
271 if (max_gap < LARGE_GAP) { 276 if (max_gap < LARGE_GAP) {
272 vmem_map = (struct page *) 0; 277 vmem_map = (struct page *) 0;
@@ -277,7 +282,8 @@ paging_init (void)
277 282
278 /* allocate virtual_mem_map */ 283 /* allocate virtual_mem_map */
279 284
280 map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page)); 285 map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
286 sizeof(struct page));
281 vmalloc_end -= map_size; 287 vmalloc_end -= map_size;
282 vmem_map = (struct page *) vmalloc_end; 288 vmem_map = (struct page *) vmalloc_end;
283 efi_memmap_walk(create_mem_map_page_table, NULL); 289 efi_memmap_walk(create_mem_map_page_table, NULL);
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 99bd9e30db96..d260bffa01ab 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -534,68 +534,6 @@ void __cpuinit *per_cpu_init(void)
534} 534}
535#endif /* CONFIG_SMP */ 535#endif /* CONFIG_SMP */
536 536
537#ifdef CONFIG_VIRTUAL_MEM_MAP
538static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
539{
540 unsigned long end_address, hole_next_pfn;
541 unsigned long stop_address;
542
543 end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
544 end_address = PAGE_ALIGN(end_address);
545
546 stop_address = (unsigned long) &vmem_map[
547 pgdat->node_start_pfn + pgdat->node_spanned_pages];
548
549 do {
550 pgd_t *pgd;
551 pud_t *pud;
552 pmd_t *pmd;
553 pte_t *pte;
554
555 pgd = pgd_offset_k(end_address);
556 if (pgd_none(*pgd)) {
557 end_address += PGDIR_SIZE;
558 continue;
559 }
560
561 pud = pud_offset(pgd, end_address);
562 if (pud_none(*pud)) {
563 end_address += PUD_SIZE;
564 continue;
565 }
566
567 pmd = pmd_offset(pud, end_address);
568 if (pmd_none(*pmd)) {
569 end_address += PMD_SIZE;
570 continue;
571 }
572
573 pte = pte_offset_kernel(pmd, end_address);
574retry_pte:
575 if (pte_none(*pte)) {
576 end_address += PAGE_SIZE;
577 pte++;
578 if ((end_address < stop_address) &&
579 (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
580 goto retry_pte;
581 continue;
582 }
583 /* Found next valid vmem_map page */
584 break;
585 } while (end_address < stop_address);
586
587 end_address = min(end_address, stop_address);
588 end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
589 hole_next_pfn = end_address / sizeof(struct page);
590 return hole_next_pfn - pgdat->node_start_pfn;
591}
592#else
593static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
594{
595 return i + 1;
596}
597#endif
598
599/** 537/**
600 * show_mem - give short summary of memory stats 538 * show_mem - give short summary of memory stats
601 * 539 *
@@ -625,7 +563,8 @@ void show_mem(void)
625 if (pfn_valid(pgdat->node_start_pfn + i)) 563 if (pfn_valid(pgdat->node_start_pfn + i))
626 page = pfn_to_page(pgdat->node_start_pfn + i); 564 page = pfn_to_page(pgdat->node_start_pfn + i);
627 else { 565 else {
628 i = find_next_valid_pfn_for_pgdat(pgdat, i) - 1; 566 i = vmemmap_find_next_valid_pfn(pgdat->node_id,
567 i) - 1;
629 continue; 568 continue;
630 } 569 }
631 if (PageReserved(page)) 570 if (PageReserved(page))
@@ -751,7 +690,8 @@ void __init paging_init(void)
751 efi_memmap_walk(filter_rsvd_memory, count_node_pages); 690 efi_memmap_walk(filter_rsvd_memory, count_node_pages);
752 691
753#ifdef CONFIG_VIRTUAL_MEM_MAP 692#ifdef CONFIG_VIRTUAL_MEM_MAP
754 vmalloc_end -= PAGE_ALIGN(max_low_pfn * sizeof(struct page)); 693 vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
694 sizeof(struct page));
755 vmem_map = (struct page *) vmalloc_end; 695 vmem_map = (struct page *) vmalloc_end;
756 efi_memmap_walk(create_mem_map_page_table, NULL); 696 efi_memmap_walk(create_mem_map_page_table, NULL);
757 printk("Virtual mem_map starts at 0x%p\n", vmem_map); 697 printk("Virtual mem_map starts at 0x%p\n", vmem_map);
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 2f50c064513c..30617ccb4f7e 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -415,6 +415,61 @@ ia64_mmu_init (void *my_cpu_data)
415} 415}
416 416
417#ifdef CONFIG_VIRTUAL_MEM_MAP 417#ifdef CONFIG_VIRTUAL_MEM_MAP
418int vmemmap_find_next_valid_pfn(int node, int i)
419{
420 unsigned long end_address, hole_next_pfn;
421 unsigned long stop_address;
422 pg_data_t *pgdat = NODE_DATA(node);
423
424 end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
425 end_address = PAGE_ALIGN(end_address);
426
427 stop_address = (unsigned long) &vmem_map[
428 pgdat->node_start_pfn + pgdat->node_spanned_pages];
429
430 do {
431 pgd_t *pgd;
432 pud_t *pud;
433 pmd_t *pmd;
434 pte_t *pte;
435
436 pgd = pgd_offset_k(end_address);
437 if (pgd_none(*pgd)) {
438 end_address += PGDIR_SIZE;
439 continue;
440 }
441
442 pud = pud_offset(pgd, end_address);
443 if (pud_none(*pud)) {
444 end_address += PUD_SIZE;
445 continue;
446 }
447
448 pmd = pmd_offset(pud, end_address);
449 if (pmd_none(*pmd)) {
450 end_address += PMD_SIZE;
451 continue;
452 }
453
454 pte = pte_offset_kernel(pmd, end_address);
455retry_pte:
456 if (pte_none(*pte)) {
457 end_address += PAGE_SIZE;
458 pte++;
459 if ((end_address < stop_address) &&
460 (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
461 goto retry_pte;
462 continue;
463 }
464 /* Found next valid vmem_map page */
465 break;
466 } while (end_address < stop_address);
467
468 end_address = min(end_address, stop_address);
469 end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
470 hole_next_pfn = end_address / sizeof(struct page);
471 return hole_next_pfn - pgdat->node_start_pfn;
472}
418 473
419int __init 474int __init
420create_mem_map_page_table (u64 start, u64 end, void *arg) 475create_mem_map_page_table (u64 start, u64 end, void *arg)
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 07bd02b6c372..4280c074d64e 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -32,7 +32,7 @@ ioremap (unsigned long offset, unsigned long size)
32 */ 32 */
33 attr = kern_mem_attribute(offset, size); 33 attr = kern_mem_attribute(offset, size);
34 if (attr & EFI_MEMORY_WB) 34 if (attr & EFI_MEMORY_WB)
35 return phys_to_virt(offset); 35 return (void __iomem *) phys_to_virt(offset);
36 else if (attr & EFI_MEMORY_UC) 36 else if (attr & EFI_MEMORY_UC)
37 return __ioremap(offset, size); 37 return __ioremap(offset, size);
38 38
@@ -43,7 +43,7 @@ ioremap (unsigned long offset, unsigned long size)
43 gran_base = GRANULEROUNDDOWN(offset); 43 gran_base = GRANULEROUNDDOWN(offset);
44 gran_size = GRANULEROUNDUP(offset + size) - gran_base; 44 gran_size = GRANULEROUNDUP(offset + size) - gran_base;
45 if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB) 45 if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB)
46 return phys_to_virt(offset); 46 return (void __iomem *) phys_to_virt(offset);
47 47
48 return __ioremap(offset, size); 48 return __ioremap(offset, size);
49} 49}
@@ -53,7 +53,7 @@ void __iomem *
53ioremap_nocache (unsigned long offset, unsigned long size) 53ioremap_nocache (unsigned long offset, unsigned long size)
54{ 54{
55 if (kern_mem_attribute(offset, size) & EFI_MEMORY_WB) 55 if (kern_mem_attribute(offset, size) & EFI_MEMORY_WB)
56 return 0; 56 return NULL;
57 57
58 return __ioremap(offset, size); 58 return __ioremap(offset, size);
59} 59}
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index 99b123a6421a..5e8e59efb347 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -480,7 +480,7 @@ xpc_activating(void *__partid)
480 partid_t partid = (u64) __partid; 480 partid_t partid = (u64) __partid;
481 struct xpc_partition *part = &xpc_partitions[partid]; 481 struct xpc_partition *part = &xpc_partitions[partid];
482 unsigned long irq_flags; 482 unsigned long irq_flags;
483 struct sched_param param = { sched_priority: MAX_RT_PRIO - 1 }; 483 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
484 int ret; 484 int ret;
485 485
486 486
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index 17cd34284886..af7171adcd2c 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -74,7 +74,7 @@ tioce_mmr_war_pre(struct tioce_kernel *kern, void *mmr_addr)
74 else 74 else
75 mmr_war_offset = 0x158; 75 mmr_war_offset = 0x158;
76 76
77 readq_relaxed((void *)(mmr_base + mmr_war_offset)); 77 readq_relaxed((void __iomem *)(mmr_base + mmr_war_offset));
78 } 78 }
79} 79}
80 80
@@ -92,8 +92,8 @@ tioce_mmr_war_post(struct tioce_kernel *kern, void *mmr_addr)
92 92
93 if (mmr_offset < 0x45000) { 93 if (mmr_offset < 0x45000) {
94 if (mmr_offset == 0x100) 94 if (mmr_offset == 0x100)
95 readq_relaxed((void *)(mmr_base + 0x38)); 95 readq_relaxed((void __iomem *)(mmr_base + 0x38));
96 readq_relaxed((void *)(mmr_base + 0xb050)); 96 readq_relaxed((void __iomem *)(mmr_base + 0xb050));
97 } 97 }
98} 98}
99 99
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 1c0a39d8b04e..578b99b71d9c 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -58,8 +58,8 @@ struct dock_dependent_device {
58}; 58};
59 59
60#define DOCK_DOCKING 0x00000001 60#define DOCK_DOCKING 0x00000001
61#define DOCK_EVENT KOBJ_DOCK 61#define DOCK_EVENT 3
62#define UNDOCK_EVENT KOBJ_UNDOCK 62#define UNDOCK_EVENT 2
63 63
64static struct dock_station *dock_station; 64static struct dock_station *dock_station;
65 65
@@ -322,11 +322,10 @@ static void hotplug_dock_devices(struct dock_station *ds, u32 event)
322 322
323static void dock_event(struct dock_station *ds, u32 event, int num) 323static void dock_event(struct dock_station *ds, u32 event, int num)
324{ 324{
325 struct acpi_device *device; 325 /*
326 326 * we don't do events until someone tells me that
327 device = dock_create_acpi_device(ds->handle); 327 * they would like to have them.
328 if (device) 328 */
329 kobject_uevent(&device->kobj, num);
330} 329}
331 330
332/** 331/**
diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c
index afc6eda602f7..07e0b75f2338 100644
--- a/drivers/char/snsc.c
+++ b/drivers/char/snsc.c
@@ -374,7 +374,12 @@ scdrv_init(void)
374 struct sysctl_data_s *scd; 374 struct sysctl_data_s *scd;
375 void *salbuf; 375 void *salbuf;
376 dev_t first_dev, dev; 376 dev_t first_dev, dev;
377 nasid_t event_nasid = ia64_sn_get_console_nasid(); 377 nasid_t event_nasid;
378
379 if (!ia64_platform_is("sn2"))
380 return -ENODEV;
381
382 event_nasid = ia64_sn_get_console_nasid();
378 383
379 if (alloc_chrdev_region(&first_dev, 0, num_cnodes, 384 if (alloc_chrdev_region(&first_dev, 0, num_cnodes,
380 SYSCTL_BASENAME) < 0) { 385 SYSCTL_BASENAME) < 0) {
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index bc1088d9b379..b3df613ae4ec 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -284,39 +284,69 @@ EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
284 * SYSFS INTERFACE * 284 * SYSFS INTERFACE *
285 *********************************************************************/ 285 *********************************************************************/
286 286
287static struct cpufreq_governor *__find_governor(const char *str_governor)
288{
289 struct cpufreq_governor *t;
290
291 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
292 if (!strnicmp(str_governor,t->name,CPUFREQ_NAME_LEN))
293 return t;
294
295 return NULL;
296}
297
287/** 298/**
288 * cpufreq_parse_governor - parse a governor string 299 * cpufreq_parse_governor - parse a governor string
289 */ 300 */
290static int cpufreq_parse_governor (char *str_governor, unsigned int *policy, 301static int cpufreq_parse_governor (char *str_governor, unsigned int *policy,
291 struct cpufreq_governor **governor) 302 struct cpufreq_governor **governor)
292{ 303{
304 int err = -EINVAL;
305
293 if (!cpufreq_driver) 306 if (!cpufreq_driver)
294 return -EINVAL; 307 goto out;
308
295 if (cpufreq_driver->setpolicy) { 309 if (cpufreq_driver->setpolicy) {
296 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) { 310 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
297 *policy = CPUFREQ_POLICY_PERFORMANCE; 311 *policy = CPUFREQ_POLICY_PERFORMANCE;
298 return 0; 312 err = 0;
299 } else if (!strnicmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) { 313 } else if (!strnicmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
300 *policy = CPUFREQ_POLICY_POWERSAVE; 314 *policy = CPUFREQ_POLICY_POWERSAVE;
301 return 0; 315 err = 0;
302 } 316 }
303 return -EINVAL; 317 } else if (cpufreq_driver->target) {
304 } else {
305 struct cpufreq_governor *t; 318 struct cpufreq_governor *t;
319
306 mutex_lock(&cpufreq_governor_mutex); 320 mutex_lock(&cpufreq_governor_mutex);
307 if (!cpufreq_driver || !cpufreq_driver->target) 321
308 goto out; 322 t = __find_governor(str_governor);
309 list_for_each_entry(t, &cpufreq_governor_list, governor_list) { 323
310 if (!strnicmp(str_governor,t->name,CPUFREQ_NAME_LEN)) { 324 if (t == NULL) {
311 *governor = t; 325 char *name = kasprintf(GFP_KERNEL, "cpufreq_%s", str_governor);
326
327 if (name) {
328 int ret;
329
312 mutex_unlock(&cpufreq_governor_mutex); 330 mutex_unlock(&cpufreq_governor_mutex);
313 return 0; 331 ret = request_module(name);
332 mutex_lock(&cpufreq_governor_mutex);
333
334 if (ret == 0)
335 t = __find_governor(str_governor);
314 } 336 }
337
338 kfree(name);
315 } 339 }
316out: 340
341 if (t != NULL) {
342 *governor = t;
343 err = 0;
344 }
345
317 mutex_unlock(&cpufreq_governor_mutex); 346 mutex_unlock(&cpufreq_governor_mutex);
318 } 347 }
319 return -EINVAL; 348 out:
349 return err;
320} 350}
321 351
322 352
@@ -1265,23 +1295,21 @@ static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
1265 1295
1266int cpufreq_register_governor(struct cpufreq_governor *governor) 1296int cpufreq_register_governor(struct cpufreq_governor *governor)
1267{ 1297{
1268 struct cpufreq_governor *t; 1298 int err;
1269 1299
1270 if (!governor) 1300 if (!governor)
1271 return -EINVAL; 1301 return -EINVAL;
1272 1302
1273 mutex_lock(&cpufreq_governor_mutex); 1303 mutex_lock(&cpufreq_governor_mutex);
1274 1304
1275 list_for_each_entry(t, &cpufreq_governor_list, governor_list) { 1305 err = -EBUSY;
1276 if (!strnicmp(governor->name,t->name,CPUFREQ_NAME_LEN)) { 1306 if (__find_governor(governor->name) == NULL) {
1277 mutex_unlock(&cpufreq_governor_mutex); 1307 err = 0;
1278 return -EBUSY; 1308 list_add(&governor->governor_list, &cpufreq_governor_list);
1279 }
1280 } 1309 }
1281 list_add(&governor->governor_list, &cpufreq_governor_list);
1282 1310
1283 mutex_unlock(&cpufreq_governor_mutex); 1311 mutex_unlock(&cpufreq_governor_mutex);
1284 return 0; 1312 return err;
1285} 1313}
1286EXPORT_SYMBOL_GPL(cpufreq_register_governor); 1314EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1287 1315
@@ -1343,6 +1371,11 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_poli
1343 1371
1344 memcpy(&policy->cpuinfo, &data->cpuinfo, sizeof(struct cpufreq_cpuinfo)); 1372 memcpy(&policy->cpuinfo, &data->cpuinfo, sizeof(struct cpufreq_cpuinfo));
1345 1373
1374 if (policy->min > data->min && policy->min > policy->max) {
1375 ret = -EINVAL;
1376 goto error_out;
1377 }
1378
1346 /* verify the cpu speed can be set within this limit */ 1379 /* verify the cpu speed can be set within this limit */
1347 ret = cpufreq_driver->verify(policy); 1380 ret = cpufreq_driver->verify(policy);
1348 if (ret) 1381 if (ret)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index f85c97f7500a..0de335b7bfc2 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -975,8 +975,10 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
975 975
976 cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv-> 976 cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
977 id.local_id); 977 id.local_id);
978 if (IS_ERR(cm_id_priv->timewait_info)) 978 if (IS_ERR(cm_id_priv->timewait_info)) {
979 ret = PTR_ERR(cm_id_priv->timewait_info);
979 goto out; 980 goto out;
981 }
980 982
981 ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av); 983 ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
982 if (ret) 984 if (ret)
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index bb9bee56a824..102a59c033ff 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -42,6 +42,7 @@
42#include <linux/kref.h> 42#include <linux/kref.h>
43#include <linux/idr.h> 43#include <linux/idr.h>
44#include <linux/mutex.h> 44#include <linux/mutex.h>
45#include <linux/completion.h>
45 46
46#include <rdma/ib_verbs.h> 47#include <rdma/ib_verbs.h>
47#include <rdma/ib_user_verbs.h> 48#include <rdma/ib_user_verbs.h>
@@ -69,6 +70,7 @@
69 70
70struct ib_uverbs_device { 71struct ib_uverbs_device {
71 struct kref ref; 72 struct kref ref;
73 struct completion comp;
72 int devnum; 74 int devnum;
73 struct cdev *dev; 75 struct cdev *dev;
74 struct class_device *class_dev; 76 struct class_device *class_dev;
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index e725cccc7cde..4e16314e8e6d 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -122,7 +122,7 @@ static void ib_uverbs_release_dev(struct kref *ref)
122 struct ib_uverbs_device *dev = 122 struct ib_uverbs_device *dev =
123 container_of(ref, struct ib_uverbs_device, ref); 123 container_of(ref, struct ib_uverbs_device, ref);
124 124
125 kfree(dev); 125 complete(&dev->comp);
126} 126}
127 127
128void ib_uverbs_release_ucq(struct ib_uverbs_file *file, 128void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
@@ -740,6 +740,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
740 return; 740 return;
741 741
742 kref_init(&uverbs_dev->ref); 742 kref_init(&uverbs_dev->ref);
743 init_completion(&uverbs_dev->comp);
743 744
744 spin_lock(&map_lock); 745 spin_lock(&map_lock);
745 uverbs_dev->devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES); 746 uverbs_dev->devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
@@ -793,6 +794,8 @@ err_cdev:
793 794
794err: 795err:
795 kref_put(&uverbs_dev->ref, ib_uverbs_release_dev); 796 kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
797 wait_for_completion(&uverbs_dev->comp);
798 kfree(uverbs_dev);
796 return; 799 return;
797} 800}
798 801
@@ -812,7 +815,10 @@ static void ib_uverbs_remove_one(struct ib_device *device)
812 spin_unlock(&map_lock); 815 spin_unlock(&map_lock);
813 816
814 clear_bit(uverbs_dev->devnum, dev_map); 817 clear_bit(uverbs_dev->devnum, dev_map);
818
815 kref_put(&uverbs_dev->ref, ib_uverbs_release_dev); 819 kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
820 wait_for_completion(&uverbs_dev->comp);
821 kfree(uverbs_dev);
816} 822}
817 823
818static int uverbs_event_get_sb(struct file_system_type *fs_type, int flags, 824static int uverbs_event_get_sb(struct file_system_type *fs_type, int flags,
diff --git a/drivers/infiniband/hw/mthca/mthca_allocator.c b/drivers/infiniband/hw/mthca/mthca_allocator.c
index 9ba3211cef7c..25157f57a6d0 100644
--- a/drivers/infiniband/hw/mthca/mthca_allocator.c
+++ b/drivers/infiniband/hw/mthca/mthca_allocator.c
@@ -108,14 +108,15 @@ void mthca_alloc_cleanup(struct mthca_alloc *alloc)
108 * serialize access to the array. 108 * serialize access to the array.
109 */ 109 */
110 110
111#define MTHCA_ARRAY_MASK (PAGE_SIZE / sizeof (void *) - 1)
112
111void *mthca_array_get(struct mthca_array *array, int index) 113void *mthca_array_get(struct mthca_array *array, int index)
112{ 114{
113 int p = (index * sizeof (void *)) >> PAGE_SHIFT; 115 int p = (index * sizeof (void *)) >> PAGE_SHIFT;
114 116
115 if (array->page_list[p].page) { 117 if (array->page_list[p].page)
116 int i = index & (PAGE_SIZE / sizeof (void *) - 1); 118 return array->page_list[p].page[index & MTHCA_ARRAY_MASK];
117 return array->page_list[p].page[i]; 119 else
118 } else
119 return NULL; 120 return NULL;
120} 121}
121 122
@@ -130,8 +131,7 @@ int mthca_array_set(struct mthca_array *array, int index, void *value)
130 if (!array->page_list[p].page) 131 if (!array->page_list[p].page)
131 return -ENOMEM; 132 return -ENOMEM;
132 133
133 array->page_list[p].page[index & (PAGE_SIZE / sizeof (void *) - 1)] = 134 array->page_list[p].page[index & MTHCA_ARRAY_MASK] = value;
134 value;
135 ++array->page_list[p].used; 135 ++array->page_list[p].used;
136 136
137 return 0; 137 return 0;
@@ -144,7 +144,8 @@ void mthca_array_clear(struct mthca_array *array, int index)
144 if (--array->page_list[p].used == 0) { 144 if (--array->page_list[p].used == 0) {
145 free_page((unsigned long) array->page_list[p].page); 145 free_page((unsigned long) array->page_list[p].page);
146 array->page_list[p].page = NULL; 146 array->page_list[p].page = NULL;
147 } 147 } else
148 array->page_list[p].page[index & MTHCA_ARRAY_MASK] = NULL;
148 149
149 if (array->page_list[p].used < 0) 150 if (array->page_list[p].used < 0)
150 pr_debug("Array %p index %d page %d with ref count %d < 0\n", 151 pr_debug("Array %p index %d page %d with ref count %d < 0\n",
diff --git a/drivers/infiniband/ulp/ipoib/Kconfig b/drivers/infiniband/ulp/ipoib/Kconfig
index 13d6d01c72c0..d74653d7de1c 100644
--- a/drivers/infiniband/ulp/ipoib/Kconfig
+++ b/drivers/infiniband/ulp/ipoib/Kconfig
@@ -6,8 +6,7 @@ config INFINIBAND_IPOIB
6 transports IP packets over InfiniBand so you can use your IB 6 transports IP packets over InfiniBand so you can use your IB
7 device as a fancy NIC. 7 device as a fancy NIC.
8 8
9 The IPoIB protocol is defined by the IETF ipoib working 9 See Documentation/infiniband/ipoib.txt for more information
10 group: <http://www.ietf.org/html.charters/ipoib-charter.html>.
11 10
12config INFINIBAND_IPOIB_DEBUG 11config INFINIBAND_IPOIB_DEBUG
13 bool "IP-over-InfiniBand debugging" if EMBEDDED 12 bool "IP-over-InfiniBand debugging" if EMBEDDED
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 8f472e7113b4..8257d5a2c8f8 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -77,6 +77,14 @@ MODULE_PARM_DESC(topspin_workarounds,
77 77
78static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad }; 78static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
79 79
80static int mellanox_workarounds = 1;
81
82module_param(mellanox_workarounds, int, 0444);
83MODULE_PARM_DESC(mellanox_workarounds,
84 "Enable workarounds for Mellanox SRP target bugs if != 0");
85
86static const u8 mellanox_oui[3] = { 0x00, 0x02, 0xc9 };
87
80static void srp_add_one(struct ib_device *device); 88static void srp_add_one(struct ib_device *device);
81static void srp_remove_one(struct ib_device *device); 89static void srp_remove_one(struct ib_device *device);
82static void srp_completion(struct ib_cq *cq, void *target_ptr); 90static void srp_completion(struct ib_cq *cq, void *target_ptr);
@@ -526,8 +534,10 @@ static int srp_reconnect_target(struct srp_target_port *target)
526 while (ib_poll_cq(target->cq, 1, &wc) > 0) 534 while (ib_poll_cq(target->cq, 1, &wc) > 0)
527 ; /* nothing */ 535 ; /* nothing */
528 536
537 spin_lock_irq(target->scsi_host->host_lock);
529 list_for_each_entry_safe(req, tmp, &target->req_queue, list) 538 list_for_each_entry_safe(req, tmp, &target->req_queue, list)
530 srp_reset_req(target, req); 539 srp_reset_req(target, req);
540 spin_unlock_irq(target->scsi_host->host_lock);
531 541
532 target->rx_head = 0; 542 target->rx_head = 0;
533 target->tx_head = 0; 543 target->tx_head = 0;
@@ -567,7 +577,7 @@ err:
567 return ret; 577 return ret;
568} 578}
569 579
570static int srp_map_fmr(struct srp_device *dev, struct scatterlist *scat, 580static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
571 int sg_cnt, struct srp_request *req, 581 int sg_cnt, struct srp_request *req,
572 struct srp_direct_buf *buf) 582 struct srp_direct_buf *buf)
573{ 583{
@@ -577,10 +587,15 @@ static int srp_map_fmr(struct srp_device *dev, struct scatterlist *scat,
577 int page_cnt; 587 int page_cnt;
578 int i, j; 588 int i, j;
579 int ret; 589 int ret;
590 struct srp_device *dev = target->srp_host->dev;
580 591
581 if (!dev->fmr_pool) 592 if (!dev->fmr_pool)
582 return -ENODEV; 593 return -ENODEV;
583 594
595 if ((sg_dma_address(&scat[0]) & ~dev->fmr_page_mask) &&
596 mellanox_workarounds && !memcmp(&target->ioc_guid, mellanox_oui, 3))
597 return -EINVAL;
598
584 len = page_cnt = 0; 599 len = page_cnt = 0;
585 for (i = 0; i < sg_cnt; ++i) { 600 for (i = 0; i < sg_cnt; ++i) {
586 if (sg_dma_address(&scat[i]) & ~dev->fmr_page_mask) { 601 if (sg_dma_address(&scat[i]) & ~dev->fmr_page_mask) {
@@ -683,7 +698,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
683 buf->va = cpu_to_be64(sg_dma_address(scat)); 698 buf->va = cpu_to_be64(sg_dma_address(scat));
684 buf->key = cpu_to_be32(target->srp_host->dev->mr->rkey); 699 buf->key = cpu_to_be32(target->srp_host->dev->mr->rkey);
685 buf->len = cpu_to_be32(sg_dma_len(scat)); 700 buf->len = cpu_to_be32(sg_dma_len(scat));
686 } else if (srp_map_fmr(target->srp_host->dev, scat, count, req, 701 } else if (srp_map_fmr(target, scat, count, req,
687 (void *) cmd->add_data)) { 702 (void *) cmd->add_data)) {
688 /* 703 /*
689 * FMR mapping failed, and the scatterlist has more 704 * FMR mapping failed, and the scatterlist has more
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index c3e52c806b13..06440a86baef 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -177,6 +177,7 @@ struct myri10ge_priv {
177 struct work_struct watchdog_work; 177 struct work_struct watchdog_work;
178 struct timer_list watchdog_timer; 178 struct timer_list watchdog_timer;
179 int watchdog_tx_done; 179 int watchdog_tx_done;
180 int watchdog_tx_req;
180 int watchdog_resets; 181 int watchdog_resets;
181 int tx_linearized; 182 int tx_linearized;
182 int pause; 183 int pause;
@@ -448,6 +449,7 @@ static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size)
448 struct mcp_gen_header *hdr; 449 struct mcp_gen_header *hdr;
449 size_t hdr_offset; 450 size_t hdr_offset;
450 int status; 451 int status;
452 unsigned i;
451 453
452 if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) { 454 if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) {
453 dev_err(dev, "Unable to load %s firmware image via hotplug\n", 455 dev_err(dev, "Unable to load %s firmware image via hotplug\n",
@@ -479,18 +481,12 @@ static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size)
479 goto abort_with_fw; 481 goto abort_with_fw;
480 482
481 crc = crc32(~0, fw->data, fw->size); 483 crc = crc32(~0, fw->data, fw->size);
482 if (mgp->tx.boundary == 2048) { 484 for (i = 0; i < fw->size; i += 256) {
483 /* Avoid PCI burst on chipset with unaligned completions. */ 485 myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET + i,
484 int i; 486 fw->data + i,
485 __iomem u32 *ptr = (__iomem u32 *) (mgp->sram + 487 min(256U, (unsigned)(fw->size - i)));
486 MYRI10GE_FW_OFFSET); 488 mb();
487 for (i = 0; i < fw->size / 4; i++) { 489 readb(mgp->sram);
488 __raw_writel(((u32 *) fw->data)[i], ptr + i);
489 wmb();
490 }
491 } else {
492 myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET, fw->data,
493 fw->size);
494 } 490 }
495 /* corruption checking is good for parity recovery and buggy chipset */ 491 /* corruption checking is good for parity recovery and buggy chipset */
496 memcpy_fromio(fw->data, mgp->sram + MYRI10GE_FW_OFFSET, fw->size); 492 memcpy_fromio(fw->data, mgp->sram + MYRI10GE_FW_OFFSET, fw->size);
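With the hunk above, the firmware image is always written to the adapter in 256-byte chunks, with a memory barrier and a read-back after each chunk, instead of choosing between a word-by-word loop and one large copy based on the Tx boundary. A user-space sketch of the same chunking arithmetic; plain memcpy stands in for myri10ge_pio_copy(), and the barrier/read-back are only noted in a comment:

    #include <stdio.h>
    #include <string.h>

    static void copy_in_chunks(unsigned char *dst, const unsigned char *src,
                               size_t size)
    {
        size_t i, chunk;

        for (i = 0; i < size; i += 256) {
            chunk = (size - i < 256) ? size - i : 256;  /* min(256, remaining) */
            memcpy(dst + i, src + i, chunk);
            /* The driver follows each chunk with mb() and a readb() of the
             * target aperture so the posted writes are flushed in order. */
        }
    }

    int main(void)
    {
        unsigned char src[1000], dst[1000];

        memset(src, 0xab, sizeof(src));
        copy_in_chunks(dst, src, sizeof(src));
        printf("copy matches: %d\n", !memcmp(dst, src, sizeof(src)));
        return 0;
    }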
@@ -2547,7 +2543,8 @@ static void myri10ge_watchdog_timer(unsigned long arg)
2547 2543
2548 mgp = (struct myri10ge_priv *)arg; 2544 mgp = (struct myri10ge_priv *)arg;
2549 if (mgp->tx.req != mgp->tx.done && 2545 if (mgp->tx.req != mgp->tx.done &&
2550 mgp->tx.done == mgp->watchdog_tx_done) 2546 mgp->tx.done == mgp->watchdog_tx_done &&
2547 mgp->watchdog_tx_req != mgp->watchdog_tx_done)
2551 /* nic seems like it might be stuck.. */ 2548 /* nic seems like it might be stuck.. */
2552 schedule_work(&mgp->watchdog_work); 2549 schedule_work(&mgp->watchdog_work);
2553 else 2550 else
@@ -2556,6 +2553,7 @@ static void myri10ge_watchdog_timer(unsigned long arg)
2556 jiffies + myri10ge_watchdog_timeout * HZ); 2553 jiffies + myri10ge_watchdog_timeout * HZ);
2557 2554
2558 mgp->watchdog_tx_done = mgp->tx.done; 2555 mgp->watchdog_tx_done = mgp->tx.done;
2556 mgp->watchdog_tx_req = mgp->tx.req;
2559} 2557}
2560 2558
2561static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 2559static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
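The new watchdog_tx_req counter lets the watchdog distinguish a genuinely stuck queue from one that was simply idle at the previous sample: work must have been outstanding last time and nothing may have completed since. A small sketch of that three-way test with made-up counter values:

    #include <stdio.h>

    static int tx_looks_stuck(unsigned int req, unsigned int done,
                              unsigned int last_done, unsigned int last_req)
    {
        return req != done &&           /* work is outstanding now          */
               done == last_done &&     /* nothing completed since last run */
               last_req != last_done;   /* and work was pending then too    */
    }

    int main(void)
    {
        printf("stuck:      %d\n", tx_looks_stuck(10, 5, 5, 10)); /* 1 */
        printf("fresh work: %d\n", tx_looks_stuck(10, 5, 5, 5));  /* 0 */
        return 0;
    }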
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 7d5c2233c252..f5aad77288f9 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -419,9 +419,8 @@ void phy_start_machine(struct phy_device *phydev,
419 419
420/* phy_stop_machine 420/* phy_stop_machine
421 * 421 *
422 * description: Stops the state machine timer, sets the state to 422 * description: Stops the state machine timer, sets the state to UP
423 * UP (unless it wasn't up yet), and then frees the interrupt, 423 * (unless it wasn't up yet). This function must be called BEFORE
424 * if it is in use. This function must be called BEFORE
425 * phy_detach. 424 * phy_detach.
426 */ 425 */
427void phy_stop_machine(struct phy_device *phydev) 426void phy_stop_machine(struct phy_device *phydev)
@@ -433,9 +432,6 @@ void phy_stop_machine(struct phy_device *phydev)
433 phydev->state = PHY_UP; 432 phydev->state = PHY_UP;
434 spin_unlock(&phydev->lock); 433 spin_unlock(&phydev->lock);
435 434
436 if (phydev->irq != PHY_POLL)
437 phy_stop_interrupts(phydev);
438
439 phydev->adjust_state = NULL; 435 phydev->adjust_state = NULL;
440} 436}
441 437
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index e1fe3a0a7b0b..132ed32bce1a 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -76,7 +76,7 @@
76#include "s2io.h" 76#include "s2io.h"
77#include "s2io-regs.h" 77#include "s2io-regs.h"
78 78
79#define DRV_VERSION "2.0.14.2" 79#define DRV_VERSION "2.0.15.2"
80 80
81/* S2io Driver name & version. */ 81/* S2io Driver name & version. */
82static char s2io_driver_name[] = "Neterion"; 82static char s2io_driver_name[] = "Neterion";
@@ -370,38 +370,50 @@ static const u64 fix_mac[] = {
370 END_SIGN 370 END_SIGN
371}; 371};
372 372
373MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
374MODULE_LICENSE("GPL");
375MODULE_VERSION(DRV_VERSION);
376
377
373/* Module Loadable parameters. */ 378/* Module Loadable parameters. */
374static unsigned int tx_fifo_num = 1; 379S2IO_PARM_INT(tx_fifo_num, 1);
375static unsigned int tx_fifo_len[MAX_TX_FIFOS] = 380S2IO_PARM_INT(rx_ring_num, 1);
376 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN}; 381
377static unsigned int rx_ring_num = 1; 382
378static unsigned int rx_ring_sz[MAX_RX_RINGS] = 383S2IO_PARM_INT(rx_ring_mode, 1);
379 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT}; 384S2IO_PARM_INT(use_continuous_tx_intrs, 1);
380static unsigned int rts_frm_len[MAX_RX_RINGS] = 385S2IO_PARM_INT(rmac_pause_time, 0x100);
381 {[0 ...(MAX_RX_RINGS - 1)] = 0 }; 386S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
382static unsigned int rx_ring_mode = 1; 387S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
383static unsigned int use_continuous_tx_intrs = 1; 388S2IO_PARM_INT(shared_splits, 0);
384static unsigned int rmac_pause_time = 0x100; 389S2IO_PARM_INT(tmac_util_period, 5);
385static unsigned int mc_pause_threshold_q0q3 = 187; 390S2IO_PARM_INT(rmac_util_period, 5);
386static unsigned int mc_pause_threshold_q4q7 = 187; 391S2IO_PARM_INT(bimodal, 0);
387static unsigned int shared_splits; 392S2IO_PARM_INT(l3l4hdr_size, 128);
388static unsigned int tmac_util_period = 5;
389static unsigned int rmac_util_period = 5;
390static unsigned int bimodal = 0;
391static unsigned int l3l4hdr_size = 128;
392#ifndef CONFIG_S2IO_NAPI
393static unsigned int indicate_max_pkts;
394#endif
395/* Frequency of Rx desc syncs expressed as power of 2 */ 393/* Frequency of Rx desc syncs expressed as power of 2 */
396static unsigned int rxsync_frequency = 3; 394S2IO_PARM_INT(rxsync_frequency, 3);
397/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */ 395/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
398static unsigned int intr_type = 0; 396S2IO_PARM_INT(intr_type, 0);
399/* Large receive offload feature */ 397/* Large receive offload feature */
400static unsigned int lro = 0; 398S2IO_PARM_INT(lro, 0);
401/* Max pkts to be aggregated by LRO at one time. If not specified, 399/* Max pkts to be aggregated by LRO at one time. If not specified,
402 * aggregation happens until we hit max IP pkt size(64K) 400 * aggregation happens until we hit max IP pkt size(64K)
403 */ 401 */
404static unsigned int lro_max_pkts = 0xFFFF; 402S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
403#ifndef CONFIG_S2IO_NAPI
404S2IO_PARM_INT(indicate_max_pkts, 0);
405#endif
406
407static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
408 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
409static unsigned int rx_ring_sz[MAX_RX_RINGS] =
410 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
411static unsigned int rts_frm_len[MAX_RX_RINGS] =
412 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
413
414module_param_array(tx_fifo_len, uint, NULL, 0);
415module_param_array(rx_ring_sz, uint, NULL, 0);
416module_param_array(rts_frm_len, uint, NULL, 0);
405 417
406/* 418/*
407 * S2IO device table. 419 * S2IO device table.
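S2IO_PARM_INT() (defined in the s2io.h hunk later in this patch) folds the old pattern of a static variable plus a separate module_param() call into one line. For a single parameter the expansion is simply:

    /* S2IO_PARM_INT(rxsync_frequency, 3) expands to: */
    static unsigned int rxsync_frequency = 3;
    module_param(rxsync_frequency, uint, 0);

The array parameters keep their explicit definitions because module_param_array() has no equivalent shorthand here.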
@@ -464,10 +476,9 @@ static int init_shared_mem(struct s2io_nic *nic)
464 size += config->tx_cfg[i].fifo_len; 476 size += config->tx_cfg[i].fifo_len;
465 } 477 }
466 if (size > MAX_AVAILABLE_TXDS) { 478 if (size > MAX_AVAILABLE_TXDS) {
467 DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ", 479 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
468 __FUNCTION__);
469 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size); 480 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
470 return FAILURE; 481 return -EINVAL;
471 } 482 }
472 483
473 lst_size = (sizeof(TxD_t) * config->max_txds); 484 lst_size = (sizeof(TxD_t) * config->max_txds);
@@ -547,6 +558,7 @@ static int init_shared_mem(struct s2io_nic *nic)
547 nic->ufo_in_band_v = kmalloc((sizeof(u64) * size), GFP_KERNEL); 558 nic->ufo_in_band_v = kmalloc((sizeof(u64) * size), GFP_KERNEL);
548 if (!nic->ufo_in_band_v) 559 if (!nic->ufo_in_band_v)
549 return -ENOMEM; 560 return -ENOMEM;
561 memset(nic->ufo_in_band_v, 0, size);
550 562
551 /* Allocation and initialization of RXDs in Rings */ 563 /* Allocation and initialization of RXDs in Rings */
552 size = 0; 564 size = 0;
@@ -1213,7 +1225,7 @@ static int init_nic(struct s2io_nic *nic)
1213 break; 1225 break;
1214 } 1226 }
1215 1227
1216 /* Enable Tx FIFO partition 0. */ 1228 /* Enable all configured Tx FIFO partitions */
1217 val64 = readq(&bar0->tx_fifo_partition_0); 1229 val64 = readq(&bar0->tx_fifo_partition_0);
1218 val64 |= (TX_FIFO_PARTITION_EN); 1230 val64 |= (TX_FIFO_PARTITION_EN);
1219 writeq(val64, &bar0->tx_fifo_partition_0); 1231 writeq(val64, &bar0->tx_fifo_partition_0);
@@ -1650,7 +1662,7 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1650 writeq(temp64, &bar0->general_int_mask); 1662 writeq(temp64, &bar0->general_int_mask);
1651 /* 1663 /*
1652 * If Hercules adapter enable GPIO otherwise 1664 * If Hercules adapter enable GPIO otherwise
1653 * disabled all PCIX, Flash, MDIO, IIC and GPIO 1665 * disable all PCIX, Flash, MDIO, IIC and GPIO
1654 * interrupts for now. 1666 * interrupts for now.
1655 * TODO 1667 * TODO
1656 */ 1668 */
@@ -2119,7 +2131,7 @@ static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, in
2119 frag->size, PCI_DMA_TODEVICE); 2131 frag->size, PCI_DMA_TODEVICE);
2120 } 2132 }
2121 } 2133 }
2122 txdlp->Host_Control = 0; 2134 memset(txdlp,0, (sizeof(TxD_t) * fifo_data->max_txds));
2123 return(skb); 2135 return(skb);
2124} 2136}
2125 2137
@@ -2371,9 +2383,14 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2371 skb->data = (void *) (unsigned long)tmp; 2383 skb->data = (void *) (unsigned long)tmp;
2372 skb->tail = (void *) (unsigned long)tmp; 2384 skb->tail = (void *) (unsigned long)tmp;
2373 2385
2374 ((RxD3_t*)rxdp)->Buffer0_ptr = 2386 if (!(((RxD3_t*)rxdp)->Buffer0_ptr))
2375 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN, 2387 ((RxD3_t*)rxdp)->Buffer0_ptr =
2388 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2376 PCI_DMA_FROMDEVICE); 2389 PCI_DMA_FROMDEVICE);
2390 else
2391 pci_dma_sync_single_for_device(nic->pdev,
2392 (dma_addr_t) ((RxD3_t*)rxdp)->Buffer0_ptr,
2393 BUF0_LEN, PCI_DMA_FROMDEVICE);
2377 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); 2394 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2378 if (nic->rxd_mode == RXD_MODE_3B) { 2395 if (nic->rxd_mode == RXD_MODE_3B) {
2379 /* Two buffer mode */ 2396 /* Two buffer mode */
@@ -2386,10 +2403,13 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2386 (nic->pdev, skb->data, dev->mtu + 4, 2403 (nic->pdev, skb->data, dev->mtu + 4,
2387 PCI_DMA_FROMDEVICE); 2404 PCI_DMA_FROMDEVICE);
2388 2405
2389 /* Buffer-1 will be dummy buffer not used */ 2406 /* Buffer-1 will be dummy buffer. Not used */
2390 ((RxD3_t*)rxdp)->Buffer1_ptr = 2407 if (!(((RxD3_t*)rxdp)->Buffer1_ptr)) {
2391 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN, 2408 ((RxD3_t*)rxdp)->Buffer1_ptr =
2392 PCI_DMA_FROMDEVICE); 2409 pci_map_single(nic->pdev,
2410 ba->ba_1, BUF1_LEN,
2411 PCI_DMA_FROMDEVICE);
2412 }
2393 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); 2413 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2394 rxdp->Control_2 |= SET_BUFFER2_SIZE_3 2414 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2395 (dev->mtu + 4); 2415 (dev->mtu + 4);
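With this hunk the Buffer0/Buffer1 areas of a descriptor are mapped with pci_map_single() only the first time the descriptor is filled; on later refills the existing mapping is handed back to the hardware with pci_dma_sync_single_for_device(), which pairs with the pci_dma_sync_single_for_cpu() calls added to rx_intr_handler() below. The pattern, reduced to a kernel-style sketch (the rxd/buf0 names are placeholders and error handling is omitted):

    /* Fill path: map once, then only sync ownership back to the device. */
    if (!rxd->buf0_dma)
        rxd->buf0_dma = pci_map_single(pdev, buf0, BUF0_LEN,
                                       PCI_DMA_FROMDEVICE);
    else
        pci_dma_sync_single_for_device(pdev, rxd->buf0_dma, BUF0_LEN,
                                       PCI_DMA_FROMDEVICE);

    /* Completion path: hand ownership to the CPU before reading the data. */
    pci_dma_sync_single_for_cpu(pdev, rxd->buf0_dma, BUF0_LEN,
                                PCI_DMA_FROMDEVICE);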
@@ -2614,23 +2634,23 @@ no_rx:
2614} 2634}
2615#endif 2635#endif
2616 2636
2637#ifdef CONFIG_NET_POLL_CONTROLLER
2617/** 2638/**
2618 * s2io_netpoll - Rx interrupt service handler for netpoll support 2639 * s2io_netpoll - netpoll event handler entry point
2619 * @dev : pointer to the device structure. 2640 * @dev : pointer to the device structure.
2620 * Description: 2641 * Description:
2621 * Polling 'interrupt' - used by things like netconsole to send skbs 2642 * This function will be called by upper layer to check for events on the
2622 * without having to re-enable interrupts. It's not called while 2643 * interface in situations where interrupts are disabled. It is used for
2623 * the interrupt routine is executing. 2644 * specific in-kernel networking tasks, such as remote consoles and kernel
 2645 * debugging over the network (for example, netdump in Red Hat).
2624 */ 2646 */
2625
2626#ifdef CONFIG_NET_POLL_CONTROLLER
2627static void s2io_netpoll(struct net_device *dev) 2647static void s2io_netpoll(struct net_device *dev)
2628{ 2648{
2629 nic_t *nic = dev->priv; 2649 nic_t *nic = dev->priv;
2630 mac_info_t *mac_control; 2650 mac_info_t *mac_control;
2631 struct config_param *config; 2651 struct config_param *config;
2632 XENA_dev_config_t __iomem *bar0 = nic->bar0; 2652 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2633 u64 val64; 2653 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2634 int i; 2654 int i;
2635 2655
2636 disable_irq(dev->irq); 2656 disable_irq(dev->irq);
@@ -2639,9 +2659,17 @@ static void s2io_netpoll(struct net_device *dev)
2639 mac_control = &nic->mac_control; 2659 mac_control = &nic->mac_control;
2640 config = &nic->config; 2660 config = &nic->config;
2641 2661
2642 val64 = readq(&bar0->rx_traffic_int);
2643 writeq(val64, &bar0->rx_traffic_int); 2662 writeq(val64, &bar0->rx_traffic_int);
2663 writeq(val64, &bar0->tx_traffic_int);
2644 2664
 2665 /* We need to free the transmitted skbufs, or else netpoll will run
 2666 * out of skbs and netpoll applications such as netdump will
 2667 * eventually fail.
 2668 */
2669 for (i = 0; i < config->tx_fifo_num; i++)
2670 tx_intr_handler(&mac_control->fifos[i]);
2671
2672 /* check for received packet and indicate up to network */
2645 for (i = 0; i < config->rx_ring_num; i++) 2673 for (i = 0; i < config->rx_ring_num; i++)
2646 rx_intr_handler(&mac_control->rings[i]); 2674 rx_intr_handler(&mac_control->rings[i]);
2647 2675
@@ -2708,7 +2736,7 @@ static void rx_intr_handler(ring_info_t *ring_data)
 2708 /* If you are next to the put index then it's a FIFO full condition */ 2736 /* If you are next to the put index then it's a FIFO full condition */
2709 if ((get_block == put_block) && 2737 if ((get_block == put_block) &&
2710 (get_info.offset + 1) == put_info.offset) { 2738 (get_info.offset + 1) == put_info.offset) {
2711 DBG_PRINT(ERR_DBG, "%s: Ring Full\n",dev->name); 2739 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
2712 break; 2740 break;
2713 } 2741 }
2714 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control); 2742 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
@@ -2728,18 +2756,15 @@ static void rx_intr_handler(ring_info_t *ring_data)
2728 HEADER_SNAP_SIZE, 2756 HEADER_SNAP_SIZE,
2729 PCI_DMA_FROMDEVICE); 2757 PCI_DMA_FROMDEVICE);
2730 } else if (nic->rxd_mode == RXD_MODE_3B) { 2758 } else if (nic->rxd_mode == RXD_MODE_3B) {
2731 pci_unmap_single(nic->pdev, (dma_addr_t) 2759 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2732 ((RxD3_t*)rxdp)->Buffer0_ptr, 2760 ((RxD3_t*)rxdp)->Buffer0_ptr,
2733 BUF0_LEN, PCI_DMA_FROMDEVICE); 2761 BUF0_LEN, PCI_DMA_FROMDEVICE);
2734 pci_unmap_single(nic->pdev, (dma_addr_t) 2762 pci_unmap_single(nic->pdev, (dma_addr_t)
2735 ((RxD3_t*)rxdp)->Buffer1_ptr,
2736 BUF1_LEN, PCI_DMA_FROMDEVICE);
2737 pci_unmap_single(nic->pdev, (dma_addr_t)
2738 ((RxD3_t*)rxdp)->Buffer2_ptr, 2763 ((RxD3_t*)rxdp)->Buffer2_ptr,
2739 dev->mtu + 4, 2764 dev->mtu + 4,
2740 PCI_DMA_FROMDEVICE); 2765 PCI_DMA_FROMDEVICE);
2741 } else { 2766 } else {
2742 pci_unmap_single(nic->pdev, (dma_addr_t) 2767 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2743 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN, 2768 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
2744 PCI_DMA_FROMDEVICE); 2769 PCI_DMA_FROMDEVICE);
2745 pci_unmap_single(nic->pdev, (dma_addr_t) 2770 pci_unmap_single(nic->pdev, (dma_addr_t)
@@ -3327,7 +3352,7 @@ static void s2io_reset(nic_t * sp)
3327 3352
3328 /* Clear certain PCI/PCI-X fields after reset */ 3353 /* Clear certain PCI/PCI-X fields after reset */
3329 if (sp->device_type == XFRAME_II_DEVICE) { 3354 if (sp->device_type == XFRAME_II_DEVICE) {
3330 /* Clear parity err detect bit */ 3355 /* Clear "detected parity error" bit */
3331 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000); 3356 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3332 3357
3333 /* Clearing PCIX Ecc status register */ 3358 /* Clearing PCIX Ecc status register */
@@ -3528,7 +3553,7 @@ static void restore_xmsi_data(nic_t *nic)
3528 u64 val64; 3553 u64 val64;
3529 int i; 3554 int i;
3530 3555
3531 for (i=0; i< nic->avail_msix_vectors; i++) { 3556 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3532 writeq(nic->msix_info[i].addr, &bar0->xmsi_address); 3557 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3533 writeq(nic->msix_info[i].data, &bar0->xmsi_data); 3558 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3534 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6)); 3559 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
@@ -3547,7 +3572,7 @@ static void store_xmsi_data(nic_t *nic)
3547 int i; 3572 int i;
3548 3573
3549 /* Store and display */ 3574 /* Store and display */
3550 for (i=0; i< nic->avail_msix_vectors; i++) { 3575 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3551 val64 = (BIT(15) | vBIT(i, 26, 6)); 3576 val64 = (BIT(15) | vBIT(i, 26, 6));
3552 writeq(val64, &bar0->xmsi_access); 3577 writeq(val64, &bar0->xmsi_access);
3553 if (wait_for_msix_trans(nic, i)) { 3578 if (wait_for_msix_trans(nic, i)) {
@@ -3808,13 +3833,11 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3808 TxD_t *txdp; 3833 TxD_t *txdp;
3809 TxFIFO_element_t __iomem *tx_fifo; 3834 TxFIFO_element_t __iomem *tx_fifo;
3810 unsigned long flags; 3835 unsigned long flags;
3811#ifdef NETIF_F_TSO
3812 int mss;
3813#endif
3814 u16 vlan_tag = 0; 3836 u16 vlan_tag = 0;
3815 int vlan_priority = 0; 3837 int vlan_priority = 0;
3816 mac_info_t *mac_control; 3838 mac_info_t *mac_control;
3817 struct config_param *config; 3839 struct config_param *config;
3840 int offload_type;
3818 3841
3819 mac_control = &sp->mac_control; 3842 mac_control = &sp->mac_control;
3820 config = &sp->config; 3843 config = &sp->config;
@@ -3862,13 +3885,11 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3862 return 0; 3885 return 0;
3863 } 3886 }
3864 3887
3865 txdp->Control_1 = 0; 3888 offload_type = s2io_offload_type(skb);
3866 txdp->Control_2 = 0;
3867#ifdef NETIF_F_TSO 3889#ifdef NETIF_F_TSO
3868 mss = skb_shinfo(skb)->gso_size; 3890 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3869 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3870 txdp->Control_1 |= TXD_TCP_LSO_EN; 3891 txdp->Control_1 |= TXD_TCP_LSO_EN;
3871 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss); 3892 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
3872 } 3893 }
3873#endif 3894#endif
3874 if (skb->ip_summed == CHECKSUM_HW) { 3895 if (skb->ip_summed == CHECKSUM_HW) {
@@ -3886,10 +3907,10 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3886 } 3907 }
3887 3908
3888 frg_len = skb->len - skb->data_len; 3909 frg_len = skb->len - skb->data_len;
3889 if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) { 3910 if (offload_type == SKB_GSO_UDP) {
3890 int ufo_size; 3911 int ufo_size;
3891 3912
3892 ufo_size = skb_shinfo(skb)->gso_size; 3913 ufo_size = s2io_udp_mss(skb);
3893 ufo_size &= ~7; 3914 ufo_size &= ~7;
3894 txdp->Control_1 |= TXD_UFO_EN; 3915 txdp->Control_1 |= TXD_UFO_EN;
3895 txdp->Control_1 |= TXD_UFO_MSS(ufo_size); 3916 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
@@ -3906,16 +3927,13 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3906 sp->ufo_in_band_v, 3927 sp->ufo_in_band_v,
3907 sizeof(u64), PCI_DMA_TODEVICE); 3928 sizeof(u64), PCI_DMA_TODEVICE);
3908 txdp++; 3929 txdp++;
3909 txdp->Control_1 = 0;
3910 txdp->Control_2 = 0;
3911 } 3930 }
3912 3931
3913 txdp->Buffer_Pointer = pci_map_single 3932 txdp->Buffer_Pointer = pci_map_single
3914 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE); 3933 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
3915 txdp->Host_Control = (unsigned long) skb; 3934 txdp->Host_Control = (unsigned long) skb;
3916 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len); 3935 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
3917 3936 if (offload_type == SKB_GSO_UDP)
3918 if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
3919 txdp->Control_1 |= TXD_UFO_EN; 3937 txdp->Control_1 |= TXD_UFO_EN;
3920 3938
3921 frg_cnt = skb_shinfo(skb)->nr_frags; 3939 frg_cnt = skb_shinfo(skb)->nr_frags;
@@ -3930,12 +3948,12 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3930 (sp->pdev, frag->page, frag->page_offset, 3948 (sp->pdev, frag->page, frag->page_offset,
3931 frag->size, PCI_DMA_TODEVICE); 3949 frag->size, PCI_DMA_TODEVICE);
3932 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size); 3950 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
3933 if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) 3951 if (offload_type == SKB_GSO_UDP)
3934 txdp->Control_1 |= TXD_UFO_EN; 3952 txdp->Control_1 |= TXD_UFO_EN;
3935 } 3953 }
3936 txdp->Control_1 |= TXD_GATHER_CODE_LAST; 3954 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
3937 3955
3938 if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) 3956 if (offload_type == SKB_GSO_UDP)
3939 frg_cnt++; /* as Txd0 was used for inband header */ 3957 frg_cnt++; /* as Txd0 was used for inband header */
3940 3958
3941 tx_fifo = mac_control->tx_FIFO_start[queue]; 3959 tx_fifo = mac_control->tx_FIFO_start[queue];
@@ -3944,13 +3962,9 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3944 3962
3945 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST | 3963 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
3946 TX_FIFO_LAST_LIST); 3964 TX_FIFO_LAST_LIST);
3947 3965 if (offload_type)
3948#ifdef NETIF_F_TSO
3949 if (mss)
3950 val64 |= TX_FIFO_SPECIAL_FUNC;
3951#endif
3952 if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
3953 val64 |= TX_FIFO_SPECIAL_FUNC; 3966 val64 |= TX_FIFO_SPECIAL_FUNC;
3967
3954 writeq(val64, &tx_fifo->List_Control); 3968 writeq(val64, &tx_fifo->List_Control);
3955 3969
3956 mmiowb(); 3970 mmiowb();
@@ -3984,13 +3998,41 @@ s2io_alarm_handle(unsigned long data)
3984 mod_timer(&sp->alarm_timer, jiffies + HZ / 2); 3998 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
3985} 3999}
3986 4000
4001static int s2io_chk_rx_buffers(nic_t *sp, int rng_n)
4002{
4003 int rxb_size, level;
4004
4005 if (!sp->lro) {
4006 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
4007 level = rx_buffer_level(sp, rxb_size, rng_n);
4008
4009 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4010 int ret;
4011 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
4012 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4013 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
4014 DBG_PRINT(ERR_DBG, "Out of memory in %s",
4015 __FUNCTION__);
4016 clear_bit(0, (&sp->tasklet_status));
4017 return -1;
4018 }
4019 clear_bit(0, (&sp->tasklet_status));
4020 } else if (level == LOW)
4021 tasklet_schedule(&sp->task);
4022
4023 } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
4024 DBG_PRINT(ERR_DBG, "%s:Out of memory", sp->dev->name);
4025 DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
4026 }
4027 return 0;
4028}
4029
3987static irqreturn_t 4030static irqreturn_t
3988s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs) 4031s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs)
3989{ 4032{
3990 struct net_device *dev = (struct net_device *) dev_id; 4033 struct net_device *dev = (struct net_device *) dev_id;
3991 nic_t *sp = dev->priv; 4034 nic_t *sp = dev->priv;
3992 int i; 4035 int i;
3993 int ret;
3994 mac_info_t *mac_control; 4036 mac_info_t *mac_control;
3995 struct config_param *config; 4037 struct config_param *config;
3996 4038
@@ -4012,35 +4054,8 @@ s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs)
4012 * reallocate the buffers from the interrupt handler itself, 4054 * reallocate the buffers from the interrupt handler itself,
4013 * else schedule a tasklet to reallocate the buffers. 4055 * else schedule a tasklet to reallocate the buffers.
4014 */ 4056 */
4015 for (i = 0; i < config->rx_ring_num; i++) { 4057 for (i = 0; i < config->rx_ring_num; i++)
4016 if (!sp->lro) { 4058 s2io_chk_rx_buffers(sp, i);
4017 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
4018 int level = rx_buffer_level(sp, rxb_size, i);
4019
4020 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4021 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ",
4022 dev->name);
4023 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4024 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
4025 DBG_PRINT(ERR_DBG, "%s:Out of memory",
4026 dev->name);
4027 DBG_PRINT(ERR_DBG, " in ISR!!\n");
4028 clear_bit(0, (&sp->tasklet_status));
4029 atomic_dec(&sp->isr_cnt);
4030 return IRQ_HANDLED;
4031 }
4032 clear_bit(0, (&sp->tasklet_status));
4033 } else if (level == LOW) {
4034 tasklet_schedule(&sp->task);
4035 }
4036 }
4037 else if (fill_rx_buffers(sp, i) == -ENOMEM) {
4038 DBG_PRINT(ERR_DBG, "%s:Out of memory",
4039 dev->name);
4040 DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
4041 break;
4042 }
4043 }
4044 4059
4045 atomic_dec(&sp->isr_cnt); 4060 atomic_dec(&sp->isr_cnt);
4046 return IRQ_HANDLED; 4061 return IRQ_HANDLED;
@@ -4051,39 +4066,13 @@ s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs)
4051{ 4066{
4052 ring_info_t *ring = (ring_info_t *)dev_id; 4067 ring_info_t *ring = (ring_info_t *)dev_id;
4053 nic_t *sp = ring->nic; 4068 nic_t *sp = ring->nic;
4054 struct net_device *dev = (struct net_device *) dev_id;
4055 int rxb_size, level, rng_n;
4056 4069
4057 atomic_inc(&sp->isr_cnt); 4070 atomic_inc(&sp->isr_cnt);
4058 rx_intr_handler(ring);
4059
4060 rng_n = ring->ring_no;
4061 if (!sp->lro) {
4062 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
4063 level = rx_buffer_level(sp, rxb_size, rng_n);
4064 4071
4065 if ((level == PANIC) && (!TASKLET_IN_USE)) { 4072 rx_intr_handler(ring);
4066 int ret; 4073 s2io_chk_rx_buffers(sp, ring->ring_no);
4067 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
4068 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4069 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
4070 DBG_PRINT(ERR_DBG, "Out of memory in %s",
4071 __FUNCTION__);
4072 clear_bit(0, (&sp->tasklet_status));
4073 return IRQ_HANDLED;
4074 }
4075 clear_bit(0, (&sp->tasklet_status));
4076 } else if (level == LOW) {
4077 tasklet_schedule(&sp->task);
4078 }
4079 }
4080 else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
4081 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
4082 DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
4083 }
4084 4074
4085 atomic_dec(&sp->isr_cnt); 4075 atomic_dec(&sp->isr_cnt);
4086
4087 return IRQ_HANDLED; 4076 return IRQ_HANDLED;
4088} 4077}
4089 4078
@@ -4248,37 +4237,8 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
4248 * else schedule a tasklet to reallocate the buffers. 4237 * else schedule a tasklet to reallocate the buffers.
4249 */ 4238 */
4250#ifndef CONFIG_S2IO_NAPI 4239#ifndef CONFIG_S2IO_NAPI
4251 for (i = 0; i < config->rx_ring_num; i++) { 4240 for (i = 0; i < config->rx_ring_num; i++)
4252 if (!sp->lro) { 4241 s2io_chk_rx_buffers(sp, i);
4253 int ret;
4254 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
4255 int level = rx_buffer_level(sp, rxb_size, i);
4256
4257 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4258 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ",
4259 dev->name);
4260 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4261 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
4262 DBG_PRINT(ERR_DBG, "%s:Out of memory",
4263 dev->name);
4264 DBG_PRINT(ERR_DBG, " in ISR!!\n");
4265 clear_bit(0, (&sp->tasklet_status));
4266 atomic_dec(&sp->isr_cnt);
4267 writeq(org_mask, &bar0->general_int_mask);
4268 return IRQ_HANDLED;
4269 }
4270 clear_bit(0, (&sp->tasklet_status));
4271 } else if (level == LOW) {
4272 tasklet_schedule(&sp->task);
4273 }
4274 }
4275 else if (fill_rx_buffers(sp, i) == -ENOMEM) {
4276 DBG_PRINT(ERR_DBG, "%s:Out of memory",
4277 dev->name);
4278 DBG_PRINT(ERR_DBG, " in Rx intr!!\n");
4279 break;
4280 }
4281 }
4282#endif 4242#endif
4283 writeq(org_mask, &bar0->general_int_mask); 4243 writeq(org_mask, &bar0->general_int_mask);
4284 atomic_dec(&sp->isr_cnt); 4244 atomic_dec(&sp->isr_cnt);
@@ -4308,6 +4268,8 @@ static void s2io_updt_stats(nic_t *sp)
4308 if (cnt == 5) 4268 if (cnt == 5)
4309 break; /* Updt failed */ 4269 break; /* Updt failed */
4310 } while(1); 4270 } while(1);
4271 } else {
4272 memset(sp->mac_control.stats_info, 0, sizeof(StatInfo_t));
4311 } 4273 }
4312} 4274}
4313 4275
@@ -4942,7 +4904,8 @@ static int write_eeprom(nic_t * sp, int off, u64 data, int cnt)
4942} 4904}
4943static void s2io_vpd_read(nic_t *nic) 4905static void s2io_vpd_read(nic_t *nic)
4944{ 4906{
4945 u8 vpd_data[256],data; 4907 u8 *vpd_data;
4908 u8 data;
4946 int i=0, cnt, fail = 0; 4909 int i=0, cnt, fail = 0;
4947 int vpd_addr = 0x80; 4910 int vpd_addr = 0x80;
4948 4911
@@ -4955,6 +4918,10 @@ static void s2io_vpd_read(nic_t *nic)
4955 vpd_addr = 0x50; 4918 vpd_addr = 0x50;
4956 } 4919 }
4957 4920
4921 vpd_data = kmalloc(256, GFP_KERNEL);
4922 if (!vpd_data)
4923 return;
4924
4958 for (i = 0; i < 256; i +=4 ) { 4925 for (i = 0; i < 256; i +=4 ) {
4959 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i); 4926 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
4960 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data); 4927 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
@@ -4977,6 +4944,7 @@ static void s2io_vpd_read(nic_t *nic)
4977 memset(nic->product_name, 0, vpd_data[1]); 4944 memset(nic->product_name, 0, vpd_data[1]);
4978 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]); 4945 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
4979 } 4946 }
4947 kfree(vpd_data);
4980} 4948}
4981 4949
4982/** 4950/**
@@ -5295,7 +5263,7 @@ static int s2io_link_test(nic_t * sp, uint64_t * data)
5295 else 5263 else
5296 *data = 0; 5264 *data = 0;
5297 5265
5298 return 0; 5266 return *data;
5299} 5267}
5300 5268
5301/** 5269/**
@@ -5753,6 +5721,19 @@ static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
5753 return 0; 5721 return 0;
5754} 5722}
5755 5723
5724static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
5725{
5726 return (dev->features & NETIF_F_TSO) != 0;
5727}
5728static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
5729{
5730 if (data)
5731 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
5732 else
5733 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
5734
5735 return 0;
5736}
5756 5737
5757static struct ethtool_ops netdev_ethtool_ops = { 5738static struct ethtool_ops netdev_ethtool_ops = {
5758 .get_settings = s2io_ethtool_gset, 5739 .get_settings = s2io_ethtool_gset,
@@ -5773,8 +5754,8 @@ static struct ethtool_ops netdev_ethtool_ops = {
5773 .get_sg = ethtool_op_get_sg, 5754 .get_sg = ethtool_op_get_sg,
5774 .set_sg = ethtool_op_set_sg, 5755 .set_sg = ethtool_op_set_sg,
5775#ifdef NETIF_F_TSO 5756#ifdef NETIF_F_TSO
5776 .get_tso = ethtool_op_get_tso, 5757 .get_tso = s2io_ethtool_op_get_tso,
5777 .set_tso = ethtool_op_set_tso, 5758 .set_tso = s2io_ethtool_op_set_tso,
5778#endif 5759#endif
5779 .get_ufo = ethtool_op_get_ufo, 5760 .get_ufo = ethtool_op_get_ufo,
5780 .set_ufo = ethtool_op_set_ufo, 5761 .set_ufo = ethtool_op_set_ufo,
@@ -6337,7 +6318,7 @@ static int s2io_card_up(nic_t * sp)
6337 s2io_set_multicast(dev); 6318 s2io_set_multicast(dev);
6338 6319
6339 if (sp->lro) { 6320 if (sp->lro) {
6340 /* Initialize max aggregatable pkts based on MTU */ 6321 /* Initialize max aggregatable pkts per session based on MTU */
6341 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu; 6322 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
 6342 /* Check if we can use (if specified) the user-provided value */ 6323 /* Check if we can use (if specified) the user-provided value */
6343 if (lro_max_pkts < sp->lro_max_aggr_per_sess) 6324 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
@@ -6438,7 +6419,7 @@ static void s2io_tx_watchdog(struct net_device *dev)
6438 * @cksum : FCS checksum of the frame. 6419 * @cksum : FCS checksum of the frame.
6439 * @ring_no : the ring from which this RxD was extracted. 6420 * @ring_no : the ring from which this RxD was extracted.
6440 * Description: 6421 * Description:
6441 * This function is called by the Tx interrupt serivce routine to perform 6422 * This function is called by the Rx interrupt serivce routine to perform
6442 * some OS related operations on the SKB before passing it to the upper 6423 * some OS related operations on the SKB before passing it to the upper
6443 * layers. It mainly checks if the checksum is OK, if so adds it to the 6424 * layers. It mainly checks if the checksum is OK, if so adds it to the
6444 * SKBs cksum variable, increments the Rx packet count and passes the SKB 6425 * SKBs cksum variable, increments the Rx packet count and passes the SKB
@@ -6698,33 +6679,6 @@ static void s2io_init_pci(nic_t * sp)
6698 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd); 6679 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
6699} 6680}
6700 6681
6701MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
6702MODULE_LICENSE("GPL");
6703MODULE_VERSION(DRV_VERSION);
6704
6705module_param(tx_fifo_num, int, 0);
6706module_param(rx_ring_num, int, 0);
6707module_param(rx_ring_mode, int, 0);
6708module_param_array(tx_fifo_len, uint, NULL, 0);
6709module_param_array(rx_ring_sz, uint, NULL, 0);
6710module_param_array(rts_frm_len, uint, NULL, 0);
6711module_param(use_continuous_tx_intrs, int, 1);
6712module_param(rmac_pause_time, int, 0);
6713module_param(mc_pause_threshold_q0q3, int, 0);
6714module_param(mc_pause_threshold_q4q7, int, 0);
6715module_param(shared_splits, int, 0);
6716module_param(tmac_util_period, int, 0);
6717module_param(rmac_util_period, int, 0);
6718module_param(bimodal, bool, 0);
6719module_param(l3l4hdr_size, int , 0);
6720#ifndef CONFIG_S2IO_NAPI
6721module_param(indicate_max_pkts, int, 0);
6722#endif
6723module_param(rxsync_frequency, int, 0);
6724module_param(intr_type, int, 0);
6725module_param(lro, int, 0);
6726module_param(lro_max_pkts, int, 0);
6727
6728static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type) 6682static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
6729{ 6683{
6730 if ( tx_fifo_num > 8) { 6684 if ( tx_fifo_num > 8) {
@@ -6832,8 +6786,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6832 } 6786 }
6833 if (dev_intr_type != MSI_X) { 6787 if (dev_intr_type != MSI_X) {
6834 if (pci_request_regions(pdev, s2io_driver_name)) { 6788 if (pci_request_regions(pdev, s2io_driver_name)) {
6835 DBG_PRINT(ERR_DBG, "Request Regions failed\n"), 6789 DBG_PRINT(ERR_DBG, "Request Regions failed\n");
6836 pci_disable_device(pdev); 6790 pci_disable_device(pdev);
6837 return -ENODEV; 6791 return -ENODEV;
6838 } 6792 }
6839 } 6793 }
@@ -6957,7 +6911,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6957 /* initialize the shared memory used by the NIC and the host */ 6911 /* initialize the shared memory used by the NIC and the host */
6958 if (init_shared_mem(sp)) { 6912 if (init_shared_mem(sp)) {
6959 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", 6913 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
6960 __FUNCTION__); 6914 dev->name);
6961 ret = -ENOMEM; 6915 ret = -ENOMEM;
6962 goto mem_alloc_failed; 6916 goto mem_alloc_failed;
6963 } 6917 }
@@ -7094,6 +7048,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7094 dev->addr_len = ETH_ALEN; 7048 dev->addr_len = ETH_ALEN;
7095 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN); 7049 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7096 7050
7051 /* reset Nic and bring it to known state */
7052 s2io_reset(sp);
7053
7097 /* 7054 /*
7098 * Initialize the tasklet status and link state flags 7055 * Initialize the tasklet status and link state flags
7099 * and the card state parameter 7056 * and the card state parameter
@@ -7131,11 +7088,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7131 goto register_failed; 7088 goto register_failed;
7132 } 7089 }
7133 s2io_vpd_read(sp); 7090 s2io_vpd_read(sp);
7134 DBG_PRINT(ERR_DBG, "%s: Neterion %s",dev->name, sp->product_name);
7135 DBG_PRINT(ERR_DBG, "(rev %d), Driver version %s\n",
7136 get_xena_rev_id(sp->pdev),
7137 s2io_driver_version);
7138 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n"); 7091 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n");
7092 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
7093 sp->product_name, get_xena_rev_id(sp->pdev));
7094 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
7095 s2io_driver_version);
7139 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: " 7096 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
7140 "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, 7097 "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
7141 sp->def_mac_addr[0].mac_addr[0], 7098 sp->def_mac_addr[0].mac_addr[0],
@@ -7436,8 +7393,13 @@ static int verify_l3_l4_lro_capable(lro_t *l_lro, struct iphdr *ip,
7436 if (ip->ihl != 5) /* IP has options */ 7393 if (ip->ihl != 5) /* IP has options */
7437 return -1; 7394 return -1;
7438 7395
7396 /* If we see CE codepoint in IP header, packet is not mergeable */
7397 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
7398 return -1;
7399
7400 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
7439 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin || 7401 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
7440 !tcp->ack) { 7402 tcp->ece || tcp->cwr || !tcp->ack) {
7441 /* 7403 /*
7442 * Currently recognize only the ack control word and 7404 * Currently recognize only the ack control word and
7443 * any other control field being set would result in 7405 * any other control field being set would result in
@@ -7591,18 +7553,16 @@ static void queue_rx_frame(struct sk_buff *skb)
7591static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, 7553static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb,
7592 u32 tcp_len) 7554 u32 tcp_len)
7593{ 7555{
7594 struct sk_buff *tmp, *first = lro->parent; 7556 struct sk_buff *first = lro->parent;
7595 7557
7596 first->len += tcp_len; 7558 first->len += tcp_len;
7597 first->data_len = lro->frags_len; 7559 first->data_len = lro->frags_len;
7598 skb_pull(skb, (skb->len - tcp_len)); 7560 skb_pull(skb, (skb->len - tcp_len));
7599 if ((tmp = skb_shinfo(first)->frag_list)) { 7561 if (skb_shinfo(first)->frag_list)
7600 while (tmp->next) 7562 lro->last_frag->next = skb;
7601 tmp = tmp->next;
7602 tmp->next = skb;
7603 }
7604 else 7563 else
7605 skb_shinfo(first)->frag_list = skb; 7564 skb_shinfo(first)->frag_list = skb;
7565 lro->last_frag = skb;
7606 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++; 7566 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
7607 return; 7567 return;
7608} 7568}
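Caching lro->last_frag turns appending a segment to the aggregated skb's frag_list from a walk over the whole chain into a constant-time pointer update. The same idea in a self-contained user-space sketch with a plain singly linked list:

    #include <stdio.h>

    struct frag {
        int len;
        struct frag *next;
    };

    struct session {
        struct frag *head;   /* plays the role of frag_list */
        struct frag *tail;   /* plays the role of last_frag */
    };

    static void append_frag(struct session *s, struct frag *f)
    {
        f->next = NULL;
        if (s->head)
            s->tail->next = f;   /* O(1): no walk to find the tail */
        else
            s->head = f;
        s->tail = f;
    }

    int main(void)
    {
        struct session s = { NULL, NULL };
        struct frag a = { 100, NULL }, b = { 200, NULL };

        append_frag(&s, &a);
        append_frag(&s, &b);
        printf("head=%d tail=%d\n", s.head->len, s.tail->len);
        return 0;
    }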
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 217097bc22f1..5ed49c3be1e9 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -719,6 +719,7 @@ struct msix_info_st {
719/* Data structure to represent a LRO session */ 719/* Data structure to represent a LRO session */
720typedef struct lro { 720typedef struct lro {
721 struct sk_buff *parent; 721 struct sk_buff *parent;
722 struct sk_buff *last_frag;
722 u8 *l2h; 723 u8 *l2h;
723 struct iphdr *iph; 724 struct iphdr *iph;
724 struct tcphdr *tcph; 725 struct tcphdr *tcph;
@@ -1011,4 +1012,13 @@ static void clear_lro_session(lro_t *lro);
1011static void queue_rx_frame(struct sk_buff *skb); 1012static void queue_rx_frame(struct sk_buff *skb);
1012static void update_L3L4_header(nic_t *sp, lro_t *lro); 1013static void update_L3L4_header(nic_t *sp, lro_t *lro);
1013static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, u32 tcp_len); 1014static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, u32 tcp_len);
1015
1016#define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size
1017#define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size
1018#define s2io_offload_type(skb) skb_shinfo(skb)->gso_type
1019
1020#define S2IO_PARM_INT(X, def_val) \
1021 static unsigned int X = def_val;\
1022 module_param(X , uint, 0);
1023
1014#endif /* _S2IO_H */ 1024#endif /* _S2IO_H */
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index efc9c4bd826f..da9d06bdb818 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -797,7 +797,7 @@ static int zd1211_hw_init_hmac(struct zd_chip *chip)
797 { CR_ADDA_MBIAS_WARMTIME, 0x30000808 }, 797 { CR_ADDA_MBIAS_WARMTIME, 0x30000808 },
798 { CR_ZD1211_RETRY_MAX, 0x2 }, 798 { CR_ZD1211_RETRY_MAX, 0x2 },
799 { CR_SNIFFER_ON, 0 }, 799 { CR_SNIFFER_ON, 0 },
800 { CR_RX_FILTER, AP_RX_FILTER }, 800 { CR_RX_FILTER, STA_RX_FILTER },
801 { CR_GROUP_HASH_P1, 0x00 }, 801 { CR_GROUP_HASH_P1, 0x00 },
802 { CR_GROUP_HASH_P2, 0x80000000 }, 802 { CR_GROUP_HASH_P2, 0x80000000 },
803 { CR_REG1, 0xa4 }, 803 { CR_REG1, 0xa4 },
@@ -844,7 +844,7 @@ static int zd1211b_hw_init_hmac(struct zd_chip *chip)
844 { CR_ZD1211B_AIFS_CTL2, 0x008C003C }, 844 { CR_ZD1211B_AIFS_CTL2, 0x008C003C },
845 { CR_ZD1211B_TXOP, 0x01800824 }, 845 { CR_ZD1211B_TXOP, 0x01800824 },
846 { CR_SNIFFER_ON, 0 }, 846 { CR_SNIFFER_ON, 0 },
847 { CR_RX_FILTER, AP_RX_FILTER }, 847 { CR_RX_FILTER, STA_RX_FILTER },
848 { CR_GROUP_HASH_P1, 0x00 }, 848 { CR_GROUP_HASH_P1, 0x00 },
849 { CR_GROUP_HASH_P2, 0x80000000 }, 849 { CR_GROUP_HASH_P2, 0x80000000 },
850 { CR_REG1, 0xa4 }, 850 { CR_REG1, 0xa4 },
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index 805121093ab5..069d2b467339 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -461,10 +461,15 @@
461 461
462#define CR_RX_FILTER CTL_REG(0x068c) 462#define CR_RX_FILTER CTL_REG(0x068c)
463#define RX_FILTER_ASSOC_RESPONSE 0x0002 463#define RX_FILTER_ASSOC_RESPONSE 0x0002
464#define RX_FILTER_REASSOC_RESPONSE 0x0008
464#define RX_FILTER_PROBE_RESPONSE 0x0020 465#define RX_FILTER_PROBE_RESPONSE 0x0020
465#define RX_FILTER_BEACON 0x0100 466#define RX_FILTER_BEACON 0x0100
467#define RX_FILTER_DISASSOC 0x0400
466#define RX_FILTER_AUTH 0x0800 468#define RX_FILTER_AUTH 0x0800
467/* Sniff modus sets filter to 0xfffff */ 469#define AP_RX_FILTER 0x0400feff
470#define STA_RX_FILTER 0x0000ffff
471
472/* Monitor mode sets filter to 0xfffff */
468 473
469#define CR_ACK_TIMEOUT_EXT CTL_REG(0x0690) 474#define CR_ACK_TIMEOUT_EXT CTL_REG(0x0690)
470#define CR_BCN_FIFO_SEMAPHORE CTL_REG(0x0694) 475#define CR_BCN_FIFO_SEMAPHORE CTL_REG(0x0694)
@@ -546,9 +551,6 @@
546#define CR_ZD1211B_TXOP CTL_REG(0x0b20) 551#define CR_ZD1211B_TXOP CTL_REG(0x0b20)
547#define CR_ZD1211B_RETRY_MAX CTL_REG(0x0b28) 552#define CR_ZD1211B_RETRY_MAX CTL_REG(0x0b28)
548 553
549#define AP_RX_FILTER 0x0400feff
550#define STA_RX_FILTER 0x0000ffff
551
552#define CWIN_SIZE 0x007f043f 554#define CWIN_SIZE 0x007f043f
553 555
554 556
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 3bdc54d128d0..d6f3e02a0b54 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -108,7 +108,9 @@ int zd_mac_init_hw(struct zd_mac *mac, u8 device_type)
108 if (r) 108 if (r)
109 goto disable_int; 109 goto disable_int;
110 110
111 r = zd_set_encryption_type(chip, NO_WEP); 111 /* We must inform the device that we are doing encryption/decryption in
112 * software at the moment. */
113 r = zd_set_encryption_type(chip, ENC_SNIFFER);
112 if (r) 114 if (r)
113 goto disable_int; 115 goto disable_int;
114 116
@@ -136,10 +138,8 @@ static int reset_mode(struct zd_mac *mac)
136{ 138{
137 struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac); 139 struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac);
138 struct zd_ioreq32 ioreqs[3] = { 140 struct zd_ioreq32 ioreqs[3] = {
139 { CR_RX_FILTER, RX_FILTER_BEACON|RX_FILTER_PROBE_RESPONSE| 141 { CR_RX_FILTER, STA_RX_FILTER },
140 RX_FILTER_AUTH|RX_FILTER_ASSOC_RESPONSE },
141 { CR_SNIFFER_ON, 0U }, 142 { CR_SNIFFER_ON, 0U },
142 { CR_ENCRYPTION_TYPE, NO_WEP },
143 }; 143 };
144 144
145 if (ieee->iw_mode == IW_MODE_MONITOR) { 145 if (ieee->iw_mode == IW_MODE_MONITOR) {
@@ -713,10 +713,10 @@ static int zd_mac_tx(struct zd_mac *mac, struct ieee80211_txb *txb, int pri)
713struct zd_rt_hdr { 713struct zd_rt_hdr {
714 struct ieee80211_radiotap_header rt_hdr; 714 struct ieee80211_radiotap_header rt_hdr;
715 u8 rt_flags; 715 u8 rt_flags;
716 u8 rt_rate;
716 u16 rt_channel; 717 u16 rt_channel;
717 u16 rt_chbitmask; 718 u16 rt_chbitmask;
718 u16 rt_rate; 719} __attribute__((packed));
719};
720 720
721static void fill_rt_header(void *buffer, struct zd_mac *mac, 721static void fill_rt_header(void *buffer, struct zd_mac *mac,
722 const struct ieee80211_rx_stats *stats, 722 const struct ieee80211_rx_stats *stats,
@@ -735,14 +735,14 @@ static void fill_rt_header(void *buffer, struct zd_mac *mac,
735 if (status->decryption_type & (ZD_RX_WEP64|ZD_RX_WEP128|ZD_RX_WEP256)) 735 if (status->decryption_type & (ZD_RX_WEP64|ZD_RX_WEP128|ZD_RX_WEP256))
736 hdr->rt_flags |= IEEE80211_RADIOTAP_F_WEP; 736 hdr->rt_flags |= IEEE80211_RADIOTAP_F_WEP;
737 737
738 hdr->rt_rate = stats->rate / 5;
739
738 /* FIXME: 802.11a */ 740 /* FIXME: 802.11a */
739 hdr->rt_channel = cpu_to_le16(ieee80211chan2mhz( 741 hdr->rt_channel = cpu_to_le16(ieee80211chan2mhz(
740 _zd_chip_get_channel(&mac->chip))); 742 _zd_chip_get_channel(&mac->chip)));
741 hdr->rt_chbitmask = cpu_to_le16(IEEE80211_CHAN_2GHZ | 743 hdr->rt_chbitmask = cpu_to_le16(IEEE80211_CHAN_2GHZ |
742 ((status->frame_status & ZD_RX_FRAME_MODULATION_MASK) == 744 ((status->frame_status & ZD_RX_FRAME_MODULATION_MASK) ==
743 ZD_RX_OFDM ? IEEE80211_CHAN_OFDM : IEEE80211_CHAN_CCK)); 745 ZD_RX_OFDM ? IEEE80211_CHAN_OFDM : IEEE80211_CHAN_CCK));
744
745 hdr->rt_rate = stats->rate / 5;
746} 746}
747 747
748/* Returns 1 if the data packet is for us and 0 otherwise. */ 748/* Returns 1 if the data packet is for us and 0 otherwise. */
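The radiotap rate field is a single byte and, with radiotap's natural-alignment rule, it has to sit immediately after the flags byte so the following 16-bit channel fields stay aligned; the structure is also marked packed so its in-memory layout matches what is emitted to ieee80211. A compilable sketch of just the per-frame part of the header (the real structure is preceded by the generic ieee80211_radiotap_header, omitted here):

    #include <stdint.h>
    #include <stdio.h>

    struct zd_rt_body {
        uint8_t  rt_flags;
        uint8_t  rt_rate;       /* one byte, directly after the flags */
        uint16_t rt_channel;
        uint16_t rt_chbitmask;
    } __attribute__((packed));

    int main(void)
    {
        /* 6 bytes: flags, rate, then two naturally aligned 16-bit fields. */
        printf("sizeof(struct zd_rt_body) = %zu\n", sizeof(struct zd_rt_body));
        return 0;
    }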
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 72f90525bf68..6320984126c7 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -323,7 +323,6 @@ static void disable_read_regs_int(struct zd_usb *usb)
323{ 323{
324 struct zd_usb_interrupt *intr = &usb->intr; 324 struct zd_usb_interrupt *intr = &usb->intr;
325 325
326 ZD_ASSERT(in_interrupt());
327 spin_lock(&intr->lock); 326 spin_lock(&intr->lock);
328 intr->read_regs_enabled = 0; 327 intr->read_regs_enabled = 0;
329 spin_unlock(&intr->lock); 328 spin_unlock(&intr->lock);
@@ -545,11 +544,11 @@ static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer,
545 * be padded. Unaligned access might also happen if the length_info 544 * be padded. Unaligned access might also happen if the length_info
546 * structure is not present. 545 * structure is not present.
547 */ 546 */
548 if (get_unaligned(&length_info->tag) == RX_LENGTH_INFO_TAG) { 547 if (get_unaligned(&length_info->tag) == cpu_to_le16(RX_LENGTH_INFO_TAG))
548 {
549 unsigned int l, k, n; 549 unsigned int l, k, n;
550 for (i = 0, l = 0;; i++) { 550 for (i = 0, l = 0;; i++) {
551 k = le16_to_cpu(get_unaligned( 551 k = le16_to_cpu(get_unaligned(&length_info->length[i]));
552 &length_info->length[i]));
553 n = l+k; 552 n = l+k;
554 if (n > length) 553 if (n > length)
555 return; 554 return;
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index 34de5697983d..e2fef60c2d06 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -27,8 +27,7 @@
27 * along with this program; if not, write to the Free Software 27 * along with this program; if not, write to the Free Software
28 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 28 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
29 * 29 *
30 * Send feedback to <gregkh@us.ibm.com>, 30 * Send feedback to <kristen.c.accardi@intel.com>
31 * <t-kochi@bq.jp.nec.com>
32 * 31 *
33 */ 32 */
34 33
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index ef95d12fb32c..ae67a8f55ba1 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -26,7 +26,7 @@
26 * along with this program; if not, write to the Free Software 26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 27 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
28 * 28 *
29 * Send feedback to <t-kochi@bq.jp.nec.com> 29 * Send feedback to <kristen.c.accardi@intel.com>
30 * 30 *
31 */ 31 */
32 32
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 50bfc1b2f3bf..478d0d28f7ad 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -30,23 +30,6 @@ MODULE_LICENSE("GPL");
30/* global data */ 30/* global data */
31static const char device_name[] = "pcieport-driver"; 31static const char device_name[] = "pcieport-driver";
32 32
33static int pcie_portdrv_save_config(struct pci_dev *dev)
34{
35 return pci_save_state(dev);
36}
37
38static int pcie_portdrv_restore_config(struct pci_dev *dev)
39{
40 int retval;
41
42 pci_restore_state(dev);
43 retval = pci_enable_device(dev);
44 if (retval)
45 return retval;
46 pci_set_master(dev);
47 return 0;
48}
49
50/* 33/*
51 * pcie_portdrv_probe - Probe PCI-Express port devices 34 * pcie_portdrv_probe - Probe PCI-Express port devices
52 * @dev: PCI-Express port device being probed 35 * @dev: PCI-Express port device being probed
@@ -73,8 +56,10 @@ static int __devinit pcie_portdrv_probe (struct pci_dev *dev,
73 "%s->Dev[%04x:%04x] has invalid IRQ. Check vendor BIOS\n", 56 "%s->Dev[%04x:%04x] has invalid IRQ. Check vendor BIOS\n",
74 __FUNCTION__, dev->device, dev->vendor); 57 __FUNCTION__, dev->device, dev->vendor);
75 } 58 }
76 if (pcie_port_device_register(dev)) 59 if (pcie_port_device_register(dev)) {
60 pci_disable_device(dev);
77 return -ENOMEM; 61 return -ENOMEM;
62 }
78 63
79 return 0; 64 return 0;
80} 65}
@@ -86,6 +71,23 @@ static void pcie_portdrv_remove (struct pci_dev *dev)
86} 71}
87 72
88#ifdef CONFIG_PM 73#ifdef CONFIG_PM
74static int pcie_portdrv_save_config(struct pci_dev *dev)
75{
76 return pci_save_state(dev);
77}
78
79static int pcie_portdrv_restore_config(struct pci_dev *dev)
80{
81 int retval;
82
83 pci_restore_state(dev);
84 retval = pci_enable_device(dev);
85 if (retval)
86 return retval;
87 pci_set_master(dev);
88 return 0;
89}
90
89static int pcie_portdrv_suspend (struct pci_dev *dev, pm_message_t state) 91static int pcie_portdrv_suspend (struct pci_dev *dev, pm_message_t state)
90{ 92{
91 int ret = pcie_port_device_suspend(dev, state); 93 int ret = pcie_port_device_suspend(dev, state);
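Two things happen in the portdrv hunks above: the save/restore helpers move under CONFIG_PM, where their only users live, and the probe path now calls pci_disable_device() when pcie_port_device_register() fails, so the earlier pci_enable_device() is not leaked. A kernel-style sketch of that balanced error path; example_probe and register_port_services are hypothetical stand-ins, not functions from this driver:

    static int example_probe(struct pci_dev *dev)
    {
        int status = pci_enable_device(dev);

        if (status)
            return status;

        if (register_port_services(dev)) {   /* hypothetical helper */
            pci_disable_device(dev);         /* undo the earlier enable */
            return -ENOMEM;
        }
        return 0;
    }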
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index e3c78c39b7e4..fb08bc951ac0 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -990,6 +990,11 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
990 case 0x8070: /* P4G8X Deluxe */ 990 case 0x8070: /* P4G8X Deluxe */
991 asus_hides_smbus = 1; 991 asus_hides_smbus = 1;
992 } 992 }
993 if (dev->device == PCI_DEVICE_ID_INTEL_E7501_MCH)
994 switch (dev->subsystem_device) {
995 case 0x80c9: /* PU-DLS */
996 asus_hides_smbus = 1;
997 }
993 if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB) 998 if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB)
994 switch (dev->subsystem_device) { 999 switch (dev->subsystem_device) {
995 case 0x1751: /* M2N notebook */ 1000 case 0x1751: /* M2N notebook */
@@ -1058,6 +1063,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845G_HB, asu
1058DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82850_HB, asus_hides_smbus_hostbridge ); 1063DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82850_HB, asus_hides_smbus_hostbridge );
1059DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, asus_hides_smbus_hostbridge ); 1064DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, asus_hides_smbus_hostbridge );
1060DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_7205_0, asus_hides_smbus_hostbridge ); 1065DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_7205_0, asus_hides_smbus_hostbridge );
1066DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7501_MCH, asus_hides_smbus_hostbridge );
1061DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855PM_HB, asus_hides_smbus_hostbridge ); 1067DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855PM_HB, asus_hides_smbus_hostbridge );
1062DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, asus_hides_smbus_hostbridge ); 1068DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, asus_hides_smbus_hostbridge );
1063DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge ); 1069DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge );
@@ -1081,6 +1087,7 @@ static void __init asus_hides_smbus_lpc(struct pci_dev *dev)
1081} 1087}
1082DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc ); 1088DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc );
1083DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc ); 1089DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc );
1090DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc );
1084DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc ); 1091DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc );
1085DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc ); 1092DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc );
1086DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc ); 1093DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc );
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index f8ae2b7db0a7..d529462d1b53 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -41,7 +41,7 @@ pci_do_find_bus(struct pci_bus* bus, unsigned char busnr)
41 * in the global list of PCI buses. If the bus is found, a pointer to its 41 * in the global list of PCI buses. If the bus is found, a pointer to its
42 * data structure is returned. If no bus is found, %NULL is returned. 42 * data structure is returned. If no bus is found, %NULL is returned.
43 */ 43 */
44struct pci_bus * __devinit pci_find_bus(int domain, int busnr) 44struct pci_bus * pci_find_bus(int domain, int busnr)
45{ 45{
46 struct pci_bus *bus = NULL; 46 struct pci_bus *bus = NULL;
47 struct pci_bus *tmp_bus; 47 struct pci_bus *tmp_bus;
diff --git a/drivers/pnp/interface.c b/drivers/pnp/interface.c
index 3163e3d73da1..9d8b415eca79 100644
--- a/drivers/pnp/interface.c
+++ b/drivers/pnp/interface.c
@@ -265,8 +265,8 @@ static ssize_t pnp_show_current_resources(struct device *dmdev, struct device_at
265 pnp_printf(buffer," disabled\n"); 265 pnp_printf(buffer," disabled\n");
266 else 266 else
267 pnp_printf(buffer," 0x%llx-0x%llx\n", 267 pnp_printf(buffer," 0x%llx-0x%llx\n",
268 pnp_port_start(dev, i), 268 (unsigned long long)pnp_port_start(dev, i),
269 pnp_port_end(dev, i)); 269 (unsigned long long)pnp_port_end(dev, i));
270 } 270 }
271 } 271 }
272 for (i = 0; i < PNP_MAX_MEM; i++) { 272 for (i = 0; i < PNP_MAX_MEM; i++) {
@@ -276,8 +276,8 @@ static ssize_t pnp_show_current_resources(struct device *dmdev, struct device_at
276 pnp_printf(buffer," disabled\n"); 276 pnp_printf(buffer," disabled\n");
277 else 277 else
278 pnp_printf(buffer," 0x%llx-0x%llx\n", 278 pnp_printf(buffer," 0x%llx-0x%llx\n",
279 pnp_mem_start(dev, i), 279 (unsigned long long)pnp_mem_start(dev, i),
280 pnp_mem_end(dev, i)); 280 (unsigned long long)pnp_mem_end(dev, i));
281 } 281 }
282 } 282 }
283 for (i = 0; i < PNP_MAX_IRQ; i++) { 283 for (i = 0; i < PNP_MAX_IRQ; i++) {
@@ -287,7 +287,7 @@ static ssize_t pnp_show_current_resources(struct device *dmdev, struct device_at
287 pnp_printf(buffer," disabled\n"); 287 pnp_printf(buffer," disabled\n");
288 else 288 else
289 pnp_printf(buffer," %lld\n", 289 pnp_printf(buffer," %lld\n",
290 pnp_irq(dev, i)); 290 (unsigned long long)pnp_irq(dev, i));
291 } 291 }
292 } 292 }
293 for (i = 0; i < PNP_MAX_DMA; i++) { 293 for (i = 0; i < PNP_MAX_DMA; i++) {
@@ -297,7 +297,7 @@ static ssize_t pnp_show_current_resources(struct device *dmdev, struct device_at
297 pnp_printf(buffer," disabled\n"); 297 pnp_printf(buffer," disabled\n");
298 else 298 else
299 pnp_printf(buffer," %lld\n", 299 pnp_printf(buffer," %lld\n",
300 pnp_dma(dev, i)); 300 (unsigned long long)pnp_dma(dev, i));
301 } 301 }
302 } 302 }
303 ret = (buffer->curr - buf); 303 ret = (buffer->curr - buf);
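The casts above exist because %llx and %lld expect exactly an unsigned/signed long long, while the pnp resource accessors return integers whose width depends on the configuration; casting at the call site keeps a single format string correct on every architecture. A small user-space illustration of the idiom (the typedef is only a stand-in for the kernel's resource type):

    #include <stdio.h>

    typedef unsigned long demo_resource_t;   /* width varies in the kernel */

    int main(void)
    {
        demo_resource_t start = 0xfe000000UL, end = 0xfe000fffUL;

        /* Without the casts, a narrower resource type would feed %llx the
         * wrong number of bytes and print garbage on some platforms. */
        printf("0x%llx-0x%llx\n",
               (unsigned long long)start, (unsigned long long)end);
        return 0;
    }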
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index 77e7202a0eba..904c25fb4ba4 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -940,14 +940,8 @@ static void ahci_host_intr(struct ata_port *ap)
940 return; 940 return;
941 941
942 /* ignore interim PIO setup fis interrupts */ 942 /* ignore interim PIO setup fis interrupts */
943 if (ata_tag_valid(ap->active_tag)) { 943 if (ata_tag_valid(ap->active_tag) && (status & PORT_IRQ_PIOS_FIS))
944 struct ata_queued_cmd *qc = 944 return;
945 ata_qc_from_tag(ap, ap->active_tag);
946
947 if (qc && qc->tf.protocol == ATA_PROT_PIO &&
948 (status & PORT_IRQ_PIOS_FIS))
949 return;
950 }
951 945
952 if (ata_ratelimit()) 946 if (ata_ratelimit())
953 ata_port_printk(ap, KERN_INFO, "spurious interrupt " 947 ata_port_printk(ap, KERN_INFO, "spurious interrupt "
diff --git a/include/asm-ia64/meminit.h b/include/asm-ia64/meminit.h
index 894bc4d89dc0..6a33a07b3f1d 100644
--- a/include/asm-ia64/meminit.h
+++ b/include/asm-ia64/meminit.h
@@ -56,6 +56,11 @@ extern void efi_memmap_init(unsigned long *, unsigned long *);
56 extern struct page *vmem_map; 56 extern struct page *vmem_map;
57 extern int find_largest_hole (u64 start, u64 end, void *arg); 57 extern int find_largest_hole (u64 start, u64 end, void *arg);
58 extern int create_mem_map_page_table (u64 start, u64 end, void *arg); 58 extern int create_mem_map_page_table (u64 start, u64 end, void *arg);
59 extern int vmemmap_find_next_valid_pfn(int, int);
60#else
61static inline int vmemmap_find_next_valid_pfn(int node, int i)
62{
63 return i + 1;
64}
59#endif 65#endif
60
61#endif /* meminit_h */ 66#endif /* meminit_h */
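
The new declaration plus the stub above let callers step through page-frame indices without wrapping every loop in #ifdef CONFIG_VIRTUAL_MEM_MAP: when there is no virtual mem_map, the inline simply degenerates to "i + 1". A hedged caller sketch; the loop shape, bounds, and node handling are illustrative assumptions rather than code from the patch:

#include <asm/meminit.h>

/* Hypothetical walk over mem_map indices on one node.  With
 * CONFIG_VIRTUAL_MEM_MAP the helper is assumed to skip holes in the
 * virtually mapped mem_map; without it, the stub above just advances
 * to the next index. */
static void example_walk_node_pfns(int node, int start, int end)
{
	int i;

	for (i = start; i < end; i = vmemmap_find_next_valid_pfn(node, i)) {
		/* ... inspect the mem_map entry at index i ... */
	}
}
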
diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h
index 37e52a2836b0..20a8d618c845 100644
--- a/include/asm-ia64/pal.h
+++ b/include/asm-ia64/pal.h
@@ -1433,7 +1433,12 @@ typedef union pal_version_u {
1433} pal_version_u_t; 1433} pal_version_u_t;
1434 1434
1435 1435
1436/* Return PAL version information */ 1436/*
1437 * Return PAL version information. While the documentation states that
1438 * PAL_VERSION can be called in either physical or virtual mode, some
1439 * implementations only allow physical calls. We don't call it very often,
1440 * so the overhead isn't worth eliminating.
1441 */
1437static inline s64 1442static inline s64
1438ia64_pal_version (pal_version_u_t *pal_min_version, pal_version_u_t *pal_cur_version) 1443ia64_pal_version (pal_version_u_t *pal_min_version, pal_version_u_t *pal_cur_version)
1439{ 1444{
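
The comment added above records why the wrapper may go through a physical-mode PAL call even though the documentation allows virtual mode. A minimal caller sketch, based only on the signature shown in this hunk; treating a nonzero status as "information unavailable" is an assumption about the usual PAL status convention:

#include <asm/pal.h>

/* Query the minimum and current PAL version words; bail out quietly
 * if the firmware call does not succeed. */
static void example_report_pal_version(void)
{
	pal_version_u_t min_ver, cur_ver;
	s64 status = ia64_pal_version(&min_ver, &cur_ver);

	if (status != 0)
		return;	/* PAL_VERSION unsupported or the call failed */

	/* ... decode the min_ver / cur_ver fields as needed ... */
}
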
diff --git a/include/asm-ia64/sn/xpc.h b/include/asm-ia64/sn/xpc.h
index 8406f1ef4caf..b72af597878d 100644
--- a/include/asm-ia64/sn/xpc.h
+++ b/include/asm-ia64/sn/xpc.h
@@ -1124,8 +1124,8 @@ xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
1124#define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff)) 1124#define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff))
1125#define XPC_SET_IPI_FLAGS(_amo, _c, _f) (_amo) |= ((u64) (_f) << ((_c) * 8)) 1125#define XPC_SET_IPI_FLAGS(_amo, _c, _f) (_amo) |= ((u64) (_f) << ((_c) * 8))
1126 1126
1127#define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0f) 1127#define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x0f0f0f0f0f0f0f0f))
1128#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & 0x1010101010101010) 1128#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x1010101010101010))
1129 1129
1130 1130
1131static inline void 1131static inline void
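
Wrapping the 64-bit masks in __IA64_UL_CONST() keeps them usable from both C and assembly: the C side gets an unsigned-long literal so the constant is neither truncated nor sign-extended, while the assembler sees the bare number. A sketch of the usual shape of such a macro, shown as an assumption for illustration rather than copied from the ia64 headers:

/* C sees 0x0f0f0f0f0f0f0f0fUL; the assembler, which does not accept the
 * UL suffix, sees the bare literal. */
#ifdef __ASSEMBLY__
# define EXAMPLE_UL_CONST(x)	x
#else
# define EXAMPLE_UL_CONST(x)	x##UL
#endif

#define EXAMPLE_OPENCLOSE_MASK	EXAMPLE_UL_CONST(0x0f0f0f0f0f0f0f0f)

The KERNEL_START change in asm-ia64/system.h below applies the same technique to the 0x100000000 offset.
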
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index fc9677bc87ee..384fbf7f2a0f 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -24,7 +24,7 @@
24 * 0xa000000000000000+2*PERCPU_PAGE_SIZE 24 * 0xa000000000000000+2*PERCPU_PAGE_SIZE
25 * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page) 25 * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
26 */ 26 */
27#define KERNEL_START (GATE_ADDR+0x100000000) 27#define KERNEL_START (GATE_ADDR+__IA64_UL_CONST(0x100000000))
28#define PERCPU_ADDR (-PERCPU_PAGE_SIZE) 28#define PERCPU_ADDR (-PERCPU_PAGE_SIZE)
29 29
30#ifndef __ASSEMBLY__ 30#ifndef __ASSEMBLY__
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index 0503b2ed8bae..2d229327959e 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -46,8 +46,6 @@ enum kobject_action {
46 KOBJ_UMOUNT = (__force kobject_action_t) 0x05, /* umount event for block devices (broken) */ 46 KOBJ_UMOUNT = (__force kobject_action_t) 0x05, /* umount event for block devices (broken) */
47 KOBJ_OFFLINE = (__force kobject_action_t) 0x06, /* device offline */ 47 KOBJ_OFFLINE = (__force kobject_action_t) 0x06, /* device offline */
48 KOBJ_ONLINE = (__force kobject_action_t) 0x07, /* device online */ 48 KOBJ_ONLINE = (__force kobject_action_t) 0x07, /* device online */
49 KOBJ_UNDOCK = (__force kobject_action_t) 0x08, /* undocking */
50 KOBJ_DOCK = (__force kobject_action_t) 0x09, /* dock */
51}; 49};
52 50
53struct kobject { 51struct kobject {
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index c09396d2c77b..4eae06b08cf2 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2142,6 +2142,7 @@
2142#define PCI_DEVICE_ID_INTEL_82820_UP_HB 0x2501 2142#define PCI_DEVICE_ID_INTEL_82820_UP_HB 0x2501
2143#define PCI_DEVICE_ID_INTEL_82850_HB 0x2530 2143#define PCI_DEVICE_ID_INTEL_82850_HB 0x2530
2144#define PCI_DEVICE_ID_INTEL_82860_HB 0x2531 2144#define PCI_DEVICE_ID_INTEL_82860_HB 0x2531
2145#define PCI_DEVICE_ID_INTEL_E7501_MCH 0x254c
2145#define PCI_DEVICE_ID_INTEL_82845G_HB 0x2560 2146#define PCI_DEVICE_ID_INTEL_82845G_HB 0x2560
2146#define PCI_DEVICE_ID_INTEL_82845G_IG 0x2562 2147#define PCI_DEVICE_ID_INTEL_82845G_IG 0x2562
2147#define PCI_DEVICE_ID_INTEL_82865_HB 0x2570 2148#define PCI_DEVICE_ID_INTEL_82865_HB 0x2570
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 2b1530fc573b..7f20e7b857cb 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -50,10 +50,6 @@ static char *action_to_string(enum kobject_action action)
50 return "offline"; 50 return "offline";
51 case KOBJ_ONLINE: 51 case KOBJ_ONLINE:
52 return "online"; 52 return "online";
53 case KOBJ_DOCK:
54 return "dock";
55 case KOBJ_UNDOCK:
56 return "undock";
57 default: 53 default:
58 return NULL; 54 return NULL;
59 } 55 }