Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/Kconfig | 11
-rw-r--r-- drivers/acpi/bus.c | 3
-rw-r--r-- drivers/acpi/ec.c | 4
-rw-r--r-- drivers/acpi/ibm_acpi.c | 13
-rw-r--r-- drivers/acpi/processor_core.c | 3
-rw-r--r-- drivers/acpi/processor_perflib.c | 4
-rw-r--r-- drivers/bluetooth/hci_usb.c | 7
-rw-r--r-- drivers/char/drm/i915_irq.c | 2
-rw-r--r-- drivers/char/hw_random/amd-rng.c | 2
-rw-r--r-- drivers/char/hw_random/geode-rng.c | 2
-rw-r--r-- drivers/char/hw_random/intel-rng.c | 34
-rw-r--r-- drivers/char/hw_random/ixp4xx-rng.c | 2
-rw-r--r-- drivers/char/hw_random/via-rng.c | 2
-rw-r--r-- drivers/ieee1394/sbp2.c | 2
-rw-r--r-- drivers/infiniband/core/cma.c | 17
-rw-r--r-- drivers/infiniband/core/ucma.c | 11
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_hca.c | 8
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_irq.c | 2
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_iverbs.h | 4
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_main.c | 10
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_mrmw.c | 4
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_qp.c | 4
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_cq.c | 8
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_memfree.c | 2
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_qp.c | 26
-rw-r--r-- drivers/infiniband/ulp/iser/iscsi_iser.c | 4
-rw-r--r-- drivers/infiniband/ulp/iser/iser_initiator.c | 26
-rw-r--r-- drivers/kvm/svm.c | 8
-rw-r--r-- drivers/kvm/vmx.c | 7
-rw-r--r-- drivers/md/raid1.c | 13
-rw-r--r-- drivers/md/raid10.c | 11
-rw-r--r-- drivers/mmc/at91_mci.c | 11
-rw-r--r-- drivers/mmc/mmci.c | 4
-rw-r--r-- drivers/mmc/omap.c | 6
-rw-r--r-- drivers/net/bnx2.c | 75
-rw-r--r-- drivers/net/chelsio/my3126.c | 5
-rw-r--r-- drivers/net/e1000/e1000_main.c | 6
-rw-r--r-- drivers/net/forcedeth.c | 111
-rw-r--r-- drivers/net/ixgb/ixgb.h | 1
-rw-r--r-- drivers/net/ixgb/ixgb_ethtool.c | 1
-rw-r--r-- drivers/net/ixgb/ixgb_hw.c | 3
-rw-r--r-- drivers/net/ixgb/ixgb_main.c | 57
-rw-r--r-- drivers/net/pcmcia/3c574_cs.c | 1
-rw-r--r-- drivers/net/pcmcia/3c589_cs.c | 1
-rw-r--r-- drivers/net/pcmcia/com20020_cs.c | 1
-rw-r--r-- drivers/net/pcmcia/pcnet_cs.c | 2
-rw-r--r-- drivers/net/pcmcia/xirc2ps_cs.c | 1
-rw-r--r-- drivers/net/qla3xxx.c | 38
-rw-r--r-- drivers/net/tg3.c | 17
-rw-r--r-- drivers/net/tg3.h | 4
-rw-r--r-- drivers/net/wireless/ipw2100.c | 2
-rw-r--r-- drivers/net/wireless/netwave_cs.c | 1
-rw-r--r-- drivers/net/wireless/ray_cs.c | 1
-rw-r--r-- drivers/net/wireless/wl3501_cs.c | 1
-rw-r--r-- drivers/pci/pci.c | 3
-rw-r--r-- drivers/pci/quirks.c | 16
-rw-r--r-- drivers/rtc/rtc-sh.c | 4
-rw-r--r-- drivers/s390/char/vmcp.c | 2
-rw-r--r-- drivers/s390/cio/cio.c | 12
-rw-r--r-- drivers/s390/net/Kconfig | 5
-rw-r--r-- drivers/s390/net/qeth.h | 2
-rw-r--r-- drivers/s390/net/qeth_main.c | 217
-rw-r--r-- drivers/scsi/pcmcia/nsp_cs.c | 1
-rw-r--r-- drivers/scsi/pcmcia/sym53c500_cs.c | 1
-rw-r--r-- drivers/serial/8250.c | 2
-rw-r--r-- drivers/serial/mpc52xx_uart.c | 2
-rw-r--r-- drivers/usb/serial/Kconfig | 2
-rw-r--r-- drivers/usb/serial/funsoft.c | 2
68 files changed, 503 insertions(+), 374 deletions(-)
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 1639998e4d27..f4f000abc4e9 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -225,17 +225,6 @@ config ACPI_IBM_DOCK
225 225
226 If you are not sure, say N here. 226 If you are not sure, say N here.
227 227
228config ACPI_IBM_BAY
229 bool "Legacy Removable Bay Support"
230 depends on ACPI_IBM
231 depends on ACPI_BAY=n
232 default n
233 ---help---
234 Allows the ibm_acpi driver to handle removable bays.
235 This support is obsoleted by CONFIG_ACPI_BAY.
236
237 If you are not sure, say N here.
238
239config ACPI_TOSHIBA 228config ACPI_TOSHIBA
240 tristate "Toshiba Laptop Extras" 229 tristate "Toshiba Laptop Extras"
241 depends on X86 230 depends on X86
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 279c4bac92e5..766332e45592 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -561,6 +561,9 @@ static int __init acpi_bus_init_irq(void)
561 case ACPI_IRQ_MODEL_IOSAPIC: 561 case ACPI_IRQ_MODEL_IOSAPIC:
562 message = "IOSAPIC"; 562 message = "IOSAPIC";
563 break; 563 break;
564 case ACPI_IRQ_MODEL_PLATFORM:
565 message = "platform specific model";
566 break;
564 default: 567 default:
565 printk(KERN_WARNING PREFIX "Unknown interrupt routing model\n"); 568 printk(KERN_WARNING PREFIX "Unknown interrupt routing model\n");
566 return -ENODEV; 569 return -ENODEV;
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 4144d5dd442e..cbdf031f3c09 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1016,8 +1016,8 @@ static int __init acpi_ec_set_intr_mode(char *str)
1016 acpi_ec_mode = EC_POLL; 1016 acpi_ec_mode = EC_POLL;
1017 } 1017 }
1018 acpi_ec_driver.ops.add = acpi_ec_add; 1018 acpi_ec_driver.ops.add = acpi_ec_add;
1019 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "EC %s mode.\n", 1019 printk(KERN_NOTICE PREFIX "%s mode.\n",
1020 intr ? "interrupt" : "polling")); 1020 intr ? "interrupt" : "polling");
1021 1021
1022 return 1; 1022 return 1;
1023} 1023}
diff --git a/drivers/acpi/ibm_acpi.c b/drivers/acpi/ibm_acpi.c
index b72d13d11a27..c6144ca66638 100644
--- a/drivers/acpi/ibm_acpi.c
+++ b/drivers/acpi/ibm_acpi.c
@@ -157,7 +157,6 @@ IBM_HANDLE(dock, root, "\\_SB.GDCK", /* X30, X31, X40 */
157 "\\_SB.PCI.ISA.SLCE", /* 570 */ 157 "\\_SB.PCI.ISA.SLCE", /* 570 */
158 ); /* A21e,G4x,R30,R31,R32,R40,R40e,R50e */ 158 ); /* A21e,G4x,R30,R31,R32,R40,R40e,R50e */
159#endif 159#endif
160#ifdef CONFIG_ACPI_IBM_BAY
161IBM_HANDLE(bay, root, "\\_SB.PCI.IDE.SECN.MAST", /* 570 */ 160IBM_HANDLE(bay, root, "\\_SB.PCI.IDE.SECN.MAST", /* 570 */
162 "\\_SB.PCI0.IDE0.IDES.IDSM", /* 600e/x, 770e, 770x */ 161 "\\_SB.PCI0.IDE0.IDES.IDSM", /* 600e/x, 770e, 770x */
163 "\\_SB.PCI0.SATA.SCND.MSTR", /* T60, X60, Z60 */ 162 "\\_SB.PCI0.SATA.SCND.MSTR", /* T60, X60, Z60 */
@@ -175,7 +174,6 @@ IBM_HANDLE(bay2, root, "\\_SB.PCI0.IDE0.PRIM.SLAV", /* A3x, R32 */
175IBM_HANDLE(bay2_ej, bay2, "_EJ3", /* 600e/x, 770e, A3x */ 174IBM_HANDLE(bay2_ej, bay2, "_EJ3", /* 600e/x, 770e, A3x */
176 "_EJ0", /* 770x */ 175 "_EJ0", /* 770x */
177 ); /* all others */ 176 ); /* all others */
178#endif
179 177
180/* don't list other alternatives as we install a notify handler on the 570 */ 178/* don't list other alternatives as we install a notify handler on the 570 */
181IBM_HANDLE(pci, root, "\\_SB.PCI"); /* 570 */ 179IBM_HANDLE(pci, root, "\\_SB.PCI"); /* 570 */
@@ -1042,7 +1040,6 @@ static int light_write(char *buf)
1042 return 0; 1040 return 0;
1043} 1041}
1044 1042
1045#if defined(CONFIG_ACPI_IBM_DOCK) || defined(CONFIG_ACPI_IBM_BAY)
1046static int _sta(acpi_handle handle) 1043static int _sta(acpi_handle handle)
1047{ 1044{
1048 int status; 1045 int status;
@@ -1052,7 +1049,7 @@ static int _sta(acpi_handle handle)
1052 1049
1053 return status; 1050 return status;
1054} 1051}
1055#endif 1052
1056#ifdef CONFIG_ACPI_IBM_DOCK 1053#ifdef CONFIG_ACPI_IBM_DOCK
1057#define dock_docked() (_sta(dock_handle) & 1) 1054#define dock_docked() (_sta(dock_handle) & 1)
1058 1055
@@ -1118,7 +1115,6 @@ static void dock_notify(struct ibm_struct *ibm, u32 event)
1118} 1115}
1119#endif 1116#endif
1120 1117
1121#ifdef CONFIG_ACPI_IBM_BAY
1122static int bay_status_supported; 1118static int bay_status_supported;
1123static int bay_status2_supported; 1119static int bay_status2_supported;
1124static int bay_eject_supported; 1120static int bay_eject_supported;
@@ -1194,7 +1190,6 @@ static void bay_notify(struct ibm_struct *ibm, u32 event)
1194{ 1190{
1195 acpi_bus_generate_event(ibm->device, event, 0); 1191 acpi_bus_generate_event(ibm->device, event, 0);
1196} 1192}
1197#endif
1198 1193
1199static int cmos_read(char *p) 1194static int cmos_read(char *p)
1200{ 1195{
@@ -2354,7 +2349,6 @@ static struct ibm_struct ibms[] = {
2354 .type = ACPI_SYSTEM_NOTIFY, 2349 .type = ACPI_SYSTEM_NOTIFY,
2355 }, 2350 },
2356#endif 2351#endif
2357#ifdef CONFIG_ACPI_IBM_BAY
2358 { 2352 {
2359 .name = "bay", 2353 .name = "bay",
2360 .init = bay_init, 2354 .init = bay_init,
@@ -2364,7 +2358,6 @@ static struct ibm_struct ibms[] = {
2364 .handle = &bay_handle, 2358 .handle = &bay_handle,
2365 .type = ACPI_SYSTEM_NOTIFY, 2359 .type = ACPI_SYSTEM_NOTIFY,
2366 }, 2360 },
2367#endif
2368 { 2361 {
2369 .name = "cmos", 2362 .name = "cmos",
2370 .read = cmos_read, 2363 .read = cmos_read,
@@ -2650,9 +2643,7 @@ IBM_PARAM(light);
2650#ifdef CONFIG_ACPI_IBM_DOCK 2643#ifdef CONFIG_ACPI_IBM_DOCK
2651IBM_PARAM(dock); 2644IBM_PARAM(dock);
2652#endif 2645#endif
2653#ifdef CONFIG_ACPI_IBM_BAY
2654IBM_PARAM(bay); 2646IBM_PARAM(bay);
2655#endif
2656IBM_PARAM(cmos); 2647IBM_PARAM(cmos);
2657IBM_PARAM(led); 2648IBM_PARAM(led);
2658IBM_PARAM(beep); 2649IBM_PARAM(beep);
@@ -2735,14 +2726,12 @@ static int __init acpi_ibm_init(void)
2735 IBM_HANDLE_INIT(dock); 2726 IBM_HANDLE_INIT(dock);
2736#endif 2727#endif
2737 IBM_HANDLE_INIT(pci); 2728 IBM_HANDLE_INIT(pci);
2738#ifdef CONFIG_ACPI_IBM_BAY
2739 IBM_HANDLE_INIT(bay); 2729 IBM_HANDLE_INIT(bay);
2740 if (bay_handle) 2730 if (bay_handle)
2741 IBM_HANDLE_INIT(bay_ej); 2731 IBM_HANDLE_INIT(bay_ej);
2742 IBM_HANDLE_INIT(bay2); 2732 IBM_HANDLE_INIT(bay2);
2743 if (bay2_handle) 2733 if (bay2_handle)
2744 IBM_HANDLE_INIT(bay2_ej); 2734 IBM_HANDLE_INIT(bay2_ej);
2745#endif
2746 IBM_HANDLE_INIT(beep); 2735 IBM_HANDLE_INIT(beep);
2747 IBM_HANDLE_INIT(ecrd); 2736 IBM_HANDLE_INIT(ecrd);
2748 IBM_HANDLE_INIT(ecwr); 2737 IBM_HANDLE_INIT(ecwr);
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 25718fed39f1..5f9496d59ed6 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -476,9 +476,6 @@ static int acpi_processor_get_info(struct acpi_processor *pr)
476 if (cpu_index == -1) { 476 if (cpu_index == -1) {
477 if (ACPI_FAILURE 477 if (ACPI_FAILURE
478 (acpi_processor_hotadd_init(pr->handle, &pr->id))) { 478 (acpi_processor_hotadd_init(pr->handle, &pr->id))) {
479 printk(KERN_ERR PREFIX
480 "Getting cpuindex for acpiid 0x%x\n",
481 pr->acpi_id);
482 return -ENODEV; 479 return -ENODEV;
483 } 480 }
484 } 481 }
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 0e60382714bb..5207f9e4b443 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -736,10 +736,6 @@ int acpi_processor_preregister_performance(
736 } 736 }
737 737
738err_ret: 738err_ret:
739 if (retval) {
740 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error while parsing _PSD domain information. Assuming no coordination\n"));
741 }
742
743 for_each_possible_cpu(i) { 739 for_each_possible_cpu(i) {
744 pr = processors[i]; 740 pr = processors[i];
745 if (!pr || !pr->performance) 741 if (!pr || !pr->performance)
diff --git a/drivers/bluetooth/hci_usb.c b/drivers/bluetooth/hci_usb.c
index aeefec97fdee..6bdf593081d8 100644
--- a/drivers/bluetooth/hci_usb.c
+++ b/drivers/bluetooth/hci_usb.c
@@ -117,10 +117,17 @@ static struct usb_device_id blacklist_ids[] = {
117 117
118 /* IBM/Lenovo ThinkPad with Broadcom chip */ 118 /* IBM/Lenovo ThinkPad with Broadcom chip */
119 { USB_DEVICE(0x0a5c, 0x201e), .driver_info = HCI_WRONG_SCO_MTU }, 119 { USB_DEVICE(0x0a5c, 0x201e), .driver_info = HCI_WRONG_SCO_MTU },
120 { USB_DEVICE(0x0a5c, 0x2110), .driver_info = HCI_WRONG_SCO_MTU },
120 121
121 /* ANYCOM Bluetooth USB-200 and USB-250 */ 122 /* ANYCOM Bluetooth USB-200 and USB-250 */
122 { USB_DEVICE(0x0a5c, 0x2111), .driver_info = HCI_RESET }, 123 { USB_DEVICE(0x0a5c, 0x2111), .driver_info = HCI_RESET },
123 124
125 /* HP laptop with Broadcom chip */
126 { USB_DEVICE(0x03f0, 0x171d), .driver_info = HCI_WRONG_SCO_MTU },
127
128 /* Dell laptop with Broadcom chip */
129 { USB_DEVICE(0x413c, 0x8126), .driver_info = HCI_WRONG_SCO_MTU },
130
124 /* Microsoft Wireless Transceiver for Bluetooth 2.0 */ 131 /* Microsoft Wireless Transceiver for Bluetooth 2.0 */
125 { USB_DEVICE(0x045e, 0x009c), .driver_info = HCI_RESET }, 132 { USB_DEVICE(0x045e, 0x009c), .driver_info = HCI_RESET },
126 133
diff --git a/drivers/char/drm/i915_irq.c b/drivers/char/drm/i915_irq.c
index e2c4b3a41b1e..78c1ae28f17c 100644
--- a/drivers/char/drm/i915_irq.c
+++ b/drivers/char/drm/i915_irq.c
@@ -500,7 +500,7 @@ int i915_vblank_swap(DRM_IOCTL_ARGS)
500 500
501 if (!drm_get_drawable_info(dev, swap.drawable)) { 501 if (!drm_get_drawable_info(dev, swap.drawable)) {
502 spin_unlock_irqrestore(&dev->drw_lock, irqflags); 502 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
503 DRM_ERROR("Invalid drawable ID %d\n", swap.drawable); 503 DRM_DEBUG("Invalid drawable ID %d\n", swap.drawable);
504 return DRM_ERR(EINVAL); 504 return DRM_ERR(EINVAL);
505 } 505 }
506 506
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index 71e4e0f3fd54..556fd81fa815 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -144,7 +144,7 @@ static void __exit mod_exit(void)
144 hwrng_unregister(&amd_rng); 144 hwrng_unregister(&amd_rng);
145} 145}
146 146
147subsys_initcall(mod_init); 147module_init(mod_init);
148module_exit(mod_exit); 148module_exit(mod_exit);
149 149
150MODULE_AUTHOR("The Linux Kernel team"); 150MODULE_AUTHOR("The Linux Kernel team");
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
index d37ced0d132b..8e8658dcd2e3 100644
--- a/drivers/char/hw_random/geode-rng.c
+++ b/drivers/char/hw_random/geode-rng.c
@@ -125,7 +125,7 @@ static void __exit mod_exit(void)
125 iounmap(mem); 125 iounmap(mem);
126} 126}
127 127
128subsys_initcall(mod_init); 128module_init(mod_init);
129module_exit(mod_exit); 129module_exit(mod_exit);
130 130
131MODULE_DESCRIPTION("H/W RNG driver for AMD Geode LX CPUs"); 131MODULE_DESCRIPTION("H/W RNG driver for AMD Geode LX CPUs");
diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
index 8efbc9c0e545..f22e78e3c70f 100644
--- a/drivers/char/hw_random/intel-rng.c
+++ b/drivers/char/hw_random/intel-rng.c
@@ -143,6 +143,11 @@ static const struct pci_device_id pci_tbl[] = {
143}; 143};
144MODULE_DEVICE_TABLE(pci, pci_tbl); 144MODULE_DEVICE_TABLE(pci, pci_tbl);
145 145
146static __initdata int no_fwh_detect;
147module_param(no_fwh_detect, int, 0);
148MODULE_PARM_DESC(no_fwh_detect, "Skip FWH detection:\n"
149 " positive value - skip if FWH space locked read-only\n"
150 " negative value - skip always");
146 151
147static inline u8 hwstatus_get(void __iomem *mem) 152static inline u8 hwstatus_get(void __iomem *mem)
148{ 153{
@@ -240,6 +245,11 @@ static int __init mod_init(void)
240 if (!dev) 245 if (!dev)
241 goto out; /* Device not found. */ 246 goto out; /* Device not found. */
242 247
248 if (no_fwh_detect < 0) {
249 pci_dev_put(dev);
250 goto fwh_done;
251 }
252
243 /* Check for Intel 82802 */ 253 /* Check for Intel 82802 */
244 if (dev->device < 0x2640) { 254 if (dev->device < 0x2640) {
245 fwh_dec_en1_off = FWH_DEC_EN1_REG_OLD; 255 fwh_dec_en1_off = FWH_DEC_EN1_REG_OLD;
@@ -252,6 +262,23 @@ static int __init mod_init(void)
252 pci_read_config_byte(dev, fwh_dec_en1_off, &fwh_dec_en1_val); 262 pci_read_config_byte(dev, fwh_dec_en1_off, &fwh_dec_en1_val);
253 pci_read_config_byte(dev, bios_cntl_off, &bios_cntl_val); 263 pci_read_config_byte(dev, bios_cntl_off, &bios_cntl_val);
254 264
265 if ((bios_cntl_val &
266 (BIOS_CNTL_LOCK_ENABLE_MASK|BIOS_CNTL_WRITE_ENABLE_MASK))
267 == BIOS_CNTL_LOCK_ENABLE_MASK) {
268 static __initdata /*const*/ char warning[] =
269 KERN_WARNING PFX "Firmware space is locked read-only. If you can't or\n"
270 KERN_WARNING PFX "don't want to disable this in firmware setup, and if\n"
271 KERN_WARNING PFX "you are certain that your system has a functional\n"
272 KERN_WARNING PFX "RNG, try using the 'no_fwh_detect' option.\n";
273
274 pci_dev_put(dev);
275 if (no_fwh_detect)
276 goto fwh_done;
277 printk(warning);
278 err = -EBUSY;
279 goto out;
280 }
281
255 mem = ioremap_nocache(INTEL_FWH_ADDR, INTEL_FWH_ADDR_LEN); 282 mem = ioremap_nocache(INTEL_FWH_ADDR, INTEL_FWH_ADDR_LEN);
256 if (mem == NULL) { 283 if (mem == NULL) {
257 pci_dev_put(dev); 284 pci_dev_put(dev);
@@ -280,8 +307,7 @@ static int __init mod_init(void)
280 pci_write_config_byte(dev, 307 pci_write_config_byte(dev,
281 fwh_dec_en1_off, 308 fwh_dec_en1_off,
282 fwh_dec_en1_val | FWH_F8_EN_MASK); 309 fwh_dec_en1_val | FWH_F8_EN_MASK);
283 if (!(bios_cntl_val & 310 if (!(bios_cntl_val & BIOS_CNTL_WRITE_ENABLE_MASK))
284 (BIOS_CNTL_LOCK_ENABLE_MASK|BIOS_CNTL_WRITE_ENABLE_MASK)))
285 pci_write_config_byte(dev, 311 pci_write_config_byte(dev,
286 bios_cntl_off, 312 bios_cntl_off,
287 bios_cntl_val | BIOS_CNTL_WRITE_ENABLE_MASK); 313 bios_cntl_val | BIOS_CNTL_WRITE_ENABLE_MASK);
@@ -315,6 +341,8 @@ static int __init mod_init(void)
315 goto out; 341 goto out;
316 } 342 }
317 343
344fwh_done:
345
318 err = -ENOMEM; 346 err = -ENOMEM;
319 mem = ioremap(INTEL_RNG_ADDR, INTEL_RNG_ADDR_LEN); 347 mem = ioremap(INTEL_RNG_ADDR, INTEL_RNG_ADDR_LEN);
320 if (!mem) 348 if (!mem)
@@ -350,7 +378,7 @@ static void __exit mod_exit(void)
350 iounmap(mem); 378 iounmap(mem);
351} 379}
352 380
353subsys_initcall(mod_init); 381module_init(mod_init);
354module_exit(mod_exit); 382module_exit(mod_exit);
355 383
356MODULE_DESCRIPTION("H/W RNG driver for Intel chipsets"); 384MODULE_DESCRIPTION("H/W RNG driver for Intel chipsets");
diff --git a/drivers/char/hw_random/ixp4xx-rng.c b/drivers/char/hw_random/ixp4xx-rng.c
index c9caff57db85..bab43ca32ac1 100644
--- a/drivers/char/hw_random/ixp4xx-rng.c
+++ b/drivers/char/hw_random/ixp4xx-rng.c
@@ -64,7 +64,7 @@ static void __exit ixp4xx_rng_exit(void)
64 iounmap(rng_base); 64 iounmap(rng_base);
65} 65}
66 66
67subsys_initcall(ixp4xx_rng_init); 67module_init(ixp4xx_rng_init);
68module_exit(ixp4xx_rng_exit); 68module_exit(ixp4xx_rng_exit);
69 69
70MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>"); 70MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index 0e786b617bb8..9ebf84d18655 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -176,7 +176,7 @@ static void __exit mod_exit(void)
176 hwrng_unregister(&via_rng); 176 hwrng_unregister(&via_rng);
177} 177}
178 178
179subsys_initcall(mod_init); 179module_init(mod_init);
180module_exit(mod_exit); 180module_exit(mod_exit);
181 181
182MODULE_DESCRIPTION("H/W RNG driver for VIA chipsets"); 182MODULE_DESCRIPTION("H/W RNG driver for VIA chipsets");
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 2b5d7ab3adf7..4325aac7733d 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -2020,6 +2020,8 @@ static int sbp2scsi_slave_configure(struct scsi_device *sdev)
2020 blk_queue_dma_alignment(sdev->request_queue, (512 - 1)); 2020 blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
2021 sdev->use_10_for_rw = 1; 2021 sdev->use_10_for_rw = 1;
2022 2022
2023 if (sdev->type == TYPE_ROM)
2024 sdev->use_10_for_ms = 1;
2023 if (sdev->type == TYPE_DISK && 2025 if (sdev->type == TYPE_DISK &&
2024 lu->workarounds & SBP2_WORKAROUND_MODE_SENSE_8) 2026 lu->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
2025 sdev->skip_ms_page_8 = 1; 2027 sdev->skip_ms_page_8 = 1;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 533193d4e5df..9e0ab048c878 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1088,10 +1088,21 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
1088 *sin = iw_event->local_addr; 1088 *sin = iw_event->local_addr;
1089 sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr; 1089 sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
1090 *sin = iw_event->remote_addr; 1090 *sin = iw_event->remote_addr;
1091 if (iw_event->status) 1091 switch (iw_event->status) {
1092 event.event = RDMA_CM_EVENT_REJECTED; 1092 case 0:
1093 else
1094 event.event = RDMA_CM_EVENT_ESTABLISHED; 1093 event.event = RDMA_CM_EVENT_ESTABLISHED;
1094 break;
1095 case -ECONNRESET:
1096 case -ECONNREFUSED:
1097 event.event = RDMA_CM_EVENT_REJECTED;
1098 break;
1099 case -ETIMEDOUT:
1100 event.event = RDMA_CM_EVENT_UNREACHABLE;
1101 break;
1102 default:
1103 event.event = RDMA_CM_EVENT_CONNECT_ERROR;
1104 break;
1105 }
1095 break; 1106 break;
1096 case IW_CM_EVENT_ESTABLISHED: 1107 case IW_CM_EVENT_ESTABLISHED:
1097 event.event = RDMA_CM_EVENT_ESTABLISHED; 1108 event.event = RDMA_CM_EVENT_ESTABLISHED;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 81a5cdc5733a..e2e8d329b443 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -209,10 +209,21 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
209 if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) { 209 if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
210 if (!ctx->backlog) { 210 if (!ctx->backlog) {
211 ret = -EDQUOT; 211 ret = -EDQUOT;
212 kfree(uevent);
212 goto out; 213 goto out;
213 } 214 }
214 ctx->backlog--; 215 ctx->backlog--;
216 } else if (!ctx->uid) {
217 /*
218 * We ignore events for new connections until userspace has set
219 * their context. This can only happen if an error occurs on a
220 * new connection before the user accepts it. This is okay,
221 * since the accept will just fail later.
222 */
223 kfree(uevent);
224 goto out;
215 } 225 }
226
216 list_add_tail(&uevent->list, &ctx->file->event_list); 227 list_add_tail(&uevent->list, &ctx->file->event_list);
217 wake_up_interruptible(&ctx->file->poll_wait); 228 wake_up_interruptible(&ctx->file->poll_wait);
218out: 229out:
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
index e1b618c5f685..b7be950ab47c 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -50,7 +50,7 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
50 ib_device); 50 ib_device);
51 struct hipz_query_hca *rblock; 51 struct hipz_query_hca *rblock;
52 52
53 rblock = ehca_alloc_fw_ctrlblock(); 53 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
54 if (!rblock) { 54 if (!rblock) {
55 ehca_err(&shca->ib_device, "Can't allocate rblock memory."); 55 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
56 return -ENOMEM; 56 return -ENOMEM;
@@ -110,7 +110,7 @@ int ehca_query_port(struct ib_device *ibdev,
110 ib_device); 110 ib_device);
111 struct hipz_query_port *rblock; 111 struct hipz_query_port *rblock;
112 112
113 rblock = ehca_alloc_fw_ctrlblock(); 113 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
114 if (!rblock) { 114 if (!rblock) {
115 ehca_err(&shca->ib_device, "Can't allocate rblock memory."); 115 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
116 return -ENOMEM; 116 return -ENOMEM;
@@ -179,7 +179,7 @@ int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
179 return -EINVAL; 179 return -EINVAL;
180 } 180 }
181 181
182 rblock = ehca_alloc_fw_ctrlblock(); 182 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
183 if (!rblock) { 183 if (!rblock) {
184 ehca_err(&shca->ib_device, "Can't allocate rblock memory."); 184 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
185 return -ENOMEM; 185 return -ENOMEM;
@@ -212,7 +212,7 @@ int ehca_query_gid(struct ib_device *ibdev, u8 port,
212 return -EINVAL; 212 return -EINVAL;
213 } 213 }
214 214
215 rblock = ehca_alloc_fw_ctrlblock(); 215 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
216 if (!rblock) { 216 if (!rblock) {
217 ehca_err(&shca->ib_device, "Can't allocate rblock memory."); 217 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
218 return -ENOMEM; 218 return -ENOMEM;
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index c3ea746e9045..e7209afb4250 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -138,7 +138,7 @@ int ehca_error_data(struct ehca_shca *shca, void *data,
138 u64 *rblock; 138 u64 *rblock;
139 unsigned long block_count; 139 unsigned long block_count;
140 140
141 rblock = ehca_alloc_fw_ctrlblock(); 141 rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
142 if (!rblock) { 142 if (!rblock) {
143 ehca_err(&shca->ib_device, "Cannot allocate rblock memory."); 143 ehca_err(&shca->ib_device, "Cannot allocate rblock memory.");
144 ret = -ENOMEM; 144 ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index 3720e3032cce..cd7789f0d08e 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -180,10 +180,10 @@ int ehca_mmap_register(u64 physical,void **mapped,
180int ehca_munmap(unsigned long addr, size_t len); 180int ehca_munmap(unsigned long addr, size_t len);
181 181
182#ifdef CONFIG_PPC_64K_PAGES 182#ifdef CONFIG_PPC_64K_PAGES
183void *ehca_alloc_fw_ctrlblock(void); 183void *ehca_alloc_fw_ctrlblock(gfp_t flags);
184void ehca_free_fw_ctrlblock(void *ptr); 184void ehca_free_fw_ctrlblock(void *ptr);
185#else 185#else
186#define ehca_alloc_fw_ctrlblock() ((void *) get_zeroed_page(GFP_KERNEL)) 186#define ehca_alloc_fw_ctrlblock(flags) ((void *) get_zeroed_page(flags))
187#define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr)) 187#define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr))
188#endif 188#endif
189 189
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index cc47e4c13a18..6574fbbaead5 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -106,9 +106,9 @@ static struct timer_list poll_eqs_timer;
106#ifdef CONFIG_PPC_64K_PAGES 106#ifdef CONFIG_PPC_64K_PAGES
107static struct kmem_cache *ctblk_cache = NULL; 107static struct kmem_cache *ctblk_cache = NULL;
108 108
109void *ehca_alloc_fw_ctrlblock(void) 109void *ehca_alloc_fw_ctrlblock(gfp_t flags)
110{ 110{
111 void *ret = kmem_cache_zalloc(ctblk_cache, GFP_KERNEL); 111 void *ret = kmem_cache_zalloc(ctblk_cache, flags);
112 if (!ret) 112 if (!ret)
113 ehca_gen_err("Out of memory for ctblk"); 113 ehca_gen_err("Out of memory for ctblk");
114 return ret; 114 return ret;
@@ -206,7 +206,7 @@ int ehca_sense_attributes(struct ehca_shca *shca)
206 u64 h_ret; 206 u64 h_ret;
207 struct hipz_query_hca *rblock; 207 struct hipz_query_hca *rblock;
208 208
209 rblock = ehca_alloc_fw_ctrlblock(); 209 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
210 if (!rblock) { 210 if (!rblock) {
211 ehca_gen_err("Cannot allocate rblock memory."); 211 ehca_gen_err("Cannot allocate rblock memory.");
212 return -ENOMEM; 212 return -ENOMEM;
@@ -258,7 +258,7 @@ static int init_node_guid(struct ehca_shca *shca)
258 int ret = 0; 258 int ret = 0;
259 struct hipz_query_hca *rblock; 259 struct hipz_query_hca *rblock;
260 260
261 rblock = ehca_alloc_fw_ctrlblock(); 261 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
262 if (!rblock) { 262 if (!rblock) {
263 ehca_err(&shca->ib_device, "Can't allocate rblock memory."); 263 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
264 return -ENOMEM; 264 return -ENOMEM;
@@ -469,7 +469,7 @@ static ssize_t ehca_show_##name(struct device *dev, \
469 \ 469 \
470 shca = dev->driver_data; \ 470 shca = dev->driver_data; \
471 \ 471 \
472 rblock = ehca_alloc_fw_ctrlblock(); \ 472 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL); \
473 if (!rblock) { \ 473 if (!rblock) { \
474 dev_err(dev, "Can't allocate rblock memory."); \ 474 dev_err(dev, "Can't allocate rblock memory."); \
475 return 0; \ 475 return 0; \
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index 0a5e2214cc5f..cfb362a1029c 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -1013,7 +1013,7 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca,
1013 u32 i; 1013 u32 i;
1014 u64 *kpage; 1014 u64 *kpage;
1015 1015
1016 kpage = ehca_alloc_fw_ctrlblock(); 1016 kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
1017 if (!kpage) { 1017 if (!kpage) {
1018 ehca_err(&shca->ib_device, "kpage alloc failed"); 1018 ehca_err(&shca->ib_device, "kpage alloc failed");
1019 ret = -ENOMEM; 1019 ret = -ENOMEM;
@@ -1124,7 +1124,7 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
1124 ehca_mrmw_map_acl(acl, &hipz_acl); 1124 ehca_mrmw_map_acl(acl, &hipz_acl);
1125 ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl); 1125 ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
1126 1126
1127 kpage = ehca_alloc_fw_ctrlblock(); 1127 kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
1128 if (!kpage) { 1128 if (!kpage) {
1129 ehca_err(&shca->ib_device, "kpage alloc failed"); 1129 ehca_err(&shca->ib_device, "kpage alloc failed");
1130 ret = -ENOMEM; 1130 ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index c6c9cef203e3..34b85556d01e 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -807,7 +807,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
807 unsigned long spl_flags = 0; 807 unsigned long spl_flags = 0;
808 808
809 /* do query_qp to obtain current attr values */ 809 /* do query_qp to obtain current attr values */
810 mqpcb = ehca_alloc_fw_ctrlblock(); 810 mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
811 if (!mqpcb) { 811 if (!mqpcb) {
812 ehca_err(ibqp->device, "Could not get zeroed page for mqpcb " 812 ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
813 "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num); 813 "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
@@ -1273,7 +1273,7 @@ int ehca_query_qp(struct ib_qp *qp,
1273 return -EINVAL; 1273 return -EINVAL;
1274 } 1274 }
1275 1275
1276 qpcb = ehca_alloc_fw_ctrlblock(); 1276 qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
1277 if (!qpcb) { 1277 if (!qpcb) {
1278 ehca_err(qp->device,"Out of memory for qpcb " 1278 ehca_err(qp->device,"Out of memory for qpcb "
1279 "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num); 1279 "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 283d50b76c3d..1159c8a0f2c5 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -54,6 +54,10 @@ enum {
54 MTHCA_CQ_ENTRY_SIZE = 0x20 54 MTHCA_CQ_ENTRY_SIZE = 0x20
55}; 55};
56 56
57enum {
58 MTHCA_ATOMIC_BYTE_LEN = 8
59};
60
57/* 61/*
58 * Must be packed because start is 64 bits but only aligned to 32 bits. 62 * Must be packed because start is 64 bits but only aligned to 32 bits.
59 */ 63 */
@@ -599,11 +603,11 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
599 break; 603 break;
600 case MTHCA_OPCODE_ATOMIC_CS: 604 case MTHCA_OPCODE_ATOMIC_CS:
601 entry->opcode = IB_WC_COMP_SWAP; 605 entry->opcode = IB_WC_COMP_SWAP;
602 entry->byte_len = be32_to_cpu(cqe->byte_cnt); 606 entry->byte_len = MTHCA_ATOMIC_BYTE_LEN;
603 break; 607 break;
604 case MTHCA_OPCODE_ATOMIC_FA: 608 case MTHCA_OPCODE_ATOMIC_FA:
605 entry->opcode = IB_WC_FETCH_ADD; 609 entry->opcode = IB_WC_FETCH_ADD;
606 entry->byte_len = be32_to_cpu(cqe->byte_cnt); 610 entry->byte_len = MTHCA_ATOMIC_BYTE_LEN;
607 break; 611 break;
608 case MTHCA_OPCODE_BIND_MW: 612 case MTHCA_OPCODE_BIND_MW:
609 entry->opcode = IB_WC_BIND_MW; 613 entry->opcode = IB_WC_BIND_MW;
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 15cc2f6eb475..6b19645d946c 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -232,7 +232,7 @@ void *mthca_table_find(struct mthca_icm_table *table, int obj)
232 232
233 list_for_each_entry(chunk, &icm->chunk_list, list) { 233 list_for_each_entry(chunk, &icm->chunk_list, list) {
234 for (i = 0; i < chunk->npages; ++i) { 234 for (i = 0; i < chunk->npages; ++i) {
235 if (chunk->mem[i].length >= offset) { 235 if (chunk->mem[i].length > offset) {
236 page = chunk->mem[i].page; 236 page = chunk->mem[i].page;
237 goto out; 237 goto out;
238 } 238 }
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index d844a2569b47..5f5214c0337d 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -429,13 +429,18 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
429{ 429{
430 struct mthca_dev *dev = to_mdev(ibqp->device); 430 struct mthca_dev *dev = to_mdev(ibqp->device);
431 struct mthca_qp *qp = to_mqp(ibqp); 431 struct mthca_qp *qp = to_mqp(ibqp);
432 int err; 432 int err = 0;
433 struct mthca_mailbox *mailbox; 433 struct mthca_mailbox *mailbox = NULL;
434 struct mthca_qp_param *qp_param; 434 struct mthca_qp_param *qp_param;
435 struct mthca_qp_context *context; 435 struct mthca_qp_context *context;
436 int mthca_state; 436 int mthca_state;
437 u8 status; 437 u8 status;
438 438
439 if (qp->state == IB_QPS_RESET) {
440 qp_attr->qp_state = IB_QPS_RESET;
441 goto done;
442 }
443
439 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 444 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
440 if (IS_ERR(mailbox)) 445 if (IS_ERR(mailbox))
441 return PTR_ERR(mailbox); 446 return PTR_ERR(mailbox);
@@ -454,7 +459,6 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
454 mthca_state = be32_to_cpu(context->flags) >> 28; 459 mthca_state = be32_to_cpu(context->flags) >> 28;
455 460
456 qp_attr->qp_state = to_ib_qp_state(mthca_state); 461 qp_attr->qp_state = to_ib_qp_state(mthca_state);
457 qp_attr->cur_qp_state = qp_attr->qp_state;
458 qp_attr->path_mtu = context->mtu_msgmax >> 5; 462 qp_attr->path_mtu = context->mtu_msgmax >> 5;
459 qp_attr->path_mig_state = 463 qp_attr->path_mig_state =
460 to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3); 464 to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
@@ -464,11 +468,6 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
464 qp_attr->dest_qp_num = be32_to_cpu(context->remote_qpn) & 0xffffff; 468 qp_attr->dest_qp_num = be32_to_cpu(context->remote_qpn) & 0xffffff;
465 qp_attr->qp_access_flags = 469 qp_attr->qp_access_flags =
466 to_ib_qp_access_flags(be32_to_cpu(context->params2)); 470 to_ib_qp_access_flags(be32_to_cpu(context->params2));
467 qp_attr->cap.max_send_wr = qp->sq.max;
468 qp_attr->cap.max_recv_wr = qp->rq.max;
469 qp_attr->cap.max_send_sge = qp->sq.max_gs;
470 qp_attr->cap.max_recv_sge = qp->rq.max_gs;
471 qp_attr->cap.max_inline_data = qp->max_inline_data;
472 471
473 if (qp->transport == RC || qp->transport == UC) { 472 if (qp->transport == RC || qp->transport == UC) {
474 to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path); 473 to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
@@ -495,7 +494,16 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
495 qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7; 494 qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7;
496 qp_attr->rnr_retry = context->pri_path.rnr_retry >> 5; 495 qp_attr->rnr_retry = context->pri_path.rnr_retry >> 5;
497 qp_attr->alt_timeout = context->alt_path.ackto >> 3; 496 qp_attr->alt_timeout = context->alt_path.ackto >> 3;
498 qp_init_attr->cap = qp_attr->cap; 497
498done:
499 qp_attr->cur_qp_state = qp_attr->qp_state;
500 qp_attr->cap.max_send_wr = qp->sq.max;
501 qp_attr->cap.max_recv_wr = qp->rq.max;
502 qp_attr->cap.max_send_sge = qp->sq.max_gs;
503 qp_attr->cap.max_recv_sge = qp->rq.max_gs;
504 qp_attr->cap.max_inline_data = qp->max_inline_data;
505
506 qp_init_attr->cap = qp_attr->cap;
499 507
500out: 508out:
501 mthca_free_mailbox(dev, mailbox); 509 mthca_free_mailbox(dev, mailbox);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 9b2041e25d59..dd221eda3ea6 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -177,7 +177,7 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
177 * - if yes, the mtask is recycled at iscsi_complete_pdu 177 * - if yes, the mtask is recycled at iscsi_complete_pdu
178 * - if no, the mtask is recycled at iser_snd_completion 178 * - if no, the mtask is recycled at iser_snd_completion
179 */ 179 */
180 if (error && error != -EAGAIN) 180 if (error && error != -ENOBUFS)
181 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 181 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
182 182
183 return error; 183 return error;
@@ -241,7 +241,7 @@ iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
241 error = iscsi_iser_ctask_xmit_unsol_data(conn, ctask); 241 error = iscsi_iser_ctask_xmit_unsol_data(conn, ctask);
242 242
243 iscsi_iser_ctask_xmit_exit: 243 iscsi_iser_ctask_xmit_exit:
244 if (error && error != -EAGAIN) 244 if (error && error != -ENOBUFS)
245 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 245 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
246 return error; 246 return error;
247} 247}
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index e73c87b9be43..0a7d1ab60e6d 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -304,18 +304,14 @@ int iser_conn_set_full_featured_mode(struct iscsi_conn *conn)
304static int 304static int
305iser_check_xmit(struct iscsi_conn *conn, void *task) 305iser_check_xmit(struct iscsi_conn *conn, void *task)
306{ 306{
307 int rc = 0;
308 struct iscsi_iser_conn *iser_conn = conn->dd_data; 307 struct iscsi_iser_conn *iser_conn = conn->dd_data;
309 308
310 write_lock_bh(conn->recv_lock);
311 if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) == 309 if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
312 ISER_QP_MAX_REQ_DTOS) { 310 ISER_QP_MAX_REQ_DTOS) {
313 iser_dbg("%ld can't xmit task %p, suspending tx\n",jiffies,task); 311 iser_dbg("%ld can't xmit task %p\n",jiffies,task);
314 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 312 return -ENOBUFS;
315 rc = -EAGAIN;
316 } 313 }
317 write_unlock_bh(conn->recv_lock); 314 return 0;
318 return rc;
319} 315}
320 316
321 317
@@ -340,7 +336,7 @@ int iser_send_command(struct iscsi_conn *conn,
340 return -EPERM; 336 return -EPERM;
341 } 337 }
342 if (iser_check_xmit(conn, ctask)) 338 if (iser_check_xmit(conn, ctask))
343 return -EAGAIN; 339 return -ENOBUFS;
344 340
345 edtl = ntohl(hdr->data_length); 341 edtl = ntohl(hdr->data_length);
346 342
@@ -426,7 +422,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
426 } 422 }
427 423
428 if (iser_check_xmit(conn, ctask)) 424 if (iser_check_xmit(conn, ctask))
429 return -EAGAIN; 425 return -ENOBUFS;
430 426
431 itt = ntohl(hdr->itt); 427 itt = ntohl(hdr->itt);
432 data_seg_len = ntoh24(hdr->dlength); 428 data_seg_len = ntoh24(hdr->dlength);
@@ -498,7 +494,7 @@ int iser_send_control(struct iscsi_conn *conn,
498 } 494 }
499 495
500 if (iser_check_xmit(conn,mtask)) 496 if (iser_check_xmit(conn,mtask))
501 return -EAGAIN; 497 return -ENOBUFS;
502 498
503 /* build the tx desc regd header and add it to the tx desc dto */ 499 /* build the tx desc regd header and add it to the tx desc dto */
504 mdesc->type = ISCSI_TX_CONTROL; 500 mdesc->type = ISCSI_TX_CONTROL;
@@ -605,6 +601,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
605 struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn; 601 struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn;
606 struct iscsi_conn *conn = iser_conn->iscsi_conn; 602 struct iscsi_conn *conn = iser_conn->iscsi_conn;
607 struct iscsi_mgmt_task *mtask; 603 struct iscsi_mgmt_task *mtask;
604 int resume_tx = 0;
608 605
609 iser_dbg("Initiator, Data sent dto=0x%p\n", dto); 606 iser_dbg("Initiator, Data sent dto=0x%p\n", dto);
610 607
@@ -613,15 +610,16 @@ void iser_snd_completion(struct iser_desc *tx_desc)
613 if (tx_desc->type == ISCSI_TX_DATAOUT) 610 if (tx_desc->type == ISCSI_TX_DATAOUT)
614 kmem_cache_free(ig.desc_cache, tx_desc); 611 kmem_cache_free(ig.desc_cache, tx_desc);
615 612
613 if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
614 ISER_QP_MAX_REQ_DTOS)
615 resume_tx = 1;
616
616 atomic_dec(&ib_conn->post_send_buf_count); 617 atomic_dec(&ib_conn->post_send_buf_count);
617 618
618 write_lock(conn->recv_lock); 619 if (resume_tx) {
619 if (conn->suspend_tx) {
620 iser_dbg("%ld resuming tx\n",jiffies); 620 iser_dbg("%ld resuming tx\n",jiffies);
621 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
622 scsi_queue_work(conn->session->host, &conn->xmitwork); 621 scsi_queue_work(conn->session->host, &conn->xmitwork);
623 } 622 }
624 write_unlock(conn->recv_lock);
625 623
626 if (tx_desc->type == ISCSI_TX_CONTROL) { 624 if (tx_desc->type == ISCSI_TX_CONTROL) {
627 /* this arithmetic is legal by libiscsi dd_data allocation */ 625 /* this arithmetic is legal by libiscsi dd_data allocation */
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index ccc06b1b91b5..714f6a7841cd 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -17,6 +17,7 @@
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/vmalloc.h> 18#include <linux/vmalloc.h>
19#include <linux/highmem.h> 19#include <linux/highmem.h>
20#include <linux/profile.h>
20#include <asm/desc.h> 21#include <asm/desc.h>
21 22
22#include "kvm_svm.h" 23#include "kvm_svm.h"
@@ -1558,6 +1559,13 @@ again:
1558 1559
1559 reload_tss(vcpu); 1560 reload_tss(vcpu);
1560 1561
1562 /*
1563 * Profile KVM exit RIPs:
1564 */
1565 if (unlikely(prof_on == KVM_PROFILING))
1566 profile_hit(KVM_PROFILING,
1567 (void *)(unsigned long)vcpu->svm->vmcb->save.rip);
1568
1561 stgi(); 1569 stgi();
1562 1570
1563 kvm_reput_irq(vcpu); 1571 kvm_reput_irq(vcpu);
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index d4701cb4c654..ce219e3f557f 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -21,6 +21,7 @@
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/mm.h> 22#include <linux/mm.h>
23#include <linux/highmem.h> 23#include <linux/highmem.h>
24#include <linux/profile.h>
24#include <asm/io.h> 25#include <asm/io.h>
25#include <asm/desc.h> 26#include <asm/desc.h>
26 27
@@ -1859,6 +1860,12 @@ again:
1859 asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS)); 1860 asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
1860#endif 1861#endif
1861 1862
1863 /*
1864 * Profile KVM exit RIPs:
1865 */
1866 if (unlikely(prof_on == KVM_PROFILING))
1867 profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
1868
1862 kvm_run->exit_type = 0; 1869 kvm_run->exit_type = 0;
1863 if (fail) { 1870 if (fail) {
1864 kvm_run->exit_type = KVM_EXIT_TYPE_FAIL_ENTRY; 1871 kvm_run->exit_type = KVM_EXIT_TYPE_FAIL_ENTRY;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index b30f74be3982..164b25dca101 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -775,6 +775,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
775 struct bio_list bl; 775 struct bio_list bl;
776 struct page **behind_pages = NULL; 776 struct page **behind_pages = NULL;
777 const int rw = bio_data_dir(bio); 777 const int rw = bio_data_dir(bio);
778 const int do_sync = bio_sync(bio);
778 int do_barriers; 779 int do_barriers;
779 780
780 /* 781 /*
@@ -835,7 +836,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
835 read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset; 836 read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
836 read_bio->bi_bdev = mirror->rdev->bdev; 837 read_bio->bi_bdev = mirror->rdev->bdev;
837 read_bio->bi_end_io = raid1_end_read_request; 838 read_bio->bi_end_io = raid1_end_read_request;
838 read_bio->bi_rw = READ; 839 read_bio->bi_rw = READ | do_sync;
839 read_bio->bi_private = r1_bio; 840 read_bio->bi_private = r1_bio;
840 841
841 generic_make_request(read_bio); 842 generic_make_request(read_bio);
@@ -906,7 +907,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
906 mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset; 907 mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset;
907 mbio->bi_bdev = conf->mirrors[i].rdev->bdev; 908 mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
908 mbio->bi_end_io = raid1_end_write_request; 909 mbio->bi_end_io = raid1_end_write_request;
909 mbio->bi_rw = WRITE | do_barriers; 910 mbio->bi_rw = WRITE | do_barriers | do_sync;
910 mbio->bi_private = r1_bio; 911 mbio->bi_private = r1_bio;
911 912
912 if (behind_pages) { 913 if (behind_pages) {
@@ -941,6 +942,8 @@ static int make_request(request_queue_t *q, struct bio * bio)
941 blk_plug_device(mddev->queue); 942 blk_plug_device(mddev->queue);
942 spin_unlock_irqrestore(&conf->device_lock, flags); 943 spin_unlock_irqrestore(&conf->device_lock, flags);
943 944
945 if (do_sync)
946 md_wakeup_thread(mddev->thread);
944#if 0 947#if 0
945 while ((bio = bio_list_pop(&bl)) != NULL) 948 while ((bio = bio_list_pop(&bl)) != NULL)
946 generic_make_request(bio); 949 generic_make_request(bio);
@@ -1541,6 +1544,7 @@ static void raid1d(mddev_t *mddev)
1541 * We already have a nr_pending reference on these rdevs. 1544 * We already have a nr_pending reference on these rdevs.
1542 */ 1545 */
1543 int i; 1546 int i;
1547 const int do_sync = bio_sync(r1_bio->master_bio);
1544 clear_bit(R1BIO_BarrierRetry, &r1_bio->state); 1548 clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
1545 clear_bit(R1BIO_Barrier, &r1_bio->state); 1549 clear_bit(R1BIO_Barrier, &r1_bio->state);
1546 for (i=0; i < conf->raid_disks; i++) 1550 for (i=0; i < conf->raid_disks; i++)
@@ -1561,7 +1565,7 @@ static void raid1d(mddev_t *mddev)
1561 conf->mirrors[i].rdev->data_offset; 1565 conf->mirrors[i].rdev->data_offset;
1562 bio->bi_bdev = conf->mirrors[i].rdev->bdev; 1566 bio->bi_bdev = conf->mirrors[i].rdev->bdev;
1563 bio->bi_end_io = raid1_end_write_request; 1567 bio->bi_end_io = raid1_end_write_request;
1564 bio->bi_rw = WRITE; 1568 bio->bi_rw = WRITE | do_sync;
1565 bio->bi_private = r1_bio; 1569 bio->bi_private = r1_bio;
1566 r1_bio->bios[i] = bio; 1570 r1_bio->bios[i] = bio;
1567 generic_make_request(bio); 1571 generic_make_request(bio);
@@ -1593,6 +1597,7 @@ static void raid1d(mddev_t *mddev)
1593 (unsigned long long)r1_bio->sector); 1597 (unsigned long long)r1_bio->sector);
1594 raid_end_bio_io(r1_bio); 1598 raid_end_bio_io(r1_bio);
1595 } else { 1599 } else {
1600 const int do_sync = bio_sync(r1_bio->master_bio);
1596 r1_bio->bios[r1_bio->read_disk] = 1601 r1_bio->bios[r1_bio->read_disk] =
1597 mddev->ro ? IO_BLOCKED : NULL; 1602 mddev->ro ? IO_BLOCKED : NULL;
1598 r1_bio->read_disk = disk; 1603 r1_bio->read_disk = disk;
@@ -1608,7 +1613,7 @@ static void raid1d(mddev_t *mddev)
1608 bio->bi_sector = r1_bio->sector + rdev->data_offset; 1613 bio->bi_sector = r1_bio->sector + rdev->data_offset;
1609 bio->bi_bdev = rdev->bdev; 1614 bio->bi_bdev = rdev->bdev;
1610 bio->bi_end_io = raid1_end_read_request; 1615 bio->bi_end_io = raid1_end_read_request;
1611 bio->bi_rw = READ; 1616 bio->bi_rw = READ | do_sync;
1612 bio->bi_private = r1_bio; 1617 bio->bi_private = r1_bio;
1613 unplug = 1; 1618 unplug = 1;
1614 generic_make_request(bio); 1619 generic_make_request(bio);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index f0141910bb8d..a9401c017e35 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -782,6 +782,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
782 int i; 782 int i;
783 int chunk_sects = conf->chunk_mask + 1; 783 int chunk_sects = conf->chunk_mask + 1;
784 const int rw = bio_data_dir(bio); 784 const int rw = bio_data_dir(bio);
785 const int do_sync = bio_sync(bio);
785 struct bio_list bl; 786 struct bio_list bl;
786 unsigned long flags; 787 unsigned long flags;
787 788
@@ -863,7 +864,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
863 mirror->rdev->data_offset; 864 mirror->rdev->data_offset;
864 read_bio->bi_bdev = mirror->rdev->bdev; 865 read_bio->bi_bdev = mirror->rdev->bdev;
865 read_bio->bi_end_io = raid10_end_read_request; 866 read_bio->bi_end_io = raid10_end_read_request;
866 read_bio->bi_rw = READ; 867 read_bio->bi_rw = READ | do_sync;
867 read_bio->bi_private = r10_bio; 868 read_bio->bi_private = r10_bio;
868 869
869 generic_make_request(read_bio); 870 generic_make_request(read_bio);
@@ -909,7 +910,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
909 conf->mirrors[d].rdev->data_offset; 910 conf->mirrors[d].rdev->data_offset;
910 mbio->bi_bdev = conf->mirrors[d].rdev->bdev; 911 mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
911 mbio->bi_end_io = raid10_end_write_request; 912 mbio->bi_end_io = raid10_end_write_request;
912 mbio->bi_rw = WRITE; 913 mbio->bi_rw = WRITE | do_sync;
913 mbio->bi_private = r10_bio; 914 mbio->bi_private = r10_bio;
914 915
915 atomic_inc(&r10_bio->remaining); 916 atomic_inc(&r10_bio->remaining);
@@ -922,6 +923,9 @@ static int make_request(request_queue_t *q, struct bio * bio)
922 blk_plug_device(mddev->queue); 923 blk_plug_device(mddev->queue);
923 spin_unlock_irqrestore(&conf->device_lock, flags); 924 spin_unlock_irqrestore(&conf->device_lock, flags);
924 925
926 if (do_sync)
927 md_wakeup_thread(mddev->thread);
928
925 return 0; 929 return 0;
926} 930}
927 931
@@ -1563,6 +1567,7 @@ static void raid10d(mddev_t *mddev)
1563 (unsigned long long)r10_bio->sector); 1567 (unsigned long long)r10_bio->sector);
1564 raid_end_bio_io(r10_bio); 1568 raid_end_bio_io(r10_bio);
1565 } else { 1569 } else {
1570 const int do_sync = bio_sync(r10_bio->master_bio);
1566 rdev = conf->mirrors[mirror].rdev; 1571 rdev = conf->mirrors[mirror].rdev;
1567 if (printk_ratelimit()) 1572 if (printk_ratelimit())
1568 printk(KERN_ERR "raid10: %s: redirecting sector %llu to" 1573 printk(KERN_ERR "raid10: %s: redirecting sector %llu to"
@@ -1574,7 +1579,7 @@ static void raid10d(mddev_t *mddev)
1574 bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr 1579 bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr
1575 + rdev->data_offset; 1580 + rdev->data_offset;
1576 bio->bi_bdev = rdev->bdev; 1581 bio->bi_bdev = rdev->bdev;
1577 bio->bi_rw = READ; 1582 bio->bi_rw = READ | do_sync;
1578 bio->bi_private = r10_bio; 1583 bio->bi_private = r10_bio;
1579 bio->bi_end_io = raid10_end_read_request; 1584 bio->bi_end_io = raid10_end_read_request;
1580 unplug = 1; 1585 unplug = 1;
diff --git a/drivers/mmc/at91_mci.c b/drivers/mmc/at91_mci.c
index 08a33c33f6ed..aa152f31851e 100644
--- a/drivers/mmc/at91_mci.c
+++ b/drivers/mmc/at91_mci.c
@@ -768,7 +768,7 @@ static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
768 return IRQ_HANDLED; 768 return IRQ_HANDLED;
769} 769}
770 770
771int at91_mci_get_ro(struct mmc_host *mmc) 771static int at91_mci_get_ro(struct mmc_host *mmc)
772{ 772{
773 int read_only = 0; 773 int read_only = 0;
774 struct at91mci_host *host = mmc_priv(mmc); 774 struct at91mci_host *host = mmc_priv(mmc);
@@ -794,7 +794,7 @@ static const struct mmc_host_ops at91_mci_ops = {
794/* 794/*
795 * Probe for the device 795 * Probe for the device
796 */ 796 */
797static int at91_mci_probe(struct platform_device *pdev) 797static int __init at91_mci_probe(struct platform_device *pdev)
798{ 798{
799 struct mmc_host *mmc; 799 struct mmc_host *mmc;
800 struct at91mci_host *host; 800 struct at91mci_host *host;
@@ -910,7 +910,7 @@ static int at91_mci_probe(struct platform_device *pdev)
910/* 910/*
911 * Remove a device 911 * Remove a device
912 */ 912 */
913static int at91_mci_remove(struct platform_device *pdev) 913static int __exit at91_mci_remove(struct platform_device *pdev)
914{ 914{
915 struct mmc_host *mmc = platform_get_drvdata(pdev); 915 struct mmc_host *mmc = platform_get_drvdata(pdev);
916 struct at91mci_host *host; 916 struct at91mci_host *host;
@@ -972,8 +972,7 @@ static int at91_mci_resume(struct platform_device *pdev)
972#endif 972#endif
973 973
974static struct platform_driver at91_mci_driver = { 974static struct platform_driver at91_mci_driver = {
975 .probe = at91_mci_probe, 975 .remove = __exit_p(at91_mci_remove),
976 .remove = at91_mci_remove,
977 .suspend = at91_mci_suspend, 976 .suspend = at91_mci_suspend,
978 .resume = at91_mci_resume, 977 .resume = at91_mci_resume,
979 .driver = { 978 .driver = {
@@ -984,7 +983,7 @@ static struct platform_driver at91_mci_driver = {
984 983
985static int __init at91_mci_init(void) 984static int __init at91_mci_init(void)
986{ 985{
987 return platform_driver_register(&at91_mci_driver); 986 return platform_driver_probe(&at91_mci_driver, at91_mci_probe);
988} 987}
989 988
990static void __exit at91_mci_exit(void) 989static void __exit at91_mci_exit(void)
diff --git a/drivers/mmc/mmci.c b/drivers/mmc/mmci.c
index e9b80e920266..ccfe6561be24 100644
--- a/drivers/mmc/mmci.c
+++ b/drivers/mmc/mmci.c
@@ -42,6 +42,8 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
42{ 42{
43 writel(0, host->base + MMCICOMMAND); 43 writel(0, host->base + MMCICOMMAND);
44 44
45 BUG_ON(host->data);
46
45 host->mrq = NULL; 47 host->mrq = NULL;
46 host->cmd = NULL; 48 host->cmd = NULL;
47 49
@@ -198,6 +200,8 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
198 } 200 }
199 201
200 if (!cmd->data || cmd->error != MMC_ERR_NONE) { 202 if (!cmd->data || cmd->error != MMC_ERR_NONE) {
203 if (host->data)
204 mmci_stop_data(host);
201 mmci_request_end(host, cmd->mrq); 205 mmci_request_end(host, cmd->mrq);
202 } else if (!(cmd->data->flags & MMC_DATA_READ)) { 206 } else if (!(cmd->data->flags & MMC_DATA_READ)) {
203 mmci_start_data(host, cmd->data); 207 mmci_start_data(host, cmd->data);
diff --git a/drivers/mmc/omap.c b/drivers/mmc/omap.c
index 435d331e772a..9488408308fb 100644
--- a/drivers/mmc/omap.c
+++ b/drivers/mmc/omap.c
@@ -581,9 +581,9 @@ static void mmc_omap_switch_timer(unsigned long arg)
581 schedule_work(&host->switch_work); 581 schedule_work(&host->switch_work);
582} 582}
583 583
584static void mmc_omap_switch_handler(void *data) 584static void mmc_omap_switch_handler(struct work_struct *work)
585{ 585{
586 struct mmc_omap_host *host = (struct mmc_omap_host *) data; 586 struct mmc_omap_host *host = container_of(work, struct mmc_omap_host, switch_work);
587 struct mmc_card *card; 587 struct mmc_card *card;
588 static int complained = 0; 588 static int complained = 0;
589 int cards = 0, cover_open; 589 int cards = 0, cover_open;
@@ -1116,7 +1116,7 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
1116 platform_set_drvdata(pdev, host); 1116 platform_set_drvdata(pdev, host);
1117 1117
1118 if (host->switch_pin >= 0) { 1118 if (host->switch_pin >= 0) {
1119 INIT_WORK(&host->switch_work, mmc_omap_switch_handler, host); 1119 INIT_WORK(&host->switch_work, mmc_omap_switch_handler);
1120 init_timer(&host->switch_timer); 1120 init_timer(&host->switch_timer);
1121 host->switch_timer.function = mmc_omap_switch_timer; 1121 host->switch_timer.function = mmc_omap_switch_timer;
1122 host->switch_timer.data = (unsigned long) host; 1122 host->switch_timer.data = (unsigned long) host;
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index ada5e9b9988c..ca5acc4736df 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -57,8 +57,8 @@
57 57
58#define DRV_MODULE_NAME "bnx2" 58#define DRV_MODULE_NAME "bnx2"
59#define PFX DRV_MODULE_NAME ": " 59#define PFX DRV_MODULE_NAME ": "
60#define DRV_MODULE_VERSION "1.5.2" 60#define DRV_MODULE_VERSION "1.5.3"
61#define DRV_MODULE_RELDATE "December 13, 2006" 61#define DRV_MODULE_RELDATE "January 8, 2007"
62 62
63#define RUN_AT(x) (jiffies + (x)) 63#define RUN_AT(x) (jiffies + (x))
64 64
@@ -1345,8 +1345,6 @@ bnx2_init_copper_phy(struct bnx2 *bp)
1345{ 1345{
1346 u32 val; 1346 u32 val;
1347 1347
1348 bp->phy_flags |= PHY_CRC_FIX_FLAG;
1349
1350 if (bp->phy_flags & PHY_CRC_FIX_FLAG) { 1348 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1351 bnx2_write_phy(bp, 0x18, 0x0c00); 1349 bnx2_write_phy(bp, 0x18, 0x0c00);
1352 bnx2_write_phy(bp, 0x17, 0x000a); 1350 bnx2_write_phy(bp, 0x17, 0x000a);
@@ -3085,7 +3083,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3085 int buf_size) 3083 int buf_size)
3086{ 3084{
3087 u32 written, offset32, len32; 3085 u32 written, offset32, len32;
3088 u8 *buf, start[4], end[4], *flash_buffer = NULL; 3086 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3089 int rc = 0; 3087 int rc = 0;
3090 int align_start, align_end; 3088 int align_start, align_end;
3091 3089
@@ -3113,16 +3111,17 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3113 } 3111 }
3114 3112
3115 if (align_start || align_end) { 3113 if (align_start || align_end) {
3116 buf = kmalloc(len32, GFP_KERNEL); 3114 align_buf = kmalloc(len32, GFP_KERNEL);
3117 if (buf == NULL) 3115 if (align_buf == NULL)
3118 return -ENOMEM; 3116 return -ENOMEM;
3119 if (align_start) { 3117 if (align_start) {
3120 memcpy(buf, start, 4); 3118 memcpy(align_buf, start, 4);
3121 } 3119 }
3122 if (align_end) { 3120 if (align_end) {
3123 memcpy(buf + len32 - 4, end, 4); 3121 memcpy(align_buf + len32 - 4, end, 4);
3124 } 3122 }
3125 memcpy(buf + align_start, data_buf, buf_size); 3123 memcpy(align_buf + align_start, data_buf, buf_size);
3124 buf = align_buf;
3126 } 3125 }
3127 3126
3128 if (bp->flash_info->buffered == 0) { 3127 if (bp->flash_info->buffered == 0) {
@@ -3256,11 +3255,8 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3256 } 3255 }
3257 3256
3258nvram_write_end: 3257nvram_write_end:
3259 if (bp->flash_info->buffered == 0) 3258 kfree(flash_buffer);
3260 kfree(flash_buffer); 3259 kfree(align_buf);
3261
3262 if (align_start || align_end)
3263 kfree(buf);
3264 return rc; 3260 return rc;
3265} 3261}
3266 3262
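
The bnx2_nvram_write() change keeps the temporary alignment copy in its own pointer (align_buf) and frees both buffers unconditionally at the exit label, relying on kfree(NULL) being a no-op instead of re-testing the conditions under which they were allocated. A hedged sketch of that bounce-buffer/error-path style; the function and parameter names are illustrative:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Illustrative only: widen an unaligned write to 4-byte boundaries via a
 * bounce buffer, then free on every exit path. */
static int write_padded(u8 *data, size_t len, size_t pad_head, size_t pad_tail)
{
	u8 *align_buf = NULL;
	u8 *buf = data;
	int rc = 0;

	if (pad_head || pad_tail) {
		align_buf = kmalloc(len + pad_head + pad_tail, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		/* head/tail bytes would be refilled from the device here */
		memcpy(align_buf + pad_head, data, len);
		buf = align_buf;
	}

	/* ... program the device from buf ... */
	(void)buf;

	kfree(align_buf);	/* kfree(NULL) is a no-op, so no "did we allocate" flag */
	return rc;
}
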
@@ -5645,6 +5641,44 @@ poll_bnx2(struct net_device *dev)
5645} 5641}
5646#endif 5642#endif
5647 5643
5644static void __devinit
5645bnx2_get_5709_media(struct bnx2 *bp)
5646{
5647 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
5648 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
5649 u32 strap;
5650
5651 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
5652 return;
5653 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
5654 bp->phy_flags |= PHY_SERDES_FLAG;
5655 return;
5656 }
5657
5658 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
5659 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
5660 else
5661 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
5662
5663 if (PCI_FUNC(bp->pdev->devfn) == 0) {
5664 switch (strap) {
5665 case 0x4:
5666 case 0x5:
5667 case 0x6:
5668 bp->phy_flags |= PHY_SERDES_FLAG;
5669 return;
5670 }
5671 } else {
5672 switch (strap) {
5673 case 0x1:
5674 case 0x2:
5675 case 0x4:
5676 bp->phy_flags |= PHY_SERDES_FLAG;
5677 return;
5678 }
5679 }
5680}
5681
5648static int __devinit 5682static int __devinit
5649bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) 5683bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5650{ 5684{
@@ -5865,10 +5899,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5865 bp->phy_addr = 1; 5899 bp->phy_addr = 1;
5866 5900
5867 /* Disable WOL support if we are running on a SERDES chip. */ 5901 /* Disable WOL support if we are running on a SERDES chip. */
5868 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 5902 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5869 if (CHIP_BOND_ID(bp) != BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) 5903 bnx2_get_5709_media(bp);
5870 bp->phy_flags |= PHY_SERDES_FLAG; 5904 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
5871 } else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
5872 bp->phy_flags |= PHY_SERDES_FLAG; 5905 bp->phy_flags |= PHY_SERDES_FLAG;
5873 5906
5874 if (bp->phy_flags & PHY_SERDES_FLAG) { 5907 if (bp->phy_flags & PHY_SERDES_FLAG) {
@@ -5880,7 +5913,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5880 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G) 5913 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5881 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG; 5914 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5882 } 5915 }
5883 } 5916 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
5917 CHIP_NUM(bp) == CHIP_NUM_5708)
5918 bp->phy_flags |= PHY_CRC_FIX_FLAG;
5884 5919
5885 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) || 5920 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
5886 (CHIP_ID(bp) == CHIP_ID_5708_B0) || 5921 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
diff --git a/drivers/net/chelsio/my3126.c b/drivers/net/chelsio/my3126.c
index c7731b6f9de3..82fed1dd5005 100644
--- a/drivers/net/chelsio/my3126.c
+++ b/drivers/net/chelsio/my3126.c
@@ -170,9 +170,10 @@ static struct cphy *my3126_phy_create(adapter_t *adapter,
170{ 170{
171 struct cphy *cphy = kzalloc(sizeof (*cphy), GFP_KERNEL); 171 struct cphy *cphy = kzalloc(sizeof (*cphy), GFP_KERNEL);
172 172
173 if (cphy) 173 if (!cphy)
174 cphy_init(cphy, adapter, phy_addr, &my3126_ops, mdio_ops); 174 return NULL;
175 175
176 cphy_init(cphy, adapter, phy_addr, &my3126_ops, mdio_ops);
176 INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll); 177 INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll);
177 cphy->bmsr = 0; 178 cphy->bmsr = 0;
178 179
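
The my3126 fix inverts the allocation test so the function bails out before touching the object, rather than guarding only the first initializer and then dereferencing a possibly-NULL pointer on the following lines. A small sketch of the resulting shape; the stub type and initializer are illustrative stand-ins:

#include <linux/slab.h>

struct cphy_sketch {
	int	state;			/* illustrative fields */
	void	*ops;
};

static void cphy_sketch_init(struct cphy_sketch *cphy)
{
	cphy->state = 0;
	cphy->ops = NULL;
}

static struct cphy_sketch *phy_create(void)
{
	struct cphy_sketch *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL);

	if (!cphy)			/* fail fast: nothing below runs on NULL */
		return NULL;

	cphy_sketch_init(cphy);		/* every later line can assume cphy != NULL */
	/* ... INIT_DELAYED_WORK(), remaining field setup, etc. ... */
	return cphy;
}
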
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 4c1ff752048c..c6259c7127f6 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -995,12 +995,6 @@ e1000_probe(struct pci_dev *pdev,
995 (adapter->hw.mac_type != e1000_82547)) 995 (adapter->hw.mac_type != e1000_82547))
996 netdev->features |= NETIF_F_TSO; 996 netdev->features |= NETIF_F_TSO;
997 997
998#ifdef CONFIG_DEBUG_SLAB
999 /* 82544's work arounds do not play nicely with DEBUG SLAB */
1000 if (adapter->hw.mac_type == e1000_82544)
1001 netdev->features &= ~NETIF_F_TSO;
1002#endif
1003
1004#ifdef NETIF_F_TSO6 998#ifdef NETIF_F_TSO6
1005 if (adapter->hw.mac_type > e1000_82547_rev_2) 999 if (adapter->hw.mac_type > e1000_82547_rev_2)
1006 netdev->features |= NETIF_F_TSO6; 1000 netdev->features |= NETIF_F_TSO6;
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 2f48fe9a29a7..93f2b7a22160 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -234,6 +234,7 @@ enum {
234#define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000 234#define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000
235#define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000 235#define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000
236#define NVREG_XMITCTL_HOST_LOADED 0x00004000 236#define NVREG_XMITCTL_HOST_LOADED 0x00004000
237#define NVREG_XMITCTL_TX_PATH_EN 0x01000000
237 NvRegTransmitterStatus = 0x088, 238 NvRegTransmitterStatus = 0x088,
238#define NVREG_XMITSTAT_BUSY 0x01 239#define NVREG_XMITSTAT_BUSY 0x01
239 240
@@ -249,6 +250,7 @@ enum {
249#define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE 250#define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE
250 NvRegReceiverControl = 0x094, 251 NvRegReceiverControl = 0x094,
251#define NVREG_RCVCTL_START 0x01 252#define NVREG_RCVCTL_START 0x01
253#define NVREG_RCVCTL_RX_PATH_EN 0x01000000
252 NvRegReceiverStatus = 0x98, 254 NvRegReceiverStatus = 0x98,
253#define NVREG_RCVSTAT_BUSY 0x01 255#define NVREG_RCVSTAT_BUSY 0x01
254 256
@@ -1169,16 +1171,21 @@ static void nv_start_rx(struct net_device *dev)
1169{ 1171{
1170 struct fe_priv *np = netdev_priv(dev); 1172 struct fe_priv *np = netdev_priv(dev);
1171 u8 __iomem *base = get_hwbase(dev); 1173 u8 __iomem *base = get_hwbase(dev);
1174 u32 rx_ctrl = readl(base + NvRegReceiverControl);
1172 1175
1173 dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name); 1176 dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
1174 /* Already running? Stop it. */ 1177 /* Already running? Stop it. */
1175 if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) { 1178 if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
1176 writel(0, base + NvRegReceiverControl); 1179 rx_ctrl &= ~NVREG_RCVCTL_START;
1180 writel(rx_ctrl, base + NvRegReceiverControl);
1177 pci_push(base); 1181 pci_push(base);
1178 } 1182 }
1179 writel(np->linkspeed, base + NvRegLinkSpeed); 1183 writel(np->linkspeed, base + NvRegLinkSpeed);
1180 pci_push(base); 1184 pci_push(base);
1181 writel(NVREG_RCVCTL_START, base + NvRegReceiverControl); 1185 rx_ctrl |= NVREG_RCVCTL_START;
1186 if (np->mac_in_use)
1187 rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
1188 writel(rx_ctrl, base + NvRegReceiverControl);
1182 dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n", 1189 dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
1183 dev->name, np->duplex, np->linkspeed); 1190 dev->name, np->duplex, np->linkspeed);
1184 pci_push(base); 1191 pci_push(base);
@@ -1186,39 +1193,59 @@ static void nv_start_rx(struct net_device *dev)
1186 1193
1187static void nv_stop_rx(struct net_device *dev) 1194static void nv_stop_rx(struct net_device *dev)
1188{ 1195{
1196 struct fe_priv *np = netdev_priv(dev);
1189 u8 __iomem *base = get_hwbase(dev); 1197 u8 __iomem *base = get_hwbase(dev);
1198 u32 rx_ctrl = readl(base + NvRegReceiverControl);
1190 1199
1191 dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name); 1200 dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
1192 writel(0, base + NvRegReceiverControl); 1201 if (!np->mac_in_use)
1202 rx_ctrl &= ~NVREG_RCVCTL_START;
1203 else
1204 rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
1205 writel(rx_ctrl, base + NvRegReceiverControl);
1193 reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0, 1206 reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
1194 NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX, 1207 NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
1195 KERN_INFO "nv_stop_rx: ReceiverStatus remained busy"); 1208 KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");
1196 1209
1197 udelay(NV_RXSTOP_DELAY2); 1210 udelay(NV_RXSTOP_DELAY2);
1198 writel(0, base + NvRegLinkSpeed); 1211 if (!np->mac_in_use)
1212 writel(0, base + NvRegLinkSpeed);
1199} 1213}
1200 1214
1201static void nv_start_tx(struct net_device *dev) 1215static void nv_start_tx(struct net_device *dev)
1202{ 1216{
1217 struct fe_priv *np = netdev_priv(dev);
1203 u8 __iomem *base = get_hwbase(dev); 1218 u8 __iomem *base = get_hwbase(dev);
1219 u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1204 1220
1205 dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name); 1221 dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
1206 writel(NVREG_XMITCTL_START, base + NvRegTransmitterControl); 1222 tx_ctrl |= NVREG_XMITCTL_START;
1223 if (np->mac_in_use)
1224 tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
1225 writel(tx_ctrl, base + NvRegTransmitterControl);
1207 pci_push(base); 1226 pci_push(base);
1208} 1227}
1209 1228
1210static void nv_stop_tx(struct net_device *dev) 1229static void nv_stop_tx(struct net_device *dev)
1211{ 1230{
1231 struct fe_priv *np = netdev_priv(dev);
1212 u8 __iomem *base = get_hwbase(dev); 1232 u8 __iomem *base = get_hwbase(dev);
1233 u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1213 1234
1214 dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name); 1235 dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
1215 writel(0, base + NvRegTransmitterControl); 1236 if (!np->mac_in_use)
1237 tx_ctrl &= ~NVREG_XMITCTL_START;
1238 else
1239 tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
1240 writel(tx_ctrl, base + NvRegTransmitterControl);
1216 reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0, 1241 reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
1217 NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX, 1242 NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
1218 KERN_INFO "nv_stop_tx: TransmitterStatus remained busy"); 1243 KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");
1219 1244
1220 udelay(NV_TXSTOP_DELAY2); 1245 udelay(NV_TXSTOP_DELAY2);
1221 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); 1246 if (!np->mac_in_use)
1247 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
1248 base + NvRegTransmitPoll);
1222} 1249}
1223 1250
1224static void nv_txrx_reset(struct net_device *dev) 1251static void nv_txrx_reset(struct net_device *dev)
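
The four forcedeth start/stop helpers above move from writing absolute values into the transmitter/receiver control registers to a read-modify-write that only toggles the START and PATH_EN bits, so that when a management unit shares the MAC (np->mac_in_use) the driver leaves the rest of the register, and the datapath enable, in the state the firmware expects. A generic sketch of that register discipline; the register layout and bit names below are invented for illustration:

#include <linux/io.h>
#include <linux/types.h>

#define CTL_START	0x00000001	/* illustrative bit layout */
#define CTL_PATH_EN	0x01000000

static void engine_start(void __iomem *reg, bool shared_with_firmware)
{
	u32 ctl = readl(reg);		/* preserve whatever else lives in the register */

	ctl |= CTL_START;
	if (shared_with_firmware)
		ctl &= ~CTL_PATH_EN;	/* firmware owns the datapath enable */
	writel(ctl, reg);
	readl(reg);			/* flush the posted write */
}

static void engine_stop(void __iomem *reg, bool shared_with_firmware)
{
	u32 ctl = readl(reg);

	if (!shared_with_firmware)
		ctl &= ~CTL_START;	/* full stop only when we own the MAC */
	else
		ctl |= CTL_PATH_EN;	/* hand the path back to the firmware */
	writel(ctl, reg);
}
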
@@ -4148,20 +4175,6 @@ static int nv_mgmt_acquire_sema(struct net_device *dev)
4148 return 0; 4175 return 0;
4149} 4176}
4150 4177
4151/* Indicate to mgmt unit whether driver is loaded or not */
4152static void nv_mgmt_driver_loaded(struct net_device *dev, int loaded)
4153{
4154 u8 __iomem *base = get_hwbase(dev);
4155 u32 tx_ctrl;
4156
4157 tx_ctrl = readl(base + NvRegTransmitterControl);
4158 if (loaded)
4159 tx_ctrl |= NVREG_XMITCTL_HOST_LOADED;
4160 else
4161 tx_ctrl &= ~NVREG_XMITCTL_HOST_LOADED;
4162 writel(tx_ctrl, base + NvRegTransmitterControl);
4163}
4164
4165static int nv_open(struct net_device *dev) 4178static int nv_open(struct net_device *dev)
4166{ 4179{
4167 struct fe_priv *np = netdev_priv(dev); 4180 struct fe_priv *np = netdev_priv(dev);
@@ -4659,33 +4672,24 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
4659 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); 4672 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
4660 4673
4661 if (id->driver_data & DEV_HAS_MGMT_UNIT) { 4674 if (id->driver_data & DEV_HAS_MGMT_UNIT) {
4662 writel(0x1, base + 0x204); pci_push(base);
4663 msleep(500);
4664 /* management unit running on the mac? */ 4675 /* management unit running on the mac? */
4665 np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST; 4676 if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) {
4666 if (np->mac_in_use) { 4677 np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
4667 u32 mgmt_sync; 4678 dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use);
4668 /* management unit setup the phy already? */ 4679 for (i = 0; i < 5000; i++) {
4669 mgmt_sync = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK; 4680 msleep(1);
4670 if (mgmt_sync == NVREG_XMITCTL_SYNC_NOT_READY) { 4681 if (nv_mgmt_acquire_sema(dev)) {
4671 if (!nv_mgmt_acquire_sema(dev)) { 4682 /* management unit setup the phy already? */
4672 for (i = 0; i < 5000; i++) { 4683 if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
4673 msleep(1); 4684 NVREG_XMITCTL_SYNC_PHY_INIT) {
4674 mgmt_sync = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK; 4685 /* phy is inited by mgmt unit */
4675 if (mgmt_sync == NVREG_XMITCTL_SYNC_NOT_READY) 4686 phyinitialized = 1;
4676 continue; 4687 dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev));
4677 if (mgmt_sync == NVREG_XMITCTL_SYNC_PHY_INIT) 4688 } else {
4678 phyinitialized = 1; 4689 /* we need to init the phy */
4679 break;
4680 } 4690 }
4681 } else { 4691 break;
4682 /* we need to init the phy */
4683 } 4692 }
4684 } else if (mgmt_sync == NVREG_XMITCTL_SYNC_PHY_INIT) {
4685 /* phy is inited by SMU */
4686 phyinitialized = 1;
4687 } else {
4688 /* we need to init the phy */
4689 } 4693 }
4690 } 4694 }
4691 } 4695 }
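
The reworked probe path above polls nv_mgmt_acquire_sema() for up to roughly five seconds (5000 iterations of msleep(1)) and only then inspects the SYNC state to decide whether the management firmware has already initialized the PHY. A compact sketch of that bounded-poll idiom; try_acquire() and phy_ready() are placeholders, not driver functions:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Placeholders for the driver's semaphore and PHY-state accessors. */
extern bool try_acquire(void);
extern bool phy_ready(void);

static int wait_for_firmware_handoff(bool *phy_initialized)
{
	int i;

	for (i = 0; i < 5000; i++) {	/* ~5 s worst case at 1 ms per attempt */
		msleep(1);
		if (!try_acquire())
			continue;	/* firmware still holds the semaphore */

		/* We own the hardware now; record what firmware already did. */
		*phy_initialized = phy_ready();
		return 0;
	}
	return -ETIMEDOUT;		/* caller falls back to a full PHY init */
}
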
@@ -4724,10 +4728,12 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
4724 if (!phyinitialized) { 4728 if (!phyinitialized) {
4725 /* reset it */ 4729 /* reset it */
4726 phy_init(dev); 4730 phy_init(dev);
4727 } 4731 } else {
4728 4732 /* see if it is a gigabit phy */
4729 if (id->driver_data & DEV_HAS_MGMT_UNIT) { 4733 u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4730 nv_mgmt_driver_loaded(dev, 1); 4734 if (mii_status & PHY_GIGABIT) {
4735 np->gigabit = PHY_GIGABIT;
4736 }
4731 } 4737 }
4732 4738
4733 /* set default link speed settings */ 4739 /* set default link speed settings */
@@ -4749,8 +4755,6 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
4749out_error: 4755out_error:
4750 if (phystate_orig) 4756 if (phystate_orig)
4751 writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl); 4757 writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
4752 if (np->mac_in_use)
4753 nv_mgmt_driver_loaded(dev, 0);
4754 pci_set_drvdata(pci_dev, NULL); 4758 pci_set_drvdata(pci_dev, NULL);
4755out_freering: 4759out_freering:
4756 free_rings(dev); 4760 free_rings(dev);
@@ -4780,9 +4784,6 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
4780 writel(np->orig_mac[0], base + NvRegMacAddrA); 4784 writel(np->orig_mac[0], base + NvRegMacAddrA);
4781 writel(np->orig_mac[1], base + NvRegMacAddrB); 4785 writel(np->orig_mac[1], base + NvRegMacAddrB);
4782 4786
4783 if (np->mac_in_use)
4784 nv_mgmt_driver_loaded(dev, 0);
4785
4786 /* free all structures */ 4787 /* free all structures */
4787 free_rings(dev); 4788 free_rings(dev);
4788 iounmap(get_hwbase(dev)); 4789 iounmap(get_hwbase(dev));
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 50ffe90488ff..f4aba4355b19 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -171,6 +171,7 @@ struct ixgb_adapter {
171 171
172 /* TX */ 172 /* TX */
173 struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp; 173 struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp;
174 unsigned int restart_queue;
174 unsigned long timeo_start; 175 unsigned long timeo_start;
175 uint32_t tx_cmd_type; 176 uint32_t tx_cmd_type;
176 uint64_t hw_csum_tx_good; 177 uint64_t hw_csum_tx_good;
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index cd22523fb035..82c044d6e08a 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -79,6 +79,7 @@ static struct ixgb_stats ixgb_gstrings_stats[] = {
79 {"tx_window_errors", IXGB_STAT(net_stats.tx_window_errors)}, 79 {"tx_window_errors", IXGB_STAT(net_stats.tx_window_errors)},
80 {"tx_deferred_ok", IXGB_STAT(stats.dc)}, 80 {"tx_deferred_ok", IXGB_STAT(stats.dc)},
81 {"tx_timeout_count", IXGB_STAT(tx_timeout_count) }, 81 {"tx_timeout_count", IXGB_STAT(tx_timeout_count) },
82 {"tx_restart_queue", IXGB_STAT(restart_queue) },
82 {"rx_long_length_errors", IXGB_STAT(stats.roc)}, 83 {"rx_long_length_errors", IXGB_STAT(stats.roc)},
83 {"rx_short_length_errors", IXGB_STAT(stats.ruc)}, 84 {"rx_short_length_errors", IXGB_STAT(stats.ruc)},
84#ifdef NETIF_F_TSO 85#ifdef NETIF_F_TSO
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c
index 02089b64e42c..ecbf45861c68 100644
--- a/drivers/net/ixgb/ixgb_hw.c
+++ b/drivers/net/ixgb/ixgb_hw.c
@@ -399,8 +399,9 @@ ixgb_init_rx_addrs(struct ixgb_hw *hw)
399 /* Zero out the other 15 receive addresses. */ 399 /* Zero out the other 15 receive addresses. */
400 DEBUGOUT("Clearing RAR[1-15]\n"); 400 DEBUGOUT("Clearing RAR[1-15]\n");
401 for(i = 1; i < IXGB_RAR_ENTRIES; i++) { 401 for(i = 1; i < IXGB_RAR_ENTRIES; i++) {
402 IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); 402 /* Write high reg first to disable the AV bit first */
403 IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); 403 IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
404 IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
404 } 405 }
405 406
406 return; 407 return;
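
The ixgb_hw fix flips the order of the two writes so the high dword, which carries the Address Valid bit, is cleared first; otherwise there is a brief window in which the entry is still marked valid while its low address bytes have already been zeroed, and the MAC could match on a half-cleared address. A sketch of the same ordering concern, with an invented register layout:

#include <linux/io.h>

#define RAR_ENTRIES	16

/* rar_base points at an array of {low, high} 32-bit register pairs; the
 * high word of each pair carries the Address Valid bit. */
static void clear_receive_addrs(u32 __iomem *rar_base)
{
	int i;

	for (i = 1; i < RAR_ENTRIES; i++) {
		/* High word first: dropping Address Valid invalidates the entry ... */
		writel(0, rar_base + (i * 2) + 1);
		/* ... so clearing the low word can never expose a partial match. */
		writel(0, rar_base + (i * 2));
	}
}
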
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index e628126c9c49..a083a9189230 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -36,7 +36,7 @@ static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
36#else 36#else
37#define DRIVERNAPI "-NAPI" 37#define DRIVERNAPI "-NAPI"
38#endif 38#endif
39#define DRV_VERSION "1.0.117-k2"DRIVERNAPI 39#define DRV_VERSION "1.0.126-k2"DRIVERNAPI
40char ixgb_driver_version[] = DRV_VERSION; 40char ixgb_driver_version[] = DRV_VERSION;
41static char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; 41static char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
42 42
@@ -1287,6 +1287,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1287 struct ixgb_buffer *buffer_info; 1287 struct ixgb_buffer *buffer_info;
1288 int len = skb->len; 1288 int len = skb->len;
1289 unsigned int offset = 0, size, count = 0, i; 1289 unsigned int offset = 0, size, count = 0, i;
1290 unsigned int mss = skb_shinfo(skb)->gso_size;
1290 1291
1291 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 1292 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
1292 unsigned int f; 1293 unsigned int f;
@@ -1298,6 +1299,11 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1298 while(len) { 1299 while(len) {
1299 buffer_info = &tx_ring->buffer_info[i]; 1300 buffer_info = &tx_ring->buffer_info[i];
1300 size = min(len, IXGB_MAX_DATA_PER_TXD); 1301 size = min(len, IXGB_MAX_DATA_PER_TXD);
1302 /* Workaround for premature desc write-backs
1303 * in TSO mode. Append 4-byte sentinel desc */
1304 if (unlikely(mss && !nr_frags && size == len && size > 8))
1305 size -= 4;
1306
1301 buffer_info->length = size; 1307 buffer_info->length = size;
1302 WARN_ON(buffer_info->dma != 0); 1308 WARN_ON(buffer_info->dma != 0);
1303 buffer_info->dma = 1309 buffer_info->dma =
@@ -1324,6 +1330,13 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1324 while(len) { 1330 while(len) {
1325 buffer_info = &tx_ring->buffer_info[i]; 1331 buffer_info = &tx_ring->buffer_info[i];
1326 size = min(len, IXGB_MAX_DATA_PER_TXD); 1332 size = min(len, IXGB_MAX_DATA_PER_TXD);
1333
1334 /* Workaround for premature desc write-backs
1335 * in TSO mode. Append 4-byte sentinel desc */
1336 if (unlikely(mss && !nr_frags && size == len
1337 && size > 8))
1338 size -= 4;
1339
1327 buffer_info->length = size; 1340 buffer_info->length = size;
1328 buffer_info->dma = 1341 buffer_info->dma =
1329 pci_map_page(adapter->pdev, 1342 pci_map_page(adapter->pdev,
@@ -1398,11 +1411,43 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
1398 IXGB_WRITE_REG(&adapter->hw, TDT, i); 1411 IXGB_WRITE_REG(&adapter->hw, TDT, i);
1399} 1412}
1400 1413
1414static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size)
1415{
1416 struct ixgb_adapter *adapter = netdev_priv(netdev);
1417 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1418
1419 netif_stop_queue(netdev);
1420 /* Herbert's original patch had:
1421 * smp_mb__after_netif_stop_queue();
1422 * but since that doesn't exist yet, just open code it. */
1423 smp_mb();
1424
1425 /* We need to check again in a case another CPU has just
1426 * made room available. */
1427 if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
1428 return -EBUSY;
1429
1430 /* A reprieve! */
1431 netif_start_queue(netdev);
1432 ++adapter->restart_queue;
1433 return 0;
1434}
1435
1436static int ixgb_maybe_stop_tx(struct net_device *netdev,
1437 struct ixgb_desc_ring *tx_ring, int size)
1438{
1439 if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
1440 return 0;
1441 return __ixgb_maybe_stop_tx(netdev, size);
1442}
1443
1444
1401/* Tx Descriptors needed, worst case */ 1445/* Tx Descriptors needed, worst case */
1402#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \ 1446#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
1403 (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0)) 1447 (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
1404#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) + \ 1448#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->data */ + \
1405 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 1449 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
1450 + 1 /* one more needed for sentinel TSO workaround */
1406 1451
1407static int 1452static int
1408ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 1453ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
@@ -1430,7 +1475,8 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1430 spin_lock_irqsave(&adapter->tx_lock, flags); 1475 spin_lock_irqsave(&adapter->tx_lock, flags);
1431#endif 1476#endif
1432 1477
1433 if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) { 1478 if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
1479 DESC_NEEDED))) {
1434 netif_stop_queue(netdev); 1480 netif_stop_queue(netdev);
1435 spin_unlock_irqrestore(&adapter->tx_lock, flags); 1481 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1436 return NETDEV_TX_BUSY; 1482 return NETDEV_TX_BUSY;
@@ -1468,8 +1514,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1468 1514
1469#ifdef NETIF_F_LLTX 1515#ifdef NETIF_F_LLTX
1470 /* Make sure there is space in the ring for the next send. */ 1516 /* Make sure there is space in the ring for the next send. */
1471 if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) 1517 ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
1472 netif_stop_queue(netdev);
1473 1518
1474 spin_unlock_irqrestore(&adapter->tx_lock, flags); 1519 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1475 1520
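
The new __ixgb_maybe_stop_tx()/ixgb_maybe_stop_tx() pair above is the standard race-free queue-stop pattern: stop the queue, issue a full memory barrier, then re-check the ring, so a completion that freed descriptors on another CPU between the check and the stop either sees the stopped queue and restarts it, or leaves enough room for this path to restart it itself. A condensed sketch under the assumption of an illustrative ring_unused() helper:

#include <linux/netdevice.h>
#include <linux/errno.h>

/* Illustrative helper: how many descriptors are still free in the TX ring. */
extern int ring_unused(struct net_device *netdev);

static int maybe_stop_tx(struct net_device *netdev, int needed)
{
	if (likely(ring_unused(netdev) >= needed))
		return 0;		/* fast path: plenty of room */

	netif_stop_queue(netdev);
	smp_mb();			/* order the stop against the re-check */

	/* A completion on another CPU may have freed space meanwhile. */
	if (likely(ring_unused(netdev) < needed))
		return -EBUSY;		/* really full: caller returns NETDEV_TX_BUSY */

	netif_start_queue(netdev);	/* a reprieve: space appeared, keep sending */
	return 0;
}
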
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index 794cc61819dd..448bf4a78016 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -281,7 +281,6 @@ static int tc574_probe(struct pcmcia_device *link)
281 link->conf.Attributes = CONF_ENABLE_IRQ; 281 link->conf.Attributes = CONF_ENABLE_IRQ;
282 link->conf.IntType = INT_MEMORY_AND_IO; 282 link->conf.IntType = INT_MEMORY_AND_IO;
283 link->conf.ConfigIndex = 1; 283 link->conf.ConfigIndex = 1;
284 link->conf.Present = PRESENT_OPTION;
285 284
286 /* The EL3-specific entries in the device structure. */ 285 /* The EL3-specific entries in the device structure. */
287 dev->hard_start_xmit = &el3_start_xmit; 286 dev->hard_start_xmit = &el3_start_xmit;
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index 1e73ff7d5d8e..342f4062de0b 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -195,7 +195,6 @@ static int tc589_probe(struct pcmcia_device *link)
195 link->conf.Attributes = CONF_ENABLE_IRQ; 195 link->conf.Attributes = CONF_ENABLE_IRQ;
196 link->conf.IntType = INT_MEMORY_AND_IO; 196 link->conf.IntType = INT_MEMORY_AND_IO;
197 link->conf.ConfigIndex = 1; 197 link->conf.ConfigIndex = 1;
198 link->conf.Present = PRESENT_OPTION;
199 198
200 /* The EL3-specific entries in the device structure. */ 199 /* The EL3-specific entries in the device structure. */
201 SET_MODULE_OWNER(dev); 200 SET_MODULE_OWNER(dev);
diff --git a/drivers/net/pcmcia/com20020_cs.c b/drivers/net/pcmcia/com20020_cs.c
index 91f65e91cd5f..0d1c7a41c9c6 100644
--- a/drivers/net/pcmcia/com20020_cs.c
+++ b/drivers/net/pcmcia/com20020_cs.c
@@ -173,7 +173,6 @@ static int com20020_probe(struct pcmcia_device *p_dev)
173 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID; 173 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
174 p_dev->conf.Attributes = CONF_ENABLE_IRQ; 174 p_dev->conf.Attributes = CONF_ENABLE_IRQ;
175 p_dev->conf.IntType = INT_MEMORY_AND_IO; 175 p_dev->conf.IntType = INT_MEMORY_AND_IO;
176 p_dev->conf.Present = PRESENT_OPTION;
177 176
178 p_dev->irq.Instance = info->dev = dev; 177 p_dev->irq.Instance = info->dev = dev;
179 p_dev->priv = info; 178 p_dev->priv = info;
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 2b1238e2dbdb..d88e9b2e93cf 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1617,6 +1617,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
1617 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FastEther PCC-TX", 0x5261440f, 0x485e85d9), 1617 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FastEther PCC-TX", 0x5261440f, 0x485e85d9),
1618 PCMCIA_DEVICE_PROD_ID12("Corega,K.K.", "Ethernet LAN Card", 0x110d26d9, 0x9fd2f0a2), 1618 PCMCIA_DEVICE_PROD_ID12("Corega,K.K.", "Ethernet LAN Card", 0x110d26d9, 0x9fd2f0a2),
1619 PCMCIA_DEVICE_PROD_ID12("corega,K.K.", "Ethernet LAN Card", 0x9791a90e, 0x9fd2f0a2), 1619 PCMCIA_DEVICE_PROD_ID12("corega,K.K.", "Ethernet LAN Card", 0x9791a90e, 0x9fd2f0a2),
1620 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "(CG-LAPCCTXD)", 0x5261440f, 0x73ec0d88),
1620 PCMCIA_DEVICE_PROD_ID12("CouplerlessPCMCIA", "100BASE", 0xee5af0ad, 0x7c2add04), 1621 PCMCIA_DEVICE_PROD_ID12("CouplerlessPCMCIA", "100BASE", 0xee5af0ad, 0x7c2add04),
1621 PCMCIA_DEVICE_PROD_ID12("CyQ've", "ELA-010", 0x77008979, 0x9d8d445d), 1622 PCMCIA_DEVICE_PROD_ID12("CyQ've", "ELA-010", 0x77008979, 0x9d8d445d),
1622 PCMCIA_DEVICE_PROD_ID12("CyQ've", "ELA-110E 10/100M LAN Card", 0x77008979, 0xfd184814), 1623 PCMCIA_DEVICE_PROD_ID12("CyQ've", "ELA-110E 10/100M LAN Card", 0x77008979, 0xfd184814),
@@ -1667,6 +1668,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
1667 PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TX", 0x88fcdeda, 0x6d772737), 1668 PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TX", 0x88fcdeda, 0x6d772737),
1668 PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TE", 0x88fcdeda, 0x0e714bee), 1669 PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TE", 0x88fcdeda, 0x0e714bee),
1669 PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN20T", 0x88fcdeda, 0x81090922), 1670 PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN20T", 0x88fcdeda, 0x81090922),
1671 PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN10TE", 0x88fcdeda, 0xc1e2521c),
1670 PCMCIA_DEVICE_PROD_ID12("LONGSHINE", "PCMCIA Ethernet Card", 0xf866b0b0, 0x6f6652e0), 1672 PCMCIA_DEVICE_PROD_ID12("LONGSHINE", "PCMCIA Ethernet Card", 0xf866b0b0, 0x6f6652e0),
1671 PCMCIA_DEVICE_PROD_ID12("MACNICA", "ME1-JEIDA", 0x20841b68, 0xaf8a3578), 1673 PCMCIA_DEVICE_PROD_ID12("MACNICA", "ME1-JEIDA", 0x20841b68, 0xaf8a3578),
1672 PCMCIA_DEVICE_PROD_ID12("Macsense", "MPC-10", 0xd830297f, 0xd265c307), 1674 PCMCIA_DEVICE_PROD_ID12("Macsense", "MPC-10", 0xd830297f, 0xd265c307),
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index 8478dca3d8d1..5879e7c36988 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -576,7 +576,6 @@ xirc2ps_probe(struct pcmcia_device *link)
576 link->conf.Attributes = CONF_ENABLE_IRQ; 576 link->conf.Attributes = CONF_ENABLE_IRQ;
577 link->conf.IntType = INT_MEMORY_AND_IO; 577 link->conf.IntType = INT_MEMORY_AND_IO;
578 link->conf.ConfigIndex = 1; 578 link->conf.ConfigIndex = 1;
579 link->conf.Present = PRESENT_OPTION;
580 link->irq.Handler = xirc2ps_interrupt; 579 link->irq.Handler = xirc2ps_interrupt;
581 link->irq.Instance = dev; 580 link->irq.Instance = dev;
582 581
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index d79d141a601d..8844c20eac2d 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -208,6 +208,15 @@ static void ql_write_common_reg(struct ql3_adapter *qdev,
208 return; 208 return;
209} 209}
210 210
211static void ql_write_nvram_reg(struct ql3_adapter *qdev,
212 u32 __iomem *reg, u32 value)
213{
214 writel(value, reg);
215 readl(reg);
216 udelay(1);
217 return;
218}
219
211static void ql_write_page0_reg(struct ql3_adapter *qdev, 220static void ql_write_page0_reg(struct ql3_adapter *qdev,
212 u32 __iomem *reg, u32 value) 221 u32 __iomem *reg, u32 value)
213{ 222{
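
ql_write_nvram_reg() differs from ql_write_common_reg() only in reading the register back and waiting 1 µs after each write: for the bit-banged serial EEPROM behind this interface, every clock and data transition has to actually reach the chip and satisfy its minimum hold time before the next one is issued, and a posted PCI write gives no such guarantee. A generic sketch of the write-flush-delay idiom:

#include <linux/io.h>
#include <linux/delay.h>

/* Write one value to a serial-interface register and make sure the
 * EEPROM has really seen it before the caller toggles the next bit. */
static void nvram_reg_write(u32 __iomem *reg, u32 value)
{
	writel(value, reg);	/* may be posted by the PCI bridge */
	readl(reg);		/* read-back forces the write to complete */
	udelay(1);		/* honour the EEPROM's minimum clock timing */
}
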
@@ -336,9 +345,9 @@ static void fm93c56a_select(struct ql3_adapter *qdev)
336 qdev->mem_map_registers; 345 qdev->mem_map_registers;
337 346
338 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; 347 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
339 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, 348 ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
340 ISP_NVRAM_MASK | qdev->eeprom_cmd_data); 349 ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
341 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, 350 ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
342 ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data)); 351 ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
343} 352}
344 353
@@ -355,14 +364,14 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
355 qdev->mem_map_registers; 364 qdev->mem_map_registers;
356 365
357 /* Clock in a zero, then do the start bit */ 366 /* Clock in a zero, then do the start bit */
358 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, 367 ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
359 ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 368 ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
360 AUBURN_EEPROM_DO_1); 369 AUBURN_EEPROM_DO_1);
361 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, 370 ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
362 ISP_NVRAM_MASK | qdev-> 371 ISP_NVRAM_MASK | qdev->
363 eeprom_cmd_data | AUBURN_EEPROM_DO_1 | 372 eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
364 AUBURN_EEPROM_CLK_RISE); 373 AUBURN_EEPROM_CLK_RISE);
365 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, 374 ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
366 ISP_NVRAM_MASK | qdev-> 375 ISP_NVRAM_MASK | qdev->
367 eeprom_cmd_data | AUBURN_EEPROM_DO_1 | 376 eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
368 AUBURN_EEPROM_CLK_FALL); 377 AUBURN_EEPROM_CLK_FALL);
@@ -378,20 +387,20 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
378 * If the bit changed, then change the DO state to 387 * If the bit changed, then change the DO state to
379 * match 388 * match
380 */ 389 */
381 ql_write_common_reg(qdev, 390 ql_write_nvram_reg(qdev,
382 &port_regs->CommonRegs. 391 &port_regs->CommonRegs.
383 serialPortInterfaceReg, 392 serialPortInterfaceReg,
384 ISP_NVRAM_MASK | qdev-> 393 ISP_NVRAM_MASK | qdev->
385 eeprom_cmd_data | dataBit); 394 eeprom_cmd_data | dataBit);
386 previousBit = dataBit; 395 previousBit = dataBit;
387 } 396 }
388 ql_write_common_reg(qdev, 397 ql_write_nvram_reg(qdev,
389 &port_regs->CommonRegs. 398 &port_regs->CommonRegs.
390 serialPortInterfaceReg, 399 serialPortInterfaceReg,
391 ISP_NVRAM_MASK | qdev-> 400 ISP_NVRAM_MASK | qdev->
392 eeprom_cmd_data | dataBit | 401 eeprom_cmd_data | dataBit |
393 AUBURN_EEPROM_CLK_RISE); 402 AUBURN_EEPROM_CLK_RISE);
394 ql_write_common_reg(qdev, 403 ql_write_nvram_reg(qdev,
395 &port_regs->CommonRegs. 404 &port_regs->CommonRegs.
396 serialPortInterfaceReg, 405 serialPortInterfaceReg,
397 ISP_NVRAM_MASK | qdev-> 406 ISP_NVRAM_MASK | qdev->
@@ -412,20 +421,20 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
412 * If the bit changed, then change the DO state to 421 * If the bit changed, then change the DO state to
413 * match 422 * match
414 */ 423 */
415 ql_write_common_reg(qdev, 424 ql_write_nvram_reg(qdev,
416 &port_regs->CommonRegs. 425 &port_regs->CommonRegs.
417 serialPortInterfaceReg, 426 serialPortInterfaceReg,
418 ISP_NVRAM_MASK | qdev-> 427 ISP_NVRAM_MASK | qdev->
419 eeprom_cmd_data | dataBit); 428 eeprom_cmd_data | dataBit);
420 previousBit = dataBit; 429 previousBit = dataBit;
421 } 430 }
422 ql_write_common_reg(qdev, 431 ql_write_nvram_reg(qdev,
423 &port_regs->CommonRegs. 432 &port_regs->CommonRegs.
424 serialPortInterfaceReg, 433 serialPortInterfaceReg,
425 ISP_NVRAM_MASK | qdev-> 434 ISP_NVRAM_MASK | qdev->
426 eeprom_cmd_data | dataBit | 435 eeprom_cmd_data | dataBit |
427 AUBURN_EEPROM_CLK_RISE); 436 AUBURN_EEPROM_CLK_RISE);
428 ql_write_common_reg(qdev, 437 ql_write_nvram_reg(qdev,
429 &port_regs->CommonRegs. 438 &port_regs->CommonRegs.
430 serialPortInterfaceReg, 439 serialPortInterfaceReg,
431 ISP_NVRAM_MASK | qdev-> 440 ISP_NVRAM_MASK | qdev->
@@ -443,7 +452,7 @@ static void fm93c56a_deselect(struct ql3_adapter *qdev)
443 struct ql3xxx_port_registers __iomem *port_regs = 452 struct ql3xxx_port_registers __iomem *port_regs =
444 qdev->mem_map_registers; 453 qdev->mem_map_registers;
445 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0; 454 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
446 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, 455 ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
447 ISP_NVRAM_MASK | qdev->eeprom_cmd_data); 456 ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
448} 457}
449 458
@@ -461,12 +470,12 @@ static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
461 /* Read the data bits */ 470 /* Read the data bits */
462 /* The first bit is a dummy. Clock right over it. */ 471 /* The first bit is a dummy. Clock right over it. */
463 for (i = 0; i < dataBits; i++) { 472 for (i = 0; i < dataBits; i++) {
464 ql_write_common_reg(qdev, 473 ql_write_nvram_reg(qdev,
465 &port_regs->CommonRegs. 474 &port_regs->CommonRegs.
466 serialPortInterfaceReg, 475 serialPortInterfaceReg,
467 ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 476 ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
468 AUBURN_EEPROM_CLK_RISE); 477 AUBURN_EEPROM_CLK_RISE);
469 ql_write_common_reg(qdev, 478 ql_write_nvram_reg(qdev,
470 &port_regs->CommonRegs. 479 &port_regs->CommonRegs.
471 serialPortInterfaceReg, 480 serialPortInterfaceReg,
472 ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 481 ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
@@ -3370,7 +3379,6 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3370 SET_MODULE_OWNER(ndev); 3379 SET_MODULE_OWNER(ndev);
3371 SET_NETDEV_DEV(ndev, &pdev->dev); 3380 SET_NETDEV_DEV(ndev, &pdev->dev);
3372 3381
3373 ndev->features = NETIF_F_LLTX;
3374 if (pci_using_dac) 3382 if (pci_using_dac)
3375 ndev->features |= NETIF_F_HIGHDMA; 3383 ndev->features |= NETIF_F_HIGHDMA;
3376 3384
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 4056ba1ff3c7..f4bf62c2a7a5 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
68 68
69#define DRV_MODULE_NAME "tg3" 69#define DRV_MODULE_NAME "tg3"
70#define PFX DRV_MODULE_NAME ": " 70#define PFX DRV_MODULE_NAME ": "
71#define DRV_MODULE_VERSION "3.71" 71#define DRV_MODULE_VERSION "3.72"
72#define DRV_MODULE_RELDATE "December 15, 2006" 72#define DRV_MODULE_RELDATE "January 8, 2007"
73 73
74#define TG3_DEF_MAC_MODE 0 74#define TG3_DEF_MAC_MODE 0
75#define TG3_DEF_RX_MODE 0 75#define TG3_DEF_RX_MODE 0
@@ -1015,7 +1015,12 @@ out:
1015 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) { 1015 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1016 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); 1016 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1017 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); 1017 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1018 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); 1018 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1019 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1020 tg3_writephy(tp, MII_TG3_TEST1,
1021 MII_TG3_TEST1_TRIM_EN | 0x4);
1022 } else
1023 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1019 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); 1024 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1020 } 1025 }
1021 /* Set Extended packet length bit (bit 14) on all chips that */ 1026 /* Set Extended packet length bit (bit 14) on all chips that */
@@ -10803,9 +10808,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
10803 10808
10804 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { 10809 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10805 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 10810 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10806 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) 10811 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10807 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG; 10812 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10808 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) 10813 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
10814 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
10815 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
10809 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG; 10816 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10810 } 10817 }
10811 10818
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index cf78a7e5997b..80f59ac7ec58 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -1658,6 +1658,9 @@
1658#define MII_TG3_EPHY_TEST 0x1f /* 5906 PHY register */ 1658#define MII_TG3_EPHY_TEST 0x1f /* 5906 PHY register */
1659#define MII_TG3_EPHY_SHADOW_EN 0x80 1659#define MII_TG3_EPHY_SHADOW_EN 0x80
1660 1660
1661#define MII_TG3_TEST1 0x1e
1662#define MII_TG3_TEST1_TRIM_EN 0x0010
1663
1661/* There are two ways to manage the TX descriptors on the tigon3. 1664/* There are two ways to manage the TX descriptors on the tigon3.
1662 * Either the descriptors are in host DMA'able memory, or they 1665 * Either the descriptors are in host DMA'able memory, or they
1663 * exist only in the cards on-chip SRAM. All 16 send bds are under 1666 * exist only in the cards on-chip SRAM. All 16 send bds are under
@@ -2256,6 +2259,7 @@ struct tg3 {
2256#define TG3_FLG2_1SHOT_MSI 0x10000000 2259#define TG3_FLG2_1SHOT_MSI 0x10000000
2257#define TG3_FLG2_PHY_JITTER_BUG 0x20000000 2260#define TG3_FLG2_PHY_JITTER_BUG 0x20000000
2258#define TG3_FLG2_NO_FWARE_REPORTED 0x40000000 2261#define TG3_FLG2_NO_FWARE_REPORTED 0x40000000
2262#define TG3_FLG2_PHY_ADJUST_TRIM 0x80000000
2259 2263
2260 u32 split_mode_max_reqs; 2264 u32 split_mode_max_reqs;
2261#define SPLIT_MODE_5704_MAX_REQ 3 2265#define SPLIT_MODE_5704_MAX_REQ 3
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index 0e94fbbf7a94..b85857a84870 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -2664,7 +2664,7 @@ static void __ipw2100_rx_process(struct ipw2100_priv *priv)
2664 break; 2664 break;
2665 } 2665 }
2666#endif 2666#endif
2667 if (stats.len < sizeof(u->rx_data.header)) 2667 if (stats.len < sizeof(struct ieee80211_hdr_3addr))
2668 break; 2668 break;
2669 switch (WLAN_FC_GET_TYPE(u->rx_data.header.frame_ctl)) { 2669 switch (WLAN_FC_GET_TYPE(u->rx_data.header.frame_ctl)) {
2670 case IEEE80211_FTYPE_MGMT: 2670 case IEEE80211_FTYPE_MGMT:
diff --git a/drivers/net/wireless/netwave_cs.c b/drivers/net/wireless/netwave_cs.c
index 644b4741ef74..a009ab517710 100644
--- a/drivers/net/wireless/netwave_cs.c
+++ b/drivers/net/wireless/netwave_cs.c
@@ -406,7 +406,6 @@ static int netwave_probe(struct pcmcia_device *link)
406 link->conf.Attributes = CONF_ENABLE_IRQ; 406 link->conf.Attributes = CONF_ENABLE_IRQ;
407 link->conf.IntType = INT_MEMORY_AND_IO; 407 link->conf.IntType = INT_MEMORY_AND_IO;
408 link->conf.ConfigIndex = 1; 408 link->conf.ConfigIndex = 1;
409 link->conf.Present = PRESENT_OPTION;
410 409
411 /* Netwave private struct init. link/dev/node already taken care of, 410 /* Netwave private struct init. link/dev/node already taken care of,
412 * other stuff zero'd - Jean II */ 411 * other stuff zero'd - Jean II */
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 88e10c9bc4ac..47b2ccb6a633 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -331,7 +331,6 @@ static int ray_probe(struct pcmcia_device *p_dev)
331 p_dev->conf.Attributes = CONF_ENABLE_IRQ; 331 p_dev->conf.Attributes = CONF_ENABLE_IRQ;
332 p_dev->conf.IntType = INT_MEMORY_AND_IO; 332 p_dev->conf.IntType = INT_MEMORY_AND_IO;
333 p_dev->conf.ConfigIndex = 1; 333 p_dev->conf.ConfigIndex = 1;
334 p_dev->conf.Present = PRESENT_OPTION;
335 334
336 p_dev->priv = dev; 335 p_dev->priv = dev;
337 p_dev->irq.Instance = dev; 336 p_dev->irq.Instance = dev;
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 583e0d655a98..c250f08c8dd5 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1928,7 +1928,6 @@ static int wl3501_probe(struct pcmcia_device *p_dev)
1928 p_dev->conf.Attributes = CONF_ENABLE_IRQ; 1928 p_dev->conf.Attributes = CONF_ENABLE_IRQ;
1929 p_dev->conf.IntType = INT_MEMORY_AND_IO; 1929 p_dev->conf.IntType = INT_MEMORY_AND_IO;
1930 p_dev->conf.ConfigIndex = 1; 1930 p_dev->conf.ConfigIndex = 1;
1931 p_dev->conf.Present = PRESENT_OPTION;
1932 1931
1933 dev = alloc_etherdev(sizeof(struct wl3501_card)); 1932 dev = alloc_etherdev(sizeof(struct wl3501_card));
1934 if (!dev) 1933 if (!dev)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6bfb942428e4..206c834d263a 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -254,7 +254,8 @@ static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
254 if ((cap & mask) == ht_cap) 254 if ((cap & mask) == ht_cap)
255 return pos; 255 return pos;
256 256
257 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos, 257 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
258 pos + PCI_CAP_LIST_NEXT,
258 PCI_CAP_ID_HT, &ttl); 259 PCI_CAP_ID_HT, &ttl);
259 } 260 }
260 261
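
The pci.c fix resumes the HyperTransport search at pos + PCI_CAP_LIST_NEXT; restarting from pos itself made the helper find the same capability again instead of moving down the list. Roughly, a capability walk makes progress only by following each entry's "next" pointer, as in this simplified sketch (the ttl bound and helper are illustrative):

#include <linux/pci.h>

/* Illustrative walk over a device's capability list: each capability
 * header is [cap id][next pointer], and advancing via the next pointer
 * at pos + PCI_CAP_LIST_NEXT is what keeps the walk moving. */
static int find_cap_from(struct pci_dev *dev, u8 pos, u8 wanted)
{
	int ttl = 48;			/* bound the walk against broken lists */
	u8 id;

	while (pos && ttl--) {
		pci_read_config_byte(dev, pos + PCI_CAP_LIST_ID, &id);
		if (id == wanted)
			return pos;
		/* move on via the *next* pointer of the current capability */
		pci_read_config_byte(dev, pos + PCI_CAP_LIST_NEXT, &pos);
	}
	return 0;
}
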
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 8f0322d6f3bf..0a70943f8bb6 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -955,7 +955,7 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, k8t_sound_ho
955 * becomes necessary to do this tweak in two steps -- I've chosen the Host 955 * becomes necessary to do this tweak in two steps -- I've chosen the Host
956 * bridge as trigger. 956 * bridge as trigger.
957 */ 957 */
958static int __initdata asus_hides_smbus; 958static int asus_hides_smbus;
959 959
960static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev) 960static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
961{ 961{
@@ -1117,10 +1117,11 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_h
1117static void quirk_sis_96x_smbus(struct pci_dev *dev) 1117static void quirk_sis_96x_smbus(struct pci_dev *dev)
1118{ 1118{
1119 u8 val = 0; 1119 u8 val = 0;
1120 printk(KERN_INFO "Enabling SiS 96x SMBus.\n");
1121 pci_read_config_byte(dev, 0x77, &val);
1122 pci_write_config_byte(dev, 0x77, val & ~0x10);
1123 pci_read_config_byte(dev, 0x77, &val); 1120 pci_read_config_byte(dev, 0x77, &val);
1121 if (val & 0x10) {
1122 printk(KERN_INFO "Enabling SiS 96x SMBus.\n");
1123 pci_write_config_byte(dev, 0x77, val & ~0x10);
1124 }
1124} 1125}
1125 1126
1126/* 1127/*
@@ -1152,11 +1153,12 @@ static void quirk_sis_503(struct pci_dev *dev)
1152 printk(KERN_WARNING "Uncovering SIS%x that hid as a SIS503 (compatible=%d)\n", devid, sis_96x_compatible); 1153 printk(KERN_WARNING "Uncovering SIS%x that hid as a SIS503 (compatible=%d)\n", devid, sis_96x_compatible);
1153 1154
1154 /* 1155 /*
1155 * Ok, it now shows up as a 96x.. The 96x quirks are after 1156 * Ok, it now shows up as a 96x.. run the 96x quirk by
1156 * the 503 quirk in the quirk table, so they'll automatically 1157 * hand in case it has already been processed.
1157 * run and enable things like the SMBus device 1158 * (depends on link order, which is apparently not guaranteed)
1158 */ 1159 */
1159 dev->device = devid; 1160 dev->device = devid;
1161 quirk_sis_96x_smbus(dev);
1160} 1162}
1161 1163
1162static void __init quirk_sis_96x_compatible(struct pci_dev *dev) 1164static void __init quirk_sis_96x_compatible(struct pci_dev *dev)
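
The reworked quirk_sis_96x_smbus() first reads config register 0x77 and only clears the "hide SMBus" bit (and logs) when it is actually set, which makes the quirk safe to run more than once; that matters because quirk_sis_503() now calls it directly instead of relying on quirk-table ordering. A sketch of that idempotent read-test-write style, with 0x77/0x10 taken from the hunk and the rest illustrative:

#include <linux/pci.h>

#define HIDE_SMBUS_REG	0x77	/* config register from the hunk */
#define HIDE_SMBUS_BIT	0x10

static void unhide_smbus(struct pci_dev *dev)
{
	u8 val = 0;

	pci_read_config_byte(dev, HIDE_SMBUS_REG, &val);
	if (val & HIDE_SMBUS_BIT) {		/* only touch it if it is hidden */
		dev_info(&dev->dev, "enabling hidden SMBus controller\n");
		pci_write_config_byte(dev, HIDE_SMBUS_REG, val & ~HIDE_SMBUS_BIT);
	}
	/* running this again is now a no-op: the bit is already clear */
}
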
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 72ba1a70f35f..e9e0934380b8 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -264,8 +264,6 @@ static int sh_rtc_proc(struct device *dev, struct seq_file *seq)
264 unsigned int tmp; 264 unsigned int tmp;
265 265
266 tmp = readb(rtc->regbase + RCR1); 266 tmp = readb(rtc->regbase + RCR1);
267 seq_printf(seq, "alarm_IRQ\t: %s\n",
268 (tmp & RCR1_AIE) ? "yes" : "no");
269 seq_printf(seq, "carry_IRQ\t: %s\n", 267 seq_printf(seq, "carry_IRQ\t: %s\n",
270 (tmp & RCR1_CIE) ? "yes" : "no"); 268 (tmp & RCR1_CIE) ? "yes" : "no");
271 269
@@ -428,6 +426,8 @@ static int sh_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
428 tm->tm_mon -= 1; /* RTC is 1-12, tm_mon is 0-11 */ 426 tm->tm_mon -= 1; /* RTC is 1-12, tm_mon is 0-11 */
429 tm->tm_year = 0xffff; 427 tm->tm_year = 0xffff;
430 428
429 wkalrm->enabled = (readb(rtc->regbase + RCR1) & RCR1_AIE) ? 1 : 0;
430
431 spin_unlock_irq(&rtc->lock); 431 spin_unlock_irq(&rtc->lock);
432 432
433 return 0; 433 return 0;
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 1678b6c757ec..a420cd099041 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -117,7 +117,7 @@ vmcp_write(struct file *file, const char __user * buff, size_t count,
117 return -ENOMEM; 117 return -ENOMEM;
118 } 118 }
119 debug_text_event(vmcp_debug, 1, cmd); 119 debug_text_event(vmcp_debug, 1, cmd);
120 session->resp_size = __cpcmd(cmd, session->response, 120 session->resp_size = cpcmd(cmd, session->response,
121 session->bufsize, 121 session->bufsize,
122 &session->resp_code); 122 &session->resp_code);
123 up(&session->mutex); 123 up(&session->mutex);
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index b471ac4a1bf6..ae1bf231d089 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -880,19 +880,15 @@ static void cio_reset_pgm_check_handler(void)
880static int stsch_reset(struct subchannel_id schid, volatile struct schib *addr) 880static int stsch_reset(struct subchannel_id schid, volatile struct schib *addr)
881{ 881{
882 int rc; 882 int rc;
883 register struct subchannel_id reg1 asm ("1") = schid;
884 883
885 pgm_check_occured = 0; 884 pgm_check_occured = 0;
886 s390_reset_pgm_handler = cio_reset_pgm_check_handler; 885 s390_reset_pgm_handler = cio_reset_pgm_check_handler;
886 rc = stsch(schid, addr);
887 s390_reset_pgm_handler = NULL;
887 888
888 asm volatile( 889 /* The program check handler could have changed pgm_check_occured */
889 " stsch 0(%2)\n" 890 barrier();
890 " ipm %0\n"
891 " srl %0,28"
892 : "=d" (rc)
893 : "d" (reg1), "a" (addr), "m" (*addr) : "memory", "cc");
894 891
895 s390_reset_pgm_handler = NULL;
896 if (pgm_check_occured) 892 if (pgm_check_occured)
897 return -EIO; 893 return -EIO;
898 else 894 else
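
The rewritten stsch_reset() drops the hand-rolled inline assembly in favour of the regular stsch() helper and adds barrier(): pgm_check_occured is set behind the compiler's back by the program-check handler installed just above, so without the barrier the compiler could keep using a value of the flag it loaded before the access. A generic sketch of that pattern; the names and hook are illustrative, not the s390 ones:

#include <linux/compiler.h>
#include <linux/errno.h>

static int trap_seen;			/* set asynchronously by a fault handler */
extern void (*fault_hook)(void);	/* illustrative hook the handler fires */
extern int risky_hw_access(void);	/* the operation that may fault */

static void note_trap(void)
{
	trap_seen = 1;
}

static int guarded_access(void)
{
	int rc;

	trap_seen = 0;
	fault_hook = note_trap;		/* arm the handler */
	rc = risky_hw_access();
	fault_hook = NULL;		/* disarm */

	barrier();			/* force a real reload of trap_seen below */
	if (trap_seen)
		return -EIO;
	return rc;
}
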
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 1a93fa684e9f..52625153a4f0 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -27,10 +27,7 @@ config IUCV
27 help 27 help
28 Select this option if you want to use inter-user communication 28 Select this option if you want to use inter-user communication
29 under VM or VIF. If unsure, say "Y" to enable a fast communication 29 under VM or VIF. If unsure, say "Y" to enable a fast communication
30 link between VM guests. At boot time the user ID of the guest needs 30 link between VM guests.
31 to be passed to the kernel. Note that both kernels need to be
32 compiled with this option and both need to be booted with the user ID
33 of the other VM guest.
34 31
35config NETIUCV 32config NETIUCV
36 tristate "IUCV network device support (VM only)" 33 tristate "IUCV network device support (VM only)"
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h
index 53c358c7d368..e95c281f1e36 100644
--- a/drivers/s390/net/qeth.h
+++ b/drivers/s390/net/qeth.h
@@ -710,7 +710,7 @@ struct qeth_reply {
710 int (*callback)(struct qeth_card *,struct qeth_reply *,unsigned long); 710 int (*callback)(struct qeth_card *,struct qeth_reply *,unsigned long);
711 u32 seqno; 711 u32 seqno;
712 unsigned long offset; 712 unsigned long offset;
713 int received; 713 atomic_t received;
714 int rc; 714 int rc;
715 void *param; 715 void *param;
716 struct qeth_card *card; 716 struct qeth_card *card;
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 2bde4f1fb9c2..d2efa5ff125d 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -471,7 +471,7 @@ qeth_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
471 channel->state == CH_STATE_UP) 471 channel->state == CH_STATE_UP)
472 qeth_issue_next_read(card); 472 qeth_issue_next_read(card);
473 473
474 tasklet_schedule(&channel->irq_tasklet); 474 qeth_irq_tasklet((unsigned long)channel);
475 return; 475 return;
476out: 476out:
477 wake_up(&card->wait_q); 477 wake_up(&card->wait_q);
@@ -951,40 +951,6 @@ qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
951} 951}
952 952
953static int 953static int
954qeth_register_ip_addresses(void *ptr)
955{
956 struct qeth_card *card;
957
958 card = (struct qeth_card *) ptr;
959 daemonize("qeth_reg_ip");
960 QETH_DBF_TEXT(trace,4,"regipth1");
961 if (!qeth_do_run_thread(card, QETH_SET_IP_THREAD))
962 return 0;
963 QETH_DBF_TEXT(trace,4,"regipth2");
964 qeth_set_ip_addr_list(card);
965 qeth_clear_thread_running_bit(card, QETH_SET_IP_THREAD);
966 return 0;
967}
968
969/*
970 * Drive the SET_PROMISC_MODE thread
971 */
972static int
973qeth_set_promisc_mode(void *ptr)
974{
975 struct qeth_card *card = (struct qeth_card *) ptr;
976
977 daemonize("qeth_setprm");
978 QETH_DBF_TEXT(trace,4,"setprm1");
979 if (!qeth_do_run_thread(card, QETH_SET_PROMISC_MODE_THREAD))
980 return 0;
981 QETH_DBF_TEXT(trace,4,"setprm2");
982 qeth_setadp_promisc_mode(card);
983 qeth_clear_thread_running_bit(card, QETH_SET_PROMISC_MODE_THREAD);
984 return 0;
985}
986
987static int
988qeth_recover(void *ptr) 954qeth_recover(void *ptr)
989{ 955{
990 struct qeth_card *card; 956 struct qeth_card *card;
@@ -1047,11 +1013,6 @@ qeth_start_kernel_thread(struct work_struct *work)
1047 if (card->read.state != CH_STATE_UP && 1013 if (card->read.state != CH_STATE_UP &&
1048 card->write.state != CH_STATE_UP) 1014 card->write.state != CH_STATE_UP)
1049 return; 1015 return;
1050
1051 if (qeth_do_start_thread(card, QETH_SET_IP_THREAD))
1052 kernel_thread(qeth_register_ip_addresses, (void *)card,SIGCHLD);
1053 if (qeth_do_start_thread(card, QETH_SET_PROMISC_MODE_THREAD))
1054 kernel_thread(qeth_set_promisc_mode, (void *)card, SIGCHLD);
1055 if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) 1016 if (qeth_do_start_thread(card, QETH_RECOVER_THREAD))
1056 kernel_thread(qeth_recover, (void *) card, SIGCHLD); 1017 kernel_thread(qeth_recover, (void *) card, SIGCHLD);
1057} 1018}
@@ -1074,7 +1035,7 @@ qeth_set_intial_options(struct qeth_card *card)
1074 card->options.layer2 = 1; 1035 card->options.layer2 = 1;
1075 else 1036 else
1076 card->options.layer2 = 0; 1037 card->options.layer2 = 0;
1077 card->options.performance_stats = 1; 1038 card->options.performance_stats = 0;
1078} 1039}
1079 1040
1080/** 1041/**
@@ -1613,8 +1574,6 @@ qeth_issue_next_read(struct qeth_card *card)
1613 return -ENOMEM; 1574 return -ENOMEM;
1614 } 1575 }
1615 qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE); 1576 qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
1616 wait_event(card->wait_q,
1617 atomic_cmpxchg(&card->read.irq_pending, 0, 1) == 0);
1618 QETH_DBF_TEXT(trace, 6, "noirqpnd"); 1577 QETH_DBF_TEXT(trace, 6, "noirqpnd");
1619 rc = ccw_device_start(card->read.ccwdev, &card->read.ccw, 1578 rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
1620 (addr_t) iob, 0, 0); 1579 (addr_t) iob, 0, 0);
@@ -1635,6 +1594,7 @@ qeth_alloc_reply(struct qeth_card *card)
1635 reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC); 1594 reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
1636 if (reply){ 1595 if (reply){
1637 atomic_set(&reply->refcnt, 1); 1596 atomic_set(&reply->refcnt, 1);
1597 atomic_set(&reply->received, 0);
1638 reply->card = card; 1598 reply->card = card;
1639 }; 1599 };
1640 return reply; 1600 return reply;
@@ -1655,31 +1615,6 @@ qeth_put_reply(struct qeth_reply *reply)
1655 kfree(reply); 1615 kfree(reply);
1656} 1616}
1657 1617
1658static void
1659qeth_cmd_timeout(unsigned long data)
1660{
1661 struct qeth_reply *reply, *list_reply, *r;
1662 unsigned long flags;
1663
1664 reply = (struct qeth_reply *) data;
1665 spin_lock_irqsave(&reply->card->lock, flags);
1666 list_for_each_entry_safe(list_reply, r,
1667 &reply->card->cmd_waiter_list, list) {
1668 if (reply == list_reply){
1669 qeth_get_reply(reply);
1670 list_del_init(&reply->list);
1671 spin_unlock_irqrestore(&reply->card->lock, flags);
1672 reply->rc = -ETIME;
1673 reply->received = 1;
1674 wake_up(&reply->wait_q);
1675 qeth_put_reply(reply);
1676 return;
1677 }
1678 }
1679 spin_unlock_irqrestore(&reply->card->lock, flags);
1680}
1681
1682
1683static struct qeth_ipa_cmd * 1618static struct qeth_ipa_cmd *
1684qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob) 1619qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob)
1685{ 1620{
@@ -1745,7 +1680,7 @@ qeth_clear_ipacmd_list(struct qeth_card *card)
1745 list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) { 1680 list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
1746 qeth_get_reply(reply); 1681 qeth_get_reply(reply);
1747 reply->rc = -EIO; 1682 reply->rc = -EIO;
1748 reply->received = 1; 1683 atomic_inc(&reply->received);
1749 list_del_init(&reply->list); 1684 list_del_init(&reply->list);
1750 wake_up(&reply->wait_q); 1685 wake_up(&reply->wait_q);
1751 qeth_put_reply(reply); 1686 qeth_put_reply(reply);
@@ -1814,7 +1749,7 @@ qeth_send_control_data_cb(struct qeth_channel *channel,
1814 &card->cmd_waiter_list); 1749 &card->cmd_waiter_list);
1815 spin_unlock_irqrestore(&card->lock, flags); 1750 spin_unlock_irqrestore(&card->lock, flags);
1816 } else { 1751 } else {
1817 reply->received = 1; 1752 atomic_inc(&reply->received);
1818 wake_up(&reply->wait_q); 1753 wake_up(&reply->wait_q);
1819 } 1754 }
1820 qeth_put_reply(reply); 1755 qeth_put_reply(reply);
@@ -1858,7 +1793,7 @@ qeth_send_control_data(struct qeth_card *card, int len,
1858 int rc; 1793 int rc;
1859 unsigned long flags; 1794 unsigned long flags;
1860 struct qeth_reply *reply = NULL; 1795 struct qeth_reply *reply = NULL;
1861 struct timer_list timer; 1796 unsigned long timeout;
1862 1797
1863 QETH_DBF_TEXT(trace, 2, "sendctl"); 1798 QETH_DBF_TEXT(trace, 2, "sendctl");
1864 1799
@@ -1873,21 +1808,20 @@ qeth_send_control_data(struct qeth_card *card, int len,
1873 reply->seqno = QETH_IDX_COMMAND_SEQNO; 1808 reply->seqno = QETH_IDX_COMMAND_SEQNO;
1874 else 1809 else
1875 reply->seqno = card->seqno.ipa++; 1810 reply->seqno = card->seqno.ipa++;
1876 init_timer(&timer);
1877 timer.function = qeth_cmd_timeout;
1878 timer.data = (unsigned long) reply;
1879 init_waitqueue_head(&reply->wait_q); 1811 init_waitqueue_head(&reply->wait_q);
1880 spin_lock_irqsave(&card->lock, flags); 1812 spin_lock_irqsave(&card->lock, flags);
1881 list_add_tail(&reply->list, &card->cmd_waiter_list); 1813 list_add_tail(&reply->list, &card->cmd_waiter_list);
1882 spin_unlock_irqrestore(&card->lock, flags); 1814 spin_unlock_irqrestore(&card->lock, flags);
1883 QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN); 1815 QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
1884 wait_event(card->wait_q, 1816
1885 atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0); 1817 while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
1886 qeth_prepare_control_data(card, len, iob); 1818 qeth_prepare_control_data(card, len, iob);
1819
1887 if (IS_IPA(iob->data)) 1820 if (IS_IPA(iob->data))
1888 timer.expires = jiffies + QETH_IPA_TIMEOUT; 1821 timeout = jiffies + QETH_IPA_TIMEOUT;
1889 else 1822 else
1890 timer.expires = jiffies + QETH_TIMEOUT; 1823 timeout = jiffies + QETH_TIMEOUT;
1824
1891 QETH_DBF_TEXT(trace, 6, "noirqpnd"); 1825 QETH_DBF_TEXT(trace, 6, "noirqpnd");
1892 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); 1826 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
1893 rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, 1827 rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
@@ -1906,9 +1840,16 @@ qeth_send_control_data(struct qeth_card *card, int len,
1906 wake_up(&card->wait_q); 1840 wake_up(&card->wait_q);
1907 return rc; 1841 return rc;
1908 } 1842 }
1909 add_timer(&timer); 1843 while (!atomic_read(&reply->received)) {
1910 wait_event(reply->wait_q, reply->received); 1844 if (time_after(jiffies, timeout)) {
1911 del_timer_sync(&timer); 1845 spin_lock_irqsave(&reply->card->lock, flags);
1846 list_del_init(&reply->list);
1847 spin_unlock_irqrestore(&reply->card->lock, flags);
1848 reply->rc = -ETIME;
1849 atomic_inc(&reply->received);
1850 wake_up(&reply->wait_q);
1851 }
1852 };
1912 rc = reply->rc; 1853 rc = reply->rc;
1913 qeth_put_reply(reply); 1854 qeth_put_reply(reply);
1914 return rc; 1855 return rc;
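
Note: the three hunks above drop the struct timer_list in qeth_send_control_data and instead record an absolute deadline in jiffies, polling the atomic "received" flag until it is set or the deadline passes. A rough, self-contained sketch of that deadline check, with assumed demo names; unlike this sketch, the hunk itself spins without cpu_relax().

#include <linux/jiffies.h>
#include <linux/errno.h>
#include <asm/atomic.h>
#include <asm/processor.h>

static int demo_wait_with_deadline(atomic_t *done, unsigned long max_wait)
{
	unsigned long deadline = jiffies + max_wait;	/* absolute deadline */

	while (!atomic_read(done)) {
		if (time_after(jiffies, deadline))
			return -ETIME;			/* deadline passed, give up */
		cpu_relax();				/* ease off while spinning */
	}
	return 0;
}
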
@@ -2466,32 +2407,17 @@ qeth_rebuild_skb_fake_ll(struct qeth_card *card, struct sk_buff *skb,
2466 qeth_rebuild_skb_fake_ll_eth(card, skb, hdr); 2407 qeth_rebuild_skb_fake_ll_eth(card, skb, hdr);
2467} 2408}
2468 2409
2469static inline __u16 2410static inline void
2470qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, 2411qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2471 struct qeth_hdr *hdr) 2412 struct qeth_hdr *hdr)
2472{ 2413{
2473 unsigned short vlan_id = 0;
2474#ifdef CONFIG_QETH_VLAN
2475 struct vlan_hdr *vhdr;
2476#endif
2477
2478 skb->pkt_type = PACKET_HOST; 2414 skb->pkt_type = PACKET_HOST;
2479 skb->protocol = qeth_type_trans(skb, skb->dev); 2415 skb->protocol = qeth_type_trans(skb, skb->dev);
2480 if (card->options.checksum_type == NO_CHECKSUMMING) 2416 if (card->options.checksum_type == NO_CHECKSUMMING)
2481 skb->ip_summed = CHECKSUM_UNNECESSARY; 2417 skb->ip_summed = CHECKSUM_UNNECESSARY;
2482 else 2418 else
2483 skb->ip_summed = CHECKSUM_NONE; 2419 skb->ip_summed = CHECKSUM_NONE;
2484#ifdef CONFIG_QETH_VLAN
2485 if (hdr->hdr.l2.flags[2] & (QETH_LAYER2_FLAG_VLAN)) {
2486 vhdr = (struct vlan_hdr *) skb->data;
2487 skb->protocol =
2488 __constant_htons(vhdr->h_vlan_encapsulated_proto);
2489 vlan_id = hdr->hdr.l2.vlan_id;
2490 skb_pull(skb, VLAN_HLEN);
2491 }
2492#endif
2493 *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno; 2420 *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
2494 return vlan_id;
2495} 2421}
2496 2422
2497static inline __u16 2423static inline __u16
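
Note: the hunk above removes the layer-2 VLAN stripping from qeth_layer2_rebuild_skb, so the function no longer returns a VLAN id. For reference, a stand-alone sketch of pulling an 802.1Q header off an skb; it is close in spirit to the deleted block, except that the deleted code took the VLAN id from the qeth hardware header rather than from the tag itself. Names are illustrative.

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <asm/byteorder.h>

static u16 demo_strip_vlan(struct sk_buff *skb)
{
	/* assumes skb->data already points at the 802.1Q tag */
	struct vlan_hdr *vhdr = (struct vlan_hdr *)skb->data;
	u16 vlan_id = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;

	skb->protocol = vhdr->h_vlan_encapsulated_proto;
	skb_pull(skb, VLAN_HLEN);	/* drop the 4-byte 802.1Q header */
	return vlan_id;
}
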
@@ -2560,7 +2486,6 @@ qeth_process_inbound_buffer(struct qeth_card *card,
2560 int offset; 2486 int offset;
2561 int rxrc; 2487 int rxrc;
2562 __u16 vlan_tag = 0; 2488 __u16 vlan_tag = 0;
2563 __u16 *vlan_addr;
2564 2489
2565 /* get first element of current buffer */ 2490 /* get first element of current buffer */
2566 element = (struct qdio_buffer_element *)&buf->buffer->element[0]; 2491 element = (struct qdio_buffer_element *)&buf->buffer->element[0];
@@ -2571,7 +2496,7 @@ qeth_process_inbound_buffer(struct qeth_card *card,
2571 &offset, &hdr))) { 2496 &offset, &hdr))) {
2572 skb->dev = card->dev; 2497 skb->dev = card->dev;
2573 if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) 2498 if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
2574 vlan_tag = qeth_layer2_rebuild_skb(card, skb, hdr); 2499 qeth_layer2_rebuild_skb(card, skb, hdr);
2575 else if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3) 2500 else if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3)
2576 vlan_tag = qeth_rebuild_skb(card, skb, hdr); 2501 vlan_tag = qeth_rebuild_skb(card, skb, hdr);
2577 else { /*in case of OSN*/ 2502 else { /*in case of OSN*/
@@ -3968,13 +3893,22 @@ static inline struct sk_buff *
3968qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, 3893qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb,
3969 struct qeth_hdr **hdr, int ipv) 3894 struct qeth_hdr **hdr, int ipv)
3970{ 3895{
3971 struct sk_buff *new_skb; 3896 struct sk_buff *new_skb, *new_skb2;
3972 3897
3973 QETH_DBF_TEXT(trace, 6, "prepskb"); 3898 QETH_DBF_TEXT(trace, 6, "prepskb");
3974 3899 new_skb = skb;
3975 new_skb = qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr)); 3900 new_skb = qeth_pskb_unshare(skb, GFP_ATOMIC);
3976 if (new_skb == NULL) 3901 if (!new_skb)
3902 return NULL;
3903 new_skb2 = qeth_realloc_headroom(card, new_skb,
3904 sizeof(struct qeth_hdr));
3905 if (!new_skb2) {
3906 __qeth_free_new_skb(skb, new_skb);
3977 return NULL; 3907 return NULL;
3908 }
3909 if (new_skb != skb)
3910 __qeth_free_new_skb(new_skb2, new_skb);
3911 new_skb = new_skb2;
3978 *hdr = __qeth_prepare_skb(card, new_skb, ipv); 3912 *hdr = __qeth_prepare_skb(card, new_skb, ipv);
3979 if (*hdr == NULL) { 3913 if (*hdr == NULL) {
3980 __qeth_free_new_skb(skb, new_skb); 3914 __qeth_free_new_skb(skb, new_skb);
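
Note: qeth_prepare_skb now unshares the skb before growing its headroom, using the driver's private helpers (qeth_pskb_unshare, qeth_realloc_headroom, __qeth_free_new_skb). A generic sketch of the same idea with the stock skb helpers; ownership rules differ slightly from the driver's wrappers, and the function name is assumed.

#include <linux/skbuff.h>

static struct sk_buff *demo_prepare_headroom(struct sk_buff *skb,
					     unsigned int needed)
{
	struct sk_buff *nskb;

	/* get a private copy if anyone else holds a reference;
	 * skb_unshare() frees the original when it has to copy */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		return NULL;

	if (skb_headroom(skb) >= needed)
		return skb;

	/* skb_realloc_headroom() hands back a fresh copy; drop the old one */
	nskb = skb_realloc_headroom(skb, needed);
	kfree_skb(skb);
	return nskb;
}
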
@@ -4844,9 +4778,11 @@ qeth_arp_query(struct qeth_card *card, char __user *udata)
4844 "(0x%x/%d)\n", 4778 "(0x%x/%d)\n",
4845 QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc), 4779 QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc),
4846 tmp, tmp); 4780 tmp, tmp);
4847 copy_to_user(udata, qinfo.udata, 4); 4781 if (copy_to_user(udata, qinfo.udata, 4))
4782 rc = -EFAULT;
4848 } else { 4783 } else {
4849 copy_to_user(udata, qinfo.udata, qinfo.udata_len); 4784 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
4785 rc = -EFAULT;
4850 } 4786 }
4851 kfree(qinfo.udata); 4787 kfree(qinfo.udata);
4852 return rc; 4788 return rc;
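
Note: both branches of qeth_arp_query now check copy_to_user(), which returns the number of bytes it failed to copy, and translate any failure into -EFAULT. An illustrative helper only, not driver code:

#include <linux/errno.h>
#include <asm/uaccess.h>

static int demo_copy_result(void __user *udata, const void *buf,
			    unsigned long len)
{
	if (copy_to_user(udata, buf, len))	/* non-zero: bytes not copied */
		return -EFAULT;
	return 0;
}
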
@@ -4992,8 +4928,10 @@ qeth_snmp_command(struct qeth_card *card, char __user *udata)
4992 if (rc) 4928 if (rc)
4993 PRINT_WARN("SNMP command failed on %s: (0x%x)\n", 4929 PRINT_WARN("SNMP command failed on %s: (0x%x)\n",
4994 QETH_CARD_IFNAME(card), rc); 4930 QETH_CARD_IFNAME(card), rc);
4995 else 4931 else {
4996 copy_to_user(udata, qinfo.udata, qinfo.udata_len); 4932 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
4933 rc = -EFAULT;
4934 }
4997 4935
4998 kfree(ureq); 4936 kfree(ureq);
4999 kfree(qinfo.udata); 4937 kfree(qinfo.udata);
@@ -5544,12 +5482,10 @@ qeth_set_multicast_list(struct net_device *dev)
5544 qeth_add_multicast_ipv6(card); 5482 qeth_add_multicast_ipv6(card);
5545#endif 5483#endif
5546out: 5484out:
5547 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) 5485 qeth_set_ip_addr_list(card);
5548 schedule_work(&card->kernel_thread_starter);
5549 if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) 5486 if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
5550 return; 5487 return;
5551 if (qeth_set_thread_start_bit(card, QETH_SET_PROMISC_MODE_THREAD)==0) 5488 qeth_setadp_promisc_mode(card);
5552 schedule_work(&card->kernel_thread_starter);
5553} 5489}
5554 5490
5555static int 5491static int
@@ -6351,6 +6287,42 @@ static struct ethtool_ops qeth_ethtool_ops = {
6351}; 6287};
6352 6288
6353static int 6289static int
6290qeth_hard_header_parse(struct sk_buff *skb, unsigned char *haddr)
6291{
6292 struct qeth_card *card;
6293 struct ethhdr *eth;
6294
6295 card = qeth_get_card_from_dev(skb->dev);
6296 if (card->options.layer2)
6297 goto haveheader;
6298#ifdef CONFIG_QETH_IPV6
 6299 /* because of the manipulated arp constructor and the ARP
 6300 flag for OSAE devices we have some nasty exceptions */
6301 if (card->info.type == QETH_CARD_TYPE_OSAE) {
6302 if (!card->options.fake_ll) {
6303 if ((skb->pkt_type==PACKET_OUTGOING) &&
6304 (skb->protocol==ETH_P_IPV6))
6305 goto haveheader;
6306 else
6307 return 0;
6308 } else {
6309 if ((skb->pkt_type==PACKET_OUTGOING) &&
6310 (skb->protocol==ETH_P_IP))
6311 return 0;
6312 else
6313 goto haveheader;
6314 }
6315 }
6316#endif
6317 if (!card->options.fake_ll)
6318 return 0;
6319haveheader:
6320 eth = eth_hdr(skb);
6321 memcpy(haddr, eth->h_source, ETH_ALEN);
6322 return ETH_ALEN;
6323}
6324
6325static int
6354qeth_netdev_init(struct net_device *dev) 6326qeth_netdev_init(struct net_device *dev)
6355{ 6327{
6356 struct qeth_card *card; 6328 struct qeth_card *card;
@@ -6388,7 +6360,10 @@ qeth_netdev_init(struct net_device *dev)
6388 if (card->options.fake_ll && 6360 if (card->options.fake_ll &&
6389 (qeth_get_netdev_flags(card) & IFF_NOARP)) 6361 (qeth_get_netdev_flags(card) & IFF_NOARP))
6390 dev->hard_header = qeth_fake_header; 6362 dev->hard_header = qeth_fake_header;
6391 dev->hard_header_parse = NULL; 6363 if (dev->type == ARPHRD_IEEE802_TR)
6364 dev->hard_header_parse = NULL;
6365 else
6366 dev->hard_header_parse = qeth_hard_header_parse;
6392 dev->set_mac_address = qeth_layer2_set_mac_address; 6367 dev->set_mac_address = qeth_layer2_set_mac_address;
6393 dev->flags |= qeth_get_netdev_flags(card); 6368 dev->flags |= qeth_get_netdev_flags(card);
6394 if ((card->options.fake_broadcast) || 6369 if ((card->options.fake_broadcast) ||
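
Note: the new qeth_hard_header_parse hook, wired up above for non-token-ring devices, reports the frame's source MAC to callers of the hook (for example AF_PACKET when filling in sockaddr_ll). Its unconditional core matches the generic Ethernet parser; the following is a from-memory sketch with an assumed name, not a verbatim quote of eth_header_parse.

#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/string.h>

static int demo_header_parse(struct sk_buff *skb, unsigned char *haddr)
{
	struct ethhdr *eth = eth_hdr(skb);

	memcpy(haddr, eth->h_source, ETH_ALEN);	/* report the source MAC */
	return ETH_ALEN;			/* length of the copied address */
}
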
@@ -8235,8 +8210,7 @@ qeth_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
8235 } 8210 }
8236 if (!qeth_add_ip(card, ipaddr)) 8211 if (!qeth_add_ip(card, ipaddr))
8237 kfree(ipaddr); 8212 kfree(ipaddr);
8238 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) 8213 qeth_set_ip_addr_list(card);
8239 schedule_work(&card->kernel_thread_starter);
8240 return rc; 8214 return rc;
8241} 8215}
8242 8216
@@ -8264,8 +8238,7 @@ qeth_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
8264 return; 8238 return;
8265 if (!qeth_delete_ip(card, ipaddr)) 8239 if (!qeth_delete_ip(card, ipaddr))
8266 kfree(ipaddr); 8240 kfree(ipaddr);
8267 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) 8241 qeth_set_ip_addr_list(card);
8268 schedule_work(&card->kernel_thread_starter);
8269} 8242}
8270 8243
8271/* 8244/*
@@ -8308,8 +8281,7 @@ qeth_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
8308 } 8281 }
8309 if (!qeth_add_ip(card, ipaddr)) 8282 if (!qeth_add_ip(card, ipaddr))
8310 kfree(ipaddr); 8283 kfree(ipaddr);
8311 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) 8284 qeth_set_ip_addr_list(card);
8312 schedule_work(&card->kernel_thread_starter);
8313 return 0; 8285 return 0;
8314} 8286}
8315 8287
@@ -8337,8 +8309,7 @@ qeth_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
8337 return; 8309 return;
8338 if (!qeth_delete_ip(card, ipaddr)) 8310 if (!qeth_delete_ip(card, ipaddr))
8339 kfree(ipaddr); 8311 kfree(ipaddr);
8340 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) 8312 qeth_set_ip_addr_list(card);
8341 schedule_work(&card->kernel_thread_starter);
8342} 8313}
8343 8314
8344/** 8315/**
@@ -8380,8 +8351,7 @@ qeth_ip_event(struct notifier_block *this,
8380 default: 8351 default:
8381 break; 8352 break;
8382 } 8353 }
8383 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) 8354 qeth_set_ip_addr_list(card);
8384 schedule_work(&card->kernel_thread_starter);
8385out: 8355out:
8386 return NOTIFY_DONE; 8356 return NOTIFY_DONE;
8387} 8357}
@@ -8433,8 +8403,7 @@ qeth_ip6_event(struct notifier_block *this,
8433 default: 8403 default:
8434 break; 8404 break;
8435 } 8405 }
8436 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) 8406 qeth_set_ip_addr_list(card);
8437 schedule_work(&card->kernel_thread_starter);
8438out: 8407out:
8439 return NOTIFY_DONE; 8408 return NOTIFY_DONE;
8440} 8409}
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index d72df5dae4ee..e16fe361436e 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1629,7 +1629,6 @@ static int nsp_cs_probe(struct pcmcia_device *link)
1629 /* General socket configuration */ 1629 /* General socket configuration */
1630 link->conf.Attributes = CONF_ENABLE_IRQ; 1630 link->conf.Attributes = CONF_ENABLE_IRQ;
1631 link->conf.IntType = INT_MEMORY_AND_IO; 1631 link->conf.IntType = INT_MEMORY_AND_IO;
1632 link->conf.Present = PRESENT_OPTION;
1633 1632
1634 ret = nsp_cs_config(link); 1633 ret = nsp_cs_config(link);
1635 1634
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index fb7acea60286..9fb0ea5c1fb9 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -895,7 +895,6 @@ SYM53C500_probe(struct pcmcia_device *link)
895 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 895 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
896 link->conf.Attributes = CONF_ENABLE_IRQ; 896 link->conf.Attributes = CONF_ENABLE_IRQ;
897 link->conf.IntType = INT_MEMORY_AND_IO; 897 link->conf.IntType = INT_MEMORY_AND_IO;
898 link->conf.Present = PRESENT_OPTION;
899 898
900 return SYM53C500_config(link); 899 return SYM53C500_config(link);
901} /* SYM53C500_attach */ 900} /* SYM53C500_attach */
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 51f3c739f7e1..5261f0af8b10 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -2296,7 +2296,7 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count)
2296 local_irq_restore(flags); 2296 local_irq_restore(flags);
2297} 2297}
2298 2298
2299static int serial8250_console_setup(struct console *co, char *options) 2299static int __init serial8250_console_setup(struct console *co, char *options)
2300{ 2300{
2301 struct uart_port *port; 2301 struct uart_port *port;
2302 int baud = 9600; 2302 int baud = 9600;
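
Note: tagging serial8250_console_setup with __init places it in the init text that the kernel discards after boot, so it may only be reached during initialisation. A tiny, unrelated example of the annotation with an assumed name:

#include <linux/init.h>

static int __init demo_boot_only_setup(void)
{
	/* runs during boot; this code is freed once init completes,
	 * so nothing may call it afterwards */
	return 0;
}
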
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index 9d11a75663e6..3c4b6c243712 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -789,7 +789,9 @@ static struct console mpc52xx_console = {
789static int __init 789static int __init
790mpc52xx_console_init(void) 790mpc52xx_console_init(void)
791{ 791{
792#if defined(CONFIG_PPC_MERGE)
792 mpc52xx_uart_of_enumerate(); 793 mpc52xx_uart_of_enumerate();
794#endif
793 register_console(&mpc52xx_console); 795 register_console(&mpc52xx_console);
794 return 0; 796 return 0;
795} 797}
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index c8999ae58652..2f4d303ee36f 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -170,7 +170,7 @@ config USB_SERIAL_FTDI_SIO
170 170
171config USB_SERIAL_FUNSOFT 171config USB_SERIAL_FUNSOFT
172 tristate "USB Fundamental Software Dongle Driver" 172 tristate "USB Fundamental Software Dongle Driver"
173 depends on USB_SERIAL && !(SPARC || SPARC64) 173 depends on USB_SERIAL
174 ---help--- 174 ---help---
175 Say Y here if you want to use the Fundamental Software dongle. 175 Say Y here if you want to use the Fundamental Software dongle.
176 176
diff --git a/drivers/usb/serial/funsoft.c b/drivers/usb/serial/funsoft.c
index d3be9214c7c1..31501c9361b9 100644
--- a/drivers/usb/serial/funsoft.c
+++ b/drivers/usb/serial/funsoft.c
@@ -32,7 +32,7 @@ static int funsoft_ioctl(struct usb_serial_port *port, struct file *file,
32 dbg("%s - port %d, cmd 0x%04x", __FUNCTION__, port->number, cmd); 32 dbg("%s - port %d, cmd 0x%04x", __FUNCTION__, port->number, cmd);
33 33
34 if (cmd == TCSETSF) { 34 if (cmd == TCSETSF) {
35 if (user_termios_to_kernel_termios(&t, (void __user *)arg)) 35 if (user_termios_to_kernel_termios(&t, (struct termios __user *)arg))
36 return -EFAULT; 36 return -EFAULT;
37 37
38 dbg("%s - iflag:%x oflag:%x cflag:%x lflag:%x", __FUNCTION__, 38 dbg("%s - iflag:%x oflag:%x cflag:%x lflag:%x", __FUNCTION__,
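
Note: the funsoft change keeps the __user annotation on the ioctl argument when handing it to user_termios_to_kernel_termios(), which keeps sparse's address-space checking happy without changing behaviour. A hedged sketch, assuming the struct-termios-era form of that helper and an invented function name:

#include <linux/errno.h>
#include <linux/termios.h>
#include <asm/uaccess.h>

static int demo_fetch_termios(struct termios *kt, unsigned long arg)
{
	/* the cast preserves the __user address-space annotation */
	if (user_termios_to_kernel_termios(kt, (struct termios __user *)arg))
		return -EFAULT;
	return 0;
}
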